crossvul-cpp_data_bad_1238_0
/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */ /* * vboxguest vmm-req and hgcm-call code, VBoxGuestR0LibHGCMInternal.cpp, * VBoxGuestR0LibGenericRequest.cpp and RTErrConvertToErrno.cpp in vbox svn. * * Copyright (C) 2006-2016 Oracle Corporation */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/sizes.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/vbox_err.h> #include <linux/vbox_utils.h> #include "vboxguest_core.h" /* Get the pointer to the first parameter of a HGCM call request. */ #define VMMDEV_HGCM_CALL_PARMS(a) \ ((struct vmmdev_hgcm_function_parameter *)( \ (u8 *)(a) + sizeof(struct vmmdev_hgcm_call))) /* The max parameter buffer size for a user request. */ #define VBG_MAX_HGCM_USER_PARM (24 * SZ_1M) /* The max parameter buffer size for a kernel request. */ #define VBG_MAX_HGCM_KERNEL_PARM (16 * SZ_1M) #define VBG_DEBUG_PORT 0x504 /* This protects vbg_log_buf and serializes VBG_DEBUG_PORT accesses */ static DEFINE_SPINLOCK(vbg_log_lock); static char vbg_log_buf[128]; #define VBG_LOG(name, pr_func) \ void name(const char *fmt, ...) \ { \ unsigned long flags; \ va_list args; \ int i, count; \ \ va_start(args, fmt); \ spin_lock_irqsave(&vbg_log_lock, flags); \ \ count = vscnprintf(vbg_log_buf, sizeof(vbg_log_buf), fmt, args);\ for (i = 0; i < count; i++) \ outb(vbg_log_buf[i], VBG_DEBUG_PORT); \ \ pr_func("%s", vbg_log_buf); \ \ spin_unlock_irqrestore(&vbg_log_lock, flags); \ va_end(args); \ } \ EXPORT_SYMBOL(name) VBG_LOG(vbg_info, pr_info); VBG_LOG(vbg_warn, pr_warn); VBG_LOG(vbg_err, pr_err); #if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG) VBG_LOG(vbg_debug, pr_debug); #endif void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type, u32 requestor) { struct vmmdev_request_header *req; int order = get_order(PAGE_ALIGN(len)); req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order); if (!req) return NULL; memset(req, 0xaa, len); req->size = len; req->version = VMMDEV_REQUEST_HEADER_VERSION; req->request_type = req_type; req->rc = VERR_GENERAL_FAILURE; req->reserved1 = 0; req->requestor = requestor; return req; } void vbg_req_free(void *req, size_t len) { if (!req) return; free_pages((unsigned long)req, get_order(PAGE_ALIGN(len))); } /* Note this function returns a VBox status code, not a negative errno!! */ int vbg_req_perform(struct vbg_dev *gdev, void *req) { unsigned long phys_req = virt_to_phys(req); outl(phys_req, gdev->io_port + VMMDEV_PORT_OFF_REQUEST); /* * The host changes the request as a result of the outl, make sure * the outl and any reads of the req happen in the correct order. 
*/ mb(); return ((struct vmmdev_request_header *)req)->rc; } static bool hgcm_req_done(struct vbg_dev *gdev, struct vmmdev_hgcmreq_header *header) { unsigned long flags; bool done; spin_lock_irqsave(&gdev->event_spinlock, flags); done = header->flags & VMMDEV_HGCM_REQ_DONE; spin_unlock_irqrestore(&gdev->event_spinlock, flags); return done; } int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor, struct vmmdev_hgcm_service_location *loc, u32 *client_id, int *vbox_status) { struct vmmdev_hgcm_connect *hgcm_connect = NULL; int rc; hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect), VMMDEVREQ_HGCM_CONNECT, requestor); if (!hgcm_connect) return -ENOMEM; hgcm_connect->header.flags = 0; memcpy(&hgcm_connect->loc, loc, sizeof(*loc)); hgcm_connect->client_id = 0; rc = vbg_req_perform(gdev, hgcm_connect); if (rc == VINF_HGCM_ASYNC_EXECUTE) wait_event(gdev->hgcm_wq, hgcm_req_done(gdev, &hgcm_connect->header)); if (rc >= 0) { *client_id = hgcm_connect->client_id; rc = hgcm_connect->header.result; } vbg_req_free(hgcm_connect, sizeof(*hgcm_connect)); *vbox_status = rc; return 0; } EXPORT_SYMBOL(vbg_hgcm_connect); int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor, u32 client_id, int *vbox_status) { struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL; int rc; hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect), VMMDEVREQ_HGCM_DISCONNECT, requestor); if (!hgcm_disconnect) return -ENOMEM; hgcm_disconnect->header.flags = 0; hgcm_disconnect->client_id = client_id; rc = vbg_req_perform(gdev, hgcm_disconnect); if (rc == VINF_HGCM_ASYNC_EXECUTE) wait_event(gdev->hgcm_wq, hgcm_req_done(gdev, &hgcm_disconnect->header)); if (rc >= 0) rc = hgcm_disconnect->header.result; vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect)); *vbox_status = rc; return 0; } EXPORT_SYMBOL(vbg_hgcm_disconnect); static u32 hgcm_call_buf_size_in_pages(void *buf, u32 len) { u32 size = PAGE_ALIGN(len + ((unsigned long)buf & ~PAGE_MASK)); return size >> PAGE_SHIFT; } static void hgcm_call_add_pagelist_size(void *buf, u32 len, size_t *extra) { u32 page_count; page_count = hgcm_call_buf_size_in_pages(buf, len); *extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]); } static int hgcm_call_preprocess_linaddr( const struct vmmdev_hgcm_function_parameter *src_parm, void **bounce_buf_ret, size_t *extra) { void *buf, *bounce_buf; bool copy_in; u32 len; int ret; buf = (void *)src_parm->u.pointer.u.linear_addr; len = src_parm->u.pointer.size; copy_in = src_parm->type != VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT; if (len > VBG_MAX_HGCM_USER_PARM) return -E2BIG; bounce_buf = kvmalloc(len, GFP_KERNEL); if (!bounce_buf) return -ENOMEM; if (copy_in) { ret = copy_from_user(bounce_buf, (void __user *)buf, len); if (ret) return -EFAULT; } else { memset(bounce_buf, 0, len); } *bounce_buf_ret = bounce_buf; hgcm_call_add_pagelist_size(bounce_buf, len, extra); return 0; } /** * Preprocesses the HGCM call, validate parameters, alloc bounce buffers and * figure out how much extra storage we need for page lists. * Return: 0 or negative errno value. * @src_parm: Pointer to source function call parameters * @parm_count: Number of function call parameters. * @bounce_bufs_ret: Where to return the allocated bouncebuffer array * @extra: Where to return the extra request space needed for * physical page lists. 
*/ static int hgcm_call_preprocess( const struct vmmdev_hgcm_function_parameter *src_parm, u32 parm_count, void ***bounce_bufs_ret, size_t *extra) { void *buf, **bounce_bufs = NULL; u32 i, len; int ret; for (i = 0; i < parm_count; i++, src_parm++) { switch (src_parm->type) { case VMMDEV_HGCM_PARM_TYPE_32BIT: case VMMDEV_HGCM_PARM_TYPE_64BIT: break; case VMMDEV_HGCM_PARM_TYPE_LINADDR: case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN: case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT: if (!bounce_bufs) { bounce_bufs = kcalloc(parm_count, sizeof(void *), GFP_KERNEL); if (!bounce_bufs) return -ENOMEM; *bounce_bufs_ret = bounce_bufs; } ret = hgcm_call_preprocess_linaddr(src_parm, &bounce_bufs[i], extra); if (ret) return ret; break; case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL: case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN: case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT: buf = (void *)src_parm->u.pointer.u.linear_addr; len = src_parm->u.pointer.size; if (WARN_ON(len > VBG_MAX_HGCM_KERNEL_PARM)) return -E2BIG; hgcm_call_add_pagelist_size(buf, len, extra); break; default: return -EINVAL; } } return 0; } /** * Translates linear address types to page list direction flags. * * Return: page list flags. * @type: The type. */ static u32 hgcm_call_linear_addr_type_to_pagelist_flags( enum vmmdev_hgcm_function_parameter_type type) { switch (type) { default: WARN_ON(1); /* Fall through */ case VMMDEV_HGCM_PARM_TYPE_LINADDR: case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL: return VMMDEV_HGCM_F_PARM_DIRECTION_BOTH; case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN: case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN: return VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST; case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT: case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT: return VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST; } } static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call, struct vmmdev_hgcm_function_parameter *dst_parm, void *buf, u32 len, enum vmmdev_hgcm_function_parameter_type type, u32 *off_extra) { struct vmmdev_hgcm_pagelist *dst_pg_lst; struct page *page; bool is_vmalloc; u32 i, page_count; dst_parm->type = type; if (len == 0) { dst_parm->u.pointer.size = 0; dst_parm->u.pointer.u.linear_addr = 0; return; } dst_pg_lst = (void *)call + *off_extra; page_count = hgcm_call_buf_size_in_pages(buf, len); is_vmalloc = is_vmalloc_addr(buf); dst_parm->type = VMMDEV_HGCM_PARM_TYPE_PAGELIST; dst_parm->u.page_list.size = len; dst_parm->u.page_list.offset = *off_extra; dst_pg_lst->flags = hgcm_call_linear_addr_type_to_pagelist_flags(type); dst_pg_lst->offset_first_page = (unsigned long)buf & ~PAGE_MASK; dst_pg_lst->page_count = page_count; for (i = 0; i < page_count; i++) { if (is_vmalloc) page = vmalloc_to_page(buf); else page = virt_to_page(buf); dst_pg_lst->pages[i] = page_to_phys(page); buf += PAGE_SIZE; } *off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]); } /** * Initializes the call request that we're sending to the host. * @call: The call to initialize. * @client_id: The client ID of the caller. * @function: The function number of the function to call. * @src_parm: Pointer to source function call parameters. * @parm_count: Number of function call parameters. * @bounce_bufs: The bouncebuffer array. 
*/ static void hgcm_call_init_call( struct vmmdev_hgcm_call *call, u32 client_id, u32 function, const struct vmmdev_hgcm_function_parameter *src_parm, u32 parm_count, void **bounce_bufs) { struct vmmdev_hgcm_function_parameter *dst_parm = VMMDEV_HGCM_CALL_PARMS(call); u32 i, off_extra = (uintptr_t)(dst_parm + parm_count) - (uintptr_t)call; void *buf; call->header.flags = 0; call->header.result = VINF_SUCCESS; call->client_id = client_id; call->function = function; call->parm_count = parm_count; for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) { switch (src_parm->type) { case VMMDEV_HGCM_PARM_TYPE_32BIT: case VMMDEV_HGCM_PARM_TYPE_64BIT: *dst_parm = *src_parm; break; case VMMDEV_HGCM_PARM_TYPE_LINADDR: case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN: case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT: hgcm_call_init_linaddr(call, dst_parm, bounce_bufs[i], src_parm->u.pointer.size, src_parm->type, &off_extra); break; case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL: case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN: case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT: buf = (void *)src_parm->u.pointer.u.linear_addr; hgcm_call_init_linaddr(call, dst_parm, buf, src_parm->u.pointer.size, src_parm->type, &off_extra); break; default: WARN_ON(1); dst_parm->type = VMMDEV_HGCM_PARM_TYPE_INVALID; } } } /** * Tries to cancel a pending HGCM call. * * Return: VBox status code */ static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call) { int rc; /* * We use a pre-allocated request for cancellations, which is * protected by cancel_req_mutex. This means that all cancellations * get serialized, this should be fine since they should be rare. */ mutex_lock(&gdev->cancel_req_mutex); gdev->cancel_req->phys_req_to_cancel = virt_to_phys(call); rc = vbg_req_perform(gdev, gdev->cancel_req); mutex_unlock(&gdev->cancel_req_mutex); if (rc == VERR_NOT_IMPLEMENTED) { call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED; call->header.header.request_type = VMMDEVREQ_HGCM_CANCEL; rc = vbg_req_perform(gdev, call); if (rc == VERR_INVALID_PARAMETER) rc = VERR_NOT_FOUND; } if (rc >= 0) call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED; return rc; } /** * Performs the call and completion wait. * Return: 0 or negative errno value. * @gdev: The VBoxGuest device extension. * @call: The call to execute. * @timeout_ms: Timeout in ms. * @leak_it: Where to return the leak it / free it, indicator. * Cancellation fun. */ static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call, u32 timeout_ms, bool *leak_it) { int rc, cancel_rc, ret; long timeout; *leak_it = false; rc = vbg_req_perform(gdev, call); /* * If the call failed, then pretend success. Upper layers will * interpret the result code in the packet. */ if (rc < 0) { call->header.result = rc; return 0; } if (rc != VINF_HGCM_ASYNC_EXECUTE) return 0; /* Host decided to process the request asynchronously, wait for it */ if (timeout_ms == U32_MAX) timeout = MAX_SCHEDULE_TIMEOUT; else timeout = msecs_to_jiffies(timeout_ms); timeout = wait_event_interruptible_timeout( gdev->hgcm_wq, hgcm_req_done(gdev, &call->header), timeout); /* timeout > 0 means hgcm_req_done has returned true, so success */ if (timeout > 0) return 0; if (timeout == 0) ret = -ETIMEDOUT; else ret = -EINTR; /* Cancel the request */ cancel_rc = hgcm_cancel_call(gdev, call); if (cancel_rc >= 0) return ret; /* * Failed to cancel, this should mean that the cancel has lost the * race with normal completion, wait while the host completes it. 
*/ if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED) timeout = msecs_to_jiffies(500); else timeout = msecs_to_jiffies(2000); timeout = wait_event_timeout(gdev->hgcm_wq, hgcm_req_done(gdev, &call->header), timeout); if (WARN_ON(timeout == 0)) { /* We really should never get here */ vbg_err("%s: Call timedout and cancellation failed, leaking the request\n", __func__); *leak_it = true; return ret; } /* The call has completed normally after all */ return 0; } /** * Copies the result of the call back to the caller info structure and user * buffers. * Return: 0 or negative errno value. * @call: HGCM call request. * @dst_parm: Pointer to function call parameters destination. * @parm_count: Number of function call parameters. * @bounce_bufs: The bouncebuffer array. */ static int hgcm_call_copy_back_result( const struct vmmdev_hgcm_call *call, struct vmmdev_hgcm_function_parameter *dst_parm, u32 parm_count, void **bounce_bufs) { const struct vmmdev_hgcm_function_parameter *src_parm = VMMDEV_HGCM_CALL_PARMS(call); void __user *p; int ret; u32 i; /* Copy back parameters. */ for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) { switch (dst_parm->type) { case VMMDEV_HGCM_PARM_TYPE_32BIT: case VMMDEV_HGCM_PARM_TYPE_64BIT: *dst_parm = *src_parm; break; case VMMDEV_HGCM_PARM_TYPE_PAGELIST: dst_parm->u.page_list.size = src_parm->u.page_list.size; break; case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN: case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL: case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN: case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT: dst_parm->u.pointer.size = src_parm->u.pointer.size; break; case VMMDEV_HGCM_PARM_TYPE_LINADDR: case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT: dst_parm->u.pointer.size = src_parm->u.pointer.size; p = (void __user *)dst_parm->u.pointer.u.linear_addr; ret = copy_to_user(p, bounce_bufs[i], min(src_parm->u.pointer.size, dst_parm->u.pointer.size)); if (ret) return -EFAULT; break; default: WARN_ON(1); return -EINVAL; } } return 0; } int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function, u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms, u32 parm_count, int *vbox_status) { struct vmmdev_hgcm_call *call; void **bounce_bufs = NULL; bool leak_it; size_t size; int i, ret; size = sizeof(struct vmmdev_hgcm_call) + parm_count * sizeof(struct vmmdev_hgcm_function_parameter); /* * Validate and buffer the parameters for the call. This also increases * call_size with the amount of extra space needed for page lists. 
*/ ret = hgcm_call_preprocess(parms, parm_count, &bounce_bufs, &size); if (ret) { /* Even on error bounce bufs may still have been allocated */ goto free_bounce_bufs; } call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor); if (!call) { ret = -ENOMEM; goto free_bounce_bufs; } hgcm_call_init_call(call, client_id, function, parms, parm_count, bounce_bufs); ret = vbg_hgcm_do_call(gdev, call, timeout_ms, &leak_it); if (ret == 0) { *vbox_status = call->header.result; ret = hgcm_call_copy_back_result(call, parms, parm_count, bounce_bufs); } if (!leak_it) vbg_req_free(call, size); free_bounce_bufs: if (bounce_bufs) { for (i = 0; i < parm_count; i++) kvfree(bounce_bufs[i]); kfree(bounce_bufs); } return ret; } EXPORT_SYMBOL(vbg_hgcm_call); #ifdef CONFIG_COMPAT int vbg_hgcm_call32( struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function, u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count, int *vbox_status) { struct vmmdev_hgcm_function_parameter *parm64 = NULL; u32 i, size; int ret = 0; /* KISS allocate a temporary request and convert the parameters. */ size = parm_count * sizeof(struct vmmdev_hgcm_function_parameter); parm64 = kzalloc(size, GFP_KERNEL); if (!parm64) return -ENOMEM; for (i = 0; i < parm_count; i++) { switch (parm32[i].type) { case VMMDEV_HGCM_PARM_TYPE_32BIT: parm64[i].type = VMMDEV_HGCM_PARM_TYPE_32BIT; parm64[i].u.value32 = parm32[i].u.value32; break; case VMMDEV_HGCM_PARM_TYPE_64BIT: parm64[i].type = VMMDEV_HGCM_PARM_TYPE_64BIT; parm64[i].u.value64 = parm32[i].u.value64; break; case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT: case VMMDEV_HGCM_PARM_TYPE_LINADDR: case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN: parm64[i].type = parm32[i].type; parm64[i].u.pointer.size = parm32[i].u.pointer.size; parm64[i].u.pointer.u.linear_addr = parm32[i].u.pointer.u.linear_addr; break; default: ret = -EINVAL; } if (ret < 0) goto out_free; } ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms, parm64, parm_count, vbox_status); if (ret < 0) goto out_free; /* Copy back. 
*/ for (i = 0; i < parm_count; i++, parm32++, parm64++) { switch (parm64[i].type) { case VMMDEV_HGCM_PARM_TYPE_32BIT: parm32[i].u.value32 = parm64[i].u.value32; break; case VMMDEV_HGCM_PARM_TYPE_64BIT: parm32[i].u.value64 = parm64[i].u.value64; break; case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT: case VMMDEV_HGCM_PARM_TYPE_LINADDR: case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN: parm32[i].u.pointer.size = parm64[i].u.pointer.size; break; default: WARN_ON(1); ret = -EINVAL; } } out_free: kfree(parm64); return ret; } #endif static const int vbg_status_code_to_errno_table[] = { [-VERR_ACCESS_DENIED] = -EPERM, [-VERR_FILE_NOT_FOUND] = -ENOENT, [-VERR_PROCESS_NOT_FOUND] = -ESRCH, [-VERR_INTERRUPTED] = -EINTR, [-VERR_DEV_IO_ERROR] = -EIO, [-VERR_TOO_MUCH_DATA] = -E2BIG, [-VERR_BAD_EXE_FORMAT] = -ENOEXEC, [-VERR_INVALID_HANDLE] = -EBADF, [-VERR_TRY_AGAIN] = -EAGAIN, [-VERR_NO_MEMORY] = -ENOMEM, [-VERR_INVALID_POINTER] = -EFAULT, [-VERR_RESOURCE_BUSY] = -EBUSY, [-VERR_ALREADY_EXISTS] = -EEXIST, [-VERR_NOT_SAME_DEVICE] = -EXDEV, [-VERR_NOT_A_DIRECTORY] = -ENOTDIR, [-VERR_PATH_NOT_FOUND] = -ENOTDIR, [-VERR_INVALID_NAME] = -ENOENT, [-VERR_IS_A_DIRECTORY] = -EISDIR, [-VERR_INVALID_PARAMETER] = -EINVAL, [-VERR_TOO_MANY_OPEN_FILES] = -ENFILE, [-VERR_INVALID_FUNCTION] = -ENOTTY, [-VERR_SHARING_VIOLATION] = -ETXTBSY, [-VERR_FILE_TOO_BIG] = -EFBIG, [-VERR_DISK_FULL] = -ENOSPC, [-VERR_SEEK_ON_DEVICE] = -ESPIPE, [-VERR_WRITE_PROTECT] = -EROFS, [-VERR_BROKEN_PIPE] = -EPIPE, [-VERR_DEADLOCK] = -EDEADLK, [-VERR_FILENAME_TOO_LONG] = -ENAMETOOLONG, [-VERR_FILE_LOCK_FAILED] = -ENOLCK, [-VERR_NOT_IMPLEMENTED] = -ENOSYS, [-VERR_NOT_SUPPORTED] = -ENOSYS, [-VERR_DIR_NOT_EMPTY] = -ENOTEMPTY, [-VERR_TOO_MANY_SYMLINKS] = -ELOOP, [-VERR_NO_MORE_FILES] = -ENODATA, [-VERR_NO_DATA] = -ENODATA, [-VERR_NET_NO_NETWORK] = -ENONET, [-VERR_NET_NOT_UNIQUE_NAME] = -ENOTUNIQ, [-VERR_NO_TRANSLATION] = -EILSEQ, [-VERR_NET_NOT_SOCKET] = -ENOTSOCK, [-VERR_NET_DEST_ADDRESS_REQUIRED] = -EDESTADDRREQ, [-VERR_NET_MSG_SIZE] = -EMSGSIZE, [-VERR_NET_PROTOCOL_TYPE] = -EPROTOTYPE, [-VERR_NET_PROTOCOL_NOT_AVAILABLE] = -ENOPROTOOPT, [-VERR_NET_PROTOCOL_NOT_SUPPORTED] = -EPROTONOSUPPORT, [-VERR_NET_SOCKET_TYPE_NOT_SUPPORTED] = -ESOCKTNOSUPPORT, [-VERR_NET_OPERATION_NOT_SUPPORTED] = -EOPNOTSUPP, [-VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED] = -EPFNOSUPPORT, [-VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED] = -EAFNOSUPPORT, [-VERR_NET_ADDRESS_IN_USE] = -EADDRINUSE, [-VERR_NET_ADDRESS_NOT_AVAILABLE] = -EADDRNOTAVAIL, [-VERR_NET_DOWN] = -ENETDOWN, [-VERR_NET_UNREACHABLE] = -ENETUNREACH, [-VERR_NET_CONNECTION_RESET] = -ENETRESET, [-VERR_NET_CONNECTION_ABORTED] = -ECONNABORTED, [-VERR_NET_CONNECTION_RESET_BY_PEER] = -ECONNRESET, [-VERR_NET_NO_BUFFER_SPACE] = -ENOBUFS, [-VERR_NET_ALREADY_CONNECTED] = -EISCONN, [-VERR_NET_NOT_CONNECTED] = -ENOTCONN, [-VERR_NET_SHUTDOWN] = -ESHUTDOWN, [-VERR_NET_TOO_MANY_REFERENCES] = -ETOOMANYREFS, [-VERR_TIMEOUT] = -ETIMEDOUT, [-VERR_NET_CONNECTION_REFUSED] = -ECONNREFUSED, [-VERR_NET_HOST_DOWN] = -EHOSTDOWN, [-VERR_NET_HOST_UNREACHABLE] = -EHOSTUNREACH, [-VERR_NET_ALREADY_IN_PROGRESS] = -EALREADY, [-VERR_NET_IN_PROGRESS] = -EINPROGRESS, [-VERR_MEDIA_NOT_PRESENT] = -ENOMEDIUM, [-VERR_MEDIA_NOT_RECOGNIZED] = -EMEDIUMTYPE, }; int vbg_status_code_to_errno(int rc) { if (rc >= 0) return 0; rc = -rc; if (rc >= ARRAY_SIZE(vbg_status_code_to_errno_table) || vbg_status_code_to_errno_table[rc] == 0) { vbg_warn("%s: Unhandled err %d\n", __func__, -rc); return -EPROTO; } return vbg_status_code_to_errno_table[rc]; } EXPORT_SYMBOL(vbg_status_code_to_errno);
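The exported helpers in the sample above (vbg_hgcm_connect, vbg_hgcm_call, vbg_hgcm_disconnect) return a negative errno for transport or allocation failures and hand the VBox status of the call itself back through the vbox_status out-parameter, which vbg_status_code_to_errno can translate. The following is a minimal, hypothetical in-kernel caller sketch; the service-location setup, function number and timeout are illustrative assumptions, not taken from this file.

/*
 * Hypothetical caller sketch for the HGCM helpers above.
 * The vmmdev_hgcm_service_location initialisation is an assumption;
 * its real layout comes from the VMMDev headers.
 */
static int example_hgcm_roundtrip(struct vbg_dev *gdev, u32 requestor)
{
	struct vmmdev_hgcm_service_location loc = { 0 }; /* type/name filled from the VMMDev headers */
	struct vmmdev_hgcm_function_parameter parm = {
		.type = VMMDEV_HGCM_PARM_TYPE_32BIT,
		.u.value32 = 0,
	};
	u32 client_id;
	int vbox_status, ret;

	ret = vbg_hgcm_connect(gdev, requestor, &loc, &client_id, &vbox_status);
	if (ret < 0)
		return ret;					/* transport / allocation failure */
	if (vbox_status < 0)
		return vbg_status_code_to_errno(vbox_status);	/* host rejected the connect */

	/* Function number 1 and the 5000 ms timeout are purely illustrative. */
	ret = vbg_hgcm_call(gdev, requestor, client_id, 1, 5000,
			    &parm, 1, &vbox_status);
	if (ret == 0 && vbox_status < 0)
		ret = vbg_status_code_to_errno(vbox_status);

	vbg_hgcm_disconnect(gdev, requestor, client_id, &vbox_status);
	return ret;
}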
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1238_0
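In the sample above, hgcm_call_buf_size_in_pages() and hgcm_call_add_pagelist_size() size the physical page list appended to the request: the buffer length plus its offset into the first page is rounded up to whole pages, and the extra request space is offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]). Below is a small stand-alone sketch of the same page-count arithmetic, assuming 4 KiB pages and purely illustrative values.

/* Stand-alone illustration of the page-count arithmetic used above (assumes 4 KiB pages). */
#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SIZE 4096UL
#define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))

static unsigned long ex_buf_size_in_pages(uintptr_t buf, unsigned long len)
{
	unsigned long in_page_offset = buf & ~EX_PAGE_MASK;	/* offset of buf within its page */
	unsigned long size = (len + in_page_offset + EX_PAGE_SIZE - 1) & EX_PAGE_MASK;

	return size / EX_PAGE_SIZE;
}

int main(void)
{
	/* A 100-byte buffer starting 4080 bytes into a page straddles two pages. */
	printf("pages = %lu\n", ex_buf_size_in_pages((uintptr_t)0x1000ff0, 100));	/* prints 2 */
	return 0;
}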
crossvul-cpp_data_good_4773_2
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % X X M M L % % X X MM MM L % % X M M M L % % X X M M L % % X X M M LLLLL % % % % TTTTT RRRR EEEEE EEEEE % % T R R E E % % T RRRR EEE EEE % % T R R E E % % T R R EEEEE EEEEE % % % % % % XML Tree Methods % % % % Software Design % % Cristy % % December 2004 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % This module implements the standard handy xml-tree methods for storing and % retrieving nodes and attributes from an XML string. % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image-private.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/semaphore.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/token-private.h" #include "magick/utility.h" #include "magick/utility-private.h" #include "magick/xml-tree.h" #include "magick/xml-tree-private.h" /* Define declarations. */ #define NumberPredefinedEntities 10 #define XMLWhitespace "\t\r\n " /* Typedef declarations. */ struct _XMLTreeInfo { char *tag, **attributes, *content; size_t offset; XMLTreeInfo *parent, *next, *sibling, *ordered, *child; MagickBooleanType debug; SemaphoreInfo *semaphore; size_t signature; }; typedef struct _XMLTreeRoot XMLTreeRoot; struct _XMLTreeRoot { struct _XMLTreeInfo root; XMLTreeInfo *node; MagickBooleanType standalone; char ***processing_instructions, **entities, ***attributes; MagickBooleanType debug; SemaphoreInfo *semaphore; size_t signature; }; /* Global declarations. */ static char *sentinel[] = { (char *) NULL }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d d C h i l d T o X M L T r e e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AddChildToXMLTree() adds a child tag at an offset relative to the start of % the parent tag's character content. Return the child tag. % % The format of the AddChildToXMLTree method is: % % XMLTreeInfo *AddChildToXMLTree(XMLTreeInfo *xml_info,const char *tag, % const size_t offset) % % A description of each parameter follows: % % o xml_info: the xml info. % % o tag: the tag. % % o offset: the tag offset. 
% */ MagickExport XMLTreeInfo *AddChildToXMLTree(XMLTreeInfo *xml_info, const char *tag,const size_t offset) { XMLTreeInfo *child; if (xml_info == (XMLTreeInfo *) NULL) return((XMLTreeInfo *) NULL); child=(XMLTreeInfo *) AcquireMagickMemory(sizeof(*child)); if (child == (XMLTreeInfo *) NULL) return((XMLTreeInfo *) NULL); (void) ResetMagickMemory(child,0,sizeof(*child)); child->tag=ConstantString(tag); child->attributes=sentinel; child->content=ConstantString(""); child->debug=IsEventLogging(); child->signature=MagickSignature; return(InsertTagIntoXMLTree(xml_info,child,offset)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d d P a t h T o X M L T r e e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AddPathToXMLTree() adds a child tag at an offset relative to the start of % the parent tag's character content. This method returns the child tag. % % The format of the AddPathToXMLTree method is: % % XMLTreeInfo *AddPathToXMLTree(XMLTreeInfo *xml_info,const char *path, % const size_t offset) % % A description of each parameter follows: % % o xml_info: the xml info. % % o path: the path. % % o offset: the tag offset. % */ MagickExport XMLTreeInfo *AddPathToXMLTree(XMLTreeInfo *xml_info, const char *path,const size_t offset) { char **components, subnode[MaxTextExtent], tag[MaxTextExtent]; register ssize_t i; size_t number_components; ssize_t j; XMLTreeInfo *child, *node; assert(xml_info != (XMLTreeInfo *) NULL); assert((xml_info->signature == MagickSignature) || (((XMLTreeRoot *) xml_info)->signature == MagickSignature)); if (xml_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); node=xml_info; components=GetPathComponents(path,&number_components); if (components == (char **) NULL) return((XMLTreeInfo *) NULL); for (i=0; i < (ssize_t) number_components; i++) { GetPathComponent(components[i],SubimagePath,subnode); GetPathComponent(components[i],CanonicalPath,tag); child=GetXMLTreeChild(node,tag); if (child == (XMLTreeInfo *) NULL) child=AddChildToXMLTree(node,tag,offset); node=child; if (node == (XMLTreeInfo *) NULL) break; for (j=(ssize_t) StringToLong(subnode)-1; j > 0; j--) { node=GetXMLTreeOrdered(node); if (node == (XMLTreeInfo *) NULL) break; } if (node == (XMLTreeInfo *) NULL) break; components[i]=DestroyString(components[i]); } for ( ; i < (ssize_t) number_components; i++) components[i]=DestroyString(components[i]); components=(char **) RelinquishMagickMemory(components); return(node); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C a n o n i c a l X M L C o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CanonicalXMLContent() converts text to canonical XML content by converting % to UTF-8, substituting predefined entities, wrapping as CDATA, or encoding % as base-64 as required. % % The format of the CanonicalXMLContent method is: % % % char *CanonicalXMLContent(const char *content, % const MagickBooleanType pedantic) % % A description of each parameter follows: % % o content: the content. % % o pedantic: if true, replace newlines and tabs with their respective % entities. 
% */ MagickExport char *CanonicalXMLContent(const char *content, const MagickBooleanType pedantic) { char *base64, *canonical_content; register const unsigned char *p; register ssize_t i; size_t extent, length; unsigned char *utf8; utf8=ConvertLatin1ToUTF8((const unsigned char *) content); if (utf8 == (unsigned char *) NULL) return((char *) NULL); for (p=utf8; *p != '\0'; p++) if ((*p < 0x20) && (*p != 0x09) && (*p != 0x0a) && (*p != 0x0d)) break; if (*p != '\0') { /* String is binary, base64-encode it. */ base64=Base64Encode(utf8,strlen((char *) utf8),&length); utf8=(unsigned char *) RelinquishMagickMemory(utf8); if (base64 == (char *) NULL) return((char *) NULL); canonical_content=AcquireString("<base64>"); (void) ConcatenateString(&canonical_content,base64); base64=DestroyString(base64); (void) ConcatenateString(&canonical_content,"</base64>"); return(canonical_content); } /* Substitute predefined entities. */ i=0; canonical_content=AcquireString((char *) NULL); extent=MaxTextExtent; for (p=utf8; *p != '\0'; p++) { if ((i+MaxTextExtent) > (ssize_t) extent) { extent+=MaxTextExtent; canonical_content=(char *) ResizeQuantumMemory(canonical_content,extent, sizeof(*canonical_content)); if (canonical_content == (char *) NULL) return(canonical_content); } switch (*p) { case '&': { i+=FormatLocaleString(canonical_content+i,extent,"&amp;"); break; } case '<': { i+=FormatLocaleString(canonical_content+i,extent,"&lt;"); break; } case '>': { i+=FormatLocaleString(canonical_content+i,extent,"&gt;"); break; } case '"': { i+=FormatLocaleString(canonical_content+i,extent,"&quot;"); break; } case '\n': { if (pedantic == MagickFalse) { canonical_content[i++]=(char) (*p); break; } i+=FormatLocaleString(canonical_content+i,extent,"&#xA;"); break; } case '\t': { if (pedantic == MagickFalse) { canonical_content[i++]=(char) (*p); break; } i+=FormatLocaleString(canonical_content+i,extent,"&#x9;"); break; } case '\r': { i+=FormatLocaleString(canonical_content+i,extent,"&#xD;"); break; } default: { canonical_content[i++]=(char) (*p); break; } } } canonical_content[i]='\0'; utf8=(unsigned char *) RelinquishMagickMemory(utf8); return(canonical_content); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y X M L T r e e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyXMLTree() destroys the xml-tree. % % The format of the DestroyXMLTree method is: % % XMLTreeInfo *DestroyXMLTree(XMLTreeInfo *xml_info) % % A description of each parameter follows: % % o xml_info: the xml info. % */ static char **DestroyXMLTreeAttributes(char **attributes) { register ssize_t i; /* Destroy a tag attribute list. */ if ((attributes == (char **) NULL) || (attributes == sentinel)) return((char **) NULL); for (i=0; attributes[i] != (char *) NULL; i+=2) { /* Destroy attribute tag and value. 
*/ if (attributes[i] != (char *) NULL) attributes[i]=DestroyString(attributes[i]); if (attributes[i+1] != (char *) NULL) attributes[i+1]=DestroyString(attributes[i+1]); } attributes=(char **) RelinquishMagickMemory(attributes); return((char **) NULL); } static void DestroyXMLTreeChild(XMLTreeInfo *xml_info) { XMLTreeInfo *child, *node; child=xml_info->child; while(child != (XMLTreeInfo *) NULL) { node=child; child=node->child; node->child=(XMLTreeInfo *) NULL; (void) DestroyXMLTree(node); } } static void DestroyXMLTreeOrdered(XMLTreeInfo *xml_info) { XMLTreeInfo *node, *ordered; ordered=xml_info->ordered; while(ordered != (XMLTreeInfo *) NULL) { node=ordered; ordered=node->ordered; node->ordered=(XMLTreeInfo *) NULL; (void) DestroyXMLTree(node); } } static void DestroyXMLTreeRoot(XMLTreeInfo *xml_info) { char **attributes; register ssize_t i; ssize_t j; XMLTreeRoot *root; assert(xml_info != (XMLTreeInfo *) NULL); assert((xml_info->signature == MagickSignature) || (((XMLTreeRoot *) xml_info)->signature == MagickSignature)); if (xml_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (xml_info->parent != (XMLTreeInfo *) NULL) return; /* Free root tag allocations. */ root=(XMLTreeRoot *) xml_info; for (i=NumberPredefinedEntities; root->entities[i] != (char *) NULL; i+=2) root->entities[i+1]=DestroyString(root->entities[i+1]); root->entities=(char **) RelinquishMagickMemory(root->entities); for (i=0; root->attributes[i] != (char **) NULL; i++) { attributes=root->attributes[i]; if (attributes[0] != (char *) NULL) attributes[0]=DestroyString(attributes[0]); for (j=1; attributes[j] != (char *) NULL; j+=3) { if (attributes[j] != (char *) NULL) attributes[j]=DestroyString(attributes[j]); if (attributes[j+1] != (char *) NULL) attributes[j+1]=DestroyString(attributes[j+1]); if (attributes[j+2] != (char *) NULL) attributes[j+2]=DestroyString(attributes[j+2]); } attributes=(char **) RelinquishMagickMemory(attributes); } if (root->attributes[0] != (char **) NULL) root->attributes=(char ***) RelinquishMagickMemory(root->attributes); if (root->processing_instructions[0] != (char **) NULL) { for (i=0; root->processing_instructions[i] != (char **) NULL; i++) { for (j=0; root->processing_instructions[i][j] != (char *) NULL; j++) root->processing_instructions[i][j]=DestroyString( root->processing_instructions[i][j]); root->processing_instructions[i][j+1]=DestroyString( root->processing_instructions[i][j+1]); root->processing_instructions[i]=(char **) RelinquishMagickMemory( root->processing_instructions[i]); } root->processing_instructions=(char ***) RelinquishMagickMemory( root->processing_instructions); } } MagickExport XMLTreeInfo *DestroyXMLTree(XMLTreeInfo *xml_info) { assert(xml_info != (XMLTreeInfo *) NULL); assert((xml_info->signature == MagickSignature) || (((XMLTreeRoot *) xml_info)->signature == MagickSignature)); if (xml_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); DestroyXMLTreeChild(xml_info); DestroyXMLTreeOrdered(xml_info); DestroyXMLTreeRoot(xml_info); xml_info->attributes=DestroyXMLTreeAttributes(xml_info->attributes); xml_info->content=DestroyString(xml_info->content); xml_info->tag=DestroyString(xml_info->tag); xml_info=(XMLTreeInfo *) RelinquishMagickMemory(xml_info); return((XMLTreeInfo *) NULL); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F i l e T o X M L % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
FileToXML() returns the contents of a file as a XML string. % % The format of the FileToXML method is: % % char *FileToXML(const char *filename,const size_t extent) % % A description of each parameter follows: % % o filename: the filename. % % o extent: Maximum length of the string. % */ MagickPrivate char *FileToXML(const char *filename,const size_t extent) { char *xml; int file; MagickOffsetType offset; register size_t i; size_t length; ssize_t count; void *map; assert(filename != (const char *) NULL); length=0; file=fileno(stdin); if (LocaleCompare(filename,"-") != 0) file=open_utf8(filename,O_RDONLY | O_BINARY,0); if (file == -1) return((char *) NULL); offset=(MagickOffsetType) lseek(file,0,SEEK_END); count=0; if ((file == fileno(stdin)) || (offset < 0) || (offset != (MagickOffsetType) ((ssize_t) offset))) { size_t quantum; struct stat file_stats; /* Stream is not seekable. */ offset=(MagickOffsetType) lseek(file,0,SEEK_SET); quantum=(size_t) MagickMaxBufferExtent; if ((fstat(file,&file_stats) == 0) && (file_stats.st_size > 0)) quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent); xml=(char *) AcquireQuantumMemory(quantum,sizeof(*xml)); for (i=0; xml != (char *) NULL; i+=count) { count=read(file,xml+i,quantum); if (count <= 0) { count=0; if (errno != EINTR) break; } if (~((size_t) i) < (quantum+1)) { xml=(char *) RelinquishMagickMemory(xml); break; } xml=(char *) ResizeQuantumMemory(xml,i+quantum+1,sizeof(*xml)); if ((size_t) (i+count) >= extent) break; } if (LocaleCompare(filename,"-") != 0) file=close(file); if (xml == (char *) NULL) return((char *) NULL); if (file == -1) { xml=(char *) RelinquishMagickMemory(xml); return((char *) NULL); } length=(size_t) MagickMin(i+count,extent); xml[length]='\0'; return(xml); } length=(size_t) MagickMin(offset,(MagickOffsetType) extent); xml=(char *) NULL; if (~length >= (MaxTextExtent-1)) xml=(char *) AcquireQuantumMemory(length+MaxTextExtent,sizeof(*xml)); if (xml == (char *) NULL) { file=close(file); return((char *) NULL); } map=MapBlob(file,ReadMode,0,length); if (map != (char *) NULL) { (void) memcpy(xml,map,length); (void) UnmapBlob(map,length); } else { (void) lseek(file,0,SEEK_SET); for (i=0; i < length; i+=count) { count=read(file,xml+i,(size_t) MagickMin(length-i,SSIZE_MAX)); if (count <= 0) { count=0; if (errno != EINTR) break; } } if (i < length) { file=close(file)-1; xml=(char *) RelinquishMagickMemory(xml); return((char *) NULL); } } xml[length]='\0'; if (LocaleCompare(filename,"-") != 0) file=close(file); if (file == -1) xml=(char *) RelinquishMagickMemory(xml); return(xml); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t N e x t X M L T r e e T a g % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNextXMLTreeTag() returns the next tag or NULL if not found. % % The format of the GetNextXMLTreeTag method is: % % XMLTreeInfo *GetNextXMLTreeTag(XMLTreeInfo *xml_info) % % A description of each parameter follows: % % o xml_info: the xml info. 
% */ MagickExport XMLTreeInfo *GetNextXMLTreeTag(XMLTreeInfo *xml_info) { assert(xml_info != (XMLTreeInfo *) NULL); assert((xml_info->signature == MagickSignature) || (((XMLTreeRoot *) xml_info)->signature == MagickSignature)); if (xml_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); return(xml_info->next); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t X M L T r e e A t t r i b u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetXMLTreeAttribute() returns the value of the attribute tag with the % specified tag if found, otherwise NULL. % % The format of the GetXMLTreeAttribute method is: % % const char *GetXMLTreeAttribute(XMLTreeInfo *xml_info,const char *tag) % % A description of each parameter follows: % % o xml_info: the xml info. % % o tag: the attribute tag. % */ MagickExport const char *GetXMLTreeAttribute(XMLTreeInfo *xml_info, const char *tag) { register ssize_t i; ssize_t j; XMLTreeRoot *root; assert(xml_info != (XMLTreeInfo *) NULL); assert((xml_info->signature == MagickSignature) || (((XMLTreeRoot *) xml_info)->signature == MagickSignature)); if (xml_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (xml_info->attributes == (char **) NULL) return((const char *) NULL); i=0; while ((xml_info->attributes[i] != (char *) NULL) && (strcmp(xml_info->attributes[i],tag) != 0)) i+=2; if (xml_info->attributes[i] != (char *) NULL) return(xml_info->attributes[i+1]); root=(XMLTreeRoot*) xml_info; while (root->root.parent != (XMLTreeInfo *) NULL) root=(XMLTreeRoot *) root->root.parent; i=0; while ((root->attributes[i] != (char **) NULL) && (strcmp(root->attributes[i][0],xml_info->tag) != 0)) i++; if (root->attributes[i] == (char **) NULL) return((const char *) NULL); j=1; while ((root->attributes[i][j] != (char *) NULL) && (strcmp(root->attributes[i][j],tag) != 0)) j+=3; if (root->attributes[i][j] == (char *) NULL) return((const char *) NULL); return(root->attributes[i][j+1]); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t X M L T r e e A t t r i b u t e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetXMLTreeAttributes() injects all attributes associated with the current % tag in the specified splay-tree. % % The format of the GetXMLTreeAttributes method is: % % MagickBooleanType GetXMLTreeAttributes(const XMLTreeInfo *xml_info, % SplayTreeInfo *attributes) % % A description of each parameter follows: % % o xml_info: the xml info. % % o attributes: the attribute splay-tree. 
% */ MagickExport MagickBooleanType GetXMLTreeAttributes(const XMLTreeInfo *xml_info, SplayTreeInfo *attributes) { register ssize_t i; assert(xml_info != (XMLTreeInfo *) NULL); assert((xml_info->signature == MagickSignature) || (((const XMLTreeRoot *) xml_info)->signature == MagickSignature)); if (xml_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(attributes != (SplayTreeInfo *) NULL); if (xml_info->attributes == (char **) NULL) return(MagickTrue); i=0; while (xml_info->attributes[i] != (char *) NULL) { (void) AddValueToSplayTree(attributes, ConstantString(xml_info->attributes[i]), ConstantString(xml_info->attributes[i+1])); i+=2; } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t X M L T r e e C h i l d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetXMLTreeChild() returns the first child tag with the specified tag if % found, otherwise NULL. % % The format of the GetXMLTreeChild method is: % % XMLTreeInfo *GetXMLTreeChild(XMLTreeInfo *xml_info,const char *tag) % % A description of each parameter follows: % % o xml_info: the xml info. % */ MagickExport XMLTreeInfo *GetXMLTreeChild(XMLTreeInfo *xml_info,const char *tag) { XMLTreeInfo *child; assert(xml_info != (XMLTreeInfo *) NULL); assert((xml_info->signature == MagickSignature) || (((XMLTreeRoot *) xml_info)->signature == MagickSignature)); if (xml_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); child=xml_info->child; if (tag != (const char *) NULL) while ((child != (XMLTreeInfo *) NULL) && (strcmp(child->tag,tag) != 0)) child=child->sibling; return(child); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t X M L T r e e C o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetXMLTreeContent() returns any content associated with specified % xml-tree node. % % The format of the GetXMLTreeContent method is: % % const char *GetXMLTreeContent(XMLTreeInfo *xml_info) % % A description of each parameter follows: % % o xml_info: the xml info. % */ MagickExport const char *GetXMLTreeContent(XMLTreeInfo *xml_info) { assert(xml_info != (XMLTreeInfo *) NULL); assert((xml_info->signature == MagickSignature) || (((XMLTreeRoot *) xml_info)->signature == MagickSignature)); if (xml_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); return(xml_info->content); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t X M L T r e e O r d e r e d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetXMLTreeOrdered() returns the next ordered node if found, otherwise NULL. % % The format of the GetXMLTreeOrdered method is: % % XMLTreeInfo *GetXMLTreeOrdered(XMLTreeInfo *xml_info) % % A description of each parameter follows: % % o xml_info: the xml info. 
% */ MagickExport XMLTreeInfo *GetXMLTreeOrdered(XMLTreeInfo *xml_info) { assert(xml_info != (XMLTreeInfo *) NULL); assert((xml_info->signature == MagickSignature) || (((XMLTreeRoot *) xml_info)->signature == MagickSignature)); if (xml_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); return(xml_info->ordered); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t X M L T r e e P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetXMLTreePath() traverses the XML-tree as defined by the specified path % and returns the node if found, otherwise NULL. % % The format of the GetXMLTreePath method is: % % XMLTreeInfo *GetXMLTreePath(XMLTreeInfo *xml_info,const char *path) % % A description of each parameter follows: % % o xml_info: the xml info. % % o path: the path (e.g. property/elapsed-time). % */ MagickExport XMLTreeInfo *GetXMLTreePath(XMLTreeInfo *xml_info,const char *path) { char **components, subnode[MaxTextExtent], tag[MaxTextExtent]; register ssize_t i; size_t number_components; ssize_t j; XMLTreeInfo *node; assert(xml_info != (XMLTreeInfo *) NULL); assert((xml_info->signature == MagickSignature) || (((XMLTreeRoot *) xml_info)->signature == MagickSignature)); if (xml_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); node=xml_info; components=GetPathComponents(path,&number_components); if (components == (char **) NULL) return((XMLTreeInfo *) NULL); for (i=0; i < (ssize_t) number_components; i++) { GetPathComponent(components[i],SubimagePath,subnode); GetPathComponent(components[i],CanonicalPath,tag); node=GetXMLTreeChild(node,tag); if (node == (XMLTreeInfo *) NULL) break; for (j=(ssize_t) StringToLong(subnode)-1; j > 0; j--) { node=GetXMLTreeOrdered(node); if (node == (XMLTreeInfo *) NULL) break; } if (node == (XMLTreeInfo *) NULL) break; components[i]=DestroyString(components[i]); } for ( ; i < (ssize_t) number_components; i++) components[i]=DestroyString(components[i]); components=(char **) RelinquishMagickMemory(components); return(node); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t X M L T r e e P r o c e s s i n g I n s t r u c t i o n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetXMLTreeProcessingInstructions() returns a null terminated array of % processing instructions for the given target. % % The format of the GetXMLTreeProcessingInstructions method is: % % const char **GetXMLTreeProcessingInstructions(XMLTreeInfo *xml_info, % const char *target) % % A description of each parameter follows: % % o xml_info: the xml info. 
% */ MagickExport const char **GetXMLTreeProcessingInstructions( XMLTreeInfo *xml_info,const char *target) { register ssize_t i; XMLTreeRoot *root; assert(xml_info != (XMLTreeInfo *) NULL); assert((xml_info->signature == MagickSignature) || (((XMLTreeRoot *) xml_info)->signature == MagickSignature)); if (xml_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); root=(XMLTreeRoot *) xml_info; while (root->root.parent != (XMLTreeInfo *) NULL) root=(XMLTreeRoot *) root->root.parent; i=0; while ((root->processing_instructions[i] != (char **) NULL) && (strcmp(root->processing_instructions[i][0],target) != 0)) i++; if (root->processing_instructions[i] == (char **) NULL) return((const char **) sentinel); return((const char **) (root->processing_instructions[i]+1)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t X M L T r e e S i b l i n g % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetXMLTreeSibling() returns the node sibling if found, otherwise NULL. % % The format of the GetXMLTreeSibling method is: % % XMLTreeInfo *GetXMLTreeSibling(XMLTreeInfo *xml_info) % % A description of each parameter follows: % % o xml_info: the xml info. % */ MagickExport XMLTreeInfo *GetXMLTreeSibling(XMLTreeInfo *xml_info) { assert(xml_info != (XMLTreeInfo *) NULL); assert((xml_info->signature == MagickSignature) || (((XMLTreeRoot *) xml_info)->signature == MagickSignature)); if (xml_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); return(xml_info->sibling); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t X M L T r e e T a g % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetXMLTreeTag() returns the tag associated with specified xml-tree node. % % The format of the GetXMLTreeTag method is: % % const char *GetXMLTreeTag(XMLTreeInfo *xml_info) % % A description of each parameter follows: % % o xml_info: the xml info. % */ MagickExport const char *GetXMLTreeTag(XMLTreeInfo *xml_info) { assert(xml_info != (XMLTreeInfo *) NULL); assert((xml_info->signature == MagickSignature) || (((XMLTreeRoot *) xml_info)->signature == MagickSignature)); if (xml_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); return(xml_info->tag); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n s e r t I n t o T a g X M L T r e e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InsertTagIntoXMLTree() inserts a tag at an offset relative to the start of % the parent tag's character content. This method returns the child tag. % % The format of the InsertTagIntoXMLTree method is: % % XMLTreeInfo *InsertTagIntoXMLTree(XMLTreeInfo *xml_info, % XMLTreeInfo *child,const size_t offset) % % A description of each parameter follows: % % o xml_info: the xml info. % % o child: the child tag. % % o offset: the tag offset. 
% */ MagickExport XMLTreeInfo *InsertTagIntoXMLTree(XMLTreeInfo *xml_info, XMLTreeInfo *child,const size_t offset) { XMLTreeInfo *head, *node, *previous; child->ordered=(XMLTreeInfo *) NULL; child->sibling=(XMLTreeInfo *) NULL; child->next=(XMLTreeInfo *) NULL; child->offset=offset; child->parent=xml_info; if (xml_info->child == (XMLTreeInfo *) NULL) { xml_info->child=child; return(child); } head=xml_info->child; if (head->offset > offset) { child->ordered=head; xml_info->child=child; } else { node=head; while ((node->ordered != (XMLTreeInfo *) NULL) && (node->ordered->offset <= offset)) node=node->ordered; child->ordered=node->ordered; node->ordered=child; } previous=(XMLTreeInfo *) NULL; node=head; while ((node != (XMLTreeInfo *) NULL) && (strcmp(node->tag,child->tag) != 0)) { previous=node; node=node->sibling; } if ((node != (XMLTreeInfo *) NULL) && (node->offset <= offset)) { while ((node->next != (XMLTreeInfo *) NULL) && (node->next->offset <= offset)) node=node->next; child->next=node->next; node->next=child; } else { if ((previous != (XMLTreeInfo *) NULL) && (node != (XMLTreeInfo *) NULL)) previous->sibling=node->sibling; child->next=node; previous=(XMLTreeInfo *) NULL; node=head; while ((node != (XMLTreeInfo *) NULL) && (node->offset <= offset)) { previous=node; node=node->sibling; } child->sibling=node; if (previous != (XMLTreeInfo *) NULL) previous->sibling=child; } return(child); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w X M L T r e e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewXMLTree() returns a XMLTreeInfo xml-tree as defined by the specified % XML string. % % The format of the NewXMLTree method is: % % XMLTreeInfo *NewXMLTree(const char *xml,ExceptionInfo *exception) % % A description of each parameter follows: % % o xml: The XML string. % % o exception: return any errors or warnings in this structure. % */ static char *ConvertUTF16ToUTF8(const char *content,size_t *length) { char *utf8; int bits, byte, c, encoding; register ssize_t i; size_t extent; ssize_t j; utf8=(char *) AcquireQuantumMemory(*length+1,sizeof(*utf8)); if (utf8 == (char *) NULL) return((char *) NULL); encoding=(*content == '\xFE') ? 1 : (*content == '\xFF') ? 0 : -1; if (encoding == -1) { /* Already UTF-8. */ (void) CopyMagickMemory(utf8,content,*length*sizeof(*utf8)); utf8[*length]='\0'; return(utf8); } j=0; extent=(*length); for (i=2; i < (ssize_t) (*length-1); i+=2) { c=(encoding != 0) ? ((content[i] & 0xff) << 8) | (content[i+1] & 0xff) : ((content[i+1] & 0xff) << 8) | (content[i] & 0xff); if ((c >= 0xd800) && (c <= 0xdfff) && ((i+=2) < (ssize_t) (*length-1))) { byte=(encoding != 0) ? ((content[i] & 0xff) << 8) | (content[i+1] & 0xff) : ((content[i+1] & 0xff) << 8) | (content[i] & 0xff); c=(((c & 0x3ff) << 10) | (byte & 0x3ff))+0x10000; } if ((size_t) (j+MaxTextExtent) > extent) { extent=(size_t) j+MaxTextExtent; utf8=(char *) ResizeQuantumMemory(utf8,extent,sizeof(*utf8)); if (utf8 == (char *) NULL) return(utf8); } if (c < 0x80) { utf8[j]=c; j++; continue; } /* Multi-byte UTF-8 sequence. 
*/ byte=c; for (bits=0; byte != 0; byte/=2) bits++; bits=(bits-2)/5; utf8[j++]=(0xFF << (7-bits)) | (c >> (6*bits)); while (bits != 0) { bits--; utf8[j]=0x80 | ((c >> (6*bits)) & 0x3f); j++; } } *length=(size_t) j; utf8=(char *) ResizeQuantumMemory(utf8,*length,sizeof(*utf8)); if (utf8 != (char *) NULL) utf8[*length]='\0'; return(utf8); } static char *ParseEntities(char *xml,char **entities,int state) { char *entity; int byte, c; register char *p, *q; register ssize_t i; size_t extent, length; ssize_t offset; /* Normalize line endings. */ p=xml; q=xml; for ( ; *xml != '\0'; xml++) while (*xml == '\r') { *(xml++)='\n'; if (*xml == '\n') (void) CopyMagickMemory(xml,xml+1,strlen(xml)); } for (xml=p; ; ) { while ((*xml != '\0') && (*xml != '&') && ((*xml != '%') || (state != '%')) && (isspace((int) ((unsigned char) *xml) == 0))) xml++; if (*xml == '\0') break; /* States include: '&' for general entity decoding '%' for parameter entity decoding 'c' for CDATA sections ' ' for attributes normalization '*' for non-CDATA attributes normalization */ if ((state != 'c') && (strncmp(xml,"&#",2) == 0)) { /* Character reference. */ if (xml[2] != 'x') c=strtol(xml+2,&entity,10); /* base 10 */ else c=strtol(xml+3,&entity,16); /* base 16 */ if ((c == 0) || (*entity != ';')) { /* Not a character reference. */ xml++; continue; } if (c < 0x80) *(xml++)=c; else { /* Multi-byte UTF-8 sequence. */ byte=c; for (i=0; byte != 0; byte/=2) i++; i=(i-2)/5; *xml=(char) ((0xFF << (7-i)) | (c >> (6*i))); xml++; while (i != 0) { i--; *xml=(char) (0x80 | ((c >> (6*i)) & 0x3F)); xml++; } } (void) CopyMagickMemory(xml,strchr(xml,';')+1,strlen(strchr(xml,';'))); } else if (((*xml == '&') && ((state == '&') || (state == ' ') || (state == '*'))) || ((state == '%') && (*xml == '%'))) { /* Find entity in the list. */ i=0; while ((entities[i] != (char *) NULL) && (strncmp(xml+1,entities[i],strlen(entities[i])) != 0)) i+=2; if (entities[i++] == (char *) NULL) xml++; else if (entities[i] != (char *) NULL) { /* Found a match. */ length=strlen(entities[i]); entity=strchr(xml,';'); if ((entity != (char *) NULL) && ((length-1L) >= (size_t) (entity-xml))) { offset=(ssize_t) (xml-p); extent=(size_t) (offset+length+strlen(entity)); if (p != q) p=(char *) ResizeQuantumMemory(p,extent,sizeof(*p)); else { char *xml; xml=(char *) AcquireQuantumMemory(extent,sizeof(*xml)); if (xml != (char *) NULL) { (void) CopyMagickString(xml,p,extent*sizeof(*xml)); p=xml; } } if (p == (char *) NULL) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); xml=p+offset; entity=strchr(xml,';'); } if (entity != (char *) NULL) (void) CopyMagickMemory(xml+length,entity+1,strlen(entity)); (void) strncpy(xml,entities[i],length); } } else if (((state == ' ') || (state == '*')) && (isspace((int) ((unsigned char) *xml) != 0))) *(xml++)=' '; else xml++; } if (state == '*') { /* Normalize spaces for non-CDATA attributes. */ for (xml=p; *xml != '\0'; xml++) { char accept[] = " "; i=(ssize_t) strspn(xml,accept); if (i != 0) (void) CopyMagickMemory(xml,xml+i,strlen(xml+i)+1); while ((*xml != '\0') && (*xml != ' ')) xml++; } xml--; if ((xml >= p) && (*xml == ' ')) *xml='\0'; } return(p == q ? 
ConstantString(p) : p); } static void ParseCharacterContent(XMLTreeRoot *root,char *xml, const size_t length,const char state) { XMLTreeInfo *xml_info; xml_info=root->node; if ((xml_info == (XMLTreeInfo *) NULL) || (xml_info->tag == (char *) NULL) || (length == 0)) return; xml[length]='\0'; xml=ParseEntities(xml,root->entities,state); if ((xml_info->content != (char *) NULL) && (*xml_info->content != '\0')) { (void) ConcatenateString(&xml_info->content,xml); xml=DestroyString(xml); } else { if (xml_info->content != (char *) NULL) xml_info->content=DestroyString(xml_info->content); xml_info->content=xml; } } static XMLTreeInfo *ParseCloseTag(XMLTreeRoot *root,char *tag, ExceptionInfo *exception) { if ((root->node == (XMLTreeInfo *) NULL) || (root->node->tag == (char *) NULL) || (strcmp(tag,root->node->tag) != 0)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "ParseError","unexpected closing tag </%s>",tag); return(&root->root); } root->node=root->node->parent; return((XMLTreeInfo *) NULL); } static MagickBooleanType ValidateEntities(char *tag,char *xml,char **entities) { register ssize_t i; /* Check for circular entity references. */ for ( ; ; xml++) { while ((*xml != '\0') && (*xml != '&')) xml++; if (*xml == '\0') return(MagickTrue); if (strncmp(xml+1,tag,strlen(tag)) == 0) return(MagickFalse); i=0; while ((entities[i] != (char *) NULL) && (strncmp(entities[i],xml+1,strlen(entities[i])) == 0)) i+=2; if ((entities[i] != (char *) NULL) && (ValidateEntities(tag,entities[i+1],entities) == 0)) return(MagickFalse); } } static void ParseProcessingInstructions(XMLTreeRoot *root,char *xml, size_t length) { char *target; register ssize_t i; ssize_t j; target=xml; xml[length]='\0'; xml+=strcspn(xml,XMLWhitespace); if (*xml != '\0') { *xml='\0'; xml+=strspn(xml+1,XMLWhitespace)+1; } if (strcmp(target,"xml") == 0) { xml=strstr(xml,"standalone"); if ((xml != (char *) NULL) && (strncmp(xml+strspn(xml+10,XMLWhitespace "='\"")+10,"yes",3) == 0)) root->standalone=MagickTrue; return; } if (root->processing_instructions[0] == (char **) NULL) { root->processing_instructions=(char ***) AcquireMagickMemory(sizeof( *root->processing_instructions)); if (root->processing_instructions ==(char ***) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); *root->processing_instructions=(char **) NULL; } i=0; while ((root->processing_instructions[i] != (char **) NULL) && (strcmp(target,root->processing_instructions[i][0]) != 0)) i++; if (root->processing_instructions[i] == (char **) NULL) { root->processing_instructions=(char ***) ResizeQuantumMemory( root->processing_instructions,(size_t) (i+2), sizeof(*root->processing_instructions)); if (root->processing_instructions == (char ***) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); root->processing_instructions[i]=(char **) AcquireQuantumMemory(3, sizeof(**root->processing_instructions)); if (root->processing_instructions[i] == (char **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); root->processing_instructions[i+1]=(char **) NULL; root->processing_instructions[i][0]=ConstantString(target); root->processing_instructions[i][1]=(char *) root->processing_instructions[i+1]; root->processing_instructions[i+1]=(char **) NULL; root->processing_instructions[i][2]=ConstantString(""); } j=1; while (root->processing_instructions[i][j] != (char *) NULL) j++; root->processing_instructions[i]=(char **) ResizeQuantumMemory( root->processing_instructions[i],(size_t) (j+3), 
sizeof(**root->processing_instructions)); if (root->processing_instructions[i] == (char **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); root->processing_instructions[i][j+2]=(char *) ResizeQuantumMemory( root->processing_instructions[i][j+1],(size_t) (j+1), sizeof(***root->processing_instructions)); if (root->processing_instructions[i][j+2] == (char *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) CopyMagickString(root->processing_instructions[i][j+2]+j-1, root->root.tag != (char *) NULL ? ">" : "<",2); root->processing_instructions[i][j]=ConstantString(xml); root->processing_instructions[i][j+1]=(char *) NULL; } static MagickBooleanType ParseInternalDoctype(XMLTreeRoot *root,char *xml, size_t length,ExceptionInfo *exception) { char *c, **entities, *n, **predefined_entitites, q, *t, *v; register ssize_t i; ssize_t j; n=(char *) NULL; predefined_entitites=(char **) AcquireMagickMemory(sizeof(sentinel)); if (predefined_entitites == (char **) NULL) ThrowFatalException(ResourceLimitError,"MemoryAllocationFailed"); (void) CopyMagickMemory(predefined_entitites,sentinel,sizeof(sentinel)); for (xml[length]='\0'; xml != (char *) NULL; ) { while ((*xml != '\0') && (*xml != '<') && (*xml != '%')) xml++; if (*xml == '\0') break; if (strncmp(xml,"<!ENTITY",8) == 0) { /* Parse entity definitions. */ xml+=strspn(xml+8,XMLWhitespace)+8; c=xml; n=xml+strspn(xml,XMLWhitespace "%"); xml=n+strcspn(n,XMLWhitespace); *xml=';'; v=xml+strspn(xml+1,XMLWhitespace)+1; q=(*v); v++; if ((q != '"') && (q != '\'')) { /* Skip externals. */ xml=strchr(xml,'>'); continue; } entities=(*c == '%') ? predefined_entitites : root->entities; for (i=0; entities[i] != (char *) NULL; i++) ; entities=(char **) ResizeQuantumMemory(entities,(size_t) (i+3), sizeof(*entities)); if (entities == (char **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); if (*c == '%') predefined_entitites=entities; else root->entities=entities; xml++; *xml='\0'; xml=strchr(v,q); if (xml != (char *) NULL) { *xml='\0'; xml++; } entities[i+1]=ParseEntities(v,predefined_entitites,'%'); entities[i+2]=(char *) NULL; if (ValidateEntities(n,entities[i+1],entities) != MagickFalse) entities[i]=n; else { if (entities[i+1] != v) entities[i+1]=DestroyString(entities[i+1]); (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"ParseError","circular entity declaration &%s",n); predefined_entitites=(char **) RelinquishMagickMemory( predefined_entitites); return(MagickFalse); } } else if (strncmp(xml,"<!ATTLIST",9) == 0) { /* Parse default attributes. */ t=xml+strspn(xml+9,XMLWhitespace)+9; if (*t == '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"ParseError","unclosed <!ATTLIST"); predefined_entitites=(char **) RelinquishMagickMemory( predefined_entitites); return(MagickFalse); } xml=t+strcspn(t,XMLWhitespace ">"); if (*xml == '>') continue; *xml='\0'; i=0; while ((root->attributes[i] != (char **) NULL) && (n != (char *) NULL) && (strcmp(n,root->attributes[i][0]) != 0)) i++; while ((*(n=xml+strspn(xml+1,XMLWhitespace)+1) != '\0') && (*n != '>')) { xml=n+strcspn(n,XMLWhitespace); if (*xml != '\0') *xml='\0'; else { (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"ParseError","malformed <!ATTLIST"); predefined_entitites=(char **) RelinquishMagickMemory( predefined_entitites); return(MagickFalse); } xml+=strspn(xml+1,XMLWhitespace)+1; c=(char *) (strncmp(xml,"CDATA",5) != 0 ? 
"*" : " "); if (strncmp(xml,"NOTATION",8) == 0) xml+=strspn(xml+8,XMLWhitespace)+8; xml=(*xml == '(') ? strchr(xml,')') : xml+ strcspn(xml,XMLWhitespace); if (xml == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"ParseError","malformed <!ATTLIST"); predefined_entitites=(char **) RelinquishMagickMemory( predefined_entitites); return(MagickFalse); } xml+=strspn(xml,XMLWhitespace ")"); if (strncmp(xml,"#FIXED",6) == 0) xml+=strspn(xml+6,XMLWhitespace)+6; if (*xml == '#') { xml+=strcspn(xml,XMLWhitespace ">")-1; if (*c == ' ') continue; v=(char *) NULL; } else if (((*xml == '"') || (*xml == '\'')) && ((xml=strchr(v=xml+1,*xml)) != (char *) NULL)) *xml='\0'; else { (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"ParseError","malformed <!ATTLIST"); predefined_entitites=(char **) RelinquishMagickMemory( predefined_entitites); return(MagickFalse); } if (root->attributes[i] == (char **) NULL) { /* New attribute tag. */ if (i == 0) root->attributes=(char ***) AcquireQuantumMemory(2, sizeof(*root->attributes)); else root->attributes=(char ***) ResizeQuantumMemory( root->attributes,(size_t) (i+2), sizeof(*root->attributes)); if (root->attributes == (char ***) NULL) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); root->attributes[i]=(char **) AcquireQuantumMemory(2, sizeof(**root->attributes)); if (root->attributes[i] == (char **) NULL) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); root->attributes[i][0]=ConstantString(t); root->attributes[i][1]=(char *) NULL; root->attributes[i+1]=(char **) NULL; } for (j=1; root->attributes[i][j] != (char *) NULL; j+=3) ; root->attributes[i]=(char **) ResizeQuantumMemory( root->attributes[i],(size_t) (j+4),sizeof(**root->attributes)); if (root->attributes[i] == (char **) NULL) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); root->attributes[i][j+3]=(char *) NULL; root->attributes[i][j+2]=ConstantString(c); root->attributes[i][j+1]=(char *) NULL; if (v != (char *) NULL) root->attributes[i][j+1]=ParseEntities(v,root->entities,*c); root->attributes[i][j]=ConstantString(n); } } else if (strncmp(xml, "<!--", 4) == 0) xml=strstr(xml+4,"-->"); else if (strncmp(xml,"<?", 2) == 0) { c=xml+2; xml=strstr(c,"?>"); if (xml != (char *) NULL) { ParseProcessingInstructions(root,c,(size_t) (xml-c)); xml++; } } else if (*xml == '<') xml=strchr(xml,'>'); else if ((*(xml++) == '%') && (root->standalone == MagickFalse)) break; } predefined_entitites=(char **) RelinquishMagickMemory(predefined_entitites); return(MagickTrue); } static void ParseOpenTag(XMLTreeRoot *root,char *tag,char **attributes) { XMLTreeInfo *xml_info; xml_info=root->node; if (xml_info->tag == (char *) NULL) xml_info->tag=ConstantString(tag); else xml_info=AddChildToXMLTree(xml_info,tag,strlen(xml_info->content)); if (xml_info != (XMLTreeInfo *) NULL) xml_info->attributes=attributes; root->node=xml_info; } static const char *skip_tags[3] = { "rdf:Bag", "rdf:Seq", (const char *) NULL }; static inline MagickBooleanType IsSkipTag(const char *tag) { register ssize_t i; i=0; while (skip_tags[i] != (const char *) NULL) { if (LocaleCompare(tag,skip_tags[i]) == 0) return(MagickTrue); i++; } return(MagickFalse); } MagickExport XMLTreeInfo *NewXMLTree(const char *xml,ExceptionInfo *exception) { char **attribute, **attributes, *tag, *utf8; int c, terminal; MagickBooleanType status; register char *p; register ssize_t i; size_t ignore_depth, length; ssize_t j, l; XMLTreeRoot *root; /* Convert 
xml-string to UTF8. */ if ((xml == (const char *) NULL) || (strlen(xml) == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "ParseError","root tag missing"); return((XMLTreeInfo *) NULL); } root=(XMLTreeRoot *) NewXMLTreeTag((char *) NULL); length=strlen(xml); utf8=ConvertUTF16ToUTF8(xml,&length); if (utf8 == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "ParseError","UTF16 to UTF8 failed"); return((XMLTreeInfo *) NULL); } terminal=utf8[length-1]; utf8[length-1]='\0'; p=utf8; while ((*p != '\0') && (*p != '<')) p++; if (*p == '\0') { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "ParseError","root tag missing"); utf8=DestroyString(utf8); return((XMLTreeInfo *) NULL); } attribute=(char **) NULL; l=0; ignore_depth=0; for (p++; ; p++) { attributes=(char **) sentinel; tag=p; c=(*p); if ((isalpha((int) ((unsigned char) *p)) !=0) || (*p == '_') || (*p == ':') || (c < '\0')) { /* Tag. */ if (root->node == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"ParseError","root tag missing"); utf8=DestroyString(utf8); return(&root->root); } p+=strcspn(p,XMLWhitespace "/>"); while (isspace((int) ((unsigned char) *p)) != 0) *p++='\0'; if (ignore_depth == 0) { if ((*p != '\0') && (*p != '/') && (*p != '>')) { /* Find tag in default attributes list. */ i=0; while ((root->attributes[i] != (char **) NULL) && (strcmp(root->attributes[i][0],tag) != 0)) i++; attribute=root->attributes[i]; } for (l=0; (*p != '\0') && (*p != '/') && (*p != '>'); l+=2) { /* Attribute. */ if (l == 0) attributes=(char **) AcquireQuantumMemory(4, sizeof(*attributes)); else attributes=(char **) ResizeQuantumMemory(attributes, (size_t) (l+4),sizeof(*attributes)); if (attributes == (char **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); utf8=DestroyString(utf8); return(&root->root); } attributes[l+2]=(char *) NULL; attributes[l+1]=(char *) NULL; attributes[l]=p; p+=strcspn(p,XMLWhitespace "=/>"); if ((*p != '=') && (isspace((int) ((unsigned char) *p)) == 0)) attributes[l]=ConstantString(""); else { *p++='\0'; p+=strspn(p,XMLWhitespace "="); c=(*p); if ((c == '"') || (c == '\'')) { /* Attributes value. */ p++; attributes[l+1]=p; while ((*p != '\0') && (*p != c)) p++; if (*p != '\0') *p++='\0'; else { attributes[l]=ConstantString(""); attributes[l+1]=ConstantString(""); (void) DestroyXMLTreeAttributes(attributes); (void) ThrowMagickException(exception, GetMagickModule(),OptionWarning,"ParseError", "missing %c",c); utf8=DestroyString(utf8); return(&root->root); } j=1; while ((attribute != (char **) NULL) && (attribute[j] != (char *) NULL) && (strcmp(attribute[j],attributes[l]) != 0)) j+=3; attributes[l+1]=ParseEntities(attributes[l+1], root->entities,(attribute != (char **) NULL) && (attribute[j] != (char *) NULL) ? *attribute[j+2] : ' '); } attributes[l]=ConstantString(attributes[l]); } while (isspace((int) ((unsigned char) *p)) != 0) p++; } } else { while((*p != '\0') && (*p != '/') && (*p != '>')) p++; } if (*p == '/') { /* Self closing tag. 
*/ *p++='\0'; if (((*p != '\0') && (*p != '>')) || ((*p == '\0') && (terminal != '>'))) { if (l != 0) (void) DestroyXMLTreeAttributes(attributes); (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"ParseError","missing >"); utf8=DestroyString(utf8); return(&root->root); } if ((ignore_depth == 0) && (IsSkipTag(tag) == MagickFalse)) { ParseOpenTag(root,tag,attributes); (void) ParseCloseTag(root,tag,exception); } } else { c=(*p); if ((*p == '>') || ((*p == '\0') && (terminal == '>'))) { *p='\0'; if ((ignore_depth == 0) && (IsSkipTag(tag) == MagickFalse)) ParseOpenTag(root,tag,attributes); else { ignore_depth++; (void) DestroyXMLTreeAttributes(attributes); } *p=c; } else { if (l != 0) (void) DestroyXMLTreeAttributes(attributes); (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"ParseError","missing >"); utf8=DestroyString(utf8); return(&root->root); } } } else if (*p == '/') { /* Close tag. */ tag=p+1; p+=strcspn(tag,XMLWhitespace ">")+1; c=(*p); if ((c == '\0') && (terminal != '>')) { (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"ParseError","missing >"); utf8=DestroyString(utf8); return(&root->root); } *p='\0'; if (ignore_depth == 0 && ParseCloseTag(root,tag,exception) != (XMLTreeInfo *) NULL) { utf8=DestroyString(utf8); return(&root->root); } if (ignore_depth > 0) ignore_depth--; *p=c; if (isspace((int) ((unsigned char) *p)) != 0) p+=strspn(p,XMLWhitespace); } else if (strncmp(p,"!--",3) == 0) { /* Comment. */ p=strstr(p+3,"--"); if ((p == (char *) NULL) || ((*(p+=2) != '>') && (*p != '\0')) || ((*p == '\0') && (terminal != '>'))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"ParseError","unclosed <!--"); utf8=DestroyString(utf8); return(&root->root); } } else if (strncmp(p,"![CDATA[",8) == 0) { /* Cdata. */ p=strstr(p,"]]>"); if (p != (char *) NULL) { p+=2; if (ignore_depth == 0) ParseCharacterContent(root,tag+8,(size_t) (p-tag-10),'c'); } else { (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"ParseError","unclosed <![CDATA["); utf8=DestroyString(utf8); return(&root->root); } } else if (strncmp(p,"!DOCTYPE",8) == 0) { /* DTD. */ for (l=0; (*p != '\0') && (((l == 0) && (*p != '>')) || ((l != 0) && ((*p != ']') || (*(p+strspn(p+1,XMLWhitespace)+1) != '>')))); l=(ssize_t) ((*p == '[') ? 1 : l)) p+=strcspn(p+1,"[]>")+1; if ((*p == '\0') && (terminal != '>')) { (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"ParseError","unclosed <!DOCTYPE"); utf8=DestroyString(utf8); return(&root->root); } if (l != 0) tag=strchr(tag,'[')+1; if (l != 0) { status=ParseInternalDoctype(root,tag,(size_t) (p-tag), exception); if (status == MagickFalse) { utf8=DestroyString(utf8); return(&root->root); } p++; } } else if (*p == '?') { /* Processing instructions. */ do { p=strchr(p,'?'); if (p == (char *) NULL) break; p++; } while ((*p != '\0') && (*p != '>')); if ((p == (char *) NULL) || ((*p == '\0') && (terminal != '>'))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"ParseError","unclosed <?"); utf8=DestroyString(utf8); return(&root->root); } ParseProcessingInstructions(root,tag+1,(size_t) (p-tag-2)); } else { (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"ParseError","unexpected <"); utf8=DestroyString(utf8); return(&root->root); } if ((p == (char *) NULL) || (*p == '\0')) break; *p++='\0'; tag=p; if ((*p != '\0') && (*p != '<')) { /* Tag character content. 
*/ while ((*p != '\0') && (*p != '<')) p++; if (*p == '\0') break; if (ignore_depth == 0) ParseCharacterContent(root,tag,(size_t) (p-tag),'&'); } else if (*p == '\0') break; } utf8=DestroyString(utf8); if (root->node == (XMLTreeInfo *) NULL) return(&root->root); if (root->node->tag == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "ParseError","root tag missing"); return(&root->root); } (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "ParseError","unclosed tag: `%s'",root->node->tag); return(&root->root); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w X M L T r e e T a g % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewXMLTreeTag() returns a new empty xml structure for the xml-tree tag. % % The format of the NewXMLTreeTag method is: % % XMLTreeInfo *NewXMLTreeTag(const char *tag) % % A description of each parameter follows: % % o tag: the tag. % */ MagickExport XMLTreeInfo *NewXMLTreeTag(const char *tag) { static const char *predefined_entities[NumberPredefinedEntities+1] = { "lt;", "&#60;", "gt;", "&#62;", "quot;", "&#34;", "apos;", "&#39;", "amp;", "&#38;", (char *) NULL }; XMLTreeRoot *root; root=(XMLTreeRoot *) AcquireMagickMemory(sizeof(*root)); if (root == (XMLTreeRoot *) NULL) return((XMLTreeInfo *) NULL); (void) ResetMagickMemory(root,0,sizeof(*root)); root->root.tag=(char *) NULL; if (tag != (char *) NULL) root->root.tag=ConstantString(tag); root->node=(&root->root); root->root.content=ConstantString(""); root->entities=(char **) AcquireMagickMemory(sizeof(predefined_entities)); if (root->entities == (char **) NULL) return((XMLTreeInfo *) NULL); (void) CopyMagickMemory(root->entities,predefined_entities, sizeof(predefined_entities)); root->root.attributes=sentinel; root->attributes=(char ***) root->root.attributes; root->processing_instructions=(char ***) root->root.attributes; root->debug=IsEventLogging(); root->signature=MagickSignature; return(&root->root); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P r u n e T a g F r o m X M L T r e e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneTagFromXMLTree() prunes a tag from the xml-tree along with all its % subtags. % % The format of the PruneTagFromXMLTree method is: % % XMLTreeInfo *PruneTagFromXMLTree(XMLTreeInfo *xml_info) % % A description of each parameter follows: % % o xml_info: the xml info. % */ MagickExport XMLTreeInfo *PruneTagFromXMLTree(XMLTreeInfo *xml_info) { XMLTreeInfo *node; assert(xml_info != (XMLTreeInfo *) NULL); assert((xml_info->signature == MagickSignature) || (((XMLTreeRoot *) xml_info)->signature == MagickSignature)); if (xml_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (xml_info->next != (XMLTreeInfo *) NULL) xml_info->next->sibling=xml_info->sibling; if (xml_info->parent != (XMLTreeInfo *) NULL) { node=xml_info->parent->child; if (node == xml_info) xml_info->parent->child=xml_info->ordered; else { while (node->ordered != xml_info) node=node->ordered; node->ordered=node->ordered->ordered; node=xml_info->parent->child; if (strcmp(node->tag,xml_info->tag) != 0) { while (strcmp(node->sibling->tag,xml_info->tag) != 0) node=node->sibling; if (node->sibling != xml_info) node=node->sibling; else node->sibling=(xml_info->next != (XMLTreeInfo *) NULL) ? 
xml_info->next : node->sibling->sibling; } while ((node->next != (XMLTreeInfo *) NULL) && (node->next != xml_info)) node=node->next; if (node->next != (XMLTreeInfo *) NULL) node->next=node->next->next; } } xml_info->ordered=(XMLTreeInfo *) NULL; xml_info->sibling=(XMLTreeInfo *) NULL; xml_info->next=(XMLTreeInfo *) NULL; return(xml_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t X M L T r e e A t t r i b u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetXMLTreeAttribute() sets the tag attributes or adds a new attribute if not % found. A value of NULL removes the specified attribute. % % The format of the SetXMLTreeAttribute method is: % % XMLTreeInfo *SetXMLTreeAttribute(XMLTreeInfo *xml_info,const char *tag, % const char *value) % % A description of each parameter follows: % % o xml_info: the xml info. % % o tag: The attribute tag. % % o value: The attribute value. % */ MagickExport XMLTreeInfo *SetXMLTreeAttribute(XMLTreeInfo *xml_info, const char *tag,const char *value) { register ssize_t i; ssize_t j; assert(xml_info != (XMLTreeInfo *) NULL); assert((xml_info->signature == MagickSignature) || (((XMLTreeRoot *) xml_info)->signature == MagickSignature)); if (xml_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); i=0; while ((xml_info->attributes[i] != (char *) NULL) && (strcmp(xml_info->attributes[i],tag) != 0)) i+=2; if (xml_info->attributes[i] == (char *) NULL) { /* Add new attribute tag. */ if (value == (const char *) NULL) return(xml_info); if (xml_info->attributes != sentinel) xml_info->attributes=(char **) ResizeQuantumMemory( xml_info->attributes,(size_t) (i+4),sizeof(*xml_info->attributes)); else { xml_info->attributes=(char **) AcquireQuantumMemory(4, sizeof(*xml_info->attributes)); if (xml_info->attributes != (char **) NULL) xml_info->attributes[1]=ConstantString(""); } if (xml_info->attributes == (char **) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireString"); xml_info->attributes[i]=ConstantString(tag); xml_info->attributes[i+2]=(char *) NULL; (void) strlen(xml_info->attributes[i+1]); } /* Add new value to an existing attribute. */ for (j=i; xml_info->attributes[j] != (char *) NULL; j+=2) ; if (xml_info->attributes[i+1] != (char *) NULL) xml_info->attributes[i+1]=DestroyString(xml_info->attributes[i+1]); if (value != (const char *) NULL) { xml_info->attributes[i+1]=ConstantString(value); return(xml_info); } if (xml_info->attributes[i] != (char *) NULL) xml_info->attributes[i]=DestroyString(xml_info->attributes[i]); (void) CopyMagickMemory(xml_info->attributes+i,xml_info->attributes+i+2, (size_t) (j-i)*sizeof(*xml_info->attributes)); xml_info->attributes=(char **) ResizeQuantumMemory(xml_info->attributes, (size_t) (j+2),sizeof(*xml_info->attributes)); if (xml_info->attributes == (char **) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireString"); j-=2; (void) CopyMagickMemory(xml_info->attributes[j+1]+(i/2), xml_info->attributes[j+1]+(i/2)+1,(size_t) (((j+2)/2)-(i/2))* sizeof(**xml_info->attributes)); return(xml_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t X M L T r e e C o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetXMLTreeContent() sets the character content for the given tag and % returns the tag. 
% % The format of the SetXMLTreeContent method is: % % XMLTreeInfo *SetXMLTreeContent(XMLTreeInfo *xml_info, % const char *content) % % A description of each parameter follows: % % o xml_info: the xml info. % % o content: The content. % */ MagickExport XMLTreeInfo *SetXMLTreeContent(XMLTreeInfo *xml_info, const char *content) { assert(xml_info != (XMLTreeInfo *) NULL); assert((xml_info->signature == MagickSignature) || (((XMLTreeRoot *) xml_info)->signature == MagickSignature)); if (xml_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (xml_info->content != (char *) NULL) xml_info->content=DestroyString(xml_info->content); xml_info->content=(char *) ConstantString(content); return(xml_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % X M L T r e e I n f o T o X M L % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % XMLTreeInfoToXML() converts an xml-tree to an XML string. % % The format of the XMLTreeInfoToXML method is: % % char *XMLTreeInfoToXML(XMLTreeInfo *xml_info) % % A description of each parameter follows: % % o xml_info: the xml info. % */ static char *EncodePredefinedEntities(const char *source,ssize_t offset, char **destination,size_t *length,size_t *extent,MagickBooleanType pedantic) { char *canonical_content; if (offset < 0) canonical_content=CanonicalXMLContent(source,pedantic); else { char *content; content=AcquireString(source); content[offset]='\0'; canonical_content=CanonicalXMLContent(content,pedantic); content=DestroyString(content); } if (canonical_content == (char *) NULL) return(*destination); if ((*length+strlen(canonical_content)+MaxTextExtent) > *extent) { *extent=(*length)+strlen(canonical_content)+MaxTextExtent; *destination=(char *) ResizeQuantumMemory(*destination,*extent, sizeof(**destination)); if (*destination == (char *) NULL) return(*destination); } *length+=FormatLocaleString(*destination+(*length),*extent,"%s", canonical_content); canonical_content=DestroyString(canonical_content); return(*destination); } static char *XMLTreeTagToXML(XMLTreeInfo *xml_info,char **source,size_t *length, size_t *extent,size_t start,char ***attributes) { char *content; const char *attribute; register ssize_t i; size_t offset; ssize_t j; content=(char *) ""; if (xml_info->parent != (XMLTreeInfo *) NULL) content=xml_info->parent->content; offset=0; *source=EncodePredefinedEntities(content+start,(ssize_t) (xml_info->offset- start),source,length,extent,MagickFalse); if ((*length+strlen(xml_info->tag)+MaxTextExtent) > *extent) { *extent=(*length)+strlen(xml_info->tag)+MaxTextExtent; *source=(char *) ResizeQuantumMemory(*source,*extent,sizeof(**source)); if (*source == (char *) NULL) return(*source); } *length+=FormatLocaleString(*source+(*length),*extent,"<%s",xml_info->tag); for (i=0; xml_info->attributes[i]; i+=2) { attribute=GetXMLTreeAttribute(xml_info,xml_info->attributes[i]); if (attribute != xml_info->attributes[i+1]) continue; if ((*length+strlen(xml_info->attributes[i])+MaxTextExtent) > *extent) { *extent=(*length)+strlen(xml_info->attributes[i])+MaxTextExtent; *source=(char *) ResizeQuantumMemory(*source,*extent,sizeof(**source)); if (*source == (char *) NULL) return((char *) NULL); } *length+=FormatLocaleString(*source+(*length),*extent," %s=\"", xml_info->attributes[i]); (void) EncodePredefinedEntities(xml_info->attributes[i+1],-1,source,length, extent,MagickTrue); *length+=FormatLocaleString(*source+(*length),*extent,"\""); } 
i=0; while ((attributes[i] != (char **) NULL) && (strcmp(attributes[i][0],xml_info->tag) != 0)) i++; j=1; while ((attributes[i] != (char **) NULL) && (attributes[i][j] != (char *) NULL)) { if ((attributes[i][j+1] == (char *) NULL) || (GetXMLTreeAttribute(xml_info,attributes[i][j]) != attributes[i][j+1])) { j+=3; continue; } if ((*length+strlen(attributes[i][j])+MaxTextExtent) > *extent) { *extent=(*length)+strlen(attributes[i][j])+MaxTextExtent; *source=(char *) ResizeQuantumMemory(*source,*extent,sizeof(**source)); if (*source == (char *) NULL) return((char *) NULL); } *length+=FormatLocaleString(*source+(*length),*extent," %s=\"", attributes[i][j]); (void) EncodePredefinedEntities(attributes[i][j+1],-1,source,length,extent, MagickTrue); *length+=FormatLocaleString(*source+(*length),*extent,"\""); j+=3; } *length+=FormatLocaleString(*source+(*length),*extent,*xml_info->content ? ">" : "/>"); if (xml_info->child != (XMLTreeInfo *) NULL) *source=XMLTreeTagToXML(xml_info->child,source,length,extent,0,attributes); else *source=EncodePredefinedEntities(xml_info->content,-1,source,length,extent, MagickFalse); if ((*length+strlen(xml_info->tag)+MaxTextExtent) > *extent) { *extent=(*length)+strlen(xml_info->tag)+MaxTextExtent; *source=(char *) ResizeQuantumMemory(*source,*extent,sizeof(**source)); if (*source == (char *) NULL) return((char *) NULL); } if (*xml_info->content != '\0') *length+=FormatLocaleString(*source+(*length),*extent,"</%s>", xml_info->tag); while ((content[offset] != '\0') && (offset < xml_info->offset)) offset++; if (xml_info->ordered != (XMLTreeInfo *) NULL) content=XMLTreeTagToXML(xml_info->ordered,source,length,extent,offset, attributes); else content=EncodePredefinedEntities(content+offset,-1,source,length,extent, MagickFalse); return(content); } MagickExport char *XMLTreeInfoToXML(XMLTreeInfo *xml_info) { char *xml; register char *p, *q; register ssize_t i; size_t extent, length; ssize_t j, k; XMLTreeInfo *ordered, *parent; XMLTreeRoot *root; assert(xml_info != (XMLTreeInfo *) NULL); assert((xml_info->signature == MagickSignature) || (((XMLTreeRoot *) xml_info)->signature == MagickSignature)); if (xml_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (xml_info->tag == (char *) NULL) return((char *) NULL); xml=AcquireString((char *) NULL); length=0; extent=MaxTextExtent; root=(XMLTreeRoot *) xml_info; while (root->root.parent != (XMLTreeInfo *) NULL) root=(XMLTreeRoot *) root->root.parent; parent=xml_info->parent; if (parent == (XMLTreeInfo *) NULL) for (i=0; root->processing_instructions[i] != (char **) NULL; i++) { /* Pre-root processing instructions. */ for (k=2; root->processing_instructions[i][k-1]; k++) ; p=root->processing_instructions[i][1]; for (j=1; p != (char *) NULL; j++) { if (root->processing_instructions[i][k][j-1] == '>') { p=root->processing_instructions[i][j]; continue; } q=root->processing_instructions[i][0]; if ((length+strlen(p)+strlen(q)+MaxTextExtent) > extent) { extent=length+strlen(p)+strlen(q)+MaxTextExtent; xml=(char *) ResizeQuantumMemory(xml,extent,sizeof(*xml)); if (xml == (char *) NULL) return(xml); } length+=FormatLocaleString(xml+length,extent,"<?%s%s%s?>\n",q, *p != '\0' ? 
" " : "",p); p=root->processing_instructions[i][j]; } } ordered=xml_info->ordered; xml_info->parent=(XMLTreeInfo *) NULL; xml_info->ordered=(XMLTreeInfo *) NULL; xml=XMLTreeTagToXML(xml_info,&xml,&length,&extent,0,root->attributes); xml_info->parent=parent; xml_info->ordered=ordered; if (parent == (XMLTreeInfo *) NULL) for (i=0; root->processing_instructions[i] != (char **) NULL; i++) { /* Post-root processing instructions. */ for (k=2; root->processing_instructions[i][k-1]; k++) ; p=root->processing_instructions[i][1]; for (j=1; p != (char *) NULL; j++) { if (root->processing_instructions[i][k][j-1] == '<') { p=root->processing_instructions[i][j]; continue; } q=root->processing_instructions[i][0]; if ((length+strlen(p)+strlen(q)+MaxTextExtent) > extent) { extent=length+strlen(p)+strlen(q)+MaxTextExtent; xml=(char *) ResizeQuantumMemory(xml,extent,sizeof(*xml)); if (xml == (char *) NULL) return(xml); } length+=FormatLocaleString(xml+length,extent,"\n<?%s%s%s?>",q, *p != '\0' ? " " : "",p); p=root->processing_instructions[i][j]; } } return((char *) ResizeQuantumMemory(xml,length+1,sizeof(*xml))); }
./CrossVul/dataset_final_sorted/CWE-400/c/good_4773_2
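/*
 * A minimal usage sketch of the xml-tree API implemented in the file above
 * (NewXMLTree, GetXMLTreeChild, GetXMLTreeContent, SetXMLTreeAttribute,
 * XMLTreeInfoToXML, DestroyXMLTree).  Illustrative only: it assumes an
 * ImageMagick 6 style MagickCore build; the umbrella header name and link
 * flags may differ between versions.
 */
#include <stdio.h>
#include <magick/MagickCore.h>

int main(void)
{
  ExceptionInfo
    *exception;

  XMLTreeInfo
    *child,
    *tree;

  char
    *xml;

  MagickCoreGenesis("xml-tree-sketch",MagickFalse);
  exception=AcquireExceptionInfo();
  /*
    NewXMLTree() decodes character references and entities through
    ParseEntities() while it builds the tree.
  */
  tree=NewXMLTree("<doc type=\"demo\"><title>A &amp; B</title></doc>",
    exception);
  if (tree != (XMLTreeInfo *) NULL)
    {
      child=GetXMLTreeChild(tree,"title");
      if (child != (XMLTreeInfo *) NULL)
        (void) printf("title: %s\n",GetXMLTreeContent(child));
      (void) SetXMLTreeAttribute(tree,"version","1.0");
      /*
        XMLTreeInfoToXML() re-encodes predefined entities through
        CanonicalXMLContent() when serializing.
      */
      xml=XMLTreeInfoToXML(tree);
      (void) printf("%s\n",xml);
      xml=DestroyString(xml);
      tree=DestroyXMLTree(tree);
    }
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}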
crossvul-cpp_data_bad_5200_3
/* * linux/fs/pnode.c * * (C) Copyright IBM Corporation 2005. * Released under GPL v2. * Author : Ram Pai (linuxram@us.ibm.com) * */ #include <linux/mnt_namespace.h> #include <linux/mount.h> #include <linux/fs.h> #include <linux/nsproxy.h> #include "internal.h" #include "pnode.h" /* return the next shared peer mount of @p */ static inline struct mount *next_peer(struct mount *p) { return list_entry(p->mnt_share.next, struct mount, mnt_share); } static inline struct mount *first_slave(struct mount *p) { return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave); } static inline struct mount *next_slave(struct mount *p) { return list_entry(p->mnt_slave.next, struct mount, mnt_slave); } static struct mount *get_peer_under_root(struct mount *mnt, struct mnt_namespace *ns, const struct path *root) { struct mount *m = mnt; do { /* Check the namespace first for optimization */ if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root)) return m; m = next_peer(m); } while (m != mnt); return NULL; } /* * Get ID of closest dominating peer group having a representative * under the given root. * * Caller must hold namespace_sem */ int get_dominating_id(struct mount *mnt, const struct path *root) { struct mount *m; for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) { struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root); if (d) return d->mnt_group_id; } return 0; } static int do_make_slave(struct mount *mnt) { struct mount *peer_mnt = mnt, *master = mnt->mnt_master; struct mount *slave_mnt; /* * slave 'mnt' to a peer mount that has the * same root dentry. If none is available then * slave it to anything that is available. */ while ((peer_mnt = next_peer(peer_mnt)) != mnt && peer_mnt->mnt.mnt_root != mnt->mnt.mnt_root) ; if (peer_mnt == mnt) { peer_mnt = next_peer(mnt); if (peer_mnt == mnt) peer_mnt = NULL; } if (mnt->mnt_group_id && IS_MNT_SHARED(mnt) && list_empty(&mnt->mnt_share)) mnt_release_group_id(mnt); list_del_init(&mnt->mnt_share); mnt->mnt_group_id = 0; if (peer_mnt) master = peer_mnt; if (master) { list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave) slave_mnt->mnt_master = master; list_move(&mnt->mnt_slave, &master->mnt_slave_list); list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev); INIT_LIST_HEAD(&mnt->mnt_slave_list); } else { struct list_head *p = &mnt->mnt_slave_list; while (!list_empty(p)) { slave_mnt = list_first_entry(p, struct mount, mnt_slave); list_del_init(&slave_mnt->mnt_slave); slave_mnt->mnt_master = NULL; } } mnt->mnt_master = master; CLEAR_MNT_SHARED(mnt); return 0; } /* * vfsmount lock must be held for write */ void change_mnt_propagation(struct mount *mnt, int type) { if (type == MS_SHARED) { set_mnt_shared(mnt); return; } do_make_slave(mnt); if (type != MS_SLAVE) { list_del_init(&mnt->mnt_slave); mnt->mnt_master = NULL; if (type == MS_UNBINDABLE) mnt->mnt.mnt_flags |= MNT_UNBINDABLE; else mnt->mnt.mnt_flags &= ~MNT_UNBINDABLE; } } /* * get the next mount in the propagation tree. * @m: the mount seen last * @origin: the original mount from where the tree walk initiated * * Note that peer groups form contiguous segments of slave lists. * We rely on that in get_source() to be able to find out if * vfsmount found while iterating with propagation_next() is * a peer of one we'd found earlier. */ static struct mount *propagation_next(struct mount *m, struct mount *origin) { /* are there any slaves of this mount? 
*/ if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list)) return first_slave(m); while (1) { struct mount *master = m->mnt_master; if (master == origin->mnt_master) { struct mount *next = next_peer(m); return (next == origin) ? NULL : next; } else if (m->mnt_slave.next != &master->mnt_slave_list) return next_slave(m); /* back at master */ m = master; } } static struct mount *next_group(struct mount *m, struct mount *origin) { while (1) { while (1) { struct mount *next; if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list)) return first_slave(m); next = next_peer(m); if (m->mnt_group_id == origin->mnt_group_id) { if (next == origin) return NULL; } else if (m->mnt_slave.next != &next->mnt_slave) break; m = next; } /* m is the last peer */ while (1) { struct mount *master = m->mnt_master; if (m->mnt_slave.next != &master->mnt_slave_list) return next_slave(m); m = next_peer(master); if (master->mnt_group_id == origin->mnt_group_id) break; if (master->mnt_slave.next == &m->mnt_slave) break; m = master; } if (m == origin) return NULL; } } /* all accesses are serialized by namespace_sem */ static struct user_namespace *user_ns; static struct mount *last_dest, *first_source, *last_source, *dest_master; static struct mountpoint *mp; static struct hlist_head *list; static inline bool peers(struct mount *m1, struct mount *m2) { return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id; } static int propagate_one(struct mount *m) { struct mount *child; int type; /* skip ones added by this propagate_mnt() */ if (IS_MNT_NEW(m)) return 0; /* skip if mountpoint isn't covered by it */ if (!is_subdir(mp->m_dentry, m->mnt.mnt_root)) return 0; if (peers(m, last_dest)) { type = CL_MAKE_SHARED; } else { struct mount *n, *p; bool done; for (n = m; ; n = p) { p = n->mnt_master; if (p == dest_master || IS_MNT_MARKED(p)) break; } do { struct mount *parent = last_source->mnt_parent; if (last_source == first_source) break; done = parent->mnt_master == p; if (done && peers(n, parent)) break; last_source = last_source->mnt_master; } while (!done); type = CL_SLAVE; /* beginning of peer group among the slaves? */ if (IS_MNT_SHARED(m)) type |= CL_MAKE_SHARED; } /* Notice when we are propagating across user namespaces */ if (m->mnt_ns->user_ns != user_ns) type |= CL_UNPRIVILEGED; child = copy_tree(last_source, last_source->mnt.mnt_root, type); if (IS_ERR(child)) return PTR_ERR(child); child->mnt.mnt_flags &= ~MNT_LOCKED; mnt_set_mountpoint(m, mp, child); last_dest = m; last_source = child; if (m->mnt_master != dest_master) { read_seqlock_excl(&mount_lock); SET_MNT_MARK(m->mnt_master); read_sequnlock_excl(&mount_lock); } hlist_add_head(&child->mnt_hash, list); return 0; } /* * mount 'source_mnt' under the destination 'dest_mnt' at * dentry 'dest_dentry'. And propagate that mount to * all the peer and slave mounts of 'dest_mnt'. * Link all the new mounts into a propagation tree headed at * source_mnt. Also link all the new mounts using ->mnt_list * headed at source_mnt's ->mnt_list * * @dest_mnt: destination mount. * @dest_dentry: destination dentry. * @source_mnt: source mount. * @tree_list : list of heads of trees to be attached. */ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp, struct mount *source_mnt, struct hlist_head *tree_list) { struct mount *m, *n; int ret = 0; /* * we don't want to bother passing tons of arguments to * propagate_one(); everything is serialized by namespace_sem, * so globals will do just fine. 
*/ user_ns = current->nsproxy->mnt_ns->user_ns; last_dest = dest_mnt; first_source = source_mnt; last_source = source_mnt; mp = dest_mp; list = tree_list; dest_master = dest_mnt->mnt_master; /* all peers of dest_mnt, except dest_mnt itself */ for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) { ret = propagate_one(n); if (ret) goto out; } /* all slave groups */ for (m = next_group(dest_mnt, dest_mnt); m; m = next_group(m, dest_mnt)) { /* everything in that slave group */ n = m; do { ret = propagate_one(n); if (ret) goto out; n = next_peer(n); } while (n != m); } out: read_seqlock_excl(&mount_lock); hlist_for_each_entry(n, tree_list, mnt_hash) { m = n->mnt_parent; if (m->mnt_master != dest_mnt->mnt_master) CLEAR_MNT_MARK(m->mnt_master); } read_sequnlock_excl(&mount_lock); return ret; } /* * return true if the refcount is greater than count */ static inline int do_refcount_check(struct mount *mnt, int count) { return mnt_get_count(mnt) > count; } /* * check if the mount 'mnt' can be unmounted successfully. * @mnt: the mount to be checked for unmount * NOTE: unmounting 'mnt' would naturally propagate to all * other mounts its parent propagates to. * Check if any of these mounts that **do not have submounts** * have more references than 'refcnt'. If so return busy. * * vfsmount lock must be held for write */ int propagate_mount_busy(struct mount *mnt, int refcnt) { struct mount *m, *child; struct mount *parent = mnt->mnt_parent; int ret = 0; if (mnt == parent) return do_refcount_check(mnt, refcnt); /* * quickly check if the current mount can be unmounted. * If not, we don't have to go checking for all other * mounts */ if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt)) return 1; for (m = propagation_next(parent, parent); m; m = propagation_next(m, parent)) { child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint); if (child && list_empty(&child->mnt_mounts) && (ret = do_refcount_check(child, 1))) break; } return ret; } /* * Clear MNT_LOCKED when it can be shown to be safe. * * mount_lock lock must be held for write */ void propagate_mount_unlock(struct mount *mnt) { struct mount *parent = mnt->mnt_parent; struct mount *m, *child; BUG_ON(parent == mnt); for (m = propagation_next(parent, parent); m; m = propagation_next(m, parent)) { child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint); if (child) child->mnt.mnt_flags &= ~MNT_LOCKED; } } /* * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted. */ static void mark_umount_candidates(struct mount *mnt) { struct mount *parent = mnt->mnt_parent; struct mount *m; BUG_ON(parent == mnt); for (m = propagation_next(parent, parent); m; m = propagation_next(m, parent)) { struct mount *child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint); if (child && (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m))) { SET_MNT_MARK(child); } } } /* * NOTE: unmounting 'mnt' naturally propagates to all other mounts its * parent propagates to. */ static void __propagate_umount(struct mount *mnt) { struct mount *parent = mnt->mnt_parent; struct mount *m; BUG_ON(parent == mnt); for (m = propagation_next(parent, parent); m; m = propagation_next(m, parent)) { struct mount *child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint); /* * umount the child only if the child has no children * and the child is marked safe to unmount. 
*/ if (!child || !IS_MNT_MARKED(child)) continue; CLEAR_MNT_MARK(child); if (list_empty(&child->mnt_mounts)) { list_del_init(&child->mnt_child); child->mnt.mnt_flags |= MNT_UMOUNT; list_move_tail(&child->mnt_list, &mnt->mnt_list); } } } /* * collect all mounts that receive propagation from the mount in @list, * and return these additional mounts in the same list. * @list: the list of mounts to be unmounted. * * vfsmount lock must be held for write */ int propagate_umount(struct list_head *list) { struct mount *mnt; list_for_each_entry_reverse(mnt, list, mnt_list) mark_umount_candidates(mnt); list_for_each_entry(mnt, list, mnt_list) __propagate_umount(mnt); return 0; }
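/*
 * User-space view of the propagation states this file implements.  mount(2)
 * called with only MS_SHARED, MS_SLAVE, MS_PRIVATE or MS_UNBINDABLE
 * (optionally ORed with MS_REC) changes the propagation type of an existing
 * mount, which the kernel services through change_mnt_propagation() above.
 * A sketch, assuming root privileges and that /mnt/test is already a mount
 * point; both are assumptions of this illustration only.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Start from a private mount so the later transitions are well defined. */
	if (mount(NULL, "/mnt/test", NULL, MS_PRIVATE, NULL) == -1) {
		perror("MS_PRIVATE");
		return 1;
	}
	/*
	 * Make it shared: set_mnt_shared() marks it, and mounts added beneath
	 * it are copied to every peer by propagate_mnt()/propagate_one().
	 */
	if (mount(NULL, "/mnt/test", NULL, MS_SHARED, NULL) == -1) {
		perror("MS_SHARED");
		return 1;
	}
	/*
	 * Demote it to a slave: do_make_slave() picks a peer or master (when
	 * one exists) and hangs this mount off its mnt_slave_list, so it
	 * receives propagation but no longer sends it.
	 */
	if (mount(NULL, "/mnt/test", NULL, MS_SLAVE, NULL) == -1) {
		perror("MS_SLAVE");
		return 1;
	}
	printf("propagation: private -> shared -> slave\n");
	return 0;
}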
./CrossVul/dataset_final_sorted/CWE-400/c/bad_5200_3
crossvul-cpp_data_bad_1262_0
// SPDX-License-Identifier: GPL-2.0 /* * trace_events_filter - generic event filtering * * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> */ #include <linux/module.h> #include <linux/ctype.h> #include <linux/mutex.h> #include <linux/perf_event.h> #include <linux/slab.h> #include "trace.h" #include "trace_output.h" #define DEFAULT_SYS_FILTER_MESSAGE \ "### global filter ###\n" \ "# Use this to set filters for multiple events.\n" \ "# Only events with the given fields will be affected.\n" \ "# If no events are modified, an error message will be displayed here" /* Due to token parsing '<=' must be before '<' and '>=' must be before '>' */ #define OPS \ C( OP_GLOB, "~" ), \ C( OP_NE, "!=" ), \ C( OP_EQ, "==" ), \ C( OP_LE, "<=" ), \ C( OP_LT, "<" ), \ C( OP_GE, ">=" ), \ C( OP_GT, ">" ), \ C( OP_BAND, "&" ), \ C( OP_MAX, NULL ) #undef C #define C(a, b) a enum filter_op_ids { OPS }; #undef C #define C(a, b) b static const char * ops[] = { OPS }; /* * pred functions are OP_LE, OP_LT, OP_GE, OP_GT, and OP_BAND * pred_funcs_##type below must match the order of them above. */ #define PRED_FUNC_START OP_LE #define PRED_FUNC_MAX (OP_BAND - PRED_FUNC_START) #define ERRORS \ C(NONE, "No error"), \ C(INVALID_OP, "Invalid operator"), \ C(TOO_MANY_OPEN, "Too many '('"), \ C(TOO_MANY_CLOSE, "Too few '('"), \ C(MISSING_QUOTE, "Missing matching quote"), \ C(OPERAND_TOO_LONG, "Operand too long"), \ C(EXPECT_STRING, "Expecting string field"), \ C(EXPECT_DIGIT, "Expecting numeric field"), \ C(ILLEGAL_FIELD_OP, "Illegal operation for field type"), \ C(FIELD_NOT_FOUND, "Field not found"), \ C(ILLEGAL_INTVAL, "Illegal integer value"), \ C(BAD_SUBSYS_FILTER, "Couldn't find or set field in one of a subsystem's events"), \ C(TOO_MANY_PREDS, "Too many terms in predicate expression"), \ C(INVALID_FILTER, "Meaningless filter expression"), \ C(IP_FIELD_ONLY, "Only 'ip' field is supported for function trace"), \ C(INVALID_VALUE, "Invalid value (did you forget quotes)?"), \ C(ERRNO, "Error"), \ C(NO_FILTER, "No filter found") #undef C #define C(a, b) FILT_ERR_##a enum { ERRORS }; #undef C #define C(a, b) b static const char *err_text[] = { ERRORS }; /* Called after a '!' character but "!=" and "!~" are not "not"s */ static bool is_not(const char *str) { switch (str[1]) { case '=': case '~': return false; } return true; } /** * prog_entry - a singe entry in the filter program * @target: Index to jump to on a branch (actually one minus the index) * @when_to_branch: The value of the result of the predicate to do a branch * @pred: The predicate to execute. */ struct prog_entry { int target; int when_to_branch; struct filter_pred *pred; }; /** * update_preds- assign a program entry a label target * @prog: The program array * @N: The index of the current entry in @prog * @when_to_branch: What to assign a program entry for its branch condition * * The program entry at @N has a target that points to the index of a program * entry that can have its target and when_to_branch fields updated. * Update the current program entry denoted by index @N target field to be * that of the updated entry. 
This will denote the entry to update if * we are processing an "||" after an "&&" */ static void update_preds(struct prog_entry *prog, int N, int invert) { int t, s; t = prog[N].target; s = prog[t].target; prog[t].when_to_branch = invert; prog[t].target = N; prog[N].target = s; } struct filter_parse_error { int lasterr; int lasterr_pos; }; static void parse_error(struct filter_parse_error *pe, int err, int pos) { pe->lasterr = err; pe->lasterr_pos = pos; } typedef int (*parse_pred_fn)(const char *str, void *data, int pos, struct filter_parse_error *pe, struct filter_pred **pred); enum { INVERT = 1, PROCESS_AND = 2, PROCESS_OR = 4, }; /* * Without going into a formal proof, this explains the method that is used in * parsing the logical expressions. * * For example, if we have: "a && !(!b || (c && g)) || d || e && !f" * The first pass will convert it into the following program: * * n1: r=a; l1: if (!r) goto l4; * n2: r=b; l2: if (!r) goto l4; * n3: r=c; r=!r; l3: if (r) goto l4; * n4: r=g; r=!r; l4: if (r) goto l5; * n5: r=d; l5: if (r) goto T * n6: r=e; l6: if (!r) goto l7; * n7: r=f; r=!r; l7: if (!r) goto F * T: return TRUE * F: return FALSE * * To do this, we use a data structure to represent each of the above * predicate and conditions that has: * * predicate, when_to_branch, invert, target * * The "predicate" will hold the function to determine the result "r". * The "when_to_branch" denotes what "r" should be if a branch is to be taken * "&&" would contain "!r" or (0) and "||" would contain "r" or (1). * The "invert" holds whether the value should be reversed before testing. * The "target" contains the label "l#" to jump to. * * A stack is created to hold values when parentheses are used. * * To simplify the logic, the labels will start at 0 and not 1. * * The possible invert values are 1 and 0. The number of "!"s that are in scope * before the predicate determines the invert value, if the number is odd then * the invert value is 1 and 0 otherwise. This means the invert value only * needs to be toggled when a new "!" is introduced compared to what is stored * on the stack, where parentheses were used. * * The top of the stack and "invert" are initialized to zero. * * ** FIRST PASS ** * * #1 A loop through all the tokens is done: * * #2 If the token is an "(", the stack is push, and the current stack value * gets the current invert value, and the loop continues to the next token. * The top of the stack saves the "invert" value to keep track of what * the current inversion is. As "!(a && !b || c)" would require all * predicates being affected separately by the "!" before the parentheses. * And that would end up being equivalent to "(!a || b) && !c" * * #3 If the token is an "!", the current "invert" value gets inverted, and * the loop continues. Note, if the next token is a predicate, then * this "invert" value is only valid for the current program entry, * and does not affect other predicates later on. * * The only other acceptable token is the predicate string. * * #4 A new entry into the program is added saving: the predicate and the * current value of "invert". The target is currently assigned to the * previous program index (this will not be its final value). * * #5 We now enter another loop and look at the next token. The only valid * tokens are ")", "&&", "||" or end of the input string "\0". * * #6 The invert variable is reset to the current value saved on the top of * the stack. 
* * #7 The top of the stack holds not only the current invert value, but also * if a "&&" or "||" needs to be processed. Note, the "&&" takes higher * precedence than "||". That is "a && b || c && d" is equivalent to * "(a && b) || (c && d)". Thus the first thing to do is to see if "&&" needs * to be processed. This is the case if an "&&" was the last token. If it was * then we call update_preds(). This takes the program, the current index in * the program, and the current value of "invert". More will be described * below about this function. * * #8 If the next token is "&&" then we set a flag in the top of the stack * that denotes that "&&" needs to be processed, break out of this loop * and continue with the outer loop. * * #9 Otherwise, if a "||" needs to be processed then update_preds() is called. * This is called with the program, the current index in the program, but * this time with an inverted value of "invert" (that is !invert). This is * because the value taken will become the "when_to_branch" value of the * program. * Note, this is called when the next token is not an "&&". As stated before, * "&&" takes higher precedence, and "||" should not be processed yet if the * next logical operation is "&&". * * #10 If the next token is "||" then we set a flag in the top of the stack * that denotes that "||" needs to be processed, break out of this loop * and continue with the outer loop. * * #11 If this is the end of the input string "\0" then we break out of both * loops. * * #12 Otherwise, the next token is ")", where we pop the stack and continue * this inner loop. * * Now to discuss the update_pred() function, as that is key to the setting up * of the program. Remember the "target" of the program is initialized to the * previous index and not the "l" label. The target holds the index into the * program that gets affected by the operand. Thus if we have something like * "a || b && c", when we process "a" the target will be "-1" (undefined). * When we process "b", its target is "0", which is the index of "a", as that's * the predicate that is affected by "||". But because the next token after "b" * is "&&" we don't call update_preds(). Instead continue to "c". As the * next token after "c" is not "&&" but the end of input, we first process the * "&&" by calling update_preds() for the "&&" then we process the "||" by * callin updates_preds() with the values for processing "||". * * What does that mean? What update_preds() does is to first save the "target" * of the program entry indexed by the current program entry's "target" * (remember the "target" is initialized to previous program entry), and then * sets that "target" to the current index which represents the label "l#". * That entry's "when_to_branch" is set to the value passed in (the "invert" * or "!invert"). Then it sets the current program entry's target to the saved * "target" value (the old value of the program that had its "target" updated * to the label). 
* * Looking back at "a || b && c", we have the following steps: * "a" - prog[0] = { "a", X, -1 } // pred, when_to_branch, target * "||" - flag that we need to process "||"; continue outer loop * "b" - prog[1] = { "b", X, 0 } * "&&" - flag that we need to process "&&"; continue outer loop * (Notice we did not process "||") * "c" - prog[2] = { "c", X, 1 } * update_preds(prog, 2, 0); // invert = 0 as we are processing "&&" * t = prog[2].target; // t = 1 * s = prog[t].target; // s = 0 * prog[t].target = 2; // Set target to "l2" * prog[t].when_to_branch = 0; * prog[2].target = s; * update_preds(prog, 2, 1); // invert = 1 as we are now processing "||" * t = prog[2].target; // t = 0 * s = prog[t].target; // s = -1 * prog[t].target = 2; // Set target to "l2" * prog[t].when_to_branch = 1; * prog[2].target = s; * * #13 Which brings us to the final step of the first pass, which is to set * the last program entry's when_to_branch and target, which will be * when_to_branch = 0; target = N; ( the label after the program entry after * the last program entry processed above). * * If we denote "TRUE" to be the entry after the last program entry processed, * and "FALSE" the program entry after that, we are now done with the first * pass. * * Making the above "a || b && c" have a progam of: * prog[0] = { "a", 1, 2 } * prog[1] = { "b", 0, 2 } * prog[2] = { "c", 0, 3 } * * Which translates into: * n0: r = a; l0: if (r) goto l2; * n1: r = b; l1: if (!r) goto l2; * n2: r = c; l2: if (!r) goto l3; // Which is the same as "goto F;" * T: return TRUE; l3: * F: return FALSE * * Although, after the first pass, the program is correct, it is * inefficient. The simple sample of "a || b && c" could be easily been * converted into: * n0: r = a; if (r) goto T * n1: r = b; if (!r) goto F * n2: r = c; if (!r) goto F * T: return TRUE; * F: return FALSE; * * The First Pass is over the input string. The next too passes are over * the program itself. * * ** SECOND PASS ** * * Which brings us to the second pass. If a jump to a label has the * same condition as that label, it can instead jump to its target. * The original example of "a && !(!b || (c && g)) || d || e && !f" * where the first pass gives us: * * n1: r=a; l1: if (!r) goto l4; * n2: r=b; l2: if (!r) goto l4; * n3: r=c; r=!r; l3: if (r) goto l4; * n4: r=g; r=!r; l4: if (r) goto l5; * n5: r=d; l5: if (r) goto T * n6: r=e; l6: if (!r) goto l7; * n7: r=f; r=!r; l7: if (!r) goto F: * T: return TRUE; * F: return FALSE * * We can see that "l3: if (r) goto l4;" and at l4, we have "if (r) goto l5;". * And "l5: if (r) goto T", we could optimize this by converting l3 and l4 * to go directly to T. To accomplish this, we start from the last * entry in the program and work our way back. If the target of the entry * has the same "when_to_branch" then we could use that entry's target. * Doing this, the above would end up as: * * n1: r=a; l1: if (!r) goto l4; * n2: r=b; l2: if (!r) goto l4; * n3: r=c; r=!r; l3: if (r) goto T; * n4: r=g; r=!r; l4: if (r) goto T; * n5: r=d; l5: if (r) goto T; * n6: r=e; l6: if (!r) goto F; * n7: r=f; r=!r; l7: if (!r) goto F; * T: return TRUE * F: return FALSE * * In that same pass, if the "when_to_branch" doesn't match, we can simply * go to the program entry after the label. That is, "l2: if (!r) goto l4;" * where "l4: if (r) goto T;", then we can convert l2 to be: * "l2: if (!r) goto n5;". 
* * This will have the second pass give us: * n1: r=a; l1: if (!r) goto n5; * n2: r=b; l2: if (!r) goto n5; * n3: r=c; r=!r; l3: if (r) goto T; * n4: r=g; r=!r; l4: if (r) goto T; * n5: r=d; l5: if (r) goto T * n6: r=e; l6: if (!r) goto F; * n7: r=f; r=!r; l7: if (!r) goto F * T: return TRUE * F: return FALSE * * Notice, all the "l#" labels are no longer used, and they can now * be discarded. * * ** THIRD PASS ** * * For the third pass we deal with the inverts. As they simply just * make the "when_to_branch" get inverted, a simple loop over the * program to that does: "when_to_branch ^= invert;" will do the * job, leaving us with: * n1: r=a; if (!r) goto n5; * n2: r=b; if (!r) goto n5; * n3: r=c: if (!r) goto T; * n4: r=g; if (!r) goto T; * n5: r=d; if (r) goto T * n6: r=e; if (!r) goto F; * n7: r=f; if (r) goto F * T: return TRUE * F: return FALSE * * As "r = a; if (!r) goto n5;" is obviously the same as * "if (!a) goto n5;" without doing anything we can interperate the * program as: * n1: if (!a) goto n5; * n2: if (!b) goto n5; * n3: if (!c) goto T; * n4: if (!g) goto T; * n5: if (d) goto T * n6: if (!e) goto F; * n7: if (f) goto F * T: return TRUE * F: return FALSE * * Since the inverts are discarded at the end, there's no reason to store * them in the program array (and waste memory). A separate array to hold * the inverts is used and freed at the end. */ static struct prog_entry * predicate_parse(const char *str, int nr_parens, int nr_preds, parse_pred_fn parse_pred, void *data, struct filter_parse_error *pe) { struct prog_entry *prog_stack; struct prog_entry *prog; const char *ptr = str; char *inverts = NULL; int *op_stack; int *top; int invert = 0; int ret = -ENOMEM; int len; int N = 0; int i; nr_preds += 2; /* For TRUE and FALSE */ op_stack = kmalloc_array(nr_parens, sizeof(*op_stack), GFP_KERNEL); if (!op_stack) return ERR_PTR(-ENOMEM); prog_stack = kcalloc(nr_preds, sizeof(*prog_stack), GFP_KERNEL); if (!prog_stack) { parse_error(pe, -ENOMEM, 0); goto out_free; } inverts = kmalloc_array(nr_preds, sizeof(*inverts), GFP_KERNEL); if (!inverts) { parse_error(pe, -ENOMEM, 0); goto out_free; } top = op_stack; prog = prog_stack; *top = 0; /* First pass */ while (*ptr) { /* #1 */ const char *next = ptr++; if (isspace(*next)) continue; switch (*next) { case '(': /* #2 */ if (top - op_stack > nr_parens) return ERR_PTR(-EINVAL); *(++top) = invert; continue; case '!': /* #3 */ if (!is_not(next)) break; invert = !invert; continue; } if (N >= nr_preds) { parse_error(pe, FILT_ERR_TOO_MANY_PREDS, next - str); goto out_free; } inverts[N] = invert; /* #4 */ prog[N].target = N-1; len = parse_pred(next, data, ptr - str, pe, &prog[N].pred); if (len < 0) { ret = len; goto out_free; } ptr = next + len; N++; ret = -1; while (1) { /* #5 */ next = ptr++; if (isspace(*next)) continue; switch (*next) { case ')': case '\0': break; case '&': case '|': /* accepting only "&&" or "||" */ if (next[1] == next[0]) { ptr++; break; } /* fall through */ default: parse_error(pe, FILT_ERR_TOO_MANY_PREDS, next - str); goto out_free; } invert = *top & INVERT; if (*top & PROCESS_AND) { /* #7 */ update_preds(prog, N - 1, invert); *top &= ~PROCESS_AND; } if (*next == '&') { /* #8 */ *top |= PROCESS_AND; break; } if (*top & PROCESS_OR) { /* #9 */ update_preds(prog, N - 1, !invert); *top &= ~PROCESS_OR; } if (*next == '|') { /* #10 */ *top |= PROCESS_OR; break; } if (!*next) /* #11 */ goto out; if (top == op_stack) { ret = -1; /* Too few '(' */ parse_error(pe, FILT_ERR_TOO_MANY_CLOSE, ptr - str); goto out_free; } top--; /* #12 
*/ } } out: if (top != op_stack) { /* Too many '(' */ parse_error(pe, FILT_ERR_TOO_MANY_OPEN, ptr - str); goto out_free; } if (!N) { /* No program? */ ret = -EINVAL; parse_error(pe, FILT_ERR_NO_FILTER, ptr - str); goto out_free; } prog[N].pred = NULL; /* #13 */ prog[N].target = 1; /* TRUE */ prog[N+1].pred = NULL; prog[N+1].target = 0; /* FALSE */ prog[N-1].target = N; prog[N-1].when_to_branch = false; /* Second Pass */ for (i = N-1 ; i--; ) { int target = prog[i].target; if (prog[i].when_to_branch == prog[target].when_to_branch) prog[i].target = prog[target].target; } /* Third Pass */ for (i = 0; i < N; i++) { invert = inverts[i] ^ prog[i].when_to_branch; prog[i].when_to_branch = invert; /* Make sure the program always moves forward */ if (WARN_ON(prog[i].target <= i)) { ret = -EINVAL; goto out_free; } } kfree(op_stack); kfree(inverts); return prog; out_free: kfree(op_stack); kfree(inverts); if (prog_stack) { for (i = 0; prog_stack[i].pred; i++) kfree(prog_stack[i].pred); kfree(prog_stack); } return ERR_PTR(ret); } #define DEFINE_COMPARISON_PRED(type) \ static int filter_pred_LT_##type(struct filter_pred *pred, void *event) \ { \ type *addr = (type *)(event + pred->offset); \ type val = (type)pred->val; \ return *addr < val; \ } \ static int filter_pred_LE_##type(struct filter_pred *pred, void *event) \ { \ type *addr = (type *)(event + pred->offset); \ type val = (type)pred->val; \ return *addr <= val; \ } \ static int filter_pred_GT_##type(struct filter_pred *pred, void *event) \ { \ type *addr = (type *)(event + pred->offset); \ type val = (type)pred->val; \ return *addr > val; \ } \ static int filter_pred_GE_##type(struct filter_pred *pred, void *event) \ { \ type *addr = (type *)(event + pred->offset); \ type val = (type)pred->val; \ return *addr >= val; \ } \ static int filter_pred_BAND_##type(struct filter_pred *pred, void *event) \ { \ type *addr = (type *)(event + pred->offset); \ type val = (type)pred->val; \ return !!(*addr & val); \ } \ static const filter_pred_fn_t pred_funcs_##type[] = { \ filter_pred_LE_##type, \ filter_pred_LT_##type, \ filter_pred_GE_##type, \ filter_pred_GT_##type, \ filter_pred_BAND_##type, \ }; #define DEFINE_EQUALITY_PRED(size) \ static int filter_pred_##size(struct filter_pred *pred, void *event) \ { \ u##size *addr = (u##size *)(event + pred->offset); \ u##size val = (u##size)pred->val; \ int match; \ \ match = (val == *addr) ^ pred->not; \ \ return match; \ } DEFINE_COMPARISON_PRED(s64); DEFINE_COMPARISON_PRED(u64); DEFINE_COMPARISON_PRED(s32); DEFINE_COMPARISON_PRED(u32); DEFINE_COMPARISON_PRED(s16); DEFINE_COMPARISON_PRED(u16); DEFINE_COMPARISON_PRED(s8); DEFINE_COMPARISON_PRED(u8); DEFINE_EQUALITY_PRED(64); DEFINE_EQUALITY_PRED(32); DEFINE_EQUALITY_PRED(16); DEFINE_EQUALITY_PRED(8); /* Filter predicate for fixed sized arrays of characters */ static int filter_pred_string(struct filter_pred *pred, void *event) { char *addr = (char *)(event + pred->offset); int cmp, match; cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len); match = cmp ^ pred->not; return match; } /* Filter predicate for char * pointers */ static int filter_pred_pchar(struct filter_pred *pred, void *event) { char **addr = (char **)(event + pred->offset); int cmp, match; int len = strlen(*addr) + 1; /* including tailing '\0' */ cmp = pred->regex.match(*addr, &pred->regex, len); match = cmp ^ pred->not; return match; } /* * Filter predicate for dynamic sized arrays of characters. * These are implemented through a list of strings at the end * of the entry. 
* Also each of these strings have a field in the entry which * contains its offset from the beginning of the entry. * We have then first to get this field, dereference it * and add it to the address of the entry, and at last we have * the address of the string. */ static int filter_pred_strloc(struct filter_pred *pred, void *event) { u32 str_item = *(u32 *)(event + pred->offset); int str_loc = str_item & 0xffff; int str_len = str_item >> 16; char *addr = (char *)(event + str_loc); int cmp, match; cmp = pred->regex.match(addr, &pred->regex, str_len); match = cmp ^ pred->not; return match; } /* Filter predicate for CPUs. */ static int filter_pred_cpu(struct filter_pred *pred, void *event) { int cpu, cmp; cpu = raw_smp_processor_id(); cmp = pred->val; switch (pred->op) { case OP_EQ: return cpu == cmp; case OP_NE: return cpu != cmp; case OP_LT: return cpu < cmp; case OP_LE: return cpu <= cmp; case OP_GT: return cpu > cmp; case OP_GE: return cpu >= cmp; default: return 0; } } /* Filter predicate for COMM. */ static int filter_pred_comm(struct filter_pred *pred, void *event) { int cmp; cmp = pred->regex.match(current->comm, &pred->regex, TASK_COMM_LEN); return cmp ^ pred->not; } static int filter_pred_none(struct filter_pred *pred, void *event) { return 0; } /* * regex_match_foo - Basic regex callbacks * * @str: the string to be searched * @r: the regex structure containing the pattern string * @len: the length of the string to be searched (including '\0') * * Note: * - @str might not be NULL-terminated if it's of type DYN_STRING * or STATIC_STRING, unless @len is zero. */ static int regex_match_full(char *str, struct regex *r, int len) { /* len of zero means str is dynamic and ends with '\0' */ if (!len) return strcmp(str, r->pattern) == 0; return strncmp(str, r->pattern, len) == 0; } static int regex_match_front(char *str, struct regex *r, int len) { if (len && len < r->len) return 0; return strncmp(str, r->pattern, r->len) == 0; } static int regex_match_middle(char *str, struct regex *r, int len) { if (!len) return strstr(str, r->pattern) != NULL; return strnstr(str, r->pattern, len) != NULL; } static int regex_match_end(char *str, struct regex *r, int len) { int strlen = len - 1; if (strlen >= r->len && memcmp(str + strlen - r->len, r->pattern, r->len) == 0) return 1; return 0; } static int regex_match_glob(char *str, struct regex *r, int len __maybe_unused) { if (glob_match(r->pattern, str)) return 1; return 0; } /** * filter_parse_regex - parse a basic regex * @buff: the raw regex * @len: length of the regex * @search: will point to the beginning of the string to compare * @not: tell whether the match will have to be inverted * * This passes in a buffer containing a regex and this function will * set search to point to the search part of the buffer and * return the type of search it is (see enum above). * This does modify buff. * * Returns enum type. * search returns the pointer to use for comparison. * not returns 1 if buff started with a '!' * 0 otherwise. 
*/ enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not) { int type = MATCH_FULL; int i; if (buff[0] == '!') { *not = 1; buff++; len--; } else *not = 0; *search = buff; if (isdigit(buff[0])) return MATCH_INDEX; for (i = 0; i < len; i++) { if (buff[i] == '*') { if (!i) { type = MATCH_END_ONLY; } else if (i == len - 1) { if (type == MATCH_END_ONLY) type = MATCH_MIDDLE_ONLY; else type = MATCH_FRONT_ONLY; buff[i] = 0; break; } else { /* pattern continues, use full glob */ return MATCH_GLOB; } } else if (strchr("[?\\", buff[i])) { return MATCH_GLOB; } } if (buff[0] == '*') *search = buff + 1; return type; } static void filter_build_regex(struct filter_pred *pred) { struct regex *r = &pred->regex; char *search; enum regex_type type = MATCH_FULL; if (pred->op == OP_GLOB) { type = filter_parse_regex(r->pattern, r->len, &search, &pred->not); r->len = strlen(search); memmove(r->pattern, search, r->len+1); } switch (type) { /* MATCH_INDEX should not happen, but if it does, match full */ case MATCH_INDEX: case MATCH_FULL: r->match = regex_match_full; break; case MATCH_FRONT_ONLY: r->match = regex_match_front; break; case MATCH_MIDDLE_ONLY: r->match = regex_match_middle; break; case MATCH_END_ONLY: r->match = regex_match_end; break; case MATCH_GLOB: r->match = regex_match_glob; break; } } /* return 1 if event matches, 0 otherwise (discard) */ int filter_match_preds(struct event_filter *filter, void *rec) { struct prog_entry *prog; int i; /* no filter is considered a match */ if (!filter) return 1; /* Protected by either SRCU(tracepoint_srcu) or preempt_disable */ prog = rcu_dereference_raw(filter->prog); if (!prog) return 1; for (i = 0; prog[i].pred; i++) { struct filter_pred *pred = prog[i].pred; int match = pred->fn(pred, rec); if (match == prog[i].when_to_branch) i = prog[i].target; } return prog[i].target; } EXPORT_SYMBOL_GPL(filter_match_preds); static void remove_filter_string(struct event_filter *filter) { if (!filter) return; kfree(filter->filter_string); filter->filter_string = NULL; } static void append_filter_err(struct trace_array *tr, struct filter_parse_error *pe, struct event_filter *filter) { struct trace_seq *s; int pos = pe->lasterr_pos; char *buf; int len; if (WARN_ON(!filter->filter_string)) return; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return; trace_seq_init(s); len = strlen(filter->filter_string); if (pos > len) pos = len; /* indexing is off by one */ if (pos) pos++; trace_seq_puts(s, filter->filter_string); if (pe->lasterr > 0) { trace_seq_printf(s, "\n%*s", pos, "^"); trace_seq_printf(s, "\nparse_error: %s\n", err_text[pe->lasterr]); tracing_log_err(tr, "event filter parse error", filter->filter_string, err_text, pe->lasterr, pe->lasterr_pos); } else { trace_seq_printf(s, "\nError: (%d)\n", pe->lasterr); tracing_log_err(tr, "event filter parse error", filter->filter_string, err_text, FILT_ERR_ERRNO, 0); } trace_seq_putc(s, 0); buf = kmemdup_nul(s->buffer, s->seq.len, GFP_KERNEL); if (buf) { kfree(filter->filter_string); filter->filter_string = buf; } kfree(s); } static inline struct event_filter *event_filter(struct trace_event_file *file) { return file->filter; } /* caller must hold event_mutex */ void print_event_filter(struct trace_event_file *file, struct trace_seq *s) { struct event_filter *filter = event_filter(file); if (filter && filter->filter_string) trace_seq_printf(s, "%s\n", filter->filter_string); else trace_seq_puts(s, "none\n"); } void print_subsystem_event_filter(struct event_subsystem *system, struct trace_seq *s) { struct 
event_filter *filter; mutex_lock(&event_mutex); filter = system->filter; if (filter && filter->filter_string) trace_seq_printf(s, "%s\n", filter->filter_string); else trace_seq_puts(s, DEFAULT_SYS_FILTER_MESSAGE "\n"); mutex_unlock(&event_mutex); } static void free_prog(struct event_filter *filter) { struct prog_entry *prog; int i; prog = rcu_access_pointer(filter->prog); if (!prog) return; for (i = 0; prog[i].pred; i++) kfree(prog[i].pred); kfree(prog); } static void filter_disable(struct trace_event_file *file) { unsigned long old_flags = file->flags; file->flags &= ~EVENT_FILE_FL_FILTERED; if (old_flags != file->flags) trace_buffered_event_disable(); } static void __free_filter(struct event_filter *filter) { if (!filter) return; free_prog(filter); kfree(filter->filter_string); kfree(filter); } void free_event_filter(struct event_filter *filter) { __free_filter(filter); } static inline void __remove_filter(struct trace_event_file *file) { filter_disable(file); remove_filter_string(file->filter); } static void filter_free_subsystem_preds(struct trace_subsystem_dir *dir, struct trace_array *tr) { struct trace_event_file *file; list_for_each_entry(file, &tr->events, list) { if (file->system != dir) continue; __remove_filter(file); } } static inline void __free_subsystem_filter(struct trace_event_file *file) { __free_filter(file->filter); file->filter = NULL; } static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir, struct trace_array *tr) { struct trace_event_file *file; list_for_each_entry(file, &tr->events, list) { if (file->system != dir) continue; __free_subsystem_filter(file); } } int filter_assign_type(const char *type) { if (strstr(type, "__data_loc") && strstr(type, "char")) return FILTER_DYN_STRING; if (strchr(type, '[') && strstr(type, "char")) return FILTER_STATIC_STRING; if (strcmp(type, "char *") == 0 || strcmp(type, "const char *") == 0) return FILTER_PTR_STRING; return FILTER_OTHER; } static filter_pred_fn_t select_comparison_fn(enum filter_op_ids op, int field_size, int field_is_signed) { filter_pred_fn_t fn = NULL; int pred_func_index = -1; switch (op) { case OP_EQ: case OP_NE: break; default: if (WARN_ON_ONCE(op < PRED_FUNC_START)) return NULL; pred_func_index = op - PRED_FUNC_START; if (WARN_ON_ONCE(pred_func_index > PRED_FUNC_MAX)) return NULL; } switch (field_size) { case 8: if (pred_func_index < 0) fn = filter_pred_64; else if (field_is_signed) fn = pred_funcs_s64[pred_func_index]; else fn = pred_funcs_u64[pred_func_index]; break; case 4: if (pred_func_index < 0) fn = filter_pred_32; else if (field_is_signed) fn = pred_funcs_s32[pred_func_index]; else fn = pred_funcs_u32[pred_func_index]; break; case 2: if (pred_func_index < 0) fn = filter_pred_16; else if (field_is_signed) fn = pred_funcs_s16[pred_func_index]; else fn = pred_funcs_u16[pred_func_index]; break; case 1: if (pred_func_index < 0) fn = filter_pred_8; else if (field_is_signed) fn = pred_funcs_s8[pred_func_index]; else fn = pred_funcs_u8[pred_func_index]; break; } return fn; } /* Called when a predicate is encountered by predicate_parse() */ static int parse_pred(const char *str, void *data, int pos, struct filter_parse_error *pe, struct filter_pred **pred_ptr) { struct trace_event_call *call = data; struct ftrace_event_field *field; struct filter_pred *pred = NULL; char num_buf[24]; /* Big enough to hold an address */ char *field_name; char q; u64 val; int len; int ret; int op; int s; int i = 0; /* First find the field to associate to */ while (isspace(str[i])) i++; s = i; while 
(isalnum(str[i]) || str[i] == '_') i++; len = i - s; if (!len) return -1; field_name = kmemdup_nul(str + s, len, GFP_KERNEL); if (!field_name) return -ENOMEM; /* Make sure that the field exists */ field = trace_find_event_field(call, field_name); kfree(field_name); if (!field) { parse_error(pe, FILT_ERR_FIELD_NOT_FOUND, pos + i); return -EINVAL; } while (isspace(str[i])) i++; /* Make sure this op is supported */ for (op = 0; ops[op]; op++) { /* This is why '<=' must come before '<' in ops[] */ if (strncmp(str + i, ops[op], strlen(ops[op])) == 0) break; } if (!ops[op]) { parse_error(pe, FILT_ERR_INVALID_OP, pos + i); goto err_free; } i += strlen(ops[op]); while (isspace(str[i])) i++; s = i; pred = kzalloc(sizeof(*pred), GFP_KERNEL); if (!pred) return -ENOMEM; pred->field = field; pred->offset = field->offset; pred->op = op; if (ftrace_event_is_function(call)) { /* * Perf does things different with function events. * It only allows an "ip" field, and expects a string. * But the string does not need to be surrounded by quotes. * If it is a string, the assigned function as a nop, * (perf doesn't use it) and grab everything. */ if (strcmp(field->name, "ip") != 0) { parse_error(pe, FILT_ERR_IP_FIELD_ONLY, pos + i); goto err_free; } pred->fn = filter_pred_none; /* * Quotes are not required, but if they exist then we need * to read them till we hit a matching one. */ if (str[i] == '\'' || str[i] == '"') q = str[i]; else q = 0; for (i++; str[i]; i++) { if (q && str[i] == q) break; if (!q && (str[i] == ')' || str[i] == '&' || str[i] == '|')) break; } /* Skip quotes */ if (q) s++; len = i - s; if (len >= MAX_FILTER_STR_VAL) { parse_error(pe, FILT_ERR_OPERAND_TOO_LONG, pos + i); goto err_free; } pred->regex.len = len; strncpy(pred->regex.pattern, str + s, len); pred->regex.pattern[len] = 0; /* This is either a string, or an integer */ } else if (str[i] == '\'' || str[i] == '"') { char q = str[i]; /* Make sure the op is OK for strings */ switch (op) { case OP_NE: pred->not = 1; /* Fall through */ case OP_GLOB: case OP_EQ: break; default: parse_error(pe, FILT_ERR_ILLEGAL_FIELD_OP, pos + i); goto err_free; } /* Make sure the field is OK for strings */ if (!is_string_field(field)) { parse_error(pe, FILT_ERR_EXPECT_DIGIT, pos + i); goto err_free; } for (i++; str[i]; i++) { if (str[i] == q) break; } if (!str[i]) { parse_error(pe, FILT_ERR_MISSING_QUOTE, pos + i); goto err_free; } /* Skip quotes */ s++; len = i - s; if (len >= MAX_FILTER_STR_VAL) { parse_error(pe, FILT_ERR_OPERAND_TOO_LONG, pos + i); goto err_free; } pred->regex.len = len; strncpy(pred->regex.pattern, str + s, len); pred->regex.pattern[len] = 0; filter_build_regex(pred); if (field->filter_type == FILTER_COMM) { pred->fn = filter_pred_comm; } else if (field->filter_type == FILTER_STATIC_STRING) { pred->fn = filter_pred_string; pred->regex.field_len = field->size; } else if (field->filter_type == FILTER_DYN_STRING) pred->fn = filter_pred_strloc; else pred->fn = filter_pred_pchar; /* go past the last quote */ i++; } else if (isdigit(str[i]) || str[i] == '-') { /* Make sure the field is not a string */ if (is_string_field(field)) { parse_error(pe, FILT_ERR_EXPECT_STRING, pos + i); goto err_free; } if (op == OP_GLOB) { parse_error(pe, FILT_ERR_ILLEGAL_FIELD_OP, pos + i); goto err_free; } if (str[i] == '-') i++; /* We allow 0xDEADBEEF */ while (isalnum(str[i])) i++; len = i - s; /* 0xfeedfacedeadbeef is 18 chars max */ if (len >= sizeof(num_buf)) { parse_error(pe, FILT_ERR_OPERAND_TOO_LONG, pos + i); goto err_free; } strncpy(num_buf, str + s, 
len); num_buf[len] = 0; /* Make sure it is a value */ if (field->is_signed) ret = kstrtoll(num_buf, 0, &val); else ret = kstrtoull(num_buf, 0, &val); if (ret) { parse_error(pe, FILT_ERR_ILLEGAL_INTVAL, pos + s); goto err_free; } pred->val = val; if (field->filter_type == FILTER_CPU) pred->fn = filter_pred_cpu; else { pred->fn = select_comparison_fn(pred->op, field->size, field->is_signed); if (pred->op == OP_NE) pred->not = 1; } } else { parse_error(pe, FILT_ERR_INVALID_VALUE, pos + i); goto err_free; } *pred_ptr = pred; return i; err_free: kfree(pred); return -EINVAL; } enum { TOO_MANY_CLOSE = -1, TOO_MANY_OPEN = -2, MISSING_QUOTE = -3, }; /* * Read the filter string once to calculate the number of predicates * as well as how deep the parentheses go. * * Returns: * 0 - everything is fine (err is undefined) * -1 - too many ')' * -2 - too many '(' * -3 - No matching quote */ static int calc_stack(const char *str, int *parens, int *preds, int *err) { bool is_pred = false; int nr_preds = 0; int open = 1; /* Count the expression as "(E)" */ int last_quote = 0; int max_open = 1; int quote = 0; int i; *err = 0; for (i = 0; str[i]; i++) { if (isspace(str[i])) continue; if (quote) { if (str[i] == quote) quote = 0; continue; } switch (str[i]) { case '\'': case '"': quote = str[i]; last_quote = i; break; case '|': case '&': if (str[i+1] != str[i]) break; is_pred = false; continue; case '(': is_pred = false; open++; if (open > max_open) max_open = open; continue; case ')': is_pred = false; if (open == 1) { *err = i; return TOO_MANY_CLOSE; } open--; continue; } if (!is_pred) { nr_preds++; is_pred = true; } } if (quote) { *err = last_quote; return MISSING_QUOTE; } if (open != 1) { int level = open; /* find the bad open */ for (i--; i; i--) { if (quote) { if (str[i] == quote) quote = 0; continue; } switch (str[i]) { case '(': if (level == open) { *err = i; return TOO_MANY_OPEN; } level--; break; case ')': level++; break; case '\'': case '"': quote = str[i]; break; } } /* First character is the '(' with missing ')' */ *err = 0; return TOO_MANY_OPEN; } /* Set the size of the required stacks */ *parens = max_open; *preds = nr_preds; return 0; } static int process_preds(struct trace_event_call *call, const char *filter_string, struct event_filter *filter, struct filter_parse_error *pe) { struct prog_entry *prog; int nr_parens; int nr_preds; int index; int ret; ret = calc_stack(filter_string, &nr_parens, &nr_preds, &index); if (ret < 0) { switch (ret) { case MISSING_QUOTE: parse_error(pe, FILT_ERR_MISSING_QUOTE, index); break; case TOO_MANY_OPEN: parse_error(pe, FILT_ERR_TOO_MANY_OPEN, index); break; default: parse_error(pe, FILT_ERR_TOO_MANY_CLOSE, index); } return ret; } if (!nr_preds) return -EINVAL; prog = predicate_parse(filter_string, nr_parens, nr_preds, parse_pred, call, pe); if (IS_ERR(prog)) return PTR_ERR(prog); rcu_assign_pointer(filter->prog, prog); return 0; } static inline void event_set_filtered_flag(struct trace_event_file *file) { unsigned long old_flags = file->flags; file->flags |= EVENT_FILE_FL_FILTERED; if (old_flags != file->flags) trace_buffered_event_enable(); } static inline void event_set_filter(struct trace_event_file *file, struct event_filter *filter) { rcu_assign_pointer(file->filter, filter); } static inline void event_clear_filter(struct trace_event_file *file) { RCU_INIT_POINTER(file->filter, NULL); } static inline void event_set_no_set_filter_flag(struct trace_event_file *file) { file->flags |= EVENT_FILE_FL_NO_SET_FILTER; } static inline void 
event_clear_no_set_filter_flag(struct trace_event_file *file) { file->flags &= ~EVENT_FILE_FL_NO_SET_FILTER; } static inline bool event_no_set_filter_flag(struct trace_event_file *file) { if (file->flags & EVENT_FILE_FL_NO_SET_FILTER) return true; return false; } struct filter_list { struct list_head list; struct event_filter *filter; }; static int process_system_preds(struct trace_subsystem_dir *dir, struct trace_array *tr, struct filter_parse_error *pe, char *filter_string) { struct trace_event_file *file; struct filter_list *filter_item; struct event_filter *filter = NULL; struct filter_list *tmp; LIST_HEAD(filter_list); bool fail = true; int err; list_for_each_entry(file, &tr->events, list) { if (file->system != dir) continue; filter = kzalloc(sizeof(*filter), GFP_KERNEL); if (!filter) goto fail_mem; filter->filter_string = kstrdup(filter_string, GFP_KERNEL); if (!filter->filter_string) goto fail_mem; err = process_preds(file->event_call, filter_string, filter, pe); if (err) { filter_disable(file); parse_error(pe, FILT_ERR_BAD_SUBSYS_FILTER, 0); append_filter_err(tr, pe, filter); } else event_set_filtered_flag(file); filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL); if (!filter_item) goto fail_mem; list_add_tail(&filter_item->list, &filter_list); /* * Regardless of if this returned an error, we still * replace the filter for the call. */ filter_item->filter = event_filter(file); event_set_filter(file, filter); filter = NULL; fail = false; } if (fail) goto fail; /* * The calls can still be using the old filters. * Do a synchronize_rcu() and to ensure all calls are * done with them before we free them. */ tracepoint_synchronize_unregister(); list_for_each_entry_safe(filter_item, tmp, &filter_list, list) { __free_filter(filter_item->filter); list_del(&filter_item->list); kfree(filter_item); } return 0; fail: /* No call succeeded */ list_for_each_entry_safe(filter_item, tmp, &filter_list, list) { list_del(&filter_item->list); kfree(filter_item); } parse_error(pe, FILT_ERR_BAD_SUBSYS_FILTER, 0); return -EINVAL; fail_mem: kfree(filter); /* If any call succeeded, we still need to sync */ if (!fail) tracepoint_synchronize_unregister(); list_for_each_entry_safe(filter_item, tmp, &filter_list, list) { __free_filter(filter_item->filter); list_del(&filter_item->list); kfree(filter_item); } return -ENOMEM; } static int create_filter_start(char *filter_string, bool set_str, struct filter_parse_error **pse, struct event_filter **filterp) { struct event_filter *filter; struct filter_parse_error *pe = NULL; int err = 0; if (WARN_ON_ONCE(*pse || *filterp)) return -EINVAL; filter = kzalloc(sizeof(*filter), GFP_KERNEL); if (filter && set_str) { filter->filter_string = kstrdup(filter_string, GFP_KERNEL); if (!filter->filter_string) err = -ENOMEM; } pe = kzalloc(sizeof(*pe), GFP_KERNEL); if (!filter || !pe || err) { kfree(pe); __free_filter(filter); return -ENOMEM; } /* we're committed to creating a new filter */ *filterp = filter; *pse = pe; return 0; } static void create_filter_finish(struct filter_parse_error *pe) { kfree(pe); } /** * create_filter - create a filter for a trace_event_call * @call: trace_event_call to create a filter for * @filter_str: filter string * @set_str: remember @filter_str and enable detailed error in filter * @filterp: out param for created filter (always updated on return) * Must be a pointer that references a NULL pointer. * * Creates a filter for @call with @filter_str. If @set_str is %true, * @filter_str is copied and recorded in the new filter. 
* * On success, returns 0 and *@filterp points to the new filter. On * failure, returns -errno and *@filterp may point to %NULL or to a new * filter. In the latter case, the returned filter contains error * information if @set_str is %true and the caller is responsible for * freeing it. */ static int create_filter(struct trace_array *tr, struct trace_event_call *call, char *filter_string, bool set_str, struct event_filter **filterp) { struct filter_parse_error *pe = NULL; int err; /* filterp must point to NULL */ if (WARN_ON(*filterp)) *filterp = NULL; err = create_filter_start(filter_string, set_str, &pe, filterp); if (err) return err; err = process_preds(call, filter_string, *filterp, pe); if (err && set_str) append_filter_err(tr, pe, *filterp); create_filter_finish(pe); return err; } int create_event_filter(struct trace_array *tr, struct trace_event_call *call, char *filter_str, bool set_str, struct event_filter **filterp) { return create_filter(tr, call, filter_str, set_str, filterp); } /** * create_system_filter - create a filter for an event_subsystem * @system: event_subsystem to create a filter for * @filter_str: filter string * @filterp: out param for created filter (always updated on return) * * Identical to create_filter() except that it creates a subsystem filter * and always remembers @filter_str. */ static int create_system_filter(struct trace_subsystem_dir *dir, struct trace_array *tr, char *filter_str, struct event_filter **filterp) { struct filter_parse_error *pe = NULL; int err; err = create_filter_start(filter_str, true, &pe, filterp); if (!err) { err = process_system_preds(dir, tr, pe, filter_str); if (!err) { /* System filters just show a default message */ kfree((*filterp)->filter_string); (*filterp)->filter_string = NULL; } else { append_filter_err(tr, pe, *filterp); } } create_filter_finish(pe); return err; } /* caller must hold event_mutex */ int apply_event_filter(struct trace_event_file *file, char *filter_string) { struct trace_event_call *call = file->event_call; struct event_filter *filter = NULL; int err; if (!strcmp(strstrip(filter_string), "0")) { filter_disable(file); filter = event_filter(file); if (!filter) return 0; event_clear_filter(file); /* Make sure the filter is not being used */ tracepoint_synchronize_unregister(); __free_filter(filter); return 0; } err = create_filter(file->tr, call, filter_string, true, &filter); /* * Always swap the call filter with the new filter * even if there was an error. 
If there was an error * in the filter, we disable the filter and show the error * string */ if (filter) { struct event_filter *tmp; tmp = event_filter(file); if (!err) event_set_filtered_flag(file); else filter_disable(file); event_set_filter(file, filter); if (tmp) { /* Make sure the call is done with the filter */ tracepoint_synchronize_unregister(); __free_filter(tmp); } } return err; } int apply_subsystem_event_filter(struct trace_subsystem_dir *dir, char *filter_string) { struct event_subsystem *system = dir->subsystem; struct trace_array *tr = dir->tr; struct event_filter *filter = NULL; int err = 0; mutex_lock(&event_mutex); /* Make sure the system still has events */ if (!dir->nr_events) { err = -ENODEV; goto out_unlock; } if (!strcmp(strstrip(filter_string), "0")) { filter_free_subsystem_preds(dir, tr); remove_filter_string(system->filter); filter = system->filter; system->filter = NULL; /* Ensure all filters are no longer used */ tracepoint_synchronize_unregister(); filter_free_subsystem_filters(dir, tr); __free_filter(filter); goto out_unlock; } err = create_system_filter(dir, tr, filter_string, &filter); if (filter) { /* * No event actually uses the system filter * we can free it without synchronize_rcu(). */ __free_filter(system->filter); system->filter = filter; } out_unlock: mutex_unlock(&event_mutex); return err; } #ifdef CONFIG_PERF_EVENTS void ftrace_profile_free_filter(struct perf_event *event) { struct event_filter *filter = event->filter; event->filter = NULL; __free_filter(filter); } struct function_filter_data { struct ftrace_ops *ops; int first_filter; int first_notrace; }; #ifdef CONFIG_FUNCTION_TRACER static char ** ftrace_function_filter_re(char *buf, int len, int *count) { char *str, **re; str = kstrndup(buf, len, GFP_KERNEL); if (!str) return NULL; /* * The argv_split function takes white space * as a separator, so convert ',' into spaces. */ strreplace(str, ',', ' '); re = argv_split(GFP_KERNEL, str, count); kfree(str); return re; } static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter, int reset, char *re, int len) { int ret; if (filter) ret = ftrace_set_filter(ops, re, len, reset); else ret = ftrace_set_notrace(ops, re, len, reset); return ret; } static int __ftrace_function_set_filter(int filter, char *buf, int len, struct function_filter_data *data) { int i, re_cnt, ret = -EINVAL; int *reset; char **re; reset = filter ? &data->first_filter : &data->first_notrace; /* * The 'ip' field could have multiple filters set, separated * either by space or comma. We first cut the filter and apply * all pieces separatelly. */ re = ftrace_function_filter_re(buf, len, &re_cnt); if (!re) return -EINVAL; for (i = 0; i < re_cnt; i++) { ret = ftrace_function_set_regexp(data->ops, filter, *reset, re[i], strlen(re[i])); if (ret) break; if (*reset) *reset = 0; } argv_free(re); return ret; } static int ftrace_function_check_pred(struct filter_pred *pred) { struct ftrace_event_field *field = pred->field; /* * Check the predicate for function trace, verify: * - only '==' and '!=' is used * - the 'ip' field is used */ if ((pred->op != OP_EQ) && (pred->op != OP_NE)) return -EINVAL; if (strcmp(field->name, "ip")) return -EINVAL; return 0; } static int ftrace_function_set_filter_pred(struct filter_pred *pred, struct function_filter_data *data) { int ret; /* Checking the node is valid for function trace. 
*/ ret = ftrace_function_check_pred(pred); if (ret) return ret; return __ftrace_function_set_filter(pred->op == OP_EQ, pred->regex.pattern, pred->regex.len, data); } static bool is_or(struct prog_entry *prog, int i) { int target; /* * Only "||" is allowed for function events, thus, * all true branches should jump to true, and any * false branch should jump to false. */ target = prog[i].target + 1; /* True and false have NULL preds (all prog entries should jump to one */ if (prog[target].pred) return false; /* prog[target].target is 1 for TRUE, 0 for FALSE */ return prog[i].when_to_branch == prog[target].target; } static int ftrace_function_set_filter(struct perf_event *event, struct event_filter *filter) { struct prog_entry *prog = rcu_dereference_protected(filter->prog, lockdep_is_held(&event_mutex)); struct function_filter_data data = { .first_filter = 1, .first_notrace = 1, .ops = &event->ftrace_ops, }; int i; for (i = 0; prog[i].pred; i++) { struct filter_pred *pred = prog[i].pred; if (!is_or(prog, i)) return -EINVAL; if (ftrace_function_set_filter_pred(pred, &data) < 0) return -EINVAL; } return 0; } #else static int ftrace_function_set_filter(struct perf_event *event, struct event_filter *filter) { return -ENODEV; } #endif /* CONFIG_FUNCTION_TRACER */ int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str) { int err; struct event_filter *filter = NULL; struct trace_event_call *call; mutex_lock(&event_mutex); call = event->tp_event; err = -EINVAL; if (!call) goto out_unlock; err = -EEXIST; if (event->filter) goto out_unlock; err = create_filter(NULL, call, filter_str, false, &filter); if (err) goto free_filter; if (ftrace_event_is_function(call)) err = ftrace_function_set_filter(event, filter); else event->filter = filter; free_filter: if (err || ftrace_event_is_function(call)) __free_filter(filter); out_unlock: mutex_unlock(&event_mutex); return err; } #endif /* CONFIG_PERF_EVENTS */ #ifdef CONFIG_FTRACE_STARTUP_TEST #include <linux/types.h> #include <linux/tracepoint.h> #define CREATE_TRACE_POINTS #include "trace_events_filter_test.h" #define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \ { \ .filter = FILTER, \ .rec = { .a = va, .b = vb, .c = vc, .d = vd, \ .e = ve, .f = vf, .g = vg, .h = vh }, \ .match = m, \ .not_visited = nvisit, \ } #define YES 1 #define NO 0 static struct test_filter_data_t { char *filter; struct trace_event_raw_ftrace_test_filter rec; int match; char *not_visited; } test_filter_data[] = { #define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \ "e == 1 && f == 1 && g == 1 && h == 1" DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""), DATA_REC(NO, 0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"), DATA_REC(NO, 1, 1, 1, 1, 1, 1, 1, 0, ""), #undef FILTER #define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \ "e == 1 || f == 1 || g == 1 || h == 1" DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""), DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""), DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"), #undef FILTER #define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \ "(e == 1 || f == 1) && (g == 1 || h == 1)" DATA_REC(NO, 0, 0, 1, 1, 1, 1, 1, 1, "dfh"), DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""), DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"), DATA_REC(NO, 1, 0, 1, 0, 0, 1, 0, 0, "bd"), #undef FILTER #define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \ "(e == 1 && f == 1) || (g == 1 && h == 1)" DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"), DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""), DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""), #undef FILTER #define 
FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \ "(e == 1 && f == 1) || (g == 1 && h == 1)" DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"), DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""), DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""), #undef FILTER #define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \ "(e == 1 || f == 1)) && (g == 1 || h == 1)" DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"), DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""), DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"), #undef FILTER #define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \ "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))" DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"), DATA_REC(NO, 0, 1, 0, 1, 0, 1, 0, 1, ""), DATA_REC(NO, 1, 0, 1, 0, 1, 0, 1, 0, ""), #undef FILTER #define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \ "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))" DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"), DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""), DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"), }; #undef DATA_REC #undef FILTER #undef YES #undef NO #define DATA_CNT ARRAY_SIZE(test_filter_data) static int test_pred_visited; static int test_pred_visited_fn(struct filter_pred *pred, void *event) { struct ftrace_event_field *field = pred->field; test_pred_visited = 1; printk(KERN_INFO "\npred visited %s\n", field->name); return 1; } static void update_pred_fn(struct event_filter *filter, char *fields) { struct prog_entry *prog = rcu_dereference_protected(filter->prog, lockdep_is_held(&event_mutex)); int i; for (i = 0; prog[i].pred; i++) { struct filter_pred *pred = prog[i].pred; struct ftrace_event_field *field = pred->field; WARN_ON_ONCE(!pred->fn); if (!field) { WARN_ONCE(1, "all leafs should have field defined %d", i); continue; } if (!strchr(fields, *field->name)) continue; pred->fn = test_pred_visited_fn; } } static __init int ftrace_test_event_filter(void) { int i; printk(KERN_INFO "Testing ftrace filter: "); for (i = 0; i < DATA_CNT; i++) { struct event_filter *filter = NULL; struct test_filter_data_t *d = &test_filter_data[i]; int err; err = create_filter(NULL, &event_ftrace_test_filter, d->filter, false, &filter); if (err) { printk(KERN_INFO "Failed to get filter for '%s', err %d\n", d->filter, err); __free_filter(filter); break; } /* Needed to dereference filter->prog */ mutex_lock(&event_mutex); /* * The preemption disabling is not really needed for self * tests, but the rcu dereference will complain without it. */ preempt_disable(); if (*d->not_visited) update_pred_fn(filter, d->not_visited); test_pred_visited = 0; err = filter_match_preds(filter, &d->rec); preempt_enable(); mutex_unlock(&event_mutex); __free_filter(filter); if (test_pred_visited) { printk(KERN_INFO "Failed, unwanted pred visited for filter %s\n", d->filter); break; } if (err != d->match) { printk(KERN_INFO "Failed to match filter '%s', expected %d\n", d->filter, d->match); break; } } if (i == DATA_CNT) printk(KERN_CONT "OK\n"); return 0; } late_initcall(ftrace_test_event_filter); #endif /* CONFIG_FTRACE_STARTUP_TEST */
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1262_0
crossvul-cpp_data_bad_1272_0
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/slab.h> #include "dm_services.h" #include "link_encoder.h" #include "stream_encoder.h" #include "resource.h" #include "include/irq_service_interface.h" #include "../virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dce110/dce110_timing_generator.h" #include "irq/dce110/irq_service_dce110.h" #include "dce/dce_link_encoder.h" #include "dce/dce_stream_encoder.h" #include "dce/dce_mem_input.h" #include "dce/dce_ipp.h" #include "dce/dce_transform.h" #include "dce/dce_opp.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "dce100/dce100_hw_sequencer.h" #include "reg_helper.h" #include "dce/dce_10_0_d.h" #include "dce/dce_10_0_sh_mask.h" #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" #include "dce/dce_abm.h" #include "dce/dce_i2c.h" #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT #include "gmc/gmc_8_2_d.h" #include "gmc/gmc_8_2_sh_mask.h" #endif #ifndef mmDP_DPHY_INTERNAL_CTRL #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7 #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7 #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7 #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7 #endif #ifndef mmBIOS_SCRATCH_2 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF #endif #ifndef mmDP_DPHY_BS_SR_SWAP_CNTL #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC #endif #ifndef mmDP_DPHY_FAST_TRAINING #define mmDP_DPHY_FAST_TRAINING 0x4ABC #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC #endif static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = { { .crtc = (mmCRTC0_CRTC_CONTROL - 
mmCRTC_CONTROL), .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL), } }; /* set register offset */ #define SR(reg_name)\ .reg_name = mm ## reg_name /* set register offset with instance */ #define SRI(reg_name, block, id)\ .reg_name = mm ## block ## id ## _ ## reg_name #define ipp_regs(id)\ [id] = {\ IPP_DCE100_REG_LIST_DCE_BASE(id)\ } static const struct dce_ipp_registers ipp_regs[] = { ipp_regs(0), ipp_regs(1), ipp_regs(2), ipp_regs(3), ipp_regs(4), ipp_regs(5) }; static const struct dce_ipp_shift ipp_shift = { IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce_ipp_mask ipp_mask = { IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; #define transform_regs(id)\ [id] = {\ XFM_COMMON_REG_LIST_DCE100(id)\ } static const struct dce_transform_registers xfm_regs[] = { transform_regs(0), transform_regs(1), transform_regs(2), transform_regs(3), transform_regs(4), transform_regs(5) }; static const struct dce_transform_shift xfm_shift = { XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_transform_mask xfm_mask = { XFM_COMMON_MASK_SH_LIST_DCE110(_MASK) }; #define aux_regs(id)\ [id] = {\ AUX_REG_LIST(id)\ } static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3), aux_regs(4), aux_regs(5) }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3), hpd_regs(4), hpd_regs(5) }; #define link_regs(id)\ [id] = {\ LE_DCE100_REG_LIST(id)\ } static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(0), link_regs(1), link_regs(2), link_regs(3), link_regs(4), link_regs(5), link_regs(6), }; #define stream_enc_regs(id)\ [id] = {\ SE_COMMON_REG_LIST_DCE_BASE(id),\ .AFMT_CNTL = 0,\ } static const struct dce110_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), stream_enc_regs(3), stream_enc_regs(4), stream_enc_regs(5), stream_enc_regs(6) }; static const struct dce_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCE80_100(__SHIFT) }; static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK) }; #define opp_regs(id)\ [id] = {\ OPP_DCE_100_REG_LIST(id),\ } static const struct dce_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), opp_regs(3), opp_regs(4), opp_regs(5) }; static const struct dce_opp_shift opp_shift = { OPP_COMMON_MASK_SH_LIST_DCE_100(__SHIFT) }; static const struct dce_opp_mask opp_mask = { OPP_COMMON_MASK_SH_LIST_DCE_100(_MASK) }; #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ .AUX_RESET_MASK = 0 \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4), aux_engine_regs(5) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), 
audio_regs(2), audio_regs(3), audio_regs(4), audio_regs(5), audio_regs(6), }; static const struct dce_audio_shift audio_shift = { AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { AUD_COMMON_MASK_SH_LIST(_MASK) }; #define clk_src_regs(id)\ [id] = {\ CS_COMMON_REG_LIST_DCE_100_110(id),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0), clk_src_regs(1), clk_src_regs(2) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCE110_COMMON_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { DMCU_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_dmcu_mask dmcu_mask = { DMCU_MASK_SH_LIST_DCE110(_MASK) }; static const struct dce_abm_registers abm_regs = { ABM_DCE110_COMMON_REG_LIST() }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCE110(_MASK) }; #define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03 static const struct bios_registers bios_regs = { .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 6, .num_stream_encoder = 6, .num_pll = 3, .num_ddc = 6, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCE_RGB, .pixel_format_support = { .argb8888 = true, .nv12 = false, .fp16 = false }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 1, .fp16 = 1 }, .max_downscale_factor = { .argb8888 = 250, .nv12 = 1, .fp16 = 1 } }; #define CTX ctx #define REG(reg) mm ## reg #ifndef mmCC_DC_HDMI_STRAPS #define mmCC_DC_HDMI_STRAPS 0x1918 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { REG_GET_2(CC_DC_HDMI_STRAPS, HDMI_DISABLE, &straps->hdmi_disable, AUDIO_STREAM_NUMBER, &straps->audio_stream_number); REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio); } static struct audio *create_audio( struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } static struct timing_generator *dce100_timing_generator_create( struct dc_context *ctx, uint32_t instance, const struct dce110_timing_generator_offsets *offsets) { struct dce110_timing_generator *tg110 = kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); if (!tg110) return NULL; dce110_timing_generator_construct(tg110, ctx, instance, offsets); return &tg110->base; } static struct stream_encoder *dce100_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dce110_stream_encoder *enc110 = kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); if (!enc110) return NULL; dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc110->base; } #define SRII(reg_name, block, id)\ .reg_name[id] = mm ## block ## id ## _ ## reg_name static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCE10_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCE10_MASK_SH_LIST(__SHIFT) }; static const struct 
dce_hwseq_mask hwseq_mask = { HWSEQ_DCE10_MASK_SH_LIST(_MASK) }; static struct dce_hwseq *dce100_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, .create_stream_encoder = dce100_stream_encoder_create, .create_hwseq = dce100_hwseq_create, }; #define mi_inst_regs(id) { \ MI_DCE8_REG_LIST(id), \ .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \ } static const struct dce_mem_input_registers mi_regs[] = { mi_inst_regs(0), mi_inst_regs(1), mi_inst_regs(2), mi_inst_regs(3), mi_inst_regs(4), mi_inst_regs(5), }; static const struct dce_mem_input_shift mi_shifts = { MI_DCE8_MASK_SH_LIST(__SHIFT), .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT }; static const struct dce_mem_input_mask mi_masks = { MI_DCE8_MASK_SH_LIST(_MASK), .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK }; static struct mem_input *dce100_mem_input_create( struct dc_context *ctx, uint32_t inst) { struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), GFP_KERNEL); if (!dce_mi) { BREAK_TO_DEBUGGER(); return NULL; } dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); dce_mi->wa.single_head_rdreq_dmif_limit = 2; return &dce_mi->base; } static void dce100_transform_destroy(struct transform **xfm) { kfree(TO_DCE_TRANSFORM(*xfm)); *xfm = NULL; } static struct transform *dce100_transform_create( struct dc_context *ctx, uint32_t inst) { struct dce_transform *transform = kzalloc(sizeof(struct dce_transform), GFP_KERNEL); if (!transform) return NULL; dce_transform_construct(transform, ctx, inst, &xfm_regs[inst], &xfm_shift, &xfm_mask); return &transform->base; } static struct input_pixel_processor *dce100_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); return NULL; } dce_ipp_construct(ipp, ctx, inst, &ipp_regs[inst], &ipp_shift, &ipp_mask); return &ipp->base; } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 300000, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true }; struct link_encoder *dce100_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); if (!enc110) return NULL; dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; } struct output_pixel_processor *dce100_opp_create( struct dc_context *ctx, uint32_t inst) { struct dce110_opp *opp = kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); if (!opp) return NULL; dce110_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } struct dce_aux *dce100_aux_engine_create( struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst]); return &aux_engine->base; } #define i2c_inst_regs(id) { 
I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5), i2c_inst_regs(6), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; struct dce_i2c_hw *dce100_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dce100_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } struct clock_source *dce100_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dce110_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } BREAK_TO_DEBUGGER(); return NULL; } void dce100_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); *clk_src = NULL; } static void destruct(struct dce110_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.opps[i] != NULL) dce110_opp_destroy(&pool->base.opps[i]); if (pool->base.transforms[i] != NULL) dce100_transform_destroy(&pool->base.transforms[i]); if (pool->base.ipps[i] != NULL) dce_ipp_destroy(&pool->base.ipps[i]); if (pool->base.mis[i] != NULL) { kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); pool->base.mis[i] = NULL; } if (pool->base.timing_generators[i] != NULL) { kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) dce100_clock_source_destroy(&pool->base.clock_sources[i]); } if (pool->base.dp_clock_source != NULL) dce100_clock_source_destroy(&pool->base.dp_clock_source); for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i] != NULL) dce_aud_destroy(&pool->base.audios[i]); } if (pool->base.abm != NULL) dce_abm_destroy(&pool->base.abm); if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); if (pool->base.irqs != NULL) dal_irq_service_destroy(&pool->base.irqs); } static enum dc_status build_mapped_resource( const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) { struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream); if (!pipe_ctx) return DC_ERROR_UNEXPECTED; dce110_resource_build_pipe_hw_param(pipe_ctx); resource_build_info_frame(pipe_ctx); return DC_OK; } bool dce100_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate) { int i; bool at_least_one_pipe = false; for (i = 0; i < dc->res_pool->pipe_count; i++) { if (context->res_ctx.pipe_ctx[i].stream) 
at_least_one_pipe = true; } if (at_least_one_pipe) { /* TODO implement when needed but for now hardcode max value*/ context->bw_ctx.bw.dce.dispclk_khz = 681000; context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; } else { context->bw_ctx.bw.dce.dispclk_khz = 0; context->bw_ctx.bw.dce.yclk_khz = 0; } return true; } static bool dce100_validate_surface_sets( struct dc_state *context) { int i; for (i = 0; i < context->stream_count; i++) { if (context->stream_status[i].plane_count == 0) continue; if (context->stream_status[i].plane_count > 1) return false; if (context->stream_status[i].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) return false; } return true; } enum dc_status dce100_validate_global( struct dc *dc, struct dc_state *context) { if (!dce100_validate_surface_sets(context)) return DC_FAIL_SURFACE_VALIDATE; return DC_OK; } enum dc_status dce100_add_stream_to_ctx( struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) { enum dc_status result = DC_ERROR_UNEXPECTED; result = resource_map_pool_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = resource_map_clock_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = build_mapped_resource(dc, new_ctx, dc_stream); return result; } static void dce100_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps) { if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) return DC_OK; return DC_FAIL_SURFACE_VALIDATE; } struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link( struct resource_context *res_ctx, const struct resource_pool *pool, struct dc_stream_state *stream) { int i; int j = -1; struct dc_link *link = stream->link; for (i = 0; i < pool->stream_enc_count; i++) { if (!res_ctx->is_stream_enc_acquired[i] && pool->stream_enc[i]) { /* Store first available for MST second display * in daisy chain use case */ j = i; if (pool->stream_enc[i]->id == link->link_enc->preferred_engine) return pool->stream_enc[i]; } } /* * below can happen in cases when stream encoder is acquired: * 1) for second MST display in chain, so preferred engine already * acquired; * 2) for another link, which preferred engine already acquired by any * MST configuration. * * If signal is of DP type and preferred engine not found, return last available * * TODO - This is just a patch up and a generic solution is * required for non DP connectors. 
*/ if (j >= 0 && link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) return pool->stream_enc[j]; return NULL; } static const struct resource_funcs dce100_res_pool_funcs = { .destroy = dce100_destroy_resource_pool, .link_enc_create = dce100_link_encoder_create, .validate_bandwidth = dce100_validate_bandwidth, .validate_plane = dce100_validate_plane, .add_stream_to_ctx = dce100_add_stream_to_ctx, .validate_global = dce100_validate_global, .find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link }; static bool construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) { unsigned int i; struct dc_context *ctx = dc->ctx; struct dc_bios *bp; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap; pool->base.funcs = &dce100_res_pool_funcs; pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; bp = ctx->dc_bios; if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); pool->base.clock_sources[0] = dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[1] = dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[2] = dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 3; } else { pool->base.dp_clock_source = dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); pool->base.clock_sources[0] = dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[1] = dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 2; } if (pool->base.dp_clock_source == NULL) { dm_error("DC: failed to create dp clock source!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } } pool->base.dmcu = dce_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dce110_create(&init_data); if (!pool->base.irqs) goto res_create_fail; } /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; pool->base.pipe_count = res_cap.num_timing_generator; pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 40; dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; dc->caps.disable_dp_clk_share = true; for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = dce100_timing_generator_create( ctx, i, &dce100_tg_offsets[i]); if (pool->base.timing_generators[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto res_create_fail; } pool->base.mis[i] = dce100_mem_input_create(ctx, i); if (pool->base.mis[i] 
== NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create memory input!\n"); goto res_create_fail; } pool->base.ipps[i] = dce100_ipp_create(ctx, i); if (pool->base.ipps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create input pixel processor!\n"); goto res_create_fail; } pool->base.transforms[i] = dce100_transform_create(ctx, i); if (pool->base.transforms[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create transform!\n"); goto res_create_fail; } pool->base.opps[i] = dce100_opp_create(ctx, i); if (pool->base.opps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create output pixel processor!\n"); goto res_create_fail; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce100_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto res_create_fail; } pool->base.hw_i2cs[i] = dce100_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create i2c engine!!\n"); goto res_create_fail; } pool->base.sw_i2cs[i] = NULL; } dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; if (!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) goto res_create_fail; /* Create hardware sequencer */ dce100_hw_sequencer_construct(dc); return true; res_create_fail: destruct(pool); return false; } struct resource_pool *dce100_create_resource_pool( uint8_t num_virtual_links, struct dc *dc) { struct dce110_resource_pool *pool = kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (construct(num_virtual_links, dc, pool)) return &pool->base; BREAK_TO_DEBUGGER(); return NULL; }
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1272_0
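A minimal, self-contained sketch (not part of the dce100 driver above) of the cleanup-on-partial-failure pattern that construct()/destruct() rely on: every object is recorded in the pool as it is created, and a single res_create_fail-style path frees whatever subset already exists, so an aborted construction leaks nothing. All struct, function, and constant names below are invented for illustration; only the control-flow pattern mirrors the driver.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_PIPE_COUNT 6

struct demo_pool {
	void *timing_generators[DEMO_PIPE_COUNT];
	void *mem_inputs[DEMO_PIPE_COUNT];
};

/* Frees only what was actually created; untouched slots are still NULL. */
static void demo_destruct(struct demo_pool *pool)
{
	for (unsigned int i = 0; i < DEMO_PIPE_COUNT; i++) {
		free(pool->timing_generators[i]);
		free(pool->mem_inputs[i]);
	}
}

static bool demo_construct(struct demo_pool *pool)
{
	/* The real pool comes from kzalloc(), so start from an all-NULL state. */
	memset(pool, 0, sizeof(*pool));

	for (unsigned int i = 0; i < DEMO_PIPE_COUNT; i++) {
		pool->timing_generators[i] = calloc(1, 64);
		if (!pool->timing_generators[i])
			goto res_create_fail;

		pool->mem_inputs[i] = calloc(1, 64);
		if (!pool->mem_inputs[i])
			goto res_create_fail;
	}
	return true;

res_create_fail:
	/* Tear down the partially built pool, as destruct(pool) does above. */
	demo_destruct(pool);
	return false;
}

int main(void)
{
	struct demo_pool pool;

	if (!demo_construct(&pool))
		return 1;
	demo_destruct(&pool);
	return 0;
}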
crossvul-cpp_data_good_477_1
/* exif-data.c * * Copyright (c) 2001 Lutz Mueller <lutz@users.sourceforge.net> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301 USA. */ #include <config.h> #include <libexif/exif-mnote-data.h> #include <libexif/exif-data.h> #include <libexif/exif-ifd.h> #include <libexif/exif-mnote-data-priv.h> #include <libexif/exif-utils.h> #include <libexif/exif-loader.h> #include <libexif/exif-log.h> #include <libexif/i18n.h> #include <libexif/exif-system.h> #include <libexif/canon/exif-mnote-data-canon.h> #include <libexif/fuji/exif-mnote-data-fuji.h> #include <libexif/olympus/exif-mnote-data-olympus.h> #include <libexif/pentax/exif-mnote-data-pentax.h> #include <math.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #undef JPEG_MARKER_SOI #define JPEG_MARKER_SOI 0xd8 #undef JPEG_MARKER_APP0 #define JPEG_MARKER_APP0 0xe0 #undef JPEG_MARKER_APP1 #define JPEG_MARKER_APP1 0xe1 static const unsigned char ExifHeader[] = {0x45, 0x78, 0x69, 0x66, 0x00, 0x00}; struct _ExifDataPrivate { ExifByteOrder order; ExifMnoteData *md; ExifLog *log; ExifMem *mem; unsigned int ref_count; /* Temporarily used while loading data */ unsigned int offset_mnote; ExifDataOption options; ExifDataType data_type; }; static void * exif_data_alloc (ExifData *data, unsigned int i) { void *d; if (!data || !i) return NULL; d = exif_mem_alloc (data->priv->mem, i); if (d) return d; EXIF_LOG_NO_MEMORY (data->priv->log, "ExifData", i); return NULL; } ExifMnoteData * exif_data_get_mnote_data (ExifData *d) { return (d && d->priv) ? 
d->priv->md : NULL; } ExifData * exif_data_new (void) { ExifMem *mem = exif_mem_new_default (); ExifData *d = exif_data_new_mem (mem); exif_mem_unref (mem); return d; } ExifData * exif_data_new_mem (ExifMem *mem) { ExifData *data; unsigned int i; if (!mem) return NULL; data = exif_mem_alloc (mem, sizeof (ExifData)); if (!data) return (NULL); data->priv = exif_mem_alloc (mem, sizeof (ExifDataPrivate)); if (!data->priv) { exif_mem_free (mem, data); return (NULL); } data->priv->ref_count = 1; data->priv->mem = mem; exif_mem_ref (mem); for (i = 0; i < EXIF_IFD_COUNT; i++) { data->ifd[i] = exif_content_new_mem (data->priv->mem); if (!data->ifd[i]) { exif_data_free (data); return (NULL); } data->ifd[i]->parent = data; } /* Default options */ #ifndef NO_VERBOSE_TAG_STRINGS /* * When the tag list is compiled away, setting this option prevents * any tags from being loaded */ exif_data_set_option (data, EXIF_DATA_OPTION_IGNORE_UNKNOWN_TAGS); #endif exif_data_set_option (data, EXIF_DATA_OPTION_FOLLOW_SPECIFICATION); /* Default data type: none */ exif_data_set_data_type (data, EXIF_DATA_TYPE_COUNT); return (data); } ExifData * exif_data_new_from_data (const unsigned char *data, unsigned int size) { ExifData *edata; edata = exif_data_new (); exif_data_load_data (edata, data, size); return (edata); } static int exif_data_load_data_entry (ExifData *data, ExifEntry *entry, const unsigned char *d, unsigned int size, unsigned int offset) { unsigned int s, doff; entry->tag = exif_get_short (d + offset + 0, data->priv->order); entry->format = exif_get_short (d + offset + 2, data->priv->order); entry->components = exif_get_long (d + offset + 4, data->priv->order); /* FIXME: should use exif_tag_get_name_in_ifd here but entry->parent * has not been set yet */ exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Loading entry 0x%x ('%s')...", entry->tag, exif_tag_get_name (entry->tag)); /* {0,1,2,4,8} x { 0x00000000 .. 0xffffffff } * -> { 0x000000000 .. 0x7fffffff8 } */ s = exif_format_get_size(entry->format) * entry->components; if ((s < entry->components) || (s == 0)){ return 0; } /* * Size? If bigger than 4 bytes, the actual data is not * in the entry but somewhere else (offset). */ if (s > 4) doff = exif_get_long (d + offset + 8, data->priv->order); else doff = offset + 8; /* Sanity checks */ if ((doff + s < doff) || (doff + s < s) || (doff + s > size)) { exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Tag data past end of buffer (%u > %u)", doff+s, size); return 0; } entry->data = exif_data_alloc (data, s); if (entry->data) { entry->size = s; memcpy (entry->data, d + doff, s); } else { EXIF_LOG_NO_MEMORY(data->priv->log, "ExifData", s); return 0; } /* If this is the MakerNote, remember the offset */ if (entry->tag == EXIF_TAG_MAKER_NOTE) { if (!entry->data) { exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "MakerNote found with empty data"); } else if (entry->size > 6) { exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "MakerNote found (%02x %02x %02x %02x " "%02x %02x %02x...).", entry->data[0], entry->data[1], entry->data[2], entry->data[3], entry->data[4], entry->data[5], entry->data[6]); } data->priv->offset_mnote = doff; } return 1; } static void exif_data_save_data_entry (ExifData *data, ExifEntry *e, unsigned char **d, unsigned int *ds, unsigned int offset) { unsigned int doff, s; unsigned int ts; if (!data || !data->priv) return; /* * Each entry is 12 bytes long. The memory for the entry has * already been allocated. 
*/ exif_set_short (*d + 6 + offset + 0, data->priv->order, (ExifShort) e->tag); exif_set_short (*d + 6 + offset + 2, data->priv->order, (ExifShort) e->format); if (!(data->priv->options & EXIF_DATA_OPTION_DONT_CHANGE_MAKER_NOTE)) { /* If this is the maker note tag, update it. */ if ((e->tag == EXIF_TAG_MAKER_NOTE) && data->priv->md) { /* TODO: this is using the wrong ExifMem to free e->data */ exif_mem_free (data->priv->mem, e->data); e->data = NULL; e->size = 0; exif_mnote_data_set_offset (data->priv->md, *ds - 6); exif_mnote_data_save (data->priv->md, &e->data, &e->size); e->components = e->size; if (exif_format_get_size (e->format) != 1) { /* e->format is taken from input code, * but we need to make sure it is a 1 byte * entity due to the multiplication below. */ e->format = EXIF_FORMAT_UNDEFINED; } } } exif_set_long (*d + 6 + offset + 4, data->priv->order, e->components); /* * Size? If bigger than 4 bytes, the actual data is not in * the entry but somewhere else. */ s = exif_format_get_size (e->format) * e->components; if (s > 4) { unsigned char *t; doff = *ds - 6; ts = *ds + s; /* * According to the TIFF specification, * the offset must be an even number. If we need to introduce * a padding byte, we set it to 0. */ if (s & 1) ts++; t = exif_mem_realloc (data->priv->mem, *d, ts); if (!t) { EXIF_LOG_NO_MEMORY (data->priv->log, "ExifData", ts); return; } *d = t; *ds = ts; exif_set_long (*d + 6 + offset + 8, data->priv->order, doff); if (s & 1) *(*d + *ds - 1) = '\0'; } else doff = offset + 8; /* Write the data. Fill unneeded bytes with 0. Do not crash with * e->data is NULL */ if (e->data) { memcpy (*d + 6 + doff, e->data, s); } else { memset (*d + 6 + doff, 0, s); } if (s < 4) memset (*d + 6 + doff + s, 0, (4 - s)); } static void exif_data_load_data_thumbnail (ExifData *data, const unsigned char *d, unsigned int ds, ExifLong o, ExifLong s) { /* Sanity checks */ if ((o + s < o) || (o + s < s) || (o + s > ds) || (o > ds)) { exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Bogus thumbnail offset (%u) or size (%u).", o, s); return; } if (data->data) exif_mem_free (data->priv->mem, data->data); if (!(data->data = exif_data_alloc (data, s))) { EXIF_LOG_NO_MEMORY (data->priv->log, "ExifData", s); data->size = 0; return; } data->size = s; memcpy (data->data, d + o, s); } #undef CHECK_REC #define CHECK_REC(i) \ if ((i) == ifd) { \ exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, \ "ExifData", "Recursive entry in IFD " \ "'%s' detected. Skipping...", \ exif_ifd_get_name (i)); \ break; \ } \ if (data->ifd[(i)]->count) { \ exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, \ "ExifData", "Attempt to load IFD " \ "'%s' multiple times detected. " \ "Skipping...", \ exif_ifd_get_name (i)); \ break; \ } /*! Calculate the recursion cost added by one level of IFD loading. * * The work performed is related to the cost in the exponential relation * work=1.1**cost */ static unsigned int level_cost(unsigned int n) { static const double log_1_1 = 0.09531017980432493; /* Adding 0.1 protects against the case where n==1 */ return ceil(log(n + 0.1)/log_1_1); } /*! Load data for an IFD. 
* * \param[in,out] data #ExifData * \param[in] ifd IFD to load * \param[in] d pointer to buffer containing raw IFD data * \param[in] ds size of raw data in buffer at \c d * \param[in] offset offset into buffer at \c d at which IFD starts * \param[in] recursion_cost factor indicating how expensive this recursive * call could be */ static void exif_data_load_data_content (ExifData *data, ExifIfd ifd, const unsigned char *d, unsigned int ds, unsigned int offset, unsigned int recursion_cost) { ExifLong o, thumbnail_offset = 0, thumbnail_length = 0; ExifShort n; ExifEntry *entry; unsigned int i; ExifTag tag; if (!data || !data->priv) return; /* check for valid ExifIfd enum range */ if ((((int)ifd) < 0) || ( ((int)ifd) >= EXIF_IFD_COUNT)) return; if (recursion_cost > 170) { /* * recursion_cost is a logarithmic-scale indicator of how expensive this * recursive call might end up being. It is an indicator of the depth of * recursion as well as the potential for worst-case future recursive * calls. Since it's difficult to tell ahead of time how often recursion * will occur, this assumes the worst by assuming every tag could end up * causing recursion. * The value of 170 was chosen to limit typical EXIF structures to a * recursive depth of about 6, but pathological ones (those with very * many tags) to only 2. */ exif_log (data->priv->log, EXIF_LOG_CODE_CORRUPT_DATA, "ExifData", "Deep/expensive recursion detected!"); return; } /* Read the number of entries */ if ((offset + 2 < offset) || (offset + 2 < 2) || (offset + 2 > ds)) { exif_log (data->priv->log, EXIF_LOG_CODE_CORRUPT_DATA, "ExifData", "Tag data past end of buffer (%u > %u)", offset+2, ds); return; } n = exif_get_short (d + offset, data->priv->order); exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Loading %hu entries...", n); offset += 2; /* Check if we have enough data. 
*/ if (offset + 12 * n > ds) { n = (ds - offset) / 12; exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Short data; only loading %hu entries...", n); } for (i = 0; i < n; i++) { tag = exif_get_short (d + offset + 12 * i, data->priv->order); switch (tag) { case EXIF_TAG_EXIF_IFD_POINTER: case EXIF_TAG_GPS_INFO_IFD_POINTER: case EXIF_TAG_INTEROPERABILITY_IFD_POINTER: case EXIF_TAG_JPEG_INTERCHANGE_FORMAT_LENGTH: case EXIF_TAG_JPEG_INTERCHANGE_FORMAT: o = exif_get_long (d + offset + 12 * i + 8, data->priv->order); /* FIXME: IFD_POINTER tags aren't marked as being in a * specific IFD, so exif_tag_get_name_in_ifd won't work */ exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Sub-IFD entry 0x%x ('%s') at %u.", tag, exif_tag_get_name(tag), o); switch (tag) { case EXIF_TAG_EXIF_IFD_POINTER: CHECK_REC (EXIF_IFD_EXIF); exif_data_load_data_content (data, EXIF_IFD_EXIF, d, ds, o, recursion_cost + level_cost(n)); break; case EXIF_TAG_GPS_INFO_IFD_POINTER: CHECK_REC (EXIF_IFD_GPS); exif_data_load_data_content (data, EXIF_IFD_GPS, d, ds, o, recursion_cost + level_cost(n)); break; case EXIF_TAG_INTEROPERABILITY_IFD_POINTER: CHECK_REC (EXIF_IFD_INTEROPERABILITY); exif_data_load_data_content (data, EXIF_IFD_INTEROPERABILITY, d, ds, o, recursion_cost + level_cost(n)); break; case EXIF_TAG_JPEG_INTERCHANGE_FORMAT: thumbnail_offset = o; if (thumbnail_offset && thumbnail_length) exif_data_load_data_thumbnail (data, d, ds, thumbnail_offset, thumbnail_length); break; case EXIF_TAG_JPEG_INTERCHANGE_FORMAT_LENGTH: thumbnail_length = o; if (thumbnail_offset && thumbnail_length) exif_data_load_data_thumbnail (data, d, ds, thumbnail_offset, thumbnail_length); break; default: return; } break; default: /* * If we don't know the tag, don't fail. It could be that new * versions of the standard have defined additional tags. Note that * 0 is a valid tag in the GPS IFD. */ if (!exif_tag_get_name_in_ifd (tag, ifd)) { /* * Special case: Tag and format 0. That's against specification * (at least up to 2.2). But Photoshop writes it anyways. */ if (!memcmp (d + offset + 12 * i, "\0\0\0\0", 4)) { exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Skipping empty entry at position %u in '%s'.", i, exif_ifd_get_name (ifd)); break; } exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Unknown tag 0x%04x (entry %u in '%s'). Please report this tag " "to <libexif-devel@lists.sourceforge.net>.", tag, i, exif_ifd_get_name (ifd)); if (data->priv->options & EXIF_DATA_OPTION_IGNORE_UNKNOWN_TAGS) break; } entry = exif_entry_new_mem (data->priv->mem); if (!entry) { exif_log (data->priv->log, EXIF_LOG_CODE_NO_MEMORY, "ExifData", "Could not allocate memory"); return; } if (exif_data_load_data_entry (data, entry, d, ds, offset + 12 * i)) exif_content_add_entry (data->ifd[ifd], entry); exif_entry_unref (entry); break; } } } static int cmp_func (const unsigned char *p1, const unsigned char *p2, ExifByteOrder o) { ExifShort tag1 = exif_get_short (p1, o); ExifShort tag2 = exif_get_short (p2, o); return (tag1 < tag2) ? -1 : (tag1 > tag2) ? 
1 : 0; } static int cmp_func_intel (const void *elem1, const void *elem2) { return cmp_func ((const unsigned char *) elem1, (const unsigned char *) elem2, EXIF_BYTE_ORDER_INTEL); } static int cmp_func_motorola (const void *elem1, const void *elem2) { return cmp_func ((const unsigned char *) elem1, (const unsigned char *) elem2, EXIF_BYTE_ORDER_MOTOROLA); } static void exif_data_save_data_content (ExifData *data, ExifContent *ifd, unsigned char **d, unsigned int *ds, unsigned int offset) { unsigned int j, n_ptr = 0, n_thumb = 0; ExifIfd i; unsigned char *t; unsigned int ts; if (!data || !data->priv || !ifd || !d || !ds) return; for (i = 0; i < EXIF_IFD_COUNT; i++) if (ifd == data->ifd[i]) break; if (i == EXIF_IFD_COUNT) return; /* error */ /* * Check if we need some extra entries for pointers or the thumbnail. */ switch (i) { case EXIF_IFD_0: /* * The pointer to IFD_EXIF is in IFD_0. The pointer to * IFD_INTEROPERABILITY is in IFD_EXIF. */ if (data->ifd[EXIF_IFD_EXIF]->count || data->ifd[EXIF_IFD_INTEROPERABILITY]->count) n_ptr++; /* The pointer to IFD_GPS is in IFD_0. */ if (data->ifd[EXIF_IFD_GPS]->count) n_ptr++; break; case EXIF_IFD_1: if (data->size) n_thumb = 2; break; case EXIF_IFD_EXIF: if (data->ifd[EXIF_IFD_INTEROPERABILITY]->count) n_ptr++; default: break; } /* * Allocate enough memory for all entries * and the number of entries. */ ts = *ds + (2 + (ifd->count + n_ptr + n_thumb) * 12 + 4); t = exif_mem_realloc (data->priv->mem, *d, ts); if (!t) { EXIF_LOG_NO_MEMORY (data->priv->log, "ExifData", ts); return; } *d = t; *ds = ts; /* Save the number of entries */ exif_set_short (*d + 6 + offset, data->priv->order, (ExifShort) (ifd->count + n_ptr + n_thumb)); offset += 2; /* * Save each entry. Make sure that no memcpys from NULL pointers are * performed */ exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Saving %i entries (IFD '%s', offset: %i)...", ifd->count, exif_ifd_get_name (i), offset); for (j = 0; j < ifd->count; j++) { if (ifd->entries[j]) { exif_data_save_data_entry (data, ifd->entries[j], d, ds, offset + 12 * j); } } offset += 12 * ifd->count; /* Now save special entries. */ switch (i) { case EXIF_IFD_0: /* * The pointer to IFD_EXIF is in IFD_0. * However, the pointer to IFD_INTEROPERABILITY is in IFD_EXIF, * therefore, if IFD_INTEROPERABILITY is not empty, we need * IFD_EXIF even if latter is empty. */ if (data->ifd[EXIF_IFD_EXIF]->count || data->ifd[EXIF_IFD_INTEROPERABILITY]->count) { exif_set_short (*d + 6 + offset + 0, data->priv->order, EXIF_TAG_EXIF_IFD_POINTER); exif_set_short (*d + 6 + offset + 2, data->priv->order, EXIF_FORMAT_LONG); exif_set_long (*d + 6 + offset + 4, data->priv->order, 1); exif_set_long (*d + 6 + offset + 8, data->priv->order, *ds - 6); exif_data_save_data_content (data, data->ifd[EXIF_IFD_EXIF], d, ds, *ds - 6); offset += 12; } /* The pointer to IFD_GPS is in IFD_0, too. */ if (data->ifd[EXIF_IFD_GPS]->count) { exif_set_short (*d + 6 + offset + 0, data->priv->order, EXIF_TAG_GPS_INFO_IFD_POINTER); exif_set_short (*d + 6 + offset + 2, data->priv->order, EXIF_FORMAT_LONG); exif_set_long (*d + 6 + offset + 4, data->priv->order, 1); exif_set_long (*d + 6 + offset + 8, data->priv->order, *ds - 6); exif_data_save_data_content (data, data->ifd[EXIF_IFD_GPS], d, ds, *ds - 6); offset += 12; } break; case EXIF_IFD_EXIF: /* * The pointer to IFD_INTEROPERABILITY is in IFD_EXIF. * See note above. 
*/ if (data->ifd[EXIF_IFD_INTEROPERABILITY]->count) { exif_set_short (*d + 6 + offset + 0, data->priv->order, EXIF_TAG_INTEROPERABILITY_IFD_POINTER); exif_set_short (*d + 6 + offset + 2, data->priv->order, EXIF_FORMAT_LONG); exif_set_long (*d + 6 + offset + 4, data->priv->order, 1); exif_set_long (*d + 6 + offset + 8, data->priv->order, *ds - 6); exif_data_save_data_content (data, data->ifd[EXIF_IFD_INTEROPERABILITY], d, ds, *ds - 6); offset += 12; } break; case EXIF_IFD_1: /* * Information about the thumbnail (if any) is saved in * IFD_1. */ if (data->size) { /* EXIF_TAG_JPEG_INTERCHANGE_FORMAT */ exif_set_short (*d + 6 + offset + 0, data->priv->order, EXIF_TAG_JPEG_INTERCHANGE_FORMAT); exif_set_short (*d + 6 + offset + 2, data->priv->order, EXIF_FORMAT_LONG); exif_set_long (*d + 6 + offset + 4, data->priv->order, 1); exif_set_long (*d + 6 + offset + 8, data->priv->order, *ds - 6); ts = *ds + data->size; t = exif_mem_realloc (data->priv->mem, *d, ts); if (!t) { EXIF_LOG_NO_MEMORY (data->priv->log, "ExifData", ts); return; } *d = t; *ds = ts; memcpy (*d + *ds - data->size, data->data, data->size); offset += 12; /* EXIF_TAG_JPEG_INTERCHANGE_FORMAT_LENGTH */ exif_set_short (*d + 6 + offset + 0, data->priv->order, EXIF_TAG_JPEG_INTERCHANGE_FORMAT_LENGTH); exif_set_short (*d + 6 + offset + 2, data->priv->order, EXIF_FORMAT_LONG); exif_set_long (*d + 6 + offset + 4, data->priv->order, 1); exif_set_long (*d + 6 + offset + 8, data->priv->order, data->size); offset += 12; } break; default: break; } /* Sort the directory according to TIFF specification */ qsort (*d + 6 + offset - (ifd->count + n_ptr + n_thumb) * 12, (ifd->count + n_ptr + n_thumb), 12, (data->priv->order == EXIF_BYTE_ORDER_INTEL) ? cmp_func_intel : cmp_func_motorola); /* Correctly terminate the directory */ if (i == EXIF_IFD_0 && (data->ifd[EXIF_IFD_1]->count || data->size)) { /* * We are saving IFD 0. Tell where IFD 1 starts and save * IFD 1. */ exif_set_long (*d + 6 + offset, data->priv->order, *ds - 6); exif_data_save_data_content (data, data->ifd[EXIF_IFD_1], d, ds, *ds - 6); } else exif_set_long (*d + 6 + offset, data->priv->order, 0); } typedef enum { EXIF_DATA_TYPE_MAKER_NOTE_NONE = 0, EXIF_DATA_TYPE_MAKER_NOTE_CANON = 1, EXIF_DATA_TYPE_MAKER_NOTE_OLYMPUS = 2, EXIF_DATA_TYPE_MAKER_NOTE_PENTAX = 3, EXIF_DATA_TYPE_MAKER_NOTE_NIKON = 4, EXIF_DATA_TYPE_MAKER_NOTE_CASIO = 5, EXIF_DATA_TYPE_MAKER_NOTE_FUJI = 6 } ExifDataTypeMakerNote; /*! If MakerNote is recognized, load it. 
* * \param[in,out] data #ExifData * \param[in] d pointer to raw EXIF data * \param[in] ds length of data at d */ static void interpret_maker_note(ExifData *data, const unsigned char *d, unsigned int ds) { int mnoteid; ExifEntry* e = exif_data_get_entry (data, EXIF_TAG_MAKER_NOTE); if (!e) return; if ((mnoteid = exif_mnote_data_olympus_identify (data, e)) != 0) { exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Olympus MakerNote variant type %d", mnoteid); data->priv->md = exif_mnote_data_olympus_new (data->priv->mem); } else if ((mnoteid = exif_mnote_data_canon_identify (data, e)) != 0) { exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Canon MakerNote variant type %d", mnoteid); data->priv->md = exif_mnote_data_canon_new (data->priv->mem, data->priv->options); } else if ((mnoteid = exif_mnote_data_fuji_identify (data, e)) != 0) { exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Fuji MakerNote variant type %d", mnoteid); data->priv->md = exif_mnote_data_fuji_new (data->priv->mem); /* NOTE: Must do Pentax detection last because some of the * heuristics are pretty general. */ } else if ((mnoteid = exif_mnote_data_pentax_identify (data, e)) != 0) { exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Pentax MakerNote variant type %d", mnoteid); data->priv->md = exif_mnote_data_pentax_new (data->priv->mem); } /* * If we are able to interpret the maker note, do so. */ if (data->priv->md) { exif_mnote_data_log (data->priv->md, data->priv->log); exif_mnote_data_set_byte_order (data->priv->md, data->priv->order); exif_mnote_data_set_offset (data->priv->md, data->priv->offset_mnote); exif_mnote_data_load (data->priv->md, d, ds); } } #define LOG_TOO_SMALL \ exif_log (data->priv->log, EXIF_LOG_CODE_CORRUPT_DATA, "ExifData", \ _("Size of data too small to allow for EXIF data.")); void exif_data_load_data (ExifData *data, const unsigned char *d_orig, unsigned int ds) { unsigned int l; ExifLong offset; ExifShort n; const unsigned char *d = d_orig; unsigned int len, fullds; if (!data || !data->priv || !d || !ds) return; exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Parsing %i byte(s) EXIF data...\n", ds); /* * It can be that the data starts with the EXIF header. If it does * not, search the EXIF marker. */ if (ds < 6) { LOG_TOO_SMALL; return; } if (!memcmp (d, ExifHeader, 6)) { exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Found EXIF header at start."); } else { while (ds >= 3) { while (ds && (d[0] == 0xff)) { d++; ds--; } /* JPEG_MARKER_SOI */ if (ds && d[0] == JPEG_MARKER_SOI) { d++; ds--; continue; } /* JPEG_MARKER_APP1 */ if (ds && d[0] == JPEG_MARKER_APP1) break; /* Skip irrelevant APP markers. The branch for APP1 must come before this, otherwise this code block will cause APP1 to be skipped. This code path is only relevant for files that are nonconformant to the EXIF specification. For conformant files, the APP1 code path above will be taken. */ if (ds >= 3 && d[0] >= 0xe0 && d[0] <= 0xef) { /* JPEG_MARKER_APPn */ d++; ds--; l = (d[0] << 8) | d[1]; if (l > ds) return; d += l; ds -= l; continue; } /* Unknown marker or data. Give up. */ exif_log (data->priv->log, EXIF_LOG_CODE_CORRUPT_DATA, "ExifData", _("EXIF marker not found.")); return; } if (ds < 3) { LOG_TOO_SMALL; return; } d++; ds--; len = (d[0] << 8) | d[1]; exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "We have to deal with %i byte(s) of EXIF data.", len); d += 2; ds -= 2; } /* * Verify the exif header * (offset 2, length 6). 
*/ if (ds < 6) { LOG_TOO_SMALL; return; } if (memcmp (d, ExifHeader, 6)) { exif_log (data->priv->log, EXIF_LOG_CODE_CORRUPT_DATA, "ExifData", _("EXIF header not found.")); return; } exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Found EXIF header."); /* Sanity check the data length */ if (ds < 14) return; /* The JPEG APP1 section can be no longer than 64 KiB (including a 16-bit length), so cap the data length to protect against overflow in future offset calculations */ fullds = ds; if (ds > 0xfffe) ds = 0xfffe; /* Byte order (offset 6, length 2) */ if (!memcmp (d + 6, "II", 2)) data->priv->order = EXIF_BYTE_ORDER_INTEL; else if (!memcmp (d + 6, "MM", 2)) data->priv->order = EXIF_BYTE_ORDER_MOTOROLA; else { exif_log (data->priv->log, EXIF_LOG_CODE_CORRUPT_DATA, "ExifData", _("Unknown encoding.")); return; } /* Fixed value */ if (exif_get_short (d + 8, data->priv->order) != 0x002a) return; /* IFD 0 offset */ offset = exif_get_long (d + 10, data->priv->order); exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "IFD 0 at %i.", (int) offset); /* Sanity check the offset, being careful about overflow */ if (offset > ds || offset + 6 + 2 > ds) return; /* Parse the actual exif data (usually offset 14 from start) */ exif_data_load_data_content (data, EXIF_IFD_0, d + 6, ds - 6, offset, 0); /* IFD 1 offset */ n = exif_get_short (d + 6 + offset, data->priv->order); if (offset + 6 + 2 + 12 * n + 4 > ds) return; offset = exif_get_long (d + 6 + offset + 2 + 12 * n, data->priv->order); if (offset) { exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "IFD 1 at %i.", (int) offset); /* Sanity check. */ if (offset > ds || offset + 6 > ds) { exif_log (data->priv->log, EXIF_LOG_CODE_CORRUPT_DATA, "ExifData", "Bogus offset of IFD1."); } else { exif_data_load_data_content (data, EXIF_IFD_1, d + 6, ds - 6, offset, 0); } } /* * If we got an EXIF_TAG_MAKER_NOTE, try to interpret it. Some * cameras use pointers in the maker note tag that point to the * space between IFDs. Here is the only place where we have access * to that data. */ interpret_maker_note(data, d, fullds); /* Fixup tags if requested */ if (data->priv->options & EXIF_DATA_OPTION_FOLLOW_SPECIFICATION) exif_data_fix (data); } void exif_data_save_data (ExifData *data, unsigned char **d, unsigned int *ds) { if (ds) *ds = 0; /* This means something went wrong */ if (!data || !d || !ds) return; /* Header */ *ds = 14; *d = exif_data_alloc (data, *ds); if (!*d) { *ds = 0; return; } memcpy (*d, ExifHeader, 6); /* Order (offset 6) */ if (data->priv->order == EXIF_BYTE_ORDER_INTEL) { memcpy (*d + 6, "II", 2); } else { memcpy (*d + 6, "MM", 2); } /* Fixed value (2 bytes, offset 8) */ exif_set_short (*d + 8, data->priv->order, 0x002a); /* * IFD 0 offset (4 bytes, offset 10). * We will start 8 bytes after the * EXIF header (2 bytes for order, another 2 for the test, and * 4 bytes for the IFD 0 offset make 8 bytes together). */ exif_set_long (*d + 10, data->priv->order, 8); /* Now save IFD 0. IFD 1 will be saved automatically. 
*/ exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Saving IFDs..."); exif_data_save_data_content (data, data->ifd[EXIF_IFD_0], d, ds, *ds - 6); exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Saved %i byte(s) EXIF data.", *ds); } ExifData * exif_data_new_from_file (const char *path) { ExifData *edata; ExifLoader *loader; loader = exif_loader_new (); exif_loader_write_file (loader, path); edata = exif_loader_get_data (loader); exif_loader_unref (loader); return (edata); } void exif_data_ref (ExifData *data) { if (!data) return; data->priv->ref_count++; } void exif_data_unref (ExifData *data) { if (!data) return; data->priv->ref_count--; if (!data->priv->ref_count) exif_data_free (data); } void exif_data_free (ExifData *data) { unsigned int i; ExifMem *mem = (data && data->priv) ? data->priv->mem : NULL; if (!data) return; for (i = 0; i < EXIF_IFD_COUNT; i++) { if (data->ifd[i]) { exif_content_unref (data->ifd[i]); data->ifd[i] = NULL; } } if (data->data) { exif_mem_free (mem, data->data); data->data = NULL; } if (data->priv) { if (data->priv->log) { exif_log_unref (data->priv->log); data->priv->log = NULL; } if (data->priv->md) { exif_mnote_data_unref (data->priv->md); data->priv->md = NULL; } exif_mem_free (mem, data->priv); exif_mem_free (mem, data); } exif_mem_unref (mem); } void exif_data_dump (ExifData *data) { unsigned int i; if (!data) return; for (i = 0; i < EXIF_IFD_COUNT; i++) { if (data->ifd[i] && data->ifd[i]->count) { printf ("Dumping IFD '%s'...\n", exif_ifd_get_name (i)); exif_content_dump (data->ifd[i], 0); } } if (data->data) { printf ("%i byte(s) thumbnail data available: ", data->size); if (data->size >= 4) { printf ("0x%02x 0x%02x ... 0x%02x 0x%02x\n", data->data[0], data->data[1], data->data[data->size - 2], data->data[data->size - 1]); } } } ExifByteOrder exif_data_get_byte_order (ExifData *data) { if (!data) return (0); return (data->priv->order); } void exif_data_foreach_content (ExifData *data, ExifDataForeachContentFunc func, void *user_data) { unsigned int i; if (!data || !func) return; for (i = 0; i < EXIF_IFD_COUNT; i++) func (data->ifd[i], user_data); } typedef struct _ByteOrderChangeData ByteOrderChangeData; struct _ByteOrderChangeData { ExifByteOrder old, new; }; static void entry_set_byte_order (ExifEntry *e, void *data) { ByteOrderChangeData *d = data; if (!e) return; exif_array_set_byte_order (e->format, e->data, e->components, d->old, d->new); } static void content_set_byte_order (ExifContent *content, void *data) { exif_content_foreach_entry (content, entry_set_byte_order, data); } void exif_data_set_byte_order (ExifData *data, ExifByteOrder order) { ByteOrderChangeData d; if (!data || (order == data->priv->order)) return; d.old = data->priv->order; d.new = order; exif_data_foreach_content (data, content_set_byte_order, &d); data->priv->order = order; if (data->priv->md) exif_mnote_data_set_byte_order (data->priv->md, order); } void exif_data_log (ExifData *data, ExifLog *log) { unsigned int i; if (!data || !data->priv) return; exif_log_unref (data->priv->log); data->priv->log = log; exif_log_ref (log); for (i = 0; i < EXIF_IFD_COUNT; i++) exif_content_log (data->ifd[i], log); } /* Used internally within libexif */ ExifLog *exif_data_get_log (ExifData *); ExifLog * exif_data_get_log (ExifData *data) { if (!data || !data->priv) return NULL; return data->priv->log; } static const struct { ExifDataOption option; const char *name; const char *description; } exif_data_option[] = { {EXIF_DATA_OPTION_IGNORE_UNKNOWN_TAGS, N_("Ignore 
unknown tags"), N_("Ignore unknown tags when loading EXIF data.")}, {EXIF_DATA_OPTION_FOLLOW_SPECIFICATION, N_("Follow specification"), N_("Add, correct and remove entries to get EXIF data that follows " "the specification.")}, {EXIF_DATA_OPTION_DONT_CHANGE_MAKER_NOTE, N_("Do not change maker note"), N_("When loading and resaving Exif data, save the maker note unmodified." " Be aware that the maker note can get corrupted.")}, {0, NULL, NULL} }; const char * exif_data_option_get_name (ExifDataOption o) { unsigned int i; for (i = 0; exif_data_option[i].name; i++) if (exif_data_option[i].option == o) break; return _(exif_data_option[i].name); } const char * exif_data_option_get_description (ExifDataOption o) { unsigned int i; for (i = 0; exif_data_option[i].description; i++) if (exif_data_option[i].option == o) break; return _(exif_data_option[i].description); } void exif_data_set_option (ExifData *d, ExifDataOption o) { if (!d) return; d->priv->options |= o; } void exif_data_unset_option (ExifData *d, ExifDataOption o) { if (!d) return; d->priv->options &= ~o; } static void fix_func (ExifContent *c, void *UNUSED(data)) { switch (exif_content_get_ifd (c)) { case EXIF_IFD_1: if (c->parent->data) exif_content_fix (c); else if (c->count) { exif_log (c->parent->priv->log, EXIF_LOG_CODE_DEBUG, "exif-data", "No thumbnail but entries on thumbnail. These entries have been " "removed."); while (c->count) { unsigned int cnt = c->count; exif_content_remove_entry (c, c->entries[c->count - 1]); if (cnt == c->count) { /* safety net */ exif_log (c->parent->priv->log, EXIF_LOG_CODE_DEBUG, "exif-data", "failed to remove last entry from entries."); c->count--; } } } break; default: exif_content_fix (c); } } void exif_data_fix (ExifData *d) { exif_data_foreach_content (d, fix_func, NULL); } void exif_data_set_data_type (ExifData *d, ExifDataType dt) { if (!d || !d->priv) return; d->priv->data_type = dt; } ExifDataType exif_data_get_data_type (ExifData *d) { return (d && d->priv) ? d->priv->data_type : EXIF_DATA_TYPE_UNKNOWN; }
./CrossVul/dataset_final_sorted/CWE-400/c/good_477_1
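The exif_data_load_data_content() routine above guards against resource-exhausting IFD recursion by accumulating a recursion_cost and refusing to descend once it exceeds 170, with level_cost() converting the entry count of the parent IFD into that cost. The standalone sketch below (not part of libexif) reproduces that arithmetic to show roughly how many nested levels different IFD sizes allow; the sample entry counts, the depth estimate, and the printed output are illustrative additions.

#include <math.h>
#include <stdio.h>

/* Same arithmetic as level_cost() in exif-data.c above. */
static unsigned int level_cost(unsigned int n)
{
	static const double log_1_1 = 0.09531017980432493; /* log(1.1) */

	/* Adding 0.1 protects against the n == 1 case. */
	return (unsigned int)ceil(log(n + 0.1) / log_1_1);
}

int main(void)
{
	const unsigned int limit = 170; /* cap used in exif_data_load_data_content() */
	unsigned int entries[] = { 4, 16, 256, 65535 };

	for (size_t i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
		unsigned int n = entries[i];
		unsigned int per_level = level_cost(n);

		/* Rough estimate: every level contributes per_level to the budget. */
		printf("IFD with %5u entries: level_cost() = %3u -> roughly %u nested level(s) before the %u cap\n",
		       n, per_level, limit / per_level, limit);
	}
	return 0;
}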
crossvul-cpp_data_good_1241_0
// SPDX-License-Identifier: GPL-2.0-only /* * Intel Wireless WiMAX Connection 2400m * Implement backend for the WiMAX stack rfkill support * * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com> * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * The WiMAX kernel stack integrates into RF-Kill and keeps the * switches's status. We just need to: * * - report changes in the HW RF Kill switch [with * wimax_rfkill_{sw,hw}_report(), which happens when we detect those * indications coming through hardware reports]. We also do it on * initialization to let the stack know the initial HW state. * * - implement indications from the stack to change the SW RF Kill * switch (coming from sysfs, the wimax stack or user space). */ #include "i2400m.h" #include <linux/wimax/i2400m.h> #include <linux/slab.h> #define D_SUBMODULE rfkill #include "debug-levels.h" /* * Return true if the i2400m radio is in the requested wimax_rf_state state * */ static int i2400m_radio_is(struct i2400m *i2400m, enum wimax_rf_state state) { if (state == WIMAX_RF_OFF) return i2400m->state == I2400M_SS_RF_OFF || i2400m->state == I2400M_SS_RF_SHUTDOWN; else if (state == WIMAX_RF_ON) /* state == WIMAX_RF_ON */ return i2400m->state != I2400M_SS_RF_OFF && i2400m->state != I2400M_SS_RF_SHUTDOWN; else { BUG(); return -EINVAL; /* shut gcc warnings on certain arches */ } } /* * WiMAX stack operation: implement SW RFKill toggling * * @wimax_dev: device descriptor * @skb: skb where the message has been received; skb->data is * expected to point to the message payload. * @genl_info: passed by the generic netlink layer * * Generic Netlink will call this function when a message is sent from * userspace to change the software RF-Kill switch status. * * This function will set the device's software RF-Kill switch state to * match what is requested. * * NOTE: the i2400m has a strict state machine; we can only set the * RF-Kill switch when it is on, the HW RF-Kill is on and the * device is initialized. So we ignore errors steaming from not * being in the right state (-EILSEQ). 
*/ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev, enum wimax_rf_state state) { int result; struct i2400m *i2400m = wimax_dev_to_i2400m(wimax_dev); struct device *dev = i2400m_dev(i2400m); struct sk_buff *ack_skb; struct { struct i2400m_l3l4_hdr hdr; struct i2400m_tlv_rf_operation sw_rf; } __packed *cmd; char strerr[32]; d_fnstart(4, dev, "(wimax_dev %p state %d)\n", wimax_dev, state); result = -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) goto error_alloc; cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_RF_CONTROL); cmd->hdr.length = sizeof(cmd->sw_rf); cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION); cmd->sw_rf.hdr.type = cpu_to_le16(I2400M_TLV_RF_OPERATION); cmd->sw_rf.hdr.length = cpu_to_le16(sizeof(cmd->sw_rf.status)); switch (state) { case WIMAX_RF_OFF: /* RFKILL ON, radio OFF */ cmd->sw_rf.status = cpu_to_le32(2); break; case WIMAX_RF_ON: /* RFKILL OFF, radio ON */ cmd->sw_rf.status = cpu_to_le32(1); break; default: BUG(); } ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); result = PTR_ERR(ack_skb); if (IS_ERR(ack_skb)) { dev_err(dev, "Failed to issue 'RF Control' command: %d\n", result); goto error_msg_to_dev; } result = i2400m_msg_check_status(wimax_msg_data(ack_skb), strerr, sizeof(strerr)); if (result < 0) { dev_err(dev, "'RF Control' (0x%04x) command failed: %d - %s\n", I2400M_MT_CMD_RF_CONTROL, result, strerr); goto error_cmd; } /* Now we wait for the state to change to RADIO_OFF or RADIO_ON */ result = wait_event_timeout( i2400m->state_wq, i2400m_radio_is(i2400m, state), 5 * HZ); if (result == 0) result = -ETIMEDOUT; if (result < 0) dev_err(dev, "Error waiting for device to toggle RF state: " "%d\n", result); result = 0; error_cmd: kfree_skb(ack_skb); error_msg_to_dev: error_alloc: d_fnend(4, dev, "(wimax_dev %p state %d) = %d\n", wimax_dev, state, result); kfree(cmd); return result; } /* * Inform the WiMAX stack of changes in the RF Kill switches reported * by the device * * @i2400m: device descriptor * @rfss: TLV for RF Switches status; already validated * * NOTE: the reports on RF switch status cannot be trusted * or used until the device is in a state of RADIO_OFF * or greater. */ void i2400m_report_tlv_rf_switches_status( struct i2400m *i2400m, const struct i2400m_tlv_rf_switches_status *rfss) { struct device *dev = i2400m_dev(i2400m); enum i2400m_rf_switch_status hw, sw; enum wimax_st wimax_state; sw = le32_to_cpu(rfss->sw_rf_switch); hw = le32_to_cpu(rfss->hw_rf_switch); d_fnstart(3, dev, "(i2400m %p rfss %p [hw %u sw %u])\n", i2400m, rfss, hw, sw); /* We only process rw switch evens when the device has been * fully initialized */ wimax_state = wimax_state_get(&i2400m->wimax_dev); if (wimax_state < WIMAX_ST_RADIO_OFF) { d_printf(3, dev, "ignoring RF switches report, state %u\n", wimax_state); goto out; } switch (sw) { case I2400M_RF_SWITCH_ON: /* RF Kill disabled (radio on) */ wimax_report_rfkill_sw(&i2400m->wimax_dev, WIMAX_RF_ON); break; case I2400M_RF_SWITCH_OFF: /* RF Kill enabled (radio off) */ wimax_report_rfkill_sw(&i2400m->wimax_dev, WIMAX_RF_OFF); break; default: dev_err(dev, "HW BUG? Unknown RF SW state 0x%x\n", sw); } switch (hw) { case I2400M_RF_SWITCH_ON: /* RF Kill disabled (radio on) */ wimax_report_rfkill_hw(&i2400m->wimax_dev, WIMAX_RF_ON); break; case I2400M_RF_SWITCH_OFF: /* RF Kill enabled (radio off) */ wimax_report_rfkill_hw(&i2400m->wimax_dev, WIMAX_RF_OFF); break; default: dev_err(dev, "HW BUG? 
Unknown RF HW state 0x%x\n", hw); } out: d_fnend(3, dev, "(i2400m %p rfss %p [hw %u sw %u]) = void\n", i2400m, rfss, hw, sw); }
./CrossVul/dataset_final_sorted/CWE-400/c/good_1241_0
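The i2400m_op_rfkill_sw_toggle() function above allocates its command buffer once and releases it on a single exit path below the error labels, so every outcome of the toggle, including a rejected command, returns the memory instead of leaking it on each RF-kill request. The sketch below is a userspace reduction of that goto-unwind shape; the struct, command id, and send_cmd() stand-in are invented for the example, and only the control flow mirrors the driver.

#include <stdio.h>
#include <stdlib.h>

struct toggle_cmd {
	unsigned short type;
	unsigned int status;
};

/* Stand-in for i2400m_msg_to_dev(): accept status 1 (radio on) or 2 (radio off). */
static int send_cmd(const struct toggle_cmd *cmd)
{
	return (cmd->status == 1 || cmd->status == 2) ? 0 : -1;
}

static int toggle_radio(unsigned int status)
{
	struct toggle_cmd *cmd;
	int result = -1;

	cmd = calloc(1, sizeof(*cmd));
	if (!cmd)
		goto error_alloc;

	cmd->type = 0;		/* stand-in for the real command id */
	cmd->status = status;

	if (send_cmd(cmd) < 0) {
		fprintf(stderr, "toggle_radio: device rejected command\n");
		goto error_msg_to_dev;
	}
	result = 0;

error_msg_to_dev:
error_alloc:
	free(cmd);		/* free(NULL) is a no-op, like kfree(NULL) */
	return result;
}

int main(void)
{
	return toggle_radio(1) == 0 ? 0 : 1;
}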
crossvul-cpp_data_good_897_0
/* * The copyright in this software is being made available under the 2-clauses * BSD License, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such rights * are granted under this license. * * Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium * Copyright (c) 2002-2014, Professor Benoit Macq * Copyright (c) 2001-2003, David Janssens * Copyright (c) 2002-2003, Yannick Verschueren * Copyright (c) 2003-2007, Francois-Olivier Devaux * Copyright (c) 2003-2014, Antonin Descampe * Copyright (c) 2005, Herve Drolon, FreeImage Team * Copyright (c) 2006-2007, Parvatha Elangovan * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "opj_apps_config.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include "openjpeg.h" #include "convert.h" typedef struct { OPJ_UINT16 bfType; /* 'BM' for Bitmap (19776) */ OPJ_UINT32 bfSize; /* Size of the file */ OPJ_UINT16 bfReserved1; /* Reserved : 0 */ OPJ_UINT16 bfReserved2; /* Reserved : 0 */ OPJ_UINT32 bfOffBits; /* Offset */ } OPJ_BITMAPFILEHEADER; typedef struct { OPJ_UINT32 biSize; /* Size of the structure in bytes */ OPJ_UINT32 biWidth; /* Width of the image in pixels */ OPJ_UINT32 biHeight; /* Height of the image in pixels */ OPJ_UINT16 biPlanes; /* 1 */ OPJ_UINT16 biBitCount; /* Number of color bits by pixels */ OPJ_UINT32 biCompression; /* Type of encoding 0: none 1: RLE8 2: RLE4 */ OPJ_UINT32 biSizeImage; /* Size of the image in bytes */ OPJ_UINT32 biXpelsPerMeter; /* Horizontal (X) resolution in pixels/meter */ OPJ_UINT32 biYpelsPerMeter; /* Vertical (Y) resolution in pixels/meter */ OPJ_UINT32 biClrUsed; /* Number of color used in the image (0: ALL) */ OPJ_UINT32 biClrImportant; /* Number of important color (0: ALL) */ OPJ_UINT32 biRedMask; /* Red channel bit mask */ OPJ_UINT32 biGreenMask; /* Green channel bit mask */ OPJ_UINT32 biBlueMask; /* Blue channel bit mask */ OPJ_UINT32 biAlphaMask; /* Alpha channel bit mask */ OPJ_UINT32 biColorSpaceType; /* Color space type */ OPJ_UINT8 biColorSpaceEP[36]; /* Color space end points */ OPJ_UINT32 biRedGamma; /* Red channel gamma */ OPJ_UINT32 biGreenGamma; /* Green channel gamma */ OPJ_UINT32 biBlueGamma; /* Blue channel gamma */ OPJ_UINT32 biIntent; /* Intent */ OPJ_UINT32 biIccProfileData; /* ICC profile data */ OPJ_UINT32 biIccProfileSize; /* ICC profile size */ OPJ_UINT32 biReserved; /* Reserved */ } OPJ_BITMAPINFOHEADER; static void opj_applyLUT8u_8u32s_C1R( OPJ_UINT8 const* pSrc, OPJ_INT32 srcStride, OPJ_INT32* pDst, OPJ_INT32 dstStride, OPJ_UINT8 const* pLUT, OPJ_UINT32 width, OPJ_UINT32 height) { OPJ_UINT32 y; for (y = height; y != 0U; --y) { OPJ_UINT32 x; for (x = 0; x < width; x++) { pDst[x] = (OPJ_INT32)pLUT[pSrc[x]]; } pSrc += srcStride; pDst += dstStride; } } static void opj_applyLUT8u_8u32s_C1P3R( OPJ_UINT8 const* pSrc, OPJ_INT32 srcStride, OPJ_INT32* const* pDst, OPJ_INT32 const* pDstStride, OPJ_UINT8 const* const* pLUT, OPJ_UINT32 width, OPJ_UINT32 height) { OPJ_UINT32 y; OPJ_INT32* pR = pDst[0]; OPJ_INT32* pG = pDst[1]; OPJ_INT32* pB = pDst[2]; OPJ_UINT8 const* pLUT_R = pLUT[0]; OPJ_UINT8 const* pLUT_G = pLUT[1]; OPJ_UINT8 const* pLUT_B = pLUT[2]; for (y = height; y != 0U; --y) { OPJ_UINT32 x; for (x = 0; x < width; x++) { OPJ_UINT8 idx = pSrc[x]; pR[x] = (OPJ_INT32)pLUT_R[idx]; pG[x] = (OPJ_INT32)pLUT_G[idx]; pB[x] = (OPJ_INT32)pLUT_B[idx]; } pSrc += srcStride; pR += pDstStride[0]; pG += pDstStride[1]; pB += pDstStride[2]; } } static void bmp24toimage(const OPJ_UINT8* pData, OPJ_UINT32 stride, opj_image_t* image) { int index; OPJ_UINT32 width, height; OPJ_UINT32 x, y; const OPJ_UINT8 *pSrc = NULL; width = image->comps[0].w; height = image->comps[0].h; index = 0; pSrc = pData + (height - 1U) * stride; for (y = 0; y < height; y++) { for (x = 0; x < width; x++) { image->comps[0].data[index] = (OPJ_INT32)pSrc[3 * x + 2]; /* R */ image->comps[1].data[index] = (OPJ_INT32)pSrc[3 * x + 1]; /* G */ image->comps[2].data[index] = (OPJ_INT32)pSrc[3 * x + 0]; /* B */ index++; } pSrc -= stride; } } static void bmp_mask_get_shift_and_prec(OPJ_UINT32 mask, OPJ_UINT32* shift, OPJ_UINT32* prec) { OPJ_UINT32 l_shift, l_prec; l_shift = l_prec = 0U; if (mask != 0U) { while ((mask & 1U) 
== 0U) { mask >>= 1; l_shift++; } while (mask & 1U) { mask >>= 1; l_prec++; } } *shift = l_shift; *prec = l_prec; } static void bmpmask32toimage(const OPJ_UINT8* pData, OPJ_UINT32 stride, opj_image_t* image, OPJ_UINT32 redMask, OPJ_UINT32 greenMask, OPJ_UINT32 blueMask, OPJ_UINT32 alphaMask) { int index; OPJ_UINT32 width, height; OPJ_UINT32 x, y; const OPJ_UINT8 *pSrc = NULL; OPJ_BOOL hasAlpha; OPJ_UINT32 redShift, redPrec; OPJ_UINT32 greenShift, greenPrec; OPJ_UINT32 blueShift, bluePrec; OPJ_UINT32 alphaShift, alphaPrec; width = image->comps[0].w; height = image->comps[0].h; hasAlpha = image->numcomps > 3U; bmp_mask_get_shift_and_prec(redMask, &redShift, &redPrec); bmp_mask_get_shift_and_prec(greenMask, &greenShift, &greenPrec); bmp_mask_get_shift_and_prec(blueMask, &blueShift, &bluePrec); bmp_mask_get_shift_and_prec(alphaMask, &alphaShift, &alphaPrec); image->comps[0].bpp = redPrec; image->comps[0].prec = redPrec; image->comps[1].bpp = greenPrec; image->comps[1].prec = greenPrec; image->comps[2].bpp = bluePrec; image->comps[2].prec = bluePrec; if (hasAlpha) { image->comps[3].bpp = alphaPrec; image->comps[3].prec = alphaPrec; } index = 0; pSrc = pData + (height - 1U) * stride; for (y = 0; y < height; y++) { for (x = 0; x < width; x++) { OPJ_UINT32 value = 0U; value |= ((OPJ_UINT32)pSrc[4 * x + 0]) << 0; value |= ((OPJ_UINT32)pSrc[4 * x + 1]) << 8; value |= ((OPJ_UINT32)pSrc[4 * x + 2]) << 16; value |= ((OPJ_UINT32)pSrc[4 * x + 3]) << 24; image->comps[0].data[index] = (OPJ_INT32)((value & redMask) >> redShift); /* R */ image->comps[1].data[index] = (OPJ_INT32)((value & greenMask) >> greenShift); /* G */ image->comps[2].data[index] = (OPJ_INT32)((value & blueMask) >> blueShift); /* B */ if (hasAlpha) { image->comps[3].data[index] = (OPJ_INT32)((value & alphaMask) >> alphaShift); /* A */ } index++; } pSrc -= stride; } } static void bmpmask16toimage(const OPJ_UINT8* pData, OPJ_UINT32 stride, opj_image_t* image, OPJ_UINT32 redMask, OPJ_UINT32 greenMask, OPJ_UINT32 blueMask, OPJ_UINT32 alphaMask) { int index; OPJ_UINT32 width, height; OPJ_UINT32 x, y; const OPJ_UINT8 *pSrc = NULL; OPJ_BOOL hasAlpha; OPJ_UINT32 redShift, redPrec; OPJ_UINT32 greenShift, greenPrec; OPJ_UINT32 blueShift, bluePrec; OPJ_UINT32 alphaShift, alphaPrec; width = image->comps[0].w; height = image->comps[0].h; hasAlpha = image->numcomps > 3U; bmp_mask_get_shift_and_prec(redMask, &redShift, &redPrec); bmp_mask_get_shift_and_prec(greenMask, &greenShift, &greenPrec); bmp_mask_get_shift_and_prec(blueMask, &blueShift, &bluePrec); bmp_mask_get_shift_and_prec(alphaMask, &alphaShift, &alphaPrec); image->comps[0].bpp = redPrec; image->comps[0].prec = redPrec; image->comps[1].bpp = greenPrec; image->comps[1].prec = greenPrec; image->comps[2].bpp = bluePrec; image->comps[2].prec = bluePrec; if (hasAlpha) { image->comps[3].bpp = alphaPrec; image->comps[3].prec = alphaPrec; } index = 0; pSrc = pData + (height - 1U) * stride; for (y = 0; y < height; y++) { for (x = 0; x < width; x++) { OPJ_UINT32 value = 0U; value |= ((OPJ_UINT32)pSrc[2 * x + 0]) << 0; value |= ((OPJ_UINT32)pSrc[2 * x + 1]) << 8; image->comps[0].data[index] = (OPJ_INT32)((value & redMask) >> redShift); /* R */ image->comps[1].data[index] = (OPJ_INT32)((value & greenMask) >> greenShift); /* G */ image->comps[2].data[index] = (OPJ_INT32)((value & blueMask) >> blueShift); /* B */ if (hasAlpha) { image->comps[3].data[index] = (OPJ_INT32)((value & alphaMask) >> alphaShift); /* A */ } index++; } pSrc -= stride; } } static opj_image_t* bmp8toimage(const OPJ_UINT8* pData, 
OPJ_UINT32 stride, opj_image_t* image, OPJ_UINT8 const* const* pLUT) { OPJ_UINT32 width, height; const OPJ_UINT8 *pSrc = NULL; width = image->comps[0].w; height = image->comps[0].h; pSrc = pData + (height - 1U) * stride; if (image->numcomps == 1U) { opj_applyLUT8u_8u32s_C1R(pSrc, -(OPJ_INT32)stride, image->comps[0].data, (OPJ_INT32)width, pLUT[0], width, height); } else { OPJ_INT32* pDst[3]; OPJ_INT32 pDstStride[3]; pDst[0] = image->comps[0].data; pDst[1] = image->comps[1].data; pDst[2] = image->comps[2].data; pDstStride[0] = (OPJ_INT32)width; pDstStride[1] = (OPJ_INT32)width; pDstStride[2] = (OPJ_INT32)width; opj_applyLUT8u_8u32s_C1P3R(pSrc, -(OPJ_INT32)stride, pDst, pDstStride, pLUT, width, height); } return image; } static OPJ_BOOL bmp_read_file_header(FILE* IN, OPJ_BITMAPFILEHEADER* header) { header->bfType = (OPJ_UINT16)getc(IN); header->bfType |= (OPJ_UINT16)((OPJ_UINT32)getc(IN) << 8); if (header->bfType != 19778) { fprintf(stderr, "Error, not a BMP file!\n"); return OPJ_FALSE; } /* FILE HEADER */ /* ------------- */ header->bfSize = (OPJ_UINT32)getc(IN); header->bfSize |= (OPJ_UINT32)getc(IN) << 8; header->bfSize |= (OPJ_UINT32)getc(IN) << 16; header->bfSize |= (OPJ_UINT32)getc(IN) << 24; header->bfReserved1 = (OPJ_UINT16)getc(IN); header->bfReserved1 |= (OPJ_UINT16)((OPJ_UINT32)getc(IN) << 8); header->bfReserved2 = (OPJ_UINT16)getc(IN); header->bfReserved2 |= (OPJ_UINT16)((OPJ_UINT32)getc(IN) << 8); header->bfOffBits = (OPJ_UINT32)getc(IN); header->bfOffBits |= (OPJ_UINT32)getc(IN) << 8; header->bfOffBits |= (OPJ_UINT32)getc(IN) << 16; header->bfOffBits |= (OPJ_UINT32)getc(IN) << 24; return OPJ_TRUE; } static OPJ_BOOL bmp_read_info_header(FILE* IN, OPJ_BITMAPINFOHEADER* header) { memset(header, 0, sizeof(*header)); /* INFO HEADER */ /* ------------- */ header->biSize = (OPJ_UINT32)getc(IN); header->biSize |= (OPJ_UINT32)getc(IN) << 8; header->biSize |= (OPJ_UINT32)getc(IN) << 16; header->biSize |= (OPJ_UINT32)getc(IN) << 24; switch (header->biSize) { case 12U: /* BITMAPCOREHEADER */ case 40U: /* BITMAPINFOHEADER */ case 52U: /* BITMAPV2INFOHEADER */ case 56U: /* BITMAPV3INFOHEADER */ case 108U: /* BITMAPV4HEADER */ case 124U: /* BITMAPV5HEADER */ break; default: fprintf(stderr, "Error, unknown BMP header size %d\n", header->biSize); return OPJ_FALSE; } header->biWidth = (OPJ_UINT32)getc(IN); header->biWidth |= (OPJ_UINT32)getc(IN) << 8; header->biWidth |= (OPJ_UINT32)getc(IN) << 16; header->biWidth |= (OPJ_UINT32)getc(IN) << 24; header->biHeight = (OPJ_UINT32)getc(IN); header->biHeight |= (OPJ_UINT32)getc(IN) << 8; header->biHeight |= (OPJ_UINT32)getc(IN) << 16; header->biHeight |= (OPJ_UINT32)getc(IN) << 24; header->biPlanes = (OPJ_UINT16)getc(IN); header->biPlanes |= (OPJ_UINT16)((OPJ_UINT32)getc(IN) << 8); header->biBitCount = (OPJ_UINT16)getc(IN); header->biBitCount |= (OPJ_UINT16)((OPJ_UINT32)getc(IN) << 8); if (header->biBitCount == 0) { fprintf(stderr, "Error, invalid biBitCount %d\n", 0); return OPJ_FALSE; } if (header->biSize >= 40U) { header->biCompression = (OPJ_UINT32)getc(IN); header->biCompression |= (OPJ_UINT32)getc(IN) << 8; header->biCompression |= (OPJ_UINT32)getc(IN) << 16; header->biCompression |= (OPJ_UINT32)getc(IN) << 24; header->biSizeImage = (OPJ_UINT32)getc(IN); header->biSizeImage |= (OPJ_UINT32)getc(IN) << 8; header->biSizeImage |= (OPJ_UINT32)getc(IN) << 16; header->biSizeImage |= (OPJ_UINT32)getc(IN) << 24; header->biXpelsPerMeter = (OPJ_UINT32)getc(IN); header->biXpelsPerMeter |= (OPJ_UINT32)getc(IN) << 8; header->biXpelsPerMeter |= 
(OPJ_UINT32)getc(IN) << 16; header->biXpelsPerMeter |= (OPJ_UINT32)getc(IN) << 24; header->biYpelsPerMeter = (OPJ_UINT32)getc(IN); header->biYpelsPerMeter |= (OPJ_UINT32)getc(IN) << 8; header->biYpelsPerMeter |= (OPJ_UINT32)getc(IN) << 16; header->biYpelsPerMeter |= (OPJ_UINT32)getc(IN) << 24; header->biClrUsed = (OPJ_UINT32)getc(IN); header->biClrUsed |= (OPJ_UINT32)getc(IN) << 8; header->biClrUsed |= (OPJ_UINT32)getc(IN) << 16; header->biClrUsed |= (OPJ_UINT32)getc(IN) << 24; header->biClrImportant = (OPJ_UINT32)getc(IN); header->biClrImportant |= (OPJ_UINT32)getc(IN) << 8; header->biClrImportant |= (OPJ_UINT32)getc(IN) << 16; header->biClrImportant |= (OPJ_UINT32)getc(IN) << 24; } if (header->biSize >= 56U) { header->biRedMask = (OPJ_UINT32)getc(IN); header->biRedMask |= (OPJ_UINT32)getc(IN) << 8; header->biRedMask |= (OPJ_UINT32)getc(IN) << 16; header->biRedMask |= (OPJ_UINT32)getc(IN) << 24; if (!header->biRedMask) { fprintf(stderr, "Error, invalid red mask value %d\n", header->biRedMask); return OPJ_FALSE; } header->biGreenMask = (OPJ_UINT32)getc(IN); header->biGreenMask |= (OPJ_UINT32)getc(IN) << 8; header->biGreenMask |= (OPJ_UINT32)getc(IN) << 16; header->biGreenMask |= (OPJ_UINT32)getc(IN) << 24; if (!header->biGreenMask) { fprintf(stderr, "Error, invalid green mask value %d\n", header->biGreenMask); return OPJ_FALSE; } header->biBlueMask = (OPJ_UINT32)getc(IN); header->biBlueMask |= (OPJ_UINT32)getc(IN) << 8; header->biBlueMask |= (OPJ_UINT32)getc(IN) << 16; header->biBlueMask |= (OPJ_UINT32)getc(IN) << 24; if (!header->biBlueMask) { fprintf(stderr, "Error, invalid blue mask value %d\n", header->biBlueMask); return OPJ_FALSE; } header->biAlphaMask = (OPJ_UINT32)getc(IN); header->biAlphaMask |= (OPJ_UINT32)getc(IN) << 8; header->biAlphaMask |= (OPJ_UINT32)getc(IN) << 16; header->biAlphaMask |= (OPJ_UINT32)getc(IN) << 24; } if (header->biSize >= 108U) { header->biColorSpaceType = (OPJ_UINT32)getc(IN); header->biColorSpaceType |= (OPJ_UINT32)getc(IN) << 8; header->biColorSpaceType |= (OPJ_UINT32)getc(IN) << 16; header->biColorSpaceType |= (OPJ_UINT32)getc(IN) << 24; if (fread(&(header->biColorSpaceEP), 1U, sizeof(header->biColorSpaceEP), IN) != sizeof(header->biColorSpaceEP)) { fprintf(stderr, "Error, can't read BMP header\n"); return OPJ_FALSE; } header->biRedGamma = (OPJ_UINT32)getc(IN); header->biRedGamma |= (OPJ_UINT32)getc(IN) << 8; header->biRedGamma |= (OPJ_UINT32)getc(IN) << 16; header->biRedGamma |= (OPJ_UINT32)getc(IN) << 24; header->biGreenGamma = (OPJ_UINT32)getc(IN); header->biGreenGamma |= (OPJ_UINT32)getc(IN) << 8; header->biGreenGamma |= (OPJ_UINT32)getc(IN) << 16; header->biGreenGamma |= (OPJ_UINT32)getc(IN) << 24; header->biBlueGamma = (OPJ_UINT32)getc(IN); header->biBlueGamma |= (OPJ_UINT32)getc(IN) << 8; header->biBlueGamma |= (OPJ_UINT32)getc(IN) << 16; header->biBlueGamma |= (OPJ_UINT32)getc(IN) << 24; } if (header->biSize >= 124U) { header->biIntent = (OPJ_UINT32)getc(IN); header->biIntent |= (OPJ_UINT32)getc(IN) << 8; header->biIntent |= (OPJ_UINT32)getc(IN) << 16; header->biIntent |= (OPJ_UINT32)getc(IN) << 24; header->biIccProfileData = (OPJ_UINT32)getc(IN); header->biIccProfileData |= (OPJ_UINT32)getc(IN) << 8; header->biIccProfileData |= (OPJ_UINT32)getc(IN) << 16; header->biIccProfileData |= (OPJ_UINT32)getc(IN) << 24; header->biIccProfileSize = (OPJ_UINT32)getc(IN); header->biIccProfileSize |= (OPJ_UINT32)getc(IN) << 8; header->biIccProfileSize |= (OPJ_UINT32)getc(IN) << 16; header->biIccProfileSize |= (OPJ_UINT32)getc(IN) << 24; header->biReserved = 
(OPJ_UINT32)getc(IN); header->biReserved |= (OPJ_UINT32)getc(IN) << 8; header->biReserved |= (OPJ_UINT32)getc(IN) << 16; header->biReserved |= (OPJ_UINT32)getc(IN) << 24; } return OPJ_TRUE; } static OPJ_BOOL bmp_read_raw_data(FILE* IN, OPJ_UINT8* pData, OPJ_UINT32 stride, OPJ_UINT32 width, OPJ_UINT32 height) { OPJ_ARG_NOT_USED(width); if (fread(pData, sizeof(OPJ_UINT8), stride * height, IN) != (stride * height)) { fprintf(stderr, "\nError: fread return a number of element different from the expected.\n"); return OPJ_FALSE; } return OPJ_TRUE; } static OPJ_BOOL bmp_read_rle8_data(FILE* IN, OPJ_UINT8* pData, OPJ_UINT32 stride, OPJ_UINT32 width, OPJ_UINT32 height) { OPJ_UINT32 x, y, written; OPJ_UINT8 *pix; const OPJ_UINT8 *beyond; beyond = pData + stride * height; pix = pData; x = y = written = 0U; while (y < height) { int c = getc(IN); if (c == EOF) { return OPJ_FALSE; } if (c) { int j, c1_int; OPJ_UINT8 c1; c1_int = getc(IN); if (c1_int == EOF) { return OPJ_FALSE; } c1 = (OPJ_UINT8)c1_int; for (j = 0; (j < c) && (x < width) && ((OPJ_SIZE_T)pix < (OPJ_SIZE_T)beyond); j++, x++, pix++) { *pix = c1; written++; } } else { c = getc(IN); if (c == EOF) { return OPJ_FALSE; } if (c == 0x00) { /* EOL */ x = 0; ++y; pix = pData + y * stride + x; } else if (c == 0x01) { /* EOP */ break; } else if (c == 0x02) { /* MOVE by dxdy */ c = getc(IN); if (c == EOF) { return OPJ_FALSE; } x += (OPJ_UINT32)c; c = getc(IN); if (c == EOF) { return OPJ_FALSE; } y += (OPJ_UINT32)c; pix = pData + y * stride + x; } else { /* 03 .. 255 */ int j; for (j = 0; (j < c) && (x < width) && ((OPJ_SIZE_T)pix < (OPJ_SIZE_T)beyond); j++, x++, pix++) { int c1_int; OPJ_UINT8 c1; c1_int = getc(IN); if (c1_int == EOF) { return OPJ_FALSE; } c1 = (OPJ_UINT8)c1_int; *pix = c1; written++; } if ((OPJ_UINT32)c & 1U) { /* skip padding byte */ c = getc(IN); if (c == EOF) { return OPJ_FALSE; } } } } }/* while() */ if (written != width * height) { fprintf(stderr, "warning, image's actual size does not match advertized one\n"); return OPJ_FALSE; } return OPJ_TRUE; } static OPJ_BOOL bmp_read_rle4_data(FILE* IN, OPJ_UINT8* pData, OPJ_UINT32 stride, OPJ_UINT32 width, OPJ_UINT32 height) { OPJ_UINT32 x, y; OPJ_UINT8 *pix; const OPJ_UINT8 *beyond; beyond = pData + stride * height; pix = pData; x = y = 0U; while (y < height) { int c = getc(IN); if (c == EOF) { break; } if (c) { /* encoded mode */ int j; OPJ_UINT8 c1 = (OPJ_UINT8)getc(IN); for (j = 0; (j < c) && (x < width) && ((OPJ_SIZE_T)pix < (OPJ_SIZE_T)beyond); j++, x++, pix++) { *pix = (OPJ_UINT8)((j & 1) ? (c1 & 0x0fU) : ((c1 >> 4) & 0x0fU)); } } else { /* absolute mode */ c = getc(IN); if (c == EOF) { break; } if (c == 0x00) { /* EOL */ x = 0; y++; pix = pData + y * stride; } else if (c == 0x01) { /* EOP */ break; } else if (c == 0x02) { /* MOVE by dxdy */ c = getc(IN); x += (OPJ_UINT32)c; c = getc(IN); y += (OPJ_UINT32)c; pix = pData + y * stride + x; } else { /* 03 .. 255 : absolute mode */ int j; OPJ_UINT8 c1 = 0U; for (j = 0; (j < c) && (x < width) && ((OPJ_SIZE_T)pix < (OPJ_SIZE_T)beyond); j++, x++, pix++) { if ((j & 1) == 0) { c1 = (OPJ_UINT8)getc(IN); } *pix = (OPJ_UINT8)((j & 1) ? 
(c1 & 0x0fU) : ((c1 >> 4) & 0x0fU)); } if (((c & 3) == 1) || ((c & 3) == 2)) { /* skip padding byte */ getc(IN); } } } } /* while(y < height) */ return OPJ_TRUE; } opj_image_t* bmptoimage(const char *filename, opj_cparameters_t *parameters) { opj_image_cmptparm_t cmptparm[4]; /* maximum of 4 components */ OPJ_UINT8 lut_R[256], lut_G[256], lut_B[256]; OPJ_UINT8 const* pLUT[3]; opj_image_t * image = NULL; FILE *IN; OPJ_BITMAPFILEHEADER File_h; OPJ_BITMAPINFOHEADER Info_h; OPJ_UINT32 i, palette_len, numcmpts = 1U; OPJ_BOOL l_result = OPJ_FALSE; OPJ_UINT8* pData = NULL; OPJ_UINT32 stride; pLUT[0] = lut_R; pLUT[1] = lut_G; pLUT[2] = lut_B; IN = fopen(filename, "rb"); if (!IN) { fprintf(stderr, "Failed to open %s for reading !!\n", filename); return NULL; } if (!bmp_read_file_header(IN, &File_h)) { fclose(IN); return NULL; } if (!bmp_read_info_header(IN, &Info_h)) { fclose(IN); return NULL; } /* Load palette */ if (Info_h.biBitCount <= 8U) { memset(&lut_R[0], 0, sizeof(lut_R)); memset(&lut_G[0], 0, sizeof(lut_G)); memset(&lut_B[0], 0, sizeof(lut_B)); palette_len = Info_h.biClrUsed; if ((palette_len == 0U) && (Info_h.biBitCount <= 8U)) { palette_len = (1U << Info_h.biBitCount); } if (palette_len > 256U) { palette_len = 256U; } if (palette_len > 0U) { OPJ_UINT8 has_color = 0U; for (i = 0U; i < palette_len; i++) { lut_B[i] = (OPJ_UINT8)getc(IN); lut_G[i] = (OPJ_UINT8)getc(IN); lut_R[i] = (OPJ_UINT8)getc(IN); (void)getc(IN); /* padding */ has_color |= (lut_B[i] ^ lut_G[i]) | (lut_G[i] ^ lut_R[i]); } if (has_color) { numcmpts = 3U; } } } else { numcmpts = 3U; if ((Info_h.biCompression == 3) && (Info_h.biAlphaMask != 0U)) { numcmpts++; } } if (Info_h.biWidth == 0 || Info_h.biHeight == 0) { fclose(IN); return NULL; } if (Info_h.biBitCount > (((OPJ_UINT32) - 1) - 31) / Info_h.biWidth) { fclose(IN); return NULL; } stride = ((Info_h.biWidth * Info_h.biBitCount + 31U) / 32U) * 4U; /* rows are aligned on 32bits */ if (Info_h.biBitCount == 4 && Info_h.biCompression == 2) { /* RLE 4 gets decoded as 8 bits data for now... */ if (8 > (((OPJ_UINT32) - 1) - 31) / Info_h.biWidth) { fclose(IN); return NULL; } stride = ((Info_h.biWidth * 8U + 31U) / 32U) * 4U; } if (stride > ((OPJ_UINT32) - 1) / sizeof(OPJ_UINT8) / Info_h.biHeight) { fclose(IN); return NULL; } pData = (OPJ_UINT8 *) calloc(1, sizeof(OPJ_UINT8) * stride * Info_h.biHeight); if (pData == NULL) { fclose(IN); return NULL; } /* Place the cursor at the beginning of the image information */ fseek(IN, 0, SEEK_SET); fseek(IN, (long)File_h.bfOffBits, SEEK_SET); switch (Info_h.biCompression) { case 0: case 3: /* read raw data */ l_result = bmp_read_raw_data(IN, pData, stride, Info_h.biWidth, Info_h.biHeight); break; case 1: /* read rle8 data */ l_result = bmp_read_rle8_data(IN, pData, stride, Info_h.biWidth, Info_h.biHeight); break; case 2: /* read rle4 data */ l_result = bmp_read_rle4_data(IN, pData, stride, Info_h.biWidth, Info_h.biHeight); break; default: fprintf(stderr, "Unsupported BMP compression\n"); l_result = OPJ_FALSE; break; } if (!l_result) { free(pData); fclose(IN); return NULL; } /* create the image */ memset(&cmptparm[0], 0, sizeof(cmptparm)); for (i = 0; i < 4U; i++) { cmptparm[i].prec = 8; cmptparm[i].bpp = 8; cmptparm[i].sgnd = 0; cmptparm[i].dx = (OPJ_UINT32)parameters->subsampling_dx; cmptparm[i].dy = (OPJ_UINT32)parameters->subsampling_dy; cmptparm[i].w = Info_h.biWidth; cmptparm[i].h = Info_h.biHeight; } image = opj_image_create(numcmpts, &cmptparm[0], (numcmpts == 1U) ? 
OPJ_CLRSPC_GRAY : OPJ_CLRSPC_SRGB); if (!image) { fclose(IN); free(pData); return NULL; } if (numcmpts == 4U) { image->comps[3].alpha = 1; } /* set image offset and reference grid */ image->x0 = (OPJ_UINT32)parameters->image_offset_x0; image->y0 = (OPJ_UINT32)parameters->image_offset_y0; image->x1 = image->x0 + (Info_h.biWidth - 1U) * (OPJ_UINT32) parameters->subsampling_dx + 1U; image->y1 = image->y0 + (Info_h.biHeight - 1U) * (OPJ_UINT32) parameters->subsampling_dy + 1U; /* Read the data */ if (Info_h.biBitCount == 24 && Info_h.biCompression == 0) { /*RGB */ bmp24toimage(pData, stride, image); } else if (Info_h.biBitCount == 8 && Info_h.biCompression == 0) { /* RGB 8bpp Indexed */ bmp8toimage(pData, stride, image, pLUT); } else if (Info_h.biBitCount == 8 && Info_h.biCompression == 1) { /*RLE8*/ bmp8toimage(pData, stride, image, pLUT); } else if (Info_h.biBitCount == 4 && Info_h.biCompression == 2) { /*RLE4*/ bmp8toimage(pData, stride, image, pLUT); /* RLE 4 gets decoded as 8 bits data for now */ } else if (Info_h.biBitCount == 32 && Info_h.biCompression == 0) { /* RGBX */ bmpmask32toimage(pData, stride, image, 0x00FF0000U, 0x0000FF00U, 0x000000FFU, 0x00000000U); } else if (Info_h.biBitCount == 32 && Info_h.biCompression == 3) { /* bitmask */ if ((Info_h.biRedMask == 0U) && (Info_h.biGreenMask == 0U) && (Info_h.biBlueMask == 0U)) { Info_h.biRedMask = 0x00FF0000U; Info_h.biGreenMask = 0x0000FF00U; Info_h.biBlueMask = 0x000000FFU; } bmpmask32toimage(pData, stride, image, Info_h.biRedMask, Info_h.biGreenMask, Info_h.biBlueMask, Info_h.biAlphaMask); } else if (Info_h.biBitCount == 16 && Info_h.biCompression == 0) { /* RGBX */ bmpmask16toimage(pData, stride, image, 0x7C00U, 0x03E0U, 0x001FU, 0x0000U); } else if (Info_h.biBitCount == 16 && Info_h.biCompression == 3) { /* bitmask */ if ((Info_h.biRedMask == 0U) && (Info_h.biGreenMask == 0U) && (Info_h.biBlueMask == 0U)) { Info_h.biRedMask = 0xF800U; Info_h.biGreenMask = 0x07E0U; Info_h.biBlueMask = 0x001FU; } bmpmask16toimage(pData, stride, image, Info_h.biRedMask, Info_h.biGreenMask, Info_h.biBlueMask, Info_h.biAlphaMask); } else { opj_image_destroy(image); image = NULL; fprintf(stderr, "Other system than 24 bits/pixels or 8 bits (no RLE coding) is not yet implemented [%d]\n", Info_h.biBitCount); } free(pData); fclose(IN); return image; } int imagetobmp(opj_image_t * image, const char *outfile) { int w, h; int i, pad; FILE *fdest = NULL; int adjustR, adjustG, adjustB; if (image->comps[0].prec < 8) { fprintf(stderr, "imagetobmp: Unsupported precision: %d\n", image->comps[0].prec); return 1; } if (image->numcomps >= 3 && image->comps[0].dx == image->comps[1].dx && image->comps[1].dx == image->comps[2].dx && image->comps[0].dy == image->comps[1].dy && image->comps[1].dy == image->comps[2].dy && image->comps[0].prec == image->comps[1].prec && image->comps[1].prec == image->comps[2].prec && image->comps[0].sgnd == image->comps[1].sgnd && image->comps[1].sgnd == image->comps[2].sgnd) { /* -->> -->> -->> -->> 24 bits color <<-- <<-- <<-- <<-- */ fdest = fopen(outfile, "wb"); if (!fdest) { fprintf(stderr, "ERROR -> failed to open %s for writing\n", outfile); return 1; } w = (int)image->comps[0].w; h = (int)image->comps[0].h; fprintf(fdest, "BM"); /* FILE HEADER */ /* ------------- */ fprintf(fdest, "%c%c%c%c", (OPJ_UINT8)(h * w * 3 + 3 * h * (w % 2) + 54) & 0xff, (OPJ_UINT8)((h * w * 3 + 3 * h * (w % 2) + 54) >> 8) & 0xff, (OPJ_UINT8)((h * w * 3 + 3 * h * (w % 2) + 54) >> 16) & 0xff, (OPJ_UINT8)((h * w * 3 + 3 * h * (w % 2) + 54) >> 24) & 0xff); 
fprintf(fdest, "%c%c%c%c", (0) & 0xff, ((0) >> 8) & 0xff, ((0) >> 16) & 0xff, ((0) >> 24) & 0xff); fprintf(fdest, "%c%c%c%c", (54) & 0xff, ((54) >> 8) & 0xff, ((54) >> 16) & 0xff, ((54) >> 24) & 0xff); /* INFO HEADER */ /* ------------- */ fprintf(fdest, "%c%c%c%c", (40) & 0xff, ((40) >> 8) & 0xff, ((40) >> 16) & 0xff, ((40) >> 24) & 0xff); fprintf(fdest, "%c%c%c%c", (OPJ_UINT8)((w) & 0xff), (OPJ_UINT8)((w) >> 8) & 0xff, (OPJ_UINT8)((w) >> 16) & 0xff, (OPJ_UINT8)((w) >> 24) & 0xff); fprintf(fdest, "%c%c%c%c", (OPJ_UINT8)((h) & 0xff), (OPJ_UINT8)((h) >> 8) & 0xff, (OPJ_UINT8)((h) >> 16) & 0xff, (OPJ_UINT8)((h) >> 24) & 0xff); fprintf(fdest, "%c%c", (1) & 0xff, ((1) >> 8) & 0xff); fprintf(fdest, "%c%c", (24) & 0xff, ((24) >> 8) & 0xff); fprintf(fdest, "%c%c%c%c", (0) & 0xff, ((0) >> 8) & 0xff, ((0) >> 16) & 0xff, ((0) >> 24) & 0xff); fprintf(fdest, "%c%c%c%c", (OPJ_UINT8)(3 * h * w + 3 * h * (w % 2)) & 0xff, (OPJ_UINT8)((h * w * 3 + 3 * h * (w % 2)) >> 8) & 0xff, (OPJ_UINT8)((h * w * 3 + 3 * h * (w % 2)) >> 16) & 0xff, (OPJ_UINT8)((h * w * 3 + 3 * h * (w % 2)) >> 24) & 0xff); fprintf(fdest, "%c%c%c%c", (7834) & 0xff, ((7834) >> 8) & 0xff, ((7834) >> 16) & 0xff, ((7834) >> 24) & 0xff); fprintf(fdest, "%c%c%c%c", (7834) & 0xff, ((7834) >> 8) & 0xff, ((7834) >> 16) & 0xff, ((7834) >> 24) & 0xff); fprintf(fdest, "%c%c%c%c", (0) & 0xff, ((0) >> 8) & 0xff, ((0) >> 16) & 0xff, ((0) >> 24) & 0xff); fprintf(fdest, "%c%c%c%c", (0) & 0xff, ((0) >> 8) & 0xff, ((0) >> 16) & 0xff, ((0) >> 24) & 0xff); if (image->comps[0].prec > 8) { adjustR = (int)image->comps[0].prec - 8; printf("BMP CONVERSION: Truncating component 0 from %d bits to 8 bits\n", image->comps[0].prec); } else { adjustR = 0; } if (image->comps[1].prec > 8) { adjustG = (int)image->comps[1].prec - 8; printf("BMP CONVERSION: Truncating component 1 from %d bits to 8 bits\n", image->comps[1].prec); } else { adjustG = 0; } if (image->comps[2].prec > 8) { adjustB = (int)image->comps[2].prec - 8; printf("BMP CONVERSION: Truncating component 2 from %d bits to 8 bits\n", image->comps[2].prec); } else { adjustB = 0; } for (i = 0; i < w * h; i++) { OPJ_UINT8 rc, gc, bc; int r, g, b; r = image->comps[0].data[w * h - ((i) / (w) + 1) * w + (i) % (w)]; r += (image->comps[0].sgnd ? 1 << (image->comps[0].prec - 1) : 0); if (adjustR > 0) { r = ((r >> adjustR) + ((r >> (adjustR - 1)) % 2)); } if (r > 255) { r = 255; } else if (r < 0) { r = 0; } rc = (OPJ_UINT8)r; g = image->comps[1].data[w * h - ((i) / (w) + 1) * w + (i) % (w)]; g += (image->comps[1].sgnd ? 1 << (image->comps[1].prec - 1) : 0); if (adjustG > 0) { g = ((g >> adjustG) + ((g >> (adjustG - 1)) % 2)); } if (g > 255) { g = 255; } else if (g < 0) { g = 0; } gc = (OPJ_UINT8)g; b = image->comps[2].data[w * h - ((i) / (w) + 1) * w + (i) % (w)]; b += (image->comps[2].sgnd ? 1 << (image->comps[2].prec - 1) : 0); if (adjustB > 0) { b = ((b >> adjustB) + ((b >> (adjustB - 1)) % 2)); } if (b > 255) { b = 255; } else if (b < 0) { b = 0; } bc = (OPJ_UINT8)b; fprintf(fdest, "%c%c%c", bc, gc, rc); if ((i + 1) % w == 0) { for (pad = ((3 * w) % 4) ? 
(4 - (3 * w) % 4) : 0; pad > 0; pad--) { /* ADD */ fprintf(fdest, "%c", 0); } } } fclose(fdest); } else { /* Gray-scale */ /* -->> -->> -->> -->> 8 bits non code (Gray scale) <<-- <<-- <<-- <<-- */ fdest = fopen(outfile, "wb"); if (!fdest) { fprintf(stderr, "ERROR -> failed to open %s for writing\n", outfile); return 1; } if (image->numcomps > 1) { fprintf(stderr, "imagetobmp: only first component of %d is used.\n", image->numcomps); } w = (int)image->comps[0].w; h = (int)image->comps[0].h; fprintf(fdest, "BM"); /* FILE HEADER */ /* ------------- */ fprintf(fdest, "%c%c%c%c", (OPJ_UINT8)(h * w + 54 + 1024 + h * (w % 2)) & 0xff, (OPJ_UINT8)((h * w + 54 + 1024 + h * (w % 2)) >> 8) & 0xff, (OPJ_UINT8)((h * w + 54 + 1024 + h * (w % 2)) >> 16) & 0xff, (OPJ_UINT8)((h * w + 54 + 1024 + w * (w % 2)) >> 24) & 0xff); fprintf(fdest, "%c%c%c%c", (0) & 0xff, ((0) >> 8) & 0xff, ((0) >> 16) & 0xff, ((0) >> 24) & 0xff); fprintf(fdest, "%c%c%c%c", (54 + 1024) & 0xff, ((54 + 1024) >> 8) & 0xff, ((54 + 1024) >> 16) & 0xff, ((54 + 1024) >> 24) & 0xff); /* INFO HEADER */ /* ------------- */ fprintf(fdest, "%c%c%c%c", (40) & 0xff, ((40) >> 8) & 0xff, ((40) >> 16) & 0xff, ((40) >> 24) & 0xff); fprintf(fdest, "%c%c%c%c", (OPJ_UINT8)((w) & 0xff), (OPJ_UINT8)((w) >> 8) & 0xff, (OPJ_UINT8)((w) >> 16) & 0xff, (OPJ_UINT8)((w) >> 24) & 0xff); fprintf(fdest, "%c%c%c%c", (OPJ_UINT8)((h) & 0xff), (OPJ_UINT8)((h) >> 8) & 0xff, (OPJ_UINT8)((h) >> 16) & 0xff, (OPJ_UINT8)((h) >> 24) & 0xff); fprintf(fdest, "%c%c", (1) & 0xff, ((1) >> 8) & 0xff); fprintf(fdest, "%c%c", (8) & 0xff, ((8) >> 8) & 0xff); fprintf(fdest, "%c%c%c%c", (0) & 0xff, ((0) >> 8) & 0xff, ((0) >> 16) & 0xff, ((0) >> 24) & 0xff); fprintf(fdest, "%c%c%c%c", (OPJ_UINT8)(h * w + h * (w % 2)) & 0xff, (OPJ_UINT8)((h * w + h * (w % 2)) >> 8) & 0xff, (OPJ_UINT8)((h * w + h * (w % 2)) >> 16) & 0xff, (OPJ_UINT8)((h * w + h * (w % 2)) >> 24) & 0xff); fprintf(fdest, "%c%c%c%c", (7834) & 0xff, ((7834) >> 8) & 0xff, ((7834) >> 16) & 0xff, ((7834) >> 24) & 0xff); fprintf(fdest, "%c%c%c%c", (7834) & 0xff, ((7834) >> 8) & 0xff, ((7834) >> 16) & 0xff, ((7834) >> 24) & 0xff); fprintf(fdest, "%c%c%c%c", (256) & 0xff, ((256) >> 8) & 0xff, ((256) >> 16) & 0xff, ((256) >> 24) & 0xff); fprintf(fdest, "%c%c%c%c", (256) & 0xff, ((256) >> 8) & 0xff, ((256) >> 16) & 0xff, ((256) >> 24) & 0xff); if (image->comps[0].prec > 8) { adjustR = (int)image->comps[0].prec - 8; printf("BMP CONVERSION: Truncating component 0 from %d bits to 8 bits\n", image->comps[0].prec); } else { adjustR = 0; } for (i = 0; i < 256; i++) { fprintf(fdest, "%c%c%c%c", i, i, i, 0); } for (i = 0; i < w * h; i++) { int r; r = image->comps[0].data[w * h - ((i) / (w) + 1) * w + (i) % (w)]; r += (image->comps[0].sgnd ? 1 << (image->comps[0].prec - 1) : 0); if (adjustR > 0) { r = ((r >> adjustR) + ((r >> (adjustR - 1)) % 2)); } if (r > 255) { r = 255; } else if (r < 0) { r = 0; } fprintf(fdest, "%c", (OPJ_UINT8)r); if ((i + 1) % w == 0) { for (pad = (w % 4) ? (4 - w % 4) : 0; pad > 0; pad--) { /* ADD */ fprintf(fdest, "%c", 0); } } } fclose(fdest); } return 0; }
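/*
 * Illustrative note (not part of the original converter): the row stride used
 * in bmptoimage() above, stride = ((biWidth * biBitCount + 31U) / 32U) * 4U,
 * rounds each scan line up to a 32-bit boundary as the BMP format requires.
 * A quick worked example, assuming a hypothetical 3-pixel-wide, 24-bit image:
 * (3 * 24 + 31) / 32 = 103 / 32 = 3 (integer division), and 3 * 4 = 12 bytes
 * per row, i.e. 9 bytes of pixel data plus 3 bytes of padding.
 */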
./CrossVul/dataset_final_sorted/CWE-400/c/good_897_0
crossvul-cpp_data_bad_1242_0
// SPDX-License-Identifier: GPL-2.0-only /* CAN driver for Geschwister Schneider USB/CAN devices * and bytewerk.org candleLight USB CAN interfaces. * * Copyright (C) 2013-2016 Geschwister Schneider Technologie-, * Entwicklungs- und Vertriebs UG (Haftungsbeschränkt). * Copyright (C) 2016 Hubert Denkmair * * Many thanks to all socketcan devs! */ #include <linux/init.h> #include <linux/signal.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/usb.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> /* Device specific constants */ #define USB_GSUSB_1_VENDOR_ID 0x1d50 #define USB_GSUSB_1_PRODUCT_ID 0x606f #define USB_CANDLELIGHT_VENDOR_ID 0x1209 #define USB_CANDLELIGHT_PRODUCT_ID 0x2323 #define GSUSB_ENDPOINT_IN 1 #define GSUSB_ENDPOINT_OUT 2 /* Device specific constants */ enum gs_usb_breq { GS_USB_BREQ_HOST_FORMAT = 0, GS_USB_BREQ_BITTIMING, GS_USB_BREQ_MODE, GS_USB_BREQ_BERR, GS_USB_BREQ_BT_CONST, GS_USB_BREQ_DEVICE_CONFIG, GS_USB_BREQ_TIMESTAMP, GS_USB_BREQ_IDENTIFY, }; enum gs_can_mode { /* reset a channel. turns it off */ GS_CAN_MODE_RESET = 0, /* starts a channel */ GS_CAN_MODE_START }; enum gs_can_state { GS_CAN_STATE_ERROR_ACTIVE = 0, GS_CAN_STATE_ERROR_WARNING, GS_CAN_STATE_ERROR_PASSIVE, GS_CAN_STATE_BUS_OFF, GS_CAN_STATE_STOPPED, GS_CAN_STATE_SLEEPING }; enum gs_can_identify_mode { GS_CAN_IDENTIFY_OFF = 0, GS_CAN_IDENTIFY_ON }; /* data types passed between host and device */ struct gs_host_config { u32 byte_order; } __packed; /* All data exchanged between host and device is exchanged in host byte order, * thanks to the struct gs_host_config byte_order member, which is sent first * to indicate the desired byte order. */ struct gs_device_config { u8 reserved1; u8 reserved2; u8 reserved3; u8 icount; u32 sw_version; u32 hw_version; } __packed; #define GS_CAN_MODE_NORMAL 0 #define GS_CAN_MODE_LISTEN_ONLY BIT(0) #define GS_CAN_MODE_LOOP_BACK BIT(1) #define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2) #define GS_CAN_MODE_ONE_SHOT BIT(3) struct gs_device_mode { u32 mode; u32 flags; } __packed; struct gs_device_state { u32 state; u32 rxerr; u32 txerr; } __packed; struct gs_device_bittiming { u32 prop_seg; u32 phase_seg1; u32 phase_seg2; u32 sjw; u32 brp; } __packed; struct gs_identify_mode { u32 mode; } __packed; #define GS_CAN_FEATURE_LISTEN_ONLY BIT(0) #define GS_CAN_FEATURE_LOOP_BACK BIT(1) #define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2) #define GS_CAN_FEATURE_ONE_SHOT BIT(3) #define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4) #define GS_CAN_FEATURE_IDENTIFY BIT(5) struct gs_device_bt_const { u32 feature; u32 fclk_can; u32 tseg1_min; u32 tseg1_max; u32 tseg2_min; u32 tseg2_max; u32 sjw_max; u32 brp_min; u32 brp_max; u32 brp_inc; } __packed; #define GS_CAN_FLAG_OVERFLOW 1 struct gs_host_frame { u32 echo_id; u32 can_id; u8 can_dlc; u8 channel; u8 flags; u8 reserved; u8 data[8]; } __packed; /* The GS USB devices make use of the same flags and masks as in * linux/can.h and linux/can/error.h, and no additional mapping is necessary. */ /* Only send a max of GS_MAX_TX_URBS frames per channel at a time. */ #define GS_MAX_TX_URBS 10 /* Only launch a max of GS_MAX_RX_URBS usb requests at a time. */ #define GS_MAX_RX_URBS 30 /* Maximum number of interfaces the driver supports per device. * Current hardware only supports 2 interfaces. The future may vary. 
*/ #define GS_MAX_INTF 2 struct gs_tx_context { struct gs_can *dev; unsigned int echo_id; }; struct gs_can { struct can_priv can; /* must be the first member */ struct gs_usb *parent; struct net_device *netdev; struct usb_device *udev; struct usb_interface *iface; struct can_bittiming_const bt_const; unsigned int channel; /* channel number */ /* This lock prevents a race condition between xmit and receive. */ spinlock_t tx_ctx_lock; struct gs_tx_context tx_context[GS_MAX_TX_URBS]; struct usb_anchor tx_submitted; atomic_t active_tx_urbs; }; /* usb interface struct */ struct gs_usb { struct gs_can *canch[GS_MAX_INTF]; struct usb_anchor rx_submitted; atomic_t active_channels; struct usb_device *udev; }; /* 'allocate' a tx context. * returns a valid tx context or NULL if there is no space. */ static struct gs_tx_context *gs_alloc_tx_context(struct gs_can *dev) { int i = 0; unsigned long flags; spin_lock_irqsave(&dev->tx_ctx_lock, flags); for (; i < GS_MAX_TX_URBS; i++) { if (dev->tx_context[i].echo_id == GS_MAX_TX_URBS) { dev->tx_context[i].echo_id = i; spin_unlock_irqrestore(&dev->tx_ctx_lock, flags); return &dev->tx_context[i]; } } spin_unlock_irqrestore(&dev->tx_ctx_lock, flags); return NULL; } /* releases a tx context */ static void gs_free_tx_context(struct gs_tx_context *txc) { txc->echo_id = GS_MAX_TX_URBS; } /* Get a tx context by id. */ static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev, unsigned int id) { unsigned long flags; if (id < GS_MAX_TX_URBS) { spin_lock_irqsave(&dev->tx_ctx_lock, flags); if (dev->tx_context[id].echo_id == id) { spin_unlock_irqrestore(&dev->tx_ctx_lock, flags); return &dev->tx_context[id]; } spin_unlock_irqrestore(&dev->tx_ctx_lock, flags); } return NULL; } static int gs_cmd_reset(struct gs_can *gsdev) { struct gs_device_mode *dm; struct usb_interface *intf = gsdev->iface; int rc; dm = kzalloc(sizeof(*dm), GFP_KERNEL); if (!dm) return -ENOMEM; dm->mode = GS_CAN_MODE_RESET; rc = usb_control_msg(interface_to_usbdev(intf), usb_sndctrlpipe(interface_to_usbdev(intf), 0), GS_USB_BREQ_MODE, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, gsdev->channel, 0, dm, sizeof(*dm), 1000); kfree(dm); return rc; } static void gs_update_state(struct gs_can *dev, struct can_frame *cf) { struct can_device_stats *can_stats = &dev->can.can_stats; if (cf->can_id & CAN_ERR_RESTARTED) { dev->can.state = CAN_STATE_ERROR_ACTIVE; can_stats->restarts++; } else if (cf->can_id & CAN_ERR_BUSOFF) { dev->can.state = CAN_STATE_BUS_OFF; can_stats->bus_off++; } else if (cf->can_id & CAN_ERR_CRTL) { if ((cf->data[1] & CAN_ERR_CRTL_TX_WARNING) || (cf->data[1] & CAN_ERR_CRTL_RX_WARNING)) { dev->can.state = CAN_STATE_ERROR_WARNING; can_stats->error_warning++; } else if ((cf->data[1] & CAN_ERR_CRTL_TX_PASSIVE) || (cf->data[1] & CAN_ERR_CRTL_RX_PASSIVE)) { dev->can.state = CAN_STATE_ERROR_PASSIVE; can_stats->error_passive++; } else { dev->can.state = CAN_STATE_ERROR_ACTIVE; } } } static void gs_usb_receive_bulk_callback(struct urb *urb) { struct gs_usb *usbcan = urb->context; struct gs_can *dev; struct net_device *netdev; int rc; struct net_device_stats *stats; struct gs_host_frame *hf = urb->transfer_buffer; struct gs_tx_context *txc; struct can_frame *cf; struct sk_buff *skb; BUG_ON(!usbcan); switch (urb->status) { case 0: /* success */ break; case -ENOENT: case -ESHUTDOWN: return; default: /* do not resubmit aborted urbs. 
eg: when device goes down */ return; } /* device reports out of range channel id */ if (hf->channel >= GS_MAX_INTF) goto resubmit_urb; dev = usbcan->canch[hf->channel]; netdev = dev->netdev; stats = &netdev->stats; if (!netif_device_present(netdev)) return; if (hf->echo_id == -1) { /* normal rx */ skb = alloc_can_skb(dev->netdev, &cf); if (!skb) return; cf->can_id = hf->can_id; cf->can_dlc = get_can_dlc(hf->can_dlc); memcpy(cf->data, hf->data, 8); /* ERROR frames tell us information about the controller */ if (hf->can_id & CAN_ERR_FLAG) gs_update_state(dev, cf); netdev->stats.rx_packets++; netdev->stats.rx_bytes += hf->can_dlc; netif_rx(skb); } else { /* echo_id == hf->echo_id */ if (hf->echo_id >= GS_MAX_TX_URBS) { netdev_err(netdev, "Unexpected out of range echo id %d\n", hf->echo_id); goto resubmit_urb; } netdev->stats.tx_packets++; netdev->stats.tx_bytes += hf->can_dlc; txc = gs_get_tx_context(dev, hf->echo_id); /* bad devices send bad echo_ids. */ if (!txc) { netdev_err(netdev, "Unexpected unused echo id %d\n", hf->echo_id); goto resubmit_urb; } can_get_echo_skb(netdev, hf->echo_id); gs_free_tx_context(txc); atomic_dec(&dev->active_tx_urbs); netif_wake_queue(netdev); } if (hf->flags & GS_CAN_FLAG_OVERFLOW) { skb = alloc_can_err_skb(netdev, &cf); if (!skb) goto resubmit_urb; cf->can_id |= CAN_ERR_CRTL; cf->can_dlc = CAN_ERR_DLC; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; stats->rx_over_errors++; stats->rx_errors++; netif_rx(skb); } resubmit_urb: usb_fill_bulk_urb(urb, usbcan->udev, usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN), hf, sizeof(struct gs_host_frame), gs_usb_receive_bulk_callback, usbcan ); rc = usb_submit_urb(urb, GFP_ATOMIC); /* USB failure take down all interfaces */ if (rc == -ENODEV) { for (rc = 0; rc < GS_MAX_INTF; rc++) { if (usbcan->canch[rc]) netif_device_detach(usbcan->canch[rc]->netdev); } } } static int gs_usb_set_bittiming(struct net_device *netdev) { struct gs_can *dev = netdev_priv(netdev); struct can_bittiming *bt = &dev->can.bittiming; struct usb_interface *intf = dev->iface; int rc; struct gs_device_bittiming *dbt; dbt = kmalloc(sizeof(*dbt), GFP_KERNEL); if (!dbt) return -ENOMEM; dbt->prop_seg = bt->prop_seg; dbt->phase_seg1 = bt->phase_seg1; dbt->phase_seg2 = bt->phase_seg2; dbt->sjw = bt->sjw; dbt->brp = bt->brp; /* request bit timings */ rc = usb_control_msg(interface_to_usbdev(intf), usb_sndctrlpipe(interface_to_usbdev(intf), 0), GS_USB_BREQ_BITTIMING, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, dev->channel, 0, dbt, sizeof(*dbt), 1000); kfree(dbt); if (rc < 0) dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)", rc); return (rc > 0) ? 
0 : rc; } static void gs_usb_xmit_callback(struct urb *urb) { struct gs_tx_context *txc = urb->context; struct gs_can *dev = txc->dev; struct net_device *netdev = dev->netdev; if (urb->status) netdev_info(netdev, "usb xmit fail %d\n", txc->echo_id); usb_free_coherent(urb->dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); } static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct gs_can *dev = netdev_priv(netdev); struct net_device_stats *stats = &dev->netdev->stats; struct urb *urb; struct gs_host_frame *hf; struct can_frame *cf; int rc; unsigned int idx; struct gs_tx_context *txc; if (can_dropped_invalid_skb(netdev, skb)) return NETDEV_TX_OK; /* find an empty context to keep track of transmission */ txc = gs_alloc_tx_context(dev); if (!txc) return NETDEV_TX_BUSY; /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) goto nomem_urb; hf = usb_alloc_coherent(dev->udev, sizeof(*hf), GFP_ATOMIC, &urb->transfer_dma); if (!hf) { netdev_err(netdev, "No memory left for USB buffer\n"); goto nomem_hf; } idx = txc->echo_id; if (idx >= GS_MAX_TX_URBS) { netdev_err(netdev, "Invalid tx context %d\n", idx); goto badidx; } hf->echo_id = idx; hf->channel = dev->channel; cf = (struct can_frame *)skb->data; hf->can_id = cf->can_id; hf->can_dlc = cf->can_dlc; memcpy(hf->data, cf->data, cf->can_dlc); usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT), hf, sizeof(*hf), gs_usb_xmit_callback, txc); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &dev->tx_submitted); can_put_echo_skb(skb, netdev, idx); atomic_inc(&dev->active_tx_urbs); rc = usb_submit_urb(urb, GFP_ATOMIC); if (unlikely(rc)) { /* usb send failed */ atomic_dec(&dev->active_tx_urbs); can_free_echo_skb(netdev, idx); gs_free_tx_context(txc); usb_unanchor_urb(urb); usb_free_coherent(dev->udev, sizeof(*hf), hf, urb->transfer_dma); if (rc == -ENODEV) { netif_device_detach(netdev); } else { netdev_err(netdev, "usb_submit failed (err=%d)\n", rc); stats->tx_dropped++; } } else { /* Slow down tx path */ if (atomic_read(&dev->active_tx_urbs) >= GS_MAX_TX_URBS) netif_stop_queue(netdev); } /* let usb core take care of this urb */ usb_free_urb(urb); return NETDEV_TX_OK; badidx: usb_free_coherent(dev->udev, sizeof(*hf), hf, urb->transfer_dma); nomem_hf: usb_free_urb(urb); nomem_urb: gs_free_tx_context(txc); dev_kfree_skb(skb); stats->tx_dropped++; return NETDEV_TX_OK; } static int gs_can_open(struct net_device *netdev) { struct gs_can *dev = netdev_priv(netdev); struct gs_usb *parent = dev->parent; int rc, i; struct gs_device_mode *dm; u32 ctrlmode; rc = open_candev(netdev); if (rc) return rc; if (atomic_add_return(1, &parent->active_channels) == 1) { for (i = 0; i < GS_MAX_RX_URBS; i++) { struct urb *urb; u8 *buf; /* alloc rx urb */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return -ENOMEM; /* alloc rx buffer */ buf = usb_alloc_coherent(dev->udev, sizeof(struct gs_host_frame), GFP_KERNEL, &urb->transfer_dma); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); return -ENOMEM; } /* fill, anchor, and submit rx urb */ usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, GSUSB_ENDPOINT_IN), buf, sizeof(struct gs_host_frame), gs_usb_receive_bulk_callback, parent); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &parent->rx_submitted); rc = usb_submit_urb(urb, GFP_KERNEL); if (rc) { if (rc == -ENODEV) netif_device_detach(dev->netdev); netdev_err(netdev, 
"usb_submit failed (err=%d)\n", rc); usb_unanchor_urb(urb); break; } /* Drop reference, * USB core will take care of freeing it */ usb_free_urb(urb); } } dm = kmalloc(sizeof(*dm), GFP_KERNEL); if (!dm) return -ENOMEM; /* flags */ ctrlmode = dev->can.ctrlmode; dm->flags = 0; if (ctrlmode & CAN_CTRLMODE_LOOPBACK) dm->flags |= GS_CAN_MODE_LOOP_BACK; else if (ctrlmode & CAN_CTRLMODE_LISTENONLY) dm->flags |= GS_CAN_MODE_LISTEN_ONLY; /* Controller is not allowed to retry TX * this mode is unavailable on atmels uc3c hardware */ if (ctrlmode & CAN_CTRLMODE_ONE_SHOT) dm->flags |= GS_CAN_MODE_ONE_SHOT; if (ctrlmode & CAN_CTRLMODE_3_SAMPLES) dm->flags |= GS_CAN_MODE_TRIPLE_SAMPLE; /* finally start device */ dm->mode = GS_CAN_MODE_START; rc = usb_control_msg(interface_to_usbdev(dev->iface), usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0), GS_USB_BREQ_MODE, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, dev->channel, 0, dm, sizeof(*dm), 1000); if (rc < 0) { netdev_err(netdev, "Couldn't start device (err=%d)\n", rc); kfree(dm); return rc; } kfree(dm); dev->can.state = CAN_STATE_ERROR_ACTIVE; if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) netif_start_queue(netdev); return 0; } static int gs_can_close(struct net_device *netdev) { int rc; struct gs_can *dev = netdev_priv(netdev); struct gs_usb *parent = dev->parent; netif_stop_queue(netdev); /* Stop polling */ if (atomic_dec_and_test(&parent->active_channels)) usb_kill_anchored_urbs(&parent->rx_submitted); /* Stop sending URBs */ usb_kill_anchored_urbs(&dev->tx_submitted); atomic_set(&dev->active_tx_urbs, 0); /* reset the device */ rc = gs_cmd_reset(dev); if (rc < 0) netdev_warn(netdev, "Couldn't shutdown device (err=%d)", rc); /* reset tx contexts */ for (rc = 0; rc < GS_MAX_TX_URBS; rc++) { dev->tx_context[rc].dev = dev; dev->tx_context[rc].echo_id = GS_MAX_TX_URBS; } /* close the netdev */ close_candev(netdev); return 0; } static const struct net_device_ops gs_usb_netdev_ops = { .ndo_open = gs_can_open, .ndo_stop = gs_can_close, .ndo_start_xmit = gs_can_start_xmit, .ndo_change_mtu = can_change_mtu, }; static int gs_usb_set_identify(struct net_device *netdev, bool do_identify) { struct gs_can *dev = netdev_priv(netdev); struct gs_identify_mode *imode; int rc; imode = kmalloc(sizeof(*imode), GFP_KERNEL); if (!imode) return -ENOMEM; if (do_identify) imode->mode = GS_CAN_IDENTIFY_ON; else imode->mode = GS_CAN_IDENTIFY_OFF; rc = usb_control_msg(interface_to_usbdev(dev->iface), usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0), GS_USB_BREQ_IDENTIFY, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, dev->channel, 0, imode, sizeof(*imode), 100); kfree(imode); return (rc > 0) ? 
0 : rc; } /* blink LED's for finding the this interface */ static int gs_usb_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) { int rc = 0; switch (state) { case ETHTOOL_ID_ACTIVE: rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_ON); break; case ETHTOOL_ID_INACTIVE: rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_OFF); break; default: break; } return rc; } static const struct ethtool_ops gs_usb_ethtool_ops = { .set_phys_id = gs_usb_set_phys_id, }; static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf, struct gs_device_config *dconf) { struct gs_can *dev; struct net_device *netdev; int rc; struct gs_device_bt_const *bt_const; bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL); if (!bt_const) return ERR_PTR(-ENOMEM); /* fetch bit timing constants */ rc = usb_control_msg(interface_to_usbdev(intf), usb_rcvctrlpipe(interface_to_usbdev(intf), 0), GS_USB_BREQ_BT_CONST, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, channel, 0, bt_const, sizeof(*bt_const), 1000); if (rc < 0) { dev_err(&intf->dev, "Couldn't get bit timing const for channel (err=%d)\n", rc); kfree(bt_const); return ERR_PTR(rc); } /* create netdev */ netdev = alloc_candev(sizeof(struct gs_can), GS_MAX_TX_URBS); if (!netdev) { dev_err(&intf->dev, "Couldn't allocate candev\n"); kfree(bt_const); return ERR_PTR(-ENOMEM); } dev = netdev_priv(netdev); netdev->netdev_ops = &gs_usb_netdev_ops; netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */ /* dev settup */ strcpy(dev->bt_const.name, "gs_usb"); dev->bt_const.tseg1_min = bt_const->tseg1_min; dev->bt_const.tseg1_max = bt_const->tseg1_max; dev->bt_const.tseg2_min = bt_const->tseg2_min; dev->bt_const.tseg2_max = bt_const->tseg2_max; dev->bt_const.sjw_max = bt_const->sjw_max; dev->bt_const.brp_min = bt_const->brp_min; dev->bt_const.brp_max = bt_const->brp_max; dev->bt_const.brp_inc = bt_const->brp_inc; dev->udev = interface_to_usbdev(intf); dev->iface = intf; dev->netdev = netdev; dev->channel = channel; init_usb_anchor(&dev->tx_submitted); atomic_set(&dev->active_tx_urbs, 0); spin_lock_init(&dev->tx_ctx_lock); for (rc = 0; rc < GS_MAX_TX_URBS; rc++) { dev->tx_context[rc].dev = dev; dev->tx_context[rc].echo_id = GS_MAX_TX_URBS; } /* can settup */ dev->can.state = CAN_STATE_STOPPED; dev->can.clock.freq = bt_const->fclk_can; dev->can.bittiming_const = &dev->bt_const; dev->can.do_set_bittiming = gs_usb_set_bittiming; dev->can.ctrlmode_supported = 0; if (bt_const->feature & GS_CAN_FEATURE_LISTEN_ONLY) dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY; if (bt_const->feature & GS_CAN_FEATURE_LOOP_BACK) dev->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK; if (bt_const->feature & GS_CAN_FEATURE_TRIPLE_SAMPLE) dev->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT) dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; SET_NETDEV_DEV(netdev, &intf->dev); if (dconf->sw_version > 1) if (bt_const->feature & GS_CAN_FEATURE_IDENTIFY) netdev->ethtool_ops = &gs_usb_ethtool_ops; kfree(bt_const); rc = register_candev(dev->netdev); if (rc) { free_candev(dev->netdev); dev_err(&intf->dev, "Couldn't register candev (err=%d)\n", rc); return ERR_PTR(rc); } return dev; } static void gs_destroy_candev(struct gs_can *dev) { unregister_candev(dev->netdev); usb_kill_anchored_urbs(&dev->tx_submitted); free_candev(dev->netdev); } static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct gs_usb *dev; int rc = -ENOMEM; unsigned int icount, i; struct gs_host_config *hconf; 
struct gs_device_config *dconf; hconf = kmalloc(sizeof(*hconf), GFP_KERNEL); if (!hconf) return -ENOMEM; hconf->byte_order = 0x0000beef; /* send host config */ rc = usb_control_msg(interface_to_usbdev(intf), usb_sndctrlpipe(interface_to_usbdev(intf), 0), GS_USB_BREQ_HOST_FORMAT, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 1, intf->altsetting[0].desc.bInterfaceNumber, hconf, sizeof(*hconf), 1000); kfree(hconf); if (rc < 0) { dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", rc); return rc; } dconf = kmalloc(sizeof(*dconf), GFP_KERNEL); if (!dconf) return -ENOMEM; /* read device config */ rc = usb_control_msg(interface_to_usbdev(intf), usb_rcvctrlpipe(interface_to_usbdev(intf), 0), GS_USB_BREQ_DEVICE_CONFIG, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 1, intf->altsetting[0].desc.bInterfaceNumber, dconf, sizeof(*dconf), 1000); if (rc < 0) { dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n", rc); kfree(dconf); return rc; } icount = dconf->icount + 1; dev_info(&intf->dev, "Configuring for %d interfaces\n", icount); if (icount > GS_MAX_INTF) { dev_err(&intf->dev, "Driver cannot handle more that %d CAN interfaces\n", GS_MAX_INTF); kfree(dconf); return -EINVAL; } dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { kfree(dconf); return -ENOMEM; } init_usb_anchor(&dev->rx_submitted); atomic_set(&dev->active_channels, 0); usb_set_intfdata(intf, dev); dev->udev = interface_to_usbdev(intf); for (i = 0; i < icount; i++) { dev->canch[i] = gs_make_candev(i, intf, dconf); if (IS_ERR_OR_NULL(dev->canch[i])) { /* save error code to return later */ rc = PTR_ERR(dev->canch[i]); /* on failure destroy previously created candevs */ icount = i; for (i = 0; i < icount; i++) gs_destroy_candev(dev->canch[i]); usb_kill_anchored_urbs(&dev->rx_submitted); kfree(dconf); kfree(dev); return rc; } dev->canch[i]->parent = dev; } kfree(dconf); return 0; } static void gs_usb_disconnect(struct usb_interface *intf) { unsigned i; struct gs_usb *dev = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (!dev) { dev_err(&intf->dev, "Disconnect (nodata)\n"); return; } for (i = 0; i < GS_MAX_INTF; i++) if (dev->canch[i]) gs_destroy_candev(dev->canch[i]); usb_kill_anchored_urbs(&dev->rx_submitted); kfree(dev); } static const struct usb_device_id gs_usb_table[] = { { USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID, USB_GSUSB_1_PRODUCT_ID, 0) }, { USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID, USB_CANDLELIGHT_PRODUCT_ID, 0) }, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, gs_usb_table); static struct usb_driver gs_usb_driver = { .name = "gs_usb", .probe = gs_usb_probe, .disconnect = gs_usb_disconnect, .id_table = gs_usb_table, }; module_usb_driver(gs_usb_driver); MODULE_AUTHOR("Maximilian Schneider <mws@schneidersoft.net>"); MODULE_DESCRIPTION( "Socket CAN device driver for Geschwister Schneider Technologie-, " "Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces\n" "and bytewerk.org candleLight USB CAN interfaces."); MODULE_LICENSE("GPL v2");
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1242_0
crossvul-cpp_data_good_1251_0
// SPDX-License-Identifier: GPL-2.0-or-later /* * Common library for ADIS16XXX devices * * Copyright 2012 Analog Devices Inc. * Author: Lars-Peter Clausen <lars@metafoo.de> */ #include <linux/export.h> #include <linux/interrupt.h> #include <linux/mutex.h> #include <linux/kernel.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/iio/iio.h> #include <linux/iio/buffer.h> #include <linux/iio/trigger_consumer.h> #include <linux/iio/triggered_buffer.h> #include <linux/iio/imu/adis.h> static int adis_update_scan_mode_burst(struct iio_dev *indio_dev, const unsigned long *scan_mask) { struct adis *adis = iio_device_get_drvdata(indio_dev); unsigned int burst_length; u8 *tx; /* All but the timestamp channel */ burst_length = (indio_dev->num_channels - 1) * sizeof(u16); burst_length += adis->burst->extra_len; adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL); if (!adis->xfer) return -ENOMEM; adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL); if (!adis->buffer) { kfree(adis->xfer); adis->xfer = NULL; return -ENOMEM; } tx = adis->buffer + burst_length; tx[0] = ADIS_READ_REG(adis->burst->reg_cmd); tx[1] = 0; adis->xfer[0].tx_buf = tx; adis->xfer[0].bits_per_word = 8; adis->xfer[0].len = 2; adis->xfer[1].rx_buf = adis->buffer; adis->xfer[1].bits_per_word = 8; adis->xfer[1].len = burst_length; spi_message_init(&adis->msg); spi_message_add_tail(&adis->xfer[0], &adis->msg); spi_message_add_tail(&adis->xfer[1], &adis->msg); return 0; } int adis_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *scan_mask) { struct adis *adis = iio_device_get_drvdata(indio_dev); const struct iio_chan_spec *chan; unsigned int scan_count; unsigned int i, j; __be16 *tx, *rx; kfree(adis->xfer); kfree(adis->buffer); if (adis->burst && adis->burst->en) return adis_update_scan_mode_burst(indio_dev, scan_mask); scan_count = indio_dev->scan_bytes / 2; adis->xfer = kcalloc(scan_count + 1, sizeof(*adis->xfer), GFP_KERNEL); if (!adis->xfer) return -ENOMEM; adis->buffer = kcalloc(indio_dev->scan_bytes, 2, GFP_KERNEL); if (!adis->buffer) { kfree(adis->xfer); adis->xfer = NULL; return -ENOMEM; } rx = adis->buffer; tx = rx + scan_count; spi_message_init(&adis->msg); for (j = 0; j <= scan_count; j++) { adis->xfer[j].bits_per_word = 8; if (j != scan_count) adis->xfer[j].cs_change = 1; adis->xfer[j].len = 2; adis->xfer[j].delay_usecs = adis->data->read_delay; if (j < scan_count) adis->xfer[j].tx_buf = &tx[j]; if (j >= 1) adis->xfer[j].rx_buf = &rx[j - 1]; spi_message_add_tail(&adis->xfer[j], &adis->msg); } chan = indio_dev->channels; for (i = 0; i < indio_dev->num_channels; i++, chan++) { if (!test_bit(chan->scan_index, scan_mask)) continue; if (chan->scan_type.storagebits == 32) *tx++ = cpu_to_be16((chan->address + 2) << 8); *tx++ = cpu_to_be16(chan->address << 8); } return 0; } EXPORT_SYMBOL_GPL(adis_update_scan_mode); static irqreturn_t adis_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct adis *adis = iio_device_get_drvdata(indio_dev); int ret; if (!adis->buffer) return -ENOMEM; if (adis->data->has_paging) { mutex_lock(&adis->txrx_lock); if (adis->current_page != 0) { adis->tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID); adis->tx[1] = 0; spi_write(adis->spi, adis->tx, 2); } } ret = spi_sync(adis->spi, &adis->msg); if (ret) dev_err(&adis->spi->dev, "Failed to read data: %d", ret); if (adis->data->has_paging) { adis->current_page = 0; mutex_unlock(&adis->txrx_lock); } iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer, 
pf->timestamp); iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; } /** * adis_setup_buffer_and_trigger() - Sets up buffer and trigger for the adis device * @adis: The adis device. * @indio_dev: The IIO device. * @trigger_handler: Optional trigger handler, may be NULL. * * Returns 0 on success, a negative error code otherwise. * * This function sets up the buffer and trigger for a adis devices. If * 'trigger_handler' is NULL the default trigger handler will be used. The * default trigger handler will simply read the registers assigned to the * currently active channels. * * adis_cleanup_buffer_and_trigger() should be called to free the resources * allocated by this function. */ int adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *)) { int ret; if (!trigger_handler) trigger_handler = adis_trigger_handler; ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, trigger_handler, NULL); if (ret) return ret; if (adis->spi->irq) { ret = adis_probe_trigger(adis, indio_dev); if (ret) goto error_buffer_cleanup; } return 0; error_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); return ret; } EXPORT_SYMBOL_GPL(adis_setup_buffer_and_trigger); /** * adis_cleanup_buffer_and_trigger() - Free buffer and trigger resources * @adis: The adis device. * @indio_dev: The IIO device. * * Frees resources allocated by adis_setup_buffer_and_trigger() */ void adis_cleanup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev) { if (adis->spi->irq) adis_remove_trigger(adis); kfree(adis->buffer); kfree(adis->xfer); iio_triggered_buffer_cleanup(indio_dev); } EXPORT_SYMBOL_GPL(adis_cleanup_buffer_and_trigger);
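/*
 * Illustrative usage sketch (not part of the original file): a typical ADIS
 * driver pairs adis_setup_buffer_and_trigger() with
 * adis_cleanup_buffer_and_trigger() around IIO device registration, passing
 * NULL to get the default trigger handler described above. The probe flow
 * below is a minimal, hypothetical example; the state pointer "st" and any
 * device-specific init are assumptions, not taken from this file.
 *
 *	ret = adis_setup_buffer_and_trigger(&st->adis, indio_dev, NULL);
 *	if (ret)
 *		return ret;
 *
 *	ret = iio_device_register(indio_dev);
 *	if (ret)
 *		adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
 *
 *	return ret;
 */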
./CrossVul/dataset_final_sorted/CWE-400/c/good_1251_0
crossvul-cpp_data_bad_5356_1
/* * NET3 Protocol independent device support routines. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Derived from the non IP parts of dev.c 1.0.19 * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Mark Evans, <evansmp@uhura.aston.ac.uk> * * Additional Authors: * Florian la Roche <rzsfl@rz.uni-sb.de> * Alan Cox <gw4pts@gw4pts.ampr.org> * David Hinds <dahinds@users.sourceforge.net> * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> * Adam Sulmicki <adam@cfar.umd.edu> * Pekka Riikonen <priikone@poesidon.pspt.fi> * * Changes: * D.J. Barrow : Fixed bug where dev->refcnt gets set * to 2 if register_netdev gets called * before net_dev_init & also removed a * few lines of code in the process. * Alan Cox : device private ioctl copies fields back. * Alan Cox : Transmit queue code does relevant * stunts to keep the queue safe. * Alan Cox : Fixed double lock. * Alan Cox : Fixed promisc NULL pointer trap * ???????? : Support the full private ioctl range * Alan Cox : Moved ioctl permission check into * drivers * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI * Alan Cox : 100 backlog just doesn't cut it when * you start doing multicast video 8) * Alan Cox : Rewrote net_bh and list manager. * Alan Cox : Fix ETH_P_ALL echoback lengths. * Alan Cox : Took out transmit every packet pass * Saved a few bytes in the ioctl handler * Alan Cox : Network driver sets packet type before * calling netif_rx. Saves a function * call a packet. * Alan Cox : Hashed net_bh() * Richard Kooijman: Timestamp fixes. * Alan Cox : Wrong field in SIOCGIFDSTADDR * Alan Cox : Device lock protection. * Alan Cox : Fixed nasty side effect of device close * changes. * Rudi Cilibrasi : Pass the right thing to * set_mac_address() * Dave Miller : 32bit quantity for the device lock to * make it work out on a Sparc. * Bjorn Ekwall : Added KERNELD hack. * Alan Cox : Cleaned up the backlog initialise. * Craig Metz : SIOCGIFCONF fix if space for under * 1 device. * Thomas Bogendoerfer : Return ENODEV for dev_open, if there * is no device open function. * Andi Kleen : Fix error reporting for SIOCGIFCONF * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF * Cyrus Durgin : Cleaned for KMOD * Adam Sulmicki : Bug Fix : Network Device Unload * A network device unload needs to purge * the backlog queue. 
* Paul Rusty Russell : SIOCSIFNAME * Pekka Riikonen : Netdev boot-time settings code * Andrew Morton : Make unregister_netdevice wait * indefinitely on dev->refcnt * J Hadi Salim : - Backlog queue sampling * - netif_rx() feedback */ #include <asm/uaccess.h> #include <linux/bitops.h> #include <linux/capability.h> #include <linux/cpu.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/hash.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/mutex.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/notifier.h> #include <linux/skbuff.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/busy_poll.h> #include <linux/rtnetlink.h> #include <linux/stat.h> #include <net/dst.h> #include <net/dst_metadata.h> #include <net/pkt_sched.h> #include <net/checksum.h> #include <net/xfrm.h> #include <linux/highmem.h> #include <linux/init.h> #include <linux/module.h> #include <linux/netpoll.h> #include <linux/rcupdate.h> #include <linux/delay.h> #include <net/iw_handler.h> #include <asm/current.h> #include <linux/audit.h> #include <linux/dmaengine.h> #include <linux/err.h> #include <linux/ctype.h> #include <linux/if_arp.h> #include <linux/if_vlan.h> #include <linux/ip.h> #include <net/ip.h> #include <net/mpls.h> #include <linux/ipv6.h> #include <linux/in.h> #include <linux/jhash.h> #include <linux/random.h> #include <trace/events/napi.h> #include <trace/events/net.h> #include <trace/events/skb.h> #include <linux/pci.h> #include <linux/inetdevice.h> #include <linux/cpu_rmap.h> #include <linux/static_key.h> #include <linux/hashtable.h> #include <linux/vmalloc.h> #include <linux/if_macvlan.h> #include <linux/errqueue.h> #include <linux/hrtimer.h> #include <linux/netfilter_ingress.h> #include <linux/sctp.h> #include "net-sysfs.h" /* Instead of increasing this, you should create a hash table. */ #define MAX_GRO_SKBS 8 /* This should be increased if a protocol with a bigger head is added. */ #define GRO_MAX_HEAD (MAX_HEADER + 128) static DEFINE_SPINLOCK(ptype_lock); static DEFINE_SPINLOCK(offload_lock); struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; struct list_head ptype_all __read_mostly; /* Taps */ static struct list_head offload_base __read_mostly; static int netif_rx_internal(struct sk_buff *skb); static int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev, struct netdev_notifier_info *info); /* * The @dev_base_head list is protected by @dev_base_lock and the rtnl * semaphore. * * Pure readers hold dev_base_lock for reading, or rcu_read_lock() * * Writers must hold the rtnl semaphore while they loop through the * dev_base_head list, and hold dev_base_lock for writing when they do the * actual updates. This allows pure readers to access the list even * while a writer is preparing to update it. * * To put it another way, dev_base_lock is held for writing only to * protect against pure readers; the rtnl semaphore provides the * protection against other writers. * * See, for example usages, register_netdevice() and * unregister_netdevice(), which must be called with the rtnl * semaphore held. 
*/ DEFINE_RWLOCK(dev_base_lock); EXPORT_SYMBOL(dev_base_lock); /* protects napi_hash addition/deletion and napi_gen_id */ static DEFINE_SPINLOCK(napi_hash_lock); static unsigned int napi_gen_id = NR_CPUS; static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); static seqcount_t devnet_rename_seq; static inline void dev_base_seq_inc(struct net *net) { while (++net->dev_base_seq == 0); } static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) { unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ)); return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)]; } static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) { return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; } static inline void rps_lock(struct softnet_data *sd) { #ifdef CONFIG_RPS spin_lock(&sd->input_pkt_queue.lock); #endif } static inline void rps_unlock(struct softnet_data *sd) { #ifdef CONFIG_RPS spin_unlock(&sd->input_pkt_queue.lock); #endif } /* Device list insertion */ static void list_netdevice(struct net_device *dev) { struct net *net = dev_net(dev); ASSERT_RTNL(); write_lock_bh(&dev_base_lock); list_add_tail_rcu(&dev->dev_list, &net->dev_base_head); hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name)); hlist_add_head_rcu(&dev->index_hlist, dev_index_hash(net, dev->ifindex)); write_unlock_bh(&dev_base_lock); dev_base_seq_inc(net); } /* Device list removal * caller must respect a RCU grace period before freeing/reusing dev */ static void unlist_netdevice(struct net_device *dev) { ASSERT_RTNL(); /* Unlink dev from the device chain */ write_lock_bh(&dev_base_lock); list_del_rcu(&dev->dev_list); hlist_del_rcu(&dev->name_hlist); hlist_del_rcu(&dev->index_hlist); write_unlock_bh(&dev_base_lock); dev_base_seq_inc(dev_net(dev)); } /* * Our notifier list */ static RAW_NOTIFIER_HEAD(netdev_chain); /* * Device drivers call our routines to queue packets here. We empty the * queue in the local softnet handler. 
*/ DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); EXPORT_PER_CPU_SYMBOL(softnet_data); #ifdef CONFIG_LOCKDEP /* * register_netdevice() inits txq->_xmit_lock and sets lockdep class * according to dev->type */ static const unsigned short netdev_lock_type[] = {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25, ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET, ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM, ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP, ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD, ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25, ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP, ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD, ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI, ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE, ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET, ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE}; static const char *const netdev_lock_name[] = {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP", "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD", "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25", "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP", "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD", "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI", "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE", "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE", "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; static inline unsigned short netdev_lock_pos(unsigned short dev_type) { int i; for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++) if (netdev_lock_type[i] == dev_type) return i; /* the last key is used by default */ return ARRAY_SIZE(netdev_lock_type) - 1; } static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, unsigned short dev_type) { int i; i = netdev_lock_pos(dev_type); lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], netdev_lock_name[i]); } static inline void netdev_set_addr_lockdep_class(struct net_device *dev) { int i; i = netdev_lock_pos(dev->type); lockdep_set_class_and_name(&dev->addr_list_lock, &netdev_addr_lock_key[i], netdev_lock_name[i]); } #else static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, unsigned short dev_type) { } static inline void netdev_set_addr_lockdep_class(struct net_device *dev) { } #endif /******************************************************************************* Protocol management and registration routines *******************************************************************************/ /* * Add a protocol ID to the list. Now that the input handler is * smarter we can dispense with all the messy stuff that used to be * here. * * BEWARE!!! 
Protocol handlers, mangling input packets, * MUST BE last in hash buckets and checking protocol handlers * MUST start from promiscuous ptype_all chain in net_bh. * It is true now, do not change it. * Explanation follows: if protocol handler, mangling packet, will * be the first on list, it is not able to sense, that packet * is cloned and should be copied-on-write, so that it will * change it and subsequent readers will get broken packet. * --ANK (980803) */ static inline struct list_head *ptype_head(const struct packet_type *pt) { if (pt->type == htons(ETH_P_ALL)) return pt->dev ? &pt->dev->ptype_all : &ptype_all; else return pt->dev ? &pt->dev->ptype_specific : &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; } /** * dev_add_pack - add packet handler * @pt: packet type declaration * * Add a protocol handler to the networking stack. The passed &packet_type * is linked into kernel lists and may not be freed until it has been * removed from the kernel lists. * * This call does not sleep therefore it can not * guarantee all CPU's that are in middle of receiving packets * will see the new packet type (until the next received packet). */ void dev_add_pack(struct packet_type *pt) { struct list_head *head = ptype_head(pt); spin_lock(&ptype_lock); list_add_rcu(&pt->list, head); spin_unlock(&ptype_lock); } EXPORT_SYMBOL(dev_add_pack); /** * __dev_remove_pack - remove packet handler * @pt: packet type declaration * * Remove a protocol handler that was previously added to the kernel * protocol handlers by dev_add_pack(). The passed &packet_type is removed * from the kernel lists and can be freed or reused once this function * returns. * * The packet type might still be in use by receivers * and must not be freed until after all the CPU's have gone * through a quiescent state. */ void __dev_remove_pack(struct packet_type *pt) { struct list_head *head = ptype_head(pt); struct packet_type *pt1; spin_lock(&ptype_lock); list_for_each_entry(pt1, head, list) { if (pt == pt1) { list_del_rcu(&pt->list); goto out; } } pr_warn("dev_remove_pack: %p not found\n", pt); out: spin_unlock(&ptype_lock); } EXPORT_SYMBOL(__dev_remove_pack); /** * dev_remove_pack - remove packet handler * @pt: packet type declaration * * Remove a protocol handler that was previously added to the kernel * protocol handlers by dev_add_pack(). The passed &packet_type is removed * from the kernel lists and can be freed or reused once this function * returns. * * This call sleeps to guarantee that no CPU is looking at the packet * type after return. */ void dev_remove_pack(struct packet_type *pt) { __dev_remove_pack(pt); synchronize_net(); } EXPORT_SYMBOL(dev_remove_pack); /** * dev_add_offload - register offload handlers * @po: protocol offload declaration * * Add protocol offload handlers to the networking stack. The passed * &proto_offload is linked into kernel lists and may not be freed until * it has been removed from the kernel lists. * * This call does not sleep therefore it can not * guarantee all CPU's that are in middle of receiving packets * will see the new offload handlers (until the next received packet). 
*/ void dev_add_offload(struct packet_offload *po) { struct packet_offload *elem; spin_lock(&offload_lock); list_for_each_entry(elem, &offload_base, list) { if (po->priority < elem->priority) break; } list_add_rcu(&po->list, elem->list.prev); spin_unlock(&offload_lock); } EXPORT_SYMBOL(dev_add_offload); /** * __dev_remove_offload - remove offload handler * @po: packet offload declaration * * Remove a protocol offload handler that was previously added to the * kernel offload handlers by dev_add_offload(). The passed &offload_type * is removed from the kernel lists and can be freed or reused once this * function returns. * * The packet type might still be in use by receivers * and must not be freed until after all the CPU's have gone * through a quiescent state. */ static void __dev_remove_offload(struct packet_offload *po) { struct list_head *head = &offload_base; struct packet_offload *po1; spin_lock(&offload_lock); list_for_each_entry(po1, head, list) { if (po == po1) { list_del_rcu(&po->list); goto out; } } pr_warn("dev_remove_offload: %p not found\n", po); out: spin_unlock(&offload_lock); } /** * dev_remove_offload - remove packet offload handler * @po: packet offload declaration * * Remove a packet offload handler that was previously added to the kernel * offload handlers by dev_add_offload(). The passed &offload_type is * removed from the kernel lists and can be freed or reused once this * function returns. * * This call sleeps to guarantee that no CPU is looking at the packet * type after return. */ void dev_remove_offload(struct packet_offload *po) { __dev_remove_offload(po); synchronize_net(); } EXPORT_SYMBOL(dev_remove_offload); /****************************************************************************** Device Boot-time Settings Routines *******************************************************************************/ /* Boot time configuration table */ static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX]; /** * netdev_boot_setup_add - add new setup entry * @name: name of the device * @map: configured settings for the device * * Adds new setup entry to the dev_boot_setup list. The function * returns 0 on error and 1 on success. This is a generic routine to * all netdevices. */ static int netdev_boot_setup_add(char *name, struct ifmap *map) { struct netdev_boot_setup *s; int i; s = dev_boot_setup; for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { if (s[i].name[0] == '\0' || s[i].name[0] == ' ') { memset(s[i].name, 0, sizeof(s[i].name)); strlcpy(s[i].name, name, IFNAMSIZ); memcpy(&s[i].map, map, sizeof(s[i].map)); break; } } return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1; } /** * netdev_boot_setup_check - check boot time settings * @dev: the netdevice * * Check boot time settings for the device. * The found settings are set for the device to be used * later in the device probing. * Returns 0 if no settings found, 1 if they are. */ int netdev_boot_setup_check(struct net_device *dev) { struct netdev_boot_setup *s = dev_boot_setup; int i; for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { if (s[i].name[0] != '\0' && s[i].name[0] != ' ' && !strcmp(dev->name, s[i].name)) { dev->irq = s[i].map.irq; dev->base_addr = s[i].map.base_addr; dev->mem_start = s[i].map.mem_start; dev->mem_end = s[i].map.mem_end; return 1; } } return 0; } EXPORT_SYMBOL(netdev_boot_setup_check); /** * netdev_boot_base - get address from boot time settings * @prefix: prefix for network device * @unit: id for network device * * Check boot time settings for the base address of device. 
* The found settings are set for the device to be used * later in the device probing. * Returns 0 if no settings found. */ unsigned long netdev_boot_base(const char *prefix, int unit) { const struct netdev_boot_setup *s = dev_boot_setup; char name[IFNAMSIZ]; int i; sprintf(name, "%s%d", prefix, unit); /* * If device already registered then return base of 1 * to indicate not to probe for this interface */ if (__dev_get_by_name(&init_net, name)) return 1; for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) if (!strcmp(name, s[i].name)) return s[i].map.base_addr; return 0; } /* * Saves at boot time configured settings for any netdevice. */ int __init netdev_boot_setup(char *str) { int ints[5]; struct ifmap map; str = get_options(str, ARRAY_SIZE(ints), ints); if (!str || !*str) return 0; /* Save settings */ memset(&map, 0, sizeof(map)); if (ints[0] > 0) map.irq = ints[1]; if (ints[0] > 1) map.base_addr = ints[2]; if (ints[0] > 2) map.mem_start = ints[3]; if (ints[0] > 3) map.mem_end = ints[4]; /* Add new entry to the list */ return netdev_boot_setup_add(str, &map); } __setup("netdev=", netdev_boot_setup); /******************************************************************************* Device Interface Subroutines *******************************************************************************/ /** * dev_get_iflink - get 'iflink' value of a interface * @dev: targeted interface * * Indicates the ifindex the interface is linked to. * Physical interfaces have the same 'ifindex' and 'iflink' values. */ int dev_get_iflink(const struct net_device *dev) { if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink) return dev->netdev_ops->ndo_get_iflink(dev); return dev->ifindex; } EXPORT_SYMBOL(dev_get_iflink); /** * dev_fill_metadata_dst - Retrieve tunnel egress information. * @dev: targeted interface * @skb: The packet. * * For better visibility of tunnel traffic OVS needs to retrieve * egress tunnel information for a packet. Following API allows * user to get this info. */ int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) { struct ip_tunnel_info *info; if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst) return -EINVAL; info = skb_tunnel_info_unclone(skb); if (!info) return -ENOMEM; if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX))) return -EINVAL; return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb); } EXPORT_SYMBOL_GPL(dev_fill_metadata_dst); /** * __dev_get_by_name - find a device by its name * @net: the applicable net namespace * @name: name to find * * Find an interface by name. Must be called under RTNL semaphore * or @dev_base_lock. If the name is found a pointer to the device * is returned. If the name is not found then %NULL is returned. The * reference counters are not incremented so the caller must be * careful with locks. */ struct net_device *__dev_get_by_name(struct net *net, const char *name) { struct net_device *dev; struct hlist_head *head = dev_name_hash(net, name); hlist_for_each_entry(dev, head, name_hlist) if (!strncmp(dev->name, name, IFNAMSIZ)) return dev; return NULL; } EXPORT_SYMBOL(__dev_get_by_name); /** * dev_get_by_name_rcu - find a device by its name * @net: the applicable net namespace * @name: name to find * * Find an interface by name. * If the name is found a pointer to the device is returned. * If the name is not found then %NULL is returned. * The reference counters are not incremented so the caller must be * careful with locks. The caller must hold RCU lock. 
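 *
 * A minimal lookup sketch (illustrative; "eth0" is just a placeholder
 * name). Because no reference is taken, the device may only be used
 * while the RCU read-side lock is held:
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "eth0");
 *	if (dev)
 *		pr_info("%s: ifindex %d\n", dev->name, dev->ifindex);
 *	rcu_read_unlock();
 *
 * Callers that need the device after unlocking should use
 * dev_get_by_name() instead, which takes a reference for them.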
*/ struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) { struct net_device *dev; struct hlist_head *head = dev_name_hash(net, name); hlist_for_each_entry_rcu(dev, head, name_hlist) if (!strncmp(dev->name, name, IFNAMSIZ)) return dev; return NULL; } EXPORT_SYMBOL(dev_get_by_name_rcu); /** * dev_get_by_name - find a device by its name * @net: the applicable net namespace * @name: name to find * * Find an interface by name. This can be called from any * context and does its own locking. The returned handle has * the usage count incremented and the caller must use dev_put() to * release it when it is no longer needed. %NULL is returned if no * matching device is found. */ struct net_device *dev_get_by_name(struct net *net, const char *name) { struct net_device *dev; rcu_read_lock(); dev = dev_get_by_name_rcu(net, name); if (dev) dev_hold(dev); rcu_read_unlock(); return dev; } EXPORT_SYMBOL(dev_get_by_name); /** * __dev_get_by_index - find a device by its ifindex * @net: the applicable net namespace * @ifindex: index of device * * Search for an interface by index. Returns %NULL if the device * is not found or a pointer to the device. The device has not * had its reference counter increased so the caller must be careful * about locking. The caller must hold either the RTNL semaphore * or @dev_base_lock. */ struct net_device *__dev_get_by_index(struct net *net, int ifindex) { struct net_device *dev; struct hlist_head *head = dev_index_hash(net, ifindex); hlist_for_each_entry(dev, head, index_hlist) if (dev->ifindex == ifindex) return dev; return NULL; } EXPORT_SYMBOL(__dev_get_by_index); /** * dev_get_by_index_rcu - find a device by its ifindex * @net: the applicable net namespace * @ifindex: index of device * * Search for an interface by index. Returns %NULL if the device * is not found or a pointer to the device. The device has not * had its reference counter increased so the caller must be careful * about locking. The caller must hold RCU lock. */ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) { struct net_device *dev; struct hlist_head *head = dev_index_hash(net, ifindex); hlist_for_each_entry_rcu(dev, head, index_hlist) if (dev->ifindex == ifindex) return dev; return NULL; } EXPORT_SYMBOL(dev_get_by_index_rcu); /** * dev_get_by_index - find a device by its ifindex * @net: the applicable net namespace * @ifindex: index of device * * Search for an interface by index. Returns NULL if the device * is not found or a pointer to the device. The device returned has * had a reference added and the pointer is safe until the user calls * dev_put to indicate they have finished with it. */ struct net_device *dev_get_by_index(struct net *net, int ifindex) { struct net_device *dev; rcu_read_lock(); dev = dev_get_by_index_rcu(net, ifindex); if (dev) dev_hold(dev); rcu_read_unlock(); return dev; } EXPORT_SYMBOL(dev_get_by_index); /** * netdev_get_name - get a netdevice name, knowing its ifindex. * @net: network namespace * @name: a pointer to the buffer where the name will be stored. * @ifindex: the ifindex of the interface to get the name from. * * The use of raw_seqcount_begin() and cond_resched() before * retrying is required as we want to give the writers a chance * to complete when CONFIG_PREEMPT is not set. 
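 *
 * A minimal caller sketch (illustrative; assumes @net and @ifindex are
 * already known). The destination buffer must be at least IFNAMSIZ
 * bytes, since the current name is copied into it verbatim:
 *
 *	char name[IFNAMSIZ];
 *
 *	if (netdev_get_name(net, name, ifindex) == 0)
 *		pr_info("ifindex %d is currently named %s\n", ifindex, name);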
*/ int netdev_get_name(struct net *net, char *name, int ifindex) { struct net_device *dev; unsigned int seq; retry: seq = raw_seqcount_begin(&devnet_rename_seq); rcu_read_lock(); dev = dev_get_by_index_rcu(net, ifindex); if (!dev) { rcu_read_unlock(); return -ENODEV; } strcpy(name, dev->name); rcu_read_unlock(); if (read_seqcount_retry(&devnet_rename_seq, seq)) { cond_resched(); goto retry; } return 0; } /** * dev_getbyhwaddr_rcu - find a device by its hardware address * @net: the applicable net namespace * @type: media type of device * @ha: hardware address * * Search for an interface by MAC address. Returns NULL if the device * is not found or a pointer to the device. * The caller must hold RCU or RTNL. * The returned device has not had its ref count increased * and the caller must therefore be careful about locking * */ struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, const char *ha) { struct net_device *dev; for_each_netdev_rcu(net, dev) if (dev->type == type && !memcmp(dev->dev_addr, ha, dev->addr_len)) return dev; return NULL; } EXPORT_SYMBOL(dev_getbyhwaddr_rcu); struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type) { struct net_device *dev; ASSERT_RTNL(); for_each_netdev(net, dev) if (dev->type == type) return dev; return NULL; } EXPORT_SYMBOL(__dev_getfirstbyhwtype); struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) { struct net_device *dev, *ret = NULL; rcu_read_lock(); for_each_netdev_rcu(net, dev) if (dev->type == type) { dev_hold(dev); ret = dev; break; } rcu_read_unlock(); return ret; } EXPORT_SYMBOL(dev_getfirstbyhwtype); /** * __dev_get_by_flags - find any device with given flags * @net: the applicable net namespace * @if_flags: IFF_* values * @mask: bitmask of bits in if_flags to check * * Search for any interface with the given flags. Returns NULL if a device * is not found or a pointer to the device. Must be called inside * rtnl_lock(), and result refcount is unchanged. */ struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask) { struct net_device *dev, *ret; ASSERT_RTNL(); ret = NULL; for_each_netdev(net, dev) { if (((dev->flags ^ if_flags) & mask) == 0) { ret = dev; break; } } return ret; } EXPORT_SYMBOL(__dev_get_by_flags); /** * dev_valid_name - check if name is okay for network device * @name: name string * * Network device names need to be valid file names to * to allow sysfs to work. We also disallow any kind of * whitespace. */ bool dev_valid_name(const char *name) { if (*name == '\0') return false; if (strlen(name) >= IFNAMSIZ) return false; if (!strcmp(name, ".") || !strcmp(name, "..")) return false; while (*name) { if (*name == '/' || *name == ':' || isspace(*name)) return false; name++; } return true; } EXPORT_SYMBOL(dev_valid_name); /** * __dev_alloc_name - allocate a name for a device * @net: network namespace to allocate the device name in * @name: name format string * @buf: scratch buffer and result name string * * Passed a format string - eg "lt%d" it will try and find a suitable * id. It scans list of devices to build up a free map, then chooses * the first empty slot. The caller must hold the dev_base or rtnl lock * while allocating the name and adding the device in order to avoid * duplicates. * Limited to bits_per_byte * page size devices (ie 32K on most platforms). * Returns the number of the unit assigned or a negative errno code. 
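 *
 * Drivers normally reach this helper through dev_alloc_name() below.
 * A usage sketch (illustrative; the "foo%d" format is a made-up
 * example):
 *
 *	err = dev_alloc_name(dev, "foo%d");
 *	if (err < 0)
 *		return err;
 *	(on success dev->name now holds e.g. "foo0" and err is the unit)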
*/ static int __dev_alloc_name(struct net *net, const char *name, char *buf) { int i = 0; const char *p; const int max_netdevices = 8*PAGE_SIZE; unsigned long *inuse; struct net_device *d; p = strnchr(name, IFNAMSIZ-1, '%'); if (p) { /* * Verify the string as this thing may have come from * the user. There must be either one "%d" and no other "%" * characters. */ if (p[1] != 'd' || strchr(p + 2, '%')) return -EINVAL; /* Use one page as a bit array of possible slots */ inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC); if (!inuse) return -ENOMEM; for_each_netdev(net, d) { if (!sscanf(d->name, name, &i)) continue; if (i < 0 || i >= max_netdevices) continue; /* avoid cases where sscanf is not exact inverse of printf */ snprintf(buf, IFNAMSIZ, name, i); if (!strncmp(buf, d->name, IFNAMSIZ)) set_bit(i, inuse); } i = find_first_zero_bit(inuse, max_netdevices); free_page((unsigned long) inuse); } if (buf != name) snprintf(buf, IFNAMSIZ, name, i); if (!__dev_get_by_name(net, buf)) return i; /* It is possible to run out of possible slots * when the name is long and there isn't enough space left * for the digits, or if all bits are used. */ return -ENFILE; } /** * dev_alloc_name - allocate a name for a device * @dev: device * @name: name format string * * Passed a format string - eg "lt%d" it will try and find a suitable * id. It scans list of devices to build up a free map, then chooses * the first empty slot. The caller must hold the dev_base or rtnl lock * while allocating the name and adding the device in order to avoid * duplicates. * Limited to bits_per_byte * page size devices (ie 32K on most platforms). * Returns the number of the unit assigned or a negative errno code. */ int dev_alloc_name(struct net_device *dev, const char *name) { char buf[IFNAMSIZ]; struct net *net; int ret; BUG_ON(!dev_net(dev)); net = dev_net(dev); ret = __dev_alloc_name(net, name, buf); if (ret >= 0) strlcpy(dev->name, buf, IFNAMSIZ); return ret; } EXPORT_SYMBOL(dev_alloc_name); static int dev_alloc_name_ns(struct net *net, struct net_device *dev, const char *name) { char buf[IFNAMSIZ]; int ret; ret = __dev_alloc_name(net, name, buf); if (ret >= 0) strlcpy(dev->name, buf, IFNAMSIZ); return ret; } static int dev_get_valid_name(struct net *net, struct net_device *dev, const char *name) { BUG_ON(!net); if (!dev_valid_name(name)) return -EINVAL; if (strchr(name, '%')) return dev_alloc_name_ns(net, dev, name); else if (__dev_get_by_name(net, name)) return -EEXIST; else if (dev->name != name) strlcpy(dev->name, name, IFNAMSIZ); return 0; } /** * dev_change_name - change name of a device * @dev: device * @newname: name (or format string) must be at least IFNAMSIZ * * Change name of a device, can pass format strings "eth%d". * for wildcarding. 
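 *
 * A usage sketch (illustrative; "lan%d" is a made-up format). The
 * caller must hold the RTNL lock, and the device has to be down or
 * -EBUSY is returned:
 *
 *	rtnl_lock();
 *	err = dev_change_name(dev, "lan%d");
 *	rtnl_unlock();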
*/ int dev_change_name(struct net_device *dev, const char *newname) { unsigned char old_assign_type; char oldname[IFNAMSIZ]; int err = 0; int ret; struct net *net; ASSERT_RTNL(); BUG_ON(!dev_net(dev)); net = dev_net(dev); if (dev->flags & IFF_UP) return -EBUSY; write_seqcount_begin(&devnet_rename_seq); if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { write_seqcount_end(&devnet_rename_seq); return 0; } memcpy(oldname, dev->name, IFNAMSIZ); err = dev_get_valid_name(net, dev, newname); if (err < 0) { write_seqcount_end(&devnet_rename_seq); return err; } if (oldname[0] && !strchr(oldname, '%')) netdev_info(dev, "renamed from %s\n", oldname); old_assign_type = dev->name_assign_type; dev->name_assign_type = NET_NAME_RENAMED; rollback: ret = device_rename(&dev->dev, dev->name); if (ret) { memcpy(dev->name, oldname, IFNAMSIZ); dev->name_assign_type = old_assign_type; write_seqcount_end(&devnet_rename_seq); return ret; } write_seqcount_end(&devnet_rename_seq); netdev_adjacent_rename_links(dev, oldname); write_lock_bh(&dev_base_lock); hlist_del_rcu(&dev->name_hlist); write_unlock_bh(&dev_base_lock); synchronize_rcu(); write_lock_bh(&dev_base_lock); hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name)); write_unlock_bh(&dev_base_lock); ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev); ret = notifier_to_errno(ret); if (ret) { /* err >= 0 after dev_alloc_name() or stores the first errno */ if (err >= 0) { err = ret; write_seqcount_begin(&devnet_rename_seq); memcpy(dev->name, oldname, IFNAMSIZ); memcpy(oldname, newname, IFNAMSIZ); dev->name_assign_type = old_assign_type; old_assign_type = NET_NAME_RENAMED; goto rollback; } else { pr_err("%s: name change rollback failed: %d\n", dev->name, ret); } } return err; } /** * dev_set_alias - change ifalias of a device * @dev: device * @alias: name up to IFALIASZ * @len: limit of bytes to copy from info * * Set ifalias for a device, */ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) { char *new_ifalias; ASSERT_RTNL(); if (len >= IFALIASZ) return -EINVAL; if (!len) { kfree(dev->ifalias); dev->ifalias = NULL; return 0; } new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL); if (!new_ifalias) return -ENOMEM; dev->ifalias = new_ifalias; strlcpy(dev->ifalias, alias, len+1); return len; } /** * netdev_features_change - device changes features * @dev: device to cause notification * * Called to indicate a device has changed features. */ void netdev_features_change(struct net_device *dev) { call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev); } EXPORT_SYMBOL(netdev_features_change); /** * netdev_state_change - device changes state * @dev: device to cause notification * * Called to indicate a device has changed state. This function calls * the notifier chains for netdev_chain and sends a NEWLINK message * to the routing socket. */ void netdev_state_change(struct net_device *dev) { if (dev->flags & IFF_UP) { struct netdev_notifier_change_info change_info; change_info.flags_changed = 0; call_netdevice_notifiers_info(NETDEV_CHANGE, dev, &change_info.info); rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL); } } EXPORT_SYMBOL(netdev_state_change); /** * netdev_notify_peers - notify network peers about existence of @dev * @dev: network device * * Generate traffic such that interested network peers are aware of * @dev, such as by generating a gratuitous ARP. This may be used when * a device wants to inform the rest of the network about some sort of * reconfiguration such as a failover event or virtual machine * migration. 
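 *
 * A caller sketch (illustrative; foo_pv_resume() and its driver are
 * hypothetical). Note that the function takes the RTNL lock itself,
 * so it must not be called with RTNL already held:
 *
 *	static int foo_pv_resume(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netdev_notify_peers(dev);
 *		return 0;
 *	}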
*/ void netdev_notify_peers(struct net_device *dev) { rtnl_lock(); call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); rtnl_unlock(); } EXPORT_SYMBOL(netdev_notify_peers); static int __dev_open(struct net_device *dev) { const struct net_device_ops *ops = dev->netdev_ops; int ret; ASSERT_RTNL(); if (!netif_device_present(dev)) return -ENODEV; /* Block netpoll from trying to do any rx path servicing. * If we don't do this there is a chance ndo_poll_controller * or ndo_poll may be running while we open the device */ netpoll_poll_disable(dev); ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev); ret = notifier_to_errno(ret); if (ret) return ret; set_bit(__LINK_STATE_START, &dev->state); if (ops->ndo_validate_addr) ret = ops->ndo_validate_addr(dev); if (!ret && ops->ndo_open) ret = ops->ndo_open(dev); netpoll_poll_enable(dev); if (ret) clear_bit(__LINK_STATE_START, &dev->state); else { dev->flags |= IFF_UP; dev_set_rx_mode(dev); dev_activate(dev); add_device_randomness(dev->dev_addr, dev->addr_len); } return ret; } /** * dev_open - prepare an interface for use. * @dev: device to open * * Takes a device from down to up state. The device's private open * function is invoked and then the multicast lists are loaded. Finally * the device is moved into the up state and a %NETDEV_UP message is * sent to the netdev notifier chain. * * Calling this function on an active interface is a nop. On a failure * a negative errno code is returned. */ int dev_open(struct net_device *dev) { int ret; if (dev->flags & IFF_UP) return 0; ret = __dev_open(dev); if (ret < 0) return ret; rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); call_netdevice_notifiers(NETDEV_UP, dev); return ret; } EXPORT_SYMBOL(dev_open); static int __dev_close_many(struct list_head *head) { struct net_device *dev; ASSERT_RTNL(); might_sleep(); list_for_each_entry(dev, head, close_list) { /* Temporarily disable netpoll until the interface is down */ netpoll_poll_disable(dev); call_netdevice_notifiers(NETDEV_GOING_DOWN, dev); clear_bit(__LINK_STATE_START, &dev->state); /* Synchronize to scheduled poll. We cannot touch poll list, it * can be even on different cpu. So just clear netif_running(). * * dev->stop() will invoke napi_disable() on all of it's * napi_struct instances on this device. */ smp_mb__after_atomic(); /* Commit netif_running(). */ } dev_deactivate_many(head); list_for_each_entry(dev, head, close_list) { const struct net_device_ops *ops = dev->netdev_ops; /* * Call the device specific close. This cannot fail. * Only if device is UP * * We allow it to be called even after a DETACH hot-plug * event. */ if (ops->ndo_stop) ops->ndo_stop(dev); dev->flags &= ~IFF_UP; netpoll_poll_enable(dev); } return 0; } static int __dev_close(struct net_device *dev) { int retval; LIST_HEAD(single); list_add(&dev->close_list, &single); retval = __dev_close_many(&single); list_del(&single); return retval; } int dev_close_many(struct list_head *head, bool unlink) { struct net_device *dev, *tmp; /* Remove the devices that don't need to be closed */ list_for_each_entry_safe(dev, tmp, head, close_list) if (!(dev->flags & IFF_UP)) list_del_init(&dev->close_list); __dev_close_many(head); list_for_each_entry_safe(dev, tmp, head, close_list) { rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); call_netdevice_notifiers(NETDEV_DOWN, dev); if (unlink) list_del_init(&dev->close_list); } return 0; } EXPORT_SYMBOL(dev_close_many); /** * dev_close - shutdown an interface. 
* @dev: device to shutdown * * This function moves an active device into down state. A * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier * chain. */ int dev_close(struct net_device *dev) { if (dev->flags & IFF_UP) { LIST_HEAD(single); list_add(&dev->close_list, &single); dev_close_many(&single, true); list_del(&single); } return 0; } EXPORT_SYMBOL(dev_close); /** * dev_disable_lro - disable Large Receive Offload on a device * @dev: device * * Disable Large Receive Offload (LRO) on a net device. Must be * called under RTNL. This is needed if received packets may be * forwarded to another interface. */ void dev_disable_lro(struct net_device *dev) { struct net_device *lower_dev; struct list_head *iter; dev->wanted_features &= ~NETIF_F_LRO; netdev_update_features(dev); if (unlikely(dev->features & NETIF_F_LRO)) netdev_WARN(dev, "failed to disable LRO!\n"); netdev_for_each_lower_dev(dev, lower_dev, iter) dev_disable_lro(lower_dev); } EXPORT_SYMBOL(dev_disable_lro); static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val, struct net_device *dev) { struct netdev_notifier_info info; netdev_notifier_info_init(&info, dev); return nb->notifier_call(nb, val, &info); } static int dev_boot_phase = 1; /** * register_netdevice_notifier - register a network notifier block * @nb: notifier * * Register a notifier to be called when network device events occur. * The notifier passed is linked into the kernel structures and must * not be reused until it has been unregistered. A negative errno code * is returned on a failure. * * When registered all registration and up events are replayed * to the new notifier to allow device to have a race free * view of the network device list. */ int register_netdevice_notifier(struct notifier_block *nb) { struct net_device *dev; struct net_device *last; struct net *net; int err; rtnl_lock(); err = raw_notifier_chain_register(&netdev_chain, nb); if (err) goto unlock; if (dev_boot_phase) goto unlock; for_each_net(net) { for_each_netdev(net, dev) { err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev); err = notifier_to_errno(err); if (err) goto rollback; if (!(dev->flags & IFF_UP)) continue; call_netdevice_notifier(nb, NETDEV_UP, dev); } } unlock: rtnl_unlock(); return err; rollback: last = dev; for_each_net(net) { for_each_netdev(net, dev) { if (dev == last) goto outroll; if (dev->flags & IFF_UP) { call_netdevice_notifier(nb, NETDEV_GOING_DOWN, dev); call_netdevice_notifier(nb, NETDEV_DOWN, dev); } call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); } } outroll: raw_notifier_chain_unregister(&netdev_chain, nb); goto unlock; } EXPORT_SYMBOL(register_netdevice_notifier); /** * unregister_netdevice_notifier - unregister a network notifier block * @nb: notifier * * Unregister a notifier previously registered by * register_netdevice_notifier(). The notifier is unlinked into the * kernel structures and may then be reused. A negative errno code * is returned on a failure. * * After unregistering unregister and down device events are synthesized * for all devices on the device list to the removed notifier to remove * the need for special case cleanup code. 
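 *
 * A pairing sketch (illustrative; the foo_* names are hypothetical).
 * The same notifier_block passed to register_netdevice_notifier() is
 * handed back here once the events are no longer needed:
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_netdev_nb = {
 *		.notifier_call	= foo_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&foo_netdev_nb);
 *	...
 *	unregister_netdevice_notifier(&foo_netdev_nb);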
*/ int unregister_netdevice_notifier(struct notifier_block *nb) { struct net_device *dev; struct net *net; int err; rtnl_lock(); err = raw_notifier_chain_unregister(&netdev_chain, nb); if (err) goto unlock; for_each_net(net) { for_each_netdev(net, dev) { if (dev->flags & IFF_UP) { call_netdevice_notifier(nb, NETDEV_GOING_DOWN, dev); call_netdevice_notifier(nb, NETDEV_DOWN, dev); } call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); } } unlock: rtnl_unlock(); return err; } EXPORT_SYMBOL(unregister_netdevice_notifier); /** * call_netdevice_notifiers_info - call all network notifier blocks * @val: value passed unmodified to notifier function * @dev: net_device pointer passed unmodified to notifier function * @info: notifier information data * * Call all network notifier blocks. Parameters and return value * are as for raw_notifier_call_chain(). */ static int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev, struct netdev_notifier_info *info) { ASSERT_RTNL(); netdev_notifier_info_init(info, dev); return raw_notifier_call_chain(&netdev_chain, val, info); } /** * call_netdevice_notifiers - call all network notifier blocks * @val: value passed unmodified to notifier function * @dev: net_device pointer passed unmodified to notifier function * * Call all network notifier blocks. Parameters and return value * are as for raw_notifier_call_chain(). */ int call_netdevice_notifiers(unsigned long val, struct net_device *dev) { struct netdev_notifier_info info; return call_netdevice_notifiers_info(val, dev, &info); } EXPORT_SYMBOL(call_netdevice_notifiers); #ifdef CONFIG_NET_INGRESS static struct static_key ingress_needed __read_mostly; void net_inc_ingress_queue(void) { static_key_slow_inc(&ingress_needed); } EXPORT_SYMBOL_GPL(net_inc_ingress_queue); void net_dec_ingress_queue(void) { static_key_slow_dec(&ingress_needed); } EXPORT_SYMBOL_GPL(net_dec_ingress_queue); #endif #ifdef CONFIG_NET_EGRESS static struct static_key egress_needed __read_mostly; void net_inc_egress_queue(void) { static_key_slow_inc(&egress_needed); } EXPORT_SYMBOL_GPL(net_inc_egress_queue); void net_dec_egress_queue(void) { static_key_slow_dec(&egress_needed); } EXPORT_SYMBOL_GPL(net_dec_egress_queue); #endif static struct static_key netstamp_needed __read_mostly; #ifdef HAVE_JUMP_LABEL /* We are not allowed to call static_key_slow_dec() from irq context * If net_disable_timestamp() is called from irq context, defer the * static_key_slow_dec() calls. 
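 *
 * Illustrative consequence (sketch): a net_disable_timestamp() call
 * made from hard-irq context only records the decrement in
 * netstamp_needed_deferred; the next net_enable_timestamp() from
 * process context settles that balance against the static key, so the
 * key itself is never toggled from interrupt context.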
*/ static atomic_t netstamp_needed_deferred; #endif void net_enable_timestamp(void) { #ifdef HAVE_JUMP_LABEL int deferred = atomic_xchg(&netstamp_needed_deferred, 0); if (deferred) { while (--deferred) static_key_slow_dec(&netstamp_needed); return; } #endif static_key_slow_inc(&netstamp_needed); } EXPORT_SYMBOL(net_enable_timestamp); void net_disable_timestamp(void) { #ifdef HAVE_JUMP_LABEL if (in_interrupt()) { atomic_inc(&netstamp_needed_deferred); return; } #endif static_key_slow_dec(&netstamp_needed); } EXPORT_SYMBOL(net_disable_timestamp); static inline void net_timestamp_set(struct sk_buff *skb) { skb->tstamp.tv64 = 0; if (static_key_false(&netstamp_needed)) __net_timestamp(skb); } #define net_timestamp_check(COND, SKB) \ if (static_key_false(&netstamp_needed)) { \ if ((COND) && !(SKB)->tstamp.tv64) \ __net_timestamp(SKB); \ } \ bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb) { unsigned int len; if (!(dev->flags & IFF_UP)) return false; len = dev->mtu + dev->hard_header_len + VLAN_HLEN; if (skb->len <= len) return true; /* if TSO is enabled, we don't care about the length as the packet * could be forwarded without being segmented before */ if (skb_is_gso(skb)) return true; return false; } EXPORT_SYMBOL_GPL(is_skb_forwardable); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) { if (skb_orphan_frags(skb, GFP_ATOMIC) || unlikely(!is_skb_forwardable(dev, skb))) { atomic_long_inc(&dev->rx_dropped); kfree_skb(skb); return NET_RX_DROP; } skb_scrub_packet(skb, true); skb->priority = 0; skb->protocol = eth_type_trans(skb, dev); skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); return 0; } EXPORT_SYMBOL_GPL(__dev_forward_skb); /** * dev_forward_skb - loopback an skb to another netif * * @dev: destination network device * @skb: buffer to forward * * return values: * NET_RX_SUCCESS (no congestion) * NET_RX_DROP (packet was dropped, but freed) * * dev_forward_skb can be used for injecting an skb from the * start_xmit function of one device into the receive queue * of another device. * * The receiving device may be in another namespace, so * we have to clear all information in the skb that could * impact namespace isolation. */ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) { return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); } EXPORT_SYMBOL_GPL(dev_forward_skb); static inline int deliver_skb(struct sk_buff *skb, struct packet_type *pt_prev, struct net_device *orig_dev) { if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) return -ENOMEM; atomic_inc(&skb->users); return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); } static inline void deliver_ptype_list_skb(struct sk_buff *skb, struct packet_type **pt, struct net_device *orig_dev, __be16 type, struct list_head *ptype_list) { struct packet_type *ptype, *pt_prev = *pt; list_for_each_entry_rcu(ptype, ptype_list, list) { if (ptype->type != type) continue; if (pt_prev) deliver_skb(skb, pt_prev, orig_dev); pt_prev = ptype; } *pt = pt_prev; } static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) { if (!ptype->af_packet_priv || !skb->sk) return false; if (ptype->id_match) return ptype->id_match(ptype, skb->sk); else if ((struct sock *)ptype->af_packet_priv == skb->sk) return true; return false; } /* * Support routine. Sends outgoing frames to any network * taps currently in use. 
*/ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) { struct packet_type *ptype; struct sk_buff *skb2 = NULL; struct packet_type *pt_prev = NULL; struct list_head *ptype_list = &ptype_all; rcu_read_lock(); again: list_for_each_entry_rcu(ptype, ptype_list, list) { /* Never send packets back to the socket * they originated from - MvS (miquels@drinkel.ow.org) */ if (skb_loop_sk(ptype, skb)) continue; if (pt_prev) { deliver_skb(skb2, pt_prev, skb->dev); pt_prev = ptype; continue; } /* need to clone skb, done only once */ skb2 = skb_clone(skb, GFP_ATOMIC); if (!skb2) goto out_unlock; net_timestamp_set(skb2); /* skb->nh should be correctly * set by sender, so that the second statement is * just protection against buggy protocols. */ skb_reset_mac_header(skb2); if (skb_network_header(skb2) < skb2->data || skb_network_header(skb2) > skb_tail_pointer(skb2)) { net_crit_ratelimited("protocol %04x is buggy, dev %s\n", ntohs(skb2->protocol), dev->name); skb_reset_network_header(skb2); } skb2->transport_header = skb2->network_header; skb2->pkt_type = PACKET_OUTGOING; pt_prev = ptype; } if (ptype_list == &ptype_all) { ptype_list = &dev->ptype_all; goto again; } out_unlock: if (pt_prev) pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); rcu_read_unlock(); } /** * netif_setup_tc - Handle tc mappings on real_num_tx_queues change * @dev: Network device * @txq: number of queues available * * If real_num_tx_queues is changed the tc mappings may no longer be * valid. To resolve this verify the tc mapping remains valid and if * not NULL the mapping. With no priorities mapping to this * offset/count pair it will no longer be used. In the worst case TC0 * is invalid nothing can be done so disable priority mappings. If is * expected that drivers will fix this mapping if they can before * calling netif_set_real_num_tx_queues. */ static void netif_setup_tc(struct net_device *dev, unsigned int txq) { int i; struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; /* If TC0 is invalidated disable TC mapping */ if (tc->offset + tc->count > txq) { pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); dev->num_tc = 0; return; } /* Invalidated prio to tc mappings set to TC0 */ for (i = 1; i < TC_BITMASK + 1; i++) { int q = netdev_get_prio_tc_map(dev, i); tc = &dev->tc_to_txq[q]; if (tc->offset + tc->count > txq) { pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. 
Setting map to 0\n", i, q); netdev_set_prio_tc_map(dev, i, 0); } } } #ifdef CONFIG_XPS static DEFINE_MUTEX(xps_map_mutex); #define xmap_dereference(P) \ rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps, int cpu, u16 index) { struct xps_map *map = NULL; int pos; if (dev_maps) map = xmap_dereference(dev_maps->cpu_map[cpu]); for (pos = 0; map && pos < map->len; pos++) { if (map->queues[pos] == index) { if (map->len > 1) { map->queues[pos] = map->queues[--map->len]; } else { RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL); kfree_rcu(map, rcu); map = NULL; } break; } } return map; } static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index) { struct xps_dev_maps *dev_maps; int cpu, i; bool active = false; mutex_lock(&xps_map_mutex); dev_maps = xmap_dereference(dev->xps_maps); if (!dev_maps) goto out_no_maps; for_each_possible_cpu(cpu) { for (i = index; i < dev->num_tx_queues; i++) { if (!remove_xps_queue(dev_maps, cpu, i)) break; } if (i == dev->num_tx_queues) active = true; } if (!active) { RCU_INIT_POINTER(dev->xps_maps, NULL); kfree_rcu(dev_maps, rcu); } for (i = index; i < dev->num_tx_queues; i++) netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i), NUMA_NO_NODE); out_no_maps: mutex_unlock(&xps_map_mutex); } static struct xps_map *expand_xps_map(struct xps_map *map, int cpu, u16 index) { struct xps_map *new_map; int alloc_len = XPS_MIN_MAP_ALLOC; int i, pos; for (pos = 0; map && pos < map->len; pos++) { if (map->queues[pos] != index) continue; return map; } /* Need to add queue to this CPU's existing map */ if (map) { if (pos < map->alloc_len) return map; alloc_len = map->alloc_len * 2; } /* Need to allocate new map to store queue on this CPU's map */ new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL, cpu_to_node(cpu)); if (!new_map) return NULL; for (i = 0; i < pos; i++) new_map->queues[i] = map->queues[i]; new_map->alloc_len = alloc_len; new_map->len = pos; return new_map; } int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, u16 index) { struct xps_dev_maps *dev_maps, *new_dev_maps = NULL; struct xps_map *map, *new_map; int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES); int cpu, numa_node_id = -2; bool active = false; mutex_lock(&xps_map_mutex); dev_maps = xmap_dereference(dev->xps_maps); /* allocate memory for queue storage */ for_each_online_cpu(cpu) { if (!cpumask_test_cpu(cpu, mask)) continue; if (!new_dev_maps) new_dev_maps = kzalloc(maps_sz, GFP_KERNEL); if (!new_dev_maps) { mutex_unlock(&xps_map_mutex); return -ENOMEM; } map = dev_maps ? 
xmap_dereference(dev_maps->cpu_map[cpu]) : NULL; map = expand_xps_map(map, cpu, index); if (!map) goto error; RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map); } if (!new_dev_maps) goto out_no_new_maps; for_each_possible_cpu(cpu) { if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) { /* add queue to CPU maps */ int pos = 0; map = xmap_dereference(new_dev_maps->cpu_map[cpu]); while ((pos < map->len) && (map->queues[pos] != index)) pos++; if (pos == map->len) map->queues[map->len++] = index; #ifdef CONFIG_NUMA if (numa_node_id == -2) numa_node_id = cpu_to_node(cpu); else if (numa_node_id != cpu_to_node(cpu)) numa_node_id = -1; #endif } else if (dev_maps) { /* fill in the new device map from the old device map */ map = xmap_dereference(dev_maps->cpu_map[cpu]); RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map); } } rcu_assign_pointer(dev->xps_maps, new_dev_maps); /* Cleanup old maps */ if (dev_maps) { for_each_possible_cpu(cpu) { new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]); map = xmap_dereference(dev_maps->cpu_map[cpu]); if (map && map != new_map) kfree_rcu(map, rcu); } kfree_rcu(dev_maps, rcu); } dev_maps = new_dev_maps; active = true; out_no_new_maps: /* update Tx queue numa node */ netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), (numa_node_id >= 0) ? numa_node_id : NUMA_NO_NODE); if (!dev_maps) goto out_no_maps; /* removes queue from unused CPUs */ for_each_possible_cpu(cpu) { if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) continue; if (remove_xps_queue(dev_maps, cpu, index)) active = true; } /* free map if not active */ if (!active) { RCU_INIT_POINTER(dev->xps_maps, NULL); kfree_rcu(dev_maps, rcu); } out_no_maps: mutex_unlock(&xps_map_mutex); return 0; error: /* remove any maps that we added */ for_each_possible_cpu(cpu) { new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]); map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) : NULL; if (new_map && new_map != map) kfree(new_map); } mutex_unlock(&xps_map_mutex); kfree(new_dev_maps); return -ENOMEM; } EXPORT_SYMBOL(netif_set_xps_queue); #endif /* * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues * greater then real_num_tx_queues stale skbs on the qdisc must be flushed. */ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) { int rc; if (txq < 1 || txq > dev->num_tx_queues) return -EINVAL; if (dev->reg_state == NETREG_REGISTERED || dev->reg_state == NETREG_UNREGISTERING) { ASSERT_RTNL(); rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, txq); if (rc) return rc; if (dev->num_tc) netif_setup_tc(dev, txq); if (txq < dev->real_num_tx_queues) { qdisc_reset_all_tx_gt(dev, txq); #ifdef CONFIG_XPS netif_reset_xps_queues_gt(dev, txq); #endif } } dev->real_num_tx_queues = txq; return 0; } EXPORT_SYMBOL(netif_set_real_num_tx_queues); #ifdef CONFIG_SYSFS /** * netif_set_real_num_rx_queues - set actual number of RX queues used * @dev: Network device * @rxq: Actual number of RX queues * * This must be called either with the rtnl_lock held or before * registration of the net device. Returns 0 on success, or a * negative error code. If called before registration, it always * succeeds. 
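 *
 * A driver-side sketch (illustrative; nr_rings is a hypothetical count
 * of hardware rings). Together with netif_set_real_num_tx_queues()
 * above, this is typically called under RTNL when the ring count
 * changes at runtime:
 *
 *	rtnl_lock();
 *	err = netif_set_real_num_tx_queues(dev, nr_rings);
 *	if (!err)
 *		err = netif_set_real_num_rx_queues(dev, nr_rings);
 *	rtnl_unlock();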
*/ int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) { int rc; if (rxq < 1 || rxq > dev->num_rx_queues) return -EINVAL; if (dev->reg_state == NETREG_REGISTERED) { ASSERT_RTNL(); rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, rxq); if (rc) return rc; } dev->real_num_rx_queues = rxq; return 0; } EXPORT_SYMBOL(netif_set_real_num_rx_queues); #endif /** * netif_get_num_default_rss_queues - default number of RSS queues * * This routine should set an upper limit on the number of RSS queues * used by default by multiqueue devices. */ int netif_get_num_default_rss_queues(void) { return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus()); } EXPORT_SYMBOL(netif_get_num_default_rss_queues); static inline void __netif_reschedule(struct Qdisc *q) { struct softnet_data *sd; unsigned long flags; local_irq_save(flags); sd = this_cpu_ptr(&softnet_data); q->next_sched = NULL; *sd->output_queue_tailp = q; sd->output_queue_tailp = &q->next_sched; raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); } void __netif_schedule(struct Qdisc *q) { if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) __netif_reschedule(q); } EXPORT_SYMBOL(__netif_schedule); struct dev_kfree_skb_cb { enum skb_free_reason reason; }; static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) { return (struct dev_kfree_skb_cb *)skb->cb; } void netif_schedule_queue(struct netdev_queue *txq) { rcu_read_lock(); if (!(txq->state & QUEUE_STATE_ANY_XOFF)) { struct Qdisc *q = rcu_dereference(txq->qdisc); __netif_schedule(q); } rcu_read_unlock(); } EXPORT_SYMBOL(netif_schedule_queue); /** * netif_wake_subqueue - allow sending packets on subqueue * @dev: network device * @queue_index: sub queue index * * Resume individual transmit queue of a device with multiple transmit queues. */ void netif_wake_subqueue(struct net_device *dev, u16 queue_index) { struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) { struct Qdisc *q; rcu_read_lock(); q = rcu_dereference(txq->qdisc); __netif_schedule(q); rcu_read_unlock(); } } EXPORT_SYMBOL(netif_wake_subqueue); void netif_tx_wake_queue(struct netdev_queue *dev_queue) { if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) { struct Qdisc *q; rcu_read_lock(); q = rcu_dereference(dev_queue->qdisc); __netif_schedule(q); rcu_read_unlock(); } } EXPORT_SYMBOL(netif_tx_wake_queue); void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) { unsigned long flags; if (likely(atomic_read(&skb->users) == 1)) { smp_rmb(); atomic_set(&skb->users, 0); } else if (likely(!atomic_dec_and_test(&skb->users))) { return; } get_kfree_skb_cb(skb)->reason = reason; local_irq_save(flags); skb->next = __this_cpu_read(softnet_data.completion_queue); __this_cpu_write(softnet_data.completion_queue, skb); raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); } EXPORT_SYMBOL(__dev_kfree_skb_irq); void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason) { if (in_irq() || irqs_disabled()) __dev_kfree_skb_irq(skb, reason); else dev_kfree_skb(skb); } EXPORT_SYMBOL(__dev_kfree_skb_any); /** * netif_device_detach - mark device as removed * @dev: network device * * Mark device as removed from system and therefore no longer available. 
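 *
 * A suspend/resume pairing sketch (illustrative; the foo_pm_* hooks and
 * their driver are hypothetical). netif_device_attach() below is the
 * counterpart that marks the device present again:
 *
 *	static int foo_pm_suspend(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_detach(dev);
 *		return 0;
 *	}
 *
 *	static int foo_pm_resume(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_attach(dev);
 *		return 0;
 *	}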
*/ void netif_device_detach(struct net_device *dev) { if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && netif_running(dev)) { netif_tx_stop_all_queues(dev); } } EXPORT_SYMBOL(netif_device_detach); /** * netif_device_attach - mark device as attached * @dev: network device * * Mark device as attached from system and restart if needed. */ void netif_device_attach(struct net_device *dev) { if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && netif_running(dev)) { netif_tx_wake_all_queues(dev); __netdev_watchdog_up(dev); } } EXPORT_SYMBOL(netif_device_attach); /* * Returns a Tx hash based on the given packet descriptor a Tx queues' number * to be used as a distribution range. */ u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb, unsigned int num_tx_queues) { u32 hash; u16 qoffset = 0; u16 qcount = num_tx_queues; if (skb_rx_queue_recorded(skb)) { hash = skb_get_rx_queue(skb); while (unlikely(hash >= num_tx_queues)) hash -= num_tx_queues; return hash; } if (dev->num_tc) { u8 tc = netdev_get_prio_tc_map(dev, skb->priority); qoffset = dev->tc_to_txq[tc].offset; qcount = dev->tc_to_txq[tc].count; } return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; } EXPORT_SYMBOL(__skb_tx_hash); static void skb_warn_bad_offload(const struct sk_buff *skb) { static const netdev_features_t null_features = 0; struct net_device *dev = skb->dev; const char *name = ""; if (!net_ratelimit()) return; if (dev) { if (dev->dev.parent) name = dev_driver_string(dev->dev.parent); else name = netdev_name(dev); } WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d " "gso_type=%d ip_summed=%d\n", name, dev ? &dev->features : &null_features, skb->sk ? &skb->sk->sk_route_caps : &null_features, skb->len, skb->data_len, skb_shinfo(skb)->gso_size, skb_shinfo(skb)->gso_type, skb->ip_summed); } /* * Invalidate hardware checksum when packet is to be mangled, and * complete checksum manually on outgoing path. */ int skb_checksum_help(struct sk_buff *skb) { __wsum csum; int ret = 0, offset; if (skb->ip_summed == CHECKSUM_COMPLETE) goto out_set_summed; if (unlikely(skb_shinfo(skb)->gso_size)) { skb_warn_bad_offload(skb); return -EINVAL; } /* Before computing a checksum, we should make sure no frag could * be modified by an external entity : checksum could be wrong. */ if (skb_has_shared_frag(skb)) { ret = __skb_linearize(skb); if (ret) goto out; } offset = skb_checksum_start_offset(skb); BUG_ON(offset >= skb_headlen(skb)); csum = skb_checksum(skb, offset, skb->len - offset, 0); offset += skb->csum_offset; BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); if (skb_cloned(skb) && !skb_clone_writable(skb, offset + sizeof(__sum16))) { ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); if (ret) goto out; } *(__sum16 *)(skb->data + offset) = csum_fold(csum); out_set_summed: skb->ip_summed = CHECKSUM_NONE; out: return ret; } EXPORT_SYMBOL(skb_checksum_help); /* skb_csum_offload_check - Driver helper function to determine if a device * with limited checksum offload capabilities is able to offload the checksum * for a given packet. * * Arguments: * skb - sk_buff for the packet in question * spec - contains the description of what device can offload * csum_encapped - returns true if the checksum being offloaded is * encpasulated. That is it is checksum for the transport header * in the inner headers. 
* checksum_help - when set indicates that helper function should * call skb_checksum_help if offload checks fail * * Returns: * true: Packet has passed the checksum checks and should be offloadable to * the device (a driver may still need to check for additional * restrictions of its device) * false: Checksum is not offloadable. If checksum_help was set then * skb_checksum_help was called to resolve checksum for non-GSO * packets and when IP protocol is not SCTP */ bool __skb_csum_offload_chk(struct sk_buff *skb, const struct skb_csum_offl_spec *spec, bool *csum_encapped, bool csum_help) { struct iphdr *iph; struct ipv6hdr *ipv6; void *nhdr; int protocol; u8 ip_proto; if (skb->protocol == htons(ETH_P_8021Q) || skb->protocol == htons(ETH_P_8021AD)) { if (!spec->vlan_okay) goto need_help; } /* We check whether the checksum refers to a transport layer checksum in * the outermost header or an encapsulated transport layer checksum that * corresponds to the inner headers of the skb. If the checksum is for * something else in the packet we need help. */ if (skb_checksum_start_offset(skb) == skb_transport_offset(skb)) { /* Non-encapsulated checksum */ protocol = eproto_to_ipproto(vlan_get_protocol(skb)); nhdr = skb_network_header(skb); *csum_encapped = false; if (spec->no_not_encapped) goto need_help; } else if (skb->encapsulation && spec->encap_okay && skb_checksum_start_offset(skb) == skb_inner_transport_offset(skb)) { /* Encapsulated checksum */ *csum_encapped = true; switch (skb->inner_protocol_type) { case ENCAP_TYPE_ETHER: protocol = eproto_to_ipproto(skb->inner_protocol); break; case ENCAP_TYPE_IPPROTO: protocol = skb->inner_protocol; break; } nhdr = skb_inner_network_header(skb); } else { goto need_help; } switch (protocol) { case IPPROTO_IP: if (!spec->ipv4_okay) goto need_help; iph = nhdr; ip_proto = iph->protocol; if (iph->ihl != 5 && !spec->ip_options_okay) goto need_help; break; case IPPROTO_IPV6: if (!spec->ipv6_okay) goto need_help; if (spec->no_encapped_ipv6 && *csum_encapped) goto need_help; ipv6 = nhdr; nhdr += sizeof(*ipv6); ip_proto = ipv6->nexthdr; break; default: goto need_help; } ip_proto_again: switch (ip_proto) { case IPPROTO_TCP: if (!spec->tcp_okay || skb->csum_offset != offsetof(struct tcphdr, check)) goto need_help; break; case IPPROTO_UDP: if (!spec->udp_okay || skb->csum_offset != offsetof(struct udphdr, check)) goto need_help; break; case IPPROTO_SCTP: if (!spec->sctp_okay || skb->csum_offset != offsetof(struct sctphdr, checksum)) goto cant_help; break; case NEXTHDR_HOP: case NEXTHDR_ROUTING: case NEXTHDR_DEST: { u8 *opthdr = nhdr; if (protocol != IPPROTO_IPV6 || !spec->ext_hdrs_okay) goto need_help; ip_proto = opthdr[0]; nhdr += (opthdr[1] + 1) << 3; goto ip_proto_again; } default: goto need_help; } /* Passed the tests for offloading checksum */ return true; need_help: if (csum_help && !skb_shinfo(skb)->gso_size) skb_checksum_help(skb); cant_help: return false; } EXPORT_SYMBOL(__skb_csum_offload_chk); __be16 skb_network_protocol(struct sk_buff *skb, int *depth) { __be16 type = skb->protocol; /* Tunnel gso handlers can set protocol to ethernet. */ if (type == htons(ETH_P_TEB)) { struct ethhdr *eth; if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) return 0; eth = (struct ethhdr *)skb_mac_header(skb); type = eth->h_proto; } return __vlan_get_protocol(skb, type, depth); } /** * skb_mac_gso_segment - mac layer segmentation handler. 
* @skb: buffer to segment * @features: features for the output path (see dev->features) */ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); struct packet_offload *ptype; int vlan_depth = skb->mac_len; __be16 type = skb_network_protocol(skb, &vlan_depth); if (unlikely(!type)) return ERR_PTR(-EINVAL); __skb_pull(skb, vlan_depth); rcu_read_lock(); list_for_each_entry_rcu(ptype, &offload_base, list) { if (ptype->type == type && ptype->callbacks.gso_segment) { segs = ptype->callbacks.gso_segment(skb, features); break; } } rcu_read_unlock(); __skb_push(skb, skb->data - skb_mac_header(skb)); return segs; } EXPORT_SYMBOL(skb_mac_gso_segment); /* openvswitch calls this on rx path, so we need a different check. */ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) { if (tx_path) return skb->ip_summed != CHECKSUM_PARTIAL; else return skb->ip_summed == CHECKSUM_NONE; } /** * __skb_gso_segment - Perform segmentation on skb. * @skb: buffer to segment * @features: features for the output path (see dev->features) * @tx_path: whether it is called in TX path * * This function segments the given skb and returns a list of segments. * * It may return NULL if the skb requires no segmentation. This is * only possible when GSO is used for verifying header integrity. * * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb. */ struct sk_buff *__skb_gso_segment(struct sk_buff *skb, netdev_features_t features, bool tx_path) { if (unlikely(skb_needs_check(skb, tx_path))) { int err; skb_warn_bad_offload(skb); err = skb_cow_head(skb, 0); if (err < 0) return ERR_PTR(err); } BUILD_BUG_ON(SKB_SGO_CB_OFFSET + sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb)); SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); SKB_GSO_CB(skb)->encap_level = 0; skb_reset_mac_header(skb); skb_reset_mac_len(skb); return skb_mac_gso_segment(skb, features); } EXPORT_SYMBOL(__skb_gso_segment); /* Take action when hardware reception checksum errors are detected. */ #ifdef CONFIG_BUG void netdev_rx_csum_fault(struct net_device *dev) { if (net_ratelimit()) { pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>"); dump_stack(); } } EXPORT_SYMBOL(netdev_rx_csum_fault); #endif /* Actually, we should eliminate this check as soon as we know, that: * 1. IOMMU is present and allows to map all the memory. * 2. No high memory really exists on this machine. */ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) { #ifdef CONFIG_HIGHMEM int i; if (!(dev->features & NETIF_F_HIGHDMA)) { for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; if (PageHighMem(skb_frag_page(frag))) return 1; } } if (PCI_DMA_BUS_IS_PHYS) { struct device *pdev = dev->dev.parent; if (!pdev) return 0; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; dma_addr_t addr = page_to_phys(skb_frag_page(frag)); if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask) return 1; } } #endif return 0; } /* If MPLS offload request, verify we are testing hardware MPLS features * instead of standard features for the netdev. 
*/ #if IS_ENABLED(CONFIG_NET_MPLS_GSO) static netdev_features_t net_mpls_features(struct sk_buff *skb, netdev_features_t features, __be16 type) { if (eth_p_mpls(type)) features &= skb->dev->mpls_features; return features; } #else static netdev_features_t net_mpls_features(struct sk_buff *skb, netdev_features_t features, __be16 type) { return features; } #endif static netdev_features_t harmonize_features(struct sk_buff *skb, netdev_features_t features) { int tmp; __be16 type; type = skb_network_protocol(skb, &tmp); features = net_mpls_features(skb, features, type); if (skb->ip_summed != CHECKSUM_NONE && !can_checksum_protocol(features, type)) { features &= ~NETIF_F_CSUM_MASK; } else if (illegal_highdma(skb->dev, skb)) { features &= ~NETIF_F_SG; } return features; } netdev_features_t passthru_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { return features; } EXPORT_SYMBOL(passthru_features_check); static netdev_features_t dflt_features_check(const struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { return vlan_features_check(skb, features); } netdev_features_t netif_skb_features(struct sk_buff *skb) { struct net_device *dev = skb->dev; netdev_features_t features = dev->features; u16 gso_segs = skb_shinfo(skb)->gso_segs; if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs) features &= ~NETIF_F_GSO_MASK; /* If encapsulation offload request, verify we are testing * hardware encapsulation features instead of standard * features for the netdev */ if (skb->encapsulation) features &= dev->hw_enc_features; if (skb_vlan_tagged(skb)) features = netdev_intersect_features(features, dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); if (dev->netdev_ops->ndo_features_check) features &= dev->netdev_ops->ndo_features_check(skb, dev, features); else features &= dflt_features_check(skb, dev, features); return harmonize_features(skb, features); } EXPORT_SYMBOL(netif_skb_features); static int xmit_one(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, bool more) { unsigned int len; int rc; if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all)) dev_queue_xmit_nit(skb, dev); len = skb->len; trace_net_dev_start_xmit(skb, dev); rc = netdev_start_xmit(skb, dev, txq, more); trace_net_dev_xmit(skb, rc, dev, len); return rc; } struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev, struct netdev_queue *txq, int *ret) { struct sk_buff *skb = first; int rc = NETDEV_TX_OK; while (skb) { struct sk_buff *next = skb->next; skb->next = NULL; rc = xmit_one(skb, dev, txq, next != NULL); if (unlikely(!dev_xmit_complete(rc))) { skb->next = next; goto out; } skb = next; if (netif_xmit_stopped(txq) && skb) { rc = NETDEV_TX_BUSY; break; } } out: *ret = rc; return skb; } static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, netdev_features_t features) { if (skb_vlan_tag_present(skb) && !vlan_hw_offload_capable(features, skb->vlan_proto)) skb = __vlan_hwaccel_push_inside(skb); return skb; } static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev) { netdev_features_t features; if (skb->next) return skb; features = netif_skb_features(skb); skb = validate_xmit_vlan(skb, features); if (unlikely(!skb)) goto out_null; if (netif_needs_gso(skb, features)) { struct sk_buff *segs; segs = skb_gso_segment(skb, features); if (IS_ERR(segs)) { goto out_kfree_skb; } else if (segs) { consume_skb(skb); skb = segs; } } else { if (skb_needs_linearize(skb, features) 
&& __skb_linearize(skb)) goto out_kfree_skb; /* If packet is not checksummed and device does not * support checksumming for this protocol, complete * checksumming here. */ if (skb->ip_summed == CHECKSUM_PARTIAL) { if (skb->encapsulation) skb_set_inner_transport_header(skb, skb_checksum_start_offset(skb)); else skb_set_transport_header(skb, skb_checksum_start_offset(skb)); if (!(features & NETIF_F_CSUM_MASK) && skb_checksum_help(skb)) goto out_kfree_skb; } } return skb; out_kfree_skb: kfree_skb(skb); out_null: return NULL; } struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev) { struct sk_buff *next, *head = NULL, *tail; for (; skb != NULL; skb = next) { next = skb->next; skb->next = NULL; /* in case skb wont be segmented, point to itself */ skb->prev = skb; skb = validate_xmit_skb(skb, dev); if (!skb) continue; if (!head) head = skb; else tail->next = skb; /* If skb was segmented, skb->prev points to * the last segment. If not, it still contains skb. */ tail = skb->prev; } return head; } static void qdisc_pkt_len_init(struct sk_buff *skb) { const struct skb_shared_info *shinfo = skb_shinfo(skb); qdisc_skb_cb(skb)->pkt_len = skb->len; /* To get more precise estimation of bytes sent on wire, * we add to pkt_len the headers size of all segments */ if (shinfo->gso_size) { unsigned int hdr_len; u16 gso_segs = shinfo->gso_segs; /* mac layer + network layer */ hdr_len = skb_transport_header(skb) - skb_mac_header(skb); /* + transport layer */ if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) hdr_len += tcp_hdrlen(skb); else hdr_len += sizeof(struct udphdr); if (shinfo->gso_type & SKB_GSO_DODGY) gso_segs = DIV_ROUND_UP(skb->len - hdr_len, shinfo->gso_size); qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len; } } static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, struct net_device *dev, struct netdev_queue *txq) { spinlock_t *root_lock = qdisc_lock(q); bool contended; int rc; qdisc_calculate_pkt_len(skb, q); /* * Heuristic to force contended enqueues to serialize on a * separate lock before trying to get qdisc main lock. * This permits __QDISC___STATE_RUNNING owner to get the lock more * often and dequeue packets faster. */ contended = qdisc_is_running(q); if (unlikely(contended)) spin_lock(&q->busylock); spin_lock(root_lock); if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { kfree_skb(skb); rc = NET_XMIT_DROP; } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && qdisc_run_begin(q)) { /* * This is a work-conserving queue; there are no old skbs * waiting to be sent out; and the qdisc is not running - * xmit the skb directly. 
*/ qdisc_bstats_update(q, skb); if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) { if (unlikely(contended)) { spin_unlock(&q->busylock); contended = false; } __qdisc_run(q); } else qdisc_run_end(q); rc = NET_XMIT_SUCCESS; } else { rc = q->enqueue(skb, q) & NET_XMIT_MASK; if (qdisc_run_begin(q)) { if (unlikely(contended)) { spin_unlock(&q->busylock); contended = false; } __qdisc_run(q); } } spin_unlock(root_lock); if (unlikely(contended)) spin_unlock(&q->busylock); return rc; } #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) static void skb_update_prio(struct sk_buff *skb) { struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap); if (!skb->priority && skb->sk && map) { unsigned int prioidx = sock_cgroup_prioidx(&skb->sk->sk_cgrp_data); if (prioidx < map->priomap_len) skb->priority = map->priomap[prioidx]; } } #else #define skb_update_prio(skb) #endif DEFINE_PER_CPU(int, xmit_recursion); EXPORT_SYMBOL(xmit_recursion); #define RECURSION_LIMIT 10 /** * dev_loopback_xmit - loop back @skb * @net: network namespace this loopback is happening in * @sk: sk needed to be a netfilter okfn * @skb: buffer to transmit */ int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) { skb_reset_mac_header(skb); __skb_pull(skb, skb_network_offset(skb)); skb->pkt_type = PACKET_LOOPBACK; skb->ip_summed = CHECKSUM_UNNECESSARY; WARN_ON(!skb_dst(skb)); skb_dst_force(skb); netif_rx_ni(skb); return 0; } EXPORT_SYMBOL(dev_loopback_xmit); #ifdef CONFIG_NET_EGRESS static struct sk_buff * sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) { struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list); struct tcf_result cl_res; if (!cl) return skb; /* skb->tc_verd and qdisc_skb_cb(skb)->pkt_len were already set * earlier by the caller. */ qdisc_bstats_cpu_update(cl->q, skb); switch (tc_classify(skb, cl, &cl_res, false)) { case TC_ACT_OK: case TC_ACT_RECLASSIFY: skb->tc_index = TC_H_MIN(cl_res.classid); break; case TC_ACT_SHOT: qdisc_qstats_cpu_drop(cl->q); *ret = NET_XMIT_DROP; goto drop; case TC_ACT_STOLEN: case TC_ACT_QUEUED: *ret = NET_XMIT_SUCCESS; drop: kfree_skb(skb); return NULL; case TC_ACT_REDIRECT: /* No need to push/pop skb's mac_header here on egress! 
*/ skb_do_redirect(skb); *ret = NET_XMIT_SUCCESS; return NULL; default: break; } return skb; } #endif /* CONFIG_NET_EGRESS */ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) { #ifdef CONFIG_XPS struct xps_dev_maps *dev_maps; struct xps_map *map; int queue_index = -1; rcu_read_lock(); dev_maps = rcu_dereference(dev->xps_maps); if (dev_maps) { map = rcu_dereference( dev_maps->cpu_map[skb->sender_cpu - 1]); if (map) { if (map->len == 1) queue_index = map->queues[0]; else queue_index = map->queues[reciprocal_scale(skb_get_hash(skb), map->len)]; if (unlikely(queue_index >= dev->real_num_tx_queues)) queue_index = -1; } } rcu_read_unlock(); return queue_index; #else return -1; #endif } static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) { struct sock *sk = skb->sk; int queue_index = sk_tx_queue_get(sk); if (queue_index < 0 || skb->ooo_okay || queue_index >= dev->real_num_tx_queues) { int new_index = get_xps_queue(dev, skb); if (new_index < 0) new_index = skb_tx_hash(dev, skb); if (queue_index != new_index && sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache)) sk_tx_queue_set(sk, new_index); queue_index = new_index; } return queue_index; } struct netdev_queue *netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, void *accel_priv) { int queue_index = 0; #ifdef CONFIG_XPS u32 sender_cpu = skb->sender_cpu - 1; if (sender_cpu >= (u32)NR_CPUS) skb->sender_cpu = raw_smp_processor_id() + 1; #endif if (dev->real_num_tx_queues != 1) { const struct net_device_ops *ops = dev->netdev_ops; if (ops->ndo_select_queue) queue_index = ops->ndo_select_queue(dev, skb, accel_priv, __netdev_pick_tx); else queue_index = __netdev_pick_tx(dev, skb); if (!accel_priv) queue_index = netdev_cap_txqueue(dev, queue_index); } skb_set_queue_mapping(skb, queue_index); return netdev_get_tx_queue(dev, queue_index); } /** * __dev_queue_xmit - transmit a buffer * @skb: buffer to transmit * @accel_priv: private data used for L2 forwarding offload * * Queue a buffer for transmission to a network device. The caller must * have set the device and priority and built the buffer before calling * this function. The function can be called from an interrupt. * * A negative errno code is returned on a failure. A success does not * guarantee the frame will be transmitted as it may be dropped due * to congestion or traffic shaping. * * ----------------------------------------------------------------------------------- * I notice this method can also return errors from the queue disciplines, * including NET_XMIT_DROP, which is a positive value. So, errors can also * be positive. * * Regardless of the return value, the skb is consumed, so it is currently * difficult to retry a send to this method. (You can bump the ref count * before sending to hold a reference for retry if you are careful.) * * When calling this method, interrupts MUST be enabled. This is because * the BH enable code must have IRQs enabled so that it will not deadlock. * --BLG */ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) { struct net_device *dev = skb->dev; struct netdev_queue *txq; struct Qdisc *q; int rc = -ENOMEM; skb_reset_mac_header(skb); if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP)) __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED); /* Disable soft irqs for various locks below. Also * stops preemption for RCU. 
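 *
 * (rcu_read_lock_bh() below provides both: it disables bottom halves and
 * opens an RCU read-side critical section covering the qdisc and device
 * state dereferenced on this path.)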
*/ rcu_read_lock_bh(); skb_update_prio(skb); qdisc_pkt_len_init(skb); #ifdef CONFIG_NET_CLS_ACT skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS); # ifdef CONFIG_NET_EGRESS if (static_key_false(&egress_needed)) { skb = sch_handle_egress(skb, &rc, dev); if (!skb) goto out; } # endif #endif /* If device/qdisc don't need skb->dst, release it right now while * its hot in this cpu cache. */ if (dev->priv_flags & IFF_XMIT_DST_RELEASE) skb_dst_drop(skb); else skb_dst_force(skb); #ifdef CONFIG_NET_SWITCHDEV /* Don't forward if offload device already forwarded */ if (skb->offload_fwd_mark && skb->offload_fwd_mark == dev->offload_fwd_mark) { consume_skb(skb); rc = NET_XMIT_SUCCESS; goto out; } #endif txq = netdev_pick_tx(dev, skb, accel_priv); q = rcu_dereference_bh(txq->qdisc); trace_net_dev_queue(skb); if (q->enqueue) { rc = __dev_xmit_skb(skb, q, dev, txq); goto out; } /* The device has no queue. Common case for software devices: loopback, all the sorts of tunnels... Really, it is unlikely that netif_tx_lock protection is necessary here. (f.e. loopback and IP tunnels are clean ignoring statistics counters.) However, it is possible, that they rely on protection made by us here. Check this and shot the lock. It is not prone from deadlocks. Either shot noqueue qdisc, it is even simpler 8) */ if (dev->flags & IFF_UP) { int cpu = smp_processor_id(); /* ok because BHs are off */ if (txq->xmit_lock_owner != cpu) { if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) goto recursion_alert; skb = validate_xmit_skb(skb, dev); if (!skb) goto drop; HARD_TX_LOCK(dev, txq, cpu); if (!netif_xmit_stopped(txq)) { __this_cpu_inc(xmit_recursion); skb = dev_hard_start_xmit(skb, dev, txq, &rc); __this_cpu_dec(xmit_recursion); if (dev_xmit_complete(rc)) { HARD_TX_UNLOCK(dev, txq); goto out; } } HARD_TX_UNLOCK(dev, txq); net_crit_ratelimited("Virtual device %s asks to queue packet!\n", dev->name); } else { /* Recursion is detected! It is possible, * unfortunately */ recursion_alert: net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", dev->name); } } rc = -ENETDOWN; drop: rcu_read_unlock_bh(); atomic_long_inc(&dev->tx_dropped); kfree_skb_list(skb); return rc; out: rcu_read_unlock_bh(); return rc; } int dev_queue_xmit(struct sk_buff *skb) { return __dev_queue_xmit(skb, NULL); } EXPORT_SYMBOL(dev_queue_xmit); int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv) { return __dev_queue_xmit(skb, accel_priv); } EXPORT_SYMBOL(dev_queue_xmit_accel); /*======================================================================= Receiver routines =======================================================================*/ int netdev_max_backlog __read_mostly = 1000; EXPORT_SYMBOL(netdev_max_backlog); int netdev_tstamp_prequeue __read_mostly = 1; int netdev_budget __read_mostly = 300; int weight_p __read_mostly = 64; /* old backlog weight */ /* Called with irq disabled */ static inline void ____napi_schedule(struct softnet_data *sd, struct napi_struct *napi) { list_add_tail(&napi->poll_list, &sd->poll_list); __raise_softirq_irqoff(NET_RX_SOFTIRQ); } #ifdef CONFIG_RPS /* One global table that all flow-based protocols share. 
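 *
 * Each entry maps a flow hash to the CPU on which the owning thread last
 * ran recvmsg(), so RFS can steer packets of that flow towards the CPU
 * that will consume them; rps_cpu_mask extracts the CPU bits of an entry.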
*/ struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; EXPORT_SYMBOL(rps_sock_flow_table); u32 rps_cpu_mask __read_mostly; EXPORT_SYMBOL(rps_cpu_mask); struct static_key rps_needed __read_mostly; static struct rps_dev_flow * set_rps_cpu(struct net_device *dev, struct sk_buff *skb, struct rps_dev_flow *rflow, u16 next_cpu) { if (next_cpu < nr_cpu_ids) { #ifdef CONFIG_RFS_ACCEL struct netdev_rx_queue *rxqueue; struct rps_dev_flow_table *flow_table; struct rps_dev_flow *old_rflow; u32 flow_id; u16 rxq_index; int rc; /* Should we steer this flow to a different hardware queue? */ if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || !(dev->features & NETIF_F_NTUPLE)) goto out; rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); if (rxq_index == skb_get_rx_queue(skb)) goto out; rxqueue = dev->_rx + rxq_index; flow_table = rcu_dereference(rxqueue->rps_flow_table); if (!flow_table) goto out; flow_id = skb_get_hash(skb) & flow_table->mask; rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, rxq_index, flow_id); if (rc < 0) goto out; old_rflow = rflow; rflow = &flow_table->flows[flow_id]; rflow->filter = rc; if (old_rflow->filter == rflow->filter) old_rflow->filter = RPS_NO_FILTER; out: #endif rflow->last_qtail = per_cpu(softnet_data, next_cpu).input_queue_head; } rflow->cpu = next_cpu; return rflow; } /* * get_rps_cpu is called from netif_receive_skb and returns the target * CPU from the RPS map of the receiving queue for a given skb. * rcu_read_lock must be held on entry. */ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, struct rps_dev_flow **rflowp) { const struct rps_sock_flow_table *sock_flow_table; struct netdev_rx_queue *rxqueue = dev->_rx; struct rps_dev_flow_table *flow_table; struct rps_map *map; int cpu = -1; u32 tcpu; u32 hash; if (skb_rx_queue_recorded(skb)) { u16 index = skb_get_rx_queue(skb); if (unlikely(index >= dev->real_num_rx_queues)) { WARN_ONCE(dev->real_num_rx_queues > 1, "%s received packet on queue %u, but number " "of RX queues is %u\n", dev->name, index, dev->real_num_rx_queues); goto done; } rxqueue += index; } /* Avoid computing hash if RFS/RPS is not active for this rxqueue */ flow_table = rcu_dereference(rxqueue->rps_flow_table); map = rcu_dereference(rxqueue->rps_map); if (!flow_table && !map) goto done; skb_reset_network_header(skb); hash = skb_get_hash(skb); if (!hash) goto done; sock_flow_table = rcu_dereference(rps_sock_flow_table); if (flow_table && sock_flow_table) { struct rps_dev_flow *rflow; u32 next_cpu; u32 ident; /* First check into global flow table if there is a match */ ident = sock_flow_table->ents[hash & sock_flow_table->mask]; if ((ident ^ hash) & ~rps_cpu_mask) goto try_rps; next_cpu = ident & rps_cpu_mask; /* OK, now we know there is a match, * we can look at the local (per receive queue) flow table */ rflow = &flow_table->flows[hash & flow_table->mask]; tcpu = rflow->cpu; /* * If the desired CPU (where last recvmsg was done) is * different from current CPU (one in the rx-queue flow * table entry), switch if one of the following holds: * - Current CPU is unset (>= nr_cpu_ids). * - Current CPU is offline. * - The current CPU's queue tail has advanced beyond the * last packet that was enqueued using this table entry. * This guarantees that all previous packets for the flow * have been dequeued, thus preserving in order delivery. 
*/ if (unlikely(tcpu != next_cpu) && (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || ((int)(per_cpu(softnet_data, tcpu).input_queue_head - rflow->last_qtail)) >= 0)) { tcpu = next_cpu; rflow = set_rps_cpu(dev, skb, rflow, next_cpu); } if (tcpu < nr_cpu_ids && cpu_online(tcpu)) { *rflowp = rflow; cpu = tcpu; goto done; } } try_rps: if (map) { tcpu = map->cpus[reciprocal_scale(hash, map->len)]; if (cpu_online(tcpu)) { cpu = tcpu; goto done; } } done: return cpu; } #ifdef CONFIG_RFS_ACCEL /** * rps_may_expire_flow - check whether an RFS hardware filter may be removed * @dev: Device on which the filter was set * @rxq_index: RX queue index * @flow_id: Flow ID passed to ndo_rx_flow_steer() * @filter_id: Filter ID returned by ndo_rx_flow_steer() * * Drivers that implement ndo_rx_flow_steer() should periodically call * this function for each installed filter and remove the filters for * which it returns %true. */ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id, u16 filter_id) { struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; struct rps_dev_flow_table *flow_table; struct rps_dev_flow *rflow; bool expire = true; unsigned int cpu; rcu_read_lock(); flow_table = rcu_dereference(rxqueue->rps_flow_table); if (flow_table && flow_id <= flow_table->mask) { rflow = &flow_table->flows[flow_id]; cpu = ACCESS_ONCE(rflow->cpu); if (rflow->filter == filter_id && cpu < nr_cpu_ids && ((int)(per_cpu(softnet_data, cpu).input_queue_head - rflow->last_qtail) < (int)(10 * flow_table->mask))) expire = false; } rcu_read_unlock(); return expire; } EXPORT_SYMBOL(rps_may_expire_flow); #endif /* CONFIG_RFS_ACCEL */ /* Called from hardirq (IPI) context */ static void rps_trigger_softirq(void *data) { struct softnet_data *sd = data; ____napi_schedule(sd, &sd->backlog); sd->received_rps++; } #endif /* CONFIG_RPS */ /* * Check if this softnet_data structure is another cpu one * If yes, queue it to our IPI list and return 1 * If no, return 0 */ static int rps_ipi_queued(struct softnet_data *sd) { #ifdef CONFIG_RPS struct softnet_data *mysd = this_cpu_ptr(&softnet_data); if (sd != mysd) { sd->rps_ipi_next = mysd->rps_ipi_list; mysd->rps_ipi_list = sd; __raise_softirq_irqoff(NET_RX_SOFTIRQ); return 1; } #endif /* CONFIG_RPS */ return 0; } #ifdef CONFIG_NET_FLOW_LIMIT int netdev_flow_limit_table_len __read_mostly = (1 << 12); #endif static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) { #ifdef CONFIG_NET_FLOW_LIMIT struct sd_flow_limit *fl; struct softnet_data *sd; unsigned int old_flow, new_flow; if (qlen < (netdev_max_backlog >> 1)) return false; sd = this_cpu_ptr(&softnet_data); rcu_read_lock(); fl = rcu_dereference(sd->flow_limit); if (fl) { new_flow = skb_get_hash(skb) & (fl->num_buckets - 1); old_flow = fl->history[fl->history_head]; fl->history[fl->history_head] = new_flow; fl->history_head++; fl->history_head &= FLOW_LIMIT_HISTORY - 1; if (likely(fl->buckets[old_flow])) fl->buckets[old_flow]--; if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) { fl->count++; rcu_read_unlock(); return true; } } rcu_read_unlock(); #endif return false; } /* * enqueue_to_backlog is called to queue an skb to a per CPU backlog * queue (may be a remote CPU queue). 
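 *
 * Returns NET_RX_SUCCESS if the skb was queued, or NET_RX_DROP if the
 * target device is not running, the backlog is already full, or the
 * flow-limit heuristic rejected the packet.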
*/ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, unsigned int *qtail) { struct softnet_data *sd; unsigned long flags; unsigned int qlen; sd = &per_cpu(softnet_data, cpu); local_irq_save(flags); rps_lock(sd); if (!netif_running(skb->dev)) goto drop; qlen = skb_queue_len(&sd->input_pkt_queue); if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { if (qlen) { enqueue: __skb_queue_tail(&sd->input_pkt_queue, skb); input_queue_tail_incr_save(sd, qtail); rps_unlock(sd); local_irq_restore(flags); return NET_RX_SUCCESS; } /* Schedule NAPI for backlog device * We can use non atomic operation since we own the queue lock */ if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) { if (!rps_ipi_queued(sd)) ____napi_schedule(sd, &sd->backlog); } goto enqueue; } drop: sd->dropped++; rps_unlock(sd); local_irq_restore(flags); atomic_long_inc(&skb->dev->rx_dropped); kfree_skb(skb); return NET_RX_DROP; } static int netif_rx_internal(struct sk_buff *skb) { int ret; net_timestamp_check(netdev_tstamp_prequeue, skb); trace_netif_rx(skb); #ifdef CONFIG_RPS if (static_key_false(&rps_needed)) { struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu; preempt_disable(); rcu_read_lock(); cpu = get_rps_cpu(skb->dev, skb, &rflow); if (cpu < 0) cpu = smp_processor_id(); ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); rcu_read_unlock(); preempt_enable(); } else #endif { unsigned int qtail; ret = enqueue_to_backlog(skb, get_cpu(), &qtail); put_cpu(); } return ret; } /** * netif_rx - post buffer to the network code * @skb: buffer to post * * This function receives a packet from a device driver and queues it for * the upper (protocol) levels to process. It always succeeds. The buffer * may be dropped during processing for congestion control or by the * protocol layers. 
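 *
 *	A minimal sketch of a non-NAPI driver receive path (my_build_rx_skb()
 *	is a hypothetical driver helper, not a kernel API):
 *
 *		skb = my_build_rx_skb(priv, data, len);
 *		skb->protocol = eth_type_trans(skb, netdev);
 *		netif_rx(skb);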
* * return values: * NET_RX_SUCCESS (no congestion) * NET_RX_DROP (packet was dropped) * */ int netif_rx(struct sk_buff *skb) { trace_netif_rx_entry(skb); return netif_rx_internal(skb); } EXPORT_SYMBOL(netif_rx); int netif_rx_ni(struct sk_buff *skb) { int err; trace_netif_rx_ni_entry(skb); preempt_disable(); err = netif_rx_internal(skb); if (local_softirq_pending()) do_softirq(); preempt_enable(); return err; } EXPORT_SYMBOL(netif_rx_ni); static void net_tx_action(struct softirq_action *h) { struct softnet_data *sd = this_cpu_ptr(&softnet_data); if (sd->completion_queue) { struct sk_buff *clist; local_irq_disable(); clist = sd->completion_queue; sd->completion_queue = NULL; local_irq_enable(); while (clist) { struct sk_buff *skb = clist; clist = clist->next; WARN_ON(atomic_read(&skb->users)); if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED)) trace_consume_skb(skb); else trace_kfree_skb(skb, net_tx_action); if (skb->fclone != SKB_FCLONE_UNAVAILABLE) __kfree_skb(skb); else __kfree_skb_defer(skb); } __kfree_skb_flush(); } if (sd->output_queue) { struct Qdisc *head; local_irq_disable(); head = sd->output_queue; sd->output_queue = NULL; sd->output_queue_tailp = &sd->output_queue; local_irq_enable(); while (head) { struct Qdisc *q = head; spinlock_t *root_lock; head = head->next_sched; root_lock = qdisc_lock(q); if (spin_trylock(root_lock)) { smp_mb__before_atomic(); clear_bit(__QDISC_STATE_SCHED, &q->state); qdisc_run(q); spin_unlock(root_lock); } else { if (!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)) { __netif_reschedule(q); } else { smp_mb__before_atomic(); clear_bit(__QDISC_STATE_SCHED, &q->state); } } } } } #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \ (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)) /* This hook is defined here for ATM LANE */ int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr) __read_mostly; EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); #endif static inline struct sk_buff * sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, struct net_device *orig_dev) { #ifdef CONFIG_NET_CLS_ACT struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list); struct tcf_result cl_res; /* If there's at least one ingress present somewhere (so * we get here via enabled static key), remaining devices * that are not configured with an ingress qdisc will bail * out here. */ if (!cl) return skb; if (*pt_prev) { *ret = deliver_skb(skb, *pt_prev, orig_dev); *pt_prev = NULL; } qdisc_skb_cb(skb)->pkt_len = skb->len; skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS); qdisc_bstats_cpu_update(cl->q, skb); switch (tc_classify(skb, cl, &cl_res, false)) { case TC_ACT_OK: case TC_ACT_RECLASSIFY: skb->tc_index = TC_H_MIN(cl_res.classid); break; case TC_ACT_SHOT: qdisc_qstats_cpu_drop(cl->q); case TC_ACT_STOLEN: case TC_ACT_QUEUED: kfree_skb(skb); return NULL; case TC_ACT_REDIRECT: /* skb_mac_header check was done by cls/act_bpf, so * we can safely push the L2 header back before * redirecting to another netdev */ __skb_push(skb, skb->mac_len); skb_do_redirect(skb); return NULL; default: break; } #endif /* CONFIG_NET_CLS_ACT */ return skb; } /** * netdev_rx_handler_register - register receive handler * @dev: device to register a handler for * @rx_handler: receive handler to register * @rx_handler_data: data pointer that is used by rx handler * * Register a receive handler for a device. This handler will then be * called from __netif_receive_skb. A negative errno code is returned * on a failure. 
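 *
 *	Illustrative registration from a master/bridge-style driver; the
 *	my_handle_frame() callback and the my_port cookie are placeholders:
 *
 *		err = netdev_rx_handler_register(slave_dev, my_handle_frame,
 *						 my_port);
 *		if (err)
 *			goto unwind;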
* * The caller must hold the rtnl_mutex. * * For a general description of rx_handler, see enum rx_handler_result. */ int netdev_rx_handler_register(struct net_device *dev, rx_handler_func_t *rx_handler, void *rx_handler_data) { ASSERT_RTNL(); if (dev->rx_handler) return -EBUSY; /* Note: rx_handler_data must be set before rx_handler */ rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); rcu_assign_pointer(dev->rx_handler, rx_handler); return 0; } EXPORT_SYMBOL_GPL(netdev_rx_handler_register); /** * netdev_rx_handler_unregister - unregister receive handler * @dev: device to unregister a handler from * * Unregister a receive handler from a device. * * The caller must hold the rtnl_mutex. */ void netdev_rx_handler_unregister(struct net_device *dev) { ASSERT_RTNL(); RCU_INIT_POINTER(dev->rx_handler, NULL); /* a reader seeing a non NULL rx_handler in a rcu_read_lock() * section has a guarantee to see a non NULL rx_handler_data * as well. */ synchronize_net(); RCU_INIT_POINTER(dev->rx_handler_data, NULL); } EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); /* * Limit the use of PFMEMALLOC reserves to those protocols that implement * the special handling of PFMEMALLOC skbs. */ static bool skb_pfmemalloc_protocol(struct sk_buff *skb) { switch (skb->protocol) { case htons(ETH_P_ARP): case htons(ETH_P_IP): case htons(ETH_P_IPV6): case htons(ETH_P_8021Q): case htons(ETH_P_8021AD): return true; default: return false; } } static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, struct net_device *orig_dev) { #ifdef CONFIG_NETFILTER_INGRESS if (nf_hook_ingress_active(skb)) { if (*pt_prev) { *ret = deliver_skb(skb, *pt_prev, orig_dev); *pt_prev = NULL; } return nf_hook_ingress(skb); } #endif /* CONFIG_NETFILTER_INGRESS */ return 0; } static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc) { struct packet_type *ptype, *pt_prev; rx_handler_func_t *rx_handler; struct net_device *orig_dev; bool deliver_exact = false; int ret = NET_RX_DROP; __be16 type; net_timestamp_check(!netdev_tstamp_prequeue, skb); trace_netif_receive_skb(skb); orig_dev = skb->dev; skb_reset_network_header(skb); if (!skb_transport_header_was_set(skb)) skb_reset_transport_header(skb); skb_reset_mac_len(skb); pt_prev = NULL; another_round: skb->skb_iif = skb->dev->ifindex; __this_cpu_inc(softnet_data.processed); if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || skb->protocol == cpu_to_be16(ETH_P_8021AD)) { skb = skb_vlan_untag(skb); if (unlikely(!skb)) goto out; } #ifdef CONFIG_NET_CLS_ACT if (skb->tc_verd & TC_NCLS) { skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); goto ncls; } #endif if (pfmemalloc) goto skip_taps; list_for_each_entry_rcu(ptype, &ptype_all, list) { if (pt_prev) ret = deliver_skb(skb, pt_prev, orig_dev); pt_prev = ptype; } list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) { if (pt_prev) ret = deliver_skb(skb, pt_prev, orig_dev); pt_prev = ptype; } skip_taps: #ifdef CONFIG_NET_INGRESS if (static_key_false(&ingress_needed)) { skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev); if (!skb) goto out; if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) goto out; } #endif #ifdef CONFIG_NET_CLS_ACT skb->tc_verd = 0; ncls: #endif if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) goto drop; if (skb_vlan_tag_present(skb)) { if (pt_prev) { ret = deliver_skb(skb, pt_prev, orig_dev); pt_prev = NULL; } if (vlan_do_receive(&skb)) goto another_round; else if (unlikely(!skb)) goto out; } rx_handler = rcu_dereference(skb->dev->rx_handler); if (rx_handler) { if (pt_prev) { ret = 
deliver_skb(skb, pt_prev, orig_dev); pt_prev = NULL; } switch (rx_handler(&skb)) { case RX_HANDLER_CONSUMED: ret = NET_RX_SUCCESS; goto out; case RX_HANDLER_ANOTHER: goto another_round; case RX_HANDLER_EXACT: deliver_exact = true; case RX_HANDLER_PASS: break; default: BUG(); } } if (unlikely(skb_vlan_tag_present(skb))) { if (skb_vlan_tag_get_id(skb)) skb->pkt_type = PACKET_OTHERHOST; /* Note: we might in the future use prio bits * and set skb->priority like in vlan_do_receive() * For the time being, just ignore Priority Code Point */ skb->vlan_tci = 0; } type = skb->protocol; /* deliver only exact match when indicated */ if (likely(!deliver_exact)) { deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, &ptype_base[ntohs(type) & PTYPE_HASH_MASK]); } deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, &orig_dev->ptype_specific); if (unlikely(skb->dev != orig_dev)) { deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, &skb->dev->ptype_specific); } if (pt_prev) { if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) goto drop; else ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); } else { drop: if (!deliver_exact) atomic_long_inc(&skb->dev->rx_dropped); else atomic_long_inc(&skb->dev->rx_nohandler); kfree_skb(skb); /* Jamal, now you will not able to escape explaining * me how you were going to use this. :-) */ ret = NET_RX_DROP; } out: return ret; } static int __netif_receive_skb(struct sk_buff *skb) { int ret; if (sk_memalloc_socks() && skb_pfmemalloc(skb)) { unsigned long pflags = current->flags; /* * PFMEMALLOC skbs are special, they should * - be delivered to SOCK_MEMALLOC sockets only * - stay away from userspace * - have bounded memory usage * * Use PF_MEMALLOC as this saves us from propagating the allocation * context down to all allocation sites. */ current->flags |= PF_MEMALLOC; ret = __netif_receive_skb_core(skb, true); tsk_restore_flags(current, pflags, PF_MEMALLOC); } else ret = __netif_receive_skb_core(skb, false); return ret; } static int netif_receive_skb_internal(struct sk_buff *skb) { int ret; net_timestamp_check(netdev_tstamp_prequeue, skb); if (skb_defer_rx_timestamp(skb)) return NET_RX_SUCCESS; rcu_read_lock(); #ifdef CONFIG_RPS if (static_key_false(&rps_needed)) { struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu = get_rps_cpu(skb->dev, skb, &rflow); if (cpu >= 0) { ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); rcu_read_unlock(); return ret; } } #endif ret = __netif_receive_skb(skb); rcu_read_unlock(); return ret; } /** * netif_receive_skb - process receive buffer from network * @skb: buffer to process * * netif_receive_skb() is the main receive data processing function. * It always succeeds. The buffer may be dropped during processing * for congestion control or by the protocol layers. * * This function may only be called from softirq context and interrupts * should be enabled. * * Return values (usually ignored): * NET_RX_SUCCESS: no congestion * NET_RX_DROP: packet was dropped */ int netif_receive_skb(struct sk_buff *skb) { trace_netif_receive_skb_entry(skb); return netif_receive_skb_internal(skb); } EXPORT_SYMBOL(netif_receive_skb); /* Network device is going away, flush any packets still pending * Called with irqs disabled. 
*/ static void flush_backlog(void *arg) { struct net_device *dev = arg; struct softnet_data *sd = this_cpu_ptr(&softnet_data); struct sk_buff *skb, *tmp; rps_lock(sd); skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { if (skb->dev == dev) { __skb_unlink(skb, &sd->input_pkt_queue); kfree_skb(skb); input_queue_head_incr(sd); } } rps_unlock(sd); skb_queue_walk_safe(&sd->process_queue, skb, tmp) { if (skb->dev == dev) { __skb_unlink(skb, &sd->process_queue); kfree_skb(skb); input_queue_head_incr(sd); } } } static int napi_gro_complete(struct sk_buff *skb) { struct packet_offload *ptype; __be16 type = skb->protocol; struct list_head *head = &offload_base; int err = -ENOENT; BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb)); if (NAPI_GRO_CB(skb)->count == 1) { skb_shinfo(skb)->gso_size = 0; goto out; } rcu_read_lock(); list_for_each_entry_rcu(ptype, head, list) { if (ptype->type != type || !ptype->callbacks.gro_complete) continue; err = ptype->callbacks.gro_complete(skb, 0); break; } rcu_read_unlock(); if (err) { WARN_ON(&ptype->list == head); kfree_skb(skb); return NET_RX_SUCCESS; } out: return netif_receive_skb_internal(skb); } /* napi->gro_list contains packets ordered by age. * youngest packets at the head of it. * Complete skbs in reverse order to reduce latencies. */ void napi_gro_flush(struct napi_struct *napi, bool flush_old) { struct sk_buff *skb, *prev = NULL; /* scan list and build reverse chain */ for (skb = napi->gro_list; skb != NULL; skb = skb->next) { skb->prev = prev; prev = skb; } for (skb = prev; skb; skb = prev) { skb->next = NULL; if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) return; prev = skb->prev; napi_gro_complete(skb); napi->gro_count--; } napi->gro_list = NULL; } EXPORT_SYMBOL(napi_gro_flush); static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb) { struct sk_buff *p; unsigned int maclen = skb->dev->hard_header_len; u32 hash = skb_get_hash_raw(skb); for (p = napi->gro_list; p; p = p->next) { unsigned long diffs; NAPI_GRO_CB(p)->flush = 0; if (hash != skb_get_hash_raw(p)) { NAPI_GRO_CB(p)->same_flow = 0; continue; } diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; diffs |= p->vlan_tci ^ skb->vlan_tci; diffs |= skb_metadata_dst_cmp(p, skb); if (maclen == ETH_HLEN) diffs |= compare_ether_header(skb_mac_header(p), skb_mac_header(skb)); else if (!diffs) diffs = memcmp(skb_mac_header(p), skb_mac_header(skb), maclen); NAPI_GRO_CB(p)->same_flow = !diffs; } } static void skb_gro_reset_offset(struct sk_buff *skb) { const struct skb_shared_info *pinfo = skb_shinfo(skb); const skb_frag_t *frag0 = &pinfo->frags[0]; NAPI_GRO_CB(skb)->data_offset = 0; NAPI_GRO_CB(skb)->frag0 = NULL; NAPI_GRO_CB(skb)->frag0_len = 0; if (skb_mac_header(skb) == skb_tail_pointer(skb) && pinfo->nr_frags && !PageHighMem(skb_frag_page(frag0))) { NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0); } } static void gro_pull_from_frag0(struct sk_buff *skb, int grow) { struct skb_shared_info *pinfo = skb_shinfo(skb); BUG_ON(skb->end - skb->tail < grow); memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); skb->data_len -= grow; skb->tail += grow; pinfo->frags[0].page_offset += grow; skb_frag_size_sub(&pinfo->frags[0], grow); if (unlikely(!skb_frag_size(&pinfo->frags[0]))) { skb_frag_unref(skb, 0); memmove(pinfo->frags, pinfo->frags + 1, --pinfo->nr_frags * sizeof(pinfo->frags[0])); } } static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) { struct sk_buff **pp = NULL; struct 
packet_offload *ptype; __be16 type = skb->protocol; struct list_head *head = &offload_base; int same_flow; enum gro_result ret; int grow; if (!(skb->dev->features & NETIF_F_GRO)) goto normal; if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad) goto normal; gro_list_prepare(napi, skb); rcu_read_lock(); list_for_each_entry_rcu(ptype, head, list) { if (ptype->type != type || !ptype->callbacks.gro_receive) continue; skb_set_network_header(skb, skb_gro_offset(skb)); skb_reset_mac_len(skb); NAPI_GRO_CB(skb)->same_flow = 0; NAPI_GRO_CB(skb)->flush = 0; NAPI_GRO_CB(skb)->free = 0; NAPI_GRO_CB(skb)->udp_mark = 0; NAPI_GRO_CB(skb)->gro_remcsum_start = 0; /* Setup for GRO checksum validation */ switch (skb->ip_summed) { case CHECKSUM_COMPLETE: NAPI_GRO_CB(skb)->csum = skb->csum; NAPI_GRO_CB(skb)->csum_valid = 1; NAPI_GRO_CB(skb)->csum_cnt = 0; break; case CHECKSUM_UNNECESSARY: NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1; NAPI_GRO_CB(skb)->csum_valid = 0; break; default: NAPI_GRO_CB(skb)->csum_cnt = 0; NAPI_GRO_CB(skb)->csum_valid = 0; } pp = ptype->callbacks.gro_receive(&napi->gro_list, skb); break; } rcu_read_unlock(); if (&ptype->list == head) goto normal; same_flow = NAPI_GRO_CB(skb)->same_flow; ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED; if (pp) { struct sk_buff *nskb = *pp; *pp = nskb->next; nskb->next = NULL; napi_gro_complete(nskb); napi->gro_count--; } if (same_flow) goto ok; if (NAPI_GRO_CB(skb)->flush) goto normal; if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) { struct sk_buff *nskb = napi->gro_list; /* locate the end of the list to select the 'oldest' flow */ while (nskb->next) { pp = &nskb->next; nskb = *pp; } *pp = NULL; nskb->next = NULL; napi_gro_complete(nskb); } else { napi->gro_count++; } NAPI_GRO_CB(skb)->count = 1; NAPI_GRO_CB(skb)->age = jiffies; NAPI_GRO_CB(skb)->last = skb; skb_shinfo(skb)->gso_size = skb_gro_len(skb); skb->next = napi->gro_list; napi->gro_list = skb; ret = GRO_HELD; pull: grow = skb_gro_offset(skb) - skb_headlen(skb); if (grow > 0) gro_pull_from_frag0(skb, grow); ok: return ret; normal: ret = GRO_NORMAL; goto pull; } struct packet_offload *gro_find_receive_by_type(__be16 type) { struct list_head *offload_head = &offload_base; struct packet_offload *ptype; list_for_each_entry_rcu(ptype, offload_head, list) { if (ptype->type != type || !ptype->callbacks.gro_receive) continue; return ptype; } return NULL; } EXPORT_SYMBOL(gro_find_receive_by_type); struct packet_offload *gro_find_complete_by_type(__be16 type) { struct list_head *offload_head = &offload_base; struct packet_offload *ptype; list_for_each_entry_rcu(ptype, offload_head, list) { if (ptype->type != type || !ptype->callbacks.gro_complete) continue; return ptype; } return NULL; } EXPORT_SYMBOL(gro_find_complete_by_type); static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) { switch (ret) { case GRO_NORMAL: if (netif_receive_skb_internal(skb)) ret = GRO_DROP; break; case GRO_DROP: kfree_skb(skb); break; case GRO_MERGED_FREE: if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) { skb_dst_drop(skb); kmem_cache_free(skbuff_head_cache, skb); } else { __kfree_skb(skb); } break; case GRO_HELD: case GRO_MERGED: break; } return ret; } gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) { skb_mark_napi_id(skb, napi); trace_napi_gro_receive_entry(skb); skb_gro_reset_offset(skb); return napi_skb_finish(dev_gro_receive(napi, skb), skb); } EXPORT_SYMBOL(napi_gro_receive); static void napi_reuse_skb(struct napi_struct *napi, struct 
sk_buff *skb) { if (unlikely(skb->pfmemalloc)) { consume_skb(skb); return; } __skb_pull(skb, skb_headlen(skb)); /* restore the reserve we had after netdev_alloc_skb_ip_align() */ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); skb->vlan_tci = 0; skb->dev = napi->dev; skb->skb_iif = 0; skb->encapsulation = 0; skb_shinfo(skb)->gso_type = 0; skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); napi->skb = skb; } struct sk_buff *napi_get_frags(struct napi_struct *napi) { struct sk_buff *skb = napi->skb; if (!skb) { skb = napi_alloc_skb(napi, GRO_MAX_HEAD); if (skb) { napi->skb = skb; skb_mark_napi_id(skb, napi); } } return skb; } EXPORT_SYMBOL(napi_get_frags); static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, gro_result_t ret) { switch (ret) { case GRO_NORMAL: case GRO_HELD: __skb_push(skb, ETH_HLEN); skb->protocol = eth_type_trans(skb, skb->dev); if (ret == GRO_NORMAL && netif_receive_skb_internal(skb)) ret = GRO_DROP; break; case GRO_DROP: case GRO_MERGED_FREE: napi_reuse_skb(napi, skb); break; case GRO_MERGED: break; } return ret; } /* Upper GRO stack assumes network header starts at gro_offset=0 * Drivers could call both napi_gro_frags() and napi_gro_receive() * We copy ethernet header into skb->data to have a common layout. */ static struct sk_buff *napi_frags_skb(struct napi_struct *napi) { struct sk_buff *skb = napi->skb; const struct ethhdr *eth; unsigned int hlen = sizeof(*eth); napi->skb = NULL; skb_reset_mac_header(skb); skb_gro_reset_offset(skb); eth = skb_gro_header_fast(skb, 0); if (unlikely(skb_gro_header_hard(skb, hlen))) { eth = skb_gro_header_slow(skb, hlen, 0); if (unlikely(!eth)) { napi_reuse_skb(napi, skb); return NULL; } } else { gro_pull_from_frag0(skb, hlen); NAPI_GRO_CB(skb)->frag0 += hlen; NAPI_GRO_CB(skb)->frag0_len -= hlen; } __skb_pull(skb, hlen); /* * This works because the only protocols we care about don't require * special handling. * We'll fix it up properly in napi_frags_finish() */ skb->protocol = eth->h_proto; return skb; } gro_result_t napi_gro_frags(struct napi_struct *napi) { struct sk_buff *skb = napi_frags_skb(napi); if (!skb) return GRO_DROP; trace_napi_gro_frags_entry(skb); return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb)); } EXPORT_SYMBOL(napi_gro_frags); /* Compute the checksum from gro_offset and return the folded value * after adding in any pseudo checksum. */ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb) { __wsum wsum; __sum16 sum; wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0); /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */ sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum)); if (likely(!sum)) { if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && !skb->csum_complete_sw) netdev_rx_csum_fault(skb->dev); } NAPI_GRO_CB(skb)->csum = wsum; NAPI_GRO_CB(skb)->csum_valid = 1; return sum; } EXPORT_SYMBOL(__skb_gro_checksum_complete); /* * net_rps_action_and_irq_enable sends any pending IPI's for rps. * Note: called with local irq disabled, but exits with local irq enabled. */ static void net_rps_action_and_irq_enable(struct softnet_data *sd) { #ifdef CONFIG_RPS struct softnet_data *remsd = sd->rps_ipi_list; if (remsd) { sd->rps_ipi_list = NULL; local_irq_enable(); /* Send pending IPI's to kick RPS processing on remote cpus. 
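 *
 * (Each remote CPU's csd triggers rps_trigger_softirq() there, which
 * schedules that CPU's backlog NAPI so it processes the packets we have
 * queued for it.)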
*/ while (remsd) { struct softnet_data *next = remsd->rps_ipi_next; if (cpu_online(remsd->cpu)) smp_call_function_single_async(remsd->cpu, &remsd->csd); remsd = next; } } else #endif local_irq_enable(); }
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) { #ifdef CONFIG_RPS return sd->rps_ipi_list != NULL; #else return false; #endif }
static int process_backlog(struct napi_struct *napi, int quota) { int work = 0; struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); /* If we have pending IPIs, it's better to send them now rather than * waiting for net_rx_action() to end. */ if (sd_has_rps_ipi_waiting(sd)) { local_irq_disable(); net_rps_action_and_irq_enable(sd); } napi->weight = weight_p; local_irq_disable(); while (1) { struct sk_buff *skb; while ((skb = __skb_dequeue(&sd->process_queue))) { rcu_read_lock(); local_irq_enable(); __netif_receive_skb(skb); rcu_read_unlock(); local_irq_disable(); input_queue_head_incr(sd); if (++work >= quota) { local_irq_enable(); return work; } } rps_lock(sd); if (skb_queue_empty(&sd->input_pkt_queue)) { /* * Inline a custom version of __napi_complete(). * Only the current cpu owns and manipulates this napi, * and NAPI_STATE_SCHED is the only possible flag set * on backlog. * We can use a plain write instead of clear_bit(), * and we don't need an smp_mb() memory barrier. */ napi->state = 0; rps_unlock(sd); break; } skb_queue_splice_tail_init(&sd->input_pkt_queue, &sd->process_queue); rps_unlock(sd); } local_irq_enable(); return work; }
/** * __napi_schedule - schedule for receive * @n: entry to schedule * * The entry's receive function will be scheduled to run. * Consider using __napi_schedule_irqoff() if hard irqs are masked. */ void __napi_schedule(struct napi_struct *n) { unsigned long flags; local_irq_save(flags); ____napi_schedule(this_cpu_ptr(&softnet_data), n); local_irq_restore(flags); } EXPORT_SYMBOL(__napi_schedule);
/** * __napi_schedule_irqoff - schedule for receive * @n: entry to schedule * * Variant of __napi_schedule() assuming hard irqs are masked */ void __napi_schedule_irqoff(struct napi_struct *n) { ____napi_schedule(this_cpu_ptr(&softnet_data), n); } EXPORT_SYMBOL(__napi_schedule_irqoff);
void __napi_complete(struct napi_struct *n) { BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); list_del_init(&n->poll_list); smp_mb__before_atomic(); clear_bit(NAPI_STATE_SCHED, &n->state); } EXPORT_SYMBOL(__napi_complete);
void napi_complete_done(struct napi_struct *n, int work_done) { unsigned long flags; /* * Don't let napi dequeue from the cpu poll list, * just in case it's running on a different cpu. */ if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state))) return; if (n->gro_list) { unsigned long timeout = 0; if (work_done) timeout = n->dev->gro_flush_timeout; if (timeout) hrtimer_start(&n->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED); else napi_gro_flush(n, false); } if (likely(list_empty(&n->poll_list))) { WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state)); } else { /* If n->poll_list is not empty, we need to mask irqs */ local_irq_save(flags); __napi_complete(n); local_irq_restore(flags); } } EXPORT_SYMBOL(napi_complete_done);
/* Must be called under rcu_read_lock(), as we don't take a reference. */ static struct napi_struct *napi_by_id(unsigned int napi_id) { unsigned int hash = napi_id % HASH_SIZE(napi_hash); struct napi_struct *napi; hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node) if (napi->napi_id == napi_id) return napi; return NULL; } #if defined(CONFIG_NET_RX_BUSY_POLL) #define
BUSY_POLL_BUDGET 8 bool sk_busy_loop(struct sock *sk, int nonblock) { unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0; int (*busy_poll)(struct napi_struct *dev); struct napi_struct *napi; int rc = false; rcu_read_lock(); napi = napi_by_id(sk->sk_napi_id); if (!napi) goto out; /* Note: ndo_busy_poll method is optional in linux-4.5 */ busy_poll = napi->dev->netdev_ops->ndo_busy_poll; do { rc = 0; local_bh_disable(); if (busy_poll) { rc = busy_poll(napi); } else if (napi_schedule_prep(napi)) { void *have = netpoll_poll_lock(napi); if (test_bit(NAPI_STATE_SCHED, &napi->state)) { rc = napi->poll(napi, BUSY_POLL_BUDGET); trace_napi_poll(napi); if (rc == BUSY_POLL_BUDGET) { napi_complete_done(napi, rc); napi_schedule(napi); } } netpoll_poll_unlock(have); } if (rc > 0) NET_ADD_STATS_BH(sock_net(sk), LINUX_MIB_BUSYPOLLRXPACKETS, rc); local_bh_enable(); if (rc == LL_FLUSH_FAILED) break; /* permanent failure */ cpu_relax(); } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && !need_resched() && !busy_loop_timeout(end_time)); rc = !skb_queue_empty(&sk->sk_receive_queue); out: rcu_read_unlock(); return rc; } EXPORT_SYMBOL(sk_busy_loop); #endif /* CONFIG_NET_RX_BUSY_POLL */ void napi_hash_add(struct napi_struct *napi) { if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) || test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) return; spin_lock(&napi_hash_lock); /* 0..NR_CPUS+1 range is reserved for sender_cpu use */ do { if (unlikely(++napi_gen_id < NR_CPUS + 1)) napi_gen_id = NR_CPUS + 1; } while (napi_by_id(napi_gen_id)); napi->napi_id = napi_gen_id; hlist_add_head_rcu(&napi->napi_hash_node, &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); spin_unlock(&napi_hash_lock); } EXPORT_SYMBOL_GPL(napi_hash_add); /* Warning : caller is responsible to make sure rcu grace period * is respected before freeing memory containing @napi */ bool napi_hash_del(struct napi_struct *napi) { bool rcu_sync_needed = false; spin_lock(&napi_hash_lock); if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) { rcu_sync_needed = true; hlist_del_rcu(&napi->napi_hash_node); } spin_unlock(&napi_hash_lock); return rcu_sync_needed; } EXPORT_SYMBOL_GPL(napi_hash_del); static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) { struct napi_struct *napi; napi = container_of(timer, struct napi_struct, timer); if (napi->gro_list) napi_schedule(napi); return HRTIMER_NORESTART; } void netif_napi_add(struct net_device *dev, struct napi_struct *napi, int (*poll)(struct napi_struct *, int), int weight) { INIT_LIST_HEAD(&napi->poll_list); hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); napi->timer.function = napi_watchdog; napi->gro_count = 0; napi->gro_list = NULL; napi->skb = NULL; napi->poll = poll; if (weight > NAPI_POLL_WEIGHT) pr_err_once("netif_napi_add() called with weight %d on device %s\n", weight, dev->name); napi->weight = weight; list_add(&napi->dev_list, &dev->napi_list); napi->dev = dev; #ifdef CONFIG_NETPOLL spin_lock_init(&napi->poll_lock); napi->poll_owner = -1; #endif set_bit(NAPI_STATE_SCHED, &napi->state); napi_hash_add(napi); } EXPORT_SYMBOL(netif_napi_add); void napi_disable(struct napi_struct *n) { might_sleep(); set_bit(NAPI_STATE_DISABLE, &n->state); while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) msleep(1); while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state)) msleep(1); hrtimer_cancel(&n->timer); clear_bit(NAPI_STATE_DISABLE, &n->state); } EXPORT_SYMBOL(napi_disable); /* Must be called in process context */ void netif_napi_del(struct napi_struct *napi) 
{ might_sleep(); if (napi_hash_del(napi)) synchronize_net(); list_del_init(&napi->dev_list); napi_free_frags(napi); kfree_skb_list(napi->gro_list); napi->gro_list = NULL; napi->gro_count = 0; } EXPORT_SYMBOL(netif_napi_del); static int napi_poll(struct napi_struct *n, struct list_head *repoll) { void *have; int work, weight; list_del_init(&n->poll_list); have = netpoll_poll_lock(n); weight = n->weight; /* This NAPI_STATE_SCHED test is for avoiding a race * with netpoll's poll_napi(). Only the entity which * obtains the lock and sees NAPI_STATE_SCHED set will * actually make the ->poll() call. Therefore we avoid * accidentally calling ->poll() when NAPI is not scheduled. */ work = 0; if (test_bit(NAPI_STATE_SCHED, &n->state)) { work = n->poll(n, weight); trace_napi_poll(n); } WARN_ON_ONCE(work > weight); if (likely(work < weight)) goto out_unlock; /* Drivers must not modify the NAPI state if they * consume the entire weight. In such cases this code * still "owns" the NAPI instance and therefore can * move the instance around on the list at-will. */ if (unlikely(napi_disable_pending(n))) { napi_complete(n); goto out_unlock; } if (n->gro_list) { /* flush too old packets * If HZ < 1000, flush all packets. */ napi_gro_flush(n, HZ >= 1000); } /* Some drivers may have called napi_schedule * prior to exhausting their budget. */ if (unlikely(!list_empty(&n->poll_list))) { pr_warn_once("%s: Budget exhausted after napi rescheduled\n", n->dev ? n->dev->name : "backlog"); goto out_unlock; } list_add_tail(&n->poll_list, repoll); out_unlock: netpoll_poll_unlock(have); return work; } static void net_rx_action(struct softirq_action *h) { struct softnet_data *sd = this_cpu_ptr(&softnet_data); unsigned long time_limit = jiffies + 2; int budget = netdev_budget; LIST_HEAD(list); LIST_HEAD(repoll); local_irq_disable(); list_splice_init(&sd->poll_list, &list); local_irq_enable(); for (;;) { struct napi_struct *n; if (list_empty(&list)) { if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll)) return; break; } n = list_first_entry(&list, struct napi_struct, poll_list); budget -= napi_poll(n, &repoll); /* If softirq window is exhausted then punt. * Allow this to run for 2 jiffies since which will allow * an average latency of 1.5/HZ. */ if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit))) { sd->time_squeeze++; break; } } __kfree_skb_flush(); local_irq_disable(); list_splice_tail_init(&sd->poll_list, &list); list_splice_tail(&repoll, &list); list_splice(&list, &sd->poll_list); if (!list_empty(&sd->poll_list)) __raise_softirq_irqoff(NET_RX_SOFTIRQ); net_rps_action_and_irq_enable(sd); } struct netdev_adjacent { struct net_device *dev; /* upper master flag, there can only be one master device per list */ bool master; /* counter for the number of times this device was added to us */ u16 ref_nr; /* private field for the users */ void *private; struct list_head list; struct rcu_head rcu; }; static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev, struct list_head *adj_list) { struct netdev_adjacent *adj; list_for_each_entry(adj, adj_list, list) { if (adj->dev == adj_dev) return adj; } return NULL; } /** * netdev_has_upper_dev - Check if device is linked to an upper device * @dev: device * @upper_dev: upper device to check * * Find out if a device is linked to specified upper device and return true * in case it is. Note that this checks only immediate upper device, * not through a complete stack of devices. The caller must hold the RTNL lock. 
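 *
 *	Illustrative use from a driver, under RTNL (my_dev and my_master are
 *	placeholders, not kernel symbols):
 *
 *		ASSERT_RTNL();
 *		if (netdev_has_upper_dev(my_dev, my_master))
 *			return -EBUSY;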
*/ bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev) { ASSERT_RTNL(); return __netdev_find_adj(upper_dev, &dev->all_adj_list.upper); } EXPORT_SYMBOL(netdev_has_upper_dev);
/** * netdev_has_any_upper_dev - Check if device is linked to some device * @dev: device * * Find out if a device is linked to an upper device and return true in case * it is. The caller must hold the RTNL lock. */ static bool netdev_has_any_upper_dev(struct net_device *dev) { ASSERT_RTNL(); return !list_empty(&dev->all_adj_list.upper); }
/** * netdev_master_upper_dev_get - Get master upper device * @dev: device * * Find a master upper device and return pointer to it or NULL in case * it's not there. The caller must hold the RTNL lock. */ struct net_device *netdev_master_upper_dev_get(struct net_device *dev) { struct netdev_adjacent *upper; ASSERT_RTNL(); if (list_empty(&dev->adj_list.upper)) return NULL; upper = list_first_entry(&dev->adj_list.upper, struct netdev_adjacent, list); if (likely(upper->master)) return upper->dev; return NULL; } EXPORT_SYMBOL(netdev_master_upper_dev_get);
void *netdev_adjacent_get_private(struct list_head *adj_list) { struct netdev_adjacent *adj; adj = list_entry(adj_list, struct netdev_adjacent, list); return adj->private; } EXPORT_SYMBOL(netdev_adjacent_get_private);
/** * netdev_upper_get_next_dev_rcu - Get the next dev from upper list * @dev: device * @iter: list_head ** of the current position * * Gets the next device from the dev's upper list, starting from iter * position. The caller must hold RCU read lock. */ struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, struct list_head **iter) { struct netdev_adjacent *upper; WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); if (&upper->list == &dev->adj_list.upper) return NULL; *iter = &upper->list; return upper->dev; } EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
/** * netdev_all_upper_get_next_dev_rcu - Get the next dev from the all-upper list * @dev: device * @iter: list_head ** of the current position * * Gets the next device from the dev's complete upper list (all_adj_list.upper, * which also contains uppers that are not direct neighbours), starting from * iter position. The caller must hold RCU read lock. */ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, struct list_head **iter) { struct netdev_adjacent *upper; WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); if (&upper->list == &dev->all_adj_list.upper) return NULL; *iter = &upper->list; return upper->dev; } EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
/** * netdev_lower_get_next_private - Get the next ->private from the * lower neighbour list * @dev: device * @iter: list_head ** of the current position * * Gets the next netdev_adjacent->private from the dev's lower neighbour * list, starting from iter position. The caller must either hold the * RTNL lock or use its own locking that guarantees that the neighbour lower * list will remain unchanged.
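 *
 *	Iteration is normally done through the netdev_for_each_lower_private()
 *	helper from <linux/netdevice.h>; struct my_port below is a placeholder
 *	for whatever private pointer the lower link was registered with:
 *
 *		struct list_head *iter;
 *		struct my_port *port;
 *
 *		netdev_for_each_lower_private(dev, port, iter)
 *			my_port_sync(port);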
*/ void *netdev_lower_get_next_private(struct net_device *dev, struct list_head **iter) { struct netdev_adjacent *lower; lower = list_entry(*iter, struct netdev_adjacent, list); if (&lower->list == &dev->adj_list.lower) return NULL; *iter = lower->list.next; return lower->private; } EXPORT_SYMBOL(netdev_lower_get_next_private); /** * netdev_lower_get_next_private_rcu - Get the next ->private from the * lower neighbour list, RCU * variant * @dev: device * @iter: list_head ** of the current position * * Gets the next netdev_adjacent->private from the dev's lower neighbour * list, starting from iter position. The caller must hold RCU read lock. */ void *netdev_lower_get_next_private_rcu(struct net_device *dev, struct list_head **iter) { struct netdev_adjacent *lower; WARN_ON_ONCE(!rcu_read_lock_held()); lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); if (&lower->list == &dev->adj_list.lower) return NULL; *iter = &lower->list; return lower->private; } EXPORT_SYMBOL(netdev_lower_get_next_private_rcu); /** * netdev_lower_get_next - Get the next device from the lower neighbour * list * @dev: device * @iter: list_head ** of the current position * * Gets the next netdev_adjacent from the dev's lower neighbour * list, starting from iter position. The caller must hold RTNL lock or * its own locking that guarantees that the neighbour lower * list will remain unchanged. */ void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter) { struct netdev_adjacent *lower; lower = list_entry(*iter, struct netdev_adjacent, list); if (&lower->list == &dev->adj_list.lower) return NULL; *iter = lower->list.next; return lower->dev; } EXPORT_SYMBOL(netdev_lower_get_next); /** * netdev_lower_get_first_private_rcu - Get the first ->private from the * lower neighbour list, RCU * variant * @dev: device * * Gets the first netdev_adjacent->private from the dev's lower neighbour * list. The caller must hold RCU read lock. */ void *netdev_lower_get_first_private_rcu(struct net_device *dev) { struct netdev_adjacent *lower; lower = list_first_or_null_rcu(&dev->adj_list.lower, struct netdev_adjacent, list); if (lower) return lower->private; return NULL; } EXPORT_SYMBOL(netdev_lower_get_first_private_rcu); /** * netdev_master_upper_dev_get_rcu - Get master upper device * @dev: device * * Find a master upper device and return pointer to it or NULL in case * it's not there. The caller must hold the RCU read lock. */ struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) { struct netdev_adjacent *upper; upper = list_first_or_null_rcu(&dev->adj_list.upper, struct netdev_adjacent, list); if (upper && likely(upper->master)) return upper->dev; return NULL; } EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); static int netdev_adjacent_sysfs_add(struct net_device *dev, struct net_device *adj_dev, struct list_head *dev_list) { char linkname[IFNAMSIZ+7]; sprintf(linkname, dev_list == &dev->adj_list.upper ? "upper_%s" : "lower_%s", adj_dev->name); return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), linkname); } static void netdev_adjacent_sysfs_del(struct net_device *dev, char *name, struct list_head *dev_list) { char linkname[IFNAMSIZ+7]; sprintf(linkname, dev_list == &dev->adj_list.upper ? 
"upper_%s" : "lower_%s", name); sysfs_remove_link(&(dev->dev.kobj), linkname); } static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev, struct net_device *adj_dev, struct list_head *dev_list) { return (dev_list == &dev->adj_list.upper || dev_list == &dev->adj_list.lower) && net_eq(dev_net(dev), dev_net(adj_dev)); } static int __netdev_adjacent_dev_insert(struct net_device *dev, struct net_device *adj_dev, struct list_head *dev_list, void *private, bool master) { struct netdev_adjacent *adj; int ret; adj = __netdev_find_adj(adj_dev, dev_list); if (adj) { adj->ref_nr++; return 0; } adj = kmalloc(sizeof(*adj), GFP_KERNEL); if (!adj) return -ENOMEM; adj->dev = adj_dev; adj->master = master; adj->ref_nr = 1; adj->private = private; dev_hold(adj_dev); pr_debug("dev_hold for %s, because of link added from %s to %s\n", adj_dev->name, dev->name, adj_dev->name); if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) { ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); if (ret) goto free_adj; } /* Ensure that master link is always the first item in list. */ if (master) { ret = sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), "master"); if (ret) goto remove_symlinks; list_add_rcu(&adj->list, dev_list); } else { list_add_tail_rcu(&adj->list, dev_list); } return 0; remove_symlinks: if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); free_adj: kfree(adj); dev_put(adj_dev); return ret; } static void __netdev_adjacent_dev_remove(struct net_device *dev, struct net_device *adj_dev, struct list_head *dev_list) { struct netdev_adjacent *adj; adj = __netdev_find_adj(adj_dev, dev_list); if (!adj) { pr_err("tried to remove device %s from %s\n", dev->name, adj_dev->name); BUG(); } if (adj->ref_nr > 1) { pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name, adj->ref_nr-1); adj->ref_nr--; return; } if (adj->master) sysfs_remove_link(&(dev->dev.kobj), "master"); if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); list_del_rcu(&adj->list); pr_debug("dev_put for %s, because link removed from %s to %s\n", adj_dev->name, dev->name, adj_dev->name); dev_put(adj_dev); kfree_rcu(adj, rcu); } static int __netdev_adjacent_dev_link_lists(struct net_device *dev, struct net_device *upper_dev, struct list_head *up_list, struct list_head *down_list, void *private, bool master) { int ret; ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private, master); if (ret) return ret; ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private, false); if (ret) { __netdev_adjacent_dev_remove(dev, upper_dev, up_list); return ret; } return 0; } static int __netdev_adjacent_dev_link(struct net_device *dev, struct net_device *upper_dev) { return __netdev_adjacent_dev_link_lists(dev, upper_dev, &dev->all_adj_list.upper, &upper_dev->all_adj_list.lower, NULL, false); } static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev, struct net_device *upper_dev, struct list_head *up_list, struct list_head *down_list) { __netdev_adjacent_dev_remove(dev, upper_dev, up_list); __netdev_adjacent_dev_remove(upper_dev, dev, down_list); } static void __netdev_adjacent_dev_unlink(struct net_device *dev, struct net_device *upper_dev) { __netdev_adjacent_dev_unlink_lists(dev, upper_dev, &dev->all_adj_list.upper, &upper_dev->all_adj_list.lower); } static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev, struct net_device *upper_dev, void *private, 
bool master) { int ret = __netdev_adjacent_dev_link(dev, upper_dev); if (ret) return ret; ret = __netdev_adjacent_dev_link_lists(dev, upper_dev, &dev->adj_list.upper, &upper_dev->adj_list.lower, private, master); if (ret) { __netdev_adjacent_dev_unlink(dev, upper_dev); return ret; } return 0; } static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev, struct net_device *upper_dev) { __netdev_adjacent_dev_unlink(dev, upper_dev); __netdev_adjacent_dev_unlink_lists(dev, upper_dev, &dev->adj_list.upper, &upper_dev->adj_list.lower); } static int __netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, bool master, void *upper_priv, void *upper_info) { struct netdev_notifier_changeupper_info changeupper_info; struct netdev_adjacent *i, *j, *to_i, *to_j; int ret = 0; ASSERT_RTNL(); if (dev == upper_dev) return -EBUSY; /* To prevent loops, check if dev is not upper device to upper_dev. */ if (__netdev_find_adj(dev, &upper_dev->all_adj_list.upper)) return -EBUSY; if (__netdev_find_adj(upper_dev, &dev->adj_list.upper)) return -EEXIST; if (master && netdev_master_upper_dev_get(dev)) return -EBUSY; changeupper_info.upper_dev = upper_dev; changeupper_info.master = master; changeupper_info.linking = true; changeupper_info.upper_info = upper_info; ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev, &changeupper_info.info); ret = notifier_to_errno(ret); if (ret) return ret; ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv, master); if (ret) return ret; /* Now that we linked these devs, make all the upper_dev's * all_adj_list.upper visible to every dev's all_adj_list.lower an * versa, and don't forget the devices itself. All of these * links are non-neighbours. */ list_for_each_entry(i, &dev->all_adj_list.lower, list) { list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) { pr_debug("Interlinking %s with %s, non-neighbour\n", i->dev->name, j->dev->name); ret = __netdev_adjacent_dev_link(i->dev, j->dev); if (ret) goto rollback_mesh; } } /* add dev to every upper_dev's upper device */ list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) { pr_debug("linking %s's upper device %s with %s\n", upper_dev->name, i->dev->name, dev->name); ret = __netdev_adjacent_dev_link(dev, i->dev); if (ret) goto rollback_upper_mesh; } /* add upper_dev to every dev's lower device */ list_for_each_entry(i, &dev->all_adj_list.lower, list) { pr_debug("linking %s's lower device %s with %s\n", dev->name, i->dev->name, upper_dev->name); ret = __netdev_adjacent_dev_link(i->dev, upper_dev); if (ret) goto rollback_lower_mesh; } ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev, &changeupper_info.info); ret = notifier_to_errno(ret); if (ret) goto rollback_lower_mesh; return 0; rollback_lower_mesh: to_i = i; list_for_each_entry(i, &dev->all_adj_list.lower, list) { if (i == to_i) break; __netdev_adjacent_dev_unlink(i->dev, upper_dev); } i = NULL; rollback_upper_mesh: to_i = i; list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) { if (i == to_i) break; __netdev_adjacent_dev_unlink(dev, i->dev); } i = j = NULL; rollback_mesh: to_i = i; to_j = j; list_for_each_entry(i, &dev->all_adj_list.lower, list) { list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) { if (i == to_i && j == to_j) break; __netdev_adjacent_dev_unlink(i->dev, j->dev); } if (i == to_i) break; } __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); return ret; } /** * netdev_upper_dev_link - Add a link to the upper device * @dev: device * @upper_dev: new upper 
device * * Adds a link to device which is upper to this one. The caller must hold * the RTNL lock. On a failure a negative errno code is returned. * On success the reference counts are adjusted and the function * returns zero. */ int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev) { return __netdev_upper_dev_link(dev, upper_dev, false, NULL, NULL); } EXPORT_SYMBOL(netdev_upper_dev_link); /** * netdev_master_upper_dev_link - Add a master link to the upper device * @dev: device * @upper_dev: new upper device * @upper_priv: upper device private * @upper_info: upper info to be passed down via notifier * * Adds a link to device which is upper to this one. In this case, only * one master upper device can be linked, although other non-master devices * might be linked as well. The caller must hold the RTNL lock. * On a failure a negative errno code is returned. On success the reference * counts are adjusted and the function returns zero. */ int netdev_master_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, void *upper_priv, void *upper_info) { return __netdev_upper_dev_link(dev, upper_dev, true, upper_priv, upper_info); } EXPORT_SYMBOL(netdev_master_upper_dev_link); /** * netdev_upper_dev_unlink - Removes a link to upper device * @dev: device * @upper_dev: new upper device * * Removes a link to device which is upper to this one. The caller must hold * the RTNL lock. */ void netdev_upper_dev_unlink(struct net_device *dev, struct net_device *upper_dev) { struct netdev_notifier_changeupper_info changeupper_info; struct netdev_adjacent *i, *j; ASSERT_RTNL(); changeupper_info.upper_dev = upper_dev; changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev; changeupper_info.linking = false; call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev, &changeupper_info.info); __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); /* Here is the tricky part. We must remove all dev's lower * devices from all upper_dev's upper devices and vice * versa, to maintain the graph relationship. */ list_for_each_entry(i, &dev->all_adj_list.lower, list) list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) __netdev_adjacent_dev_unlink(i->dev, j->dev); /* remove also the devices itself from lower/upper device * list */ list_for_each_entry(i, &dev->all_adj_list.lower, list) __netdev_adjacent_dev_unlink(i->dev, upper_dev); list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) __netdev_adjacent_dev_unlink(dev, i->dev); call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev, &changeupper_info.info); } EXPORT_SYMBOL(netdev_upper_dev_unlink); /** * netdev_bonding_info_change - Dispatch event about slave change * @dev: device * @bonding_info: info to dispatch * * Send NETDEV_BONDING_INFO to netdev notifiers with info. * The caller must hold the RTNL lock. 
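 *
 *	Illustrative sketch (hypothetical caller, not code from this file;
 *	the "slave" pointer and how the info is filled in are assumptions):
 *
 *		struct netdev_bonding_info info;
 *
 *		memset(&info, 0, sizeof(info));
 *		netdev_bonding_info_change(slave->dev, &info);
 *
 *	issued with the RTNL lock already held by the caller.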
*/ void netdev_bonding_info_change(struct net_device *dev, struct netdev_bonding_info *bonding_info) { struct netdev_notifier_bonding_info info; memcpy(&info.bonding_info, bonding_info, sizeof(struct netdev_bonding_info)); call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev, &info.info); } EXPORT_SYMBOL(netdev_bonding_info_change); static void netdev_adjacent_add_links(struct net_device *dev) { struct netdev_adjacent *iter; struct net *net = dev_net(dev); list_for_each_entry(iter, &dev->adj_list.upper, list) { if (!net_eq(net,dev_net(iter->dev))) continue; netdev_adjacent_sysfs_add(iter->dev, dev, &iter->dev->adj_list.lower); netdev_adjacent_sysfs_add(dev, iter->dev, &dev->adj_list.upper); } list_for_each_entry(iter, &dev->adj_list.lower, list) { if (!net_eq(net,dev_net(iter->dev))) continue; netdev_adjacent_sysfs_add(iter->dev, dev, &iter->dev->adj_list.upper); netdev_adjacent_sysfs_add(dev, iter->dev, &dev->adj_list.lower); } } static void netdev_adjacent_del_links(struct net_device *dev) { struct netdev_adjacent *iter; struct net *net = dev_net(dev); list_for_each_entry(iter, &dev->adj_list.upper, list) { if (!net_eq(net,dev_net(iter->dev))) continue; netdev_adjacent_sysfs_del(iter->dev, dev->name, &iter->dev->adj_list.lower); netdev_adjacent_sysfs_del(dev, iter->dev->name, &dev->adj_list.upper); } list_for_each_entry(iter, &dev->adj_list.lower, list) { if (!net_eq(net,dev_net(iter->dev))) continue; netdev_adjacent_sysfs_del(iter->dev, dev->name, &iter->dev->adj_list.upper); netdev_adjacent_sysfs_del(dev, iter->dev->name, &dev->adj_list.lower); } } void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) { struct netdev_adjacent *iter; struct net *net = dev_net(dev); list_for_each_entry(iter, &dev->adj_list.upper, list) { if (!net_eq(net,dev_net(iter->dev))) continue; netdev_adjacent_sysfs_del(iter->dev, oldname, &iter->dev->adj_list.lower); netdev_adjacent_sysfs_add(iter->dev, dev, &iter->dev->adj_list.lower); } list_for_each_entry(iter, &dev->adj_list.lower, list) { if (!net_eq(net,dev_net(iter->dev))) continue; netdev_adjacent_sysfs_del(iter->dev, oldname, &iter->dev->adj_list.upper); netdev_adjacent_sysfs_add(iter->dev, dev, &iter->dev->adj_list.upper); } } void *netdev_lower_dev_get_private(struct net_device *dev, struct net_device *lower_dev) { struct netdev_adjacent *lower; if (!lower_dev) return NULL; lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower); if (!lower) return NULL; return lower->private; } EXPORT_SYMBOL(netdev_lower_dev_get_private); int dev_get_nest_level(struct net_device *dev, bool (*type_check)(const struct net_device *dev)) { struct net_device *lower = NULL; struct list_head *iter; int max_nest = -1; int nest; ASSERT_RTNL(); netdev_for_each_lower_dev(dev, lower, iter) { nest = dev_get_nest_level(lower, type_check); if (max_nest < nest) max_nest = nest; } if (type_check(dev)) max_nest++; return max_nest; } EXPORT_SYMBOL(dev_get_nest_level); /** * netdev_lower_change - Dispatch event about lower device state change * @lower_dev: device * @lower_state_info: state to dispatch * * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info. * The caller must hold the RTNL lock. 
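 *
 *	Illustrative sketch (hypothetical; "struct my_lower_state" is a
 *	made-up type standing in for whatever the master driver and its
 *	consumers agree to pass as @lower_state_info):
 *
 *		struct my_lower_state state = { .link_up = true };
 *
 *		netdev_lower_state_changed(lower_dev, &state);
 *
 *	called under RTNL, typically when the master re-evaluates a port.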
*/ void netdev_lower_state_changed(struct net_device *lower_dev, void *lower_state_info) { struct netdev_notifier_changelowerstate_info changelowerstate_info; ASSERT_RTNL(); changelowerstate_info.lower_state_info = lower_state_info; call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, lower_dev, &changelowerstate_info.info); } EXPORT_SYMBOL(netdev_lower_state_changed); static void dev_change_rx_flags(struct net_device *dev, int flags) { const struct net_device_ops *ops = dev->netdev_ops; if (ops->ndo_change_rx_flags) ops->ndo_change_rx_flags(dev, flags); } static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) { unsigned int old_flags = dev->flags; kuid_t uid; kgid_t gid; ASSERT_RTNL(); dev->flags |= IFF_PROMISC; dev->promiscuity += inc; if (dev->promiscuity == 0) { /* * Avoid overflow. * If inc causes overflow, untouch promisc and return error. */ if (inc < 0) dev->flags &= ~IFF_PROMISC; else { dev->promiscuity -= inc; pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n", dev->name); return -EOVERFLOW; } } if (dev->flags != old_flags) { pr_info("device %s %s promiscuous mode\n", dev->name, dev->flags & IFF_PROMISC ? "entered" : "left"); if (audit_enabled) { current_uid_gid(&uid, &gid); audit_log(current->audit_context, GFP_ATOMIC, AUDIT_ANOM_PROMISCUOUS, "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", dev->name, (dev->flags & IFF_PROMISC), (old_flags & IFF_PROMISC), from_kuid(&init_user_ns, audit_get_loginuid(current)), from_kuid(&init_user_ns, uid), from_kgid(&init_user_ns, gid), audit_get_sessionid(current)); } dev_change_rx_flags(dev, IFF_PROMISC); } if (notify) __dev_notify_flags(dev, old_flags, IFF_PROMISC); return 0; } /** * dev_set_promiscuity - update promiscuity count on a device * @dev: device * @inc: modifier * * Add or remove promiscuity from a device. While the count in the device * remains above zero the interface remains promiscuous. Once it hits zero * the device reverts back to normal filtering operation. A negative inc * value is used to drop promiscuity on the device. * Return 0 if successful or a negative errno code on error. */ int dev_set_promiscuity(struct net_device *dev, int inc) { unsigned int old_flags = dev->flags; int err; err = __dev_set_promiscuity(dev, inc, true); if (err < 0) return err; if (dev->flags != old_flags) dev_set_rx_mode(dev); return err; } EXPORT_SYMBOL(dev_set_promiscuity); static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify) { unsigned int old_flags = dev->flags, old_gflags = dev->gflags; ASSERT_RTNL(); dev->flags |= IFF_ALLMULTI; dev->allmulti += inc; if (dev->allmulti == 0) { /* * Avoid overflow. * If inc causes overflow, untouch allmulti and return error. */ if (inc < 0) dev->flags &= ~IFF_ALLMULTI; else { dev->allmulti -= inc; pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n", dev->name); return -EOVERFLOW; } } if (dev->flags ^ old_flags) { dev_change_rx_flags(dev, IFF_ALLMULTI); dev_set_rx_mode(dev); if (notify) __dev_notify_flags(dev, old_flags, dev->gflags ^ old_gflags); } return 0; } /** * dev_set_allmulti - update allmulti count on a device * @dev: device * @inc: modifier * * Add or remove reception of all multicast frames to a device. While the * count in the device remains above zero the interface remains listening * to all interfaces. Once it hits zero the device reverts back to normal * filtering operation. 
A negative @inc value is used to drop the counter * when releasing a resource needing all multicasts. * Return 0 if successful or a negative errno code on error. */ int dev_set_allmulti(struct net_device *dev, int inc) { return __dev_set_allmulti(dev, inc, true); } EXPORT_SYMBOL(dev_set_allmulti); /* * Upload unicast and multicast address lists to device and * configure RX filtering. When the device doesn't support unicast * filtering it is put in promiscuous mode while unicast addresses * are present. */ void __dev_set_rx_mode(struct net_device *dev) { const struct net_device_ops *ops = dev->netdev_ops; /* dev_open will call this function so the list will stay sane. */ if (!(dev->flags&IFF_UP)) return; if (!netif_device_present(dev)) return; if (!(dev->priv_flags & IFF_UNICAST_FLT)) { /* Unicast addresses changes may only happen under the rtnl, * therefore calling __dev_set_promiscuity here is safe. */ if (!netdev_uc_empty(dev) && !dev->uc_promisc) { __dev_set_promiscuity(dev, 1, false); dev->uc_promisc = true; } else if (netdev_uc_empty(dev) && dev->uc_promisc) { __dev_set_promiscuity(dev, -1, false); dev->uc_promisc = false; } } if (ops->ndo_set_rx_mode) ops->ndo_set_rx_mode(dev); } void dev_set_rx_mode(struct net_device *dev) { netif_addr_lock_bh(dev); __dev_set_rx_mode(dev); netif_addr_unlock_bh(dev); } /** * dev_get_flags - get flags reported to userspace * @dev: device * * Get the combination of flag bits exported through APIs to userspace. */ unsigned int dev_get_flags(const struct net_device *dev) { unsigned int flags; flags = (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI | IFF_RUNNING | IFF_LOWER_UP | IFF_DORMANT)) | (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI)); if (netif_running(dev)) { if (netif_oper_up(dev)) flags |= IFF_RUNNING; if (netif_carrier_ok(dev)) flags |= IFF_LOWER_UP; if (netif_dormant(dev)) flags |= IFF_DORMANT; } return flags; } EXPORT_SYMBOL(dev_get_flags); int __dev_change_flags(struct net_device *dev, unsigned int flags) { unsigned int old_flags = dev->flags; int ret; ASSERT_RTNL(); /* * Set the flags on our device. */ dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | IFF_AUTOMEDIA)) | (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | IFF_ALLMULTI)); /* * Load in the correct multicast list now the flags have changed. */ if ((old_flags ^ flags) & IFF_MULTICAST) dev_change_rx_flags(dev, IFF_MULTICAST); dev_set_rx_mode(dev); /* * Have we downed the interface. We handle IFF_UP ourselves * according to user attempts to set it, rather than blindly * setting it. */ ret = 0; if ((old_flags ^ flags) & IFF_UP) ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev); if ((flags ^ dev->gflags) & IFF_PROMISC) { int inc = (flags & IFF_PROMISC) ? 1 : -1; unsigned int old_flags = dev->flags; dev->gflags ^= IFF_PROMISC; if (__dev_set_promiscuity(dev, inc, false) >= 0) if (dev->flags != old_flags) dev_set_rx_mode(dev); } /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI is important. Some (broken) drivers set IFF_PROMISC, when IFF_ALLMULTI is requested not asking us and not reporting. */ if ((flags ^ dev->gflags) & IFF_ALLMULTI) { int inc = (flags & IFF_ALLMULTI) ? 
1 : -1; dev->gflags ^= IFF_ALLMULTI; __dev_set_allmulti(dev, inc, false); } return ret; } void __dev_notify_flags(struct net_device *dev, unsigned int old_flags, unsigned int gchanges) { unsigned int changes = dev->flags ^ old_flags; if (gchanges) rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC); if (changes & IFF_UP) { if (dev->flags & IFF_UP) call_netdevice_notifiers(NETDEV_UP, dev); else call_netdevice_notifiers(NETDEV_DOWN, dev); } if (dev->flags & IFF_UP && (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) { struct netdev_notifier_change_info change_info; change_info.flags_changed = changes; call_netdevice_notifiers_info(NETDEV_CHANGE, dev, &change_info.info); } } /** * dev_change_flags - change device settings * @dev: device * @flags: device state flags * * Change settings on device based state flags. The flags are * in the userspace exported format. */ int dev_change_flags(struct net_device *dev, unsigned int flags) { int ret; unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags; ret = __dev_change_flags(dev, flags); if (ret < 0) return ret; changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags); __dev_notify_flags(dev, old_flags, changes); return ret; } EXPORT_SYMBOL(dev_change_flags); static int __dev_set_mtu(struct net_device *dev, int new_mtu) { const struct net_device_ops *ops = dev->netdev_ops; if (ops->ndo_change_mtu) return ops->ndo_change_mtu(dev, new_mtu); dev->mtu = new_mtu; return 0; } /** * dev_set_mtu - Change maximum transfer unit * @dev: device * @new_mtu: new transfer unit * * Change the maximum transfer size of the network device. */ int dev_set_mtu(struct net_device *dev, int new_mtu) { int err, orig_mtu; if (new_mtu == dev->mtu) return 0; /* MTU must be positive. */ if (new_mtu < 0) return -EINVAL; if (!netif_device_present(dev)) return -ENODEV; err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev); err = notifier_to_errno(err); if (err) return err; orig_mtu = dev->mtu; err = __dev_set_mtu(dev, new_mtu); if (!err) { err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); err = notifier_to_errno(err); if (err) { /* setting mtu back and notifying everyone again, * so that they have a chance to revert changes. 
*/ __dev_set_mtu(dev, orig_mtu); call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); } } return err; } EXPORT_SYMBOL(dev_set_mtu); /** * dev_set_group - Change group this device belongs to * @dev: device * @new_group: group this device should belong to */ void dev_set_group(struct net_device *dev, int new_group) { dev->group = new_group; } EXPORT_SYMBOL(dev_set_group); /** * dev_set_mac_address - Change Media Access Control Address * @dev: device * @sa: new address * * Change the hardware (MAC) address of the device */ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) { const struct net_device_ops *ops = dev->netdev_ops; int err; if (!ops->ndo_set_mac_address) return -EOPNOTSUPP; if (sa->sa_family != dev->type) return -EINVAL; if (!netif_device_present(dev)) return -ENODEV; err = ops->ndo_set_mac_address(dev, sa); if (err) return err; dev->addr_assign_type = NET_ADDR_SET; call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); add_device_randomness(dev->dev_addr, dev->addr_len); return 0; } EXPORT_SYMBOL(dev_set_mac_address); /** * dev_change_carrier - Change device carrier * @dev: device * @new_carrier: new value * * Change device carrier */ int dev_change_carrier(struct net_device *dev, bool new_carrier) { const struct net_device_ops *ops = dev->netdev_ops; if (!ops->ndo_change_carrier) return -EOPNOTSUPP; if (!netif_device_present(dev)) return -ENODEV; return ops->ndo_change_carrier(dev, new_carrier); } EXPORT_SYMBOL(dev_change_carrier); /** * dev_get_phys_port_id - Get device physical port ID * @dev: device * @ppid: port ID * * Get device physical port ID */ int dev_get_phys_port_id(struct net_device *dev, struct netdev_phys_item_id *ppid) { const struct net_device_ops *ops = dev->netdev_ops; if (!ops->ndo_get_phys_port_id) return -EOPNOTSUPP; return ops->ndo_get_phys_port_id(dev, ppid); } EXPORT_SYMBOL(dev_get_phys_port_id); /** * dev_get_phys_port_name - Get device physical port name * @dev: device * @name: port name * * Get device physical port name */ int dev_get_phys_port_name(struct net_device *dev, char *name, size_t len) { const struct net_device_ops *ops = dev->netdev_ops; if (!ops->ndo_get_phys_port_name) return -EOPNOTSUPP; return ops->ndo_get_phys_port_name(dev, name, len); } EXPORT_SYMBOL(dev_get_phys_port_name); /** * dev_change_proto_down - update protocol port state information * @dev: device * @proto_down: new value * * This info can be used by switch drivers to set the phys state of the * port. */ int dev_change_proto_down(struct net_device *dev, bool proto_down) { const struct net_device_ops *ops = dev->netdev_ops; if (!ops->ndo_change_proto_down) return -EOPNOTSUPP; if (!netif_device_present(dev)) return -ENODEV; return ops->ndo_change_proto_down(dev, proto_down); } EXPORT_SYMBOL(dev_change_proto_down); /** * dev_new_index - allocate an ifindex * @net: the applicable net namespace * * Returns a suitable unique value for a new device interface * number. The caller must hold the rtnl semaphore or the * dev_base_lock to be sure it remains unique. 
*/ static int dev_new_index(struct net *net) { int ifindex = net->ifindex; for (;;) { if (++ifindex <= 0) ifindex = 1; if (!__dev_get_by_index(net, ifindex)) return net->ifindex = ifindex; } } /* Delayed registration/unregisteration */ static LIST_HEAD(net_todo_list); DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); static void net_set_todo(struct net_device *dev) { list_add_tail(&dev->todo_list, &net_todo_list); dev_net(dev)->dev_unreg_count++; } static void rollback_registered_many(struct list_head *head) { struct net_device *dev, *tmp; LIST_HEAD(close_head); BUG_ON(dev_boot_phase); ASSERT_RTNL(); list_for_each_entry_safe(dev, tmp, head, unreg_list) { /* Some devices call without registering * for initialization unwind. Remove those * devices and proceed with the remaining. */ if (dev->reg_state == NETREG_UNINITIALIZED) { pr_debug("unregister_netdevice: device %s/%p never was registered\n", dev->name, dev); WARN_ON(1); list_del(&dev->unreg_list); continue; } dev->dismantle = true; BUG_ON(dev->reg_state != NETREG_REGISTERED); } /* If device is running, close it first. */ list_for_each_entry(dev, head, unreg_list) list_add_tail(&dev->close_list, &close_head); dev_close_many(&close_head, true); list_for_each_entry(dev, head, unreg_list) { /* And unlink it from device chain. */ unlist_netdevice(dev); dev->reg_state = NETREG_UNREGISTERING; on_each_cpu(flush_backlog, dev, 1); } synchronize_net(); list_for_each_entry(dev, head, unreg_list) { struct sk_buff *skb = NULL; /* Shutdown queueing discipline. */ dev_shutdown(dev); /* Notify protocols, that we are about to destroy this device. They should clean all the things. */ call_netdevice_notifiers(NETDEV_UNREGISTER, dev); if (!dev->rtnl_link_ops || dev->rtnl_link_state == RTNL_LINK_INITIALIZED) skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, GFP_KERNEL); /* * Flush the unicast and multicast chains */ dev_uc_flush(dev); dev_mc_flush(dev); if (dev->netdev_ops->ndo_uninit) dev->netdev_ops->ndo_uninit(dev); if (skb) rtmsg_ifinfo_send(skb, dev, GFP_KERNEL); /* Notifier chain MUST detach us all upper devices. 
*/ WARN_ON(netdev_has_any_upper_dev(dev)); /* Remove entries from kobject tree */ netdev_unregister_kobject(dev); #ifdef CONFIG_XPS /* Remove XPS queueing entries */ netif_reset_xps_queues_gt(dev, 0); #endif } synchronize_net(); list_for_each_entry(dev, head, unreg_list) dev_put(dev); } static void rollback_registered(struct net_device *dev) { LIST_HEAD(single); list_add(&dev->unreg_list, &single); rollback_registered_many(&single); list_del(&single); } static netdev_features_t netdev_sync_upper_features(struct net_device *lower, struct net_device *upper, netdev_features_t features) { netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; netdev_features_t feature; int feature_bit; for_each_netdev_feature(&upper_disables, feature_bit) { feature = __NETIF_F_BIT(feature_bit); if (!(upper->wanted_features & feature) && (features & feature)) { netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n", &feature, upper->name); features &= ~feature; } } return features; } static void netdev_sync_lower_features(struct net_device *upper, struct net_device *lower, netdev_features_t features) { netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; netdev_features_t feature; int feature_bit; for_each_netdev_feature(&upper_disables, feature_bit) { feature = __NETIF_F_BIT(feature_bit); if (!(features & feature) && (lower->features & feature)) { netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", &feature, lower->name); lower->wanted_features &= ~feature; netdev_update_features(lower); if (unlikely(lower->features & feature)) netdev_WARN(upper, "failed to disable %pNF on %s!\n", &feature, lower->name); } } } static netdev_features_t netdev_fix_features(struct net_device *dev, netdev_features_t features) { /* Fix illegal checksum combinations */ if ((features & NETIF_F_HW_CSUM) && (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { netdev_warn(dev, "mixed HW and IP checksum settings.\n"); features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); } /* TSO requires that SG is present as well. */ if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); features &= ~NETIF_F_ALL_TSO; } if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) && !(features & NETIF_F_IP_CSUM)) { netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n"); features &= ~NETIF_F_TSO; features &= ~NETIF_F_TSO_ECN; } if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) && !(features & NETIF_F_IPV6_CSUM)) { netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n"); features &= ~NETIF_F_TSO6; } /* TSO ECN requires that TSO is present as well. */ if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) features &= ~NETIF_F_TSO_ECN; /* Software GSO depends on SG. */ if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); features &= ~NETIF_F_GSO; } /* UFO needs SG and checksumming */ if (features & NETIF_F_UFO) { /* maybe split UFO into V4 and V6? 
 */
		if (!(features & NETIF_F_HW_CSUM) &&
		    ((features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) !=
		     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) {
			netdev_dbg(dev,
				"Dropping NETIF_F_UFO since no checksum offload features.\n");
			features &= ~NETIF_F_UFO;
		}

		if (!(features & NETIF_F_SG)) {
			netdev_dbg(dev,
				"Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
			features &= ~NETIF_F_UFO;
		}
	}

#ifdef CONFIG_NET_RX_BUSY_POLL
	if (dev->netdev_ops->ndo_busy_poll)
		features |= NETIF_F_BUSY_POLL;
	else
#endif
		features &= ~NETIF_F_BUSY_POLL;

	return features;
}

int __netdev_update_features(struct net_device *dev)
{
	struct net_device *upper, *lower;
	netdev_features_t features;
	struct list_head *iter;
	int err = -1;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	/* some features can't be enabled if they're off on an upper device */
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		features = netdev_sync_upper_features(dev, upper, features);

	if (dev->features == features)
		goto sync_lower;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		&dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);
	else
		err = 0;

	if (unlikely(err < 0)) {
		netdev_err(dev,
			"set_features() failed (%d); wanted %pNF, left %pNF\n",
			err, &features, &dev->features);
		/* return non-0 since some features might have changed and
		 * it's better to fire a spurious notification than miss it
		 */
		return -1;
	}

sync_lower:
	/* some features must be disabled on lower devices when disabled
	 * on an upper device (think: bonding master or bridge)
	 */
	netdev_for_each_lower_dev(dev, lower, iter)
		netdev_sync_lower_features(dev, lower, features);

	if (!err)
		dev->features = features;

	return err < 0 ? 0 : 1;
}

/**
 *	netdev_update_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications if it
 *	has changed. Should be called after driver or hardware dependent
 *	conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);

/**
 *	netdev_change_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications even
 *	if they have not changed. Should be called instead of
 *	netdev_update_features() if also dev->vlan_features might
 *	have changed to allow the changes to be propagated to stacked
 *	VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);

/**
 *	netif_stacked_transfer_operstate - transfer operstate
 *	@rootdev: the root or lower level device to transfer state from
 *	@dev: the device to transfer operstate to
 *
 *	Transfer operational state from root to device. This is normally
 *	called when a stacking relationship exists between the root
 *	device and the device (a leaf device).
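 *
 *	Illustrative sketch (hypothetical stacked driver; "real_dev" and
 *	"stacked_dev" are assumed variables, not names from this file): on a
 *	NETDEV_CHANGE notification for the lower device, such a driver might
 *	simply do
 *
 *		netif_stacked_transfer_operstate(real_dev, stacked_dev);
 *
 *	to copy carrier and dormant state onto the upper device.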
*/ void netif_stacked_transfer_operstate(const struct net_device *rootdev, struct net_device *dev) { if (rootdev->operstate == IF_OPER_DORMANT) netif_dormant_on(dev); else netif_dormant_off(dev); if (netif_carrier_ok(rootdev)) { if (!netif_carrier_ok(dev)) netif_carrier_on(dev); } else { if (netif_carrier_ok(dev)) netif_carrier_off(dev); } } EXPORT_SYMBOL(netif_stacked_transfer_operstate); #ifdef CONFIG_SYSFS static int netif_alloc_rx_queues(struct net_device *dev) { unsigned int i, count = dev->num_rx_queues; struct netdev_rx_queue *rx; size_t sz = count * sizeof(*rx); BUG_ON(count < 1); rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); if (!rx) { rx = vzalloc(sz); if (!rx) return -ENOMEM; } dev->_rx = rx; for (i = 0; i < count; i++) rx[i].dev = dev; return 0; } #endif static void netdev_init_one_queue(struct net_device *dev, struct netdev_queue *queue, void *_unused) { /* Initialize queue lock */ spin_lock_init(&queue->_xmit_lock); netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); queue->xmit_lock_owner = -1; netdev_queue_numa_node_write(queue, NUMA_NO_NODE); queue->dev = dev; #ifdef CONFIG_BQL dql_init(&queue->dql, HZ); #endif } static void netif_free_tx_queues(struct net_device *dev) { kvfree(dev->_tx); } static int netif_alloc_netdev_queues(struct net_device *dev) { unsigned int count = dev->num_tx_queues; struct netdev_queue *tx; size_t sz = count * sizeof(*tx); if (count < 1 || count > 0xffff) return -EINVAL; tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); if (!tx) { tx = vzalloc(sz); if (!tx) return -ENOMEM; } dev->_tx = tx; netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); spin_lock_init(&dev->tx_global_lock); return 0; } void netif_tx_stop_all_queues(struct net_device *dev) { unsigned int i; for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); netif_tx_stop_queue(txq); } } EXPORT_SYMBOL(netif_tx_stop_all_queues); /** * register_netdevice - register a network device * @dev: device to register * * Take a completed network device structure and add it to the kernel * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier * chain. 0 is returned on success. A negative errno code is returned * on a failure to set up the device, or if the name is a duplicate. * * Callers must hold the rtnl semaphore. You may want * register_netdev() instead of this. * * BUGS: * The locking appears insufficient to guarantee two parallel registers * will not get the same name. */ int register_netdevice(struct net_device *dev) { int ret; struct net *net = dev_net(dev); BUG_ON(dev_boot_phase); ASSERT_RTNL(); might_sleep(); /* When net_device's are persistent, this will be fatal. 
*/ BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); BUG_ON(!net); spin_lock_init(&dev->addr_list_lock); netdev_set_addr_lockdep_class(dev); ret = dev_get_valid_name(net, dev, dev->name); if (ret < 0) goto out; /* Init, if this function is available */ if (dev->netdev_ops->ndo_init) { ret = dev->netdev_ops->ndo_init(dev); if (ret) { if (ret > 0) ret = -EIO; goto out; } } if (((dev->hw_features | dev->features) & NETIF_F_HW_VLAN_CTAG_FILTER) && (!dev->netdev_ops->ndo_vlan_rx_add_vid || !dev->netdev_ops->ndo_vlan_rx_kill_vid)) { netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n"); ret = -EINVAL; goto err_uninit; } ret = -EBUSY; if (!dev->ifindex) dev->ifindex = dev_new_index(net); else if (__dev_get_by_index(net, dev->ifindex)) goto err_uninit; /* Transfer changeable features to wanted_features and enable * software offloads (GSO and GRO). */ dev->hw_features |= NETIF_F_SOFT_FEATURES; dev->features |= NETIF_F_SOFT_FEATURES; dev->wanted_features = dev->features & dev->hw_features; if (!(dev->flags & IFF_LOOPBACK)) { dev->hw_features |= NETIF_F_NOCACHE_COPY; } /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. */ dev->vlan_features |= NETIF_F_HIGHDMA; /* Make NETIF_F_SG inheritable to tunnel devices. */ dev->hw_enc_features |= NETIF_F_SG; /* Make NETIF_F_SG inheritable to MPLS. */ dev->mpls_features |= NETIF_F_SG; ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); ret = notifier_to_errno(ret); if (ret) goto err_uninit; ret = netdev_register_kobject(dev); if (ret) goto err_uninit; dev->reg_state = NETREG_REGISTERED; __netdev_update_features(dev); /* * Default initial state at registry is that the * device is present. */ set_bit(__LINK_STATE_PRESENT, &dev->state); linkwatch_init_dev(dev); dev_init_scheduler(dev); dev_hold(dev); list_netdevice(dev); add_device_randomness(dev->dev_addr, dev->addr_len); /* If the device has permanent device address, driver should * set dev_addr and also addr_assign_type should be set to * NET_ADDR_PERM (default value). */ if (dev->addr_assign_type == NET_ADDR_PERM) memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); /* Notify protocols, that a new device appeared. */ ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); ret = notifier_to_errno(ret); if (ret) { rollback_registered(dev); dev->reg_state = NETREG_UNREGISTERED; } /* * Prevent userspace races by waiting until the network * device is fully setup before sending notifications. */ if (!dev->rtnl_link_ops || dev->rtnl_link_state == RTNL_LINK_INITIALIZED) rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL); out: return ret; err_uninit: if (dev->netdev_ops->ndo_uninit) dev->netdev_ops->ndo_uninit(dev); goto out; } EXPORT_SYMBOL(register_netdevice); /** * init_dummy_netdev - init a dummy network device for NAPI * @dev: device to init * * This takes a network device structure and initialize the minimum * amount of fields so it can be used to schedule NAPI polls without * registering a full blown interface. This is to be used by drivers * that need to tie several hardware interfaces to a single NAPI * poll scheduler due to HW limitations. */ int init_dummy_netdev(struct net_device *dev) { /* Clear everything. 
Note we don't initialize spinlocks * are they aren't supposed to be taken by any of the * NAPI code and this dummy netdev is supposed to be * only ever used for NAPI polls */ memset(dev, 0, sizeof(struct net_device)); /* make sure we BUG if trying to hit standard * register/unregister code path */ dev->reg_state = NETREG_DUMMY; /* NAPI wants this */ INIT_LIST_HEAD(&dev->napi_list); /* a dummy interface is started by default */ set_bit(__LINK_STATE_PRESENT, &dev->state); set_bit(__LINK_STATE_START, &dev->state); /* Note : We dont allocate pcpu_refcnt for dummy devices, * because users of this 'device' dont need to change * its refcount. */ return 0; } EXPORT_SYMBOL_GPL(init_dummy_netdev); /** * register_netdev - register a network device * @dev: device to register * * Take a completed network device structure and add it to the kernel * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier * chain. 0 is returned on success. A negative errno code is returned * on a failure to set up the device, or if the name is a duplicate. * * This is a wrapper around register_netdevice that takes the rtnl semaphore * and expands the device name if you passed a format string to * alloc_netdev. */ int register_netdev(struct net_device *dev) { int err; rtnl_lock(); err = register_netdevice(dev); rtnl_unlock(); return err; } EXPORT_SYMBOL(register_netdev); int netdev_refcnt_read(const struct net_device *dev) { int i, refcnt = 0; for_each_possible_cpu(i) refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); return refcnt; } EXPORT_SYMBOL(netdev_refcnt_read); /** * netdev_wait_allrefs - wait until all references are gone. * @dev: target net_device * * This is called when unregistering network devices. * * Any protocol or device that holds a reference should register * for netdevice notification, and cleanup and put back the * reference if they receive an UNREGISTER event. * We can get stuck here if buggy protocols don't correctly * call dev_put. */ static void netdev_wait_allrefs(struct net_device *dev) { unsigned long rebroadcast_time, warning_time; int refcnt; linkwatch_forget_dev(dev); rebroadcast_time = warning_time = jiffies; refcnt = netdev_refcnt_read(dev); while (refcnt != 0) { if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { rtnl_lock(); /* Rebroadcast unregister notification */ call_netdevice_notifiers(NETDEV_UNREGISTER, dev); __rtnl_unlock(); rcu_barrier(); rtnl_lock(); call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev); if (test_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { /* We must not have linkwatch events * pending on unregister. If this * happens, we simply run the queue * unscheduled, resulting in a noop * for this device. */ linkwatch_run_queue(); } __rtnl_unlock(); rebroadcast_time = jiffies; } msleep(250); refcnt = netdev_refcnt_read(dev); if (time_after(jiffies, warning_time + 10 * HZ)) { pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n", dev->name, refcnt); warning_time = jiffies; } } } /* The sequence is: * * rtnl_lock(); * ... * register_netdevice(x1); * register_netdevice(x2); * ... * unregister_netdevice(y1); * unregister_netdevice(y2); * ... * rtnl_unlock(); * free_netdev(y1); * free_netdev(y2); * * We are invoked by rtnl_unlock(). * This allows us to deal with problems: * 1) We can delete sysfs objects which invoke hotplug * without deadlocking with linkwatch via keventd. * 2) Since we run with the RTNL semaphore not held, we can sleep * safely in order to wait for the netdev refcnt to drop to zero. 
* * We must not return until all unregister events added during * the interval the lock was held have been completed. */ void netdev_run_todo(void) { struct list_head list; /* Snapshot list, allow later requests */ list_replace_init(&net_todo_list, &list); __rtnl_unlock(); /* Wait for rcu callbacks to finish before next phase */ if (!list_empty(&list)) rcu_barrier(); while (!list_empty(&list)) { struct net_device *dev = list_first_entry(&list, struct net_device, todo_list); list_del(&dev->todo_list); rtnl_lock(); call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev); __rtnl_unlock(); if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { pr_err("network todo '%s' but state %d\n", dev->name, dev->reg_state); dump_stack(); continue; } dev->reg_state = NETREG_UNREGISTERED; netdev_wait_allrefs(dev); /* paranoia */ BUG_ON(netdev_refcnt_read(dev)); BUG_ON(!list_empty(&dev->ptype_all)); BUG_ON(!list_empty(&dev->ptype_specific)); WARN_ON(rcu_access_pointer(dev->ip_ptr)); WARN_ON(rcu_access_pointer(dev->ip6_ptr)); WARN_ON(dev->dn_ptr); if (dev->destructor) dev->destructor(dev); /* Report a network device has been unregistered */ rtnl_lock(); dev_net(dev)->dev_unreg_count--; __rtnl_unlock(); wake_up(&netdev_unregistering_wq); /* Free network device */ kobject_put(&dev->dev.kobj); } } /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has * all the same fields in the same order as net_device_stats, with only * the type differing, but rtnl_link_stats64 may have additional fields * at the end for newer counters. */ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, const struct net_device_stats *netdev_stats) { #if BITS_PER_LONG == 64 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats)); memcpy(stats64, netdev_stats, sizeof(*stats64)); /* zero out counters that only exist in rtnl_link_stats64 */ memset((char *)stats64 + sizeof(*netdev_stats), 0, sizeof(*stats64) - sizeof(*netdev_stats)); #else size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long); const unsigned long *src = (const unsigned long *)netdev_stats; u64 *dst = (u64 *)stats64; BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64)); for (i = 0; i < n; i++) dst[i] = src[i]; /* zero out counters that only exist in rtnl_link_stats64 */ memset((char *)stats64 + n * sizeof(u64), 0, sizeof(*stats64) - n * sizeof(u64)); #endif } EXPORT_SYMBOL(netdev_stats_to_stats64); /** * dev_get_stats - get network device statistics * @dev: device to get statistics from * @storage: place to store stats * * Get network statistics from device. Return @storage. * The device driver may provide its own method by setting * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; * otherwise the internal statistics structure is used. 
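 *
 *	Illustrative sketch of a caller (hypothetical, not code from this
 *	file):
 *
 *		struct rtnl_link_stats64 stats;
 *
 *		dev_get_stats(dev, &stats);
 *		pr_info("%s: rx %llu tx %llu\n", dev->name,
 *			(unsigned long long)stats.rx_packets,
 *			(unsigned long long)stats.tx_packets);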
*/ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, struct rtnl_link_stats64 *storage) { const struct net_device_ops *ops = dev->netdev_ops; if (ops->ndo_get_stats64) { memset(storage, 0, sizeof(*storage)); ops->ndo_get_stats64(dev, storage); } else if (ops->ndo_get_stats) { netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); } else { netdev_stats_to_stats64(storage, &dev->stats); } storage->rx_dropped += atomic_long_read(&dev->rx_dropped); storage->tx_dropped += atomic_long_read(&dev->tx_dropped); storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler); return storage; } EXPORT_SYMBOL(dev_get_stats); struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) { struct netdev_queue *queue = dev_ingress_queue(dev); #ifdef CONFIG_NET_CLS_ACT if (queue) return queue; queue = kzalloc(sizeof(*queue), GFP_KERNEL); if (!queue) return NULL; netdev_init_one_queue(dev, queue, NULL); RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); queue->qdisc_sleeping = &noop_qdisc; rcu_assign_pointer(dev->ingress_queue, queue); #endif return queue; } static const struct ethtool_ops default_ethtool_ops; void netdev_set_default_ethtool_ops(struct net_device *dev, const struct ethtool_ops *ops) { if (dev->ethtool_ops == &default_ethtool_ops) dev->ethtool_ops = ops; } EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops); void netdev_freemem(struct net_device *dev) { char *addr = (char *)dev - dev->padded; kvfree(addr); } /** * alloc_netdev_mqs - allocate network device * @sizeof_priv: size of private data to allocate space for * @name: device name format string * @name_assign_type: origin of device name * @setup: callback to initialize device * @txqs: the number of TX subqueues to allocate * @rxqs: the number of RX subqueues to allocate * * Allocates a struct net_device with private data area for driver use * and performs basic initialization. Also allocates subqueue structs * for each queue on the device. 
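 *
 *	Illustrative sketch (hypothetical driver; "struct my_priv" and the
 *	"my%d" name template are made up, ether_setup() is a real setup
 *	helper):
 *
 *		struct net_device *dev;
 *
 *		dev = alloc_netdev_mqs(sizeof(struct my_priv), "my%d",
 *				       NET_NAME_UNKNOWN, ether_setup, 8, 8);
 *		if (!dev)
 *			return -ENOMEM;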
*/ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, unsigned char name_assign_type, void (*setup)(struct net_device *), unsigned int txqs, unsigned int rxqs) { struct net_device *dev; size_t alloc_size; struct net_device *p; BUG_ON(strlen(name) >= sizeof(dev->name)); if (txqs < 1) { pr_err("alloc_netdev: Unable to allocate device with zero queues\n"); return NULL; } #ifdef CONFIG_SYSFS if (rxqs < 1) { pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n"); return NULL; } #endif alloc_size = sizeof(struct net_device); if (sizeof_priv) { /* ensure 32-byte alignment of private area */ alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); alloc_size += sizeof_priv; } /* ensure 32-byte alignment of whole construct */ alloc_size += NETDEV_ALIGN - 1; p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); if (!p) p = vzalloc(alloc_size); if (!p) return NULL; dev = PTR_ALIGN(p, NETDEV_ALIGN); dev->padded = (char *)dev - (char *)p; dev->pcpu_refcnt = alloc_percpu(int); if (!dev->pcpu_refcnt) goto free_dev; if (dev_addr_init(dev)) goto free_pcpu; dev_mc_init(dev); dev_uc_init(dev); dev_net_set(dev, &init_net); dev->gso_max_size = GSO_MAX_SIZE; dev->gso_max_segs = GSO_MAX_SEGS; dev->gso_min_segs = 0; INIT_LIST_HEAD(&dev->napi_list); INIT_LIST_HEAD(&dev->unreg_list); INIT_LIST_HEAD(&dev->close_list); INIT_LIST_HEAD(&dev->link_watch_list); INIT_LIST_HEAD(&dev->adj_list.upper); INIT_LIST_HEAD(&dev->adj_list.lower); INIT_LIST_HEAD(&dev->all_adj_list.upper); INIT_LIST_HEAD(&dev->all_adj_list.lower); INIT_LIST_HEAD(&dev->ptype_all); INIT_LIST_HEAD(&dev->ptype_specific); dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; setup(dev); if (!dev->tx_queue_len) { dev->priv_flags |= IFF_NO_QUEUE; dev->tx_queue_len = 1; } dev->num_tx_queues = txqs; dev->real_num_tx_queues = txqs; if (netif_alloc_netdev_queues(dev)) goto free_all; #ifdef CONFIG_SYSFS dev->num_rx_queues = rxqs; dev->real_num_rx_queues = rxqs; if (netif_alloc_rx_queues(dev)) goto free_all; #endif strcpy(dev->name, name); dev->name_assign_type = name_assign_type; dev->group = INIT_NETDEV_GROUP; if (!dev->ethtool_ops) dev->ethtool_ops = &default_ethtool_ops; nf_hook_ingress_init(dev); return dev; free_all: free_netdev(dev); return NULL; free_pcpu: free_percpu(dev->pcpu_refcnt); free_dev: netdev_freemem(dev); return NULL; } EXPORT_SYMBOL(alloc_netdev_mqs); /** * free_netdev - free network device * @dev: device * * This function does the last stage of destroying an allocated device * interface. The reference to the device object is released. * If this is the last reference then it will be freed. * Must be called in process context. */ void free_netdev(struct net_device *dev) { struct napi_struct *p, *n; might_sleep(); netif_free_tx_queues(dev); #ifdef CONFIG_SYSFS kvfree(dev->_rx); #endif kfree(rcu_dereference_protected(dev->ingress_queue, 1)); /* Flush device addresses */ dev_addr_flush(dev); list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) netif_napi_del(p); free_percpu(dev->pcpu_refcnt); dev->pcpu_refcnt = NULL; /* Compatibility with error handling in drivers */ if (dev->reg_state == NETREG_UNINITIALIZED) { netdev_freemem(dev); return; } BUG_ON(dev->reg_state != NETREG_UNREGISTERED); dev->reg_state = NETREG_RELEASED; /* will free via device release */ put_device(&dev->dev); } EXPORT_SYMBOL(free_netdev); /** * synchronize_net - Synchronize with packet receive processing * * Wait for packets currently being received to be done. * Does not block later packets from starting. 
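 *
 *	Illustrative pattern (hypothetical; "my_hook" and "old" stand for
 *	RCU-protected state owned by the caller, not names from this file):
 *
 *		RCU_INIT_POINTER(my_hook, NULL);
 *		synchronize_net();
 *		kfree(old);
 *
 *	i.e. unpublish the pointer, wait out in-flight receive paths, then
 *	free the old state.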
*/ void synchronize_net(void) { might_sleep(); if (rtnl_is_locked()) synchronize_rcu_expedited(); else synchronize_rcu(); } EXPORT_SYMBOL(synchronize_net); /** * unregister_netdevice_queue - remove device from the kernel * @dev: device * @head: list * * This function shuts down a device interface and removes it * from the kernel tables. * If head not NULL, device is queued to be unregistered later. * * Callers must hold the rtnl semaphore. You may want * unregister_netdev() instead of this. */ void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) { ASSERT_RTNL(); if (head) { list_move_tail(&dev->unreg_list, head); } else { rollback_registered(dev); /* Finish processing unregister after unlock */ net_set_todo(dev); } } EXPORT_SYMBOL(unregister_netdevice_queue); /** * unregister_netdevice_many - unregister many devices * @head: list of devices * * Note: As most callers use a stack allocated list_head, * we force a list_del() to make sure stack wont be corrupted later. */ void unregister_netdevice_many(struct list_head *head) { struct net_device *dev; if (!list_empty(head)) { rollback_registered_many(head); list_for_each_entry(dev, head, unreg_list) net_set_todo(dev); list_del(head); } } EXPORT_SYMBOL(unregister_netdevice_many); /** * unregister_netdev - remove device from the kernel * @dev: device * * This function shuts down a device interface and removes it * from the kernel tables. * * This is just a wrapper for unregister_netdevice that takes * the rtnl semaphore. In general you want to use this and not * unregister_netdevice. */ void unregister_netdev(struct net_device *dev) { rtnl_lock(); unregister_netdevice(dev); rtnl_unlock(); } EXPORT_SYMBOL(unregister_netdev); /** * dev_change_net_namespace - move device to different nethost namespace * @dev: device * @net: network namespace * @pat: If not NULL name pattern to try if the current device name * is already taken in the destination network namespace. * * This function shuts down a device interface and moves it * to a new network namespace. On success 0 is returned, on * a failure a netagive errno code is returned. * * Callers must hold the rtnl semaphore. */ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat) { int err; ASSERT_RTNL(); /* Don't allow namespace local devices to be moved. */ err = -EINVAL; if (dev->features & NETIF_F_NETNS_LOCAL) goto out; /* Ensure the device has been registrered */ if (dev->reg_state != NETREG_REGISTERED) goto out; /* Get out if there is nothing todo */ err = 0; if (net_eq(dev_net(dev), net)) goto out; /* Pick the destination device name, and ensure * we can use it in the destination network namespace. */ err = -EEXIST; if (__dev_get_by_name(net, dev->name)) { /* We get here if we can't use the current device name */ if (!pat) goto out; if (dev_get_valid_name(net, dev, pat) < 0) goto out; } /* * And now a mini version of register_netdevice unregister_netdevice. */ /* If device is running close it first. */ dev_close(dev); /* And unlink it from device chain */ err = -ENODEV; unlist_netdevice(dev); synchronize_net(); /* Shutdown queueing discipline. */ dev_shutdown(dev); /* Notify protocols, that we are about to destroy this device. They should clean all the things. Note that dev->reg_state stays at NETREG_REGISTERED. This is wanted because this way 8021q and macvlan know the device is just moving and can keep their slaves up. 
*/ call_netdevice_notifiers(NETDEV_UNREGISTER, dev); rcu_barrier(); call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev); rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL); /* * Flush the unicast and multicast chains */ dev_uc_flush(dev); dev_mc_flush(dev); /* Send a netdev-removed uevent to the old namespace */ kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE); netdev_adjacent_del_links(dev); /* Actually switch the network namespace */ dev_net_set(dev, net); /* If there is an ifindex conflict assign a new one */ if (__dev_get_by_index(net, dev->ifindex)) dev->ifindex = dev_new_index(net); /* Send a netdev-add uevent to the new namespace */ kobject_uevent(&dev->dev.kobj, KOBJ_ADD); netdev_adjacent_add_links(dev); /* Fixup kobjects */ err = device_rename(&dev->dev, dev->name); WARN_ON(err); /* Add the device back in the hashes */ list_netdevice(dev); /* Notify protocols, that a new device appeared. */ call_netdevice_notifiers(NETDEV_REGISTER, dev); /* * Prevent userspace races by waiting until the network * device is fully setup before sending notifications. */ rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL); synchronize_net(); err = 0; out: return err; } EXPORT_SYMBOL_GPL(dev_change_net_namespace); static int dev_cpu_callback(struct notifier_block *nfb, unsigned long action, void *ocpu) { struct sk_buff **list_skb; struct sk_buff *skb; unsigned int cpu, oldcpu = (unsigned long)ocpu; struct softnet_data *sd, *oldsd; if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) return NOTIFY_OK; local_irq_disable(); cpu = smp_processor_id(); sd = &per_cpu(softnet_data, cpu); oldsd = &per_cpu(softnet_data, oldcpu); /* Find end of our completion_queue. */ list_skb = &sd->completion_queue; while (*list_skb) list_skb = &(*list_skb)->next; /* Append completion queue from offline CPU. */ *list_skb = oldsd->completion_queue; oldsd->completion_queue = NULL; /* Append output queue from offline CPU. */ if (oldsd->output_queue) { *sd->output_queue_tailp = oldsd->output_queue; sd->output_queue_tailp = oldsd->output_queue_tailp; oldsd->output_queue = NULL; oldsd->output_queue_tailp = &oldsd->output_queue; } /* Append NAPI poll list from offline CPU, with one exception : * process_backlog() must be called by cpu owning percpu backlog. * We properly handle process_queue & input_pkt_queue later. */ while (!list_empty(&oldsd->poll_list)) { struct napi_struct *napi = list_first_entry(&oldsd->poll_list, struct napi_struct, poll_list); list_del_init(&napi->poll_list); if (napi->poll == process_backlog) napi->state = 0; else ____napi_schedule(sd, napi); } raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_enable(); /* Process offline CPU's input_pkt_queue */ while ((skb = __skb_dequeue(&oldsd->process_queue))) { netif_rx_ni(skb); input_queue_head_incr(oldsd); } while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { netif_rx_ni(skb); input_queue_head_incr(oldsd); } return NOTIFY_OK; } /** * netdev_increment_features - increment feature set by one * @all: current feature set * @one: new feature set * @mask: mask feature set * * Computes a new feature set after adding a device with feature set * @one to the master device with current feature set @all. Will not * enable anything that is off in @mask. Returns the new feature set. 
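 *
 *	Illustrative sketch (hypothetical aggregation driver; "MY_MASK",
 *	"slave" and "master_priv->slaves" are assumptions, not code from
 *	this file):
 *
 *		netdev_features_t features = MY_MASK;
 *
 *		list_for_each_entry(slave, &master_priv->slaves, list)
 *			features = netdev_increment_features(features,
 *							     slave->dev->features,
 *							     MY_MASK);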
*/ netdev_features_t netdev_increment_features(netdev_features_t all, netdev_features_t one, netdev_features_t mask) { if (mask & NETIF_F_HW_CSUM) mask |= NETIF_F_CSUM_MASK; mask |= NETIF_F_VLAN_CHALLENGED; all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask; all &= one | ~NETIF_F_ALL_FOR_ALL; /* If one device supports hw checksumming, set for all. */ if (all & NETIF_F_HW_CSUM) all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM); return all; } EXPORT_SYMBOL(netdev_increment_features); static struct hlist_head * __net_init netdev_create_hash(void) { int i; struct hlist_head *hash; hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL); if (hash != NULL) for (i = 0; i < NETDEV_HASHENTRIES; i++) INIT_HLIST_HEAD(&hash[i]); return hash; } /* Initialize per network namespace state */ static int __net_init netdev_init(struct net *net) { if (net != &init_net) INIT_LIST_HEAD(&net->dev_base_head); net->dev_name_head = netdev_create_hash(); if (net->dev_name_head == NULL) goto err_name; net->dev_index_head = netdev_create_hash(); if (net->dev_index_head == NULL) goto err_idx; return 0; err_idx: kfree(net->dev_name_head); err_name: return -ENOMEM; } /** * netdev_drivername - network driver for the device * @dev: network device * * Determine network driver for device. */ const char *netdev_drivername(const struct net_device *dev) { const struct device_driver *driver; const struct device *parent; const char *empty = ""; parent = dev->dev.parent; if (!parent) return empty; driver = parent->driver; if (driver && driver->name) return driver->name; return empty; } static void __netdev_printk(const char *level, const struct net_device *dev, struct va_format *vaf) { if (dev && dev->dev.parent) { dev_printk_emit(level[1] - '0', dev->dev.parent, "%s %s %s%s: %pV", dev_driver_string(dev->dev.parent), dev_name(dev->dev.parent), netdev_name(dev), netdev_reg_state(dev), vaf); } else if (dev) { printk("%s%s%s: %pV", level, netdev_name(dev), netdev_reg_state(dev), vaf); } else { printk("%s(NULL net_device): %pV", level, vaf); } } void netdev_printk(const char *level, const struct net_device *dev, const char *format, ...) { struct va_format vaf; va_list args; va_start(args, format); vaf.fmt = format; vaf.va = &args; __netdev_printk(level, dev, &vaf); va_end(args); } EXPORT_SYMBOL(netdev_printk); #define define_netdev_printk_level(func, level) \ void func(const struct net_device *dev, const char *fmt, ...) \ { \ struct va_format vaf; \ va_list args; \ \ va_start(args, fmt); \ \ vaf.fmt = fmt; \ vaf.va = &args; \ \ __netdev_printk(level, dev, &vaf); \ \ va_end(args); \ } \ EXPORT_SYMBOL(func); define_netdev_printk_level(netdev_emerg, KERN_EMERG); define_netdev_printk_level(netdev_alert, KERN_ALERT); define_netdev_printk_level(netdev_crit, KERN_CRIT); define_netdev_printk_level(netdev_err, KERN_ERR); define_netdev_printk_level(netdev_warn, KERN_WARNING); define_netdev_printk_level(netdev_notice, KERN_NOTICE); define_netdev_printk_level(netdev_info, KERN_INFO); static void __net_exit netdev_exit(struct net *net) { kfree(net->dev_name_head); kfree(net->dev_index_head); } static struct pernet_operations __net_initdata netdev_net_ops = { .init = netdev_init, .exit = netdev_exit, }; static void __net_exit default_device_exit(struct net *net) { struct net_device *dev, *aux; /* * Push all migratable network devices back to the * initial network namespace */ rtnl_lock(); for_each_netdev_safe(net, dev, aux) { int err; char fb_name[IFNAMSIZ]; /* Ignore unmoveable devices (i.e. 
		   loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace. Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed
	 * wait here for all pending unregistrations to complete,
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network device
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */
	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace the loopback device must
	 * be present. Since we now dynamically allocate and free the
	 * loopback device ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices. Ensuring the loopback device
	 * is the first device that appears and the last network device
	 * that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_subsys_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);
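/*
 * Illustrative sketch (editor's addition, not part of the original
 * net/core/dev.c): a minimal, self-contained user-space demo of the
 * feature-merge rule that netdev_increment_features() above applies when
 * folding a slave device's feature set into a bonding/teaming master.
 * The DEMO_F_* masks below are stand-in bit values invented for this
 * sketch; they are not the kernel's NETIF_F_* definitions.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t demo_features_t;

#define DEMO_F_SG		(1ULL << 0)	/* plays the ONE_FOR_ALL role here */
#define DEMO_F_HW_CSUM		(1ULL << 1)
#define DEMO_F_IP_CSUM		(1ULL << 2)
#define DEMO_F_VLAN_CHALLENGED	(1ULL << 3)
#define DEMO_F_CSUM_MASK	(DEMO_F_HW_CSUM | DEMO_F_IP_CSUM)
#define DEMO_F_ONE_FOR_ALL	DEMO_F_SG
#define DEMO_F_ALL_FOR_ALL	DEMO_F_SG

/* Same shape as netdev_increment_features(): fold one device's feature set
 * into the accumulated set, never enabling anything that is off in @mask,
 * and collapsing the checksum flags onto HW_CSUM when it is available. */
static demo_features_t demo_increment_features(demo_features_t all,
					       demo_features_t one,
					       demo_features_t mask)
{
	if (mask & DEMO_F_HW_CSUM)
		mask |= DEMO_F_CSUM_MASK;
	mask |= DEMO_F_VLAN_CHALLENGED;

	all |= one & (DEMO_F_ONE_FOR_ALL | DEMO_F_CSUM_MASK) & mask;
	all &= one | ~DEMO_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & DEMO_F_HW_CSUM)
		all &= ~(DEMO_F_CSUM_MASK & ~DEMO_F_HW_CSUM);

	return all;
}

int main(void)
{
	demo_features_t master = DEMO_F_SG | DEMO_F_IP_CSUM;
	demo_features_t slave = DEMO_F_SG | DEMO_F_HW_CSUM;
	demo_features_t merged = demo_increment_features(master, slave, ~0ULL);

	/* Expect SG to survive and the checksum bits to collapse onto
	 * HW_CSUM, i.e. merged == (DEMO_F_SG | DEMO_F_HW_CSUM). */
	printf("merged feature mask: 0x%llx\n", (unsigned long long)merged);
	return 0;
}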
./CrossVul/dataset_final_sorted/CWE-400/c/bad_5356_1
crossvul-cpp_data_bad_485_0
// SPDX-License-Identifier: GPL-2.0 /* * USB hub driver. * * (C) Copyright 1999 Linus Torvalds * (C) Copyright 1999 Johannes Erdfelt * (C) Copyright 1999 Gregory P. Smith * (C) Copyright 2001 Brad Hards (bhards@bigpond.net.au) * * Released under the GPLv2 only. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/completion.h> #include <linux/sched/mm.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/ioctl.h> #include <linux/usb.h> #include <linux/usbdevice_fs.h> #include <linux/usb/hcd.h> #include <linux/usb/otg.h> #include <linux/usb/quirks.h> #include <linux/workqueue.h> #include <linux/mutex.h> #include <linux/random.h> #include <linux/pm_qos.h> #include <linux/kobject.h> #include <linux/uaccess.h> #include <asm/byteorder.h> #include "hub.h" #include "otg_whitelist.h" #define USB_VENDOR_GENESYS_LOGIC 0x05e3 #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01 #define USB_TP_TRANSMISSION_DELAY 40 /* ns */ #define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */ /* Protect struct usb_device->state and ->children members * Note: Both are also protected by ->dev.sem, except that ->state can * change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */ static DEFINE_SPINLOCK(device_state_lock); /* workqueue to process hub events */ static struct workqueue_struct *hub_wq; static void hub_event(struct work_struct *work); /* synchronize hub-port add/remove and peering operations */ DEFINE_MUTEX(usb_port_peer_mutex); /* cycle leds on hubs that aren't blinking for attention */ static bool blinkenlights; module_param(blinkenlights, bool, S_IRUGO); MODULE_PARM_DESC(blinkenlights, "true to cycle leds on hubs"); /* * Device SATA8000 FW1.0 from DATAST0R Technology Corp requires about * 10 seconds to send reply for the initial 64-byte descriptor request. */ /* define initial 64-byte descriptor request timeout in milliseconds */ static int initial_descriptor_timeout = USB_CTRL_GET_TIMEOUT; module_param(initial_descriptor_timeout, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(initial_descriptor_timeout, "initial 64-byte descriptor request timeout in milliseconds " "(default 5000 - 5.0 seconds)"); /* * As of 2.6.10 we introduce a new USB device initialization scheme which * closely resembles the way Windows works. Hopefully it will be compatible * with a wider range of devices than the old scheme. However some previously * working devices may start giving rise to "device not accepting address" * errors; if that happens the user can try the old scheme by adjusting the * following module parameters. * * For maximum flexibility there are two boolean parameters to control the * hub driver's behavior. On the first initialization attempt, if the * "old_scheme_first" parameter is set then the old scheme will be used, * otherwise the new scheme is used. If that fails and "use_both_schemes" * is set, then the driver will make another attempt, using the other scheme. */ static bool old_scheme_first; module_param(old_scheme_first, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(old_scheme_first, "start with the old device initialization scheme"); static bool use_both_schemes = 1; module_param(use_both_schemes, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(use_both_schemes, "try the other device initialization scheme if the " "first one fails"); /* Mutual exclusion for EHCI CF initialization. This interferes with * port reset on some companion controllers. 
*/ DECLARE_RWSEM(ehci_cf_port_reset_rwsem); EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem); #define HUB_DEBOUNCE_TIMEOUT 2000 #define HUB_DEBOUNCE_STEP 25 #define HUB_DEBOUNCE_STABLE 100 static void hub_release(struct kref *kref); static int usb_reset_and_verify_device(struct usb_device *udev); static int hub_port_disable(struct usb_hub *hub, int port1, int set_state); static inline char *portspeed(struct usb_hub *hub, int portstatus) { if (hub_is_superspeedplus(hub->hdev)) return "10.0 Gb/s"; if (hub_is_superspeed(hub->hdev)) return "5.0 Gb/s"; if (portstatus & USB_PORT_STAT_HIGH_SPEED) return "480 Mb/s"; else if (portstatus & USB_PORT_STAT_LOW_SPEED) return "1.5 Mb/s"; else return "12 Mb/s"; } /* Note that hdev or one of its children must be locked! */ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev) { if (!hdev || !hdev->actconfig || !hdev->maxchild) return NULL; return usb_get_intfdata(hdev->actconfig->interface[0]); } int usb_device_supports_lpm(struct usb_device *udev) { /* Some devices have trouble with LPM */ if (udev->quirks & USB_QUIRK_NO_LPM) return 0; /* USB 2.1 (and greater) devices indicate LPM support through * their USB 2.0 Extended Capabilities BOS descriptor. */ if (udev->speed == USB_SPEED_HIGH || udev->speed == USB_SPEED_FULL) { if (udev->bos->ext_cap && (USB_LPM_SUPPORT & le32_to_cpu(udev->bos->ext_cap->bmAttributes))) return 1; return 0; } /* * According to the USB 3.0 spec, all USB 3.0 devices must support LPM. * However, there are some that don't, and they set the U1/U2 exit * latencies to zero. */ if (!udev->bos->ss_cap) { dev_info(&udev->dev, "No LPM exit latency info found, disabling LPM.\n"); return 0; } if (udev->bos->ss_cap->bU1devExitLat == 0 && udev->bos->ss_cap->bU2DevExitLat == 0) { if (udev->parent) dev_info(&udev->dev, "LPM exit latency is zeroed, disabling LPM.\n"); else dev_info(&udev->dev, "We don't know the algorithms for LPM for this host, disabling LPM.\n"); return 0; } if (!udev->parent || udev->parent->lpm_capable) return 1; return 0; } /* * Set the Maximum Exit Latency (MEL) for the host to initiate a transition from * either U1 or U2. */ static void usb_set_lpm_mel(struct usb_device *udev, struct usb3_lpm_parameters *udev_lpm_params, unsigned int udev_exit_latency, struct usb_hub *hub, struct usb3_lpm_parameters *hub_lpm_params, unsigned int hub_exit_latency) { unsigned int total_mel; unsigned int device_mel; unsigned int hub_mel; /* * Calculate the time it takes to transition all links from the roothub * to the parent hub into U0. The parent hub must then decode the * packet (hub header decode latency) to figure out which port it was * bound for. * * The Hub Header decode latency is expressed in 0.1us intervals (0x1 * means 0.1us). Multiply that by 100 to get nanoseconds. */ total_mel = hub_lpm_params->mel + (hub->descriptor->u.ss.bHubHdrDecLat * 100); /* * How long will it take to transition the downstream hub's port into * U0? The greater of either the hub exit latency or the device exit * latency. * * The BOS U1/U2 exit latencies are expressed in 1us intervals. * Multiply that by 1000 to get nanoseconds. */ device_mel = udev_exit_latency * 1000; hub_mel = hub_exit_latency * 1000; if (device_mel > hub_mel) total_mel += device_mel; else total_mel += hub_mel; udev_lpm_params->mel = total_mel; } /* * Set the maximum Device to Host Exit Latency (PEL) for the device to initiate * a transition from either U1 or U2. 
*/ static void usb_set_lpm_pel(struct usb_device *udev, struct usb3_lpm_parameters *udev_lpm_params, unsigned int udev_exit_latency, struct usb_hub *hub, struct usb3_lpm_parameters *hub_lpm_params, unsigned int hub_exit_latency, unsigned int port_to_port_exit_latency) { unsigned int first_link_pel; unsigned int hub_pel; /* * First, the device sends an LFPS to transition the link between the * device and the parent hub into U0. The exit latency is the bigger of * the device exit latency or the hub exit latency. */ if (udev_exit_latency > hub_exit_latency) first_link_pel = udev_exit_latency * 1000; else first_link_pel = hub_exit_latency * 1000; /* * When the hub starts to receive the LFPS, there is a slight delay for * it to figure out that one of the ports is sending an LFPS. Then it * will forward the LFPS to its upstream link. The exit latency is the * delay, plus the PEL that we calculated for this hub. */ hub_pel = port_to_port_exit_latency * 1000 + hub_lpm_params->pel; /* * According to figure C-7 in the USB 3.0 spec, the PEL for this device * is the greater of the two exit latencies. */ if (first_link_pel > hub_pel) udev_lpm_params->pel = first_link_pel; else udev_lpm_params->pel = hub_pel; } /* * Set the System Exit Latency (SEL) to indicate the total worst-case time from * when a device initiates a transition to U0, until when it will receive the * first packet from the host controller. * * Section C.1.5.1 describes the four components to this: * - t1: device PEL * - t2: time for the ERDY to make it from the device to the host. * - t3: a host-specific delay to process the ERDY. * - t4: time for the packet to make it from the host to the device. * * t3 is specific to both the xHCI host and the platform the host is integrated * into. The Intel HW folks have said it's negligible, FIXME if a different * vendor says otherwise. */ static void usb_set_lpm_sel(struct usb_device *udev, struct usb3_lpm_parameters *udev_lpm_params) { struct usb_device *parent; unsigned int num_hubs; unsigned int total_sel; /* t1 = device PEL */ total_sel = udev_lpm_params->pel; /* How many external hubs are in between the device & the root port. */ for (parent = udev->parent, num_hubs = 0; parent->parent; parent = parent->parent) num_hubs++; /* t2 = 2.1us + 250ns * (num_hubs - 1) */ if (num_hubs > 0) total_sel += 2100 + 250 * (num_hubs - 1); /* t4 = 250ns * num_hubs */ total_sel += 250 * num_hubs; udev_lpm_params->sel = total_sel; } static void usb_set_lpm_parameters(struct usb_device *udev) { struct usb_hub *hub; unsigned int port_to_port_delay; unsigned int udev_u1_del; unsigned int udev_u2_del; unsigned int hub_u1_del; unsigned int hub_u2_del; if (!udev->lpm_capable || udev->speed < USB_SPEED_SUPER) return; hub = usb_hub_to_struct_hub(udev->parent); /* It doesn't take time to transition the roothub into U0, since it * doesn't have an upstream link. */ if (!hub) return; udev_u1_del = udev->bos->ss_cap->bU1devExitLat; udev_u2_del = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat); hub_u1_del = udev->parent->bos->ss_cap->bU1devExitLat; hub_u2_del = le16_to_cpu(udev->parent->bos->ss_cap->bU2DevExitLat); usb_set_lpm_mel(udev, &udev->u1_params, udev_u1_del, hub, &udev->parent->u1_params, hub_u1_del); usb_set_lpm_mel(udev, &udev->u2_params, udev_u2_del, hub, &udev->parent->u2_params, hub_u2_del); /* * Appendix C, section C.2.2.2, says that there is a slight delay from * when the parent hub notices the downstream port is trying to * transition to U0 to when the hub initiates a U0 transition on its * upstream port. 
The section says the delays are tPort2PortU1EL and * tPort2PortU2EL, but it doesn't define what they are. * * The hub chapter, sections 10.4.2.4 and 10.4.2.5 seem to be talking * about the same delays. Use the maximum delay calculations from those * sections. For U1, it's tHubPort2PortExitLat, which is 1us max. For * U2, it's tHubPort2PortExitLat + U2DevExitLat - U1DevExitLat. I * assume the device exit latencies they are talking about are the hub * exit latencies. * * What do we do if the U2 exit latency is less than the U1 exit * latency? It's possible, although not likely... */ port_to_port_delay = 1; usb_set_lpm_pel(udev, &udev->u1_params, udev_u1_del, hub, &udev->parent->u1_params, hub_u1_del, port_to_port_delay); if (hub_u2_del > hub_u1_del) port_to_port_delay = 1 + hub_u2_del - hub_u1_del; else port_to_port_delay = 1 + hub_u1_del; usb_set_lpm_pel(udev, &udev->u2_params, udev_u2_del, hub, &udev->parent->u2_params, hub_u2_del, port_to_port_delay); /* Now that we've got PEL, calculate SEL. */ usb_set_lpm_sel(udev, &udev->u1_params); usb_set_lpm_sel(udev, &udev->u2_params); } /* USB 2.0 spec Section 11.24.4.5 */ static int get_hub_descriptor(struct usb_device *hdev, struct usb_hub_descriptor *desc) { int i, ret, size; unsigned dtype; if (hub_is_superspeed(hdev)) { dtype = USB_DT_SS_HUB; size = USB_DT_SS_HUB_SIZE; } else { dtype = USB_DT_HUB; size = sizeof(struct usb_hub_descriptor); } for (i = 0; i < 3; i++) { ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB, dtype << 8, 0, desc, size, USB_CTRL_GET_TIMEOUT); if (hub_is_superspeed(hdev)) { if (ret == size) return ret; } else if (ret >= USB_DT_HUB_NONVAR_SIZE + 2) { /* Make sure we have the DeviceRemovable field. */ size = USB_DT_HUB_NONVAR_SIZE + desc->bNbrPorts / 8 + 1; if (ret < size) return -EMSGSIZE; return ret; } } return -EINVAL; } /* * USB 2.0 spec Section 11.24.2.1 */ static int clear_hub_feature(struct usb_device *hdev, int feature) { return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), USB_REQ_CLEAR_FEATURE, USB_RT_HUB, feature, 0, NULL, 0, 1000); } /* * USB 2.0 spec Section 11.24.2.2 */ int usb_clear_port_feature(struct usb_device *hdev, int port1, int feature) { return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), USB_REQ_CLEAR_FEATURE, USB_RT_PORT, feature, port1, NULL, 0, 1000); } /* * USB 2.0 spec Section 11.24.2.13 */ static int set_port_feature(struct usb_device *hdev, int port1, int feature) { return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), USB_REQ_SET_FEATURE, USB_RT_PORT, feature, port1, NULL, 0, 1000); } static char *to_led_name(int selector) { switch (selector) { case HUB_LED_AMBER: return "amber"; case HUB_LED_GREEN: return "green"; case HUB_LED_OFF: return "off"; case HUB_LED_AUTO: return "auto"; default: return "??"; } } /* * USB 2.0 spec Section 11.24.2.7.1.10 and table 11-7 * for info about using port indicators */ static void set_port_led(struct usb_hub *hub, int port1, int selector) { struct usb_port *port_dev = hub->ports[port1 - 1]; int status; status = set_port_feature(hub->hdev, (selector << 8) | port1, USB_PORT_FEAT_INDICATOR); dev_dbg(&port_dev->dev, "indicator %s status %d\n", to_led_name(selector), status); } #define LED_CYCLE_PERIOD ((2*HZ)/3) static void led_work(struct work_struct *work) { struct usb_hub *hub = container_of(work, struct usb_hub, leds.work); struct usb_device *hdev = hub->hdev; unsigned i; unsigned changed = 0; int cursor = -1; if (hdev->state != USB_STATE_CONFIGURED || hub->quiescing) return; for (i = 0; i < 
hdev->maxchild; i++) { unsigned selector, mode; /* 30%-50% duty cycle */ switch (hub->indicator[i]) { /* cycle marker */ case INDICATOR_CYCLE: cursor = i; selector = HUB_LED_AUTO; mode = INDICATOR_AUTO; break; /* blinking green = sw attention */ case INDICATOR_GREEN_BLINK: selector = HUB_LED_GREEN; mode = INDICATOR_GREEN_BLINK_OFF; break; case INDICATOR_GREEN_BLINK_OFF: selector = HUB_LED_OFF; mode = INDICATOR_GREEN_BLINK; break; /* blinking amber = hw attention */ case INDICATOR_AMBER_BLINK: selector = HUB_LED_AMBER; mode = INDICATOR_AMBER_BLINK_OFF; break; case INDICATOR_AMBER_BLINK_OFF: selector = HUB_LED_OFF; mode = INDICATOR_AMBER_BLINK; break; /* blink green/amber = reserved */ case INDICATOR_ALT_BLINK: selector = HUB_LED_GREEN; mode = INDICATOR_ALT_BLINK_OFF; break; case INDICATOR_ALT_BLINK_OFF: selector = HUB_LED_AMBER; mode = INDICATOR_ALT_BLINK; break; default: continue; } if (selector != HUB_LED_AUTO) changed = 1; set_port_led(hub, i + 1, selector); hub->indicator[i] = mode; } if (!changed && blinkenlights) { cursor++; cursor %= hdev->maxchild; set_port_led(hub, cursor + 1, HUB_LED_GREEN); hub->indicator[cursor] = INDICATOR_CYCLE; changed++; } if (changed) queue_delayed_work(system_power_efficient_wq, &hub->leds, LED_CYCLE_PERIOD); } /* use a short timeout for hub/port status fetches */ #define USB_STS_TIMEOUT 1000 #define USB_STS_RETRIES 5 /* * USB 2.0 spec Section 11.24.2.6 */ static int get_hub_status(struct usb_device *hdev, struct usb_hub_status *data) { int i, status = -ETIMEDOUT; for (i = 0; i < USB_STS_RETRIES && (status == -ETIMEDOUT || status == -EPIPE); i++) { status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_HUB, 0, 0, data, sizeof(*data), USB_STS_TIMEOUT); } return status; } /* * USB 2.0 spec Section 11.24.2.7 * USB 3.1 takes into use the wValue and wLength fields, spec Section 10.16.2.6 */ static int get_port_status(struct usb_device *hdev, int port1, void *data, u16 value, u16 length) { int i, status = -ETIMEDOUT; for (i = 0; i < USB_STS_RETRIES && (status == -ETIMEDOUT || status == -EPIPE); i++) { status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_PORT, value, port1, data, length, USB_STS_TIMEOUT); } return status; } static int hub_ext_port_status(struct usb_hub *hub, int port1, int type, u16 *status, u16 *change, u32 *ext_status) { int ret; int len = 4; if (type != HUB_PORT_STATUS) len = 8; mutex_lock(&hub->status_mutex); ret = get_port_status(hub->hdev, port1, &hub->status->port, type, len); if (ret < len) { if (ret != -ENODEV) dev_err(hub->intfdev, "%s failed (err = %d)\n", __func__, ret); if (ret >= 0) ret = -EIO; } else { *status = le16_to_cpu(hub->status->port.wPortStatus); *change = le16_to_cpu(hub->status->port.wPortChange); if (type != HUB_PORT_STATUS && ext_status) *ext_status = le32_to_cpu( hub->status->port.dwExtPortStatus); ret = 0; } mutex_unlock(&hub->status_mutex); return ret; } static int hub_port_status(struct usb_hub *hub, int port1, u16 *status, u16 *change) { return hub_ext_port_status(hub, port1, HUB_PORT_STATUS, status, change, NULL); } static void kick_hub_wq(struct usb_hub *hub) { struct usb_interface *intf; if (hub->disconnected || work_pending(&hub->events)) return; /* * Suppress autosuspend until the event is proceed. * * Be careful and make sure that the symmetric operation is * always called. We are here only when there is no pending * work for this hub. Therefore put the interface either when * the new work is called or when it is canceled. 
*/ intf = to_usb_interface(hub->intfdev); usb_autopm_get_interface_no_resume(intf); kref_get(&hub->kref); if (queue_work(hub_wq, &hub->events)) return; /* the work has already been scheduled */ usb_autopm_put_interface_async(intf); kref_put(&hub->kref, hub_release); } void usb_kick_hub_wq(struct usb_device *hdev) { struct usb_hub *hub = usb_hub_to_struct_hub(hdev); if (hub) kick_hub_wq(hub); } /* * Let the USB core know that a USB 3.0 device has sent a Function Wake Device * Notification, which indicates it had initiated remote wakeup. * * USB 3.0 hubs do not report the port link state change from U3 to U0 when the * device initiates resume, so the USB core will not receive notice of the * resume through the normal hub interrupt URB. */ void usb_wakeup_notification(struct usb_device *hdev, unsigned int portnum) { struct usb_hub *hub; struct usb_port *port_dev; if (!hdev) return; hub = usb_hub_to_struct_hub(hdev); if (hub) { port_dev = hub->ports[portnum - 1]; if (port_dev && port_dev->child) pm_wakeup_event(&port_dev->child->dev, 0); set_bit(portnum, hub->wakeup_bits); kick_hub_wq(hub); } } EXPORT_SYMBOL_GPL(usb_wakeup_notification); /* completion function, fires on port status changes and various faults */ static void hub_irq(struct urb *urb) { struct usb_hub *hub = urb->context; int status = urb->status; unsigned i; unsigned long bits; switch (status) { case -ENOENT: /* synchronous unlink */ case -ECONNRESET: /* async unlink */ case -ESHUTDOWN: /* hardware going away */ return; default: /* presumably an error */ /* Cause a hub reset after 10 consecutive errors */ dev_dbg(hub->intfdev, "transfer --> %d\n", status); if ((++hub->nerrors < 10) || hub->error) goto resubmit; hub->error = status; /* FALL THROUGH */ /* let hub_wq handle things */ case 0: /* we got data: port status changed */ bits = 0; for (i = 0; i < urb->actual_length; ++i) bits |= ((unsigned long) ((*hub->buffer)[i])) << (i*8); hub->event_bits[0] = bits; break; } hub->nerrors = 0; /* Something happened, let hub_wq figure it out */ kick_hub_wq(hub); resubmit: if (hub->quiescing) return; status = usb_submit_urb(hub->urb, GFP_ATOMIC); if (status != 0 && status != -ENODEV && status != -EPERM) dev_err(hub->intfdev, "resubmit --> %d\n", status); } /* USB 2.0 spec Section 11.24.2.3 */ static inline int hub_clear_tt_buffer(struct usb_device *hdev, u16 devinfo, u16 tt) { /* Need to clear both directions for control ep */ if (((devinfo >> 11) & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_CONTROL) { int status = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo ^ 0x8000, tt, NULL, 0, 1000); if (status) return status; } return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo, tt, NULL, 0, 1000); } /* * enumeration blocks hub_wq for a long time. we use keventd instead, since * long blocking there is the exception, not the rule. accordingly, HCDs * talking to TTs must queue control transfers (not just bulk and iso), so * both can talk to the same hub concurrently. 
*/ static void hub_tt_work(struct work_struct *work) { struct usb_hub *hub = container_of(work, struct usb_hub, tt.clear_work); unsigned long flags; spin_lock_irqsave(&hub->tt.lock, flags); while (!list_empty(&hub->tt.clear_list)) { struct list_head *next; struct usb_tt_clear *clear; struct usb_device *hdev = hub->hdev; const struct hc_driver *drv; int status; next = hub->tt.clear_list.next; clear = list_entry(next, struct usb_tt_clear, clear_list); list_del(&clear->clear_list); /* drop lock so HCD can concurrently report other TT errors */ spin_unlock_irqrestore(&hub->tt.lock, flags); status = hub_clear_tt_buffer(hdev, clear->devinfo, clear->tt); if (status && status != -ENODEV) dev_err(&hdev->dev, "clear tt %d (%04x) error %d\n", clear->tt, clear->devinfo, status); /* Tell the HCD, even if the operation failed */ drv = clear->hcd->driver; if (drv->clear_tt_buffer_complete) (drv->clear_tt_buffer_complete)(clear->hcd, clear->ep); kfree(clear); spin_lock_irqsave(&hub->tt.lock, flags); } spin_unlock_irqrestore(&hub->tt.lock, flags); } /** * usb_hub_set_port_power - control hub port's power state * @hdev: USB device belonging to the usb hub * @hub: target hub * @port1: port index * @set: expected status * * call this function to control port's power via setting or * clearing the port's PORT_POWER feature. * * Return: 0 if successful. A negative error code otherwise. */ int usb_hub_set_port_power(struct usb_device *hdev, struct usb_hub *hub, int port1, bool set) { int ret; if (set) ret = set_port_feature(hdev, port1, USB_PORT_FEAT_POWER); else ret = usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_POWER); if (ret) return ret; if (set) set_bit(port1, hub->power_bits); else clear_bit(port1, hub->power_bits); return 0; } /** * usb_hub_clear_tt_buffer - clear control/bulk TT state in high speed hub * @urb: an URB associated with the failed or incomplete split transaction * * High speed HCDs use this to tell the hub driver that some split control or * bulk transaction failed in a way that requires clearing internal state of * a transaction translator. This is normally detected (and reported) from * interrupt context. * * It may not be possible for that hub to handle additional full (or low) * speed transactions until that state is fully cleared out. * * Return: 0 if successful. A negative error code otherwise. */ int usb_hub_clear_tt_buffer(struct urb *urb) { struct usb_device *udev = urb->dev; int pipe = urb->pipe; struct usb_tt *tt = udev->tt; unsigned long flags; struct usb_tt_clear *clear; /* we've got to cope with an arbitrary number of pending TT clears, * since each TT has "at least two" buffers that can need it (and * there can be many TTs per hub). even if they're uncommon. */ clear = kmalloc(sizeof *clear, GFP_ATOMIC); if (clear == NULL) { dev_err(&udev->dev, "can't save CLEAR_TT_BUFFER state\n"); /* FIXME recover somehow ... RESET_TT? */ return -ENOMEM; } /* info that CLEAR_TT_BUFFER needs */ clear->tt = tt->multi ? udev->ttport : 1; clear->devinfo = usb_pipeendpoint (pipe); clear->devinfo |= udev->devnum << 4; clear->devinfo |= usb_pipecontrol(pipe) ? 
(USB_ENDPOINT_XFER_CONTROL << 11) : (USB_ENDPOINT_XFER_BULK << 11); if (usb_pipein(pipe)) clear->devinfo |= 1 << 15; /* info for completion callback */ clear->hcd = bus_to_hcd(udev->bus); clear->ep = urb->ep; /* tell keventd to clear state for this TT */ spin_lock_irqsave(&tt->lock, flags); list_add_tail(&clear->clear_list, &tt->clear_list); schedule_work(&tt->clear_work); spin_unlock_irqrestore(&tt->lock, flags); return 0; } EXPORT_SYMBOL_GPL(usb_hub_clear_tt_buffer); static void hub_power_on(struct usb_hub *hub, bool do_delay) { int port1; /* Enable power on each port. Some hubs have reserved values * of LPSM (> 2) in their descriptors, even though they are * USB 2.0 hubs. Some hubs do not implement port-power switching * but only emulate it. In all cases, the ports won't work * unless we send these messages to the hub. */ if (hub_is_port_power_switchable(hub)) dev_dbg(hub->intfdev, "enabling power on all ports\n"); else dev_dbg(hub->intfdev, "trying to enable port power on " "non-switchable hub\n"); for (port1 = 1; port1 <= hub->hdev->maxchild; port1++) if (test_bit(port1, hub->power_bits)) set_port_feature(hub->hdev, port1, USB_PORT_FEAT_POWER); else usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_POWER); if (do_delay) msleep(hub_power_on_good_delay(hub)); } static int hub_hub_status(struct usb_hub *hub, u16 *status, u16 *change) { int ret; mutex_lock(&hub->status_mutex); ret = get_hub_status(hub->hdev, &hub->status->hub); if (ret < 0) { if (ret != -ENODEV) dev_err(hub->intfdev, "%s failed (err = %d)\n", __func__, ret); } else { *status = le16_to_cpu(hub->status->hub.wHubStatus); *change = le16_to_cpu(hub->status->hub.wHubChange); ret = 0; } mutex_unlock(&hub->status_mutex); return ret; } static int hub_set_port_link_state(struct usb_hub *hub, int port1, unsigned int link_status) { return set_port_feature(hub->hdev, port1 | (link_status << 3), USB_PORT_FEAT_LINK_STATE); } /* * Disable a port and mark a logical connect-change event, so that some * time later hub_wq will disconnect() any existing usb_device on the port * and will re-enumerate if there actually is a device attached. */ static void hub_port_logical_disconnect(struct usb_hub *hub, int port1) { dev_dbg(&hub->ports[port1 - 1]->dev, "logical disconnect\n"); hub_port_disable(hub, port1, 1); /* FIXME let caller ask to power down the port: * - some devices won't enumerate without a VBUS power cycle * - SRP saves power that way * - ... new call, TBD ... * That's easy if this hub can switch power per-port, and * hub_wq reactivates the port later (timer, SRP, etc). * Powerdown must be optional, because of reset/DFU. */ set_bit(port1, hub->change_bits); kick_hub_wq(hub); } /** * usb_remove_device - disable a device's port on its parent hub * @udev: device to be disabled and removed * Context: @udev locked, must be able to sleep. * * After @udev's port has been disabled, hub_wq is notified and it will * see that the device has been disconnected. When the device is * physically unplugged and something is plugged in, the events will * be received and processed normally. * * Return: 0 if successful. A negative error code otherwise. 
*/ int usb_remove_device(struct usb_device *udev) { struct usb_hub *hub; struct usb_interface *intf; if (!udev->parent) /* Can't remove a root hub */ return -EINVAL; hub = usb_hub_to_struct_hub(udev->parent); intf = to_usb_interface(hub->intfdev); usb_autopm_get_interface(intf); set_bit(udev->portnum, hub->removed_bits); hub_port_logical_disconnect(hub, udev->portnum); usb_autopm_put_interface(intf); return 0; } enum hub_activation_type { HUB_INIT, HUB_INIT2, HUB_INIT3, /* INITs must come first */ HUB_POST_RESET, HUB_RESUME, HUB_RESET_RESUME, }; static void hub_init_func2(struct work_struct *ws); static void hub_init_func3(struct work_struct *ws); static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) { struct usb_device *hdev = hub->hdev; struct usb_hcd *hcd; int ret; int port1; int status; bool need_debounce_delay = false; unsigned delay; /* Continue a partial initialization */ if (type == HUB_INIT2 || type == HUB_INIT3) { device_lock(&hdev->dev); /* Was the hub disconnected while we were waiting? */ if (hub->disconnected) goto disconnected; if (type == HUB_INIT2) goto init2; goto init3; } kref_get(&hub->kref); /* The superspeed hub except for root hub has to use Hub Depth * value as an offset into the route string to locate the bits * it uses to determine the downstream port number. So hub driver * should send a set hub depth request to superspeed hub after * the superspeed hub is set configuration in initialization or * reset procedure. * * After a resume, port power should still be on. * For any other type of activation, turn it on. */ if (type != HUB_RESUME) { if (hdev->parent && hub_is_superspeed(hdev)) { ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), HUB_SET_DEPTH, USB_RT_HUB, hdev->level - 1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (ret < 0) dev_err(hub->intfdev, "set hub depth failed\n"); } /* Speed up system boot by using a delayed_work for the * hub's initial power-up delays. This is pretty awkward * and the implementation looks like a home-brewed sort of * setjmp/longjmp, but it saves at least 100 ms for each * root hub (assuming usbcore is compiled into the kernel * rather than as a module). It adds up. * * This can't be done for HUB_RESUME or HUB_RESET_RESUME * because for those activation types the ports have to be * operational when we return. In theory this could be done * for HUB_POST_RESET, but it's easier not to. */ if (type == HUB_INIT) { delay = hub_power_on_good_delay(hub); hub_power_on(hub, false); INIT_DELAYED_WORK(&hub->init_work, hub_init_func2); queue_delayed_work(system_power_efficient_wq, &hub->init_work, msecs_to_jiffies(delay)); /* Suppress autosuspend until init is done */ usb_autopm_get_interface_no_resume( to_usb_interface(hub->intfdev)); return; /* Continues at init2: below */ } else if (type == HUB_RESET_RESUME) { /* The internal host controller state for the hub device * may be gone after a host power loss on system resume. * Update the device's info so the HW knows it's a hub. */ hcd = bus_to_hcd(hdev->bus); if (hcd->driver->update_hub_device) { ret = hcd->driver->update_hub_device(hcd, hdev, &hub->tt, GFP_NOIO); if (ret < 0) { dev_err(hub->intfdev, "Host not accepting hub info update\n"); dev_err(hub->intfdev, "LS/FS devices and hubs may not work under this hub\n"); } } hub_power_on(hub, true); } else { hub_power_on(hub, true); } } init2: /* * Check each port and set hub->change_bits to let hub_wq know * which ports need attention. 
*/ for (port1 = 1; port1 <= hdev->maxchild; ++port1) { struct usb_port *port_dev = hub->ports[port1 - 1]; struct usb_device *udev = port_dev->child; u16 portstatus, portchange; portstatus = portchange = 0; status = hub_port_status(hub, port1, &portstatus, &portchange); if (status) goto abort; if (udev || (portstatus & USB_PORT_STAT_CONNECTION)) dev_dbg(&port_dev->dev, "status %04x change %04x\n", portstatus, portchange); /* * After anything other than HUB_RESUME (i.e., initialization * or any sort of reset), every port should be disabled. * Unconnected ports should likewise be disabled (paranoia), * and so should ports for which we have no usb_device. */ if ((portstatus & USB_PORT_STAT_ENABLE) && ( type != HUB_RESUME || !(portstatus & USB_PORT_STAT_CONNECTION) || !udev || udev->state == USB_STATE_NOTATTACHED)) { /* * USB3 protocol ports will automatically transition * to Enabled state when detect an USB3.0 device attach. * Do not disable USB3 protocol ports, just pretend * power was lost */ portstatus &= ~USB_PORT_STAT_ENABLE; if (!hub_is_superspeed(hdev)) usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE); } /* Clear status-change flags; we'll debounce later */ if (portchange & USB_PORT_STAT_C_CONNECTION) { need_debounce_delay = true; usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_CONNECTION); } if (portchange & USB_PORT_STAT_C_ENABLE) { need_debounce_delay = true; usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_ENABLE); } if (portchange & USB_PORT_STAT_C_RESET) { need_debounce_delay = true; usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_RESET); } if ((portchange & USB_PORT_STAT_C_BH_RESET) && hub_is_superspeed(hub->hdev)) { need_debounce_delay = true; usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_BH_PORT_RESET); } /* We can forget about a "removed" device when there's a * physical disconnect or the connect status changes. */ if (!(portstatus & USB_PORT_STAT_CONNECTION) || (portchange & USB_PORT_STAT_C_CONNECTION)) clear_bit(port1, hub->removed_bits); if (!udev || udev->state == USB_STATE_NOTATTACHED) { /* Tell hub_wq to disconnect the device or * check for a new connection or over current condition. * Based on USB2.0 Spec Section 11.12.5, * C_PORT_OVER_CURRENT could be set while * PORT_OVER_CURRENT is not. So check for any of them. */ if (udev || (portstatus & USB_PORT_STAT_CONNECTION) || (portstatus & USB_PORT_STAT_OVERCURRENT) || (portchange & USB_PORT_STAT_C_OVERCURRENT)) set_bit(port1, hub->change_bits); } else if (portstatus & USB_PORT_STAT_ENABLE) { bool port_resumed = (portstatus & USB_PORT_STAT_LINK_STATE) == USB_SS_PORT_LS_U0; /* The power session apparently survived the resume. * If there was an overcurrent or suspend change * (i.e., remote wakeup request), have hub_wq * take care of it. Look at the port link state * for USB 3.0 hubs, since they don't have a suspend * change bit, and they don't set the port link change * bit on device-initiated resume. */ if (portchange || (hub_is_superspeed(hub->hdev) && port_resumed)) set_bit(port1, hub->change_bits); } else if (udev->persist_enabled) { #ifdef CONFIG_PM udev->reset_resume = 1; #endif /* Don't set the change_bits when the device * was powered off. */ if (test_bit(port1, hub->power_bits)) set_bit(port1, hub->change_bits); } else { /* The power session is gone; tell hub_wq */ usb_set_device_state(udev, USB_STATE_NOTATTACHED); set_bit(port1, hub->change_bits); } } /* If no port-status-change flags were set, we don't need any * debouncing. 
If flags were set we can try to debounce the * ports all at once right now, instead of letting hub_wq do them * one at a time later on. * * If any port-status changes do occur during this delay, hub_wq * will see them later and handle them normally. */ if (need_debounce_delay) { delay = HUB_DEBOUNCE_STABLE; /* Don't do a long sleep inside a workqueue routine */ if (type == HUB_INIT2) { INIT_DELAYED_WORK(&hub->init_work, hub_init_func3); queue_delayed_work(system_power_efficient_wq, &hub->init_work, msecs_to_jiffies(delay)); device_unlock(&hdev->dev); return; /* Continues at init3: below */ } else { msleep(delay); } } init3: hub->quiescing = 0; status = usb_submit_urb(hub->urb, GFP_NOIO); if (status < 0) dev_err(hub->intfdev, "activate --> %d\n", status); if (hub->has_indicators && blinkenlights) queue_delayed_work(system_power_efficient_wq, &hub->leds, LED_CYCLE_PERIOD); /* Scan all ports that need attention */ kick_hub_wq(hub); abort: if (type == HUB_INIT2 || type == HUB_INIT3) { /* Allow autosuspend if it was suppressed */ disconnected: usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); device_unlock(&hdev->dev); } kref_put(&hub->kref, hub_release); } /* Implement the continuations for the delays above */ static void hub_init_func2(struct work_struct *ws) { struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work); hub_activate(hub, HUB_INIT2); } static void hub_init_func3(struct work_struct *ws) { struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work); hub_activate(hub, HUB_INIT3); } enum hub_quiescing_type { HUB_DISCONNECT, HUB_PRE_RESET, HUB_SUSPEND }; static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type) { struct usb_device *hdev = hub->hdev; int i; /* hub_wq and related activity won't re-trigger */ hub->quiescing = 1; if (type != HUB_SUSPEND) { /* Disconnect all the children */ for (i = 0; i < hdev->maxchild; ++i) { if (hub->ports[i]->child) usb_disconnect(&hub->ports[i]->child); } } /* Stop hub_wq and related activity */ usb_kill_urb(hub->urb); if (hub->has_indicators) cancel_delayed_work_sync(&hub->leds); if (hub->tt.hub) flush_work(&hub->tt.clear_work); } static void hub_pm_barrier_for_all_ports(struct usb_hub *hub) { int i; for (i = 0; i < hub->hdev->maxchild; ++i) pm_runtime_barrier(&hub->ports[i]->dev); } /* caller has locked the hub device */ static int hub_pre_reset(struct usb_interface *intf) { struct usb_hub *hub = usb_get_intfdata(intf); hub_quiesce(hub, HUB_PRE_RESET); hub->in_reset = 1; hub_pm_barrier_for_all_ports(hub); return 0; } /* caller has locked the hub device */ static int hub_post_reset(struct usb_interface *intf) { struct usb_hub *hub = usb_get_intfdata(intf); hub->in_reset = 0; hub_pm_barrier_for_all_ports(hub); hub_activate(hub, HUB_POST_RESET); return 0; } static int hub_configure(struct usb_hub *hub, struct usb_endpoint_descriptor *endpoint) { struct usb_hcd *hcd; struct usb_device *hdev = hub->hdev; struct device *hub_dev = hub->intfdev; u16 hubstatus, hubchange; u16 wHubCharacteristics; unsigned int pipe; int maxp, ret, i; char *message = "out of memory"; unsigned unit_load; unsigned full_load; unsigned maxchild; hub->buffer = kmalloc(sizeof(*hub->buffer), GFP_KERNEL); if (!hub->buffer) { ret = -ENOMEM; goto fail; } hub->status = kmalloc(sizeof(*hub->status), GFP_KERNEL); if (!hub->status) { ret = -ENOMEM; goto fail; } mutex_init(&hub->status_mutex); hub->descriptor = kzalloc(sizeof(*hub->descriptor), GFP_KERNEL); if (!hub->descriptor) { ret = -ENOMEM; goto fail; } /* Request the entire hub 
descriptor. * hub->descriptor can handle USB_MAXCHILDREN ports, * but a (non-SS) hub can/will return fewer bytes here. */ ret = get_hub_descriptor(hdev, hub->descriptor); if (ret < 0) { message = "can't read hub descriptor"; goto fail; } maxchild = USB_MAXCHILDREN; if (hub_is_superspeed(hdev)) maxchild = min_t(unsigned, maxchild, USB_SS_MAXPORTS); if (hub->descriptor->bNbrPorts > maxchild) { message = "hub has too many ports!"; ret = -ENODEV; goto fail; } else if (hub->descriptor->bNbrPorts == 0) { message = "hub doesn't have any ports!"; ret = -ENODEV; goto fail; } /* * Accumulate wHubDelay + 40ns for every hub in the tree of devices. * The resulting value will be used for SetIsochDelay() request. */ if (hub_is_superspeed(hdev) || hub_is_superspeedplus(hdev)) { u32 delay = __le16_to_cpu(hub->descriptor->u.ss.wHubDelay); if (hdev->parent) delay += hdev->parent->hub_delay; delay += USB_TP_TRANSMISSION_DELAY; hdev->hub_delay = min_t(u32, delay, USB_TP_TRANSMISSION_DELAY_MAX); } maxchild = hub->descriptor->bNbrPorts; dev_info(hub_dev, "%d port%s detected\n", maxchild, (maxchild == 1) ? "" : "s"); hub->ports = kcalloc(maxchild, sizeof(struct usb_port *), GFP_KERNEL); if (!hub->ports) { ret = -ENOMEM; goto fail; } wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics); if (hub_is_superspeed(hdev)) { unit_load = 150; full_load = 900; } else { unit_load = 100; full_load = 500; } /* FIXME for USB 3.0, skip for now */ if ((wHubCharacteristics & HUB_CHAR_COMPOUND) && !(hub_is_superspeed(hdev))) { char portstr[USB_MAXCHILDREN + 1]; for (i = 0; i < maxchild; i++) portstr[i] = hub->descriptor->u.hs.DeviceRemovable [((i + 1) / 8)] & (1 << ((i + 1) % 8)) ? 'F' : 'R'; portstr[maxchild] = 0; dev_dbg(hub_dev, "compound device; port removable status: %s\n", portstr); } else dev_dbg(hub_dev, "standalone hub\n"); switch (wHubCharacteristics & HUB_CHAR_LPSM) { case HUB_CHAR_COMMON_LPSM: dev_dbg(hub_dev, "ganged power switching\n"); break; case HUB_CHAR_INDV_PORT_LPSM: dev_dbg(hub_dev, "individual port power switching\n"); break; case HUB_CHAR_NO_LPSM: case HUB_CHAR_LPSM: dev_dbg(hub_dev, "no power switching (usb 1.0)\n"); break; } switch (wHubCharacteristics & HUB_CHAR_OCPM) { case HUB_CHAR_COMMON_OCPM: dev_dbg(hub_dev, "global over-current protection\n"); break; case HUB_CHAR_INDV_PORT_OCPM: dev_dbg(hub_dev, "individual port over-current protection\n"); break; case HUB_CHAR_NO_OCPM: case HUB_CHAR_OCPM: dev_dbg(hub_dev, "no over-current protection\n"); break; } spin_lock_init(&hub->tt.lock); INIT_LIST_HEAD(&hub->tt.clear_list); INIT_WORK(&hub->tt.clear_work, hub_tt_work); switch (hdev->descriptor.bDeviceProtocol) { case USB_HUB_PR_FS: break; case USB_HUB_PR_HS_SINGLE_TT: dev_dbg(hub_dev, "Single TT\n"); hub->tt.hub = hdev; break; case USB_HUB_PR_HS_MULTI_TT: ret = usb_set_interface(hdev, 0, 1); if (ret == 0) { dev_dbg(hub_dev, "TT per port\n"); hub->tt.multi = 1; } else dev_err(hub_dev, "Using single TT (err %d)\n", ret); hub->tt.hub = hdev; break; case USB_HUB_PR_SS: /* USB 3.0 hubs don't have a TT */ break; default: dev_dbg(hub_dev, "Unrecognized hub protocol %d\n", hdev->descriptor.bDeviceProtocol); break; } /* Note 8 FS bit times == (8 bits / 12000000 bps) ~= 666ns */ switch (wHubCharacteristics & HUB_CHAR_TTTT) { case HUB_TTTT_8_BITS: if (hdev->descriptor.bDeviceProtocol != 0) { hub->tt.think_time = 666; dev_dbg(hub_dev, "TT requires at most %d " "FS bit times (%d ns)\n", 8, hub->tt.think_time); } break; case HUB_TTTT_16_BITS: hub->tt.think_time = 666 * 2; dev_dbg(hub_dev, "TT requires at 
most %d " "FS bit times (%d ns)\n", 16, hub->tt.think_time); break; case HUB_TTTT_24_BITS: hub->tt.think_time = 666 * 3; dev_dbg(hub_dev, "TT requires at most %d " "FS bit times (%d ns)\n", 24, hub->tt.think_time); break; case HUB_TTTT_32_BITS: hub->tt.think_time = 666 * 4; dev_dbg(hub_dev, "TT requires at most %d " "FS bit times (%d ns)\n", 32, hub->tt.think_time); break; } /* probe() zeroes hub->indicator[] */ if (wHubCharacteristics & HUB_CHAR_PORTIND) { hub->has_indicators = 1; dev_dbg(hub_dev, "Port indicators are supported\n"); } dev_dbg(hub_dev, "power on to power good time: %dms\n", hub->descriptor->bPwrOn2PwrGood * 2); /* power budgeting mostly matters with bus-powered hubs, * and battery-powered root hubs (may provide just 8 mA). */ ret = usb_get_std_status(hdev, USB_RECIP_DEVICE, 0, &hubstatus); if (ret) { message = "can't get hub status"; goto fail; } hcd = bus_to_hcd(hdev->bus); if (hdev == hdev->bus->root_hub) { if (hcd->power_budget > 0) hdev->bus_mA = hcd->power_budget; else hdev->bus_mA = full_load * maxchild; if (hdev->bus_mA >= full_load) hub->mA_per_port = full_load; else { hub->mA_per_port = hdev->bus_mA; hub->limited_power = 1; } } else if ((hubstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0) { int remaining = hdev->bus_mA - hub->descriptor->bHubContrCurrent; dev_dbg(hub_dev, "hub controller current requirement: %dmA\n", hub->descriptor->bHubContrCurrent); hub->limited_power = 1; if (remaining < maxchild * unit_load) dev_warn(hub_dev, "insufficient power available " "to use all downstream ports\n"); hub->mA_per_port = unit_load; /* 7.2.1 */ } else { /* Self-powered external hub */ /* FIXME: What about battery-powered external hubs that * provide less current per port? */ hub->mA_per_port = full_load; } if (hub->mA_per_port < full_load) dev_dbg(hub_dev, "%umA bus power budget for each child\n", hub->mA_per_port); ret = hub_hub_status(hub, &hubstatus, &hubchange); if (ret < 0) { message = "can't get hub status"; goto fail; } /* local power status reports aren't always correct */ if (hdev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_SELFPOWER) dev_dbg(hub_dev, "local power source is %s\n", (hubstatus & HUB_STATUS_LOCAL_POWER) ? "lost (inactive)" : "good"); if ((wHubCharacteristics & HUB_CHAR_OCPM) == 0) dev_dbg(hub_dev, "%sover-current condition exists\n", (hubstatus & HUB_STATUS_OVERCURRENT) ? "" : "no "); /* set up the interrupt endpoint * We use the EP's maxpacket size instead of (PORTS+1+7)/8 * bytes as USB2.0[11.12.3] says because some hubs are known * to send more data (and thus cause overflow). For root hubs, * maxpktsize is defined in hcd.c's fake endpoint descriptors * to be big enough for at least USB_MAXCHILDREN ports. 
*/ pipe = usb_rcvintpipe(hdev, endpoint->bEndpointAddress); maxp = usb_maxpacket(hdev, pipe, usb_pipeout(pipe)); if (maxp > sizeof(*hub->buffer)) maxp = sizeof(*hub->buffer); hub->urb = usb_alloc_urb(0, GFP_KERNEL); if (!hub->urb) { ret = -ENOMEM; goto fail; } usb_fill_int_urb(hub->urb, hdev, pipe, *hub->buffer, maxp, hub_irq, hub, endpoint->bInterval); /* maybe cycle the hub leds */ if (hub->has_indicators && blinkenlights) hub->indicator[0] = INDICATOR_CYCLE; mutex_lock(&usb_port_peer_mutex); for (i = 0; i < maxchild; i++) { ret = usb_hub_create_port_device(hub, i + 1); if (ret < 0) { dev_err(hub->intfdev, "couldn't create port%d device.\n", i + 1); break; } } hdev->maxchild = i; for (i = 0; i < hdev->maxchild; i++) { struct usb_port *port_dev = hub->ports[i]; pm_runtime_put(&port_dev->dev); } mutex_unlock(&usb_port_peer_mutex); if (ret < 0) goto fail; /* Update the HCD's internal representation of this hub before hub_wq * starts getting port status changes for devices under the hub. */ if (hcd->driver->update_hub_device) { ret = hcd->driver->update_hub_device(hcd, hdev, &hub->tt, GFP_KERNEL); if (ret < 0) { message = "can't update HCD hub info"; goto fail; } } usb_hub_adjust_deviceremovable(hdev, hub->descriptor); hub_activate(hub, HUB_INIT); return 0; fail: dev_err(hub_dev, "config failed, %s (err %d)\n", message, ret); /* hub_disconnect() frees urb and descriptor */ return ret; } static void hub_release(struct kref *kref) { struct usb_hub *hub = container_of(kref, struct usb_hub, kref); usb_put_dev(hub->hdev); usb_put_intf(to_usb_interface(hub->intfdev)); kfree(hub); } static unsigned highspeed_hubs; static void hub_disconnect(struct usb_interface *intf) { struct usb_hub *hub = usb_get_intfdata(intf); struct usb_device *hdev = interface_to_usbdev(intf); int port1; /* * Stop adding new hub events. We do not want to block here and thus * will not try to remove any pending work item. */ hub->disconnected = 1; /* Disconnect all children and quiesce the hub */ hub->error = 0; hub_quiesce(hub, HUB_DISCONNECT); mutex_lock(&usb_port_peer_mutex); /* Avoid races with recursively_mark_NOTATTACHED() */ spin_lock_irq(&device_state_lock); port1 = hdev->maxchild; hdev->maxchild = 0; usb_set_intfdata(intf, NULL); spin_unlock_irq(&device_state_lock); for (; port1 > 0; --port1) usb_hub_remove_port_device(hub, port1); mutex_unlock(&usb_port_peer_mutex); if (hub->hdev->speed == USB_SPEED_HIGH) highspeed_hubs--; usb_free_urb(hub->urb); kfree(hub->ports); kfree(hub->descriptor); kfree(hub->status); kfree(hub->buffer); pm_suspend_ignore_children(&intf->dev, false); kref_put(&hub->kref, hub_release); } static bool hub_descriptor_is_sane(struct usb_host_interface *desc) { /* Some hubs have a subclass of 1, which AFAICT according to the */ /* specs is not defined, but it works */ if (desc->desc.bInterfaceSubClass != 0 && desc->desc.bInterfaceSubClass != 1) return false; /* Multiple endpoints? What kind of mutant ninja-hub is this? */ if (desc->desc.bNumEndpoints != 1) return false; /* If the first endpoint is not interrupt IN, we'd better punt! 
*/ if (!usb_endpoint_is_int_in(&desc->endpoint[0].desc)) return false; return true; } static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_host_interface *desc; struct usb_device *hdev; struct usb_hub *hub; desc = intf->cur_altsetting; hdev = interface_to_usbdev(intf); /* * Set default autosuspend delay as 0 to speedup bus suspend, * based on the below considerations: * * - Unlike other drivers, the hub driver does not rely on the * autosuspend delay to provide enough time to handle a wakeup * event, and the submitted status URB is just to check future * change on hub downstream ports, so it is safe to do it. * * - The patch might cause one or more auto supend/resume for * below very rare devices when they are plugged into hub * first time: * * devices having trouble initializing, and disconnect * themselves from the bus and then reconnect a second * or so later * * devices just for downloading firmware, and disconnects * themselves after completing it * * For these quite rare devices, their drivers may change the * autosuspend delay of their parent hub in the probe() to one * appropriate value to avoid the subtle problem if someone * does care it. * * - The patch may cause one or more auto suspend/resume on * hub during running 'lsusb', but it is probably too * infrequent to worry about. * * - Change autosuspend delay of hub can avoid unnecessary auto * suspend timer for hub, also may decrease power consumption * of USB bus. * * - If user has indicated to prevent autosuspend by passing * usbcore.autosuspend = -1 then keep autosuspend disabled. */ #ifdef CONFIG_PM if (hdev->dev.power.autosuspend_delay >= 0) pm_runtime_set_autosuspend_delay(&hdev->dev, 0); #endif /* * Hubs have proper suspend/resume support, except for root hubs * where the controller driver doesn't have bus_suspend and * bus_resume methods. 
*/ if (hdev->parent) { /* normal device */ usb_enable_autosuspend(hdev); } else { /* root hub */ const struct hc_driver *drv = bus_to_hcd(hdev->bus)->driver; if (drv->bus_suspend && drv->bus_resume) usb_enable_autosuspend(hdev); } if (hdev->level == MAX_TOPO_LEVEL) { dev_err(&intf->dev, "Unsupported bus topology: hub nested too deep\n"); return -E2BIG; } #ifdef CONFIG_USB_OTG_BLACKLIST_HUB if (hdev->parent) { dev_warn(&intf->dev, "ignoring external hub\n"); return -ENODEV; } #endif if (!hub_descriptor_is_sane(desc)) { dev_err(&intf->dev, "bad descriptor, ignoring hub\n"); return -EIO; } /* We found a hub */ dev_info(&intf->dev, "USB hub found\n"); hub = kzalloc(sizeof(*hub), GFP_KERNEL); if (!hub) return -ENOMEM; kref_init(&hub->kref); hub->intfdev = &intf->dev; hub->hdev = hdev; INIT_DELAYED_WORK(&hub->leds, led_work); INIT_DELAYED_WORK(&hub->init_work, NULL); INIT_WORK(&hub->events, hub_event); usb_get_intf(intf); usb_get_dev(hdev); usb_set_intfdata(intf, hub); intf->needs_remote_wakeup = 1; pm_suspend_ignore_children(&intf->dev, true); if (hdev->speed == USB_SPEED_HIGH) highspeed_hubs++; if (id->driver_info & HUB_QUIRK_CHECK_PORT_AUTOSUSPEND) hub->quirk_check_port_auto_suspend = 1; if (hub_configure(hub, &desc->endpoint[0].desc) >= 0) return 0; hub_disconnect(intf); return -ENODEV; } static int hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data) { struct usb_device *hdev = interface_to_usbdev(intf); struct usb_hub *hub = usb_hub_to_struct_hub(hdev); /* assert ifno == 0 (part of hub spec) */ switch (code) { case USBDEVFS_HUB_PORTINFO: { struct usbdevfs_hub_portinfo *info = user_data; int i; spin_lock_irq(&device_state_lock); if (hdev->devnum <= 0) info->nports = 0; else { info->nports = hdev->maxchild; for (i = 0; i < info->nports; i++) { if (hub->ports[i]->child == NULL) info->port[i] = 0; else info->port[i] = hub->ports[i]->child->devnum; } } spin_unlock_irq(&device_state_lock); return info->nports + 1; } default: return -ENOSYS; } } /* * Allow user programs to claim ports on a hub. When a device is attached * to one of these "claimed" ports, the program will "own" the device. */ static int find_port_owner(struct usb_device *hdev, unsigned port1, struct usb_dev_state ***ppowner) { struct usb_hub *hub = usb_hub_to_struct_hub(hdev); if (hdev->state == USB_STATE_NOTATTACHED) return -ENODEV; if (port1 == 0 || port1 > hdev->maxchild) return -EINVAL; /* Devices not managed by the hub driver * will always have maxchild equal to 0. 
*/ *ppowner = &(hub->ports[port1 - 1]->port_owner); return 0; } /* In the following three functions, the caller must hold hdev's lock */ int usb_hub_claim_port(struct usb_device *hdev, unsigned port1, struct usb_dev_state *owner) { int rc; struct usb_dev_state **powner; rc = find_port_owner(hdev, port1, &powner); if (rc) return rc; if (*powner) return -EBUSY; *powner = owner; return rc; } EXPORT_SYMBOL_GPL(usb_hub_claim_port); int usb_hub_release_port(struct usb_device *hdev, unsigned port1, struct usb_dev_state *owner) { int rc; struct usb_dev_state **powner; rc = find_port_owner(hdev, port1, &powner); if (rc) return rc; if (*powner != owner) return -ENOENT; *powner = NULL; return rc; } EXPORT_SYMBOL_GPL(usb_hub_release_port); void usb_hub_release_all_ports(struct usb_device *hdev, struct usb_dev_state *owner) { struct usb_hub *hub = usb_hub_to_struct_hub(hdev); int n; for (n = 0; n < hdev->maxchild; n++) { if (hub->ports[n]->port_owner == owner) hub->ports[n]->port_owner = NULL; } } /* The caller must hold udev's lock */ bool usb_device_is_owned(struct usb_device *udev) { struct usb_hub *hub; if (udev->state == USB_STATE_NOTATTACHED || !udev->parent) return false; hub = usb_hub_to_struct_hub(udev->parent); return !!hub->ports[udev->portnum - 1]->port_owner; } static void recursively_mark_NOTATTACHED(struct usb_device *udev) { struct usb_hub *hub = usb_hub_to_struct_hub(udev); int i; for (i = 0; i < udev->maxchild; ++i) { if (hub->ports[i]->child) recursively_mark_NOTATTACHED(hub->ports[i]->child); } if (udev->state == USB_STATE_SUSPENDED) udev->active_duration -= jiffies; udev->state = USB_STATE_NOTATTACHED; } /** * usb_set_device_state - change a device's current state (usbcore, hcds) * @udev: pointer to device whose state should be changed * @new_state: new state value to be stored * * udev->state is _not_ fully protected by the device lock. Although * most transitions are made only while holding the lock, the state can * can change to USB_STATE_NOTATTACHED at almost any time. This * is so that devices can be marked as disconnected as soon as possible, * without having to wait for any semaphores to be released. As a result, * all changes to any device's state must be protected by the * device_state_lock spinlock. * * Once a device has been added to the device tree, all changes to its state * should be made using this routine. The state should _not_ be set directly. * * If udev->state is already USB_STATE_NOTATTACHED then no change is made. * Otherwise udev->state is set to new_state, and if new_state is * USB_STATE_NOTATTACHED then all of udev's descendants' states are also set * to USB_STATE_NOTATTACHED. */ void usb_set_device_state(struct usb_device *udev, enum usb_device_state new_state) { unsigned long flags; int wakeup = -1; spin_lock_irqsave(&device_state_lock, flags); if (udev->state == USB_STATE_NOTATTACHED) ; /* do nothing */ else if (new_state != USB_STATE_NOTATTACHED) { /* root hub wakeup capabilities are managed out-of-band * and may involve silicon errata ... ignore them here. */ if (udev->parent) { if (udev->state == USB_STATE_SUSPENDED || new_state == USB_STATE_SUSPENDED) ; /* No change to wakeup settings */ else if (new_state == USB_STATE_CONFIGURED) wakeup = (udev->quirks & USB_QUIRK_IGNORE_REMOTE_WAKEUP) ? 
0 : udev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_WAKEUP; else wakeup = 0; } if (udev->state == USB_STATE_SUSPENDED && new_state != USB_STATE_SUSPENDED) udev->active_duration -= jiffies; else if (new_state == USB_STATE_SUSPENDED && udev->state != USB_STATE_SUSPENDED) udev->active_duration += jiffies; udev->state = new_state; } else recursively_mark_NOTATTACHED(udev); spin_unlock_irqrestore(&device_state_lock, flags); if (wakeup >= 0) device_set_wakeup_capable(&udev->dev, wakeup); } EXPORT_SYMBOL_GPL(usb_set_device_state); /* * Choose a device number. * * Device numbers are used as filenames in usbfs. On USB-1.1 and * USB-2.0 buses they are also used as device addresses, however on * USB-3.0 buses the address is assigned by the controller hardware * and it usually is not the same as the device number. * * WUSB devices are simple: they have no hubs behind, so the mapping * device <-> virtual port number becomes 1:1. Why? to simplify the * life of the device connection logic in * drivers/usb/wusbcore/devconnect.c. When we do the initial secret * handshake we need to assign a temporary address in the unauthorized * space. For simplicity we use the first virtual port number found to * be free [drivers/usb/wusbcore/devconnect.c:wusbhc_devconnect_ack()] * and that becomes it's address [X < 128] or its unauthorized address * [X | 0x80]. * * We add 1 as an offset to the one-based USB-stack port number * (zero-based wusb virtual port index) for two reasons: (a) dev addr * 0 is reserved by USB for default address; (b) Linux's USB stack * uses always #1 for the root hub of the controller. So USB stack's * port #1, which is wusb virtual-port #0 has address #2. * * Devices connected under xHCI are not as simple. The host controller * supports virtualization, so the hardware assigns device addresses and * the HCD must setup data structures before issuing a set address * command to the hardware. */ static void choose_devnum(struct usb_device *udev) { int devnum; struct usb_bus *bus = udev->bus; /* be safe when more hub events are proceed in parallel */ mutex_lock(&bus->devnum_next_mutex); if (udev->wusb) { devnum = udev->portnum + 1; BUG_ON(test_bit(devnum, bus->devmap.devicemap)); } else { /* Try to allocate the next devnum beginning at * bus->devnum_next. */ devnum = find_next_zero_bit(bus->devmap.devicemap, 128, bus->devnum_next); if (devnum >= 128) devnum = find_next_zero_bit(bus->devmap.devicemap, 128, 1); bus->devnum_next = (devnum >= 127 ? 1 : devnum + 1); } if (devnum < 128) { set_bit(devnum, bus->devmap.devicemap); udev->devnum = devnum; } mutex_unlock(&bus->devnum_next_mutex); } static void release_devnum(struct usb_device *udev) { if (udev->devnum > 0) { clear_bit(udev->devnum, udev->bus->devmap.devicemap); udev->devnum = -1; } } static void update_devnum(struct usb_device *udev, int devnum) { /* The address for a WUSB device is managed by wusbcore. 
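	 * Only non-WUSB devices take the devnum passed in here; see e.g.
	 * the update_devnum(udev, 0) call after a successful port reset.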
*/ if (!udev->wusb) udev->devnum = devnum; } static void hub_free_dev(struct usb_device *udev) { struct usb_hcd *hcd = bus_to_hcd(udev->bus); /* Root hubs aren't real devices, so don't free HCD resources */ if (hcd->driver->free_dev && udev->parent) hcd->driver->free_dev(hcd, udev); } static void hub_disconnect_children(struct usb_device *udev) { struct usb_hub *hub = usb_hub_to_struct_hub(udev); int i; /* Free up all the children before we remove this device */ for (i = 0; i < udev->maxchild; i++) { if (hub->ports[i]->child) usb_disconnect(&hub->ports[i]->child); } } /** * usb_disconnect - disconnect a device (usbcore-internal) * @pdev: pointer to device being disconnected * Context: !in_interrupt () * * Something got disconnected. Get rid of it and all of its children. * * If *pdev is a normal device then the parent hub must already be locked. * If *pdev is a root hub then the caller must hold the usb_bus_idr_lock, * which protects the set of root hubs as well as the list of buses. * * Only hub drivers (including virtual root hub drivers for host * controllers) should ever call this. * * This call is synchronous, and may not be used in an interrupt context. */ void usb_disconnect(struct usb_device **pdev) { struct usb_port *port_dev = NULL; struct usb_device *udev = *pdev; struct usb_hub *hub = NULL; int port1 = 1; /* mark the device as inactive, so any further urb submissions for * this device (and any of its children) will fail immediately. * this quiesces everything except pending urbs. */ usb_set_device_state(udev, USB_STATE_NOTATTACHED); dev_info(&udev->dev, "USB disconnect, device number %d\n", udev->devnum); /* * Ensure that the pm runtime code knows that the USB device * is in the process of being disconnected. */ pm_runtime_barrier(&udev->dev); usb_lock_device(udev); hub_disconnect_children(udev); /* deallocate hcd/hardware state ... nuking all pending urbs and * cleaning up all state associated with the current configuration * so that the hardware is now fully quiesced. */ dev_dbg(&udev->dev, "unregistering device\n"); usb_disable_device(udev, 0); usb_hcd_synchronize_unlinks(udev); if (udev->parent) { port1 = udev->portnum; hub = usb_hub_to_struct_hub(udev->parent); port_dev = hub->ports[port1 - 1]; sysfs_remove_link(&udev->dev.kobj, "port"); sysfs_remove_link(&port_dev->dev.kobj, "device"); /* * As usb_port_runtime_resume() de-references udev, make * sure no resumes occur during removal */ if (!test_and_set_bit(port1, hub->child_usage_bits)) pm_runtime_get_sync(&port_dev->dev); } usb_remove_ep_devs(&udev->ep0); usb_unlock_device(udev); /* Unregister the device. The device driver is responsible * for de-configuring the device and invoking the remove-device * notifier chain (used by usbfs and possibly others). */ device_del(&udev->dev); /* Free the device number and delete the parent's children[] * (or root_hub) pointer. 
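	 * Clearing *pdev is done under device_state_lock so that a
	 * concurrent recursively_mark_NOTATTACHED() never follows a
	 * stale child pointer.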
*/ release_devnum(udev); /* Avoid races with recursively_mark_NOTATTACHED() */ spin_lock_irq(&device_state_lock); *pdev = NULL; spin_unlock_irq(&device_state_lock); if (port_dev && test_and_clear_bit(port1, hub->child_usage_bits)) pm_runtime_put(&port_dev->dev); hub_free_dev(udev); put_device(&udev->dev); } #ifdef CONFIG_USB_ANNOUNCE_NEW_DEVICES static void show_string(struct usb_device *udev, char *id, char *string) { if (!string) return; dev_info(&udev->dev, "%s: %s\n", id, string); } static void announce_device(struct usb_device *udev) { u16 bcdDevice = le16_to_cpu(udev->descriptor.bcdDevice); dev_info(&udev->dev, "New USB device found, idVendor=%04x, idProduct=%04x, bcdDevice=%2x.%02x\n", le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct), bcdDevice >> 8, bcdDevice & 0xff); dev_info(&udev->dev, "New USB device strings: Mfr=%d, Product=%d, SerialNumber=%d\n", udev->descriptor.iManufacturer, udev->descriptor.iProduct, udev->descriptor.iSerialNumber); show_string(udev, "Product", udev->product); show_string(udev, "Manufacturer", udev->manufacturer); show_string(udev, "SerialNumber", udev->serial); } #else static inline void announce_device(struct usb_device *udev) { } #endif /** * usb_enumerate_device_otg - FIXME (usbcore-internal) * @udev: newly addressed device (in ADDRESS state) * * Finish enumeration for On-The-Go devices * * Return: 0 if successful. A negative error code otherwise. */ static int usb_enumerate_device_otg(struct usb_device *udev) { int err = 0; #ifdef CONFIG_USB_OTG /* * OTG-aware devices on OTG-capable root hubs may be able to use SRP, * to wake us after we've powered off VBUS; and HNP, switching roles * "host" to "peripheral". The OTG descriptor helps figure this out. */ if (!udev->bus->is_b_host && udev->config && udev->parent == udev->bus->root_hub) { struct usb_otg_descriptor *desc = NULL; struct usb_bus *bus = udev->bus; unsigned port1 = udev->portnum; /* descriptor may appear anywhere in config */ err = __usb_get_extra_descriptor(udev->rawdescriptors[0], le16_to_cpu(udev->config[0].desc.wTotalLength), USB_DT_OTG, (void **) &desc); if (err || !(desc->bmAttributes & USB_OTG_HNP)) return 0; dev_info(&udev->dev, "Dual-Role OTG device on %sHNP port\n", (port1 == bus->otg_port) ? "" : "non-"); /* enable HNP before suspend, it's simpler */ if (port1 == bus->otg_port) { bus->b_hnp_enable = 1; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_SET_FEATURE, 0, USB_DEVICE_B_HNP_ENABLE, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (err < 0) { /* * OTG MESSAGE: report errors here, * customize to match your product. */ dev_err(&udev->dev, "can't set HNP mode: %d\n", err); bus->b_hnp_enable = 0; } } else if (desc->bLength == sizeof (struct usb_otg_descriptor)) { /* Set a_alt_hnp_support for legacy otg device */ err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_SET_FEATURE, 0, USB_DEVICE_A_ALT_HNP_SUPPORT, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (err < 0) dev_err(&udev->dev, "set a_alt_hnp_support failed: %d\n", err); } } #endif return err; } /** * usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal) * @udev: newly addressed device (in ADDRESS state) * * This is only called by usb_new_device() and usb_authorize_device() * and FIXME -- all comments that apply to them apply here wrt to * environment. * * If the device is WUSB and not authorized, we don't attempt to read * the string descriptors, as they will be errored out by the device * until it has been authorized. * * Return: 0 if successful. 
A negative error code otherwise. */ static int usb_enumerate_device(struct usb_device *udev) { int err; struct usb_hcd *hcd = bus_to_hcd(udev->bus); if (udev->config == NULL) { err = usb_get_configuration(udev); if (err < 0) { if (err != -ENODEV) dev_err(&udev->dev, "can't read configurations, error %d\n", err); return err; } } /* read the standard strings and cache them if present */ udev->product = usb_cache_string(udev, udev->descriptor.iProduct); udev->manufacturer = usb_cache_string(udev, udev->descriptor.iManufacturer); udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber); err = usb_enumerate_device_otg(udev); if (err < 0) return err; if (IS_ENABLED(CONFIG_USB_OTG_WHITELIST) && hcd->tpl_support && !is_targeted(udev)) { /* Maybe it can talk to us, though we can't talk to it. * (Includes HNP test device.) */ if (IS_ENABLED(CONFIG_USB_OTG) && (udev->bus->b_hnp_enable || udev->bus->is_b_host)) { err = usb_port_suspend(udev, PMSG_AUTO_SUSPEND); if (err < 0) dev_dbg(&udev->dev, "HNP fail, %d\n", err); } return -ENOTSUPP; } usb_detect_interface_quirks(udev); return 0; } static void set_usb_port_removable(struct usb_device *udev) { struct usb_device *hdev = udev->parent; struct usb_hub *hub; u8 port = udev->portnum; u16 wHubCharacteristics; bool removable = true; if (!hdev) return; hub = usb_hub_to_struct_hub(udev->parent); /* * If the platform firmware has provided information about a port, * use that to determine whether it's removable. */ switch (hub->ports[udev->portnum - 1]->connect_type) { case USB_PORT_CONNECT_TYPE_HOT_PLUG: udev->removable = USB_DEVICE_REMOVABLE; return; case USB_PORT_CONNECT_TYPE_HARD_WIRED: case USB_PORT_NOT_USED: udev->removable = USB_DEVICE_FIXED; return; default: break; } /* * Otherwise, check whether the hub knows whether a port is removable * or not */ wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics); if (!(wHubCharacteristics & HUB_CHAR_COMPOUND)) return; if (hub_is_superspeed(hdev)) { if (le16_to_cpu(hub->descriptor->u.ss.DeviceRemovable) & (1 << port)) removable = false; } else { if (hub->descriptor->u.hs.DeviceRemovable[port / 8] & (1 << (port % 8))) removable = false; } if (removable) udev->removable = USB_DEVICE_REMOVABLE; else udev->removable = USB_DEVICE_FIXED; } /** * usb_new_device - perform initial device setup (usbcore-internal) * @udev: newly addressed device (in ADDRESS state) * * This is called with devices which have been detected but not fully * enumerated. The device descriptor is available, but not descriptors * for any device configuration. The caller must have locked either * the parent hub (if udev is a normal device) or else the * usb_bus_idr_lock (if udev is a root hub). The parent's pointer to * udev has already been installed, but udev is not yet visible through * sysfs or other filesystem code. * * This call is synchronous, and may not be used in an interrupt context. * * Only the hub driver or root-hub registrar should ever call this. * * Return: Whether the device is configured properly or not. Zero if the * interface was registered with the driver core; else a negative errno * value. 
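 *
 * In outline, the code below reads the descriptors and strings via
 * usb_enumerate_device(), announces the device, feeds its ID strings to
 * the RNG, registers it with device_add(), creates the "port"/"device"
 * sysfs links to its hub port, and finally hands it off to runtime PM.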
* */ int usb_new_device(struct usb_device *udev) { int err; if (udev->parent) { /* Initialize non-root-hub device wakeup to disabled; * device (un)configuration controls wakeup capable * sysfs power/wakeup controls wakeup enabled/disabled */ device_init_wakeup(&udev->dev, 0); } /* Tell the runtime-PM framework the device is active */ pm_runtime_set_active(&udev->dev); pm_runtime_get_noresume(&udev->dev); pm_runtime_use_autosuspend(&udev->dev); pm_runtime_enable(&udev->dev); /* By default, forbid autosuspend for all devices. It will be * allowed for hubs during binding. */ usb_disable_autosuspend(udev); err = usb_enumerate_device(udev); /* Read descriptors */ if (err < 0) goto fail; dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n", udev->devnum, udev->bus->busnum, (((udev->bus->busnum-1) * 128) + (udev->devnum-1))); /* export the usbdev device-node for libusb */ udev->dev.devt = MKDEV(USB_DEVICE_MAJOR, (((udev->bus->busnum-1) * 128) + (udev->devnum-1))); /* Tell the world! */ announce_device(udev); if (udev->serial) add_device_randomness(udev->serial, strlen(udev->serial)); if (udev->product) add_device_randomness(udev->product, strlen(udev->product)); if (udev->manufacturer) add_device_randomness(udev->manufacturer, strlen(udev->manufacturer)); device_enable_async_suspend(&udev->dev); /* check whether the hub or firmware marks this port as non-removable */ if (udev->parent) set_usb_port_removable(udev); /* Register the device. The device driver is responsible * for configuring the device and invoking the add-device * notifier chain (used by usbfs and possibly others). */ err = device_add(&udev->dev); if (err) { dev_err(&udev->dev, "can't device_add, error %d\n", err); goto fail; } /* Create link files between child device and usb port device. */ if (udev->parent) { struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); int port1 = udev->portnum; struct usb_port *port_dev = hub->ports[port1 - 1]; err = sysfs_create_link(&udev->dev.kobj, &port_dev->dev.kobj, "port"); if (err) goto fail; err = sysfs_create_link(&port_dev->dev.kobj, &udev->dev.kobj, "device"); if (err) { sysfs_remove_link(&udev->dev.kobj, "port"); goto fail; } if (!test_and_set_bit(port1, hub->child_usage_bits)) pm_runtime_get_sync(&port_dev->dev); } (void) usb_create_ep_devs(&udev->dev, &udev->ep0, udev); usb_mark_last_busy(udev); pm_runtime_put_sync_autosuspend(&udev->dev); return err; fail: usb_set_device_state(udev, USB_STATE_NOTATTACHED); pm_runtime_disable(&udev->dev); pm_runtime_set_suspended(&udev->dev); return err; } /** * usb_deauthorize_device - deauthorize a device (usbcore-internal) * @usb_dev: USB device * * Move the USB device to a very basic state where interfaces are disabled * and the device is in fact unconfigured and unusable. * * We share a lock (that we have) with device_del(), so we need to * defer its call. * * Return: 0. 
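 *
 * The device itself stays registered; it is only unconfigured via
 * usb_set_configuration(usb_dev, -1) and its authorized flag cleared.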
*/ int usb_deauthorize_device(struct usb_device *usb_dev) { usb_lock_device(usb_dev); if (usb_dev->authorized == 0) goto out_unauthorized; usb_dev->authorized = 0; usb_set_configuration(usb_dev, -1); out_unauthorized: usb_unlock_device(usb_dev); return 0; } int usb_authorize_device(struct usb_device *usb_dev) { int result = 0, c; usb_lock_device(usb_dev); if (usb_dev->authorized == 1) goto out_authorized; result = usb_autoresume_device(usb_dev); if (result < 0) { dev_err(&usb_dev->dev, "can't autoresume for authorization: %d\n", result); goto error_autoresume; } if (usb_dev->wusb) { result = usb_get_device_descriptor(usb_dev, sizeof(usb_dev->descriptor)); if (result < 0) { dev_err(&usb_dev->dev, "can't re-read device descriptor for " "authorization: %d\n", result); goto error_device_descriptor; } } usb_dev->authorized = 1; /* Choose and set the configuration. This registers the interfaces * with the driver core and lets interface drivers bind to them. */ c = usb_choose_configuration(usb_dev); if (c >= 0) { result = usb_set_configuration(usb_dev, c); if (result) { dev_err(&usb_dev->dev, "can't set config #%d, error %d\n", c, result); /* This need not be fatal. The user can try to * set other configurations. */ } } dev_info(&usb_dev->dev, "authorized to connect\n"); error_device_descriptor: usb_autosuspend_device(usb_dev); error_autoresume: out_authorized: usb_unlock_device(usb_dev); /* complements locktree */ return result; } /* * Return 1 if port speed is SuperSpeedPlus, 0 otherwise * check it from the link protocol field of the current speed ID attribute. * current speed ID is got from ext port status request. Sublink speed attribute * table is returned with the hub BOS SSP device capability descriptor */ static int port_speed_is_ssp(struct usb_device *hdev, int speed_id) { int ssa_count; u32 ss_attr; int i; struct usb_ssp_cap_descriptor *ssp_cap = hdev->bos->ssp_cap; if (!ssp_cap) return 0; ssa_count = le32_to_cpu(ssp_cap->bmAttributes) & USB_SSP_SUBLINK_SPEED_ATTRIBS; for (i = 0; i <= ssa_count; i++) { ss_attr = le32_to_cpu(ssp_cap->bmSublinkSpeedAttr[i]); if (speed_id == (ss_attr & USB_SSP_SUBLINK_SPEED_SSID)) return !!(ss_attr & USB_SSP_SUBLINK_SPEED_LP); } return 0; } /* Returns 1 if @hub is a WUSB root hub, 0 otherwise */ static unsigned hub_is_wusb(struct usb_hub *hub) { struct usb_hcd *hcd; if (hub->hdev->parent != NULL) /* not a root hub? */ return 0; hcd = bus_to_hcd(hub->hdev->bus); return hcd->wireless; } #define PORT_RESET_TRIES 5 #define SET_ADDRESS_TRIES 2 #define GET_DESCRIPTOR_TRIES 2 #define SET_CONFIG_TRIES (2 * (use_both_schemes + 1)) #define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)scheme) #define HUB_ROOT_RESET_TIME 60 /* times are in msec */ #define HUB_SHORT_RESET_TIME 10 #define HUB_BH_RESET_TIME 50 #define HUB_LONG_RESET_TIME 200 #define HUB_RESET_TIMEOUT 800 /* * "New scheme" enumeration causes an extra state transition to be * exposed to an xhci host and causes USB3 devices to receive control * commands in the default state. This has been seen to cause * enumeration failures, so disable this enumeration scheme for USB3 * devices. */ static bool use_new_scheme(struct usb_device *udev, int retry, struct usb_port *port_dev) { int old_scheme_first_port = port_dev->quirks & USB_PORT_QUIRK_OLD_SCHEME; int quick_enumeration = (udev->speed == USB_SPEED_HIGH); if (udev->speed >= USB_SPEED_SUPER) return false; return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first || quick_enumeration); } /* Is a USB 3.0 port in the Inactive or Compliance Mode state? 
 * Port warm reset is required to recover
 */
static bool hub_port_warm_reset_required(struct usb_hub *hub, int port1,
		u16 portstatus)
{
	u16 link_state;

	if (!hub_is_superspeed(hub->hdev))
		return false;

	if (test_bit(port1, hub->warm_reset_bits))
		return true;

	link_state = portstatus & USB_PORT_STAT_LINK_STATE;
	return link_state == USB_SS_PORT_LS_SS_INACTIVE
		|| link_state == USB_SS_PORT_LS_COMP_MOD;
}

static int hub_port_wait_reset(struct usb_hub *hub, int port1,
			struct usb_device *udev, unsigned int delay, bool warm)
{
	int delay_time, ret;
	u16 portstatus;
	u16 portchange;
	u32 ext_portstatus = 0;

	for (delay_time = 0;
			delay_time < HUB_RESET_TIMEOUT;
			delay_time += delay) {
		/* wait to give the device a chance to reset */
		msleep(delay);

		/* read and decode port status */
		if (hub_is_superspeedplus(hub->hdev))
			ret = hub_ext_port_status(hub, port1,
						  HUB_EXT_PORT_STATUS,
						  &portstatus, &portchange,
						  &ext_portstatus);
		else
			ret = hub_port_status(hub, port1, &portstatus,
					      &portchange);
		if (ret < 0)
			return ret;

		/*
		 * The port state is unknown until the reset completes.
		 *
		 * On top of that, some chips may require additional time
		 * to re-establish a connection after the reset is complete,
		 * so also wait for the connection to be re-established.
		 */
		if (!(portstatus & USB_PORT_STAT_RESET) &&
		    (portstatus & USB_PORT_STAT_CONNECTION))
			break;

		/* switch to the long delay after two short delay failures */
		if (delay_time >= 2 * HUB_SHORT_RESET_TIME)
			delay = HUB_LONG_RESET_TIME;

		dev_dbg(&hub->ports[port1 - 1]->dev,
				"not %sreset yet, waiting %dms\n",
				warm ? "warm " : "", delay);
	}

	if ((portstatus & USB_PORT_STAT_RESET))
		return -EBUSY;

	if (hub_port_warm_reset_required(hub, port1, portstatus))
		return -ENOTCONN;

	/* Device went away? */
	if (!(portstatus & USB_PORT_STAT_CONNECTION))
		return -ENOTCONN;

	/* Retry if connect change is set but status is still connected.
	 * A USB 3.0 connection may bounce if multiple warm resets were issued,
	 * but the device may have successfully re-connected. Ignore it.
	 */
	if (!hub_is_superspeed(hub->hdev) &&
	    (portchange & USB_PORT_STAT_C_CONNECTION)) {
		usb_clear_port_feature(hub->hdev, port1,
				       USB_PORT_FEAT_C_CONNECTION);
		return -EAGAIN;
	}

	if (!(portstatus & USB_PORT_STAT_ENABLE))
		return -EBUSY;

	if (!udev)
		return 0;

	if (hub_is_superspeedplus(hub->hdev)) {
		/* extended portstatus Rx and Tx lane count are zero based */
		udev->rx_lanes = USB_EXT_PORT_RX_LANES(ext_portstatus) + 1;
		udev->tx_lanes = USB_EXT_PORT_TX_LANES(ext_portstatus) + 1;
	} else {
		udev->rx_lanes = 1;
		udev->tx_lanes = 1;
	}
	if (hub_is_wusb(hub))
		udev->speed = USB_SPEED_WIRELESS;
	else if (hub_is_superspeedplus(hub->hdev) &&
		 port_speed_is_ssp(hub->hdev, ext_portstatus &
				   USB_EXT_PORT_STAT_RX_SPEED_ID))
		udev->speed = USB_SPEED_SUPER_PLUS;
	else if (hub_is_superspeed(hub->hdev))
		udev->speed = USB_SPEED_SUPER;
	else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
		udev->speed = USB_SPEED_HIGH;
	else if (portstatus & USB_PORT_STAT_LOW_SPEED)
		udev->speed = USB_SPEED_LOW;
	else
		udev->speed = USB_SPEED_FULL;
	return 0;
}

/* Handle port reset and port warm(BH) reset (for USB3 protocol ports) */
static int hub_port_reset(struct usb_hub *hub, int port1,
			struct usb_device *udev, unsigned int delay, bool warm)
{
	int i, status;
	u16 portchange, portstatus;
	struct usb_port *port_dev = hub->ports[port1 - 1];
	int reset_recovery_time;

	if (!hub_is_superspeed(hub->hdev)) {
		if (warm) {
			dev_err(hub->intfdev,
				"only USB3 hubs support warm reset\n");
			return -EINVAL;
		}
		/* Block EHCI CF initialization during the port reset.
		 * Some companion controllers don't like it when they mix.
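		 * The read side taken here is dropped again once all the
		 * reset attempts below have finished (see the up_read()
		 * at the end of this function).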
*/ down_read(&ehci_cf_port_reset_rwsem); } else if (!warm) { /* * If the caller hasn't explicitly requested a warm reset, * double check and see if one is needed. */ if (hub_port_status(hub, port1, &portstatus, &portchange) == 0) if (hub_port_warm_reset_required(hub, port1, portstatus)) warm = true; } clear_bit(port1, hub->warm_reset_bits); /* Reset the port */ for (i = 0; i < PORT_RESET_TRIES; i++) { status = set_port_feature(hub->hdev, port1, (warm ? USB_PORT_FEAT_BH_PORT_RESET : USB_PORT_FEAT_RESET)); if (status == -ENODEV) { ; /* The hub is gone */ } else if (status) { dev_err(&port_dev->dev, "cannot %sreset (err = %d)\n", warm ? "warm " : "", status); } else { status = hub_port_wait_reset(hub, port1, udev, delay, warm); if (status && status != -ENOTCONN && status != -ENODEV) dev_dbg(hub->intfdev, "port_wait_reset: err = %d\n", status); } /* Check for disconnect or reset */ if (status == 0 || status == -ENOTCONN || status == -ENODEV) { usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_RESET); if (!hub_is_superspeed(hub->hdev)) goto done; usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_BH_PORT_RESET); usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_PORT_LINK_STATE); if (udev) usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_CONNECTION); /* * If a USB 3.0 device migrates from reset to an error * state, re-issue the warm reset. */ if (hub_port_status(hub, port1, &portstatus, &portchange) < 0) goto done; if (!hub_port_warm_reset_required(hub, port1, portstatus)) goto done; /* * If the port is in SS.Inactive or Compliance Mode, the * hot or warm reset failed. Try another warm reset. */ if (!warm) { dev_dbg(&port_dev->dev, "hot reset failed, warm reset\n"); warm = true; } } dev_dbg(&port_dev->dev, "not enabled, trying %sreset again...\n", warm ? "warm " : ""); delay = HUB_LONG_RESET_TIME; } dev_err(&port_dev->dev, "Cannot enable. Maybe the USB cable is bad?\n"); done: if (status == 0) { if (port_dev->quirks & USB_PORT_QUIRK_FAST_ENUM) usleep_range(10000, 12000); else { /* TRSTRCY = 10 ms; plus some extra */ reset_recovery_time = 10 + 40; /* Hub needs extra delay after resetting its port. */ if (hub->hdev->quirks & USB_QUIRK_HUB_SLOW_RESET) reset_recovery_time += 100; msleep(reset_recovery_time); } if (udev) { struct usb_hcd *hcd = bus_to_hcd(udev->bus); update_devnum(udev, 0); /* The xHC may think the device is already reset, * so ignore the status. 
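			 * The reset_device() hook is optional; hosts that
			 * do not provide one simply skip this notification.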
*/ if (hcd->driver->reset_device) hcd->driver->reset_device(hcd, udev); usb_set_device_state(udev, USB_STATE_DEFAULT); } } else { if (udev) usb_set_device_state(udev, USB_STATE_NOTATTACHED); } if (!hub_is_superspeed(hub->hdev)) up_read(&ehci_cf_port_reset_rwsem); return status; } /* Check if a port is power on */ static int port_is_power_on(struct usb_hub *hub, unsigned portstatus) { int ret = 0; if (hub_is_superspeed(hub->hdev)) { if (portstatus & USB_SS_PORT_STAT_POWER) ret = 1; } else { if (portstatus & USB_PORT_STAT_POWER) ret = 1; } return ret; } static void usb_lock_port(struct usb_port *port_dev) __acquires(&port_dev->status_lock) { mutex_lock(&port_dev->status_lock); __acquire(&port_dev->status_lock); } static void usb_unlock_port(struct usb_port *port_dev) __releases(&port_dev->status_lock) { mutex_unlock(&port_dev->status_lock); __release(&port_dev->status_lock); } #ifdef CONFIG_PM /* Check if a port is suspended(USB2.0 port) or in U3 state(USB3.0 port) */ static int port_is_suspended(struct usb_hub *hub, unsigned portstatus) { int ret = 0; if (hub_is_superspeed(hub->hdev)) { if ((portstatus & USB_PORT_STAT_LINK_STATE) == USB_SS_PORT_LS_U3) ret = 1; } else { if (portstatus & USB_PORT_STAT_SUSPEND) ret = 1; } return ret; } /* Determine whether the device on a port is ready for a normal resume, * is ready for a reset-resume, or should be disconnected. */ static int check_port_resume_type(struct usb_device *udev, struct usb_hub *hub, int port1, int status, u16 portchange, u16 portstatus) { struct usb_port *port_dev = hub->ports[port1 - 1]; int retries = 3; retry: /* Is a warm reset needed to recover the connection? */ if (status == 0 && udev->reset_resume && hub_port_warm_reset_required(hub, port1, portstatus)) { /* pass */; } /* Is the device still present? */ else if (status || port_is_suspended(hub, portstatus) || !port_is_power_on(hub, portstatus)) { if (status >= 0) status = -ENODEV; } else if (!(portstatus & USB_PORT_STAT_CONNECTION)) { if (retries--) { usleep_range(200, 300); status = hub_port_status(hub, port1, &portstatus, &portchange); goto retry; } status = -ENODEV; } /* Can't do a normal resume if the port isn't enabled, * so try a reset-resume instead. */ else if (!(portstatus & USB_PORT_STAT_ENABLE) && !udev->reset_resume) { if (udev->persist_enabled) udev->reset_resume = 1; else status = -ENODEV; } if (status) { dev_dbg(&port_dev->dev, "status %04x.%04x after resume, %d\n", portchange, portstatus, status); } else if (udev->reset_resume) { /* Late port handoff can set status-change bits */ if (portchange & USB_PORT_STAT_C_CONNECTION) usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_CONNECTION); if (portchange & USB_PORT_STAT_C_ENABLE) usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_ENABLE); } return status; } int usb_disable_ltm(struct usb_device *udev) { struct usb_hcd *hcd = bus_to_hcd(udev->bus); /* Check if the roothub and device supports LTM. */ if (!usb_device_supports_ltm(hcd->self.root_hub) || !usb_device_supports_ltm(udev)) return 0; /* Clear Feature LTM Enable can only be sent if the device is * configured. */ if (!udev->actconfig) return 0; return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, USB_DEVICE_LTM_ENABLE, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); } EXPORT_SYMBOL_GPL(usb_disable_ltm); void usb_enable_ltm(struct usb_device *udev) { struct usb_hcd *hcd = bus_to_hcd(udev->bus); /* Check if the roothub and device supports LTM. 
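	 * Unlike usb_disable_ltm(), the enable path is best effort: the
	 * function returns void and the result of the control transfer
	 * below is not checked.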
*/ if (!usb_device_supports_ltm(hcd->self.root_hub) || !usb_device_supports_ltm(udev)) return; /* Set Feature LTM Enable can only be sent if the device is * configured. */ if (!udev->actconfig) return; usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, USB_DEVICE_LTM_ENABLE, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); } EXPORT_SYMBOL_GPL(usb_enable_ltm); /* * usb_enable_remote_wakeup - enable remote wakeup for a device * @udev: target device * * For USB-2 devices: Set the device's remote wakeup feature. * * For USB-3 devices: Assume there's only one function on the device and * enable remote wake for the first interface. FIXME if the interface * association descriptor shows there's more than one function. */ static int usb_enable_remote_wakeup(struct usb_device *udev) { if (udev->speed < USB_SPEED_SUPER) return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, USB_DEVICE_REMOTE_WAKEUP, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); else return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_SET_FEATURE, USB_RECIP_INTERFACE, USB_INTRF_FUNC_SUSPEND, USB_INTRF_FUNC_SUSPEND_RW | USB_INTRF_FUNC_SUSPEND_LP, NULL, 0, USB_CTRL_SET_TIMEOUT); } /* * usb_disable_remote_wakeup - disable remote wakeup for a device * @udev: target device * * For USB-2 devices: Clear the device's remote wakeup feature. * * For USB-3 devices: Assume there's only one function on the device and * disable remote wake for the first interface. FIXME if the interface * association descriptor shows there's more than one function. */ static int usb_disable_remote_wakeup(struct usb_device *udev) { if (udev->speed < USB_SPEED_SUPER) return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, USB_DEVICE_REMOTE_WAKEUP, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); else return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_SET_FEATURE, USB_RECIP_INTERFACE, USB_INTRF_FUNC_SUSPEND, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); } /* Count of wakeup-enabled devices at or below udev */ static unsigned wakeup_enabled_descendants(struct usb_device *udev) { struct usb_hub *hub = usb_hub_to_struct_hub(udev); return udev->do_remote_wakeup + (hub ? hub->wakeup_enabled_descendants : 0); } /* * usb_port_suspend - suspend a usb device's upstream port * @udev: device that's no longer in active use, not a root hub * Context: must be able to sleep; device not locked; pm locks held * * Suspends a USB device that isn't in active use, conserving power. * Devices may wake out of a suspend, if anything important happens, * using the remote wakeup mechanism. They may also be taken out of * suspend by the host, using usb_port_resume(). It's also routine * to disconnect devices while they are suspended. * * This only affects the USB hardware for a device; its interfaces * (and, for hubs, child devices) must already have been suspended. * * Selective port suspend reduces power; most suspended devices draw * less than 500 uA. It's also used in OTG, along with remote wakeup. * All devices below the suspended port are also suspended. * * Devices leave suspend state when the host wakes them up. Some devices * also support "remote wakeup", where the device can activate the USB * tree above them to deliver data, such as a keypress or packet. In * some cases, this wakes the USB host. * * Suspending OTG devices may trigger HNP, if that's been enabled * between a pair of dual-role devices. That will change roles, such * as from A-Host to A-Peripheral or from B-Host back to B-Peripheral. 
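 *
 * In outline, the code below arms remote wakeup on the device when
 * requested, turns off USB2 hardware LPM and USB3 LTM, and then either
 * moves the link to U3 (SuperSpeed) or, when needed, sets the port's
 * SUSPEND feature (USB-2), unwinding those steps again if the port
 * refuses to suspend.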
* * Devices on USB hub ports have only one "suspend" state, corresponding * to ACPI D2, "may cause the device to lose some context". * State transitions include: * * - suspend, resume ... when the VBUS power link stays live * - suspend, disconnect ... VBUS lost * * Once VBUS drop breaks the circuit, the port it's using has to go through * normal re-enumeration procedures, starting with enabling VBUS power. * Other than re-initializing the hub (plug/unplug, except for root hubs), * Linux (2.6) currently has NO mechanisms to initiate that: no hub_wq * timer, no SRP, no requests through sysfs. * * If Runtime PM isn't enabled or used, non-SuperSpeed devices may not get * suspended until their bus goes into global suspend (i.e., the root * hub is suspended). Nevertheless, we change @udev->state to * USB_STATE_SUSPENDED as this is the device's "logical" state. The actual * upstream port setting is stored in @udev->port_is_suspended. * * Returns 0 on success, else negative errno. */ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) { struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); struct usb_port *port_dev = hub->ports[udev->portnum - 1]; int port1 = udev->portnum; int status; bool really_suspend = true; usb_lock_port(port_dev); /* enable remote wakeup when appropriate; this lets the device * wake up the upstream hub (including maybe the root hub). * * NOTE: OTG devices may issue remote wakeup (or SRP) even when * we don't explicitly enable it here. */ if (udev->do_remote_wakeup) { status = usb_enable_remote_wakeup(udev); if (status) { dev_dbg(&udev->dev, "won't remote wakeup, status %d\n", status); /* bail if autosuspend is requested */ if (PMSG_IS_AUTO(msg)) goto err_wakeup; } } /* disable USB2 hardware LPM */ if (udev->usb2_hw_lpm_enabled == 1) usb_set_usb2_hardware_lpm(udev, 0); if (usb_disable_ltm(udev)) { dev_err(&udev->dev, "Failed to disable LTM before suspend\n"); status = -ENOMEM; if (PMSG_IS_AUTO(msg)) goto err_ltm; } /* see 7.1.7.6 */ if (hub_is_superspeed(hub->hdev)) status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3); /* * For system suspend, we do not need to enable the suspend feature * on individual USB-2 ports. The devices will automatically go * into suspend a few ms after the root hub stops sending packets. * The USB 2.0 spec calls this "global suspend". * * However, many USB hubs have a bug: They don't relay wakeup requests * from a downstream port if the port's suspend feature isn't on. * Therefore we will turn on the suspend feature if udev or any of its * descendants is enabled for remote wakeup. */ else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0) status = set_port_feature(hub->hdev, port1, USB_PORT_FEAT_SUSPEND); else { really_suspend = false; status = 0; } if (status) { dev_dbg(&port_dev->dev, "can't suspend, status %d\n", status); /* Try to enable USB3 LTM again */ usb_enable_ltm(udev); err_ltm: /* Try to enable USB2 hardware LPM again */ if (udev->usb2_hw_lpm_capable == 1) usb_set_usb2_hardware_lpm(udev, 1); if (udev->do_remote_wakeup) (void) usb_disable_remote_wakeup(udev); err_wakeup: /* System sleep transitions should never fail */ if (!PMSG_IS_AUTO(msg)) status = 0; } else { dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n", (PMSG_IS_AUTO(msg) ? 
"auto-" : ""), udev->do_remote_wakeup); if (really_suspend) { udev->port_is_suspended = 1; /* device has up to 10 msec to fully suspend */ msleep(10); } usb_set_device_state(udev, USB_STATE_SUSPENDED); } if (status == 0 && !udev->do_remote_wakeup && udev->persist_enabled && test_and_clear_bit(port1, hub->child_usage_bits)) pm_runtime_put_sync(&port_dev->dev); usb_mark_last_busy(hub->hdev); usb_unlock_port(port_dev); return status; } /* * If the USB "suspend" state is in use (rather than "global suspend"), * many devices will be individually taken out of suspend state using * special "resume" signaling. This routine kicks in shortly after * hardware resume signaling is finished, either because of selective * resume (by host) or remote wakeup (by device) ... now see what changed * in the tree that's rooted at this device. * * If @udev->reset_resume is set then the device is reset before the * status check is done. */ static int finish_port_resume(struct usb_device *udev) { int status = 0; u16 devstatus = 0; /* caller owns the udev device lock */ dev_dbg(&udev->dev, "%s\n", udev->reset_resume ? "finish reset-resume" : "finish resume"); /* usb ch9 identifies four variants of SUSPENDED, based on what * state the device resumes to. Linux currently won't see the * first two on the host side; they'd be inside hub_port_init() * during many timeouts, but hub_wq can't suspend until later. */ usb_set_device_state(udev, udev->actconfig ? USB_STATE_CONFIGURED : USB_STATE_ADDRESS); /* 10.5.4.5 says not to reset a suspended port if the attached * device is enabled for remote wakeup. Hence the reset * operation is carried out here, after the port has been * resumed. */ if (udev->reset_resume) { /* * If the device morphs or switches modes when it is reset, * we don't want to perform a reset-resume. We'll fail the * resume, which will cause a logical disconnect, and then * the device will be rediscovered. */ retry_reset_resume: if (udev->quirks & USB_QUIRK_RESET) status = -ENODEV; else status = usb_reset_and_verify_device(udev); } /* 10.5.4.5 says be sure devices in the tree are still there. * For now let's assume the device didn't go crazy on resume, * and device drivers will know about any resume quirks. */ if (status == 0) { devstatus = 0; status = usb_get_std_status(udev, USB_RECIP_DEVICE, 0, &devstatus); /* If a normal resume failed, try doing a reset-resume */ if (status && !udev->reset_resume && udev->persist_enabled) { dev_dbg(&udev->dev, "retry with reset-resume\n"); udev->reset_resume = 1; goto retry_reset_resume; } } if (status) { dev_dbg(&udev->dev, "gone after usb resume? status %d\n", status); /* * There are a few quirky devices which violate the standard * by claiming to have remote wakeup enabled after a reset, * which crash if the feature is cleared, hence check for * udev->reset_resume */ } else if (udev->actconfig && !udev->reset_resume) { if (udev->speed < USB_SPEED_SUPER) { if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) status = usb_disable_remote_wakeup(udev); } else { status = usb_get_std_status(udev, USB_RECIP_INTERFACE, 0, &devstatus); if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP | USB_INTRF_STAT_FUNC_RW)) status = usb_disable_remote_wakeup(udev); } if (status) dev_dbg(&udev->dev, "disable remote wakeup, status %d\n", status); status = 0; } return status; } /* * There are some SS USB devices which take longer time for link training. * XHCI specs 4.19.4 says that when Link training is successful, port * sets CCS bit to 1. 
So if SW reads port status before successful link * training, then it will not find device to be present. * USB Analyzer log with such buggy devices show that in some cases * device switch on the RX termination after long delay of host enabling * the VBUS. In few other cases it has been seen that device fails to * negotiate link training in first attempt. It has been * reported till now that few devices take as long as 2000 ms to train * the link after host enabling its VBUS and termination. Following * routine implements a 2000 ms timeout for link training. If in a case * link trains before timeout, loop will exit earlier. * * There are also some 2.0 hard drive based devices and 3.0 thumb * drives that, when plugged into a 2.0 only port, take a long * time to set CCS after VBUS enable. * * FIXME: If a device was connected before suspend, but was removed * while system was asleep, then the loop in the following routine will * only exit at timeout. * * This routine should only be called when persist is enabled. */ static int wait_for_connected(struct usb_device *udev, struct usb_hub *hub, int *port1, u16 *portchange, u16 *portstatus) { int status = 0, delay_ms = 0; while (delay_ms < 2000) { if (status || *portstatus & USB_PORT_STAT_CONNECTION) break; if (!port_is_power_on(hub, *portstatus)) { status = -ENODEV; break; } msleep(20); delay_ms += 20; status = hub_port_status(hub, *port1, portstatus, portchange); } dev_dbg(&udev->dev, "Waited %dms for CONNECT\n", delay_ms); return status; } /* * usb_port_resume - re-activate a suspended usb device's upstream port * @udev: device to re-activate, not a root hub * Context: must be able to sleep; device not locked; pm locks held * * This will re-activate the suspended device, increasing power usage * while letting drivers communicate again with its endpoints. * USB resume explicitly guarantees that the power session between * the host and the device is the same as it was when the device * suspended. * * If @udev->reset_resume is set then this routine won't check that the * port is still enabled. Furthermore, finish_port_resume() above will * reset @udev. The end result is that a broken power session can be * recovered and @udev will appear to persist across a loss of VBUS power. * * For example, if a host controller doesn't maintain VBUS suspend current * during a system sleep or is reset when the system wakes up, all the USB * power sessions below it will be broken. This is especially troublesome * for mass-storage devices containing mounted filesystems, since the * device will appear to have disconnected and all the memory mappings * to it will be lost. Using the USB_PERSIST facility, the device can be * made to appear as if it had not disconnected. * * This facility can be dangerous. Although usb_reset_and_verify_device() makes * every effort to insure that the same device is present after the * reset as before, it cannot provide a 100% guarantee. Furthermore it's * quite possible for a device to remain unaltered but its media to be * changed. If the user replaces a flash memory card while the system is * asleep, he will have only himself to blame when the filesystem on the * new card is corrupted and the system crashes. * * Returns 0 on success, else negative errno. 
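 *
 * In outline, the code below takes a runtime-PM reference on the port,
 * clears the suspend state (U0 for SuperSpeed, Clear PORT_SUSPEND for
 * USB-2), drives resume signaling for USB_RESUME_TIMEOUT plus TRSMRCY,
 * optionally waits for the connection when persist is enabled, and then
 * lets finish_port_resume() re-validate (or reset-resume) the device.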
*/ int usb_port_resume(struct usb_device *udev, pm_message_t msg) { struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); struct usb_port *port_dev = hub->ports[udev->portnum - 1]; int port1 = udev->portnum; int status; u16 portchange, portstatus; if (!test_and_set_bit(port1, hub->child_usage_bits)) { status = pm_runtime_get_sync(&port_dev->dev); if (status < 0) { dev_dbg(&udev->dev, "can't resume usb port, status %d\n", status); return status; } } usb_lock_port(port_dev); /* Skip the initial Clear-Suspend step for a remote wakeup */ status = hub_port_status(hub, port1, &portstatus, &portchange); if (status == 0 && !port_is_suspended(hub, portstatus)) { if (portchange & USB_PORT_STAT_C_SUSPEND) pm_wakeup_event(&udev->dev, 0); goto SuspendCleared; } /* see 7.1.7.7; affects power usage, but not budgeting */ if (hub_is_superspeed(hub->hdev)) status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U0); else status = usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_SUSPEND); if (status) { dev_dbg(&port_dev->dev, "can't resume, status %d\n", status); } else { /* drive resume for USB_RESUME_TIMEOUT msec */ dev_dbg(&udev->dev, "usb %sresume\n", (PMSG_IS_AUTO(msg) ? "auto-" : "")); msleep(USB_RESUME_TIMEOUT); /* Virtual root hubs can trigger on GET_PORT_STATUS to * stop resume signaling. Then finish the resume * sequence. */ status = hub_port_status(hub, port1, &portstatus, &portchange); /* TRSMRCY = 10 msec */ msleep(10); } SuspendCleared: if (status == 0) { udev->port_is_suspended = 0; if (hub_is_superspeed(hub->hdev)) { if (portchange & USB_PORT_STAT_C_LINK_STATE) usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_PORT_LINK_STATE); } else { if (portchange & USB_PORT_STAT_C_SUSPEND) usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_SUSPEND); } } if (udev->persist_enabled) status = wait_for_connected(udev, hub, &port1, &portchange, &portstatus); status = check_port_resume_type(udev, hub, port1, status, portchange, portstatus); if (status == 0) status = finish_port_resume(udev); if (status < 0) { dev_dbg(&udev->dev, "can't resume, status %d\n", status); hub_port_logical_disconnect(hub, port1); } else { /* Try to enable USB2 hardware LPM */ if (udev->usb2_hw_lpm_capable == 1) usb_set_usb2_hardware_lpm(udev, 1); /* Try to enable USB3 LTM */ usb_enable_ltm(udev); } usb_unlock_port(port_dev); return status; } int usb_remote_wakeup(struct usb_device *udev) { int status = 0; usb_lock_device(udev); if (udev->state == USB_STATE_SUSPENDED) { dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-"); status = usb_autoresume_device(udev); if (status == 0) { /* Let the drivers do their thing, then... */ usb_autosuspend_device(udev); } } usb_unlock_device(udev); return status; } /* Returns 1 if there was a remote wakeup and a connect status change. 
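 * A failed resume of the child device is also reported as a connect
 * status change; a wakeup on a port with no child just disables the port.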
*/ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port, u16 portstatus, u16 portchange) __must_hold(&port_dev->status_lock) { struct usb_port *port_dev = hub->ports[port - 1]; struct usb_device *hdev; struct usb_device *udev; int connect_change = 0; int ret; hdev = hub->hdev; udev = port_dev->child; if (!hub_is_superspeed(hdev)) { if (!(portchange & USB_PORT_STAT_C_SUSPEND)) return 0; usb_clear_port_feature(hdev, port, USB_PORT_FEAT_C_SUSPEND); } else { if (!udev || udev->state != USB_STATE_SUSPENDED || (portstatus & USB_PORT_STAT_LINK_STATE) != USB_SS_PORT_LS_U0) return 0; } if (udev) { /* TRSMRCY = 10 msec */ msleep(10); usb_unlock_port(port_dev); ret = usb_remote_wakeup(udev); usb_lock_port(port_dev); if (ret < 0) connect_change = 1; } else { ret = -ENODEV; hub_port_disable(hub, port, 1); } dev_dbg(&port_dev->dev, "resume, status %d\n", ret); return connect_change; } static int check_ports_changed(struct usb_hub *hub) { int port1; for (port1 = 1; port1 <= hub->hdev->maxchild; ++port1) { u16 portstatus, portchange; int status; status = hub_port_status(hub, port1, &portstatus, &portchange); if (!status && portchange) return 1; } return 0; } static int hub_suspend(struct usb_interface *intf, pm_message_t msg) { struct usb_hub *hub = usb_get_intfdata(intf); struct usb_device *hdev = hub->hdev; unsigned port1; int status; /* * Warn if children aren't already suspended. * Also, add up the number of wakeup-enabled descendants. */ hub->wakeup_enabled_descendants = 0; for (port1 = 1; port1 <= hdev->maxchild; port1++) { struct usb_port *port_dev = hub->ports[port1 - 1]; struct usb_device *udev = port_dev->child; if (udev && udev->can_submit) { dev_warn(&port_dev->dev, "device %s not suspended yet\n", dev_name(&udev->dev)); if (PMSG_IS_AUTO(msg)) return -EBUSY; } if (udev) hub->wakeup_enabled_descendants += wakeup_enabled_descendants(udev); } if (hdev->do_remote_wakeup && hub->quirk_check_port_auto_suspend) { /* check if there are changes pending on hub ports */ if (check_ports_changed(hub)) { if (PMSG_IS_AUTO(msg)) return -EBUSY; pm_wakeup_event(&hdev->dev, 2000); } } if (hub_is_superspeed(hdev) && hdev->do_remote_wakeup) { /* Enable hub to send remote wakeup for all ports. */ for (port1 = 1; port1 <= hdev->maxchild; port1++) { status = set_port_feature(hdev, port1 | USB_PORT_FEAT_REMOTE_WAKE_CONNECT | USB_PORT_FEAT_REMOTE_WAKE_DISCONNECT | USB_PORT_FEAT_REMOTE_WAKE_OVER_CURRENT, USB_PORT_FEAT_REMOTE_WAKE_MASK); } } dev_dbg(&intf->dev, "%s\n", __func__); /* stop hub_wq and related activity */ hub_quiesce(hub, HUB_SUSPEND); return 0; } /* Report wakeup requests from the ports of a resuming root hub */ static void report_wakeup_requests(struct usb_hub *hub) { struct usb_device *hdev = hub->hdev; struct usb_device *udev; struct usb_hcd *hcd; unsigned long resuming_ports; int i; if (hdev->parent) return; /* Not a root hub */ hcd = bus_to_hcd(hdev->bus); if (hcd->driver->get_resuming_ports) { /* * The get_resuming_ports() method returns a bitmap (origin 0) * of ports which have started wakeup signaling but have not * yet finished resuming. During system resume we will * resume all the enabled ports, regardless of any wakeup * signals, which means the wakeup requests would be lost. * To prevent this, report them to the PM core here. 
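		 * Bit i of the bitmap corresponds to hub->ports[i] (port
		 * number i + 1); a wakeup event is reported only for ports
		 * that currently have a child device.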
*/ resuming_ports = hcd->driver->get_resuming_ports(hcd); for (i = 0; i < hdev->maxchild; ++i) { if (test_bit(i, &resuming_ports)) { udev = hub->ports[i]->child; if (udev) pm_wakeup_event(&udev->dev, 0); } } } } static int hub_resume(struct usb_interface *intf) { struct usb_hub *hub = usb_get_intfdata(intf); dev_dbg(&intf->dev, "%s\n", __func__); hub_activate(hub, HUB_RESUME); /* * This should be called only for system resume, not runtime resume. * We can't tell the difference here, so some wakeup requests will be * reported at the wrong time or more than once. This shouldn't * matter much, so long as they do get reported. */ report_wakeup_requests(hub); return 0; } static int hub_reset_resume(struct usb_interface *intf) { struct usb_hub *hub = usb_get_intfdata(intf); dev_dbg(&intf->dev, "%s\n", __func__); hub_activate(hub, HUB_RESET_RESUME); return 0; } /** * usb_root_hub_lost_power - called by HCD if the root hub lost Vbus power * @rhdev: struct usb_device for the root hub * * The USB host controller driver calls this function when its root hub * is resumed and Vbus power has been interrupted or the controller * has been reset. The routine marks @rhdev as having lost power. * When the hub driver is resumed it will take notice and carry out * power-session recovery for all the "USB-PERSIST"-enabled child devices; * the others will be disconnected. */ void usb_root_hub_lost_power(struct usb_device *rhdev) { dev_notice(&rhdev->dev, "root hub lost power or was reset\n"); rhdev->reset_resume = 1; } EXPORT_SYMBOL_GPL(usb_root_hub_lost_power); static const char * const usb3_lpm_names[] = { "U0", "U1", "U2", "U3", }; /* * Send a Set SEL control transfer to the device, prior to enabling * device-initiated U1 or U2. This lets the device know the exit latencies from * the time the device initiates a U1 or U2 exit, to the time it will receive a * packet from the host. * * This function will fail if the SEL or PEL values for udev are greater than * the maximum allowed values for the link state to be enabled. */ static int usb_req_set_sel(struct usb_device *udev, enum usb3_link_state state) { struct usb_set_sel_req *sel_values; unsigned long long u1_sel; unsigned long long u1_pel; unsigned long long u2_sel; unsigned long long u2_pel; int ret; if (udev->state != USB_STATE_CONFIGURED) return 0; /* Convert SEL and PEL stored in ns to us */ u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); u2_sel = DIV_ROUND_UP(udev->u2_params.sel, 1000); u2_pel = DIV_ROUND_UP(udev->u2_params.pel, 1000); /* * Make sure that the calculated SEL and PEL values for the link * state we're enabling aren't bigger than the max SEL/PEL * value that will fit in the SET SEL control transfer. * Otherwise the device would get an incorrect idea of the exit * latency for the link state, and could start a device-initiated * U1/U2 when the exit latencies are too high. */ if ((state == USB3_LPM_U1 && (u1_sel > USB3_LPM_MAX_U1_SEL_PEL || u1_pel > USB3_LPM_MAX_U1_SEL_PEL)) || (state == USB3_LPM_U2 && (u2_sel > USB3_LPM_MAX_U2_SEL_PEL || u2_pel > USB3_LPM_MAX_U2_SEL_PEL))) { dev_dbg(&udev->dev, "Device-initiated %s disabled due to long SEL %llu us or PEL %llu us\n", usb3_lpm_names[state], u1_sel, u1_pel); return -EINVAL; } /* * If we're enabling device-initiated LPM for one link state, * but the other link state has a too high SEL or PEL value, * just set those values to the max in the Set SEL request. 
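	 * (The values here are in microseconds: the nanosecond SEL/PEL from
	 * udev->u1_params/u2_params were converted above with DIV_ROUND_UP,
	 * so e.g. a 2100 ns SEL becomes 3 us.)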
*/ if (u1_sel > USB3_LPM_MAX_U1_SEL_PEL) u1_sel = USB3_LPM_MAX_U1_SEL_PEL; if (u1_pel > USB3_LPM_MAX_U1_SEL_PEL) u1_pel = USB3_LPM_MAX_U1_SEL_PEL; if (u2_sel > USB3_LPM_MAX_U2_SEL_PEL) u2_sel = USB3_LPM_MAX_U2_SEL_PEL; if (u2_pel > USB3_LPM_MAX_U2_SEL_PEL) u2_pel = USB3_LPM_MAX_U2_SEL_PEL; /* * usb_enable_lpm() can be called as part of a failed device reset, * which may be initiated by an error path of a mass storage driver. * Therefore, use GFP_NOIO. */ sel_values = kmalloc(sizeof *(sel_values), GFP_NOIO); if (!sel_values) return -ENOMEM; sel_values->u1_sel = u1_sel; sel_values->u1_pel = u1_pel; sel_values->u2_sel = cpu_to_le16(u2_sel); sel_values->u2_pel = cpu_to_le16(u2_pel); ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_SET_SEL, USB_RECIP_DEVICE, 0, 0, sel_values, sizeof *(sel_values), USB_CTRL_SET_TIMEOUT); kfree(sel_values); return ret; } /* * Enable or disable device-initiated U1 or U2 transitions. */ static int usb_set_device_initiated_lpm(struct usb_device *udev, enum usb3_link_state state, bool enable) { int ret; int feature; switch (state) { case USB3_LPM_U1: feature = USB_DEVICE_U1_ENABLE; break; case USB3_LPM_U2: feature = USB_DEVICE_U2_ENABLE; break; default: dev_warn(&udev->dev, "%s: Can't %s non-U1 or U2 state.\n", __func__, enable ? "enable" : "disable"); return -EINVAL; } if (udev->state != USB_STATE_CONFIGURED) { dev_dbg(&udev->dev, "%s: Can't %s %s state " "for unconfigured device.\n", __func__, enable ? "enable" : "disable", usb3_lpm_names[state]); return 0; } if (enable) { /* * Now send the control transfer to enable device-initiated LPM * for either U1 or U2. */ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); } else { ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); } if (ret < 0) { dev_warn(&udev->dev, "%s of device-initiated %s failed.\n", enable ? "Enable" : "Disable", usb3_lpm_names[state]); return -EBUSY; } return 0; } static int usb_set_lpm_timeout(struct usb_device *udev, enum usb3_link_state state, int timeout) { int ret; int feature; switch (state) { case USB3_LPM_U1: feature = USB_PORT_FEAT_U1_TIMEOUT; break; case USB3_LPM_U2: feature = USB_PORT_FEAT_U2_TIMEOUT; break; default: dev_warn(&udev->dev, "%s: Can't set timeout for non-U1 or U2 state.\n", __func__); return -EINVAL; } if (state == USB3_LPM_U1 && timeout > USB3_LPM_U1_MAX_TIMEOUT && timeout != USB3_LPM_DEVICE_INITIATED) { dev_warn(&udev->dev, "Failed to set %s timeout to 0x%x, " "which is a reserved value.\n", usb3_lpm_names[state], timeout); return -EINVAL; } ret = set_port_feature(udev->parent, USB_PORT_LPM_TIMEOUT(timeout) | udev->portnum, feature); if (ret < 0) { dev_warn(&udev->dev, "Failed to set %s timeout to 0x%x," "error code %i\n", usb3_lpm_names[state], timeout, ret); return -EBUSY; } if (state == USB3_LPM_U1) udev->u1_params.timeout = timeout; else udev->u2_params.timeout = timeout; return 0; } /* * Enable the hub-initiated U1/U2 idle timeouts, and enable device-initiated * U1/U2 entry. * * We will attempt to enable U1 or U2, but there are no guarantees that the * control transfers to set the hub timeout or enable device-initiated U1/U2 * will be successful. * * If we cannot set the parent hub U1/U2 timeout, we attempt to let the xHCI * driver know about it. If that call fails, it should be harmless, and just * take up more slightly more bus bandwidth for unnecessary U1/U2 exit latency. 
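 *
 * In outline: tell the device its exit latencies with a Set SEL request,
 * ask the host controller for a suitable hub-initiated timeout, program
 * that timeout into the parent hub port, and finally (for configured
 * devices) enable device-initiated U1/U2 entry.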
*/ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, enum usb3_link_state state) { int timeout, ret; __u8 u1_mel = udev->bos->ss_cap->bU1devExitLat; __le16 u2_mel = udev->bos->ss_cap->bU2DevExitLat; /* If the device says it doesn't have *any* exit latency to come out of * U1 or U2, it's probably lying. Assume it doesn't implement that link * state. */ if ((state == USB3_LPM_U1 && u1_mel == 0) || (state == USB3_LPM_U2 && u2_mel == 0)) return; /* * First, let the device know about the exit latencies * associated with the link state we're about to enable. */ ret = usb_req_set_sel(udev, state); if (ret < 0) { dev_warn(&udev->dev, "Set SEL for device-initiated %s failed.\n", usb3_lpm_names[state]); return; } /* We allow the host controller to set the U1/U2 timeout internally * first, so that it can change its schedule to account for the * additional latency to send data to a device in a lower power * link state. */ timeout = hcd->driver->enable_usb3_lpm_timeout(hcd, udev, state); /* xHCI host controller doesn't want to enable this LPM state. */ if (timeout == 0) return; if (timeout < 0) { dev_warn(&udev->dev, "Could not enable %s link state, " "xHCI error %i.\n", usb3_lpm_names[state], timeout); return; } if (usb_set_lpm_timeout(udev, state, timeout)) { /* If we can't set the parent hub U1/U2 timeout, * device-initiated LPM won't be allowed either, so let the xHCI * host know that this link state won't be enabled. */ hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state); } else { /* Only a configured device will accept the Set Feature * U1/U2_ENABLE */ if (udev->actconfig) usb_set_device_initiated_lpm(udev, state, true); /* As soon as usb_set_lpm_timeout(timeout) returns 0, the * hub-initiated LPM is enabled. Thus, LPM is enabled no * matter the result of usb_set_device_initiated_lpm(). * The only difference is whether device is able to initiate * LPM. */ if (state == USB3_LPM_U1) udev->usb3_lpm_u1_enabled = 1; else if (state == USB3_LPM_U2) udev->usb3_lpm_u2_enabled = 1; } } /* * Disable the hub-initiated U1/U2 idle timeouts, and disable device-initiated * U1/U2 entry. * * If this function returns -EBUSY, the parent hub will still allow U1/U2 entry. * If zero is returned, the parent will not allow the link to go into U1/U2. * * If zero is returned, device-initiated U1/U2 entry may still be enabled, but * it won't have an effect on the bus link state because the parent hub will * still disallow device-initiated U1/U2 entry. * * If zero is returned, the xHCI host controller may still think U1/U2 entry is * possible. The result will be slightly more bus bandwidth will be taken up * (to account for U1/U2 exit latency), but it should be harmless. */ static int usb_disable_link_state(struct usb_hcd *hcd, struct usb_device *udev, enum usb3_link_state state) { switch (state) { case USB3_LPM_U1: case USB3_LPM_U2: break; default: dev_warn(&udev->dev, "%s: Can't disable non-U1 or U2 state.\n", __func__); return -EINVAL; } if (usb_set_lpm_timeout(udev, state, 0)) return -EBUSY; usb_set_device_initiated_lpm(udev, state, false); if (hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state)) dev_warn(&udev->dev, "Could not disable xHCI %s timeout, " "bus schedule bandwidth may be impacted.\n", usb3_lpm_names[state]); /* As soon as usb_set_lpm_timeout(0) return 0, hub initiated LPM * is disabled. Hub will disallows link to enter U1/U2 as well, * even device is initiating LPM. Hence LPM is disabled if hub LPM * timeout set to 0, no matter device-initiated LPM is disabled or * not. 
*/ if (state == USB3_LPM_U1) udev->usb3_lpm_u1_enabled = 0; else if (state == USB3_LPM_U2) udev->usb3_lpm_u2_enabled = 0; return 0; } /* * Disable hub-initiated and device-initiated U1 and U2 entry. * Caller must own the bandwidth_mutex. * * This will call usb_enable_lpm() on failure, which will decrement * lpm_disable_count, and will re-enable LPM if lpm_disable_count reaches zero. */ int usb_disable_lpm(struct usb_device *udev) { struct usb_hcd *hcd; if (!udev || !udev->parent || udev->speed < USB_SPEED_SUPER || !udev->lpm_capable || udev->state < USB_STATE_DEFAULT) return 0; hcd = bus_to_hcd(udev->bus); if (!hcd || !hcd->driver->disable_usb3_lpm_timeout) return 0; udev->lpm_disable_count++; if ((udev->u1_params.timeout == 0 && udev->u2_params.timeout == 0)) return 0; /* If LPM is enabled, attempt to disable it. */ if (usb_disable_link_state(hcd, udev, USB3_LPM_U1)) goto enable_lpm; if (usb_disable_link_state(hcd, udev, USB3_LPM_U2)) goto enable_lpm; return 0; enable_lpm: usb_enable_lpm(udev); return -EBUSY; } EXPORT_SYMBOL_GPL(usb_disable_lpm); /* Grab the bandwidth_mutex before calling usb_disable_lpm() */ int usb_unlocked_disable_lpm(struct usb_device *udev) { struct usb_hcd *hcd = bus_to_hcd(udev->bus); int ret; if (!hcd) return -EINVAL; mutex_lock(hcd->bandwidth_mutex); ret = usb_disable_lpm(udev); mutex_unlock(hcd->bandwidth_mutex); return ret; } EXPORT_SYMBOL_GPL(usb_unlocked_disable_lpm); /* * Attempt to enable device-initiated and hub-initiated U1 and U2 entry. The * xHCI host policy may prevent U1 or U2 from being enabled. * * Other callers may have disabled link PM, so U1 and U2 entry will be disabled * until the lpm_disable_count drops to zero. Caller must own the * bandwidth_mutex. */ void usb_enable_lpm(struct usb_device *udev) { struct usb_hcd *hcd; struct usb_hub *hub; struct usb_port *port_dev; if (!udev || !udev->parent || udev->speed < USB_SPEED_SUPER || !udev->lpm_capable || udev->state < USB_STATE_DEFAULT) return; udev->lpm_disable_count--; hcd = bus_to_hcd(udev->bus); /* Double check that we can both enable and disable LPM. * Device must be configured to accept set feature U1/U2 timeout. 
*/ if (!hcd || !hcd->driver->enable_usb3_lpm_timeout || !hcd->driver->disable_usb3_lpm_timeout) return; if (udev->lpm_disable_count > 0) return; hub = usb_hub_to_struct_hub(udev->parent); if (!hub) return; port_dev = hub->ports[udev->portnum - 1]; if (port_dev->usb3_lpm_u1_permit) usb_enable_link_state(hcd, udev, USB3_LPM_U1); if (port_dev->usb3_lpm_u2_permit) usb_enable_link_state(hcd, udev, USB3_LPM_U2); } EXPORT_SYMBOL_GPL(usb_enable_lpm); /* Grab the bandwidth_mutex before calling usb_enable_lpm() */ void usb_unlocked_enable_lpm(struct usb_device *udev) { struct usb_hcd *hcd = bus_to_hcd(udev->bus); if (!hcd) return; mutex_lock(hcd->bandwidth_mutex); usb_enable_lpm(udev); mutex_unlock(hcd->bandwidth_mutex); } EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm); /* usb3 devices use U3 for disabled, make sure remote wakeup is disabled */ static void hub_usb3_port_prepare_disable(struct usb_hub *hub, struct usb_port *port_dev) { struct usb_device *udev = port_dev->child; int ret; if (udev && udev->port_is_suspended && udev->do_remote_wakeup) { ret = hub_set_port_link_state(hub, port_dev->portnum, USB_SS_PORT_LS_U0); if (!ret) { msleep(USB_RESUME_TIMEOUT); ret = usb_disable_remote_wakeup(udev); } if (ret) dev_warn(&udev->dev, "Port disable: can't disable remote wake\n"); udev->do_remote_wakeup = 0; } } #else /* CONFIG_PM */ #define hub_suspend NULL #define hub_resume NULL #define hub_reset_resume NULL static inline void hub_usb3_port_prepare_disable(struct usb_hub *hub, struct usb_port *port_dev) { } int usb_disable_lpm(struct usb_device *udev) { return 0; } EXPORT_SYMBOL_GPL(usb_disable_lpm); void usb_enable_lpm(struct usb_device *udev) { } EXPORT_SYMBOL_GPL(usb_enable_lpm); int usb_unlocked_disable_lpm(struct usb_device *udev) { return 0; } EXPORT_SYMBOL_GPL(usb_unlocked_disable_lpm); void usb_unlocked_enable_lpm(struct usb_device *udev) { } EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm); int usb_disable_ltm(struct usb_device *udev) { return 0; } EXPORT_SYMBOL_GPL(usb_disable_ltm); void usb_enable_ltm(struct usb_device *udev) { } EXPORT_SYMBOL_GPL(usb_enable_ltm); static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port, u16 portstatus, u16 portchange) { return 0; } #endif /* CONFIG_PM */ /* * USB-3 does not have a similar link state as USB-2 that will avoid negotiating * a connection with a plugged-in cable but will signal the host when the cable * is unplugged. Disable remote wake and set link state to U3 for USB-3 devices */ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state) { struct usb_port *port_dev = hub->ports[port1 - 1]; struct usb_device *hdev = hub->hdev; int ret = 0; if (!hub->error) { if (hub_is_superspeed(hub->hdev)) { hub_usb3_port_prepare_disable(hub, port_dev); ret = hub_set_port_link_state(hub, port_dev->portnum, USB_SS_PORT_LS_U3); } else { ret = usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE); } } if (port_dev->child && set_state) usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED); if (ret && ret != -ENODEV) dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret); return ret; } /* * usb_port_disable - disable a usb device's upstream port * @udev: device to disable * Context: @udev locked, must be able to sleep. * * Disables a USB device that isn't in active use. 
*/ int usb_port_disable(struct usb_device *udev) { struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); return hub_port_disable(hub, udev->portnum, 0); } /* USB 2.0 spec, 7.1.7.3 / fig 7-29: * * Between connect detection and reset signaling there must be a delay * of 100ms at least for debounce and power-settling. The corresponding * timer shall restart whenever the downstream port detects a disconnect. * * Apparently there are some bluetooth and irda-dongles and a number of * low-speed devices for which this debounce period may last over a second. * Not covered by the spec - but easy to deal with. * * This implementation uses a 1500ms total debounce timeout; if the * connection isn't stable by then it returns -ETIMEDOUT. It checks * every 25ms for transient disconnects. When the port status has been * unchanged for 100ms it returns the port status. */ int hub_port_debounce(struct usb_hub *hub, int port1, bool must_be_connected) { int ret; u16 portchange, portstatus; unsigned connection = 0xffff; int total_time, stable_time = 0; struct usb_port *port_dev = hub->ports[port1 - 1]; for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) { ret = hub_port_status(hub, port1, &portstatus, &portchange); if (ret < 0) return ret; if (!(portchange & USB_PORT_STAT_C_CONNECTION) && (portstatus & USB_PORT_STAT_CONNECTION) == connection) { if (!must_be_connected || (connection == USB_PORT_STAT_CONNECTION)) stable_time += HUB_DEBOUNCE_STEP; if (stable_time >= HUB_DEBOUNCE_STABLE) break; } else { stable_time = 0; connection = portstatus & USB_PORT_STAT_CONNECTION; } if (portchange & USB_PORT_STAT_C_CONNECTION) { usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_CONNECTION); } if (total_time >= HUB_DEBOUNCE_TIMEOUT) break; msleep(HUB_DEBOUNCE_STEP); } dev_dbg(&port_dev->dev, "debounce total %dms stable %dms status 0x%x\n", total_time, stable_time, portstatus); if (stable_time < HUB_DEBOUNCE_STABLE) return -ETIMEDOUT; return portstatus; } void usb_ep0_reinit(struct usb_device *udev) { usb_disable_endpoint(udev, 0 + USB_DIR_IN, true); usb_disable_endpoint(udev, 0 + USB_DIR_OUT, true); usb_enable_endpoint(udev, &udev->ep0, true); } EXPORT_SYMBOL_GPL(usb_ep0_reinit); #define usb_sndaddr0pipe() (PIPE_CONTROL << 30) #define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN) static int hub_set_address(struct usb_device *udev, int devnum) { int retval; struct usb_hcd *hcd = bus_to_hcd(udev->bus); /* * The host controller will choose the device address, * instead of the core having chosen it earlier */ if (!hcd->driver->address_device && devnum <= 1) return -EINVAL; if (udev->state == USB_STATE_ADDRESS) return 0; if (udev->state != USB_STATE_DEFAULT) return -EINVAL; if (hcd->driver->address_device) retval = hcd->driver->address_device(hcd, udev); else retval = usb_control_msg(udev, usb_sndaddr0pipe(), USB_REQ_SET_ADDRESS, 0, devnum, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (retval == 0) { update_devnum(udev, devnum); /* Device now using proper address. */ usb_set_device_state(udev, USB_STATE_ADDRESS); usb_ep0_reinit(udev); } return retval; } /* * There are reports of USB 3.0 devices that say they support USB 2.0 Link PM * when they're plugged into a USB 2.0 port, but they don't work when LPM is * enabled. * * Only enable USB 2.0 Link PM if the port is internal (hardwired), or the * device says it supports the new USB 2.0 Link PM errata by setting the BESL * support bit in the BOS descriptor. 
*/ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev) { struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); int connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN; if (!udev->usb2_hw_lpm_capable || !udev->bos) return; if (hub) connect_type = hub->ports[udev->portnum - 1]->connect_type; if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) || connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) { udev->usb2_hw_lpm_allowed = 1; usb_set_usb2_hardware_lpm(udev, 1); } } static int hub_enable_device(struct usb_device *udev) { struct usb_hcd *hcd = bus_to_hcd(udev->bus); if (!hcd->driver->enable_device) return 0; if (udev->state == USB_STATE_ADDRESS) return 0; if (udev->state != USB_STATE_DEFAULT) return -EINVAL; return hcd->driver->enable_device(hcd, udev); } /* Reset device, (re)assign address, get device descriptor. * Device connection must be stable, no more debouncing needed. * Returns device in USB_STATE_ADDRESS, except on error. * * If this is called for an already-existing device (as part of * usb_reset_and_verify_device), the caller must own the device lock and * the port lock. For a newly detected device that is not accessible * through any global pointers, it's not necessary to lock the device, * but it is still necessary to lock the port. */ static int hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, int retry_counter) { struct usb_device *hdev = hub->hdev; struct usb_hcd *hcd = bus_to_hcd(hdev->bus); struct usb_port *port_dev = hub->ports[port1 - 1]; int retries, operations, retval, i; unsigned delay = HUB_SHORT_RESET_TIME; enum usb_device_speed oldspeed = udev->speed; const char *speed; int devnum = udev->devnum; const char *driver_name; /* root hub ports have a slightly longer reset period * (from USB 2.0 spec, section 7.1.7.5) */ if (!hdev->parent) { delay = HUB_ROOT_RESET_TIME; if (port1 == hdev->bus->otg_port) hdev->bus->b_hnp_enable = 0; } /* Some low speed devices have problems with the quick delay, so */ /* be a bit pessimistic with those devices. RHbug #23670 */ if (oldspeed == USB_SPEED_LOW) delay = HUB_LONG_RESET_TIME; mutex_lock(hcd->address0_mutex); /* Reset the device; full speed may morph to high speed */ /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */ retval = hub_port_reset(hub, port1, udev, delay, false); if (retval < 0) /* error or disconnect */ goto fail; /* success, speed is known */ retval = -ENODEV; /* Don't allow speed changes at reset, except usb 3.0 to faster */ if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed && !(oldspeed == USB_SPEED_SUPER && udev->speed > oldspeed)) { dev_dbg(&udev->dev, "device reset changed speed!\n"); goto fail; } oldspeed = udev->speed; /* USB 2.0 section 5.5.3 talks about ep0 maxpacket ... * it's fixed size except for full speed devices. * For Wireless USB devices, ep0 max packet is always 512 (tho * reported as 0xff in the device descriptor). WUSB1.0[4.8.1]. */ switch (udev->speed) { case USB_SPEED_SUPER_PLUS: case USB_SPEED_SUPER: case USB_SPEED_WIRELESS: /* fixed at 512 */ udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512); break; case USB_SPEED_HIGH: /* fixed at 64 */ udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64); break; case USB_SPEED_FULL: /* 8, 16, 32, or 64 */ /* to determine the ep0 maxpacket size, try to read * the device descriptor to get bMaxPacketSize0 and * then correct our initial guess. 
*/ udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64); break; case USB_SPEED_LOW: /* fixed at 8 */ udev->ep0.desc.wMaxPacketSize = cpu_to_le16(8); break; default: goto fail; } if (udev->speed == USB_SPEED_WIRELESS) speed = "variable speed Wireless"; else speed = usb_speed_string(udev->speed); /* * The controller driver may be NULL if the controller device * is the middle device between platform device and roothub. * This middle device may not need a device driver due to * all hardware control can be at platform device driver, this * platform device is usually a dual-role USB controller device. */ if (udev->bus->controller->driver) driver_name = udev->bus->controller->driver->name; else driver_name = udev->bus->sysdev->driver->name; if (udev->speed < USB_SPEED_SUPER) dev_info(&udev->dev, "%s %s USB device number %d using %s\n", (udev->config) ? "reset" : "new", speed, devnum, driver_name); /* Set up TT records, if needed */ if (hdev->tt) { udev->tt = hdev->tt; udev->ttport = hdev->ttport; } else if (udev->speed != USB_SPEED_HIGH && hdev->speed == USB_SPEED_HIGH) { if (!hub->tt.hub) { dev_err(&udev->dev, "parent hub has no TT\n"); retval = -EINVAL; goto fail; } udev->tt = &hub->tt; udev->ttport = port1; } /* Why interleave GET_DESCRIPTOR and SET_ADDRESS this way? * Because device hardware and firmware is sometimes buggy in * this area, and this is how Linux has done it for ages. * Change it cautiously. * * NOTE: If use_new_scheme() is true we will start by issuing * a 64-byte GET_DESCRIPTOR request. This is what Windows does, * so it may help with some non-standards-compliant devices. * Otherwise we start with SET_ADDRESS and then try to read the * first 8 bytes of the device descriptor to get the ep0 maxpacket * value. */ for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) { bool did_new_scheme = false; if (use_new_scheme(udev, retry_counter, port_dev)) { struct usb_device_descriptor *buf; int r = 0; did_new_scheme = true; retval = hub_enable_device(udev); if (retval < 0) { dev_err(&udev->dev, "hub failed to enable device, error %d\n", retval); goto fail; } #define GET_DESCRIPTOR_BUFSIZE 64 buf = kmalloc(GET_DESCRIPTOR_BUFSIZE, GFP_NOIO); if (!buf) { retval = -ENOMEM; continue; } /* Retry on all errors; some devices are flakey. * 255 is for WUSB devices, we actually need to use * 512 (WUSB1.0[4.8.1]). */ for (operations = 0; operations < 3; ++operations) { buf->bMaxPacketSize0 = 0; r = usb_control_msg(udev, usb_rcvaddr0pipe(), USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, USB_DT_DEVICE << 8, 0, buf, GET_DESCRIPTOR_BUFSIZE, initial_descriptor_timeout); switch (buf->bMaxPacketSize0) { case 8: case 16: case 32: case 64: case 255: if (buf->bDescriptorType == USB_DT_DEVICE) { r = 0; break; } /* FALL THROUGH */ default: if (r == 0) r = -EPROTO; break; } /* * Some devices time out if they are powered on * when already connected. They need a second * reset. 
But only on the first attempt, * lest we get into a time out/reset loop */ if (r == 0 || (r == -ETIMEDOUT && retries == 0 && udev->speed > USB_SPEED_FULL)) break; } udev->descriptor.bMaxPacketSize0 = buf->bMaxPacketSize0; kfree(buf); retval = hub_port_reset(hub, port1, udev, delay, false); if (retval < 0) /* error or disconnect */ goto fail; if (oldspeed != udev->speed) { dev_dbg(&udev->dev, "device reset changed speed!\n"); retval = -ENODEV; goto fail; } if (r) { if (r != -ENODEV) dev_err(&udev->dev, "device descriptor read/64, error %d\n", r); retval = -EMSGSIZE; continue; } #undef GET_DESCRIPTOR_BUFSIZE } /* * If device is WUSB, we already assigned an * unauthorized address in the Connect Ack sequence; * authorization will assign the final address. */ if (udev->wusb == 0) { for (operations = 0; operations < SET_ADDRESS_TRIES; ++operations) { retval = hub_set_address(udev, devnum); if (retval >= 0) break; msleep(200); } if (retval < 0) { if (retval != -ENODEV) dev_err(&udev->dev, "device not accepting address %d, error %d\n", devnum, retval); goto fail; } if (udev->speed >= USB_SPEED_SUPER) { devnum = udev->devnum; dev_info(&udev->dev, "%s SuperSpeed%s%s USB device number %d using %s\n", (udev->config) ? "reset" : "new", (udev->speed == USB_SPEED_SUPER_PLUS) ? "Plus Gen 2" : " Gen 1", (udev->rx_lanes == 2 && udev->tx_lanes == 2) ? "x2" : "", devnum, driver_name); } /* cope with hardware quirkiness: * - let SET_ADDRESS settle, some device hardware wants it * - read ep0 maxpacket even for high and low speed, */ msleep(10); /* use_new_scheme() checks the speed which may have * changed since the initial look so we cache the result * in did_new_scheme */ if (did_new_scheme) break; } retval = usb_get_device_descriptor(udev, 8); if (retval < 8) { if (retval != -ENODEV) dev_err(&udev->dev, "device descriptor read/8, error %d\n", retval); if (retval >= 0) retval = -EMSGSIZE; } else { u32 delay; retval = 0; delay = udev->parent->hub_delay; udev->hub_delay = min_t(u32, delay, USB_TP_TRANSMISSION_DELAY_MAX); retval = usb_set_isoch_delay(udev); if (retval) { dev_dbg(&udev->dev, "Failed set isoch delay, error %d\n", retval); retval = 0; } break; } } if (retval) goto fail; /* * Some superspeed devices have finished the link training process * and attached to a superspeed hub port, but the device descriptor * got from those devices show they aren't superspeed devices. Warm * reset the port attached by the devices can fix them. 
*/ if ((udev->speed >= USB_SPEED_SUPER) && (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) { dev_err(&udev->dev, "got a wrong device descriptor, " "warm reset device\n"); hub_port_reset(hub, port1, udev, HUB_BH_RESET_TIME, true); retval = -EINVAL; goto fail; } if (udev->descriptor.bMaxPacketSize0 == 0xff || udev->speed >= USB_SPEED_SUPER) i = 512; else i = udev->descriptor.bMaxPacketSize0; if (usb_endpoint_maxp(&udev->ep0.desc) != i) { if (udev->speed == USB_SPEED_LOW || !(i == 8 || i == 16 || i == 32 || i == 64)) { dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", i); retval = -EMSGSIZE; goto fail; } if (udev->speed == USB_SPEED_FULL) dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i); else dev_warn(&udev->dev, "Using ep0 maxpacket: %d\n", i); udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i); usb_ep0_reinit(udev); } retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE); if (retval < (signed)sizeof(udev->descriptor)) { if (retval != -ENODEV) dev_err(&udev->dev, "device descriptor read/all, error %d\n", retval); if (retval >= 0) retval = -ENOMSG; goto fail; } usb_detect_quirks(udev); if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) { retval = usb_get_bos_descriptor(udev); if (!retval) { udev->lpm_capable = usb_device_supports_lpm(udev); usb_set_lpm_parameters(udev); } } retval = 0; /* notify HCD that we have a device connected and addressed */ if (hcd->driver->update_device) hcd->driver->update_device(hcd, udev); hub_set_initial_usb2_lpm_policy(udev); fail: if (retval) { hub_port_disable(hub, port1, 0); update_devnum(udev, devnum); /* for disconnect processing */ } mutex_unlock(hcd->address0_mutex); return retval; } static void check_highspeed(struct usb_hub *hub, struct usb_device *udev, int port1) { struct usb_qualifier_descriptor *qual; int status; if (udev->quirks & USB_QUIRK_DEVICE_QUALIFIER) return; qual = kmalloc(sizeof *qual, GFP_KERNEL); if (qual == NULL) return; status = usb_get_descriptor(udev, USB_DT_DEVICE_QUALIFIER, 0, qual, sizeof *qual); if (status == sizeof *qual) { dev_info(&udev->dev, "not running at top speed; " "connect to a high speed hub\n"); /* hub LEDs are probably harder to miss than syslog */ if (hub->has_indicators) { hub->indicator[port1-1] = INDICATOR_GREEN_BLINK; queue_delayed_work(system_power_efficient_wq, &hub->leds, 0); } } kfree(qual); } static unsigned hub_power_remaining(struct usb_hub *hub) { struct usb_device *hdev = hub->hdev; int remaining; int port1; if (!hub->limited_power) return 0; remaining = hdev->bus_mA - hub->descriptor->bHubContrCurrent; for (port1 = 1; port1 <= hdev->maxchild; ++port1) { struct usb_port *port_dev = hub->ports[port1 - 1]; struct usb_device *udev = port_dev->child; unsigned unit_load; int delta; if (!udev) continue; if (hub_is_superspeed(udev)) unit_load = 150; else unit_load = 100; /* * Unconfigured devices may not use more than one unit load, * or 8mA for OTG ports */ if (udev->actconfig) delta = usb_get_max_power(udev, udev->actconfig); else if (port1 != udev->bus->otg_port || hdev->parent) delta = unit_load; else delta = 8; if (delta > hub->mA_per_port) dev_warn(&port_dev->dev, "%dmA is over %umA budget!\n", delta, hub->mA_per_port); remaining -= delta; } if (remaining < 0) { dev_warn(hub->intfdev, "%dmA over power budget!\n", -remaining); remaining = 0; } return remaining; } static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, u16 portchange) { int status = -ENODEV; int i; unsigned unit_load; struct usb_device *hdev = hub->hdev; struct usb_hcd *hcd = 
bus_to_hcd(hdev->bus); struct usb_port *port_dev = hub->ports[port1 - 1]; struct usb_device *udev = port_dev->child; static int unreliable_port = -1; /* Disconnect any existing devices under this port */ if (udev) { if (hcd->usb_phy && !hdev->parent) usb_phy_notify_disconnect(hcd->usb_phy, udev->speed); usb_disconnect(&port_dev->child); } /* We can forget about a "removed" device when there's a physical * disconnect or the connect status changes. */ if (!(portstatus & USB_PORT_STAT_CONNECTION) || (portchange & USB_PORT_STAT_C_CONNECTION)) clear_bit(port1, hub->removed_bits); if (portchange & (USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE)) { status = hub_port_debounce_be_stable(hub, port1); if (status < 0) { if (status != -ENODEV && port1 != unreliable_port && printk_ratelimit()) dev_err(&port_dev->dev, "connect-debounce failed\n"); portstatus &= ~USB_PORT_STAT_CONNECTION; unreliable_port = port1; } else { portstatus = status; } } /* Return now if debouncing failed or nothing is connected or * the device was "removed". */ if (!(portstatus & USB_PORT_STAT_CONNECTION) || test_bit(port1, hub->removed_bits)) { /* * maybe switch power back on (e.g. root hub was reset) * but only if the port isn't owned by someone else. */ if (hub_is_port_power_switchable(hub) && !port_is_power_on(hub, portstatus) && !port_dev->port_owner) set_port_feature(hdev, port1, USB_PORT_FEAT_POWER); if (portstatus & USB_PORT_STAT_ENABLE) goto done; return; } if (hub_is_superspeed(hub->hdev)) unit_load = 150; else unit_load = 100; status = 0; for (i = 0; i < SET_CONFIG_TRIES; i++) { /* reallocate for each attempt, since references * to the previous one can escape in various ways */ udev = usb_alloc_dev(hdev, hdev->bus, port1); if (!udev) { dev_err(&port_dev->dev, "couldn't allocate usb_device\n"); goto done; } usb_set_device_state(udev, USB_STATE_POWERED); udev->bus_mA = hub->mA_per_port; udev->level = hdev->level + 1; udev->wusb = hub_is_wusb(hub); /* Devices connected to SuperSpeed hubs are USB 3.0 or later */ if (hub_is_superspeed(hub->hdev)) udev->speed = USB_SPEED_SUPER; else udev->speed = USB_SPEED_UNKNOWN; choose_devnum(udev); if (udev->devnum <= 0) { status = -ENOTCONN; /* Don't retry */ goto loop; } /* reset (non-USB 3.0 devices) and get descriptor */ usb_lock_port(port_dev); status = hub_port_init(hub, udev, port1, i); usb_unlock_port(port_dev); if (status < 0) goto loop; if (udev->quirks & USB_QUIRK_DELAY_INIT) msleep(2000); /* consecutive bus-powered hubs aren't reliable; they can * violate the voltage drop budget. if the new child has * a "powered" LED, users should notice we didn't enable it * (without reading syslog), even without per-port LEDs * on the parent. */ if (udev->descriptor.bDeviceClass == USB_CLASS_HUB && udev->bus_mA <= unit_load) { u16 devstat; status = usb_get_std_status(udev, USB_RECIP_DEVICE, 0, &devstat); if (status) { dev_dbg(&udev->dev, "get status %d ?\n", status); goto loop_disable; } if ((devstat & (1 << USB_DEVICE_SELF_POWERED)) == 0) { dev_err(&udev->dev, "can't connect bus-powered hub " "to this port\n"); if (hub->has_indicators) { hub->indicator[port1-1] = INDICATOR_AMBER_BLINK; queue_delayed_work( system_power_efficient_wq, &hub->leds, 0); } status = -ENOTCONN; /* Don't retry */ goto loop_disable; } } /* check for devices running slower than they could */ if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0200 && udev->speed == USB_SPEED_FULL && highspeed_hubs != 0) check_highspeed(hub, udev, port1); /* Store the parent's children[] pointer. 
At this point * udev becomes globally accessible, although presumably * no one will look at it until hdev is unlocked. */ status = 0; mutex_lock(&usb_port_peer_mutex); /* We mustn't add new devices if the parent hub has * been disconnected; we would race with the * recursively_mark_NOTATTACHED() routine. */ spin_lock_irq(&device_state_lock); if (hdev->state == USB_STATE_NOTATTACHED) status = -ENOTCONN; else port_dev->child = udev; spin_unlock_irq(&device_state_lock); mutex_unlock(&usb_port_peer_mutex); /* Run it through the hoops (find a driver, etc) */ if (!status) { status = usb_new_device(udev); if (status) { mutex_lock(&usb_port_peer_mutex); spin_lock_irq(&device_state_lock); port_dev->child = NULL; spin_unlock_irq(&device_state_lock); mutex_unlock(&usb_port_peer_mutex); } else { if (hcd->usb_phy && !hdev->parent) usb_phy_notify_connect(hcd->usb_phy, udev->speed); } } if (status) goto loop_disable; status = hub_power_remaining(hub); if (status) dev_dbg(hub->intfdev, "%dmA power budget left\n", status); return; loop_disable: hub_port_disable(hub, port1, 1); loop: usb_ep0_reinit(udev); release_devnum(udev); hub_free_dev(udev); usb_put_dev(udev); if ((status == -ENOTCONN) || (status == -ENOTSUPP)) break; /* When halfway through our retry count, power-cycle the port */ if (i == (SET_CONFIG_TRIES / 2) - 1) { dev_info(&port_dev->dev, "attempt power cycle\n"); usb_hub_set_port_power(hdev, hub, port1, false); msleep(2 * hub_power_on_good_delay(hub)); usb_hub_set_port_power(hdev, hub, port1, true); msleep(hub_power_on_good_delay(hub)); } } if (hub->hdev->parent || !hcd->driver->port_handed_over || !(hcd->driver->port_handed_over)(hcd, port1)) { if (status != -ENOTCONN && status != -ENODEV) dev_err(&port_dev->dev, "unable to enumerate USB device\n"); } done: hub_port_disable(hub, port1, 1); if (hcd->driver->relinquish_port && !hub->hdev->parent) { if (status != -ENOTCONN && status != -ENODEV) hcd->driver->relinquish_port(hcd, port1); } } /* Handle physical or logical connection change events. * This routine is called when: * a port connection-change occurs; * a port enable-change occurs (often caused by EMI); * usb_reset_and_verify_device() encounters changed descriptors (as from * a firmware download) * caller already locked the hub */ static void hub_port_connect_change(struct usb_hub *hub, int port1, u16 portstatus, u16 portchange) __must_hold(&port_dev->status_lock) { struct usb_port *port_dev = hub->ports[port1 - 1]; struct usb_device *udev = port_dev->child; int status = -ENODEV; dev_dbg(&port_dev->dev, "status %04x, change %04x, %s\n", portstatus, portchange, portspeed(hub, portstatus)); if (hub->has_indicators) { set_port_led(hub, port1, HUB_LED_AUTO); hub->indicator[port1-1] = INDICATOR_AUTO; } #ifdef CONFIG_USB_OTG /* during HNP, don't repeat the debounce */ if (hub->hdev->bus->is_b_host) portchange &= ~(USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE); #endif /* Try to resuscitate an existing device */ if ((portstatus & USB_PORT_STAT_CONNECTION) && udev && udev->state != USB_STATE_NOTATTACHED) { if (portstatus & USB_PORT_STAT_ENABLE) { status = 0; /* Nothing to do */ #ifdef CONFIG_PM } else if (udev->state == USB_STATE_SUSPENDED && udev->persist_enabled) { /* For a suspended device, treat this as a * remote wakeup event. 
*/ usb_unlock_port(port_dev); status = usb_remote_wakeup(udev); usb_lock_port(port_dev); #endif } else { /* Don't resuscitate */; } } clear_bit(port1, hub->change_bits); /* successfully revalidated the connection */ if (status == 0) return; usb_unlock_port(port_dev); hub_port_connect(hub, port1, portstatus, portchange); usb_lock_port(port_dev); } /* Handle notifying userspace about hub over-current events */ static void port_over_current_notify(struct usb_port *port_dev) { char *envp[3]; struct device *hub_dev; char *port_dev_path; sysfs_notify(&port_dev->dev.kobj, NULL, "over_current_count"); hub_dev = port_dev->dev.parent; if (!hub_dev) return; port_dev_path = kobject_get_path(&port_dev->dev.kobj, GFP_KERNEL); if (!port_dev_path) return; envp[0] = kasprintf(GFP_KERNEL, "OVER_CURRENT_PORT=%s", port_dev_path); if (!envp[0]) goto exit_path; envp[1] = kasprintf(GFP_KERNEL, "OVER_CURRENT_COUNT=%u", port_dev->over_current_count); if (!envp[1]) goto exit; envp[2] = NULL; kobject_uevent_env(&hub_dev->kobj, KOBJ_CHANGE, envp); kfree(envp[1]); exit: kfree(envp[0]); exit_path: kfree(port_dev_path); } static void port_event(struct usb_hub *hub, int port1) __must_hold(&port_dev->status_lock) { int connect_change; struct usb_port *port_dev = hub->ports[port1 - 1]; struct usb_device *udev = port_dev->child; struct usb_device *hdev = hub->hdev; u16 portstatus, portchange; connect_change = test_bit(port1, hub->change_bits); clear_bit(port1, hub->event_bits); clear_bit(port1, hub->wakeup_bits); if (hub_port_status(hub, port1, &portstatus, &portchange) < 0) return; if (portchange & USB_PORT_STAT_C_CONNECTION) { usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION); connect_change = 1; } if (portchange & USB_PORT_STAT_C_ENABLE) { if (!connect_change) dev_dbg(&port_dev->dev, "enable change, status %08x\n", portstatus); usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_ENABLE); /* * EM interference sometimes causes badly shielded USB devices * to be shutdown by the hub, this hack enables them again. * Works at least with mouse driver. 
*/ if (!(portstatus & USB_PORT_STAT_ENABLE) && !connect_change && udev) { dev_err(&port_dev->dev, "disabled by hub (EMI?), re-enabling...\n"); connect_change = 1; } } if (portchange & USB_PORT_STAT_C_OVERCURRENT) { u16 status = 0, unused; port_dev->over_current_count++; port_over_current_notify(port_dev); dev_dbg(&port_dev->dev, "over-current change #%u\n", port_dev->over_current_count); usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_OVER_CURRENT); msleep(100); /* Cool down */ hub_power_on(hub, true); hub_port_status(hub, port1, &status, &unused); if (status & USB_PORT_STAT_OVERCURRENT) dev_err(&port_dev->dev, "over-current condition\n"); } if (portchange & USB_PORT_STAT_C_RESET) { dev_dbg(&port_dev->dev, "reset change\n"); usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_RESET); } if ((portchange & USB_PORT_STAT_C_BH_RESET) && hub_is_superspeed(hdev)) { dev_dbg(&port_dev->dev, "warm reset change\n"); usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_BH_PORT_RESET); } if (portchange & USB_PORT_STAT_C_LINK_STATE) { dev_dbg(&port_dev->dev, "link state change\n"); usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_PORT_LINK_STATE); } if (portchange & USB_PORT_STAT_C_CONFIG_ERROR) { dev_warn(&port_dev->dev, "config error\n"); usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_PORT_CONFIG_ERROR); } /* skip port actions that require the port to be powered on */ if (!pm_runtime_active(&port_dev->dev)) return; if (hub_handle_remote_wakeup(hub, port1, portstatus, portchange)) connect_change = 1; /* * Warm reset a USB3 protocol port if it's in * SS.Inactive state. */ if (hub_port_warm_reset_required(hub, port1, portstatus)) { dev_dbg(&port_dev->dev, "do warm reset\n"); if (!udev || !(portstatus & USB_PORT_STAT_CONNECTION) || udev->state == USB_STATE_NOTATTACHED) { if (hub_port_reset(hub, port1, NULL, HUB_BH_RESET_TIME, true) < 0) hub_port_disable(hub, port1, 1); } else { usb_unlock_port(port_dev); usb_lock_device(udev); usb_reset_device(udev); usb_unlock_device(udev); usb_lock_port(port_dev); connect_change = 0; } } if (connect_change) hub_port_connect_change(hub, port1, portstatus, portchange); } static void hub_event(struct work_struct *work) { struct usb_device *hdev; struct usb_interface *intf; struct usb_hub *hub; struct device *hub_dev; u16 hubstatus; u16 hubchange; int i, ret; hub = container_of(work, struct usb_hub, events); hdev = hub->hdev; hub_dev = hub->intfdev; intf = to_usb_interface(hub_dev); dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n", hdev->state, hdev->maxchild, /* NOTE: expects max 15 ports... */ (u16) hub->change_bits[0], (u16) hub->event_bits[0]); /* Lock the device, then check to see if we were * disconnected while waiting for the lock to succeed. 
*/ usb_lock_device(hdev); if (unlikely(hub->disconnected)) goto out_hdev_lock; /* If the hub has died, clean up after it */ if (hdev->state == USB_STATE_NOTATTACHED) { hub->error = -ENODEV; hub_quiesce(hub, HUB_DISCONNECT); goto out_hdev_lock; } /* Autoresume */ ret = usb_autopm_get_interface(intf); if (ret) { dev_dbg(hub_dev, "Can't autoresume: %d\n", ret); goto out_hdev_lock; } /* If this is an inactive hub, do nothing */ if (hub->quiescing) goto out_autopm; if (hub->error) { dev_dbg(hub_dev, "resetting for error %d\n", hub->error); ret = usb_reset_device(hdev); if (ret) { dev_dbg(hub_dev, "error resetting hub: %d\n", ret); goto out_autopm; } hub->nerrors = 0; hub->error = 0; } /* deal with port status changes */ for (i = 1; i <= hdev->maxchild; i++) { struct usb_port *port_dev = hub->ports[i - 1]; if (test_bit(i, hub->event_bits) || test_bit(i, hub->change_bits) || test_bit(i, hub->wakeup_bits)) { /* * The get_noresume and barrier ensure that if * the port was in the process of resuming, we * flush that work and keep the port active for * the duration of the port_event(). However, * if the port is runtime pm suspended * (powered-off), we leave it in that state, run * an abbreviated port_event(), and move on. */ pm_runtime_get_noresume(&port_dev->dev); pm_runtime_barrier(&port_dev->dev); usb_lock_port(port_dev); port_event(hub, i); usb_unlock_port(port_dev); pm_runtime_put_sync(&port_dev->dev); } } /* deal with hub status changes */ if (test_and_clear_bit(0, hub->event_bits) == 0) ; /* do nothing */ else if (hub_hub_status(hub, &hubstatus, &hubchange) < 0) dev_err(hub_dev, "get_hub_status failed\n"); else { if (hubchange & HUB_CHANGE_LOCAL_POWER) { dev_dbg(hub_dev, "power change\n"); clear_hub_feature(hdev, C_HUB_LOCAL_POWER); if (hubstatus & HUB_STATUS_LOCAL_POWER) /* FIXME: Is this always true? */ hub->limited_power = 1; else hub->limited_power = 0; } if (hubchange & HUB_CHANGE_OVERCURRENT) { u16 status = 0; u16 unused; dev_dbg(hub_dev, "over-current change\n"); clear_hub_feature(hdev, C_HUB_OVER_CURRENT); msleep(500); /* Cool down */ hub_power_on(hub, true); hub_hub_status(hub, &status, &unused); if (status & HUB_STATUS_OVERCURRENT) dev_err(hub_dev, "over-current condition\n"); } } out_autopm: /* Balance the usb_autopm_get_interface() above */ usb_autopm_put_interface_no_suspend(intf); out_hdev_lock: usb_unlock_device(hdev); /* Balance the stuff in kick_hub_wq() and allow autosuspend */ usb_autopm_put_interface(intf); kref_put(&hub->kref, hub_release); } static const struct usb_device_id hub_id_table[] = { { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS, .idVendor = USB_VENDOR_GENESYS_LOGIC, .bInterfaceClass = USB_CLASS_HUB, .driver_info = HUB_QUIRK_CHECK_PORT_AUTOSUSPEND}, { .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS, .bDeviceClass = USB_CLASS_HUB}, { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS, .bInterfaceClass = USB_CLASS_HUB}, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, hub_id_table); static struct usb_driver hub_driver = { .name = "hub", .probe = hub_probe, .disconnect = hub_disconnect, .suspend = hub_suspend, .resume = hub_resume, .reset_resume = hub_reset_resume, .pre_reset = hub_pre_reset, .post_reset = hub_post_reset, .unlocked_ioctl = hub_ioctl, .id_table = hub_id_table, .supports_autosuspend = 1, }; int usb_hub_init(void) { if (usb_register(&hub_driver) < 0) { printk(KERN_ERR "%s: can't register hub driver\n", usbcore_name); return -1; } /* * The workqueue needs to be freezable to avoid interfering with * USB-PERSIST port handover. 
Otherwise it might see that a full-speed * device was gone before the EHCI controller had handed its port * over to the companion full-speed controller. */ hub_wq = alloc_workqueue("usb_hub_wq", WQ_FREEZABLE, 0); if (hub_wq) return 0; /* Fall through if kernel_thread failed */ usb_deregister(&hub_driver); pr_err("%s: can't allocate workqueue for usb hub\n", usbcore_name); return -1; } void usb_hub_cleanup(void) { destroy_workqueue(hub_wq); /* * Hub resources are freed for us by usb_deregister. It calls * usb_driver_purge on every device which in turn calls that * devices disconnect function if it is using this driver. * The hub_disconnect function takes care of releasing the * individual hub resources. -greg */ usb_deregister(&hub_driver); } /* usb_hub_cleanup() */ static int descriptors_changed(struct usb_device *udev, struct usb_device_descriptor *old_device_descriptor, struct usb_host_bos *old_bos) { int changed = 0; unsigned index; unsigned serial_len = 0; unsigned len; unsigned old_length; int length; char *buf; if (memcmp(&udev->descriptor, old_device_descriptor, sizeof(*old_device_descriptor)) != 0) return 1; if ((old_bos && !udev->bos) || (!old_bos && udev->bos)) return 1; if (udev->bos) { len = le16_to_cpu(udev->bos->desc->wTotalLength); if (len != le16_to_cpu(old_bos->desc->wTotalLength)) return 1; if (memcmp(udev->bos->desc, old_bos->desc, len)) return 1; } /* Since the idVendor, idProduct, and bcdDevice values in the * device descriptor haven't changed, we will assume the * Manufacturer and Product strings haven't changed either. * But the SerialNumber string could be different (e.g., a * different flash card of the same brand). */ if (udev->serial) serial_len = strlen(udev->serial) + 1; len = serial_len; for (index = 0; index < udev->descriptor.bNumConfigurations; index++) { old_length = le16_to_cpu(udev->config[index].desc.wTotalLength); len = max(len, old_length); } buf = kmalloc(len, GFP_NOIO); if (!buf) /* assume the worst */ return 1; for (index = 0; index < udev->descriptor.bNumConfigurations; index++) { old_length = le16_to_cpu(udev->config[index].desc.wTotalLength); length = usb_get_descriptor(udev, USB_DT_CONFIG, index, buf, old_length); if (length != old_length) { dev_dbg(&udev->dev, "config index %d, error %d\n", index, length); changed = 1; break; } if (memcmp(buf, udev->rawdescriptors[index], old_length) != 0) { dev_dbg(&udev->dev, "config index %d changed (#%d)\n", index, ((struct usb_config_descriptor *) buf)-> bConfigurationValue); changed = 1; break; } } if (!changed && serial_len) { length = usb_string(udev, udev->descriptor.iSerialNumber, buf, serial_len); if (length + 1 != serial_len) { dev_dbg(&udev->dev, "serial string error %d\n", length); changed = 1; } else if (memcmp(buf, udev->serial, length) != 0) { dev_dbg(&udev->dev, "serial string changed\n"); changed = 1; } } kfree(buf); return changed; } /** * usb_reset_and_verify_device - perform a USB port reset to reinitialize a device * @udev: device to reset (not in SUSPENDED or NOTATTACHED state) * * WARNING - don't use this routine to reset a composite device * (one with multiple interfaces owned by separate drivers)! * Use usb_reset_device() instead. * * Do a port reset, reassign the device's address, and establish its * former operating configuration. 
If the reset fails, or the device's * descriptors change from their values before the reset, or the original * configuration and altsettings cannot be restored, a flag will be set * telling hub_wq to pretend the device has been disconnected and then * re-connected. All drivers will be unbound, and the device will be * re-enumerated and probed all over again. * * Return: 0 if the reset succeeded, -ENODEV if the device has been * flagged for logical disconnection, or some other negative error code * if the reset wasn't even attempted. * * Note: * The caller must own the device lock and the port lock, the latter is * taken by usb_reset_device(). For example, it's safe to use * usb_reset_device() from a driver probe() routine after downloading * new firmware. For calls that might not occur during probe(), drivers * should lock the device using usb_lock_device_for_reset(). * * Locking exception: This routine may also be called from within an * autoresume handler. Such usage won't conflict with other tasks * holding the device lock because these tasks should always call * usb_autopm_resume_device(), thereby preventing any unwanted * autoresume. The autoresume handler is expected to have already * acquired the port lock before calling this routine. */ static int usb_reset_and_verify_device(struct usb_device *udev) { struct usb_device *parent_hdev = udev->parent; struct usb_hub *parent_hub; struct usb_hcd *hcd = bus_to_hcd(udev->bus); struct usb_device_descriptor descriptor = udev->descriptor; struct usb_host_bos *bos; int i, j, ret = 0; int port1 = udev->portnum; if (udev->state == USB_STATE_NOTATTACHED || udev->state == USB_STATE_SUSPENDED) { dev_dbg(&udev->dev, "device reset not allowed in state %d\n", udev->state); return -EINVAL; } if (!parent_hdev) return -EISDIR; parent_hub = usb_hub_to_struct_hub(parent_hdev); /* Disable USB2 hardware LPM. * It will be re-enabled by the enumeration process. */ if (udev->usb2_hw_lpm_enabled == 1) usb_set_usb2_hardware_lpm(udev, 0); /* Disable LPM while we reset the device and reinstall the alt settings. * Device-initiated LPM, and system exit latency settings are cleared * when the device is reset, so we have to set them up again. */ ret = usb_unlocked_disable_lpm(udev); if (ret) { dev_err(&udev->dev, "%s Failed to disable LPM\n", __func__); goto re_enumerate_no_bos; } bos = udev->bos; udev->bos = NULL; for (i = 0; i < SET_CONFIG_TRIES; ++i) { /* ep0 maxpacket size may change; let the HCD know about it. * Other endpoints will be handled by re-enumeration. */ usb_ep0_reinit(udev); ret = hub_port_init(parent_hub, udev, port1, i); if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV) break; } if (ret < 0) goto re_enumerate; /* Device might have changed firmware (DFU or similar) */ if (descriptors_changed(udev, &descriptor, bos)) { dev_info(&udev->dev, "device firmware changed\n"); udev->descriptor = descriptor; /* for disconnect() calls */ goto re_enumerate; } /* Restore the device's previous configuration */ if (!udev->actconfig) goto done; mutex_lock(hcd->bandwidth_mutex); ret = usb_hcd_alloc_bandwidth(udev, udev->actconfig, NULL, NULL); if (ret < 0) { dev_warn(&udev->dev, "Busted HC? 
Not enough HCD resources for " "old configuration.\n"); mutex_unlock(hcd->bandwidth_mutex); goto re_enumerate; } ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_REQ_SET_CONFIGURATION, 0, udev->actconfig->desc.bConfigurationValue, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (ret < 0) { dev_err(&udev->dev, "can't restore configuration #%d (error=%d)\n", udev->actconfig->desc.bConfigurationValue, ret); mutex_unlock(hcd->bandwidth_mutex); goto re_enumerate; } mutex_unlock(hcd->bandwidth_mutex); usb_set_device_state(udev, USB_STATE_CONFIGURED); /* Put interfaces back into the same altsettings as before. * Don't bother to send the Set-Interface request for interfaces * that were already in altsetting 0; besides being unnecessary, * many devices can't handle it. Instead just reset the host-side * endpoint state. */ for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { struct usb_host_config *config = udev->actconfig; struct usb_interface *intf = config->interface[i]; struct usb_interface_descriptor *desc; desc = &intf->cur_altsetting->desc; if (desc->bAlternateSetting == 0) { usb_disable_interface(udev, intf, true); usb_enable_interface(udev, intf, true); ret = 0; } else { /* Let the bandwidth allocation function know that this * device has been reset, and it will have to use * alternate setting 0 as the current alternate setting. */ intf->resetting_device = 1; ret = usb_set_interface(udev, desc->bInterfaceNumber, desc->bAlternateSetting); intf->resetting_device = 0; } if (ret < 0) { dev_err(&udev->dev, "failed to restore interface %d " "altsetting %d (error=%d)\n", desc->bInterfaceNumber, desc->bAlternateSetting, ret); goto re_enumerate; } /* Resetting also frees any allocated streams */ for (j = 0; j < intf->cur_altsetting->desc.bNumEndpoints; j++) intf->cur_altsetting->endpoint[j].streams = 0; } done: /* Now that the alt settings are re-installed, enable LTM and LPM. */ usb_set_usb2_hardware_lpm(udev, 1); usb_unlocked_enable_lpm(udev); usb_enable_ltm(udev); usb_release_bos_descriptor(udev); udev->bos = bos; return 0; re_enumerate: usb_release_bos_descriptor(udev); udev->bos = bos; re_enumerate_no_bos: /* LPM state doesn't matter when we're about to destroy the device. */ hub_port_logical_disconnect(parent_hub, port1); return -ENODEV; } /** * usb_reset_device - warn interface drivers and perform a USB port reset * @udev: device to reset (not in SUSPENDED or NOTATTACHED state) * * Warns all drivers bound to registered interfaces (using their pre_reset * method), performs the port reset, and then lets the drivers know that * the reset is over (using their post_reset method). * * Return: The same as for usb_reset_and_verify_device(). * * Note: * The caller must own the device lock. For example, it's safe to use * this from a driver probe() routine after downloading new firmware. * For calls that might not occur during probe(), drivers should lock * the device using usb_lock_device_for_reset(). * * If an interface is currently being probed or disconnected, we assume * its driver knows how to handle resets. For all other interfaces, * if the driver doesn't have pre_reset and post_reset methods then * we attempt to unbind it and rebind afterward. 
*/ int usb_reset_device(struct usb_device *udev) { int ret; int i; unsigned int noio_flag; struct usb_port *port_dev; struct usb_host_config *config = udev->actconfig; struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); if (udev->state == USB_STATE_NOTATTACHED || udev->state == USB_STATE_SUSPENDED) { dev_dbg(&udev->dev, "device reset not allowed in state %d\n", udev->state); return -EINVAL; } if (!udev->parent) { /* this requires hcd-specific logic; see ohci_restart() */ dev_dbg(&udev->dev, "%s for root hub!\n", __func__); return -EISDIR; } port_dev = hub->ports[udev->portnum - 1]; /* * Don't allocate memory with GFP_KERNEL in current * context to avoid possible deadlock if usb mass * storage interface or usbnet interface(iSCSI case) * is included in current configuration. The easist * approach is to do it for every device reset, * because the device 'memalloc_noio' flag may have * not been set before reseting the usb device. */ noio_flag = memalloc_noio_save(); /* Prevent autosuspend during the reset */ usb_autoresume_device(udev); if (config) { for (i = 0; i < config->desc.bNumInterfaces; ++i) { struct usb_interface *cintf = config->interface[i]; struct usb_driver *drv; int unbind = 0; if (cintf->dev.driver) { drv = to_usb_driver(cintf->dev.driver); if (drv->pre_reset && drv->post_reset) unbind = (drv->pre_reset)(cintf); else if (cintf->condition == USB_INTERFACE_BOUND) unbind = 1; if (unbind) usb_forced_unbind_intf(cintf); } } } usb_lock_port(port_dev); ret = usb_reset_and_verify_device(udev); usb_unlock_port(port_dev); if (config) { for (i = config->desc.bNumInterfaces - 1; i >= 0; --i) { struct usb_interface *cintf = config->interface[i]; struct usb_driver *drv; int rebind = cintf->needs_binding; if (!rebind && cintf->dev.driver) { drv = to_usb_driver(cintf->dev.driver); if (drv->post_reset) rebind = (drv->post_reset)(cintf); else if (cintf->condition == USB_INTERFACE_BOUND) rebind = 1; if (rebind) cintf->needs_binding = 1; } } usb_unbind_and_rebind_marked_interfaces(udev); } usb_autosuspend_device(udev); memalloc_noio_restore(noio_flag); return ret; } EXPORT_SYMBOL_GPL(usb_reset_device); /** * usb_queue_reset_device - Reset a USB device from an atomic context * @iface: USB interface belonging to the device to reset * * This function can be used to reset a USB device from an atomic * context, where usb_reset_device() won't work (as it blocks). * * Doing a reset via this method is functionally equivalent to calling * usb_reset_device(), except for the fact that it is delayed to a * workqueue. This means that any drivers bound to other interfaces * might be unbound, as well as users from usbfs in user space. * * Corner cases: * * - Scheduling two resets at the same time from two different drivers * attached to two different interfaces of the same device is * possible; depending on how the driver attached to each interface * handles ->pre_reset(), the second reset might happen or not. * * - If the reset is delayed so long that the interface is unbound from * its driver, the reset will be skipped. * * - This function can be called during .probe(). It can also be called * during .disconnect(), but doing so is pointless because the reset * will not occur. If you really want to reset the device during * .disconnect(), call usb_reset_device() directly -- but watch out * for nested unbinding issues! 
*/ void usb_queue_reset_device(struct usb_interface *iface) { if (schedule_work(&iface->reset_ws)) usb_get_intf(iface); } EXPORT_SYMBOL_GPL(usb_queue_reset_device); /** * usb_hub_find_child - Get the pointer of child device * attached to the port which is specified by @port1. * @hdev: USB device belonging to the usb hub * @port1: port num to indicate which port the child device * is attached to. * * USB drivers call this function to get hub's child device * pointer. * * Return: %NULL if input param is invalid and * child's usb_device pointer if non-NULL. */ struct usb_device *usb_hub_find_child(struct usb_device *hdev, int port1) { struct usb_hub *hub = usb_hub_to_struct_hub(hdev); if (port1 < 1 || port1 > hdev->maxchild) return NULL; return hub->ports[port1 - 1]->child; } EXPORT_SYMBOL_GPL(usb_hub_find_child); void usb_hub_adjust_deviceremovable(struct usb_device *hdev, struct usb_hub_descriptor *desc) { struct usb_hub *hub = usb_hub_to_struct_hub(hdev); enum usb_port_connect_type connect_type; int i; if (!hub) return; if (!hub_is_superspeed(hdev)) { for (i = 1; i <= hdev->maxchild; i++) { struct usb_port *port_dev = hub->ports[i - 1]; connect_type = port_dev->connect_type; if (connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) { u8 mask = 1 << (i%8); if (!(desc->u.hs.DeviceRemovable[i/8] & mask)) { dev_dbg(&port_dev->dev, "DeviceRemovable is changed to 1 according to platform information.\n"); desc->u.hs.DeviceRemovable[i/8] |= mask; } } } } else { u16 port_removable = le16_to_cpu(desc->u.ss.DeviceRemovable); for (i = 1; i <= hdev->maxchild; i++) { struct usb_port *port_dev = hub->ports[i - 1]; connect_type = port_dev->connect_type; if (connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) { u16 mask = 1 << i; if (!(port_removable & mask)) { dev_dbg(&port_dev->dev, "DeviceRemovable is changed to 1 according to platform information.\n"); port_removable |= mask; } } } desc->u.ss.DeviceRemovable = cpu_to_le16(port_removable); } } #ifdef CONFIG_ACPI /** * usb_get_hub_port_acpi_handle - Get the usb port's acpi handle * @hdev: USB device belonging to the usb hub * @port1: port num of the port * * Return: Port's acpi handle if successful, %NULL if params are * invalid. */ acpi_handle usb_get_hub_port_acpi_handle(struct usb_device *hdev, int port1) { struct usb_hub *hub = usb_hub_to_struct_hub(hdev); if (!hub) return NULL; return ACPI_HANDLE(&hub->ports[port1 - 1]->dev); } #endif
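/*
 * Illustrative usage sketch (not part of the original file): a minimal,
 * hypothetical example of how a driver might combine the two exported
 * helpers documented above. The function name example_check_and_reset(),
 * the variables "hdev" and "iface", and the port number 1 are assumptions
 * made for illustration only; usb_hub_find_child() and
 * usb_queue_reset_device() are the calls defined in this file.
 *
 *	static void example_check_and_reset(struct usb_device *hdev,
 *					    struct usb_interface *iface)
 *	{
 *		struct usb_device *child;
 *
 *		child = usb_hub_find_child(hdev, 1);
 *		if (!child)
 *			return;
 *		usb_queue_reset_device(iface);
 *	}
 *
 * Because usb_queue_reset_device() only schedules the reset on a work item,
 * a pattern like this can be used from contexts where the blocking
 * usb_reset_device() cannot be called directly.
 */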
#include <config.h> #include "ftpd.h" #include "ls_p.h" #include "bsd-glob.h" #include "messages.h" #include "dynamic.h" #include "ftpwho-update.h" #include "globals.h" #include "safe_rw.h" #ifdef WITH_TLS # include "tls.h" #endif #ifdef WITH_DMALLOC # include <dmalloc.h> #endif static void wrstr(const int f, void * const tls_fd, const char *s) { static char outbuf[CONF_TCP_SO_SNDBUF]; static size_t outcnt; size_t l; if (s == NULL) { if (outcnt > (size_t) 0U) { #ifdef WITH_TLS if (tls_fd != NULL) { if (secure_safe_write(tls_fd, outbuf, outcnt) != (ssize_t) outcnt) { return; } } else #endif { (void) tls_fd; if (safe_write(f, outbuf, outcnt, -1) != (ssize_t) outcnt) { return; } } } outcnt = (size_t) 0U; return; } if ((l = strlen(s)) <= (size_t) 0U) { return; } if (l <= (sizeof outbuf - outcnt)) { memcpy(outbuf + outcnt, s, l); /* secure, see above */ outcnt += l; return; } if (outcnt < sizeof outbuf) { const size_t rest = sizeof outbuf - outcnt; memcpy(outbuf + outcnt, s, rest); /* secure, see above */ s += rest; l -= rest; } #ifdef WITH_TLS if (tls_fd != NULL) { if (secure_safe_write(tls_fd, outbuf, sizeof outbuf) != (ssize_t) sizeof outbuf) { return; } } else #endif { if (safe_write(f, outbuf, sizeof outbuf, -1) != (ssize_t) sizeof outbuf) { return; } } #ifdef WITH_TLS if (tls_fd != NULL) { while (l > sizeof outbuf) { if (secure_safe_write(tls_fd, s, sizeof outbuf) != (ssize_t) sizeof outbuf) { return; } s += sizeof outbuf; l -= sizeof outbuf; } } else #endif { while (l > sizeof outbuf) { if (safe_write(f, s, sizeof outbuf, -1) != (ssize_t) sizeof outbuf) { return; } s += sizeof outbuf; l -= sizeof outbuf; } } if (l > (size_t) 0U) { memcpy(outbuf, s, l); /* safe, l <= sizeof outbuf */ outcnt = l; } } #ifdef NO_FTP_USERS const char *getname(const uid_t uid) { static char number[11]; snprintf(number, sizeof number, "%-10d", uid); return number; } const char *getgroup(const gid_t gid) { static char number[11]; snprintf(number, sizeof number, "%-10d", gid); return number; } #else const char *getname(const uid_t uid) { struct userid *p; struct passwd *pwd = NULL; for (p = user_head; p; p = p->next) { if (p->uid == uid) { return p->name; } } if ( # ifndef ALWAYS_RESOLVE_IDS chrooted == 0 && # endif authresult.slow_tilde_expansion == 0) { pwd = getpwuid(uid); } if ((p = malloc(sizeof *p)) == NULL) { die_mem(); } p->uid = uid; if ((p->name = malloc((size_t) 11U)) == NULL) { die_mem(); } if (pwd != NULL) { if (SNCHECK(snprintf(p->name, (size_t) 11U, "%-10.10s", pwd->pw_name), (size_t) 11U)) { _EXIT(EXIT_FAILURE); } } else { if (SNCHECK(snprintf(p->name, (size_t) 11U, "%-10d", uid), (size_t) 11U)) { _EXIT(EXIT_FAILURE); } } p->next = user_head; user_head = p; return p->name; } /* eeeehm... 
sorry for names, ya know copy&paste :))) */ const char *getgroup(const gid_t gid) { struct groupid *p; struct group *pwd = NULL; for (p = group_head; p; p = p->next) { if (p->gid == gid) { return p->name; } } # ifndef ALWAYS_RESOLVE_IDS if (chrooted == 0) # endif { pwd = getgrgid(gid); } if ((p = malloc(sizeof *p)) == NULL) { die_mem(); } p->gid = gid; if ((p->name = malloc((size_t) 11U)) == NULL) { die_mem(); } if (pwd != NULL) { if (SNCHECK(snprintf(p->name, (size_t) 11U, "%-10.10s", pwd->gr_name), (size_t) 11U)) { _EXIT(EXIT_FAILURE); } } else { if (SNCHECK(snprintf(p->name, (size_t) 11U, "%-10d", gid), (size_t) 11U)) { _EXIT(EXIT_FAILURE); } } p->next = group_head; group_head = p; return p->name; } #endif static void addfile(const char *name, const char *suffix) { struct filename *p; unsigned int l; if (!name || !suffix) { return; } if (matches >= max_ls_files) { return; } matches++; l = (unsigned int) (strlen(name) + strlen(suffix)); if (l > colwidth) { colwidth = l; } if ((p = malloc(offsetof(struct filename, line) + l + 1U)) == NULL) { return; } if (SNCHECK(snprintf(p->line, l + 1U, "%s%s", name, suffix), l + 1U)) { _EXIT(EXIT_FAILURE); } if (tail != NULL) { tail->down = p; } else { head = p; } tail = p; filenames++; } /* listfile returns non-zero if the file is a directory */ static int listfile(const PureFileInfo * const fi, const char *name) { int rval = 0; struct stat st; struct tm *t; char suffix[2] = { 0, 0 }; char m[PATH_MAX + 1U]; const char *format; if (fi == NULL) { if (lstat(name, &st) < 0) { return 0; } } else { st.st_size = fi->size; st.st_mtime = fi->mtime; st.st_mode = fi->mode; st.st_nlink = fi->nlink; st.st_uid = fi->uid; st.st_gid = fi->gid; name = FI_NAME(fi); } #if defined(WITH_VIRTUAL_CHROOT) && defined(S_IFLNK) && defined(S_IFDIR) if (S_ISLNK(st.st_mode) && name[0] == '.' && name[1] == '.' && name[2] == 0) { st.st_mode &= ~S_IFLNK; st.st_mode |= S_IFDIR; } /* Hack to please some Windows clients that dislike ../ -> ../ */ #endif #if !defined(MINIMAL) && !defined(ALWAYS_SHOW_SYMLINKS_AS_SYMLINKS) if ( # ifndef ALWAYS_SHOW_RESOLVED_SYMLINKS broken_client_compat != 0 && # endif S_ISLNK(st.st_mode)) { struct stat sts; if (stat(name, &sts) == 0 && !S_ISLNK(sts.st_mode)) { st = sts; } } /* Show non-dangling symlinks as files/directories */ #endif #ifdef DISPLAY_FILES_IN_UTC_TIME t = gmtime((time_t *) &st.st_mtime); #else t = localtime((time_t *) &st.st_mtime); #endif if (t == NULL) { logfile(LOG_ERR, "{gm,local}gtime() for [%s]", name); return 0; } if (opt_F) { if (S_ISLNK(st.st_mode)) suffix[0] = '@'; else if (S_ISDIR(st.st_mode)) { suffix[0] = '/'; rval = 1; } else if (st.st_mode & 010101) { suffix[0] = '*'; } } if (opt_l) { strncpy(m, " ---------", (sizeof m) - (size_t) 1U); m[(sizeof m) - (size_t) 1U] = 0; switch (st.st_mode & S_IFMT) { case S_IFREG: m[0] = '-'; break; case S_IFLNK: m[0] = 'l'; break; /* readlink() here? */ case S_IFDIR: m[0] = 'd'; rval = 1; break; } if (m[0] != ' ') { char *alloca_nameline; const size_t sizeof_nameline = PATH_MAX + PATH_MAX + 128U; char timeline[6U]; if (st.st_mode & 0400) { m[1] = 'r'; } if (st.st_mode & 0200) { m[2] = 'w'; } if (st.st_mode & 0100) { m[3] = (char) (st.st_mode & 04000 ? 's' : 'x'); } else if (st.st_mode & 04000) { m[3] = 'S'; } if (st.st_mode & 040) { m[4] = 'r'; } if (st.st_mode & 020) { m[5] = 'w'; } if (st.st_mode & 010) { m[6] = (char) (st.st_mode & 02000 ? 
's' : 'x'); } else if (st.st_mode & 02000) { m[6] = 'S'; } if (st.st_mode & 04) { m[7] = 'r'; } if (st.st_mode & 02) { m[8] = 'w'; } if (st.st_mode & 01) { m[9] = (char) (st.st_mode & 01000 ? 't' : 'x'); } else if (st.st_mode & 01000) { m[9] = 'T'; } if (time(NULL) - st.st_mtime > 180 * 24 * 60 * 60) { if (SNCHECK(snprintf(timeline, sizeof timeline, "%5d", t->tm_year + 1900), sizeof timeline)) { _EXIT(EXIT_FAILURE); } } else { if (SNCHECK(snprintf(timeline, sizeof timeline, "%02d:%02d", t->tm_hour, t->tm_min), sizeof timeline)) { _EXIT(EXIT_FAILURE); } } if ((alloca_nameline = ALLOCA(sizeof_nameline)) == NULL) { return 0; } if (st.st_size < 10000000000U) { format = "%s %4u %s %s %10llu %s %2d %s %s"; } else { format = "%s %4u %s %s %18llu %s %2d %s %s"; } if (SNCHECK(snprintf(alloca_nameline, sizeof_nameline, format, m, (unsigned int) st.st_nlink, getname(st.st_uid), getgroup(st.st_gid), (unsigned long long) st.st_size, months[t->tm_mon], t->tm_mday, timeline, name), sizeof_nameline)) { ALLOCA_FREE(alloca_nameline); _EXIT(EXIT_FAILURE); } if (S_ISLNK(st.st_mode)) { char *p = alloca_nameline + strlen(alloca_nameline); { ssize_t sx; if ((sx = readlink(name, m, sizeof m - 1U)) > 0) { m[sx] = 0; } else { m[0] = m[1] = '.'; m[2] = 0; } } suffix[0] = 0; if (opt_F && stat(name, &st) == 0) { if (S_ISLNK(st.st_mode)) { suffix[0] = '@'; } else if (S_ISDIR(st.st_mode)) { suffix[0] = '/'; } else if (st.st_mode & 010101) { suffix[0] = '*'; } } /* 2 * PATH_MAX + gap should be enough, but be paranoid... */ if (SNCHECK (snprintf(p, (sizeof_nameline) - strlen(alloca_nameline), " -> %s", m), (sizeof_nameline) - strlen(alloca_nameline))) { ALLOCA_FREE(alloca_nameline); _EXIT(EXIT_FAILURE); } } addfile(alloca_nameline, suffix); ALLOCA_FREE(alloca_nameline); } /* hide non-downloadable files */ } else { if (S_ISREG(st.st_mode) || S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode)) { addfile(name, suffix); } } return rval; } static void outputfiles(int f, void * const tls_fd) { unsigned int n; struct filename *p; struct filename *q; if (!head) { return; } tail->down = NULL; tail = NULL; colwidth = (colwidth | 7U) + 1U; if (opt_l != 0 || opt_C == 0) { colwidth = 75U; } /* set up first column */ p = head; p->top = 1; if (colwidth > 75U) { n = filenames; } else { n = (filenames + (75U / colwidth) - 1U) / (75U / colwidth); } while (n && p) { p = p->down; if (p != NULL) { p->top = 0; } n--; } /* while there's a neighbour to the right, point at it */ q = head; while (p) { p->top = q->top; q->right = p; q = q->down; p = p->down; } /* some are at the right end */ while (q) { q->right = NULL; q = q->down; } /* don't want wraparound, do we? 
*/ p = head; while (p && p->down && !p->down->top) { p = p->down; } if (p && p->down) { p->down = NULL; } /* print each line, which consists of each column */ p = head; while (p) { q = p; p = p->down; while (q) { char pad[6]; char *tmp = (char *) q; if (q->right) { memset(pad, '\t', sizeof pad - 1U); pad[(sizeof pad) - 1] = 0; pad[(colwidth + 7U - strlen(q->line)) / 8] = 0; } else { pad[0] = '\r'; pad[1] = '\n'; pad[2] = 0; } wrstr(f, tls_fd, q->line); wrstr(f, tls_fd, pad); q = q->right; free(tmp); tmp = NULL; } } /* reset variables for next time */ head = tail = NULL; colwidth = 0U; filenames = 0U; } /* functions to to sort for qsort() */ static int cmp(const void * const a, const void * const b) { return strcmp(FI_NAME((const PureFileInfo *) a), FI_NAME((const PureFileInfo *) b)); } static int cmp_r(const void * const a, const void * const b) { return strcmp(FI_NAME((const PureFileInfo *) b), FI_NAME((const PureFileInfo *) a)); } static int cmp_t(const void * const a, const void * const b) { if (((const PureFileInfo *) a)->mtime < ((const PureFileInfo *) b)->mtime) { return 1; } if (((const PureFileInfo *) a)->mtime > ((const PureFileInfo *) b)->mtime) { return -1; } return 0; } static int cmp_rt(const void * const a, const void * const b) { return cmp_t(b, a); } static int cmp_S(const void * const a, const void * const b) { if (((const PureFileInfo *) a)->size < ((const PureFileInfo *) b)->size) { return 1; } if (((const PureFileInfo *) a)->size > ((const PureFileInfo *) b)->size) { return -1; } return 0; } static int cmp_rS(const void * const a, const void * const b) { return cmp_S(b, a); } static PureFileInfo *sreaddir(char **names_pnt) { struct stat st; DIR *d; struct dirent *de; PureFileInfo *files_info; PureFileInfo *file_info; size_t files_info_size; size_t files_info_counter = (size_t) 0U; char *names; size_t names_size; size_t names_counter = (size_t) 0U; size_t name_len; int (*cmp_func)(const void * const, const void * const); if ((d = opendir(".")) == NULL) { return NULL; } names_size = CHUNK_SIZE; if ((names = malloc(names_size)) == NULL) { closedir(d); return NULL; } files_info_size = CHUNK_SIZE / sizeof *files_info; if ((files_info = malloc(files_info_size * sizeof *files_info)) == NULL) { closedir(d); free(names); return NULL; } while ((de = readdir(d)) != NULL) { if (checkprintable(de->d_name) != 0 || lstat(de->d_name, &st) < 0) { continue; } name_len = strlen(de->d_name) + (size_t) 1U; while (names_counter + name_len >= names_size) { char *new_names; if (name_len >= CHUNK_SIZE) { names_size += name_len + CHUNK_SIZE; } else { names_size += CHUNK_SIZE; } if ((new_names = realloc(names, names_size)) == NULL) { nomem: closedir(d); free(names); free(files_info); return NULL; } names = new_names; } while ((files_info_counter + (size_t) 1U) >= files_info_size) { PureFileInfo *new_files_info; files_info_size += (CHUNK_SIZE / sizeof *files_info); if ((new_files_info = realloc(files_info, files_info_size * sizeof *files_info)) == NULL) { goto nomem; } files_info = new_files_info; } memcpy(&names[names_counter], de->d_name, name_len); /* safe */ names[names_counter + name_len - 1] = 0; file_info = &files_info[files_info_counter]; file_info->names_pnt = names_pnt; file_info->name_offset = names_counter; file_info->size = st.st_size; file_info->mtime = st.st_mtime; file_info->mode = st.st_mode; file_info->nlink = st.st_nlink; file_info->uid = st.st_uid; file_info->gid = st.st_gid; names_counter += name_len; files_info_counter++; } closedir(d); 
files_info[files_info_counter].name_offset = (size_t) -1; *names_pnt = names; if (opt_t) { if (opt_r) { cmp_func = cmp_rt; } else { cmp_func = cmp_t; } } else if (opt_S) { if (opt_r) { cmp_func = cmp_rS; } else { cmp_func = cmp_S; } } else if (opt_r) { cmp_func = cmp_r; } else { cmp_func = cmp; } qsort(files_info, files_info_counter, sizeof files_info[0], cmp_func); return files_info; } /* have to change to the directory first (speed hack for -R) */ static void listdir(unsigned int depth, int f, void * const tls_fd, const char *name) { PureFileInfo *dir; char *names; PureFileInfo *s; PureFileInfo *r; char *alloca_subdir; size_t sizeof_subdir; int d; if (depth >= max_ls_depth || matches >= max_ls_files) { return; } if ((dir = sreaddir(&names)) == NULL) { addreply(226, MSG_CANT_READ_FILE, name); return; } s = dir; while (s->name_offset != (size_t) -1) { d = 0; if (FI_NAME(s)[0] != '.') { d = listfile(s, NULL); } else if (opt_a) { if (FI_NAME(s)[1] == 0 || (FI_NAME(s)[1] == '.' && FI_NAME(s)[2] == 0)) { listfile(s, NULL); } else { d = listfile(s, NULL); } } if (!d) { s->name_offset = (size_t) -1; } s++; } outputfiles(f, tls_fd); r = dir; sizeof_subdir = PATH_MAX + 1U; if ((alloca_subdir = ALLOCA(sizeof_subdir)) == NULL) { goto toomany; } while (opt_R && r != s) { if (r->name_offset != (size_t) -1 && !chdir(FI_NAME(r))) { if (SNCHECK(snprintf(alloca_subdir, sizeof_subdir, "%s/%s", name, FI_NAME(r)), sizeof_subdir)) { goto nolist; } wrstr(f, tls_fd, "\r\n\r\n"); wrstr(f, tls_fd, alloca_subdir); wrstr(f, tls_fd, ":\r\n\r\n"); listdir(depth + 1U, f, tls_fd, alloca_subdir); nolist: if (matches >= max_ls_files) { goto toomany; } if (chdir("..")) { /* defensive in the extreme... */ if (chdir(wd) || chdir(name)) { /* someone rmdir()'d it? */ die(421, LOG_ERR, "chdir: %s", strerror(errno)); } } } r++; } toomany: ALLOCA_FREE(alloca_subdir); free(names); free(dir); names = NULL; } static char *unescape_and_return_next_file(char * const str) { char *pnt = str; signed char seen_backslash = 0; while (*pnt != 0) { if (seen_backslash == 0) { if (*pnt == '\\') { seen_backslash = 1; } else if (*pnt == ' ') { *pnt++ = 0; if (*pnt != 0) { return pnt; } break; } pnt++; } else { seen_backslash = 0; if (*pnt == ' ' || *pnt == '\\' || *pnt == '{' || *pnt == '}') { memmove(pnt - 1, pnt, strlen(pnt) + (size_t) 1U); } } } return NULL; } void dolist(char *arg, const int on_ctrl_conn) { int c; void *tls_fd = NULL; matches = 0U; opt_a = opt_C = opt_d = opt_F = opt_R = opt_r = opt_t = opt_S = 0; opt_l = 1; if (force_ls_a != 0) { opt_a = 1; } if (arg != NULL) { while (isspace((unsigned char) *arg)) { arg++; } while (*arg == '-') { while (arg++ && isalnum((unsigned char) *arg)) { switch (*arg) { case 'a': opt_a = 1; break; case 'l': opt_l = 1; opt_C = 0; break; case '1': opt_l = opt_C = 0; break; case 'C': opt_l = 0; opt_C = 1; break; case 'F': opt_F = 1; break; case 'R': opt_R = 1; break; case 'd': opt_d = 1; break; case 'r': opt_r = 1; break; case 't': opt_t = 1; opt_S = 0; break; case 'S': opt_S = 1; opt_t = 0; break; } } while (isspace((unsigned char) *arg)) { arg++; } } } if (on_ctrl_conn == 0) { opendata(); if ((c = xferfd) == -1) { return; } doreply(); #ifdef WITH_TLS if (data_protection_level == CPL_PRIVATE) { tls_init_data_session(xferfd, passive); tls_fd = tls_data_cnx; } #endif } else { /* STAT command */ c = clientfd; #ifdef WITH_TLS if (tls_cnx != NULL) { secure_safe_write(tls_cnx, "213-STAT" CRLF, sizeof "213-STAT" CRLF - 1U); tls_fd = tls_cnx; } else #endif { safe_write(c, "213-STAT" CRLF, sizeof "213-STAT" 
CRLF - 1U, -1); } } if (arg != NULL && *arg != 0) { int justone; justone = 1; /* just one argument, so don't print dir name */ do { glob_t g; int a; char *endarg; if ((endarg = unescape_and_return_next_file(arg)) != NULL) { justone = 0; } /* Expand ~ here if needed */ alarm(GLOB_TIMEOUT); memset(&g, 0, sizeof g); a = sglob(arg, opt_a ? (GLOB_PERIOD | GLOB_LIMIT) : GLOB_LIMIT, NULL, &g, max_ls_files + 2, max_ls_depth * 2); alarm(0); if (a == 0) { char **path; if (g.gl_pathc <= 0) { path = NULL; } else { path = g.gl_pathv; } if (path != NULL && path[0] != NULL && path[1] != NULL) { justone = 0; } while (path != NULL && *path != NULL) { struct stat st; if (stat(*path, &st) == 0) { if (opt_d || !(S_ISDIR(st.st_mode))) { listfile(NULL, *path); **path = 0; } } else { **path = 0; } path++; } outputfiles(c, tls_fd); /* in case of opt_C */ path = g.gl_pathv; while (path != NULL && *path != NULL) { if (matches >= max_ls_files) { break; } if (**path != 0) { if (!justone) { wrstr(c, tls_fd, "\r\n\r\n"); wrstr(c, tls_fd, *path); wrstr(c, tls_fd, ":\r\n\r\n"); } if (!chdir(*path)) { listdir(0U, c, tls_fd, *path); if (chdir(wd)) { die(421, LOG_ERR, "chdir: %s", strerror(errno)); } } } path++; } } else { if (a == GLOB_NOSPACE) { addreply(226, MSG_GLOB_NO_MEMORY, arg); addreply_noformat(0, MSG_PROBABLY_DENIED); } else if (a == GLOB_ABEND) { addreply(226, MSG_GLOB_READ_ERROR, arg); } else if (a != GLOB_NOMATCH) { addreply(226, MSG_GLOB_READ_ERROR, arg); addreply_noformat(0, MSG_PROBABLY_DENIED); } } globfree(&g); arg = endarg; } while (arg != NULL); } else { if (opt_d) { listfile(NULL, "."); } else { listdir(0U, c, tls_fd, "."); } outputfiles(c, tls_fd); } wrstr(c, tls_fd, NULL); if (on_ctrl_conn == 0) { #ifdef WITH_TLS closedata(); #endif close(c); } else { addreply_noformat(213, "End."); goto end; } if (opt_a || opt_C || opt_d || opt_F || opt_l || opt_r || opt_R || opt_t || opt_S) addreply(0, "Options: %s%s%s%s%s%s%s%s%s", opt_a ? "-a " : "", opt_C ? "-C " : "", opt_d ? "-d " : "", opt_F ? "-F " : "", opt_l ? "-l " : "", opt_r ? "-r " : "", opt_R ? "-R " : "", opt_S ? "-S " : "", opt_t ? 
"-t" : ""); if (matches >= max_ls_files) { addreply(226, MSG_LS_TRUNCATED, matches); } else { addreply(226, MSG_LS_SUCCESS, matches); } end: if (chdir(wd)) { die(421, LOG_ERR, "chdir: %s", strerror(errno)); } } void donlst(const char *base) { char line[PATH_MAX + 3U]; DIR *dir; void *tls_fd = NULL; struct dirent *de; size_t name_len; unsigned int matches = 0; int c; int base_has_trailing_slash = 0; if (*base != 0 && chdir(base) != 0) { if (*base++ == '-') { while (!isspace((unsigned char) *base++)); while (isspace((unsigned char) *base++)); if (*base != 0 && chdir(base) != 0) { addreply_noformat(550, MSG_STAT_FAILURE2); return; } } else { addreply_noformat(550, MSG_STAT_FAILURE2); return; } } if (*base !=0 && base[strlen(base) - 1U] == '/') { base_has_trailing_slash = 1; } if ((dir = opendir(".")) == NULL) { addreply_noformat(550, MSG_STAT_FAILURE2); goto bye; } opendata(); if ((c = xferfd) == -1) { goto bye; } doreply(); #ifdef WITH_TLS if (data_protection_level == CPL_PRIVATE) { tls_init_data_session(xferfd, passive); tls_fd = tls_data_cnx; } #endif while ((de = readdir(dir)) != NULL) { if (checkprintable(de->d_name) != 0) { continue; } name_len = strlen(de->d_name); if (name_len > sizeof line - 3U) { continue; } memcpy(line, de->d_name, name_len); line[name_len] = '\r'; line[name_len + 1] = '\n'; line[name_len + 2] = 0; if (*base) { wrstr(c, tls_fd, base); if (base_has_trailing_slash == 0) { wrstr(c, tls_fd, "/"); } } wrstr(c, tls_fd, line); matches++; if (matches >= max_ls_files) { break; } } closedir(dir); wrstr(c, tls_fd, NULL); closedata(); if (matches >= max_ls_files) { addreply(226, MSG_LS_TRUNCATED, matches); } else { addreply(226, MSG_LS_SUCCESS, matches); } bye: if (chdir(wd) != 0) { die(421, LOG_ERR, "chdir: %s", strerror(errno)); } } void domlsd(const char *base) { char line[PATH_MAX + 1]; DIR *dir = NULL; void *tls_fd = NULL; struct dirent *de; unsigned int matches = 0; int c; if (*base != 0 && chdir(base) != 0) { if (*base++ == '-') { while (!isspace((unsigned char) *base++)); while (isspace((unsigned char) *base++)); if (*base != 0 && chdir(base) != 0) { addreply_noformat(550, MSG_STAT_FAILURE2); return; } } else { addreply_noformat(550, MSG_STAT_FAILURE2); return; } } if ((dir = opendir(".")) == NULL) { addreply_noformat(550, MSG_STAT_FAILURE2); goto bye; } opendata(); if ((c = xferfd) == -1) { goto bye; } doreply(); #ifdef WITH_TLS if (data_protection_level == CPL_PRIVATE) { tls_init_data_session(xferfd, passive); tls_fd = tls_data_cnx; } #endif while ((de = readdir(dir)) != NULL) { if (checkprintable(de->d_name) != 0 || modernformat(de->d_name, line, sizeof line, "") < 0) { continue; } wrstr(c, tls_fd, line); wrstr(c, tls_fd, "\r\n"); matches++; if (matches >= max_ls_files) { break; } } wrstr(c, tls_fd, NULL); closedata(); if (matches >= max_ls_files) { addreply(226, MSG_LS_TRUNCATED, matches); } else { addreply(226, MSG_LS_SUCCESS, matches); } bye: if (dir != NULL) { closedir(dir); } if (chdir(wd) != 0) { die(421, LOG_ERR, "chdir: %s", strerror(errno)); } }
./CrossVul/dataset_final_sorted/CWE-400/c/good_1351_0
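/*
 * Editor's note (illustrative sketch, not part of the dataset record above):
 * wrstr() in the listing code above coalesces many small strings into one
 * static buffer and only writes to the descriptor when the buffer fills or
 * when it is flushed by passing NULL.  The stand-alone sketch below shows the
 * same pattern with plain write(); the buffer size and function name are
 * invented for the example, it always copies through the buffer (the original
 * also streams long strings directly), and error handling is elided.
 */
#include <string.h>
#include <unistd.h>

static char outbuf[4096];
static size_t outcnt;

/* Append s to the buffer, draining to fd whenever the buffer becomes full.
 * Passing s == NULL flushes whatever is still pending. */
static void buffered_write(int fd, const char *s)
{
    size_t l;

    if (s == NULL) {                       /* explicit flush */
        if (outcnt > 0)
            (void) write(fd, outbuf, outcnt);
        outcnt = 0;
        return;
    }
    l = strlen(s);
    while (l > 0) {
        size_t room = sizeof outbuf - outcnt;
        size_t n = l < room ? l : room;

        memcpy(outbuf + outcnt, s, n);
        outcnt += n;
        s += n;
        l -= n;
        if (outcnt == sizeof outbuf) {     /* buffer full: drain it */
            if (write(fd, outbuf, outcnt) != (ssize_t) outcnt)
                return;
            outcnt = 0;
        }
    }
}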
crossvul-cpp_data_good_812_2
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <setjmp.h> #include <limits.h> #include "regexp.h" #include "utf.h" #define emit regemit #define next regnext #define accept regaccept #define nelem(a) (int)(sizeof (a) / sizeof (a)[0]) #define REPINF 255 #define MAXSUB REG_MAXSUB #define MAXPROG (32 << 10) #define MAXREC 1024 typedef struct Reclass Reclass; typedef struct Renode Renode; typedef struct Reinst Reinst; typedef struct Rethread Rethread; struct Reclass { Rune *end; Rune spans[64]; }; struct Reprog { Reinst *start, *end; int flags; int nsub; Reclass cclass[16]; }; struct cstate { Reprog *prog; Renode *pstart, *pend; const char *source; int ncclass; int nsub; Renode *sub[MAXSUB]; int lookahead; Rune yychar; Reclass *yycc; int yymin, yymax; const char *error; jmp_buf kaboom; }; static void die(struct cstate *g, const char *message) { g->error = message; longjmp(g->kaboom, 1); } static int canon(Rune c) { Rune u = toupperrune(c); if (c >= 128 && u < 128) return c; return u; } /* Scan */ enum { L_CHAR = 256, L_CCLASS, /* character class */ L_NCCLASS, /* negative character class */ L_NC, /* "(?:" no capture */ L_PLA, /* "(?=" positive lookahead */ L_NLA, /* "(?!" negative lookahead */ L_WORD, /* "\b" word boundary */ L_NWORD, /* "\B" non-word boundary */ L_REF, /* "\1" back-reference */ L_COUNT, /* {M,N} */ }; static int hex(struct cstate *g, int c) { if (c >= '0' && c <= '9') return c - '0'; if (c >= 'a' && c <= 'f') return c - 'a' + 0xA; if (c >= 'A' && c <= 'F') return c - 'A' + 0xA; die(g, "invalid escape sequence"); return 0; } static int dec(struct cstate *g, int c) { if (c >= '0' && c <= '9') return c - '0'; die(g, "invalid quantifier"); return 0; } #define ESCAPES "BbDdSsWw^$\\.*+?()[]{}|0123456789" static int isunicodeletter(int c) { return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || isalpharune(c); } static int nextrune(struct cstate *g) { g->source += chartorune(&g->yychar, g->source); if (g->yychar == '\\') { g->source += chartorune(&g->yychar, g->source); switch (g->yychar) { case 0: die(g, "unterminated escape sequence"); break; case 'f': g->yychar = '\f'; return 0; case 'n': g->yychar = '\n'; return 0; case 'r': g->yychar = '\r'; return 0; case 't': g->yychar = '\t'; return 0; case 'v': g->yychar = '\v'; return 0; case 'c': g->yychar = (*g->source++) & 31; return 0; case 'x': g->yychar = hex(g, *g->source++) << 4; g->yychar += hex(g, *g->source++); if (g->yychar == 0) { g->yychar = '0'; return 1; } return 0; case 'u': g->yychar = hex(g, *g->source++) << 12; g->yychar += hex(g, *g->source++) << 8; g->yychar += hex(g, *g->source++) << 4; g->yychar += hex(g, *g->source++); if (g->yychar == 0) { g->yychar = '0'; return 1; } return 0; } if (strchr(ESCAPES, g->yychar)) return 1; if (isunicodeletter(g->yychar) || g->yychar == '_') /* check identity escape */ die(g, "invalid escape character"); return 0; } return 0; } static int lexcount(struct cstate *g) { g->yychar = *g->source++; g->yymin = dec(g, g->yychar); g->yychar = *g->source++; while (g->yychar != ',' && g->yychar != '}') { g->yymin = g->yymin * 10 + dec(g, g->yychar); g->yychar = *g->source++; if (g->yymin >= REPINF) die(g, "numeric overflow"); } if (g->yychar == ',') { g->yychar = *g->source++; if (g->yychar == '}') { g->yymax = REPINF; } else { g->yymax = dec(g, g->yychar); g->yychar = *g->source++; while (g->yychar != '}') { g->yymax = g->yymax * 10 + dec(g, g->yychar); g->yychar = *g->source++; if (g->yymax >= REPINF) die(g, "numeric overflow"); } } } else { g->yymax = g->yymin; } 
return L_COUNT; } static void newcclass(struct cstate *g) { if (g->ncclass >= nelem(g->prog->cclass)) die(g, "too many character classes"); g->yycc = g->prog->cclass + g->ncclass++; g->yycc->end = g->yycc->spans; } static void addrange(struct cstate *g, Rune a, Rune b) { if (a > b) die(g, "invalid character class range"); if (g->yycc->end + 2 == g->yycc->spans + nelem(g->yycc->spans)) die(g, "too many character class ranges"); *g->yycc->end++ = a; *g->yycc->end++ = b; } static void addranges_d(struct cstate *g) { addrange(g, '0', '9'); } static void addranges_D(struct cstate *g) { addrange(g, 0, '0'-1); addrange(g, '9'+1, 0xFFFF); } static void addranges_s(struct cstate *g) { addrange(g, 0x9, 0xD); addrange(g, 0x20, 0x20); addrange(g, 0xA0, 0xA0); addrange(g, 0x2028, 0x2029); addrange(g, 0xFEFF, 0xFEFF); } static void addranges_S(struct cstate *g) { addrange(g, 0, 0x9-1); addrange(g, 0xD+1, 0x20-1); addrange(g, 0x20+1, 0xA0-1); addrange(g, 0xA0+1, 0x2028-1); addrange(g, 0x2029+1, 0xFEFF-1); addrange(g, 0xFEFF+1, 0xFFFF); } static void addranges_w(struct cstate *g) { addrange(g, '0', '9'); addrange(g, 'A', 'Z'); addrange(g, '_', '_'); addrange(g, 'a', 'z'); } static void addranges_W(struct cstate *g) { addrange(g, 0, '0'-1); addrange(g, '9'+1, 'A'-1); addrange(g, 'Z'+1, '_'-1); addrange(g, '_'+1, 'a'-1); addrange(g, 'z'+1, 0xFFFF); } static int lexclass(struct cstate *g) { int type = L_CCLASS; int quoted, havesave, havedash; Rune save = 0; newcclass(g); quoted = nextrune(g); if (!quoted && g->yychar == '^') { type = L_NCCLASS; quoted = nextrune(g); } havesave = havedash = 0; for (;;) { if (g->yychar == 0) die(g, "unterminated character class"); if (!quoted && g->yychar == ']') break; if (!quoted && g->yychar == '-') { if (havesave) { if (havedash) { addrange(g, save, '-'); havesave = havedash = 0; } else { havedash = 1; } } else { save = '-'; havesave = 1; } } else if (quoted && strchr("DSWdsw", g->yychar)) { if (havesave) { addrange(g, save, save); if (havedash) addrange(g, '-', '-'); } switch (g->yychar) { case 'd': addranges_d(g); break; case 's': addranges_s(g); break; case 'w': addranges_w(g); break; case 'D': addranges_D(g); break; case 'S': addranges_S(g); break; case 'W': addranges_W(g); break; } havesave = havedash = 0; } else { if (quoted) { if (g->yychar == 'b') g->yychar = '\b'; else if (g->yychar == '0') g->yychar = 0; /* else identity escape */ } if (havesave) { if (havedash) { addrange(g, save, g->yychar); havesave = havedash = 0; } else { addrange(g, save, save); save = g->yychar; } } else { save = g->yychar; havesave = 1; } } quoted = nextrune(g); } if (havesave) { addrange(g, save, save); if (havedash) addrange(g, '-', '-'); } return type; } static int lex(struct cstate *g) { int quoted = nextrune(g); if (quoted) { switch (g->yychar) { case 'b': return L_WORD; case 'B': return L_NWORD; case 'd': newcclass(g); addranges_d(g); return L_CCLASS; case 's': newcclass(g); addranges_s(g); return L_CCLASS; case 'w': newcclass(g); addranges_w(g); return L_CCLASS; case 'D': newcclass(g); addranges_d(g); return L_NCCLASS; case 'S': newcclass(g); addranges_s(g); return L_NCCLASS; case 'W': newcclass(g); addranges_w(g); return L_NCCLASS; case '0': g->yychar = 0; return L_CHAR; } if (g->yychar >= '0' && g->yychar <= '9') { g->yychar -= '0'; if (*g->source >= '0' && *g->source <= '9') g->yychar = g->yychar * 10 + *g->source++ - '0'; return L_REF; } return L_CHAR; } switch (g->yychar) { case 0: case '$': case ')': case '*': case '+': case '.': case '?': case '^': case '|': return 
g->yychar; } if (g->yychar == '{') return lexcount(g); if (g->yychar == '[') return lexclass(g); if (g->yychar == '(') { if (g->source[0] == '?') { if (g->source[1] == ':') { g->source += 2; return L_NC; } if (g->source[1] == '=') { g->source += 2; return L_PLA; } if (g->source[1] == '!') { g->source += 2; return L_NLA; } } return '('; } return L_CHAR; } /* Parse */ enum { P_CAT, P_ALT, P_REP, P_BOL, P_EOL, P_WORD, P_NWORD, P_PAR, P_PLA, P_NLA, P_ANY, P_CHAR, P_CCLASS, P_NCCLASS, P_REF, }; struct Renode { unsigned char type; unsigned char ng, m, n; Rune c; Reclass *cc; Renode *x; Renode *y; }; static Renode *newnode(struct cstate *g, int type) { Renode *node = g->pend++; node->type = type; node->cc = NULL; node->c = 0; node->ng = 0; node->m = 0; node->n = 0; node->x = node->y = NULL; return node; } static int empty(Renode *node) { if (!node) return 1; switch (node->type) { default: return 1; case P_CAT: return empty(node->x) && empty(node->y); case P_ALT: return empty(node->x) || empty(node->y); case P_REP: return empty(node->x) || node->m == 0; case P_PAR: return empty(node->x); case P_REF: return empty(node->x); case P_ANY: case P_CHAR: case P_CCLASS: case P_NCCLASS: return 0; } } static Renode *newrep(struct cstate *g, Renode *atom, int ng, int min, int max) { Renode *rep = newnode(g, P_REP); if (max == REPINF && empty(atom)) die(g, "infinite loop matching the empty string"); rep->ng = ng; rep->m = min; rep->n = max; rep->x = atom; return rep; } static void next(struct cstate *g) { g->lookahead = lex(g); } static int accept(struct cstate *g, int t) { if (g->lookahead == t) { next(g); return 1; } return 0; } static Renode *parsealt(struct cstate *g); static Renode *parseatom(struct cstate *g) { Renode *atom; if (g->lookahead == L_CHAR) { atom = newnode(g, P_CHAR); atom->c = g->yychar; next(g); return atom; } if (g->lookahead == L_CCLASS) { atom = newnode(g, P_CCLASS); atom->cc = g->yycc; next(g); return atom; } if (g->lookahead == L_NCCLASS) { atom = newnode(g, P_NCCLASS); atom->cc = g->yycc; next(g); return atom; } if (g->lookahead == L_REF) { atom = newnode(g, P_REF); if (g->yychar == 0 || g->yychar >= g->nsub || !g->sub[g->yychar]) die(g, "invalid back-reference"); atom->n = g->yychar; atom->x = g->sub[g->yychar]; next(g); return atom; } if (accept(g, '.')) return newnode(g, P_ANY); if (accept(g, '(')) { atom = newnode(g, P_PAR); if (g->nsub == MAXSUB) die(g, "too many captures"); atom->n = g->nsub++; atom->x = parsealt(g); g->sub[atom->n] = atom; if (!accept(g, ')')) die(g, "unmatched '('"); return atom; } if (accept(g, L_NC)) { atom = parsealt(g); if (!accept(g, ')')) die(g, "unmatched '('"); return atom; } if (accept(g, L_PLA)) { atom = newnode(g, P_PLA); atom->x = parsealt(g); if (!accept(g, ')')) die(g, "unmatched '('"); return atom; } if (accept(g, L_NLA)) { atom = newnode(g, P_NLA); atom->x = parsealt(g); if (!accept(g, ')')) die(g, "unmatched '('"); return atom; } die(g, "syntax error"); return NULL; } static Renode *parserep(struct cstate *g) { Renode *atom; if (accept(g, '^')) return newnode(g, P_BOL); if (accept(g, '$')) return newnode(g, P_EOL); if (accept(g, L_WORD)) return newnode(g, P_WORD); if (accept(g, L_NWORD)) return newnode(g, P_NWORD); atom = parseatom(g); if (g->lookahead == L_COUNT) { int min = g->yymin, max = g->yymax; next(g); if (max < min) die(g, "invalid quantifier"); return newrep(g, atom, accept(g, '?'), min, max); } if (accept(g, '*')) return newrep(g, atom, accept(g, '?'), 0, REPINF); if (accept(g, '+')) return newrep(g, atom, accept(g, '?'), 1, 
REPINF); if (accept(g, '?')) return newrep(g, atom, accept(g, '?'), 0, 1); return atom; } static Renode *parsecat(struct cstate *g) { Renode *cat, *head, **tail; if (g->lookahead && g->lookahead != '|' && g->lookahead != ')') { /* Build a right-leaning tree by splicing in new 'cat' at the tail. */ head = parserep(g); tail = &head; while (g->lookahead && g->lookahead != '|' && g->lookahead != ')') { cat = newnode(g, P_CAT); cat->x = *tail; cat->y = parserep(g); *tail = cat; tail = &cat->y; } return head; } return NULL; } static Renode *parsealt(struct cstate *g) { Renode *alt, *x; alt = parsecat(g); while (accept(g, '|')) { x = alt; alt = newnode(g, P_ALT); alt->x = x; alt->y = parsecat(g); } return alt; } /* Compile */ enum { I_END, I_JUMP, I_SPLIT, I_PLA, I_NLA, I_ANYNL, I_ANY, I_CHAR, I_CCLASS, I_NCCLASS, I_REF, I_BOL, I_EOL, I_WORD, I_NWORD, I_LPAR, I_RPAR }; struct Reinst { unsigned char opcode; unsigned char n; Rune c; Reclass *cc; Reinst *x; Reinst *y; }; static int count(struct cstate *g, Renode *node) { int min, max, n; if (!node) return 0; switch (node->type) { default: return 1; case P_CAT: return count(g, node->x) + count(g, node->y); case P_ALT: return count(g, node->x) + count(g, node->y) + 2; case P_REP: min = node->m; max = node->n; if (min == max) n = count(g, node->x) * min; else if (max < REPINF) n = count(g, node->x) * max + (max - min); else n = count(g, node->x) * (min + 1) + 2; if (n < 0 || n > MAXPROG) die(g, "program too large"); return n; case P_PAR: return count(g, node->x) + 2; case P_PLA: return count(g, node->x) + 2; case P_NLA: return count(g, node->x) + 2; } } static Reinst *emit(Reprog *prog, int opcode) { Reinst *inst = prog->end++; inst->opcode = opcode; inst->n = 0; inst->c = 0; inst->cc = NULL; inst->x = inst->y = NULL; return inst; } static void compile(Reprog *prog, Renode *node) { Reinst *inst, *split, *jump; int i; if (!node) return; loop: switch (node->type) { case P_CAT: compile(prog, node->x); node = node->y; goto loop; case P_ALT: split = emit(prog, I_SPLIT); compile(prog, node->x); jump = emit(prog, I_JUMP); compile(prog, node->y); split->x = split + 1; split->y = jump + 1; jump->x = prog->end; break; case P_REP: inst = NULL; /* silence compiler warning. assert(node->m > 0). 
*/ for (i = 0; i < node->m; ++i) { inst = prog->end; compile(prog, node->x); } if (node->m == node->n) break; if (node->n < REPINF) { for (i = node->m; i < node->n; ++i) { split = emit(prog, I_SPLIT); compile(prog, node->x); if (node->ng) { split->y = split + 1; split->x = prog->end; } else { split->x = split + 1; split->y = prog->end; } } } else if (node->m == 0) { split = emit(prog, I_SPLIT); compile(prog, node->x); jump = emit(prog, I_JUMP); if (node->ng) { split->y = split + 1; split->x = prog->end; } else { split->x = split + 1; split->y = prog->end; } jump->x = split; } else { split = emit(prog, I_SPLIT); if (node->ng) { split->y = inst; split->x = prog->end; } else { split->x = inst; split->y = prog->end; } } break; case P_BOL: emit(prog, I_BOL); break; case P_EOL: emit(prog, I_EOL); break; case P_WORD: emit(prog, I_WORD); break; case P_NWORD: emit(prog, I_NWORD); break; case P_PAR: inst = emit(prog, I_LPAR); inst->n = node->n; compile(prog, node->x); inst = emit(prog, I_RPAR); inst->n = node->n; break; case P_PLA: split = emit(prog, I_PLA); compile(prog, node->x); emit(prog, I_END); split->x = split + 1; split->y = prog->end; break; case P_NLA: split = emit(prog, I_NLA); compile(prog, node->x); emit(prog, I_END); split->x = split + 1; split->y = prog->end; break; case P_ANY: emit(prog, I_ANY); break; case P_CHAR: inst = emit(prog, I_CHAR); inst->c = (prog->flags & REG_ICASE) ? canon(node->c) : node->c; break; case P_CCLASS: inst = emit(prog, I_CCLASS); inst->cc = node->cc; break; case P_NCCLASS: inst = emit(prog, I_NCCLASS); inst->cc = node->cc; break; case P_REF: inst = emit(prog, I_REF); inst->n = node->n; break; } } #ifdef TEST static void dumpnode(Renode *node) { Rune *p; if (!node) { printf("Empty"); return; } switch (node->type) { case P_CAT: printf("Cat("); dumpnode(node->x); printf(", "); dumpnode(node->y); printf(")"); break; case P_ALT: printf("Alt("); dumpnode(node->x); printf(", "); dumpnode(node->y); printf(")"); break; case P_REP: printf(node->ng ? 
"NgRep(%d,%d," : "Rep(%d,%d,", node->m, node->n); dumpnode(node->x); printf(")"); break; case P_BOL: printf("Bol"); break; case P_EOL: printf("Eol"); break; case P_WORD: printf("Word"); break; case P_NWORD: printf("NotWord"); break; case P_PAR: printf("Par(%d,", node->n); dumpnode(node->x); printf(")"); break; case P_PLA: printf("PLA("); dumpnode(node->x); printf(")"); break; case P_NLA: printf("NLA("); dumpnode(node->x); printf(")"); break; case P_ANY: printf("Any"); break; case P_CHAR: printf("Char(%c)", node->c); break; case P_CCLASS: printf("Class("); for (p = node->cc->spans; p < node->cc->end; p += 2) printf("%02X-%02X,", p[0], p[1]); printf(")"); break; case P_NCCLASS: printf("NotClass("); for (p = node->cc->spans; p < node->cc->end; p += 2) printf("%02X-%02X,", p[0], p[1]); printf(")"); break; case P_REF: printf("Ref(%d)", node->n); break; } } static void dumpprog(Reprog *prog) { Reinst *inst; int i; for (i = 0, inst = prog->start; inst < prog->end; ++i, ++inst) { printf("% 5d: ", i); switch (inst->opcode) { case I_END: puts("end"); break; case I_JUMP: printf("jump %d\n", (int)(inst->x - prog->start)); break; case I_SPLIT: printf("split %d %d\n", (int)(inst->x - prog->start), (int)(inst->y - prog->start)); break; case I_PLA: printf("pla %d %d\n", (int)(inst->x - prog->start), (int)(inst->y - prog->start)); break; case I_NLA: printf("nla %d %d\n", (int)(inst->x - prog->start), (int)(inst->y - prog->start)); break; case I_ANY: puts("any"); break; case I_ANYNL: puts("anynl"); break; case I_CHAR: printf(inst->c >= 32 && inst->c < 127 ? "char '%c'\n" : "char U+%04X\n", inst->c); break; case I_CCLASS: puts("cclass"); break; case I_NCCLASS: puts("ncclass"); break; case I_REF: printf("ref %d\n", inst->n); break; case I_BOL: puts("bol"); break; case I_EOL: puts("eol"); break; case I_WORD: puts("word"); break; case I_NWORD: puts("nword"); break; case I_LPAR: printf("lpar %d\n", inst->n); break; case I_RPAR: printf("rpar %d\n", inst->n); break; } } } #endif Reprog *regcompx(void *(*alloc)(void *ctx, void *p, int n), void *ctx, const char *pattern, int cflags, const char **errorp) { struct cstate g; Renode *node; Reinst *split, *jump; int i, n; g.pstart = NULL; g.prog = NULL; if (setjmp(g.kaboom)) { if (errorp) *errorp = g.error; alloc(ctx, g.pstart, 0); alloc(ctx, g.prog, 0); return NULL; } g.prog = alloc(ctx, NULL, sizeof (Reprog)); if (!g.prog) die(&g, "cannot allocate regular expression"); n = strlen(pattern) * 2; if (n > MAXPROG) die(&g, "program too large"); if (n > 0) { g.pstart = g.pend = alloc(ctx, NULL, sizeof (Renode) * n); if (!g.pstart) die(&g, "cannot allocate regular expression parse list"); } g.source = pattern; g.ncclass = 0; g.nsub = 1; for (i = 0; i < MAXSUB; ++i) g.sub[i] = 0; g.prog->flags = cflags; next(&g); node = parsealt(&g); if (g.lookahead == ')') die(&g, "unmatched ')'"); if (g.lookahead != 0) die(&g, "syntax error"); #ifdef TEST dumpnode(node); putchar('\n'); #endif n = 6 + count(&g, node); if (n < 0 || n > MAXPROG) die(&g, "program too large"); g.prog->nsub = g.nsub; g.prog->start = g.prog->end = alloc(ctx, NULL, n * sizeof (Reinst)); if (!g.prog->start) die(&g, "cannot allocate regular expression instruction list"); split = emit(g.prog, I_SPLIT); split->x = split + 3; split->y = split + 1; emit(g.prog, I_ANYNL); jump = emit(g.prog, I_JUMP); jump->x = split; emit(g.prog, I_LPAR); compile(g.prog, node); emit(g.prog, I_RPAR); emit(g.prog, I_END); #ifdef TEST dumpprog(g.prog); #endif alloc(ctx, g.pstart, 0); if (errorp) *errorp = NULL; return g.prog; } void 
regfreex(void *(*alloc)(void *ctx, void *p, int n), void *ctx, Reprog *prog) { if (prog) { alloc(ctx, prog->start, 0); alloc(ctx, prog, 0); } } static void *default_alloc(void *ctx, void *p, int n) { return realloc(p, (size_t)n); } Reprog *regcomp(const char *pattern, int cflags, const char **errorp) { return regcompx(default_alloc, NULL, pattern, cflags, errorp); } void regfree(Reprog *prog) { regfreex(default_alloc, NULL, prog); } /* Match */ static int isnewline(int c) { return c == 0xA || c == 0xD || c == 0x2028 || c == 0x2029; } static int iswordchar(int c) { return c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9'); } static int incclass(Reclass *cc, Rune c) { Rune *p; for (p = cc->spans; p < cc->end; p += 2) if (p[0] <= c && c <= p[1]) return 1; return 0; } static int incclasscanon(Reclass *cc, Rune c) { Rune *p, r; for (p = cc->spans; p < cc->end; p += 2) for (r = p[0]; r <= p[1]; ++r) if (c == canon(r)) return 1; return 0; } static int strncmpcanon(const char *a, const char *b, int n) { Rune ra, rb; int c; while (n--) { if (!*a) return -1; if (!*b) return 1; a += chartorune(&ra, a); b += chartorune(&rb, b); c = canon(ra) - canon(rb); if (c) return c; } return 0; } static int match(Reinst *pc, const char *sp, const char *bol, int flags, Resub *out, int depth) { Resub scratch; int result; int i; Rune c; /* stack overflow */ if (depth > MAXREC) return -1; for (;;) { switch (pc->opcode) { case I_END: return 0; case I_JUMP: pc = pc->x; break; case I_SPLIT: scratch = *out; result = match(pc->x, sp, bol, flags, &scratch, depth+1); if (result == -1) return -1; if (result == 0) { *out = scratch; return 0; } pc = pc->y; break; case I_PLA: result = match(pc->x, sp, bol, flags, out, depth+1); if (result == -1) return -1; if (result == 1) return 1; pc = pc->y; break; case I_NLA: scratch = *out; result = match(pc->x, sp, bol, flags, &scratch, depth+1); if (result == -1) return -1; if (result == 0) return 1; pc = pc->y; break; case I_ANYNL: sp += chartorune(&c, sp); if (c == 0) return 1; pc = pc + 1; break; case I_ANY: sp += chartorune(&c, sp); if (c == 0) return 1; if (isnewline(c)) return 1; pc = pc + 1; break; case I_CHAR: sp += chartorune(&c, sp); if (c == 0) return 1; if (flags & REG_ICASE) c = canon(c); if (c != pc->c) return 1; pc = pc + 1; break; case I_CCLASS: sp += chartorune(&c, sp); if (c == 0) return 1; if (flags & REG_ICASE) { if (!incclasscanon(pc->cc, canon(c))) return 1; } else { if (!incclass(pc->cc, c)) return 1; } pc = pc + 1; break; case I_NCCLASS: sp += chartorune(&c, sp); if (c == 0) return 1; if (flags & REG_ICASE) { if (incclasscanon(pc->cc, canon(c))) return 1; } else { if (incclass(pc->cc, c)) return 1; } pc = pc + 1; break; case I_REF: i = out->sub[pc->n].ep - out->sub[pc->n].sp; if (flags & REG_ICASE) { if (strncmpcanon(sp, out->sub[pc->n].sp, i)) return 1; } else { if (strncmp(sp, out->sub[pc->n].sp, i)) return 1; } if (i > 0) sp += i; pc = pc + 1; break; case I_BOL: if (sp == bol && !(flags & REG_NOTBOL)) { pc = pc + 1; break; } if (flags & REG_NEWLINE) { if (sp > bol && isnewline(sp[-1])) { pc = pc + 1; break; } } return 1; case I_EOL: if (*sp == 0) { pc = pc + 1; break; } if (flags & REG_NEWLINE) { if (isnewline(*sp)) { pc = pc + 1; break; } } return 1; case I_WORD: i = sp > bol && iswordchar(sp[-1]); i ^= iswordchar(sp[0]); if (!i) return 1; pc = pc + 1; break; case I_NWORD: i = sp > bol && iswordchar(sp[-1]); i ^= iswordchar(sp[0]); if (i) return 1; pc = pc + 1; break; case I_LPAR: out->sub[pc->n].sp = sp; pc = pc + 1; break; 
case I_RPAR: out->sub[pc->n].ep = sp; pc = pc + 1; break; default: return 1; } } } int regexec(Reprog *prog, const char *sp, Resub *sub, int eflags) { Resub scratch; int i; if (!sub) sub = &scratch; sub->nsub = prog->nsub; for (i = 0; i < MAXSUB; ++i) sub->sub[i].sp = sub->sub[i].ep = NULL; return match(prog->start, sp, sp, prog->flags | eflags, sub, 0); } #ifdef TEST int main(int argc, char **argv) { const char *error; const char *s; Reprog *p; Resub m; int i; if (argc > 1) { p = regcomp(argv[1], 0, &error); if (!p) { fprintf(stderr, "regcomp: %s\n", error); return 1; } if (argc > 2) { s = argv[2]; printf("nsub = %d\n", p->nsub); if (!regexec(p, s, &m, 0)) { for (i = 0; i < m.nsub; ++i) { int n = m.sub[i].ep - m.sub[i].sp; if (n > 0) printf("match %d: s=%d e=%d n=%d '%.*s'\n", i, (int)(m.sub[i].sp - s), (int)(m.sub[i].ep - s), n, n, m.sub[i].sp); else printf("match %d: n=0 ''\n", i); } } else { printf("no match\n"); } } } return 0; } #endif
./CrossVul/dataset_final_sorted/CWE-400/c/good_812_2
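/*
 * Editor's note (illustrative sketch): the compiler/matcher in the record
 * above exposes regcomp()/regexec()/regfree() and reports captures through
 * Resub, whose sub[i].sp/.ep pointers delimit each submatch.  This mirrors
 * the TEST main() embedded in that file; it assumes the project's "regexp.h"
 * header is on the include path, and the function name is made up.
 */
#include <stdio.h>
#include "regexp.h"

static int demo_match(const char *pattern, const char *text)
{
    const char *error;
    Reprog *prog;
    Resub m;
    int i;

    prog = regcomp(pattern, 0, &error);
    if (!prog) {
        fprintf(stderr, "regcomp: %s\n", error);
        return -1;
    }
    if (!regexec(prog, text, &m, 0)) {     /* 0 means "matched" */
        for (i = 0; i < m.nsub; ++i) {
            int n = (int)(m.sub[i].ep - m.sub[i].sp);
            if (n > 0)
                printf("capture %d: '%.*s'\n", i, n, m.sub[i].sp);
            else
                printf("capture %d: (empty)\n", i);
        }
    } else {
        printf("no match\n");
    }
    regfree(prog);
    return 0;
}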
crossvul-cpp_data_good_1266_0
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) /* Copyright (C) 2018 Netronome Systems, Inc. */ #include <linux/bitfield.h> #include <net/pkt_cls.h> #include "../nfpcore/nfp_cpp.h" #include "../nfp_app.h" #include "../nfp_net_repr.h" #include "main.h" struct nfp_abm_u32_match { u32 handle; u32 band; u8 mask; u8 val; struct list_head list; }; static bool nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode, __be16 proto, struct netlink_ext_ack *extack) { struct tc_u32_key *k; unsigned int tos_off; if (knode->exts && tcf_exts_has_actions(knode->exts)) { NL_SET_ERR_MSG_MOD(extack, "action offload not supported"); return false; } if (knode->link_handle) { NL_SET_ERR_MSG_MOD(extack, "linking not supported"); return false; } if (knode->sel->flags != TC_U32_TERMINAL) { NL_SET_ERR_MSG_MOD(extack, "flags must be equal to TC_U32_TERMINAL"); return false; } if (knode->sel->off || knode->sel->offshift || knode->sel->offmask || knode->sel->offoff || knode->fshift) { NL_SET_ERR_MSG_MOD(extack, "variable offsetting not supported"); return false; } if (knode->sel->hoff || knode->sel->hmask) { NL_SET_ERR_MSG_MOD(extack, "hashing not supported"); return false; } if (knode->val || knode->mask) { NL_SET_ERR_MSG_MOD(extack, "matching on mark not supported"); return false; } if (knode->res && knode->res->class) { NL_SET_ERR_MSG_MOD(extack, "setting non-0 class not supported"); return false; } if (knode->res && knode->res->classid >= abm->num_bands) { NL_SET_ERR_MSG_MOD(extack, "classid higher than number of bands"); return false; } if (knode->sel->nkeys != 1) { NL_SET_ERR_MSG_MOD(extack, "exactly one key required"); return false; } switch (proto) { case htons(ETH_P_IP): tos_off = 16; break; case htons(ETH_P_IPV6): tos_off = 20; break; default: NL_SET_ERR_MSG_MOD(extack, "only IP and IPv6 supported as filter protocol"); return false; } k = &knode->sel->keys[0]; if (k->offmask) { NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offsetting not supported"); return false; } if (k->off) { NL_SET_ERR_MSG_MOD(extack, "only DSCP fields can be matched"); return false; } if (k->val & ~k->mask) { NL_SET_ERR_MSG_MOD(extack, "mask does not cover the key"); return false; } if (be32_to_cpu(k->mask) >> tos_off & ~abm->dscp_mask) { NL_SET_ERR_MSG_MOD(extack, "only high DSCP class selector bits can be used"); nfp_err(abm->app->cpp, "u32 offload: requested mask %x FW can support only %x\n", be32_to_cpu(k->mask) >> tos_off, abm->dscp_mask); return false; } return true; } /* This filter list -> map conversion is O(n * m), we expect single digit or * low double digit number of prios and likewise for the filters. Also u32 * doesn't report stats, so it's really only setup time cost. 
*/ static unsigned int nfp_abm_find_band_for_prio(struct nfp_abm_link *alink, unsigned int prio) { struct nfp_abm_u32_match *iter; list_for_each_entry(iter, &alink->dscp_map, list) if ((prio & iter->mask) == iter->val) return iter->band; return alink->def_band; } static int nfp_abm_update_band_map(struct nfp_abm_link *alink) { unsigned int i, bits_per_prio, prios_per_word, base_shift; struct nfp_abm *abm = alink->abm; u32 field_mask; alink->has_prio = !list_empty(&alink->dscp_map); bits_per_prio = roundup_pow_of_two(order_base_2(abm->num_bands)); field_mask = (1 << bits_per_prio) - 1; prios_per_word = sizeof(u32) * BITS_PER_BYTE / bits_per_prio; /* FW mask applies from top bits */ base_shift = 8 - order_base_2(abm->num_prios); for (i = 0; i < abm->num_prios; i++) { unsigned int offset; u32 *word; u8 band; word = &alink->prio_map[i / prios_per_word]; offset = (i % prios_per_word) * bits_per_prio; band = nfp_abm_find_band_for_prio(alink, i << base_shift); *word &= ~(field_mask << offset); *word |= band << offset; } /* Qdisc offload status may change if has_prio changed */ nfp_abm_qdisc_offload_update(alink); return nfp_abm_ctrl_prio_map_update(alink, alink->prio_map); } static void nfp_abm_u32_knode_delete(struct nfp_abm_link *alink, struct tc_cls_u32_knode *knode) { struct nfp_abm_u32_match *iter; list_for_each_entry(iter, &alink->dscp_map, list) if (iter->handle == knode->handle) { list_del(&iter->list); kfree(iter); nfp_abm_update_band_map(alink); return; } } static int nfp_abm_u32_knode_replace(struct nfp_abm_link *alink, struct tc_cls_u32_knode *knode, __be16 proto, struct netlink_ext_ack *extack) { struct nfp_abm_u32_match *match = NULL, *iter; unsigned int tos_off; u8 mask, val; int err; if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack)) { err = -EOPNOTSUPP; goto err_delete; } tos_off = proto == htons(ETH_P_IP) ? 
16 : 20; /* Extract the DSCP Class Selector bits */ val = be32_to_cpu(knode->sel->keys[0].val) >> tos_off & 0xff; mask = be32_to_cpu(knode->sel->keys[0].mask) >> tos_off & 0xff; /* Check if there is no conflicting mapping and find match by handle */ list_for_each_entry(iter, &alink->dscp_map, list) { u32 cmask; if (iter->handle == knode->handle) { match = iter; continue; } cmask = iter->mask & mask; if ((iter->val & cmask) == (val & cmask) && iter->band != knode->res->classid) { NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter"); err = -EOPNOTSUPP; goto err_delete; } } if (!match) { match = kzalloc(sizeof(*match), GFP_KERNEL); if (!match) { err = -ENOMEM; goto err_delete; } list_add(&match->list, &alink->dscp_map); } match->handle = knode->handle; match->band = knode->res->classid; match->mask = mask; match->val = val; err = nfp_abm_update_band_map(alink); if (err) goto err_delete; return 0; err_delete: nfp_abm_u32_knode_delete(alink, knode); return err; } static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { struct tc_cls_u32_offload *cls_u32 = type_data; struct nfp_repr *repr = cb_priv; struct nfp_abm_link *alink; alink = repr->app_priv; if (type != TC_SETUP_CLSU32) { NL_SET_ERR_MSG_MOD(cls_u32->common.extack, "only offload of u32 classifier supported"); return -EOPNOTSUPP; } if (!tc_cls_can_offload_and_chain0(repr->netdev, &cls_u32->common)) return -EOPNOTSUPP; if (cls_u32->common.protocol != htons(ETH_P_IP) && cls_u32->common.protocol != htons(ETH_P_IPV6)) { NL_SET_ERR_MSG_MOD(cls_u32->common.extack, "only IP and IPv6 supported as filter protocol"); return -EOPNOTSUPP; } switch (cls_u32->command) { case TC_CLSU32_NEW_KNODE: case TC_CLSU32_REPLACE_KNODE: return nfp_abm_u32_knode_replace(alink, &cls_u32->knode, cls_u32->common.protocol, cls_u32->common.extack); case TC_CLSU32_DELETE_KNODE: nfp_abm_u32_knode_delete(alink, &cls_u32->knode); return 0; default: return -EOPNOTSUPP; } } static LIST_HEAD(nfp_abm_block_cb_list); int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr, struct flow_block_offload *f) { return flow_block_cb_setup_simple(f, &nfp_abm_block_cb_list, nfp_abm_setup_tc_block_cb, repr, repr, true); }
./CrossVul/dataset_final_sorted/CWE-400/c/good_1266_0
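/*
 * Editor's note (illustrative sketch): nfp_abm_update_band_map() in the
 * record above packs one small "band" value per priority into an array of
 * 32-bit words, using the smallest power-of-two field width that can hold
 * the band number so fields never straddle a word.  The stand-alone version
 * below repeats that bit-packing in plain C; order_base_2() and
 * roundup_pow_of_two() are kernel helpers, so simple replacements are
 * sketched here, and the caller is assumed to size prio_map[] correctly.
 */
#include <stdint.h>

/* Smallest n such that (1u << n) >= x, for x >= 1 (stand-in for order_base_2). */
static unsigned int ceil_log2(unsigned int x)
{
    unsigned int n = 0;

    while ((1u << n) < x)
        n++;
    return n;
}

/* Pack band[i] (0 <= band[i] < num_bands) for priorities 0..num_prios-1. */
static void pack_prio_map(uint32_t *prio_map, const uint8_t *band,
                          unsigned int num_prios, unsigned int num_bands)
{
    unsigned int bits_per_prio = ceil_log2(num_bands);
    unsigned int prios_per_word, i;
    uint32_t field_mask;

    if (bits_per_prio == 0)
        bits_per_prio = 1;
    while (bits_per_prio & (bits_per_prio - 1))  /* round up to a power of two */
        bits_per_prio++;

    field_mask = (1u << bits_per_prio) - 1;
    prios_per_word = 32 / bits_per_prio;

    for (i = 0; i < num_prios; i++) {
        uint32_t *word = &prio_map[i / prios_per_word];
        unsigned int offset = (i % prios_per_word) * bits_per_prio;

        *word &= ~(field_mask << offset);
        *word |= (uint32_t)band[i] << offset;
    }
}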
crossvul-cpp_data_good_5356_3
/* * IPV4 GSO/GRO offload support * Linux INET implementation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * GRE GSO support */ #include <linux/skbuff.h> #include <linux/init.h> #include <net/protocol.h> #include <net/gre.h> static struct sk_buff *gre_gso_segment(struct sk_buff *skb, netdev_features_t features) { int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); struct sk_buff *segs = ERR_PTR(-EINVAL); u16 mac_offset = skb->mac_header; __be16 protocol = skb->protocol; u16 mac_len = skb->mac_len; int gre_offset, outer_hlen; bool need_csum, ufo; if (unlikely(skb_shinfo(skb)->gso_type & ~(SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_TCP_ECN | SKB_GSO_GRE | SKB_GSO_GRE_CSUM | SKB_GSO_IPIP | SKB_GSO_SIT))) goto out; if (!skb->encapsulation) goto out; if (unlikely(tnl_hlen < sizeof(struct gre_base_hdr))) goto out; if (unlikely(!pskb_may_pull(skb, tnl_hlen))) goto out; /* setup inner skb. */ skb->encapsulation = 0; __skb_pull(skb, tnl_hlen); skb_reset_mac_header(skb); skb_set_network_header(skb, skb_inner_network_offset(skb)); skb->mac_len = skb_inner_network_offset(skb); skb->protocol = skb->inner_protocol; need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM); skb->encap_hdr_csum = need_csum; ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP); features &= skb->dev->hw_enc_features; /* The only checksum offload we care about from here on out is the * outer one so strip the existing checksum feature flags based * on the fact that we will be computing our checksum in software. */ if (ufo) { features &= ~NETIF_F_CSUM_MASK; if (!need_csum) features |= NETIF_F_HW_CSUM; } /* segment inner packet. */ segs = skb_mac_gso_segment(skb, features); if (IS_ERR_OR_NULL(segs)) { skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset, mac_len); goto out; } outer_hlen = skb_tnl_header_len(skb); gre_offset = outer_hlen - tnl_hlen; skb = segs; do { struct gre_base_hdr *greh; __be32 *pcsum; /* Set up inner headers if we are offloading inner checksum */ if (skb->ip_summed == CHECKSUM_PARTIAL) { skb_reset_inner_headers(skb); skb->encapsulation = 1; } skb->mac_len = mac_len; skb->protocol = protocol; __skb_push(skb, outer_hlen); skb_reset_mac_header(skb); skb_set_network_header(skb, mac_len); skb_set_transport_header(skb, gre_offset); if (!need_csum) continue; greh = (struct gre_base_hdr *)skb_transport_header(skb); pcsum = (__be32 *)(greh + 1); *pcsum = 0; *(__sum16 *)pcsum = gso_make_checksum(skb, 0); } while ((skb = skb->next)); out: return segs; } static struct sk_buff **gre_gro_receive(struct sk_buff **head, struct sk_buff *skb) { struct sk_buff **pp = NULL; struct sk_buff *p; const struct gre_base_hdr *greh; unsigned int hlen, grehlen; unsigned int off; int flush = 1; struct packet_offload *ptype; __be16 type; if (NAPI_GRO_CB(skb)->encap_mark) goto out; NAPI_GRO_CB(skb)->encap_mark = 1; off = skb_gro_offset(skb); hlen = off + sizeof(*greh); greh = skb_gro_header_fast(skb, off); if (skb_gro_header_hard(skb, hlen)) { greh = skb_gro_header_slow(skb, hlen, off); if (unlikely(!greh)) goto out; } /* Only support version 0 and K (key), C (csum) flags. 
Note that * although the support for the S (seq#) flag can be added easily * for GRO, this is problematic for GSO hence can not be enabled * here because a GRO pkt may end up in the forwarding path, thus * requiring GSO support to break it up correctly. */ if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0) goto out; type = greh->protocol; rcu_read_lock(); ptype = gro_find_receive_by_type(type); if (!ptype) goto out_unlock; grehlen = GRE_HEADER_SECTION; if (greh->flags & GRE_KEY) grehlen += GRE_HEADER_SECTION; if (greh->flags & GRE_CSUM) grehlen += GRE_HEADER_SECTION; hlen = off + grehlen; if (skb_gro_header_hard(skb, hlen)) { greh = skb_gro_header_slow(skb, hlen, off); if (unlikely(!greh)) goto out_unlock; } /* Don't bother verifying checksum if we're going to flush anyway. */ if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) { if (skb_gro_checksum_simple_validate(skb)) goto out_unlock; skb_gro_checksum_try_convert(skb, IPPROTO_GRE, 0, null_compute_pseudo); } for (p = *head; p; p = p->next) { const struct gre_base_hdr *greh2; if (!NAPI_GRO_CB(p)->same_flow) continue; /* The following checks are needed to ensure only pkts * from the same tunnel are considered for aggregation. * The criteria for "the same tunnel" includes: * 1) same version (we only support version 0 here) * 2) same protocol (we only support ETH_P_IP for now) * 3) same set of flags * 4) same key if the key field is present. */ greh2 = (struct gre_base_hdr *)(p->data + off); if (greh2->flags != greh->flags || greh2->protocol != greh->protocol) { NAPI_GRO_CB(p)->same_flow = 0; continue; } if (greh->flags & GRE_KEY) { /* compare keys */ if (*(__be32 *)(greh2+1) != *(__be32 *)(greh+1)) { NAPI_GRO_CB(p)->same_flow = 0; continue; } } } skb_gro_pull(skb, grehlen); /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/ skb_gro_postpull_rcsum(skb, greh, grehlen); pp = ptype->callbacks.gro_receive(head, skb); flush = 0; out_unlock: rcu_read_unlock(); out: NAPI_GRO_CB(skb)->flush |= flush; return pp; } static int gre_gro_complete(struct sk_buff *skb, int nhoff) { struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff); struct packet_offload *ptype; unsigned int grehlen = sizeof(*greh); int err = -ENOENT; __be16 type; skb->encapsulation = 1; skb_shinfo(skb)->gso_type = SKB_GSO_GRE; type = greh->protocol; if (greh->flags & GRE_KEY) grehlen += GRE_HEADER_SECTION; if (greh->flags & GRE_CSUM) grehlen += GRE_HEADER_SECTION; rcu_read_lock(); ptype = gro_find_complete_by_type(type); if (ptype) err = ptype->callbacks.gro_complete(skb, nhoff + grehlen); rcu_read_unlock(); skb_set_inner_mac_header(skb, nhoff + grehlen); return err; } static const struct net_offload gre_offload = { .callbacks = { .gso_segment = gre_gso_segment, .gro_receive = gre_gro_receive, .gro_complete = gre_gro_complete, }, }; static int __init gre_offload_init(void) { return inet_add_offload(&gre_offload, IPPROTO_GRE); } device_initcall(gre_offload_init);
./CrossVul/dataset_final_sorted/CWE-400/c/good_5356_3
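/*
 * Editor's note (illustrative sketch): both GRO paths in the record above
 * size the GRE header as a fixed 4-byte base plus one 4-byte section per
 * optional field flagged in the header (checksum, key), and reject any other
 * flag or version bits.  The self-contained helper below repeats that
 * calculation outside the kernel; the flag constants follow the standard GRE
 * bit layout but are defined locally for the example, in host byte order.
 */
#include <stddef.h>
#include <stdint.h>

#define GRE_HDR_SECTION 4u           /* every optional field is 32 bits */
#define GRE_FLAG_CSUM   0x8000u      /* C bit */
#define GRE_FLAG_KEY    0x2000u      /* K bit */

/* Returns the total GRE header length implied by the flags/version field,
 * or 0 for combinations the GRO code above would refuse to aggregate. */
static size_t gre_header_len(uint16_t flags)
{
    size_t len = GRE_HDR_SECTION;    /* flags + protocol */

    if (flags & ~(GRE_FLAG_CSUM | GRE_FLAG_KEY))
        return 0;                    /* version != 0 or unsupported flag */
    if (flags & GRE_FLAG_CSUM)
        len += GRE_HDR_SECTION;      /* checksum + reserved1 */
    if (flags & GRE_FLAG_KEY)
        len += GRE_HDR_SECTION;      /* key */
    return len;
}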
crossvul-cpp_data_bad_1272_3
/* * Copyright 2012-15 Advanced Micro Devices, Inc.cls * * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/slab.h> #include "dm_services.h" #include "stream_encoder.h" #include "resource.h" #include "include/irq_service_interface.h" #include "dce120_resource.h" #include "dce112/dce112_resource.h" #include "dce110/dce110_resource.h" #include "../virtual/virtual_stream_encoder.h" #include "dce120_timing_generator.h" #include "irq/dce120/irq_service_dce120.h" #include "dce/dce_opp.h" #include "dce/dce_clock_source.h" #include "dce/dce_ipp.h" #include "dce/dce_mem_input.h" #include "dce110/dce110_hw_sequencer.h" #include "dce120/dce120_hw_sequencer.h" #include "dce/dce_transform.h" #include "clk_mgr.h" #include "dce/dce_audio.h" #include "dce/dce_link_encoder.h" #include "dce/dce_stream_encoder.h" #include "dce/dce_hwseq.h" #include "dce/dce_abm.h" #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" #include "dce/dce_12_0_offset.h" #include "dce/dce_12_0_sh_mask.h" #include "soc15_hw_ip.h" #include "vega10_ip_offset.h" #include "nbio/nbio_6_1_offset.h" #include "mmhub/mmhub_9_4_0_offset.h" #include "mmhub/mmhub_9_4_0_sh_mask.h" #include "reg_helper.h" #include "dce100/dce100_resource.h" #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #endif enum dce120_clk_src_array_id { DCE120_CLK_SRC_PLL0, DCE120_CLK_SRC_PLL1, DCE120_CLK_SRC_PLL2, DCE120_CLK_SRC_PLL3, DCE120_CLK_SRC_PLL4, DCE120_CLK_SRC_PLL5, DCE120_CLK_SRC_TOTAL }; static const struct dce110_timing_generator_offsets dce120_tg_offsets[] = { { .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), }, { .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), }, { .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), }, { .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), }, { .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), }, { .crtc = (mmCRTC5_CRTC_CONTROL - 
mmCRTC0_CRTC_CONTROL), } }; /* begin ********************* * macros to expend register list macro defined in HW object header file */ #define BASE_INNER(seg) \ DCE_BASE__INST0_SEG ## seg #define NBIO_BASE_INNER(seg) \ NBIF_BASE__INST0_SEG ## seg #define NBIO_BASE(seg) \ NBIO_BASE_INNER(seg) /* compile time expand base address. */ #define BASE(seg) \ BASE_INNER(seg) #define SR(reg_name)\ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name #define SRI(reg_name, block, id)\ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name /* MMHUB */ #define MMHUB_BASE_INNER(seg) \ MMHUB_BASE__INST0_SEG ## seg #define MMHUB_BASE(seg) \ MMHUB_BASE_INNER(seg) #define MMHUB_SR(reg_name)\ .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name /* macros to expend register list macro defined in HW object header file * end *********************/ static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCE110_COMMON_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { DMCU_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_dmcu_mask dmcu_mask = { DMCU_MASK_SH_LIST_DCE110(_MASK) }; static const struct dce_abm_registers abm_regs = { ABM_DCE110_COMMON_REG_LIST() }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCE110(_MASK) }; #define ipp_regs(id)\ [id] = {\ IPP_DCE110_REG_LIST_DCE_BASE(id)\ } static const struct dce_ipp_registers ipp_regs[] = { ipp_regs(0), ipp_regs(1), ipp_regs(2), ipp_regs(3), ipp_regs(4), ipp_regs(5) }; static const struct dce_ipp_shift ipp_shift = { IPP_DCE120_MASK_SH_LIST_SOC_BASE(__SHIFT) }; static const struct dce_ipp_mask ipp_mask = { IPP_DCE120_MASK_SH_LIST_SOC_BASE(_MASK) }; #define transform_regs(id)\ [id] = {\ XFM_COMMON_REG_LIST_DCE110(id)\ } static const struct dce_transform_registers xfm_regs[] = { transform_regs(0), transform_regs(1), transform_regs(2), transform_regs(3), transform_regs(4), transform_regs(5) }; static const struct dce_transform_shift xfm_shift = { XFM_COMMON_MASK_SH_LIST_SOC_BASE(__SHIFT) }; static const struct dce_transform_mask xfm_mask = { XFM_COMMON_MASK_SH_LIST_SOC_BASE(_MASK) }; #define aux_regs(id)\ [id] = {\ AUX_REG_LIST(id)\ } static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3), aux_regs(4), aux_regs(5) }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3), hpd_regs(4), hpd_regs(5) }; #define link_regs(id)\ [id] = {\ LE_DCE120_REG_LIST(id), \ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ } static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(0), link_regs(1), link_regs(2), link_regs(3), link_regs(4), link_regs(5), link_regs(6), }; #define stream_enc_regs(id)\ [id] = {\ SE_COMMON_REG_LIST(id),\ .TMDS_CNTL = 0,\ } static const struct dce110_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), stream_enc_regs(3), stream_enc_regs(4), stream_enc_regs(5) }; static const struct dce_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCE120(__SHIFT) }; static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE120(_MASK) }; #define opp_regs(id)\ [id] = {\ OPP_DCE_120_REG_LIST(id),\ } static const struct dce_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), 
opp_regs(3), opp_regs(4), opp_regs(5) }; static const struct dce_opp_shift opp_shift = { OPP_COMMON_MASK_SH_LIST_DCE_120(__SHIFT) }; static const struct dce_opp_mask opp_mask = { OPP_COMMON_MASK_SH_LIST_DCE_120(_MASK) }; #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ .AUX_RESET_MASK = 0 \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4), aux_engine_regs(5) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), audio_regs(2), audio_regs(3), audio_regs(4), audio_regs(5) }; #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) static const struct dce_audio_shift audio_shift = { DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) }; #define clk_src_regs(index, id)\ [index] = {\ CS_COMMON_REG_LIST_DCE_112(id),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0, A), clk_src_regs(1, B), clk_src_regs(2, C), clk_src_regs(3, D), clk_src_regs(4, E), clk_src_regs(5, F) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCE_112(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCE_112(_MASK) }; struct output_pixel_processor *dce120_opp_create( struct dc_context *ctx, uint32_t inst) { struct dce110_opp *opp = kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); if (!opp) return NULL; dce110_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } struct dce_aux *dce120_aux_engine_create( struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst]); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5), i2c_inst_regs(6), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) }; struct dce_i2c_hw *dce120_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dce112_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } static const struct bios_registers bios_regs = { .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3 + NBIO_BASE(mmBIOS_SCRATCH_3_BASE_IDX), .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX) }; static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 7, .num_stream_encoder = 6, .num_pll = 6, .num_ddc = 6, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCE_RGB, .pixel_format_support = { .argb8888 = true, .nv12 = false, .fp16 = false }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 1, .fp16 = 1 }, 
.max_downscale_factor = { .argb8888 = 250, .nv12 = 1, .fp16 = 1 } }; static const struct dc_debug_options debug_defaults = { .disable_clock_gate = true, }; static struct clock_source *dce120_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(*clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dce112_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } BREAK_TO_DEBUGGER(); return NULL; } static void dce120_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); *clk_src = NULL; } static bool dce120_hw_sequencer_create(struct dc *dc) { /* All registers used by dce11.2 match those in dce11 in offset and * structure */ dce120_hw_sequencer_construct(dc); /*TODO Move to separate file and Override what is needed */ return true; } static struct timing_generator *dce120_timing_generator_create( struct dc_context *ctx, uint32_t instance, const struct dce110_timing_generator_offsets *offsets) { struct dce110_timing_generator *tg110 = kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); if (!tg110) return NULL; dce120_timing_generator_construct(tg110, ctx, instance, offsets); return &tg110->base; } static void dce120_transform_destroy(struct transform **xfm) { kfree(TO_DCE_TRANSFORM(*xfm)); *xfm = NULL; } static void destruct(struct dce110_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.opps[i] != NULL) dce110_opp_destroy(&pool->base.opps[i]); if (pool->base.transforms[i] != NULL) dce120_transform_destroy(&pool->base.transforms[i]); if (pool->base.ipps[i] != NULL) dce_ipp_destroy(&pool->base.ipps[i]); if (pool->base.mis[i] != NULL) { kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); pool->base.mis[i] = NULL; } if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); } if (pool->base.timing_generators[i] != NULL) { kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i]) dce_aud_destroy(&pool->base.audios[i]); } for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) dce120_clock_source_destroy( &pool->base.clock_sources[i]); } if (pool->base.dp_clock_source != NULL) dce120_clock_source_destroy(&pool->base.dp_clock_source); if (pool->base.abm != NULL) dce_abm_destroy(&pool->base.abm); if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); } static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { uint32_t reg_val = dm_read_reg_soc15(ctx, mmCC_DC_MISC_STRAPS, 0); straps->audio_stream_number = get_reg_field_value(reg_val, CC_DC_MISC_STRAPS, AUDIO_STREAM_NUMBER); straps->hdmi_disable = get_reg_field_value(reg_val, CC_DC_MISC_STRAPS, HDMI_DISABLE); reg_val = dm_read_reg_soc15(ctx, mmDC_PINSTRAPS, 0); 
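/* DC_PINSTRAPS was read just above; decode the audio pin strap from that value below. */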
straps->dc_pinstraps_audio = get_reg_field_value(reg_val, DC_PINSTRAPS, DC_PINSTRAPS_AUDIO); } static struct audio *create_audio( struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 600000, .hdmi_ycbcr420_supported = true, .dp_ycbcr420_supported = false, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, .flags.bits.IS_TPS4_CAPABLE = true, }; static struct link_encoder *dce120_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); if (!enc110) return NULL; dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; } static struct input_pixel_processor *dce120_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); return NULL; } dce_ipp_construct(ipp, ctx, inst, &ipp_regs[inst], &ipp_shift, &ipp_mask); return &ipp->base; } static struct stream_encoder *dce120_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dce110_stream_encoder *enc110 = kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); if (!enc110) return NULL; dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc110->base; } #define SRII(reg_name, block, id)\ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCE120_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCE12_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCE12_MASK_SH_LIST(_MASK) }; /* HWSEQ regs for VG20 */ static const struct dce_hwseq_registers dce121_hwseq_reg = { HWSEQ_VG20_REG_LIST() }; static const struct dce_hwseq_shift dce121_hwseq_shift = { HWSEQ_VG20_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask dce121_hwseq_mask = { HWSEQ_VG20_MASK_SH_LIST(_MASK) }; static struct dce_hwseq *dce120_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; } return hws; } static struct dce_hwseq *dce121_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &dce121_hwseq_reg; hws->shifts = &dce121_hwseq_shift; hws->masks = &dce121_hwseq_mask; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, .create_stream_encoder = dce120_stream_encoder_create, .create_hwseq = dce120_hwseq_create, }; static const struct resource_create_funcs dce121_res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, .create_stream_encoder = dce120_stream_encoder_create, .create_hwseq = dce121_hwseq_create, }; #define mi_inst_regs(id) { MI_DCE12_REG_LIST(id) } static const struct dce_mem_input_registers 
mi_regs[] = { mi_inst_regs(0), mi_inst_regs(1), mi_inst_regs(2), mi_inst_regs(3), mi_inst_regs(4), mi_inst_regs(5), }; static const struct dce_mem_input_shift mi_shifts = { MI_DCE12_MASK_SH_LIST(__SHIFT) }; static const struct dce_mem_input_mask mi_masks = { MI_DCE12_MASK_SH_LIST(_MASK) }; static struct mem_input *dce120_mem_input_create( struct dc_context *ctx, uint32_t inst) { struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), GFP_KERNEL); if (!dce_mi) { BREAK_TO_DEBUGGER(); return NULL; } dce120_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); return &dce_mi->base; } static struct transform *dce120_transform_create( struct dc_context *ctx, uint32_t inst) { struct dce_transform *transform = kzalloc(sizeof(struct dce_transform), GFP_KERNEL); if (!transform) return NULL; dce_transform_construct(transform, ctx, inst, &xfm_regs[inst], &xfm_shift, &xfm_mask); transform->lb_memory_size = 0x1404; /*5124*/ return &transform->base; } static void dce120_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } static const struct resource_funcs dce120_res_pool_funcs = { .destroy = dce120_destroy_resource_pool, .link_enc_create = dce120_link_encoder_create, .validate_bandwidth = dce112_validate_bandwidth, .validate_plane = dce100_validate_plane, .add_stream_to_ctx = dce112_add_stream_to_ctx, .find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link }; static void bw_calcs_data_update_from_pplib(struct dc *dc) { struct dm_pp_clock_levels_with_latency eng_clks = {0}; struct dm_pp_clock_levels_with_latency mem_clks = {0}; struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0}; int i; unsigned int clk; unsigned int latency; /*original logic in dal3*/ int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ; /*do system clock*/ if (!dm_pp_get_clock_levels_by_type_with_latency( dc->ctx, DM_PP_CLOCK_TYPE_ENGINE_CLK, &eng_clks) || eng_clks.num_levels == 0) { eng_clks.num_levels = 8; clk = 300000; for (i = 0; i < eng_clks.num_levels; i++) { eng_clks.data[i].clocks_in_khz = clk; clk += 100000; } } /* convert all the clock fro kHz to fix point mHz TODO: wloop data */ dc->bw_vbios->high_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels-1].clocks_in_khz, 1000); dc->bw_vbios->mid1_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels/8].clocks_in_khz, 1000); dc->bw_vbios->mid2_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*2/8].clocks_in_khz, 1000); dc->bw_vbios->mid3_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz, 1000); dc->bw_vbios->mid4_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*4/8].clocks_in_khz, 1000); dc->bw_vbios->mid5_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*5/8].clocks_in_khz, 1000); dc->bw_vbios->mid6_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*6/8].clocks_in_khz, 1000); dc->bw_vbios->low_sclk = bw_frc_to_fixed( eng_clks.data[0].clocks_in_khz, 1000); /*do memory clock*/ if (!dm_pp_get_clock_levels_by_type_with_latency( dc->ctx, DM_PP_CLOCK_TYPE_MEMORY_CLK, &mem_clks) || mem_clks.num_levels == 0) { mem_clks.num_levels = 3; clk = 250000; latency = 45; for (i = 0; i < eng_clks.num_levels; i++) { mem_clks.data[i].clocks_in_khz = clk; mem_clks.data[i].latency_in_us = latency; clk += 500000; latency -= 5; } } /* we don't need to call PPLIB for validation clock since they * also give us the highest sclk and 
highest mclk (UMA clock). * ALSO always convert UMA clock (from PPLIB) to YCLK (HW formula): * YCLK = UMACLK*m_memoryTypeMultiplier */ if (dc->bw_vbios->memory_type == bw_def_hbm) memory_type_multiplier = MEMORY_TYPE_HBM; dc->bw_vbios->low_yclk = bw_frc_to_fixed( mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier, 1000); /* Now notify PPLib/SMU about which Watermarks sets they should select * depending on DPM state they are in. And update BW MGR GFX Engine and * Memory clock member variables for Watermarks calculations for each * Watermark Set */ clk_ranges.num_wm_sets = 4; clk_ranges.wm_clk_ranges[0].wm_set_id = WM_SET_A; clk_ranges.wm_clk_ranges[0].wm_min_eng_clk_in_khz = eng_clks.data[0].clocks_in_khz; clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz = eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1; clk_ranges.wm_clk_ranges[0].wm_min_mem_clk_in_khz = mem_clks.data[0].clocks_in_khz; clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz = mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1; clk_ranges.wm_clk_ranges[1].wm_set_id = WM_SET_B; clk_ranges.wm_clk_ranges[1].wm_min_eng_clk_in_khz = eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz; /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */ clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000; clk_ranges.wm_clk_ranges[1].wm_min_mem_clk_in_khz = mem_clks.data[0].clocks_in_khz; clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz = mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1; clk_ranges.wm_clk_ranges[2].wm_set_id = WM_SET_C; clk_ranges.wm_clk_ranges[2].wm_min_eng_clk_in_khz = eng_clks.data[0].clocks_in_khz; clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz = eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1; clk_ranges.wm_clk_ranges[2].wm_min_mem_clk_in_khz = mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz; /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */ clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000; clk_ranges.wm_clk_ranges[3].wm_set_id = WM_SET_D; clk_ranges.wm_clk_ranges[3].wm_min_eng_clk_in_khz = eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz; /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */ clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000; clk_ranges.wm_clk_ranges[3].wm_min_mem_clk_in_khz = mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz; /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */ clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000; /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges); } static uint32_t read_pipe_fuses(struct dc_context *ctx) { uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0); /* VG20 support max 6 pipes */ value = value & 0x3f; return value; } static bool construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) { unsigned int i; int j; struct dc_context *ctx = dc->ctx; struct irq_service_init_data irq_init_data; static const struct resource_create_funcs *res_funcs; bool is_vg20 = ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev); uint32_t pipe_fuses; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap; pool->base.funcs = &dce120_res_pool_funcs; /* TODO: Fill more data from GreenlandAsicCapability.cpp */ 
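/*
 * pipe_count starts at the full timing-generator count from res_cap; on
 * VG20 it is reduced further down, after the pipe-harvest fuses are read
 * and fused-off pipes are skipped (the i -> j reindexing loop).
 */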
pool->base.pipe_count = res_cap.num_timing_generator; pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; dc->caps.psp_setup_panel_mode = true; dc->debug = debug_defaults; /************************************************* * Create resources * *************************************************/ pool->base.clock_sources[DCE120_CLK_SRC_PLL0] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[DCE120_CLK_SRC_PLL1] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[DCE120_CLK_SRC_PLL2] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL2, &clk_src_regs[2], false); pool->base.clock_sources[DCE120_CLK_SRC_PLL3] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL3, &clk_src_regs[3], false); pool->base.clock_sources[DCE120_CLK_SRC_PLL4] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL4, &clk_src_regs[4], false); pool->base.clock_sources[DCE120_CLK_SRC_PLL5] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL5, &clk_src_regs[5], false); pool->base.clk_src_count = DCE120_CLK_SRC_TOTAL; pool->base.dp_clock_source = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_ID_DP_DTO, &clk_src_regs[0], true); for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto clk_src_create_fail; } } pool->base.dmcu = dce_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } irq_init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dce120_create(&irq_init_data); if (!pool->base.irqs) goto irqs_create_fail; /* VG20: Pipe harvesting enabled, retrieve valid pipe fuses */ if (is_vg20) pipe_fuses = read_pipe_fuses(ctx); /* index to valid pipe resource */ j = 0; for (i = 0; i < pool->base.pipe_count; i++) { if (is_vg20) { if ((pipe_fuses & (1 << i)) != 0) { dm_error("DC: skip invalid pipe %d!\n", i); continue; } } pool->base.timing_generators[j] = dce120_timing_generator_create( ctx, i, &dce120_tg_offsets[i]); if (pool->base.timing_generators[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto controller_create_fail; } pool->base.mis[j] = dce120_mem_input_create(ctx, i); if (pool->base.mis[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create memory input!\n"); goto controller_create_fail; } pool->base.ipps[j] = dce120_ipp_create(ctx, i); if (pool->base.ipps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create input pixel processor!\n"); goto controller_create_fail; } pool->base.transforms[j] = dce120_transform_create(ctx, i); if (pool->base.transforms[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create transform!\n"); goto res_create_fail; } pool->base.opps[j] = dce120_opp_create( ctx, i); if (pool->base.opps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create output 
pixel processor!\n"); } /* check next valid pipe */ j++; } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce120_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto res_create_fail; } pool->base.hw_i2cs[i] = dce120_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create i2c engine!!\n"); goto res_create_fail; } pool->base.sw_i2cs[i] = NULL; } /* valid pipe num */ pool->base.pipe_count = j; pool->base.timing_generator_count = j; if (is_vg20) res_funcs = &dce121_res_create_funcs; else res_funcs = &res_create_funcs; if (!resource_construct(num_virtual_links, dc, &pool->base, res_funcs)) goto res_create_fail; /* Create hardware sequencer */ if (!dce120_hw_sequencer_create(dc)) goto controller_create_fail; dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id); bw_calcs_data_update_from_pplib(dc); return true; irqs_create_fail: controller_create_fail: clk_src_create_fail: res_create_fail: destruct(pool); return false; } struct resource_pool *dce120_create_resource_pool( uint8_t num_virtual_links, struct dc *dc) { struct dce110_resource_pool *pool = kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (construct(num_virtual_links, dc, pool)) return &pool->base; BREAK_TO_DEBUGGER(); return NULL; }
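/*
 * Editor's note -- illustrative sketch, not part of the original driver
 * source above. construct() harvests fused-off pipes on VG20 by walking
 * the physical pipe index i while packing created objects at the logical
 * index j, so every per-pipe array filled at index j must also be checked
 * at index j. The standalone program below (hypothetical fake_pipe type,
 * plain C, no kernel dependencies) shows that compaction pattern in
 * isolation.
 */
#include <stdio.h>

#define NUM_PHYS_PIPES 6

struct fake_pipe {
	int phys_inst;	/* which physical pipe backs this logical slot */
};

/*
 * Walk all physical pipes; skip the ones whose fuse bit is set and pack
 * the survivors at the front of 'out'. Returns the valid pipe count
 * (the value construct() stores back into pool->base.pipe_count).
 */
static int harvest_pipes(unsigned int pipe_fuses, struct fake_pipe *out)
{
	int i, j = 0;

	for (i = 0; i < NUM_PHYS_PIPES; i++) {
		if (pipe_fuses & (1u << i))
			continue;		/* pipe i is fused off, skip it */
		out[j].phys_inst = i;		/* fill and test with j, not i */
		j++;
	}
	return j;
}

int main(void)
{
	struct fake_pipe pipes[NUM_PHYS_PIPES];
	int i, count;

	/* example fuse mask: pipes 1 and 4 harvested */
	count = harvest_pipes(0x12, pipes);
	for (i = 0; i < count; i++)
		printf("logical pipe %d -> physical instance %d\n",
		       i, pipes[i].phys_inst);
	return 0;
}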
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1272_3
crossvul-cpp_data_good_1273_6
/* * Copyright 2016 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/slab.h> #include "dm_services.h" #include "dc.h" #include "resource.h" #include "include/irq_service_interface.h" #include "dcn20/dcn20_resource.h" #include "dcn10/dcn10_hubp.h" #include "dcn10/dcn10_ipp.h" #include "dcn20_hubbub.h" #include "dcn20_mpc.h" #include "dcn20_hubp.h" #include "irq/dcn20/irq_service_dcn20.h" #include "dcn20_dpp.h" #include "dcn20_optc.h" #include "dcn20_hwseq.h" #include "dce110/dce110_hw_sequencer.h" #include "dcn10/dcn10_resource.h" #include "dcn20_opp.h" #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dcn20_dsc.h" #endif #include "dcn20_link_encoder.h" #include "dcn20_stream_encoder.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dml/display_mode_vba.h" #include "dcn20_dccg.h" #include "dcn20_vmid.h" #include "navi10_ip_offset.h" #include "dcn/dcn_2_0_0_offset.h" #include "dcn/dcn_2_0_0_sh_mask.h" #include "nbio/nbio_2_3_offset.h" #include "dcn20/dcn20_dwb.h" #include "dcn20/dcn20_mmhubbub.h" #include "mmhub/mmhub_2_0_0_offset.h" #include "mmhub/mmhub_2_0_0_sh_mask.h" #include "reg_helper.h" #include "dce/dce_abm.h" #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" #include "vm_helper.h" #include "amdgpu_socbb.h" /* NV12 SOC BB is currently in FW, mark SW bounding box invalid. 
*/ #define SOC_BOUNDING_BOX_VALID false #define DC_LOGGER_INIT(logger) struct _vcs_dpi_ip_params_st dcn2_0_ip = { .odm_capable = 1, .gpuvm_enable = 0, .hostvm_enable = 0, .gpuvm_max_page_table_levels = 4, .hostvm_max_page_table_levels = 4, .hostvm_cached_page_table_levels = 0, .pte_group_size_bytes = 2048, #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 6, #else .num_dsc = 0, #endif .rob_buffer_size_kbytes = 168, .det_buffer_size_kbytes = 164, .dpte_buffer_size_in_pte_reqs_luma = 84, .pde_proc_buffer_size_64k_reqs = 48, .dpp_output_buffer_pixels = 2560, .opp_output_buffer_lines = 1, .pixel_chunk_size_kbytes = 8, .pte_chunk_size_kbytes = 2, .meta_chunk_size_kbytes = 2, .writeback_chunk_size_kbytes = 2, .line_buffer_size_bits = 789504, .is_line_buffer_bpp_fixed = 0, .line_buffer_fixed_bpp = 0, .dcc_supported = true, .max_line_buffer_lines = 12, .writeback_luma_buffer_size_kbytes = 12, .writeback_chroma_buffer_size_kbytes = 8, .writeback_chroma_line_buffer_width_pixels = 4, .writeback_max_hscl_ratio = 1, .writeback_max_vscl_ratio = 1, .writeback_min_hscl_ratio = 1, .writeback_min_vscl_ratio = 1, .writeback_max_hscl_taps = 12, .writeback_max_vscl_taps = 12, .writeback_line_buffer_luma_buffer_size = 0, .writeback_line_buffer_chroma_buffer_size = 14643, .cursor_buffer_size = 8, .cursor_chunk_size = 2, .max_num_otg = 6, .max_num_dpp = 6, .max_num_wb = 1, .max_dchub_pscl_bw_pix_per_clk = 4, .max_pscl_lb_bw_pix_per_clk = 2, .max_lb_vscl_bw_pix_per_clk = 4, .max_vscl_hscl_bw_pix_per_clk = 4, .max_hscl_ratio = 8, .max_vscl_ratio = 8, .hscl_mults = 4, .vscl_mults = 4, .max_hscl_taps = 8, .max_vscl_taps = 8, .dispclk_ramp_margin_percent = 1, .underscan_factor = 1.10, .min_vblank_lines = 32, // .dppclk_delay_subtotal = 77, // .dppclk_delay_scl_lb_only = 16, .dppclk_delay_scl = 50, .dppclk_delay_cnvc_formatter = 8, .dppclk_delay_cnvc_cursor = 6, .dispclk_delay_subtotal = 87, // .dcfclk_cstate_latency = 10, // SRExitTime .max_inter_dcn_tile_repeaters = 8, .xfc_supported = true, .xfc_fill_bw_overhead_percent = 10.0, .xfc_fill_constant_bytes = 0, }; struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = { /* Defaults that get patched on driver load from firmware. 
*/ .clock_limits = { { .state = 0, .dcfclk_mhz = 560.0, .fabricclk_mhz = 560.0, .dispclk_mhz = 513.0, .dppclk_mhz = 513.0, .phyclk_mhz = 540.0, .socclk_mhz = 560.0, .dscclk_mhz = 171.0, .dram_speed_mts = 8960.0, }, { .state = 1, .dcfclk_mhz = 694.0, .fabricclk_mhz = 694.0, .dispclk_mhz = 642.0, .dppclk_mhz = 642.0, .phyclk_mhz = 600.0, .socclk_mhz = 694.0, .dscclk_mhz = 214.0, .dram_speed_mts = 11104.0, }, { .state = 2, .dcfclk_mhz = 875.0, .fabricclk_mhz = 875.0, .dispclk_mhz = 734.0, .dppclk_mhz = 734.0, .phyclk_mhz = 810.0, .socclk_mhz = 875.0, .dscclk_mhz = 245.0, .dram_speed_mts = 14000.0, }, { .state = 3, .dcfclk_mhz = 1000.0, .fabricclk_mhz = 1000.0, .dispclk_mhz = 1100.0, .dppclk_mhz = 1100.0, .phyclk_mhz = 810.0, .socclk_mhz = 1000.0, .dscclk_mhz = 367.0, .dram_speed_mts = 16000.0, }, { .state = 4, .dcfclk_mhz = 1200.0, .fabricclk_mhz = 1200.0, .dispclk_mhz = 1284.0, .dppclk_mhz = 1284.0, .phyclk_mhz = 810.0, .socclk_mhz = 1200.0, .dscclk_mhz = 428.0, .dram_speed_mts = 16000.0, }, /*Extra state, no dispclk ramping*/ { .state = 5, .dcfclk_mhz = 1200.0, .fabricclk_mhz = 1200.0, .dispclk_mhz = 1284.0, .dppclk_mhz = 1284.0, .phyclk_mhz = 810.0, .socclk_mhz = 1200.0, .dscclk_mhz = 428.0, .dram_speed_mts = 16000.0, }, }, .num_states = 5, .sr_exit_time_us = 8.6, .sr_enter_plus_exit_time_us = 10.9, .urgent_latency_us = 4.0, .urgent_latency_pixel_data_only_us = 4.0, .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, .urgent_latency_vm_data_only_us = 4.0, .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0, .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0, .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, .max_avg_sdp_bw_use_normal_percent = 40.0, .max_avg_dram_bw_use_normal_percent = 40.0, .writeback_latency_us = 12.0, .ideal_dram_bw_after_urgent_percent = 40.0, .max_request_size_bytes = 256, .dram_channel_width_bytes = 2, .fabric_datapath_to_dcn_data_return_bytes = 64, .dcn_downspread_percent = 0.5, .downspread_percent = 0.38, .dram_page_open_time_ns = 50.0, .dram_rw_turnaround_time_ns = 17.5, .dram_return_buffer_per_channel_bytes = 8192, .round_trip_ping_latency_dcfclk_cycles = 131, .urgent_out_of_order_return_per_channel_bytes = 256, .channel_interleave_bytes = 256, .num_banks = 8, .num_chans = 16, .vmm_page_size_bytes = 4096, .dram_clock_change_latency_us = 404.0, .dummy_pstate_latency_us = 5.0, .writeback_dram_clock_change_latency_us = 23.0, .return_bus_width_bytes = 64, .dispclk_dppclk_vco_speed_mhz = 3850, .xfc_bus_transport_time_us = 20, .xfc_xbuf_latency_tolerance_us = 4, .use_urgent_burst_bw = 0 }; struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 }; #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #endif enum dcn20_clk_src_array_id { DCN20_CLK_SRC_PLL0, 
DCN20_CLK_SRC_PLL1, DCN20_CLK_SRC_PLL2, DCN20_CLK_SRC_PLL3, DCN20_CLK_SRC_PLL4, DCN20_CLK_SRC_PLL5, DCN20_CLK_SRC_TOTAL }; /* begin ********************* * macros to expend register list macro defined in HW object header file */ /* DCN */ /* TODO awful hack. fixup dcn20_dwb.h */ #undef BASE_INNER #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg #define BASE(seg) BASE_INNER(seg) #define SR(reg_name)\ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name #define SRI(reg_name, block, id)\ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name #define SRIR(var_name, reg_name, block, id)\ .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name #define SRII(reg_name, block, id)\ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name #define DCCG_SRII(reg_name, block, id)\ .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name /* NBIO */ #define NBIO_BASE_INNER(seg) \ NBIO_BASE__INST0_SEG ## seg #define NBIO_BASE(seg) \ NBIO_BASE_INNER(seg) #define NBIO_SR(reg_name)\ .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name /* MMHUB */ #define MMHUB_BASE_INNER(seg) \ MMHUB_BASE__INST0_SEG ## seg #define MMHUB_BASE(seg) \ MMHUB_BASE_INNER(seg) #define MMHUB_SR(reg_name)\ .reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \ mmMM ## reg_name static const struct bios_registers bios_regs = { NBIO_SR(BIOS_SCRATCH_3), NBIO_SR(BIOS_SCRATCH_6) }; #define clk_src_regs(index, pllid)\ [index] = {\ CS_COMMON_REG_LIST_DCN2_0(index, pllid),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0, A), clk_src_regs(1, B), clk_src_regs(2, C), clk_src_regs(3, D), clk_src_regs(4, E), clk_src_regs(5, F) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) }; static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCN10_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { DMCU_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dce_dmcu_mask dmcu_mask = { DMCU_MASK_SH_LIST_DCN10(_MASK) }; static const struct dce_abm_registers abm_regs = { ABM_DCN20_REG_LIST() }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCN20(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCN20(_MASK) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), audio_regs(2), audio_regs(3), audio_regs(4), audio_regs(5), audio_regs(6), }; #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) static const struct dce_audio_shift audio_shift = { DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) }; #define stream_enc_regs(id)\ [id] = {\ SE_DCN2_REG_LIST(id)\ } static const struct dcn10_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), stream_enc_regs(3), stream_enc_regs(4), stream_enc_regs(5), }; static const struct dcn10_stream_encoder_shift se_shift = { 
SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT) }; static const struct dcn10_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCN20(_MASK) }; #define aux_regs(id)\ [id] = {\ DCN2_AUX_REG_LIST(id)\ } static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3), aux_regs(4), aux_regs(5) }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3), hpd_regs(4), hpd_regs(5) }; #define link_regs(id, phyid)\ [id] = {\ LE_DCN10_REG_LIST(id), \ UNIPHY_DCN2_REG_LIST(phyid), \ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ } static const struct dcn10_link_enc_registers link_enc_regs[] = { link_regs(0, A), link_regs(1, B), link_regs(2, C), link_regs(3, D), link_regs(4, E), link_regs(5, F) }; static const struct dcn10_link_enc_shift le_shift = { LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT) }; static const struct dcn10_link_enc_mask le_mask = { LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK) }; #define ipp_regs(id)\ [id] = {\ IPP_REG_LIST_DCN20(id),\ } static const struct dcn10_ipp_registers ipp_regs[] = { ipp_regs(0), ipp_regs(1), ipp_regs(2), ipp_regs(3), ipp_regs(4), ipp_regs(5), }; static const struct dcn10_ipp_shift ipp_shift = { IPP_MASK_SH_LIST_DCN20(__SHIFT) }; static const struct dcn10_ipp_mask ipp_mask = { IPP_MASK_SH_LIST_DCN20(_MASK), }; #define opp_regs(id)\ [id] = {\ OPP_REG_LIST_DCN20(id),\ } static const struct dcn20_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), opp_regs(3), opp_regs(4), opp_regs(5), }; static const struct dcn20_opp_shift opp_shift = { OPP_MASK_SH_LIST_DCN20(__SHIFT) }; static const struct dcn20_opp_mask opp_mask = { OPP_MASK_SH_LIST_DCN20(_MASK) }; #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST0(id), \ .AUXN_IMPCAL = 0, \ .AUXP_IMPCAL = 0, \ .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4), aux_engine_regs(5) }; #define tf_regs(id)\ [id] = {\ TF_REG_LIST_DCN20(id),\ } static const struct dcn2_dpp_registers tf_regs[] = { tf_regs(0), tf_regs(1), tf_regs(2), tf_regs(3), tf_regs(4), tf_regs(5), }; static const struct dcn2_dpp_shift tf_shift = { TF_REG_LIST_SH_MASK_DCN20(__SHIFT) }; static const struct dcn2_dpp_mask tf_mask = { TF_REG_LIST_SH_MASK_DCN20(_MASK) }; #define dwbc_regs_dcn2(id)\ [id] = {\ DWBC_COMMON_REG_LIST_DCN2_0(id),\ } static const struct dcn20_dwbc_registers dwbc20_regs[] = { dwbc_regs_dcn2(0), }; static const struct dcn20_dwbc_shift dwbc20_shift = { DWBC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) }; static const struct dcn20_dwbc_mask dwbc20_mask = { DWBC_COMMON_MASK_SH_LIST_DCN2_0(_MASK) }; #define mcif_wb_regs_dcn2(id)\ [id] = {\ MCIF_WB_COMMON_REG_LIST_DCN2_0(id),\ } static const struct dcn20_mmhubbub_registers mcif_wb20_regs[] = { mcif_wb_regs_dcn2(0), }; static const struct dcn20_mmhubbub_shift mcif_wb20_shift = { MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) }; static const struct dcn20_mmhubbub_mask mcif_wb20_mask = { MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(_MASK) }; static const struct dcn20_mpc_registers mpc_regs = { MPC_REG_LIST_DCN2_0(0), MPC_REG_LIST_DCN2_0(1), MPC_REG_LIST_DCN2_0(2), MPC_REG_LIST_DCN2_0(3), MPC_REG_LIST_DCN2_0(4), MPC_REG_LIST_DCN2_0(5), MPC_OUT_MUX_REG_LIST_DCN2_0(0), MPC_OUT_MUX_REG_LIST_DCN2_0(1), MPC_OUT_MUX_REG_LIST_DCN2_0(2), MPC_OUT_MUX_REG_LIST_DCN2_0(3), 
MPC_OUT_MUX_REG_LIST_DCN2_0(4), MPC_OUT_MUX_REG_LIST_DCN2_0(5), }; static const struct dcn20_mpc_shift mpc_shift = { MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) }; static const struct dcn20_mpc_mask mpc_mask = { MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK) }; #define tg_regs(id)\ [id] = {TG_COMMON_REG_LIST_DCN2_0(id)} static const struct dcn_optc_registers tg_regs[] = { tg_regs(0), tg_regs(1), tg_regs(2), tg_regs(3), tg_regs(4), tg_regs(5) }; static const struct dcn_optc_shift tg_shift = { TG_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) }; static const struct dcn_optc_mask tg_mask = { TG_COMMON_MASK_SH_LIST_DCN2_0(_MASK) }; #define hubp_regs(id)\ [id] = {\ HUBP_REG_LIST_DCN20(id)\ } static const struct dcn_hubp2_registers hubp_regs[] = { hubp_regs(0), hubp_regs(1), hubp_regs(2), hubp_regs(3), hubp_regs(4), hubp_regs(5) }; static const struct dcn_hubp2_shift hubp_shift = { HUBP_MASK_SH_LIST_DCN20(__SHIFT) }; static const struct dcn_hubp2_mask hubp_mask = { HUBP_MASK_SH_LIST_DCN20(_MASK) }; static const struct dcn_hubbub_registers hubbub_reg = { HUBBUB_REG_LIST_DCN20(0) }; static const struct dcn_hubbub_shift hubbub_shift = { HUBBUB_MASK_SH_LIST_DCN20(__SHIFT) }; static const struct dcn_hubbub_mask hubbub_mask = { HUBBUB_MASK_SH_LIST_DCN20(_MASK) }; #define vmid_regs(id)\ [id] = {\ DCN20_VMID_REG_LIST(id)\ } static const struct dcn_vmid_registers vmid_regs[] = { vmid_regs(0), vmid_regs(1), vmid_regs(2), vmid_regs(3), vmid_regs(4), vmid_regs(5), vmid_regs(6), vmid_regs(7), vmid_regs(8), vmid_regs(9), vmid_regs(10), vmid_regs(11), vmid_regs(12), vmid_regs(13), vmid_regs(14), vmid_regs(15) }; static const struct dcn20_vmid_shift vmid_shifts = { DCN20_VMID_MASK_SH_LIST(__SHIFT) }; static const struct dcn20_vmid_mask vmid_masks = { DCN20_VMID_MASK_SH_LIST(_MASK) }; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #define dsc_regsDCN20(id)\ [id] = {\ DSC_REG_LIST_DCN20(id)\ } static const struct dcn20_dsc_registers dsc_regs[] = { dsc_regsDCN20(0), dsc_regsDCN20(1), dsc_regsDCN20(2), dsc_regsDCN20(3), dsc_regsDCN20(4), dsc_regsDCN20(5) }; static const struct dcn20_dsc_shift dsc_shift = { DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) }; static const struct dcn20_dsc_mask dsc_mask = { DSC_REG_LIST_SH_MASK_DCN20(_MASK) }; #endif static const struct dccg_registers dccg_regs = { DCCG_REG_LIST_DCN2() }; static const struct dccg_shift dccg_shift = { DCCG_MASK_SH_LIST_DCN2(__SHIFT) }; static const struct dccg_mask dccg_mask = { DCCG_MASK_SH_LIST_DCN2(_MASK) }; static const struct resource_caps res_cap_nv10 = { .num_timing_generator = 6, .num_opp = 6, .num_video_plane = 6, .num_audio = 7, .num_stream_encoder = 6, .num_pll = 6, .num_dwb = 1, .num_ddc = 6, .num_vmid = 16, #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 6, #endif }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCN_UNIVERSAL, .blends_with_above = true, .blends_with_below = true, .per_pixel_alpha = true, .pixel_format_support = { .argb8888 = true, .nv12 = true, .fp16 = true }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 16000, .fp16 = 1 }, .max_downscale_factor = { .argb8888 = 250, .nv12 = 250, .fp16 = 1 } }; static const struct resource_caps res_cap_nv14 = { .num_timing_generator = 5, .num_opp = 5, .num_video_plane = 5, .num_audio = 6, .num_stream_encoder = 5, .num_pll = 5, .num_dwb = 0, .num_ddc = 5, }; static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_DYNAMIC, 
.force_single_disp_pipe_split = true, .disable_dcc = DCC_ENABLE, .vsr_support = true, .performance_trace = false, .max_downscale_src_width = 5120,/*upto 5K*/ .disable_pplib_wm_range = false, .scl_reset_length10 = true, .sanity_checks = false, .disable_tri_buf = true, .underflow_assert_delay_us = 0xFFFFFFFF, }; static const struct dc_debug_options debug_defaults_diags = { .disable_dmcu = true, .force_abm_enable = false, .timing_trace = true, .clock_trace = true, .disable_dpp_power_gate = true, .disable_hubp_power_gate = true, .disable_clock_gate = true, .disable_pplib_clock_request = true, .disable_pplib_wm_range = true, .disable_stutter = true, .scl_reset_length10 = true, .underflow_assert_delay_us = 0xFFFFFFFF, }; void dcn20_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN20_DPP(*dpp)); *dpp = NULL; } struct dpp *dcn20_dpp_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_dpp *dpp = kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL); if (!dpp) return NULL; if (dpp2_construct(dpp, ctx, inst, &tf_regs[inst], &tf_shift, &tf_mask)) return &dpp->base; BREAK_TO_DEBUGGER(); kfree(dpp); return NULL; } struct input_pixel_processor *dcn20_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dcn10_ipp *ipp = kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); return NULL; } dcn20_ipp_construct(ipp, ctx, inst, &ipp_regs[inst], &ipp_shift, &ipp_mask); return &ipp->base; } struct output_pixel_processor *dcn20_opp_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_opp *opp = kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); if (!opp) { BREAK_TO_DEBUGGER(); return NULL; } dcn20_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } struct dce_aux *dcn20_aux_engine_create( struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst]); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5), i2c_inst_regs(6), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) }; struct dce_i2c_hw *dcn20_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } struct mpc *dcn20_mpc_create(struct dc_context *ctx) { struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc), GFP_KERNEL); if (!mpc20) return NULL; dcn20_mpc_construct(mpc20, ctx, &mpc_regs, &mpc_shift, &mpc_mask, 6); return &mpc20->base; } struct hubbub *dcn20_hubbub_create(struct dc_context *ctx) { int i; struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL); if (!hubbub) return NULL; hubbub2_construct(hubbub, ctx, &hubbub_reg, &hubbub_shift, &hubbub_mask); for (i = 0; i < res_cap_nv10.num_vmid; i++) { struct dcn20_vmid *vmid = &hubbub->vmid[i]; vmid->ctx = ctx; vmid->regs = &vmid_regs[i]; vmid->shifts = &vmid_shifts; vmid->masks = &vmid_masks; } return &hubbub->base; } struct timing_generator 
*dcn20_timing_generator_create( struct dc_context *ctx, uint32_t instance) { struct optc *tgn10 = kzalloc(sizeof(struct optc), GFP_KERNEL); if (!tgn10) return NULL; tgn10->base.inst = instance; tgn10->base.ctx = ctx; tgn10->tg_regs = &tg_regs[instance]; tgn10->tg_shift = &tg_shift; tgn10->tg_mask = &tg_mask; dcn20_timing_generator_init(tgn10); return &tgn10->base; } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 600000, .hdmi_ycbcr420_supported = true, .dp_ycbcr420_supported = true, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, .flags.bits.IS_TPS4_CAPABLE = true }; struct link_encoder *dcn20_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); if (!enc20) return NULL; dcn20_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, &le_mask); return &enc20->enc10.base; } struct clock_source *dcn20_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dcn20_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); } static struct audio *dcn20_create_audio( struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } struct stream_encoder *dcn20_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dcn10_stream_encoder *enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); if (!enc1) return NULL; dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc1->base; } static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCN2_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCN2_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCN2_MASK_SH_LIST(_MASK) }; struct dce_hwseq *dcn20_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = dcn20_create_audio, .create_stream_encoder = dcn20_stream_encoder_create, .create_hwseq = dcn20_hwseq_create, }; static const struct resource_create_funcs res_create_maximus_funcs = { .read_dce_straps = NULL, .create_audio = NULL, .create_stream_encoder = NULL, .create_hwseq = dcn20_hwseq_create, }; void dcn20_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); *clk_src = NULL; } #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct display_stream_compressor 
*dcn20_dsc_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_dsc *dsc = kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); if (!dsc) { BREAK_TO_DEBUGGER(); return NULL; } dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); return &dsc->base; } void dcn20_dsc_destroy(struct display_stream_compressor **dsc) { kfree(container_of(*dsc, struct dcn20_dsc, base)); *dsc = NULL; } #endif static void destruct(struct dcn20_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) { kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); pool->base.stream_enc[i] = NULL; } } #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT for (i = 0; i < pool->base.res_cap->num_dsc; i++) { if (pool->base.dscs[i] != NULL) dcn20_dsc_destroy(&pool->base.dscs[i]); } #endif if (pool->base.mpc != NULL) { kfree(TO_DCN20_MPC(pool->base.mpc)); pool->base.mpc = NULL; } if (pool->base.hubbub != NULL) { kfree(pool->base.hubbub); pool->base.hubbub = NULL; } for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.dpps[i] != NULL) dcn20_dpp_destroy(&pool->base.dpps[i]); if (pool->base.ipps[i] != NULL) pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); if (pool->base.hubps[i] != NULL) { kfree(TO_DCN20_HUBP(pool->base.hubps[i])); pool->base.hubps[i] = NULL; } if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_opp; i++) { if (pool->base.opps[i] != NULL) pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); } for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { if (pool->base.timing_generators[i] != NULL) { kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_dwb; i++) { if (pool->base.dwbc[i] != NULL) { kfree(TO_DCN20_DWBC(pool->base.dwbc[i])); pool->base.dwbc[i] = NULL; } if (pool->base.mcif_wb[i] != NULL) { kfree(TO_DCN20_MMHUBBUB(pool->base.mcif_wb[i])); pool->base.mcif_wb[i] = NULL; } } for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i]) dce_aud_destroy(&pool->base.audios[i]); } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) { dcn20_clock_source_destroy(&pool->base.clock_sources[i]); pool->base.clock_sources[i] = NULL; } } if (pool->base.dp_clock_source != NULL) { dcn20_clock_source_destroy(&pool->base.dp_clock_source); pool->base.dp_clock_source = NULL; } if (pool->base.abm != NULL) dce_abm_destroy(&pool->base.abm); if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); if (pool->base.dccg != NULL) dcn_dccg_destroy(&pool->base.dccg); if (pool->base.pp_smu != NULL) dcn20_pp_smu_destroy(&pool->base.pp_smu); } struct hubp *dcn20_hubp_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_hubp *hubp2 = kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); if (!hubp2) return NULL; if (hubp2_construct(hubp2, ctx, inst, &hubp_regs[inst], &hubp_shift, &hubp_mask)) return &hubp2->base; BREAK_TO_DEBUGGER(); kfree(hubp2); return NULL; } static void get_pixel_clock_parameters( struct pipe_ctx *pipe_ctx, struct pixel_clk_params *pixel_clk_params) { const struct 
dc_stream_state *stream = pipe_ctx->stream; struct pipe_ctx *odm_pipe; int opp_cnt = 1; for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) opp_cnt++; pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; pixel_clk_params->encoder_object_id = stream->link->link_enc->id; pixel_clk_params->signal_type = pipe_ctx->stream->signal; pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; /* TODO: un-hardcode*/ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * LINK_RATE_REF_FREQ_IN_KHZ; pixel_clk_params->flags.ENABLE_SS = 0; pixel_clk_params->color_depth = stream->timing.display_color_depth; pixel_clk_params->flags.DISPLAY_BLANKED = 1; pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding; if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) pixel_clk_params->color_depth = COLOR_DEPTH_888; if (opp_cnt == 4) pixel_clk_params->requested_pix_clk_100hz /= 4; else if (optc1_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2) pixel_clk_params->requested_pix_clk_100hz /= 2; if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) pixel_clk_params->requested_pix_clk_100hz *= 2; } static void build_clamping_params(struct dc_stream_state *stream) { stream->clamping.clamping_level = CLAMPING_FULL_RANGE; stream->clamping.c_depth = stream->timing.display_color_depth; stream->clamping.pixel_encoding = stream->timing.pixel_encoding; } static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx) { get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params); pipe_ctx->clock_source->funcs->get_pix_clk_dividers( pipe_ctx->clock_source, &pipe_ctx->stream_res.pix_clk_params, &pipe_ctx->pll_settings); pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding; resource_build_bit_depth_reduction_params(pipe_ctx->stream, &pipe_ctx->stream->bit_depth_params); build_clamping_params(pipe_ctx->stream); return DC_OK; } enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) { enum dc_status status = DC_OK; struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream); /*TODO Seems unneeded anymore */ /* if (old_context && resource_is_stream_unchanged(old_context, stream)) { if (stream != NULL && old_context->streams[i] != NULL) { todo: shouldn't have to copy missing parameter here resource_build_bit_depth_reduction_params(stream, &stream->bit_depth_params); stream->clamping.pixel_encoding = stream->timing.pixel_encoding; resource_build_bit_depth_reduction_params(stream, &stream->bit_depth_params); build_clamping_params(stream); continue; } } */ if (!pipe_ctx) return DC_ERROR_UNEXPECTED; status = build_pipe_hw_param(pipe_ctx); return status; } #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT static void acquire_dsc(struct resource_context *res_ctx, const struct resource_pool *pool, struct display_stream_compressor **dsc) { int i; ASSERT(*dsc == NULL); *dsc = NULL; /* Find first free DSC */ for (i = 0; i < pool->res_cap->num_dsc; i++) if (!res_ctx->is_dsc_acquired[i]) { *dsc = pool->dscs[i]; res_ctx->is_dsc_acquired[i] = true; break; } } static void release_dsc(struct resource_context *res_ctx, const struct resource_pool *pool, struct display_stream_compressor **dsc) { int i; for (i = 0; i < pool->res_cap->num_dsc; i++) if (pool->dscs[i] == *dsc) { res_ctx->is_dsc_acquired[i] = false; *dsc = NULL; break; } } #endif #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT static enum dc_status 
add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream) { enum dc_status result = DC_OK; int i; const struct resource_pool *pool = dc->res_pool; /* Get a DSC if required and available */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &dc_ctx->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream != dc_stream) continue; acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc); /* The number of DSCs can be less than the number of pipes */ if (!pipe_ctx->stream_res.dsc) { dm_output_to_console("No DSCs available\n"); result = DC_NO_DSC_RESOURCE; } break; } return result; } static enum dc_status remove_dsc_from_stream_resource(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) { struct pipe_ctx *pipe_ctx = NULL; int i; for (i = 0; i < MAX_PIPES; i++) { if (new_ctx->res_ctx.pipe_ctx[i].stream == dc_stream && !new_ctx->res_ctx.pipe_ctx[i].top_pipe) { pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream_res.dsc) release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc); } } if (!pipe_ctx) return DC_ERROR_UNEXPECTED; else return DC_OK; } #endif enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) { enum dc_status result = DC_ERROR_UNEXPECTED; result = resource_map_pool_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream); #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Get a DSC if required and available */ if (result == DC_OK && dc_stream->timing.flags.DSC) result = add_dsc_to_stream_resource(dc, new_ctx, dc_stream); #endif if (result == DC_OK) result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream); return result; } enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) { enum dc_status result = DC_OK; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream); #endif return result; } static void swizzle_to_dml_params( enum swizzle_mode_values swizzle, unsigned int *sw_mode) { switch (swizzle) { case DC_SW_LINEAR: *sw_mode = dm_sw_linear; break; case DC_SW_4KB_S: *sw_mode = dm_sw_4kb_s; break; case DC_SW_4KB_S_X: *sw_mode = dm_sw_4kb_s_x; break; case DC_SW_4KB_D: *sw_mode = dm_sw_4kb_d; break; case DC_SW_4KB_D_X: *sw_mode = dm_sw_4kb_d_x; break; case DC_SW_64KB_S: *sw_mode = dm_sw_64kb_s; break; case DC_SW_64KB_S_X: *sw_mode = dm_sw_64kb_s_x; break; case DC_SW_64KB_S_T: *sw_mode = dm_sw_64kb_s_t; break; case DC_SW_64KB_D: *sw_mode = dm_sw_64kb_d; break; case DC_SW_64KB_D_X: *sw_mode = dm_sw_64kb_d_x; break; case DC_SW_64KB_D_T: *sw_mode = dm_sw_64kb_d_t; break; case DC_SW_64KB_R_X: *sw_mode = dm_sw_64kb_r_x; break; case DC_SW_VAR_S: *sw_mode = dm_sw_var_s; break; case DC_SW_VAR_S_X: *sw_mode = dm_sw_var_s_x; break; case DC_SW_VAR_D: *sw_mode = dm_sw_var_d; break; case DC_SW_VAR_D_X: *sw_mode = dm_sw_var_d_x; break; default: ASSERT(0); /* Not supported */ break; } } static bool dcn20_split_stream_for_odm( struct resource_context *res_ctx, const struct resource_pool *pool, struct pipe_ctx *prev_odm_pipe, struct pipe_ctx *next_odm_pipe) { int pipe_idx = next_odm_pipe->pipe_idx; *next_odm_pipe = *prev_odm_pipe; next_odm_pipe->pipe_idx = pipe_idx; next_odm_pipe->plane_res.mi = pool->mis[next_odm_pipe->pipe_idx]; next_odm_pipe->plane_res.hubp = pool->hubps[next_odm_pipe->pipe_idx]; next_odm_pipe->plane_res.ipp = pool->ipps[next_odm_pipe->pipe_idx]; 
next_odm_pipe->plane_res.xfm = pool->transforms[next_odm_pipe->pipe_idx]; next_odm_pipe->plane_res.dpp = pool->dpps[next_odm_pipe->pipe_idx]; next_odm_pipe->plane_res.mpcc_inst = pool->dpps[next_odm_pipe->pipe_idx]->inst; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT next_odm_pipe->stream_res.dsc = NULL; #endif if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) { ASSERT(!next_odm_pipe->next_odm_pipe); next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe; next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe; } prev_odm_pipe->next_odm_pipe = next_odm_pipe; next_odm_pipe->prev_odm_pipe = prev_odm_pipe; ASSERT(next_odm_pipe->top_pipe == NULL); if (prev_odm_pipe->plane_state) { struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data; int new_width; /* HACTIVE halved for odm combine */ sd->h_active /= 2; /* Calculate new vp and recout for left pipe */ /* Need at least 16 pixels width per side */ if (sd->recout.x + 16 >= sd->h_active) return false; new_width = sd->h_active - sd->recout.x; sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int( sd->ratios.horz, sd->recout.width - new_width)); sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int( sd->ratios.horz_c, sd->recout.width - new_width)); sd->recout.width = new_width; /* Calculate new vp and recout for right pipe */ sd = &next_odm_pipe->plane_res.scl_data; /* HACTIVE halved for odm combine */ sd->h_active /= 2; /* Need at least 16 pixels width per side */ if (new_width <= 16) return false; new_width = sd->recout.width + sd->recout.x - sd->h_active; sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int( sd->ratios.horz, sd->recout.width - new_width)); sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int( sd->ratios.horz_c, sd->recout.width - new_width)); sd->recout.width = new_width; sd->viewport.x += dc_fixpt_floor(dc_fixpt_mul_int( sd->ratios.horz, sd->h_active - sd->recout.x)); sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int( sd->ratios.horz_c, sd->h_active - sd->recout.x)); sd->recout.x = 0; } next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx]; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (next_odm_pipe->stream->timing.flags.DSC == 1) { acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc); ASSERT(next_odm_pipe->stream_res.dsc); if (next_odm_pipe->stream_res.dsc == NULL) return false; } #endif return true; } static void dcn20_split_stream_for_mpc( struct resource_context *res_ctx, const struct resource_pool *pool, struct pipe_ctx *primary_pipe, struct pipe_ctx *secondary_pipe) { int pipe_idx = secondary_pipe->pipe_idx; struct pipe_ctx *sec_bot_pipe = secondary_pipe->bottom_pipe; *secondary_pipe = *primary_pipe; secondary_pipe->bottom_pipe = sec_bot_pipe; secondary_pipe->pipe_idx = pipe_idx; secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx]; secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx]; secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx]; secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx]; secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx]; secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT secondary_pipe->stream_res.dsc = NULL; #endif if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) { ASSERT(!secondary_pipe->bottom_pipe); secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe; secondary_pipe->bottom_pipe->top_pipe = secondary_pipe; } primary_pipe->bottom_pipe = 
secondary_pipe;
    secondary_pipe->top_pipe = primary_pipe;

    ASSERT(primary_pipe->plane_state);

    resource_build_scaling_params(primary_pipe);
    resource_build_scaling_params(secondary_pipe);
}

void dcn20_populate_dml_writeback_from_context(
        struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
{
    int pipe_cnt, i;

    for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
        struct dc_writeback_info *wb_info;

        /* Check for a stream before touching any of its state. */
        if (!res_ctx->pipe_ctx[i].stream)
            continue;

        wb_info = &res_ctx->pipe_ctx[i].stream->writeback_info[0];

        /* Set writeback information */
        pipes[pipe_cnt].dout.wb_enable = (wb_info->wb_enabled == true) ? 1 : 0;
        pipes[pipe_cnt].dout.num_active_wb++;
        pipes[pipe_cnt].dout.wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_height;
        pipes[pipe_cnt].dout.wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_width;
        pipes[pipe_cnt].dout.wb.wb_dst_width = wb_info->dwb_params.dest_width;
        pipes[pipe_cnt].dout.wb.wb_dst_height = wb_info->dwb_params.dest_height;
        pipes[pipe_cnt].dout.wb.wb_htaps_luma = 1;
        pipes[pipe_cnt].dout.wb.wb_vtaps_luma = 1;
        pipes[pipe_cnt].dout.wb.wb_htaps_chroma = wb_info->dwb_params.scaler_taps.h_taps_c;
        pipes[pipe_cnt].dout.wb.wb_vtaps_chroma = wb_info->dwb_params.scaler_taps.v_taps_c;
        pipes[pipe_cnt].dout.wb.wb_hratio = 1.0;
        pipes[pipe_cnt].dout.wb.wb_vratio = 1.0;
        if (wb_info->dwb_params.out_format == dwb_scaler_mode_yuv420) {
            if (wb_info->dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
                pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_8;
            else
                pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_10;
        } else
            pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_444_32;

        pipe_cnt++;
    }
}

int dcn20_populate_dml_pipes_from_context(
        struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
{
    int pipe_cnt, i;
    bool synchronized_vblank = true;

    for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) {
        if (!res_ctx->pipe_ctx[i].stream)
            continue;

        if (pipe_cnt < 0) {
            pipe_cnt = i;
            continue;
        }
        if (!resource_are_streams_timing_synchronizable(
                res_ctx->pipe_ctx[pipe_cnt].stream,
                res_ctx->pipe_ctx[i].stream)) {
            synchronized_vblank = false;
            break;
        }
    }

    for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
        struct dc_crtc_timing *timing;
        int output_bpc;

        /* Check for a stream before touching any of its state. */
        if (!res_ctx->pipe_ctx[i].stream)
            continue;

        timing = &res_ctx->pipe_ctx[i].stream->timing;
        /* todo:
        pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
        pipes[pipe_cnt].pipe.src.dcc = 0;
        pipes[pipe_cnt].pipe.src.vm = 0;*/

#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
        pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
        /* todo: rotation?*/
        pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
#endif
        if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) {
            pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true;
            /* 1/2 vblank */
            pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active =
                (timing->v_total - timing->v_addressable
                    - timing->v_border_top - timing->v_border_bottom) / 2;
            /* 36 bytes dp, 32 hdmi */
            pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes =
                dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ?
36 : 32; } pipes[pipe_cnt].pipe.src.dcc = false; pipes[pipe_cnt].pipe.src.dcc_rate = 1; pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank; pipes[pipe_cnt].pipe.dest.hblank_start = timing->h_total - timing->h_front_porch; pipes[pipe_cnt].pipe.dest.hblank_end = pipes[pipe_cnt].pipe.dest.hblank_start - timing->h_addressable - timing->h_border_left - timing->h_border_right; pipes[pipe_cnt].pipe.dest.vblank_start = timing->v_total - timing->v_front_porch; pipes[pipe_cnt].pipe.dest.vblank_end = pipes[pipe_cnt].pipe.dest.vblank_start - timing->v_addressable - timing->v_border_top - timing->v_border_bottom; pipes[pipe_cnt].pipe.dest.htotal = timing->h_total; pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total; pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable; pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable; pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE; pipes[pipe_cnt].pipe.dest.pixel_rate_mhz = timing->pix_clk_100hz/10000.0; if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) pipes[pipe_cnt].pipe.dest.pixel_rate_mhz *= 2; pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst; pipes[pipe_cnt].dout.dp_lanes = 4; pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min; pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max; pipes[pipe_cnt].pipe.dest.odm_combine = res_ctx->pipe_ctx[i].prev_odm_pipe || res_ctx->pipe_ctx[i].next_odm_pipe; pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx; if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == res_ctx->pipe_ctx[i].plane_state) pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].top_pipe->pipe_idx; else if (res_ctx->pipe_ctx[i].prev_odm_pipe) { struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].prev_odm_pipe; while (first_pipe->prev_odm_pipe) first_pipe = first_pipe->prev_odm_pipe; pipes[pipe_cnt].pipe.src.hsplit_grp = first_pipe->pipe_idx; } switch (res_ctx->pipe_ctx[i].stream->signal) { case SIGNAL_TYPE_DISPLAY_PORT_MST: case SIGNAL_TYPE_DISPLAY_PORT: pipes[pipe_cnt].dout.output_type = dm_dp; break; case SIGNAL_TYPE_EDP: pipes[pipe_cnt].dout.output_type = dm_edp; break; case SIGNAL_TYPE_HDMI_TYPE_A: case SIGNAL_TYPE_DVI_SINGLE_LINK: case SIGNAL_TYPE_DVI_DUAL_LINK: pipes[pipe_cnt].dout.output_type = dm_hdmi; break; default: /* In case there is no signal, set dp with 4 lanes to allow max config */ pipes[pipe_cnt].dout.output_type = dm_dp; pipes[pipe_cnt].dout.dp_lanes = 4; } switch (res_ctx->pipe_ctx[i].stream->timing.display_color_depth) { case COLOR_DEPTH_666: output_bpc = 6; break; case COLOR_DEPTH_888: output_bpc = 8; break; case COLOR_DEPTH_101010: output_bpc = 10; break; case COLOR_DEPTH_121212: output_bpc = 12; break; case COLOR_DEPTH_141414: output_bpc = 14; break; case COLOR_DEPTH_161616: output_bpc = 16; break; #ifdef CONFIG_DRM_AMD_DC_DCN2_0 case COLOR_DEPTH_999: output_bpc = 9; break; case COLOR_DEPTH_111111: output_bpc = 11; break; #endif default: output_bpc = 8; break; } switch (res_ctx->pipe_ctx[i].stream->timing.pixel_encoding) { case PIXEL_ENCODING_RGB: case PIXEL_ENCODING_YCBCR444: pipes[pipe_cnt].dout.output_format = dm_444; pipes[pipe_cnt].dout.output_bpp = output_bpc * 3; break; case PIXEL_ENCODING_YCBCR420: pipes[pipe_cnt].dout.output_format = dm_420; pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3) / 2; break; case PIXEL_ENCODING_YCBCR422: if (true) /* todo */ pipes[pipe_cnt].dout.output_format = dm_s422; else 
pipes[pipe_cnt].dout.output_format = dm_n422; pipes[pipe_cnt].dout.output_bpp = output_bpc * 2; break; default: pipes[pipe_cnt].dout.output_format = dm_444; pipes[pipe_cnt].dout.output_bpp = output_bpc * 3; } /* todo: default max for now, until there is logic reflecting this in dc*/ pipes[pipe_cnt].dout.output_bpc = 12; /* * Use max cursor settings for calculations to minimize * bw calculations due to cursor on/off */ pipes[pipe_cnt].pipe.src.num_cursors = 2; pipes[pipe_cnt].pipe.src.cur0_src_width = 256; pipes[pipe_cnt].pipe.src.cur0_bpp = dm_cur_32bit; pipes[pipe_cnt].pipe.src.cur1_src_width = 256; pipes[pipe_cnt].pipe.src.cur1_bpp = dm_cur_32bit; if (!res_ctx->pipe_ctx[i].plane_state) { pipes[pipe_cnt].pipe.src.source_scan = dm_horz; pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_linear; pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile; pipes[pipe_cnt].pipe.src.viewport_width = timing->h_addressable; if (pipes[pipe_cnt].pipe.src.viewport_width > 1920) pipes[pipe_cnt].pipe.src.viewport_width = 1920; pipes[pipe_cnt].pipe.src.viewport_height = timing->v_addressable; if (pipes[pipe_cnt].pipe.src.viewport_height > 1080) pipes[pipe_cnt].pipe.src.viewport_height = 1080; pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */ pipes[pipe_cnt].pipe.src.source_format = dm_444_32; pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/ pipes[pipe_cnt].pipe.dest.recout_height = pipes[pipe_cnt].pipe.src.viewport_height; /*vp_height/vratio*/ pipes[pipe_cnt].pipe.dest.full_recout_width = pipes[pipe_cnt].pipe.dest.recout_width; /*when is_hsplit != 1*/ pipes[pipe_cnt].pipe.dest.full_recout_height = pipes[pipe_cnt].pipe.dest.recout_height; /*when is_hsplit != 1*/ pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16; pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = 1.0; pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = 1.0; pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable = 0; /*Lb only or Full scl*/ pipes[pipe_cnt].pipe.scale_taps.htaps = 1; pipes[pipe_cnt].pipe.scale_taps.vtaps = 1; pipes[pipe_cnt].pipe.src.is_hsplit = 0; pipes[pipe_cnt].pipe.dest.odm_combine = 0; pipes[pipe_cnt].pipe.dest.vtotal_min = timing->v_total; pipes[pipe_cnt].pipe.dest.vtotal_max = timing->v_total; } else { struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state; struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data; pipes[pipe_cnt].pipe.src.immediate_flip = pln->flip_immediate; pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln) || (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln); pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90 || pln->rotation == ROTATION_ANGLE_270 ? 
dm_vert : dm_horz; pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y; pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y; pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport.width; pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width; pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height; pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height; if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch; pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.chroma_pitch; pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.meta_pitch; pipes[pipe_cnt].pipe.src.meta_pitch_c = pln->dcc.meta_pitch_c; } else { pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch; pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.meta_pitch; } pipes[pipe_cnt].pipe.src.dcc = pln->dcc.enable; pipes[pipe_cnt].pipe.dest.recout_width = scl->recout.width; pipes[pipe_cnt].pipe.dest.recout_height = scl->recout.height; pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width; pipes[pipe_cnt].pipe.dest.full_recout_height = scl->recout.height; if (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln) { pipes[pipe_cnt].pipe.dest.full_recout_width += res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.width; pipes[pipe_cnt].pipe.dest.full_recout_height += res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.height; } else if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln) { pipes[pipe_cnt].pipe.dest.full_recout_width += res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.width; pipes[pipe_cnt].pipe.dest.full_recout_height += res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.height; } pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16; pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32); pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32); pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32); pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32); pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable = scl->ratios.vert.value != dc_fixpt_one.value || scl->ratios.horz.value != dc_fixpt_one.value || scl->ratios.vert_c.value != dc_fixpt_one.value || scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/ || dc->debug.always_scale; /*support always scale*/ pipes[pipe_cnt].pipe.scale_taps.htaps = scl->taps.h_taps; pipes[pipe_cnt].pipe.scale_taps.htaps_c = scl->taps.h_taps_c; pipes[pipe_cnt].pipe.scale_taps.vtaps = scl->taps.v_taps; pipes[pipe_cnt].pipe.scale_taps.vtaps_c = scl->taps.v_taps_c; pipes[pipe_cnt].pipe.src.macro_tile_size = swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle); swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle, &pipes[pipe_cnt].pipe.src.sw_mode); switch (pln->format) { case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: pipes[pipe_cnt].pipe.src.source_format = dm_420_8; break; case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: pipes[pipe_cnt].pipe.src.source_format = dm_420_10; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: pipes[pipe_cnt].pipe.src.source_format = dm_444_64; break; case 
SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
            case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
                pipes[pipe_cnt].pipe.src.source_format = dm_444_16;
                break;
            case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
                pipes[pipe_cnt].pipe.src.source_format = dm_444_8;
                break;
            default:
                pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
                break;
            }
        }

        pipe_cnt++;
    }

    /* populate writeback information */
    dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes);

    return pipe_cnt;
}

unsigned int dcn20_calc_max_scaled_time(
        unsigned int time_per_pixel,
        enum mmhubbub_wbif_mode mode,
        unsigned int urgent_watermark)
{
    unsigned int time_per_byte = 0;
    unsigned int total_y_free_entry = 0x200; /* two memory pieces for luma */
    unsigned int total_c_free_entry = 0x140; /* two memory pieces for chroma */
    unsigned int small_free_entry, max_free_entry;
    unsigned int buf_lh_capability;
    unsigned int max_scaled_time;

    if (mode == PACKED_444) /* packed mode */
        time_per_byte = time_per_pixel/4;
    else if (mode == PLANAR_420_8BPC)
        time_per_byte = time_per_pixel;
    else if (mode == PLANAR_420_10BPC) /* p010 */
        time_per_byte = time_per_pixel * 819/1024;

    if (time_per_byte == 0)
        time_per_byte = 1;

    small_free_entry = (total_y_free_entry > total_c_free_entry) ? total_c_free_entry : total_y_free_entry;
    max_free_entry = (mode == PACKED_444) ? total_y_free_entry + total_c_free_entry : small_free_entry;
    buf_lh_capability = max_free_entry*time_per_byte*32/16; /* value carries a 4-bit fraction */
    max_scaled_time = buf_lh_capability - urgent_watermark;

    return max_scaled_time;
}

void dcn20_set_mcif_arb_params(
        struct dc *dc,
        struct dc_state *context,
        display_e2e_pipe_params_st *pipes,
        int pipe_cnt)
{
    enum mmhubbub_wbif_mode wbif_mode;
    struct mcif_arb_params *wb_arb_params;
    int i, j, k, dwb_pipe;

    /* Writeback MCIF_WB arbitration parameters */
    dwb_pipe = 0;
    for (i = 0; i < dc->res_pool->pipe_count; i++) {
        if (!context->res_ctx.pipe_ctx[i].stream)
            continue;

        for (j = 0; j < MAX_DWB_PIPES; j++) {
            if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].wb_enabled == false)
                continue;

            //wb_arb_params = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j].mcif_arb_params;
            wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[dwb_pipe];

            if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.out_format == dwb_scaler_mode_yuv420) {
                if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
                    wbif_mode = PLANAR_420_8BPC;
                else
                    wbif_mode = PLANAR_420_10BPC;
            } else
                wbif_mode = PACKED_444;

            for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) {
                wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
                wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
            }
            wb_arb_params->time_per_pixel = 16.0 / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* 4-bit fraction, ms */
            wb_arb_params->slice_lines = 32;
            wb_arb_params->arbitration_slice = 2;
            wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel,
                    wbif_mode,
                    wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */

            dwb_pipe++;

            if (dwb_pipe >= MAX_DWB_PIPES)
                return;
        }
        if (dwb_pipe >= MAX_DWB_PIPES)
            return;
    }
}

#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
{
    int i;

    /* Validate DSC config, dsc count validation is already done */
    for (i = 0; i < dc->res_pool->pipe_count; i++) {
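        /*
         * Each ODM segment drives its own OPP/DSC, so the picture width and the
         * configured slice count are divided across opp_cnt before the DSC block
         * is asked whether it can support the resulting per-segment configuration.
         */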
struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i]; struct dc_stream_state *stream = pipe_ctx->stream; struct dsc_config dsc_cfg; struct pipe_ctx *odm_pipe; int opp_cnt = 1; for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) opp_cnt++; /* Only need to validate top pipe */ if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC) continue; dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg)) return false; } return true; } #endif static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, struct resource_context *res_ctx, const struct resource_pool *pool, const struct pipe_ctx *primary_pipe) { struct pipe_ctx *secondary_pipe = NULL; if (dc && primary_pipe) { int j; int preferred_pipe_idx = 0; /* first check the prev dc state: * if this primary pipe has a bottom pipe in prev. state * and if the bottom pipe is still available (which it should be), * pick that pipe as secondary * Same logic applies for ODM pipes. Since mpo is not allowed with odm * check in else case. */ if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) { preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx; if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; secondary_pipe->pipe_idx = preferred_pipe_idx; } } else if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) { preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx; if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; secondary_pipe->pipe_idx = preferred_pipe_idx; } } /* * if this primary pipe does not have a bottom pipe in prev. state * start backward and find a pipe that did not used to be a bottom pipe in * prev. dc state. This way we make sure we keep the same assignment as * last state and will not have to reprogram every pipe */ if (secondary_pipe == NULL) { for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) { if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL) { preferred_pipe_idx = j; if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; secondary_pipe->pipe_idx = preferred_pipe_idx; break; } } } } /* * We should never hit this assert unless assignments are shuffled around * if this happens we will prob. 
hit a vsync tdr */ ASSERT(secondary_pipe); /* * search backwards for the second pipe to keep pipe * assignment more consistent */ if (secondary_pipe == NULL) { for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) { preferred_pipe_idx = j; if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; secondary_pipe->pipe_idx = preferred_pipe_idx; break; } } } } return secondary_pipe; } bool dcn20_fast_validate_bw( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int *pipe_cnt_out, int *pipe_split_from, int *vlevel_out) { bool out = false; int pipe_cnt, i, pipe_idx, vlevel, vlevel_unsplit; bool odm_capable = context->bw_ctx.dml.ip.odm_capable; bool force_split = false; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool failed_non_odm_dsc = false; #endif int split_threshold = dc->res_pool->pipe_count / 2; bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC; ASSERT(pipes); if (!pipes) return false; /* merge previously split odm pipes since mode support needs to make the decision */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *odm_pipe = pipe->next_odm_pipe; if (pipe->prev_odm_pipe) continue; pipe->next_odm_pipe = NULL; while (odm_pipe) { struct pipe_ctx *next_odm_pipe = odm_pipe->next_odm_pipe; odm_pipe->plane_state = NULL; odm_pipe->stream = NULL; odm_pipe->top_pipe = NULL; odm_pipe->bottom_pipe = NULL; odm_pipe->prev_odm_pipe = NULL; odm_pipe->next_odm_pipe = NULL; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (odm_pipe->stream_res.dsc) release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc); #endif /* Clear plane_res and stream_res */ memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res)); memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res)); odm_pipe = next_odm_pipe; } if (pipe->plane_state) resource_build_scaling_params(pipe); } /* merge previously mpc split pipes since mode support needs to make the decision */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe; if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) continue; pipe->bottom_pipe = hsplit_pipe->bottom_pipe; if (hsplit_pipe->bottom_pipe) hsplit_pipe->bottom_pipe->top_pipe = pipe; hsplit_pipe->plane_state = NULL; hsplit_pipe->stream = NULL; hsplit_pipe->top_pipe = NULL; hsplit_pipe->bottom_pipe = NULL; /* Clear plane_res and stream_res */ memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res)); memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res)); if (pipe->plane_state) resource_build_scaling_params(pipe); } if (dc->res_pool->funcs->populate_dml_pipes) pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, &context->res_ctx, pipes); else pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, &context->res_ctx, pipes); *pipe_cnt_out = pipe_cnt; if (!pipe_cnt) { out = true; goto validate_out; } context->bw_ctx.dml.ip.odm_capable = 0; vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); context->bw_ctx.dml.ip.odm_capable = odm_capable; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* 1 dsc per stream dsc validation */ if (vlevel <= context->bw_ctx.dml.soc.num_states) if (!dcn20_validate_dsc(dc, context)) { failed_non_odm_dsc = true; vlevel = context->bw_ctx.dml.soc.num_states + 1; } #endif if (vlevel > context->bw_ctx.dml.soc.num_states && odm_capable) vlevel = 
dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); if (vlevel > context->bw_ctx.dml.soc.num_states) goto validate_fail; if ((context->stream_count > split_threshold && dc->current_state->stream_count <= split_threshold) || (context->stream_count <= split_threshold && dc->current_state->stream_count > split_threshold)) context->commit_hints.full_update_needed = true; /*initialize pipe_just_split_from to invalid idx*/ for (i = 0; i < MAX_PIPES; i++) pipe_split_from[i] = -1; /* Single display only conditionals get set here */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; bool exit_loop = false; if (!pipe->stream || pipe->top_pipe) continue; if (dc->debug.force_single_disp_pipe_split) { if (!force_split) force_split = true; else { force_split = false; exit_loop = true; } } if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) { if (avoid_split) avoid_split = false; else { avoid_split = true; exit_loop = true; } } if (exit_loop) break; } if (context->stream_count > split_threshold) avoid_split = true; vlevel_unsplit = vlevel; for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; for (; vlevel_unsplit <= context->bw_ctx.dml.soc.num_states; vlevel_unsplit++) if (context->bw_ctx.dml.vba.NoOfDPP[vlevel_unsplit][0][pipe_idx] == 1) break; pipe_idx++; } for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe; bool need_split = true; bool need_split3d; if (!pipe->stream || pipe_split_from[i] >= 0) continue; pipe_idx++; if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) { force_split = true; context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = true; context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true; } if (force_split && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1) context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2; if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); ASSERT(hsplit_pipe); if (!dcn20_split_stream_for_odm( &context->res_ctx, dc->res_pool, pipe, hsplit_pipe)) goto validate_fail; pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx; dcn20_build_mapped_resource(dc, context, pipe->stream); } if (!pipe->plane_state) continue; /* Skip 2nd half of already split pipe */ if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state) continue; need_split3d = ((pipe->stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE || pipe->stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) && (pipe->stream->timing.timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM || pipe->stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE)); if (avoid_split && vlevel_unsplit <= context->bw_ctx.dml.soc.num_states && !force_split && !need_split3d) { need_split = false; vlevel = vlevel_unsplit; context->bw_ctx.dml.vba.maxMpcComb = 0; } else need_split = context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 2; /* We do not support mpo + odm at the moment */ if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) goto validate_fail; if (need_split3d || need_split || force_split) { if (!hsplit_pipe || 
hsplit_pipe->plane_state != pipe->plane_state) { /* pipe not split previously needs split */ hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); ASSERT(hsplit_pipe || force_split); if (!hsplit_pipe) continue; if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { if (!dcn20_split_stream_for_odm( &context->res_ctx, dc->res_pool, pipe, hsplit_pipe)) goto validate_fail; } else dcn20_split_stream_for_mpc( &context->res_ctx, dc->res_pool, pipe, hsplit_pipe); pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx; } } else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) { /* merge should already have been done */ ASSERT(0); } } #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Actual dsc count per stream dsc validation*/ if (failed_non_odm_dsc && !dcn20_validate_dsc(dc, context)) { context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE; goto validate_fail; } #endif *vlevel_out = vlevel; out = true; goto validate_out; validate_fail: out = false; validate_out: return out; } void dcn20_calculate_wm( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int *out_pipe_cnt, int *pipe_split_from, int vlevel) { int pipe_cnt, i, pipe_idx; for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.vba.RequiredDISPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; if (pipe_split_from[i] < 0) { pipes[pipe_cnt].clks_cfg.dppclk_mhz = context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx]; if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx) pipes[pipe_cnt].pipe.dest.odm_combine = context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx]; else pipes[pipe_cnt].pipe.dest.odm_combine = 0; pipe_idx++; } else { pipes[pipe_cnt].clks_cfg.dppclk_mhz = context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]]; if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i]) pipes[pipe_cnt].pipe.dest.odm_combine = context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_split_from[i]]; else pipes[pipe_cnt].pipe.dest.odm_combine = 0; } if (dc->config.forced_clocks) { pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; pipes[pipe_cnt].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; } if (dc->debug.min_disp_clk_khz > pipes[pipe_cnt].clks_cfg.dispclk_mhz * 1000) pipes[pipe_cnt].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; if (dc->debug.min_dpp_clk_khz > pipes[pipe_cnt].clks_cfg.dppclk_mhz * 1000) pipes[pipe_cnt].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; pipe_cnt++; } if (pipe_cnt != pipe_idx) { if (dc->res_pool->funcs->populate_dml_pipes) pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, &context->res_ctx, pipes); else pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, &context->res_ctx, pipes); } *out_pipe_cnt = pipe_cnt; pipes[0].clks_cfg.voltage = vlevel; pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz; pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; /* only pipe 0 is read for voltage and dcf/soc clocks */ if (vlevel < 1) { pipes[0].clks_cfg.voltage = 1; 
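            /*
             * Watermark set B is evaluated with the voltage floor raised to at
             * least level 1; sets C and D below raise the floor further, and
             * set A is recomputed at the actually selected level at the end.
             */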
pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].dcfclk_mhz; pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].socclk_mhz; } context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; if (vlevel < 2) { pipes[0].clks_cfg.voltage = 2; pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz; pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz; } context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; if (vlevel < 3) { pipes[0].clks_cfg.voltage = 3; pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz; pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz; } context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; pipes[0].clks_cfg.voltage = vlevel; pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz; pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; } void dcn20_calculate_dlg_params( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st 
*pipes, int pipe_cnt, int vlevel) { int i, j, pipe_idx, pipe_idx_unsplit; bool visited[MAX_PIPES] = { 0 }; /* Writeback MCIF_WB arbitration parameters */ dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt); context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000; context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000; context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000; context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16; context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000; context->bw_ctx.bw.dcn.clk.fclk_khz = 0; context->bw_ctx.bw.dcn.clk.p_state_change_support = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != dm_dram_clock_change_unsupported; context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; /* * An artifact of dml pipe split/odm is that pipes get merged back together for * calculation. Therefore we need to only extract for first pipe in ascending index order * and copy into the other split half. */ for (i = 0, pipe_idx = 0, pipe_idx_unsplit = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; if (!visited[pipe_idx]) { display_pipe_source_params_st *src = &pipes[pipe_idx_unsplit].pipe.src; display_pipe_dest_params_st *dst = &pipes[pipe_idx_unsplit].pipe.dest; dst->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit]; dst->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit]; dst->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit]; dst->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit]; /* * j iterates inside pipes array, unlike i which iterates inside * pipe_ctx array */ if (src->is_hsplit) for (j = pipe_idx + 1; j < pipe_cnt; j++) { display_pipe_source_params_st *src_j = &pipes[j].pipe.src; display_pipe_dest_params_st *dst_j = &pipes[j].pipe.dest; if (src_j->is_hsplit && !visited[j] && src->hsplit_grp == src_j->hsplit_grp) { dst_j->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit]; dst_j->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit]; dst_j->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit]; dst_j->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit]; visited[j] = true; } } visited[pipe_idx] = true; pipe_idx_unsplit++; } pipe_idx++; } for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; ASSERT(visited[pipe_idx]); context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest; pipe_idx++; } /*save a original dppclock copy*/ context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz; context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz; context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz * 1000; context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000; for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { bool cstate_en = 
context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2; if (!context->res_ctx.pipe_ctx[i].stream) continue; context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml, &context->res_ctx.pipe_ctx[i].dlg_regs, &context->res_ctx.pipe_ctx[i].ttu_regs, pipes, pipe_cnt, pipe_idx, cstate_en, context->bw_ctx.bw.dcn.clk.p_state_change_support, false, false, false); context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml, &context->res_ctx.pipe_ctx[i].rq_regs, pipes[pipe_idx].pipe); pipe_idx++; } } static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context, bool fast_validate) { bool out = false; BW_VAL_TRACE_SETUP(); int vlevel = 0; int pipe_split_from[MAX_PIPES]; int pipe_cnt = 0; display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); DC_LOGGER_INIT(dc->ctx->logger); BW_VAL_TRACE_COUNT(); out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel); if (pipe_cnt == 0) goto validate_out; if (!out) goto validate_fail; BW_VAL_TRACE_END_VOLTAGE_LEVEL(); if (fast_validate) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel); dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); BW_VAL_TRACE_END_WATERMARKS(); goto validate_out; validate_fail: DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); BW_VAL_TRACE_SKIP(fail); out = false; validate_out: kfree(pipes); BW_VAL_TRACE_FINISH(); return out; } bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate) { bool voltage_supported = false; bool full_pstate_supported = false; bool dummy_pstate_supported = false; double p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us; if (fast_validate) return dcn20_validate_bandwidth_internal(dc, context, true); // Best case, we support full UCLK switch latency voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false); full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support; if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 || (voltage_supported && full_pstate_supported)) { context->bw_ctx.bw.dcn.clk.p_state_change_support = true; goto restore_dml_state; } // Fallback: Try to only support G6 temperature read latency context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us; voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false); dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support; if (voltage_supported && dummy_pstate_supported) { context->bw_ctx.bw.dcn.clk.p_state_change_support = false; goto restore_dml_state; } // ERROR: fallback is supposed to always work. 
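    /*
     * There is no further fallback past the dummy p-state latency; make the
     * failure loud in debug builds before restoring the original DML state below.
     */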
ASSERT(false); restore_dml_state: memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us; return voltage_supported; } struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer( struct dc_state *state, const struct resource_pool *pool, struct dc_stream_state *stream) { struct resource_context *res_ctx = &state->res_ctx; struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream); struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe); if (!head_pipe) ASSERT(0); if (!idle_pipe) return NULL; idle_pipe->stream = head_pipe->stream; idle_pipe->stream_res.tg = head_pipe->stream_res.tg; idle_pipe->stream_res.opp = head_pipe->stream_res.opp; idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx]; idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx]; idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx]; idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst; return idle_pipe; } bool dcn20_get_dcc_compression_cap(const struct dc *dc, const struct dc_dcc_surface_param *input, struct dc_surface_dcc_cap *output) { return dc->res_pool->hubbub->funcs->get_dcc_compression_cap( dc->res_pool->hubbub, input, output); } static void dcn20_destroy_resource_pool(struct resource_pool **pool) { struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool); destruct(dcn20_pool); kfree(dcn20_pool); *pool = NULL; } static struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn20_get_dcc_compression_cap }; enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state) { enum dc_status result = DC_OK; enum surface_pixel_format surf_pix_format = plane_state->format; unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format); enum swizzle_mode_values swizzle = DC_SW_LINEAR; if (bpp == 64) swizzle = DC_SW_64KB_D; else swizzle = DC_SW_64KB_S; plane_state->tiling_info.gfx9.swizzle = swizzle; return result; } static struct resource_funcs dcn20_res_pool_funcs = { .destroy = dcn20_destroy_resource_pool, .link_enc_create = dcn20_link_encoder_create, .validate_bandwidth = dcn20_validate_bandwidth, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, .add_stream_to_ctx = dcn20_add_stream_to_ctx, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context, .get_default_swizzle_mode = dcn20_get_default_swizzle_mode, .set_mcif_arb_params = dcn20_set_mcif_arb_params, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link }; bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t pipe_count = pool->res_cap->num_dwb; ASSERT(pipe_count > 0); for (i = 0; i < pipe_count; i++) { struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc), GFP_KERNEL); if (!dwbc20) { dm_error("DC: failed to create dwbc20!\n"); return false; } dcn20_dwbc_construct(dwbc20, ctx, &dwbc20_regs[i], &dwbc20_shift, &dwbc20_mask, i); pool->dwbc[i] = &dwbc20->base; } return true; } bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t pipe_count = pool->res_cap->num_dwb; ASSERT(pipe_count > 0); for (i = 0; i < pipe_count; i++) { struct dcn20_mmhubbub *mcif_wb20 = kzalloc(sizeof(struct dcn20_mmhubbub), GFP_KERNEL); if (!mcif_wb20) { dm_error("DC: failed to create mcif_wb20!\n"); return false; } dcn20_mmhubbub_construct(mcif_wb20, ctx, &mcif_wb20_regs[i], 
&mcif_wb20_shift, &mcif_wb20_mask, i); pool->mcif_wb[i] = &mcif_wb20->base; } return true; } struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx) { struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL); if (!pp_smu) return pp_smu; dm_pp_get_funcs(ctx, pp_smu); if (pp_smu->ctx.ver != PP_SMU_VER_NV) pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs)); return pp_smu; } void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu) { if (pp_smu && *pp_smu) { kfree(*pp_smu); *pp_smu = NULL; } } static void cap_soc_clocks( struct _vcs_dpi_soc_bounding_box_st *bb, struct pp_smu_nv_clock_table max_clocks) { int i; // First pass - cap all clocks higher than the reported max for (i = 0; i < bb->num_states; i++) { if ((bb->clock_limits[i].dcfclk_mhz > (max_clocks.dcfClockInKhz / 1000)) && max_clocks.dcfClockInKhz != 0) bb->clock_limits[i].dcfclk_mhz = (max_clocks.dcfClockInKhz / 1000); if ((bb->clock_limits[i].dram_speed_mts > (max_clocks.uClockInKhz / 1000) * 16) && max_clocks.uClockInKhz != 0) bb->clock_limits[i].dram_speed_mts = (max_clocks.uClockInKhz / 1000) * 16; if ((bb->clock_limits[i].fabricclk_mhz > (max_clocks.fabricClockInKhz / 1000)) && max_clocks.fabricClockInKhz != 0) bb->clock_limits[i].fabricclk_mhz = (max_clocks.fabricClockInKhz / 1000); if ((bb->clock_limits[i].dispclk_mhz > (max_clocks.displayClockInKhz / 1000)) && max_clocks.displayClockInKhz != 0) bb->clock_limits[i].dispclk_mhz = (max_clocks.displayClockInKhz / 1000); if ((bb->clock_limits[i].dppclk_mhz > (max_clocks.dppClockInKhz / 1000)) && max_clocks.dppClockInKhz != 0) bb->clock_limits[i].dppclk_mhz = (max_clocks.dppClockInKhz / 1000); if ((bb->clock_limits[i].phyclk_mhz > (max_clocks.phyClockInKhz / 1000)) && max_clocks.phyClockInKhz != 0) bb->clock_limits[i].phyclk_mhz = (max_clocks.phyClockInKhz / 1000); if ((bb->clock_limits[i].socclk_mhz > (max_clocks.socClockInKhz / 1000)) && max_clocks.socClockInKhz != 0) bb->clock_limits[i].socclk_mhz = (max_clocks.socClockInKhz / 1000); if ((bb->clock_limits[i].dscclk_mhz > (max_clocks.dscClockInKhz / 1000)) && max_clocks.dscClockInKhz != 0) bb->clock_limits[i].dscclk_mhz = (max_clocks.dscClockInKhz / 1000); } // Second pass - remove all duplicate clock states for (i = bb->num_states - 1; i > 1; i--) { bool duplicate = true; if (bb->clock_limits[i-1].dcfclk_mhz != bb->clock_limits[i].dcfclk_mhz) duplicate = false; if (bb->clock_limits[i-1].dispclk_mhz != bb->clock_limits[i].dispclk_mhz) duplicate = false; if (bb->clock_limits[i-1].dppclk_mhz != bb->clock_limits[i].dppclk_mhz) duplicate = false; if (bb->clock_limits[i-1].dram_speed_mts != bb->clock_limits[i].dram_speed_mts) duplicate = false; if (bb->clock_limits[i-1].dscclk_mhz != bb->clock_limits[i].dscclk_mhz) duplicate = false; if (bb->clock_limits[i-1].fabricclk_mhz != bb->clock_limits[i].fabricclk_mhz) duplicate = false; if (bb->clock_limits[i-1].phyclk_mhz != bb->clock_limits[i].phyclk_mhz) duplicate = false; if (bb->clock_limits[i-1].socclk_mhz != bb->clock_limits[i].socclk_mhz) duplicate = false; if (duplicate) bb->num_states--; } } static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb, struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states) { struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES] = {0}; int i; int num_calculated_states = 0; int min_dcfclk = 0; if (num_states == 0) return; if (dc->bb_overrides.min_dcfclk_mhz > 0) min_dcfclk = dc->bb_overrides.min_dcfclk_mhz; else // Accounting for SOC/DCF 
relationship, we can go as high as // 506Mhz in Vmin. We need to code 507 since SMU will round down to 506. min_dcfclk = 507; for (i = 0; i < num_states; i++) { int min_fclk_required_by_uclk; calculated_states[i].state = i; calculated_states[i].dram_speed_mts = uclk_states[i] * 16 / 1000; // FCLK:UCLK ratio is 1.08 min_fclk_required_by_uclk = mul_u64_u32_shr(BIT_ULL(32) * 1080 / 1000000, uclk_states[i], 32); calculated_states[i].fabricclk_mhz = (min_fclk_required_by_uclk < min_dcfclk) ? min_dcfclk : min_fclk_required_by_uclk; calculated_states[i].socclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->socClockInKhz / 1000) ? max_clocks->socClockInKhz / 1000 : calculated_states[i].fabricclk_mhz; calculated_states[i].dcfclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->dcfClockInKhz / 1000) ? max_clocks->dcfClockInKhz / 1000 : calculated_states[i].fabricclk_mhz; calculated_states[i].dispclk_mhz = max_clocks->displayClockInKhz / 1000; calculated_states[i].dppclk_mhz = max_clocks->displayClockInKhz / 1000; calculated_states[i].dscclk_mhz = max_clocks->displayClockInKhz / (1000 * 3); calculated_states[i].phyclk_mhz = max_clocks->phyClockInKhz / 1000; num_calculated_states++; } calculated_states[num_calculated_states - 1].socclk_mhz = max_clocks->socClockInKhz / 1000; calculated_states[num_calculated_states - 1].fabricclk_mhz = max_clocks->socClockInKhz / 1000; calculated_states[num_calculated_states - 1].dcfclk_mhz = max_clocks->dcfClockInKhz / 1000; memcpy(bb->clock_limits, calculated_states, sizeof(bb->clock_limits)); bb->num_states = num_calculated_states; // Duplicate the last state, DML always an extra state identical to max state to work memcpy(&bb->clock_limits[num_calculated_states], &bb->clock_limits[num_calculated_states - 1], sizeof(struct _vcs_dpi_voltage_scaling_st)); bb->clock_limits[num_calculated_states].state = bb->num_states; } static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) { kernel_fpu_begin(); if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns && dc->bb_overrides.sr_exit_time_ns) { bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; } if ((int)(bb->sr_enter_plus_exit_time_us * 1000) != dc->bb_overrides.sr_enter_plus_exit_time_ns && dc->bb_overrides.sr_enter_plus_exit_time_ns) { bb->sr_enter_plus_exit_time_us = dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; } if ((int)(bb->urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns && dc->bb_overrides.urgent_latency_ns) { bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; } if ((int)(bb->dram_clock_change_latency_us * 1000) != dc->bb_overrides.dram_clock_change_latency_ns && dc->bb_overrides.dram_clock_change_latency_ns) { bb->dram_clock_change_latency_us = dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; } kernel_fpu_end(); } static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb( uint32_t hw_internal_rev) { if (ASICREV_IS_NAVI12_P(hw_internal_rev)) return &dcn2_0_nv12_soc; return &dcn2_0_soc; } static struct _vcs_dpi_ip_params_st *get_asic_rev_ip_params( uint32_t hw_internal_rev) { /* NV12 and NV10 */ return &dcn2_0_ip; } static enum dml_project get_dml_project_version(uint32_t hw_internal_rev) { return DML_PROJECT_NAVI10v2; } #define fixed16_to_double(x) (((double) x) / ((double) (1 << 16))) #define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x)) static bool init_soc_bounding_box(struct dc *dc, struct dcn20_resource_pool *pool) { const struct gpu_info_soc_bounding_box_v1_0 
*bb = dc->soc_bounding_box; struct _vcs_dpi_soc_bounding_box_st *loaded_bb = get_asic_rev_soc_bb(dc->ctx->asic_id.hw_internal_rev); struct _vcs_dpi_ip_params_st *loaded_ip = get_asic_rev_ip_params(dc->ctx->asic_id.hw_internal_rev); DC_LOGGER_INIT(dc->ctx->logger); if (!bb && !SOC_BOUNDING_BOX_VALID) { DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__); return false; } if (bb && !SOC_BOUNDING_BOX_VALID) { int i; dcn2_0_nv12_soc.sr_exit_time_us = fixed16_to_double_to_cpu(bb->sr_exit_time_us); dcn2_0_nv12_soc.sr_enter_plus_exit_time_us = fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us); dcn2_0_nv12_soc.urgent_latency_us = fixed16_to_double_to_cpu(bb->urgent_latency_us); dcn2_0_nv12_soc.urgent_latency_pixel_data_only_us = fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us); dcn2_0_nv12_soc.urgent_latency_pixel_mixed_with_vm_data_us = fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us); dcn2_0_nv12_soc.urgent_latency_vm_data_only_us = fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us); dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes = le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes); dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes); dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_vm_only_bytes = le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes); dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only); dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm); dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only = fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only); dcn2_0_nv12_soc.max_avg_sdp_bw_use_normal_percent = fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent); dcn2_0_nv12_soc.max_avg_dram_bw_use_normal_percent = fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent); dcn2_0_nv12_soc.writeback_latency_us = fixed16_to_double_to_cpu(bb->writeback_latency_us); dcn2_0_nv12_soc.ideal_dram_bw_after_urgent_percent = fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent); dcn2_0_nv12_soc.max_request_size_bytes = le32_to_cpu(bb->max_request_size_bytes); dcn2_0_nv12_soc.dram_channel_width_bytes = le32_to_cpu(bb->dram_channel_width_bytes); dcn2_0_nv12_soc.fabric_datapath_to_dcn_data_return_bytes = le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes); dcn2_0_nv12_soc.dcn_downspread_percent = fixed16_to_double_to_cpu(bb->dcn_downspread_percent); dcn2_0_nv12_soc.downspread_percent = fixed16_to_double_to_cpu(bb->downspread_percent); dcn2_0_nv12_soc.dram_page_open_time_ns = fixed16_to_double_to_cpu(bb->dram_page_open_time_ns); dcn2_0_nv12_soc.dram_rw_turnaround_time_ns = fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns); dcn2_0_nv12_soc.dram_return_buffer_per_channel_bytes = le32_to_cpu(bb->dram_return_buffer_per_channel_bytes); dcn2_0_nv12_soc.round_trip_ping_latency_dcfclk_cycles = le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles); dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_bytes = le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes); dcn2_0_nv12_soc.channel_interleave_bytes = le32_to_cpu(bb->channel_interleave_bytes); dcn2_0_nv12_soc.num_banks = le32_to_cpu(bb->num_banks); dcn2_0_nv12_soc.num_chans = 
le32_to_cpu(bb->num_chans); dcn2_0_nv12_soc.vmm_page_size_bytes = le32_to_cpu(bb->vmm_page_size_bytes); dcn2_0_nv12_soc.dram_clock_change_latency_us = fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us); // HACK!! Lower uclock latency switch time so we don't switch dcn2_0_nv12_soc.dram_clock_change_latency_us = 10; dcn2_0_nv12_soc.writeback_dram_clock_change_latency_us = fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us); dcn2_0_nv12_soc.return_bus_width_bytes = le32_to_cpu(bb->return_bus_width_bytes); dcn2_0_nv12_soc.dispclk_dppclk_vco_speed_mhz = le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz); dcn2_0_nv12_soc.xfc_bus_transport_time_us = le32_to_cpu(bb->xfc_bus_transport_time_us); dcn2_0_nv12_soc.xfc_xbuf_latency_tolerance_us = le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us); dcn2_0_nv12_soc.use_urgent_burst_bw = le32_to_cpu(bb->use_urgent_burst_bw); dcn2_0_nv12_soc.num_states = le32_to_cpu(bb->num_states); for (i = 0; i < dcn2_0_nv12_soc.num_states; i++) { dcn2_0_nv12_soc.clock_limits[i].state = le32_to_cpu(bb->clock_limits[i].state); dcn2_0_nv12_soc.clock_limits[i].dcfclk_mhz = fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz); dcn2_0_nv12_soc.clock_limits[i].fabricclk_mhz = fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz); dcn2_0_nv12_soc.clock_limits[i].dispclk_mhz = fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz); dcn2_0_nv12_soc.clock_limits[i].dppclk_mhz = fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz); dcn2_0_nv12_soc.clock_limits[i].phyclk_mhz = fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz); dcn2_0_nv12_soc.clock_limits[i].socclk_mhz = fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz); dcn2_0_nv12_soc.clock_limits[i].dscclk_mhz = fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz); dcn2_0_nv12_soc.clock_limits[i].dram_speed_mts = fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts); } } if (pool->base.pp_smu) { struct pp_smu_nv_clock_table max_clocks = {0}; unsigned int uclk_states[8] = {0}; unsigned int num_states = 0; enum pp_smu_status status; bool clock_limits_available = false; bool uclk_states_available = false; if (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states) { status = (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states) (&pool->base.pp_smu->nv_funcs.pp_smu, uclk_states, &num_states); uclk_states_available = (status == PP_SMU_RESULT_OK); } if (pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks) { status = (*pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks) (&pool->base.pp_smu->nv_funcs.pp_smu, &max_clocks); /* SMU cannot set DCF clock to anything equal to or higher than SOC clock */ if (max_clocks.dcfClockInKhz >= max_clocks.socClockInKhz) max_clocks.dcfClockInKhz = max_clocks.socClockInKhz - 1000; clock_limits_available = (status == PP_SMU_RESULT_OK); } if (clock_limits_available && uclk_states_available && num_states) update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states); else if (clock_limits_available) cap_soc_clocks(loaded_bb, max_clocks); } loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator; loaded_ip->max_num_dpp = pool->base.pipe_count; patch_bounding_box(dc, loaded_bb); return true; } static bool construct( uint8_t num_virtual_links, struct dc *dc, struct dcn20_resource_pool *pool) { int i; struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; struct _vcs_dpi_soc_bounding_box_st *loaded_bb = get_asic_rev_soc_bb(ctx->asic_id.hw_internal_rev); struct _vcs_dpi_ip_params_st *loaded_ip = 
get_asic_rev_ip_params(ctx->asic_id.hw_internal_rev); enum dml_project dml_project_version = get_dml_project_version(ctx->asic_id.hw_internal_rev); ctx->dc_bios->regs = &bios_regs; pool->base.funcs = &dcn20_res_pool_funcs; if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) { pool->base.res_cap = &res_cap_nv14; pool->base.pipe_count = 5; pool->base.mpcc_count = 5; } else { pool->base.res_cap = &res_cap_nv10; pool->base.pipe_count = 6; pool->base.mpcc_count = 6; } /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 256; dc->caps.dmdata_alloc_size = 2048; dc->caps.max_slave_planes = 1; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.hw_3d_lut = true; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) { dc->debug = debug_defaults_drv; } else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { pool->base.pipe_count = 4; pool->base.mpcc_count = pool->base.pipe_count; dc->debug = debug_defaults_diags; } else { dc->debug = debug_defaults_diags; } //dcn2.0x dc->work_arounds.dedcn20_305_wa = true; // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); /************************************************* * Create resources * *************************************************/ pool->base.clock_sources[DCN20_CLK_SRC_PLL0] = dcn20_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[DCN20_CLK_SRC_PLL1] = dcn20_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[DCN20_CLK_SRC_PLL2] = dcn20_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL2, &clk_src_regs[2], false); pool->base.clock_sources[DCN20_CLK_SRC_PLL3] = dcn20_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL3, &clk_src_regs[3], false); pool->base.clock_sources[DCN20_CLK_SRC_PLL4] = dcn20_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL4, &clk_src_regs[4], false); pool->base.clock_sources[DCN20_CLK_SRC_PLL5] = dcn20_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL5, &clk_src_regs[5], false); pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL; /* todo: not reuse phy_pll registers */ pool->base.dp_clock_source = dcn20_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_ID_DP_DTO, &clk_src_regs[0], true); for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto create_fail; } } pool->base.dccg = dccg2_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); if (pool->base.dccg == NULL) { dm_error("DC: failed to create dccg!\n"); BREAK_TO_DEBUGGER(); goto create_fail; } pool->base.dmcu = dcn20_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto create_fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto create_fail; } pool->base.pp_smu = dcn20_pp_smu_create(ctx); if (!init_soc_bounding_box(dc, pool)) { dm_error("DC: failed to initialize soc bounding box!\n"); BREAK_TO_DEBUGGER(); goto create_fail; } dml_init_instance(&dc->dml, 
loaded_bb, loaded_ip, dml_project_version); if (!dc->debug.disable_pplib_wm_range) { struct pp_smu_wm_range_sets ranges = {0}; int i = 0; ranges.num_reader_wm_sets = 0; if (loaded_bb->num_states == 1) { ranges.reader_wm_sets[0].wm_inst = i; ranges.reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; ranges.reader_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; ranges.reader_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; ranges.num_reader_wm_sets = 1; } else if (loaded_bb->num_states > 1) { for (i = 0; i < 4 && i < loaded_bb->num_states; i++) { ranges.reader_wm_sets[i].wm_inst = i; ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; ranges.reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0; ranges.reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16; ranges.num_reader_wm_sets = i + 1; } ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; ranges.reader_wm_sets[ranges.num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; } ranges.num_writer_wm_sets = 1; ranges.writer_wm_sets[0].wm_inst = 0; ranges.writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; ranges.writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; ranges.writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; ranges.writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ if (pool->base.pp_smu->nv_funcs.set_wm_ranges) pool->base.pp_smu->nv_funcs.set_wm_ranges(&pool->base.pp_smu->nv_funcs.pp_smu, &ranges); } init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dcn20_create(&init_data); if (!pool->base.irqs) goto create_fail; /* mem input -> ipp -> dpp -> opp -> TG */ for (i = 0; i < pool->base.pipe_count; i++) { pool->base.hubps[i] = dcn20_hubp_create(ctx, i); if (pool->base.hubps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create memory input!\n"); goto create_fail; } pool->base.ipps[i] = dcn20_ipp_create(ctx, i); if (pool->base.ipps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create input pixel processor!\n"); goto create_fail; } pool->base.dpps[i] = dcn20_dpp_create(ctx, i); if (pool->base.dpps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create dpps!\n"); goto create_fail; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dcn20_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto create_fail; } pool->base.hw_i2cs[i] = dcn20_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create hw i2c!!\n"); goto create_fail; } pool->base.sw_i2cs[i] = NULL; } for (i = 0; i < pool->base.res_cap->num_opp; i++) { pool->base.opps[i] = dcn20_opp_create(ctx, i); if (pool->base.opps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create output pixel processor!\n"); goto create_fail; } } for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { pool->base.timing_generators[i] = 
dcn20_timing_generator_create( ctx, i); if (pool->base.timing_generators[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto create_fail; } } pool->base.timing_generator_count = i; pool->base.mpc = dcn20_mpc_create(ctx); if (pool->base.mpc == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create mpc!\n"); goto create_fail; } pool->base.hubbub = dcn20_hubbub_create(ctx); if (pool->base.hubbub == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create hubbub!\n"); goto create_fail; } #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT for (i = 0; i < pool->base.res_cap->num_dsc; i++) { pool->base.dscs[i] = dcn20_dsc_create(ctx, i); if (pool->base.dscs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create display stream compressor %d!\n", i); goto create_fail; } } #endif if (!dcn20_dwbc_create(ctx, &pool->base)) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create dwbc!\n"); goto create_fail; } if (!dcn20_mmhubbub_create(ctx, &pool->base)) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create mcif_wb!\n"); goto create_fail; } if (!resource_construct(num_virtual_links, dc, &pool->base, (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? &res_create_funcs : &res_create_maximus_funcs))) goto create_fail; dcn20_hw_sequencer_construct(dc); dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; dc->cap_funcs = cap_funcs; return true; create_fail: destruct(pool); return false; } struct resource_pool *dcn20_create_resource_pool( const struct dc_init_data *init_data, struct dc *dc) { struct dcn20_resource_pool *pool = kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (construct(init_data->num_virtual_links, dc, pool)) return &pool->base; BREAK_TO_DEBUGGER(); kfree(pool); return NULL; }
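/*
 * Illustrative sketch (not part of the driver above): how update_bounding_box()
 * applies the 1.08 FCLK:UCLK ratio without floating point.  Outside of
 * kernel_fpu_begin()/kernel_fpu_end() this code cannot use FP math, so the
 * ratio is folded into a 32.32 fixed-point constant and applied with
 * mul_u64_u32_shr().  The user-space stand-in helper and the sample UCLK
 * values below are assumptions for demonstration only.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
{
	/* Stand-in for the kernel helper of the same name (include/linux/math64.h);
	 * uses the gcc/clang __int128 extension for the 64x32 widening multiply. */
	return (uint64_t)(((unsigned __int128)a * mul) >> shift);
}

int main(void)
{
	/* Example UCLK DPM states in kHz -- made-up values, not SMU data. */
	const uint32_t uclk_khz[] = { 300000, 625000, 1000000 };
	/* 1.08 / 1000 encoded as a 32.32 fixed-point constant, exactly as
	 * BIT_ULL(32) * 1080 / 1000000 in update_bounding_box() above. */
	const uint64_t ratio_q32 = (1ULL << 32) * 1080 / 1000000;
	unsigned int i;

	for (i = 0; i < sizeof(uclk_khz) / sizeof(uclk_khz[0]); i++) {
		/* min fclk (MHz) = uclk (kHz) * 1.08 / 1000, rounded down. */
		uint64_t min_fclk_mhz = mul_u64_u32_shr(ratio_q32, uclk_khz[i], 32);

		printf("uclk %7u kHz -> min fclk %4llu MHz\n",
		       uclk_khz[i], (unsigned long long)min_fclk_mhz);
	}
	return 0;
}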
/* ---- end of CrossVul sample: ./CrossVul/dataset_final_sorted/CWE-400/c/good_1273_6 ---- */

/* ---- start of CrossVul sample: crossvul-cpp_data_bad_1268_0 ---- */
// SPDX-License-Identifier: ISC /* * Copyright (c) 2007-2011 Atheros Communications Inc. * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc. * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com> */ #include <linux/module.h> #include <linux/usb.h> #include "debug.h" #include "core.h" #include "bmi.h" #include "hif.h" #include "htc.h" #include "usb.h" static void ath10k_usb_post_recv_transfers(struct ath10k *ar, struct ath10k_usb_pipe *recv_pipe); /* inlined helper functions */ static inline enum ath10k_htc_ep_id eid_from_htc_hdr(struct ath10k_htc_hdr *htc_hdr) { return (enum ath10k_htc_ep_id)htc_hdr->eid; } static inline bool is_trailer_only_msg(struct ath10k_htc_hdr *htc_hdr) { return __le16_to_cpu(htc_hdr->len) == htc_hdr->trailer_len; } /* pipe/urb operations */ static struct ath10k_urb_context * ath10k_usb_alloc_urb_from_pipe(struct ath10k_usb_pipe *pipe) { struct ath10k_urb_context *urb_context = NULL; unsigned long flags; spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); if (!list_empty(&pipe->urb_list_head)) { urb_context = list_first_entry(&pipe->urb_list_head, struct ath10k_urb_context, link); list_del(&urb_context->link); pipe->urb_cnt--; } spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags); return urb_context; } static void ath10k_usb_free_urb_to_pipe(struct ath10k_usb_pipe *pipe, struct ath10k_urb_context *urb_context) { unsigned long flags; spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); pipe->urb_cnt++; list_add(&urb_context->link, &pipe->urb_list_head); spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags); } static void ath10k_usb_cleanup_recv_urb(struct ath10k_urb_context *urb_context) { dev_kfree_skb(urb_context->skb); urb_context->skb = NULL; ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context); } static void ath10k_usb_free_pipe_resources(struct ath10k *ar, struct ath10k_usb_pipe *pipe) { struct ath10k_urb_context *urb_context; if (!pipe->ar_usb) { /* nothing allocated for this pipe */ return; } ath10k_dbg(ar, ATH10K_DBG_USB, "usb free resources lpipe %d hpipe 0x%x urbs %d avail %d\n", pipe->logical_pipe_num, pipe->usb_pipe_handle, pipe->urb_alloc, pipe->urb_cnt); if (pipe->urb_alloc != pipe->urb_cnt) { ath10k_dbg(ar, ATH10K_DBG_USB, "usb urb leak lpipe %d hpipe 0x%x urbs %d avail %d\n", pipe->logical_pipe_num, pipe->usb_pipe_handle, pipe->urb_alloc, pipe->urb_cnt); } for (;;) { urb_context = ath10k_usb_alloc_urb_from_pipe(pipe); if (!urb_context) break; kfree(urb_context); } } static void ath10k_usb_cleanup_pipe_resources(struct ath10k *ar) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); int i; for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) ath10k_usb_free_pipe_resources(ar, &ar_usb->pipes[i]); } /* hif usb rx/tx completion functions */ static void ath10k_usb_recv_complete(struct urb *urb) { struct ath10k_urb_context *urb_context = urb->context; struct ath10k_usb_pipe *pipe = urb_context->pipe; struct ath10k *ar = pipe->ar_usb->ar; struct sk_buff *skb; int status = 0; ath10k_dbg(ar, ATH10K_DBG_USB_BULK, "usb recv pipe %d stat %d len %d urb 0x%pK\n", pipe->logical_pipe_num, urb->status, urb->actual_length, urb); if (urb->status != 0) { status = -EIO; switch (urb->status) { case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* no need to spew these errors when device * removed or urb killed due to driver shutdown */ status = -ECANCELED; break; default: ath10k_dbg(ar, ATH10K_DBG_USB_BULK, "usb recv pipe %d ep 0x%2.2x failed: %d\n", pipe->logical_pipe_num, pipe->ep_address, urb->status); break; } goto cleanup_recv_urb; } if (urb->actual_length == 0) 
goto cleanup_recv_urb; skb = urb_context->skb; /* we are going to pass it up */ urb_context->skb = NULL; skb_put(skb, urb->actual_length); /* note: queue implements a lock */ skb_queue_tail(&pipe->io_comp_queue, skb); schedule_work(&pipe->io_complete_work); cleanup_recv_urb: ath10k_usb_cleanup_recv_urb(urb_context); if (status == 0 && pipe->urb_cnt >= pipe->urb_cnt_thresh) { /* our free urbs are piling up, post more transfers */ ath10k_usb_post_recv_transfers(ar, pipe); } } static void ath10k_usb_transmit_complete(struct urb *urb) { struct ath10k_urb_context *urb_context = urb->context; struct ath10k_usb_pipe *pipe = urb_context->pipe; struct ath10k *ar = pipe->ar_usb->ar; struct sk_buff *skb; if (urb->status != 0) { ath10k_dbg(ar, ATH10K_DBG_USB_BULK, "pipe: %d, failed:%d\n", pipe->logical_pipe_num, urb->status); } skb = urb_context->skb; urb_context->skb = NULL; ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context); /* note: queue implements a lock */ skb_queue_tail(&pipe->io_comp_queue, skb); schedule_work(&pipe->io_complete_work); } /* pipe operations */ static void ath10k_usb_post_recv_transfers(struct ath10k *ar, struct ath10k_usb_pipe *recv_pipe) { struct ath10k_urb_context *urb_context; struct urb *urb; int usb_status; for (;;) { urb_context = ath10k_usb_alloc_urb_from_pipe(recv_pipe); if (!urb_context) break; urb_context->skb = dev_alloc_skb(ATH10K_USB_RX_BUFFER_SIZE); if (!urb_context->skb) goto err; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) goto err; usb_fill_bulk_urb(urb, recv_pipe->ar_usb->udev, recv_pipe->usb_pipe_handle, urb_context->skb->data, ATH10K_USB_RX_BUFFER_SIZE, ath10k_usb_recv_complete, urb_context); ath10k_dbg(ar, ATH10K_DBG_USB_BULK, "usb bulk recv submit %d 0x%x ep 0x%2.2x len %d buf 0x%pK\n", recv_pipe->logical_pipe_num, recv_pipe->usb_pipe_handle, recv_pipe->ep_address, ATH10K_USB_RX_BUFFER_SIZE, urb_context->skb); usb_anchor_urb(urb, &recv_pipe->urb_submitted); usb_status = usb_submit_urb(urb, GFP_ATOMIC); if (usb_status) { ath10k_dbg(ar, ATH10K_DBG_USB_BULK, "usb bulk recv failed: %d\n", usb_status); usb_unanchor_urb(urb); usb_free_urb(urb); goto err; } usb_free_urb(urb); } return; err: ath10k_usb_cleanup_recv_urb(urb_context); } static void ath10k_usb_flush_all(struct ath10k *ar) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); int i; for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) { if (ar_usb->pipes[i].ar_usb) { usb_kill_anchored_urbs(&ar_usb->pipes[i].urb_submitted); cancel_work_sync(&ar_usb->pipes[i].io_complete_work); } } } static void ath10k_usb_start_recv_pipes(struct ath10k *ar) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA].urb_cnt_thresh = 1; ath10k_usb_post_recv_transfers(ar, &ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA]); } static void ath10k_usb_tx_complete(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_htc_hdr *htc_hdr; struct ath10k_htc_ep *ep; htc_hdr = (struct ath10k_htc_hdr *)skb->data; ep = &ar->htc.endpoint[htc_hdr->eid]; ath10k_htc_notify_tx_completion(ep, skb); /* The TX complete handler now owns the skb... 
*/ } static void ath10k_usb_rx_complete(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_htc *htc = &ar->htc; struct ath10k_htc_hdr *htc_hdr; enum ath10k_htc_ep_id eid; struct ath10k_htc_ep *ep; u16 payload_len; u8 *trailer; int ret; htc_hdr = (struct ath10k_htc_hdr *)skb->data; eid = eid_from_htc_hdr(htc_hdr); ep = &ar->htc.endpoint[eid]; if (ep->service_id == 0) { ath10k_warn(ar, "ep %d is not connected\n", eid); goto out_free_skb; } payload_len = le16_to_cpu(htc_hdr->len); if (!payload_len) { ath10k_warn(ar, "zero length frame received, firmware crashed?\n"); goto out_free_skb; } if (payload_len < htc_hdr->trailer_len) { ath10k_warn(ar, "malformed frame received, firmware crashed?\n"); goto out_free_skb; } if (htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT) { trailer = skb->data + sizeof(*htc_hdr) + payload_len - htc_hdr->trailer_len; ret = ath10k_htc_process_trailer(htc, trailer, htc_hdr->trailer_len, eid, NULL, NULL); if (ret) goto out_free_skb; if (is_trailer_only_msg(htc_hdr)) goto out_free_skb; /* strip off the trailer from the skb since it should not * be passed on to upper layers */ skb_trim(skb, skb->len - htc_hdr->trailer_len); } skb_pull(skb, sizeof(*htc_hdr)); ep->ep_ops.ep_rx_complete(ar, skb); /* The RX complete handler now owns the skb... */ return; out_free_skb: dev_kfree_skb(skb); } static void ath10k_usb_io_comp_work(struct work_struct *work) { struct ath10k_usb_pipe *pipe = container_of(work, struct ath10k_usb_pipe, io_complete_work); struct ath10k *ar = pipe->ar_usb->ar; struct sk_buff *skb; while ((skb = skb_dequeue(&pipe->io_comp_queue))) { if (pipe->flags & ATH10K_USB_PIPE_FLAG_TX) ath10k_usb_tx_complete(ar, skb); else ath10k_usb_rx_complete(ar, skb); } } #define ATH10K_USB_MAX_DIAG_CMD (sizeof(struct ath10k_usb_ctrl_diag_cmd_write)) #define ATH10K_USB_MAX_DIAG_RESP (sizeof(struct ath10k_usb_ctrl_diag_resp_read)) static void ath10k_usb_destroy(struct ath10k *ar) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); ath10k_usb_flush_all(ar); ath10k_usb_cleanup_pipe_resources(ar); usb_set_intfdata(ar_usb->interface, NULL); kfree(ar_usb->diag_cmd_buffer); kfree(ar_usb->diag_resp_buffer); } static int ath10k_usb_hif_start(struct ath10k *ar) { int i; struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); ath10k_usb_start_recv_pipes(ar); /* set the TX resource avail threshold for each TX pipe */ for (i = ATH10K_USB_PIPE_TX_CTRL; i <= ATH10K_USB_PIPE_TX_DATA_HP; i++) { ar_usb->pipes[i].urb_cnt_thresh = ar_usb->pipes[i].urb_alloc / 2; } return 0; } static int ath10k_usb_hif_tx_sg(struct ath10k *ar, u8 pipe_id, struct ath10k_hif_sg_item *items, int n_items) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); struct ath10k_usb_pipe *pipe = &ar_usb->pipes[pipe_id]; struct ath10k_urb_context *urb_context; struct sk_buff *skb; struct urb *urb; int ret, i; for (i = 0; i < n_items; i++) { urb_context = ath10k_usb_alloc_urb_from_pipe(pipe); if (!urb_context) { ret = -ENOMEM; goto err; } skb = items[i].transfer_context; urb_context->skb = skb; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { ret = -ENOMEM; goto err_free_urb_to_pipe; } usb_fill_bulk_urb(urb, ar_usb->udev, pipe->usb_pipe_handle, skb->data, skb->len, ath10k_usb_transmit_complete, urb_context); if (!(skb->len % pipe->max_packet_size)) { /* hit a max packet boundary on this pipe */ urb->transfer_flags |= URB_ZERO_PACKET; } usb_anchor_urb(urb, &pipe->urb_submitted); ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) { ath10k_dbg(ar, ATH10K_DBG_USB_BULK, "usb bulk transmit failed: %d\n", ret); usb_unanchor_urb(urb); ret = 
-EINVAL; goto err_free_urb_to_pipe; } usb_free_urb(urb); } return 0; err_free_urb_to_pipe: ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context); err: return ret; } static void ath10k_usb_hif_stop(struct ath10k *ar) { ath10k_usb_flush_all(ar); } static u16 ath10k_usb_hif_get_free_queue_number(struct ath10k *ar, u8 pipe_id) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); return ar_usb->pipes[pipe_id].urb_cnt; } static int ath10k_usb_submit_ctrl_out(struct ath10k *ar, u8 req, u16 value, u16 index, void *data, u32 size) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); u8 *buf = NULL; int ret; if (size > 0) { buf = kmemdup(data, size, GFP_KERNEL); if (!buf) return -ENOMEM; } /* note: if successful returns number of bytes transferred */ ret = usb_control_msg(ar_usb->udev, usb_sndctrlpipe(ar_usb->udev, 0), req, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, buf, size, 1000); if (ret < 0) { ath10k_warn(ar, "Failed to submit usb control message: %d\n", ret); kfree(buf); return ret; } kfree(buf); return 0; } static int ath10k_usb_submit_ctrl_in(struct ath10k *ar, u8 req, u16 value, u16 index, void *data, u32 size) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); u8 *buf = NULL; int ret; if (size > 0) { buf = kmalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; } /* note: if successful returns number of bytes transferred */ ret = usb_control_msg(ar_usb->udev, usb_rcvctrlpipe(ar_usb->udev, 0), req, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, buf, size, 2 * HZ); if (ret < 0) { ath10k_warn(ar, "Failed to read usb control message: %d\n", ret); kfree(buf); return ret; } memcpy((u8 *)data, buf, size); kfree(buf); return 0; } static int ath10k_usb_ctrl_msg_exchange(struct ath10k *ar, u8 req_val, u8 *req_buf, u32 req_len, u8 resp_val, u8 *resp_buf, u32 *resp_len) { int ret; /* send command */ ret = ath10k_usb_submit_ctrl_out(ar, req_val, 0, 0, req_buf, req_len); if (ret) goto err; /* get response */ if (resp_buf) { ret = ath10k_usb_submit_ctrl_in(ar, resp_val, 0, 0, resp_buf, *resp_len); if (ret) goto err; } return 0; err: return ret; } static int ath10k_usb_hif_diag_read(struct ath10k *ar, u32 address, void *buf, size_t buf_len) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); struct ath10k_usb_ctrl_diag_cmd_read *cmd; u32 resp_len; int ret; if (buf_len < sizeof(struct ath10k_usb_ctrl_diag_resp_read)) return -EINVAL; cmd = (struct ath10k_usb_ctrl_diag_cmd_read *)ar_usb->diag_cmd_buffer; memset(cmd, 0, sizeof(*cmd)); cmd->cmd = ATH10K_USB_CTRL_DIAG_CC_READ; cmd->address = cpu_to_le32(address); resp_len = sizeof(struct ath10k_usb_ctrl_diag_resp_read); ret = ath10k_usb_ctrl_msg_exchange(ar, ATH10K_USB_CONTROL_REQ_DIAG_CMD, (u8 *)cmd, sizeof(*cmd), ATH10K_USB_CONTROL_REQ_DIAG_RESP, ar_usb->diag_resp_buffer, &resp_len); if (ret) return ret; if (resp_len != sizeof(struct ath10k_usb_ctrl_diag_resp_read)) return -EMSGSIZE; memcpy(buf, ar_usb->diag_resp_buffer, sizeof(struct ath10k_usb_ctrl_diag_resp_read)); return 0; } static int ath10k_usb_hif_diag_write(struct ath10k *ar, u32 address, const void *data, int nbytes) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); struct ath10k_usb_ctrl_diag_cmd_write *cmd; int ret; if (nbytes != sizeof(cmd->value)) return -EINVAL; cmd = (struct ath10k_usb_ctrl_diag_cmd_write *)ar_usb->diag_cmd_buffer; memset(cmd, 0, sizeof(*cmd)); cmd->cmd = cpu_to_le32(ATH10K_USB_CTRL_DIAG_CC_WRITE); cmd->address = cpu_to_le32(address); memcpy(&cmd->value, data, nbytes); ret = ath10k_usb_ctrl_msg_exchange(ar, ATH10K_USB_CONTROL_REQ_DIAG_CMD, 
(u8 *)cmd, sizeof(*cmd), 0, NULL, NULL); if (ret) return ret; return 0; } static int ath10k_usb_bmi_exchange_msg(struct ath10k *ar, void *req, u32 req_len, void *resp, u32 *resp_len) { int ret; if (req) { ret = ath10k_usb_submit_ctrl_out(ar, ATH10K_USB_CONTROL_REQ_SEND_BMI_CMD, 0, 0, req, req_len); if (ret) { ath10k_warn(ar, "unable to send the bmi data to the device: %d\n", ret); return ret; } } if (resp) { ret = ath10k_usb_submit_ctrl_in(ar, ATH10K_USB_CONTROL_REQ_RECV_BMI_RESP, 0, 0, resp, *resp_len); if (ret) { ath10k_warn(ar, "Unable to read the bmi data from the device: %d\n", ret); return ret; } } return 0; } static void ath10k_usb_hif_get_default_pipe(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe) { *ul_pipe = ATH10K_USB_PIPE_TX_CTRL; *dl_pipe = ATH10K_USB_PIPE_RX_CTRL; } static int ath10k_usb_hif_map_service_to_pipe(struct ath10k *ar, u16 svc_id, u8 *ul_pipe, u8 *dl_pipe) { switch (svc_id) { case ATH10K_HTC_SVC_ID_RSVD_CTRL: case ATH10K_HTC_SVC_ID_WMI_CONTROL: *ul_pipe = ATH10K_USB_PIPE_TX_CTRL; /* due to large control packets, shift to data pipe */ *dl_pipe = ATH10K_USB_PIPE_RX_DATA; break; case ATH10K_HTC_SVC_ID_HTT_DATA_MSG: *ul_pipe = ATH10K_USB_PIPE_TX_DATA_LP; /* Disable rxdata2 directly, it will be enabled * if FW enable rxdata2 */ *dl_pipe = ATH10K_USB_PIPE_RX_DATA; break; default: return -EPERM; } return 0; } /* This op is currently only used by htc_wait_target if the HTC ready * message times out. It is not applicable for USB since there is nothing * we can do if the HTC ready message does not arrive in time. * TODO: Make this op non mandatory by introducing a NULL check in the * hif op wrapper. */ static void ath10k_usb_hif_send_complete_check(struct ath10k *ar, u8 pipe, int force) { } static int ath10k_usb_hif_power_up(struct ath10k *ar, enum ath10k_firmware_mode fw_mode) { return 0; } static void ath10k_usb_hif_power_down(struct ath10k *ar) { ath10k_usb_flush_all(ar); } #ifdef CONFIG_PM static int ath10k_usb_hif_suspend(struct ath10k *ar) { return -EOPNOTSUPP; } static int ath10k_usb_hif_resume(struct ath10k *ar) { return -EOPNOTSUPP; } #endif static const struct ath10k_hif_ops ath10k_usb_hif_ops = { .tx_sg = ath10k_usb_hif_tx_sg, .diag_read = ath10k_usb_hif_diag_read, .diag_write = ath10k_usb_hif_diag_write, .exchange_bmi_msg = ath10k_usb_bmi_exchange_msg, .start = ath10k_usb_hif_start, .stop = ath10k_usb_hif_stop, .map_service_to_pipe = ath10k_usb_hif_map_service_to_pipe, .get_default_pipe = ath10k_usb_hif_get_default_pipe, .send_complete_check = ath10k_usb_hif_send_complete_check, .get_free_queue_number = ath10k_usb_hif_get_free_queue_number, .power_up = ath10k_usb_hif_power_up, .power_down = ath10k_usb_hif_power_down, #ifdef CONFIG_PM .suspend = ath10k_usb_hif_suspend, .resume = ath10k_usb_hif_resume, #endif }; static u8 ath10k_usb_get_logical_pipe_num(u8 ep_address, int *urb_count) { u8 pipe_num = ATH10K_USB_PIPE_INVALID; switch (ep_address) { case ATH10K_USB_EP_ADDR_APP_CTRL_IN: pipe_num = ATH10K_USB_PIPE_RX_CTRL; *urb_count = RX_URB_COUNT; break; case ATH10K_USB_EP_ADDR_APP_DATA_IN: pipe_num = ATH10K_USB_PIPE_RX_DATA; *urb_count = RX_URB_COUNT; break; case ATH10K_USB_EP_ADDR_APP_INT_IN: pipe_num = ATH10K_USB_PIPE_RX_INT; *urb_count = RX_URB_COUNT; break; case ATH10K_USB_EP_ADDR_APP_DATA2_IN: pipe_num = ATH10K_USB_PIPE_RX_DATA2; *urb_count = RX_URB_COUNT; break; case ATH10K_USB_EP_ADDR_APP_CTRL_OUT: pipe_num = ATH10K_USB_PIPE_TX_CTRL; *urb_count = TX_URB_COUNT; break; case ATH10K_USB_EP_ADDR_APP_DATA_LP_OUT: pipe_num = ATH10K_USB_PIPE_TX_DATA_LP; *urb_count = 
TX_URB_COUNT; break; case ATH10K_USB_EP_ADDR_APP_DATA_MP_OUT: pipe_num = ATH10K_USB_PIPE_TX_DATA_MP; *urb_count = TX_URB_COUNT; break; case ATH10K_USB_EP_ADDR_APP_DATA_HP_OUT: pipe_num = ATH10K_USB_PIPE_TX_DATA_HP; *urb_count = TX_URB_COUNT; break; default: /* note: there may be endpoints not currently used */ break; } return pipe_num; } static int ath10k_usb_alloc_pipe_resources(struct ath10k *ar, struct ath10k_usb_pipe *pipe, int urb_cnt) { struct ath10k_urb_context *urb_context; int i; INIT_LIST_HEAD(&pipe->urb_list_head); init_usb_anchor(&pipe->urb_submitted); for (i = 0; i < urb_cnt; i++) { urb_context = kzalloc(sizeof(*urb_context), GFP_KERNEL); if (!urb_context) return -ENOMEM; urb_context->pipe = pipe; /* we are only allocate the urb contexts here, the actual URB * is allocated from the kernel as needed to do a transaction */ pipe->urb_alloc++; ath10k_usb_free_urb_to_pipe(pipe, urb_context); } ath10k_dbg(ar, ATH10K_DBG_USB, "usb alloc resources lpipe %d hpipe 0x%x urbs %d\n", pipe->logical_pipe_num, pipe->usb_pipe_handle, pipe->urb_alloc); return 0; } static int ath10k_usb_setup_pipe_resources(struct ath10k *ar, struct usb_interface *interface) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); struct usb_host_interface *iface_desc = interface->cur_altsetting; struct usb_endpoint_descriptor *endpoint; struct ath10k_usb_pipe *pipe; int ret, i, urbcount; u8 pipe_num; ath10k_dbg(ar, ATH10K_DBG_USB, "usb setting up pipes using interface\n"); /* walk decriptors and setup pipes */ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (ATH10K_USB_IS_BULK_EP(endpoint->bmAttributes)) { ath10k_dbg(ar, ATH10K_DBG_USB, "usb %s bulk ep 0x%2.2x maxpktsz %d\n", ATH10K_USB_IS_DIR_IN (endpoint->bEndpointAddress) ? "rx" : "tx", endpoint->bEndpointAddress, le16_to_cpu(endpoint->wMaxPacketSize)); } else if (ATH10K_USB_IS_INT_EP(endpoint->bmAttributes)) { ath10k_dbg(ar, ATH10K_DBG_USB, "usb %s int ep 0x%2.2x maxpktsz %d interval %d\n", ATH10K_USB_IS_DIR_IN (endpoint->bEndpointAddress) ? "rx" : "tx", endpoint->bEndpointAddress, le16_to_cpu(endpoint->wMaxPacketSize), endpoint->bInterval); } else if (ATH10K_USB_IS_ISOC_EP(endpoint->bmAttributes)) { /* TODO for ISO */ ath10k_dbg(ar, ATH10K_DBG_USB, "usb %s isoc ep 0x%2.2x maxpktsz %d interval %d\n", ATH10K_USB_IS_DIR_IN (endpoint->bEndpointAddress) ? 
"rx" : "tx", endpoint->bEndpointAddress, le16_to_cpu(endpoint->wMaxPacketSize), endpoint->bInterval); } urbcount = 0; pipe_num = ath10k_usb_get_logical_pipe_num(endpoint->bEndpointAddress, &urbcount); if (pipe_num == ATH10K_USB_PIPE_INVALID) continue; pipe = &ar_usb->pipes[pipe_num]; if (pipe->ar_usb) /* hmmm..pipe was already setup */ continue; pipe->ar_usb = ar_usb; pipe->logical_pipe_num = pipe_num; pipe->ep_address = endpoint->bEndpointAddress; pipe->max_packet_size = le16_to_cpu(endpoint->wMaxPacketSize); if (ATH10K_USB_IS_BULK_EP(endpoint->bmAttributes)) { if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) { pipe->usb_pipe_handle = usb_rcvbulkpipe(ar_usb->udev, pipe->ep_address); } else { pipe->usb_pipe_handle = usb_sndbulkpipe(ar_usb->udev, pipe->ep_address); } } else if (ATH10K_USB_IS_INT_EP(endpoint->bmAttributes)) { if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) { pipe->usb_pipe_handle = usb_rcvintpipe(ar_usb->udev, pipe->ep_address); } else { pipe->usb_pipe_handle = usb_sndintpipe(ar_usb->udev, pipe->ep_address); } } else if (ATH10K_USB_IS_ISOC_EP(endpoint->bmAttributes)) { /* TODO for ISO */ if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) { pipe->usb_pipe_handle = usb_rcvisocpipe(ar_usb->udev, pipe->ep_address); } else { pipe->usb_pipe_handle = usb_sndisocpipe(ar_usb->udev, pipe->ep_address); } } pipe->ep_desc = endpoint; if (!ATH10K_USB_IS_DIR_IN(pipe->ep_address)) pipe->flags |= ATH10K_USB_PIPE_FLAG_TX; ret = ath10k_usb_alloc_pipe_resources(ar, pipe, urbcount); if (ret) return ret; } return 0; } static int ath10k_usb_create(struct ath10k *ar, struct usb_interface *interface) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); struct usb_device *dev = interface_to_usbdev(interface); struct ath10k_usb_pipe *pipe; int ret, i; usb_set_intfdata(interface, ar_usb); spin_lock_init(&ar_usb->cs_lock); ar_usb->udev = dev; ar_usb->interface = interface; for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) { pipe = &ar_usb->pipes[i]; INIT_WORK(&pipe->io_complete_work, ath10k_usb_io_comp_work); skb_queue_head_init(&pipe->io_comp_queue); } ar_usb->diag_cmd_buffer = kzalloc(ATH10K_USB_MAX_DIAG_CMD, GFP_KERNEL); if (!ar_usb->diag_cmd_buffer) { ret = -ENOMEM; goto err; } ar_usb->diag_resp_buffer = kzalloc(ATH10K_USB_MAX_DIAG_RESP, GFP_KERNEL); if (!ar_usb->diag_resp_buffer) { ret = -ENOMEM; goto err; } ret = ath10k_usb_setup_pipe_resources(ar, interface); if (ret) goto err; return 0; err: ath10k_usb_destroy(ar); return ret; } /* ath10k usb driver registered functions */ static int ath10k_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct ath10k *ar; struct ath10k_usb *ar_usb; struct usb_device *dev = interface_to_usbdev(interface); int ret, vendor_id, product_id; enum ath10k_hw_rev hw_rev; struct ath10k_bus_params bus_params = {}; /* Assumption: All USB based chipsets (so far) are QCA9377 based. * If there will be newer chipsets that does not use the hw reg * setup as defined in qca6174_regs and qca6174_values, this * assumption is no longer valid and hw_rev must be setup differently * depending on chipset. 
*/ hw_rev = ATH10K_HW_QCA9377; ar = ath10k_core_create(sizeof(*ar_usb), &dev->dev, ATH10K_BUS_USB, hw_rev, &ath10k_usb_hif_ops); if (!ar) { dev_err(&dev->dev, "failed to allocate core\n"); return -ENOMEM; } usb_get_dev(dev); vendor_id = le16_to_cpu(dev->descriptor.idVendor); product_id = le16_to_cpu(dev->descriptor.idProduct); ath10k_dbg(ar, ATH10K_DBG_BOOT, "usb new func vendor 0x%04x product 0x%04x\n", vendor_id, product_id); ar_usb = ath10k_usb_priv(ar); ret = ath10k_usb_create(ar, interface); ar_usb->ar = ar; ar->dev_id = product_id; ar->id.vendor = vendor_id; ar->id.device = product_id; bus_params.dev_type = ATH10K_DEV_TYPE_HL; /* TODO: don't know yet how to get chip_id with USB */ bus_params.chip_id = 0; ret = ath10k_core_register(ar, &bus_params); if (ret) { ath10k_warn(ar, "failed to register driver core: %d\n", ret); goto err; } /* TODO: remove this once USB support is fully implemented */ ath10k_warn(ar, "Warning: ath10k USB support is incomplete, don't expect anything to work!\n"); return 0; err: ath10k_core_destroy(ar); usb_put_dev(dev); return ret; } static void ath10k_usb_remove(struct usb_interface *interface) { struct ath10k_usb *ar_usb; ar_usb = usb_get_intfdata(interface); if (!ar_usb) return; ath10k_core_unregister(ar_usb->ar); ath10k_usb_destroy(ar_usb->ar); usb_put_dev(interface_to_usbdev(interface)); ath10k_core_destroy(ar_usb->ar); } #ifdef CONFIG_PM static int ath10k_usb_pm_suspend(struct usb_interface *interface, pm_message_t message) { struct ath10k_usb *ar_usb = usb_get_intfdata(interface); ath10k_usb_flush_all(ar_usb->ar); return 0; } static int ath10k_usb_pm_resume(struct usb_interface *interface) { struct ath10k_usb *ar_usb = usb_get_intfdata(interface); struct ath10k *ar = ar_usb->ar; ath10k_usb_post_recv_transfers(ar, &ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA]); return 0; } #else #define ath10k_usb_pm_suspend NULL #define ath10k_usb_pm_resume NULL #endif /* table of devices that work with this driver */ static struct usb_device_id ath10k_usb_ids[] = { {USB_DEVICE(0x13b1, 0x0042)}, /* Linksys WUSB6100M */ { /* Terminating entry */ }, }; MODULE_DEVICE_TABLE(usb, ath10k_usb_ids); static struct usb_driver ath10k_usb_driver = { .name = "ath10k_usb", .probe = ath10k_usb_probe, .suspend = ath10k_usb_pm_suspend, .resume = ath10k_usb_pm_resume, .disconnect = ath10k_usb_remove, .id_table = ath10k_usb_ids, .supports_autosuspend = true, .disable_hub_initiated_lpm = 1, }; module_usb_driver(ath10k_usb_driver); MODULE_AUTHOR("Atheros Communications, Inc."); MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN USB devices"); MODULE_LICENSE("Dual BSD/GPL");
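/*
 * Illustrative sketch (not part of the driver above): why ath10k_usb_hif_tx_sg()
 * sets URB_ZERO_PACKET when skb->len is an exact multiple of the endpoint's max
 * packet size.  A USB bulk transfer is terminated by a short packet; when the
 * last packet is full-sized, an explicit zero-length packet must follow or the
 * device keeps waiting for more data.  The helper and sample sizes below are
 * assumptions for demonstration only.
 */
#include <stdbool.h>
#include <stdio.h>

/* True when a bulk OUT transfer of 'len' bytes on an endpoint with
 * 'max_packet' bytes per packet needs a terminating zero-length packet. */
static bool needs_zero_length_packet(unsigned int len, unsigned int max_packet)
{
	return len != 0 && (len % max_packet) == 0;
}

int main(void)
{
	printf("511 bytes,  512-byte ep: ZLP %s\n",
	       needs_zero_length_packet(511, 512) ? "needed" : "not needed");
	printf("512 bytes,  512-byte ep: ZLP %s\n",
	       needs_zero_length_packet(512, 512) ? "needed" : "not needed");
	printf("1024 bytes, 512-byte ep: ZLP %s\n",
	       needs_zero_length_packet(1024, 512) ? "needed" : "not needed");
	return 0;
}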
/* ---- end of CrossVul sample: ./CrossVul/dataset_final_sorted/CWE-400/c/bad_1268_0 ---- */

/* ---- start of CrossVul sample: crossvul-cpp_data_bad_485_1 ---- */
// SPDX-License-Identifier: GPL-2.0 /* * drivers/usb/core/usb.c * * (C) Copyright Linus Torvalds 1999 * (C) Copyright Johannes Erdfelt 1999-2001 * (C) Copyright Andreas Gal 1999 * (C) Copyright Gregory P. Smith 1999 * (C) Copyright Deti Fliegl 1999 (new USB architecture) * (C) Copyright Randy Dunlap 2000 * (C) Copyright David Brownell 2000-2004 * (C) Copyright Yggdrasil Computing, Inc. 2000 * (usb_device_id matching changes by Adam J. Richter) * (C) Copyright Greg Kroah-Hartman 2002-2003 * * Released under the GPLv2 only. * * NOTE! This is not actually a driver at all, rather this is * just a collection of helper routines that implement the * generic USB things that the real drivers can use.. * * Think of this as a "USB library" rather than anything else. * It should be considered a slave, with no callbacks. Callbacks * are evil. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/string.h> #include <linux/bitops.h> #include <linux/slab.h> #include <linux/interrupt.h> /* for in_interrupt() */ #include <linux/kmod.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/errno.h> #include <linux/usb.h> #include <linux/usb/hcd.h> #include <linux/mutex.h> #include <linux/workqueue.h> #include <linux/debugfs.h> #include <linux/usb/of.h> #include <asm/io.h> #include <linux/scatterlist.h> #include <linux/mm.h> #include <linux/dma-mapping.h> #include "usb.h" const char *usbcore_name = "usbcore"; static bool nousb; /* Disable USB when built into kernel image */ module_param(nousb, bool, 0444); /* * for external read access to <nousb> */ int usb_disabled(void) { return nousb; } EXPORT_SYMBOL_GPL(usb_disabled); #ifdef CONFIG_PM static int usb_autosuspend_delay = 2; /* Default delay value, * in seconds */ module_param_named(autosuspend, usb_autosuspend_delay, int, 0644); MODULE_PARM_DESC(autosuspend, "default autosuspend delay"); #else #define usb_autosuspend_delay 0 #endif static bool match_endpoint(struct usb_endpoint_descriptor *epd, struct usb_endpoint_descriptor **bulk_in, struct usb_endpoint_descriptor **bulk_out, struct usb_endpoint_descriptor **int_in, struct usb_endpoint_descriptor **int_out) { switch (usb_endpoint_type(epd)) { case USB_ENDPOINT_XFER_BULK: if (usb_endpoint_dir_in(epd)) { if (bulk_in && !*bulk_in) { *bulk_in = epd; break; } } else { if (bulk_out && !*bulk_out) { *bulk_out = epd; break; } } return false; case USB_ENDPOINT_XFER_INT: if (usb_endpoint_dir_in(epd)) { if (int_in && !*int_in) { *int_in = epd; break; } } else { if (int_out && !*int_out) { *int_out = epd; break; } } return false; default: return false; } return (!bulk_in || *bulk_in) && (!bulk_out || *bulk_out) && (!int_in || *int_in) && (!int_out || *int_out); } /** * usb_find_common_endpoints() -- look up common endpoint descriptors * @alt: alternate setting to search * @bulk_in: pointer to descriptor pointer, or NULL * @bulk_out: pointer to descriptor pointer, or NULL * @int_in: pointer to descriptor pointer, or NULL * @int_out: pointer to descriptor pointer, or NULL * * Search the alternate setting's endpoint descriptors for the first bulk-in, * bulk-out, interrupt-in and interrupt-out endpoints and return them in the * provided pointers (unless they are NULL). * * If a requested endpoint is not found, the corresponding pointer is set to * NULL. * * Return: Zero if all requested descriptors were found, or -ENXIO otherwise. 
*/ int usb_find_common_endpoints(struct usb_host_interface *alt, struct usb_endpoint_descriptor **bulk_in, struct usb_endpoint_descriptor **bulk_out, struct usb_endpoint_descriptor **int_in, struct usb_endpoint_descriptor **int_out) { struct usb_endpoint_descriptor *epd; int i; if (bulk_in) *bulk_in = NULL; if (bulk_out) *bulk_out = NULL; if (int_in) *int_in = NULL; if (int_out) *int_out = NULL; for (i = 0; i < alt->desc.bNumEndpoints; ++i) { epd = &alt->endpoint[i].desc; if (match_endpoint(epd, bulk_in, bulk_out, int_in, int_out)) return 0; } return -ENXIO; } EXPORT_SYMBOL_GPL(usb_find_common_endpoints); /** * usb_find_common_endpoints_reverse() -- look up common endpoint descriptors * @alt: alternate setting to search * @bulk_in: pointer to descriptor pointer, or NULL * @bulk_out: pointer to descriptor pointer, or NULL * @int_in: pointer to descriptor pointer, or NULL * @int_out: pointer to descriptor pointer, or NULL * * Search the alternate setting's endpoint descriptors for the last bulk-in, * bulk-out, interrupt-in and interrupt-out endpoints and return them in the * provided pointers (unless they are NULL). * * If a requested endpoint is not found, the corresponding pointer is set to * NULL. * * Return: Zero if all requested descriptors were found, or -ENXIO otherwise. */ int usb_find_common_endpoints_reverse(struct usb_host_interface *alt, struct usb_endpoint_descriptor **bulk_in, struct usb_endpoint_descriptor **bulk_out, struct usb_endpoint_descriptor **int_in, struct usb_endpoint_descriptor **int_out) { struct usb_endpoint_descriptor *epd; int i; if (bulk_in) *bulk_in = NULL; if (bulk_out) *bulk_out = NULL; if (int_in) *int_in = NULL; if (int_out) *int_out = NULL; for (i = alt->desc.bNumEndpoints - 1; i >= 0; --i) { epd = &alt->endpoint[i].desc; if (match_endpoint(epd, bulk_in, bulk_out, int_in, int_out)) return 0; } return -ENXIO; } EXPORT_SYMBOL_GPL(usb_find_common_endpoints_reverse); /** * usb_find_alt_setting() - Given a configuration, find the alternate setting * for the given interface. * @config: the configuration to search (not necessarily the current config). * @iface_num: interface number to search in * @alt_num: alternate interface setting number to search for. * * Search the configuration's interface cache for the given alt setting. * * Return: The alternate setting, if found. %NULL otherwise. */ struct usb_host_interface *usb_find_alt_setting( struct usb_host_config *config, unsigned int iface_num, unsigned int alt_num) { struct usb_interface_cache *intf_cache = NULL; int i; if (!config) return NULL; for (i = 0; i < config->desc.bNumInterfaces; i++) { if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber == iface_num) { intf_cache = config->intf_cache[i]; break; } } if (!intf_cache) return NULL; for (i = 0; i < intf_cache->num_altsetting; i++) if (intf_cache->altsetting[i].desc.bAlternateSetting == alt_num) return &intf_cache->altsetting[i]; printk(KERN_DEBUG "Did not find alt setting %u for intf %u, " "config %u\n", alt_num, iface_num, config->desc.bConfigurationValue); return NULL; } EXPORT_SYMBOL_GPL(usb_find_alt_setting); /** * usb_ifnum_to_if - get the interface object with a given interface number * @dev: the device whose current configuration is considered * @ifnum: the desired interface * * This walks the device descriptor for the currently active configuration * to find the interface object with the particular interface number. 
* * Note that configuration descriptors are not required to assign interface * numbers sequentially, so that it would be incorrect to assume that * the first interface in that descriptor corresponds to interface zero. * This routine helps device drivers avoid such mistakes. * However, you should make sure that you do the right thing with any * alternate settings available for this interfaces. * * Don't call this function unless you are bound to one of the interfaces * on this device or you have locked the device! * * Return: A pointer to the interface that has @ifnum as interface number, * if found. %NULL otherwise. */ struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev, unsigned ifnum) { struct usb_host_config *config = dev->actconfig; int i; if (!config) return NULL; for (i = 0; i < config->desc.bNumInterfaces; i++) if (config->interface[i]->altsetting[0] .desc.bInterfaceNumber == ifnum) return config->interface[i]; return NULL; } EXPORT_SYMBOL_GPL(usb_ifnum_to_if); /** * usb_altnum_to_altsetting - get the altsetting structure with a given alternate setting number. * @intf: the interface containing the altsetting in question * @altnum: the desired alternate setting number * * This searches the altsetting array of the specified interface for * an entry with the correct bAlternateSetting value. * * Note that altsettings need not be stored sequentially by number, so * it would be incorrect to assume that the first altsetting entry in * the array corresponds to altsetting zero. This routine helps device * drivers avoid such mistakes. * * Don't call this function unless you are bound to the intf interface * or you have locked the device! * * Return: A pointer to the entry of the altsetting array of @intf that * has @altnum as the alternate setting number. %NULL if not found. */ struct usb_host_interface *usb_altnum_to_altsetting( const struct usb_interface *intf, unsigned int altnum) { int i; for (i = 0; i < intf->num_altsetting; i++) { if (intf->altsetting[i].desc.bAlternateSetting == altnum) return &intf->altsetting[i]; } return NULL; } EXPORT_SYMBOL_GPL(usb_altnum_to_altsetting); struct find_interface_arg { int minor; struct device_driver *drv; }; static int __find_interface(struct device *dev, void *data) { struct find_interface_arg *arg = data; struct usb_interface *intf; if (!is_usb_interface(dev)) return 0; if (dev->driver != arg->drv) return 0; intf = to_usb_interface(dev); return intf->minor == arg->minor; } /** * usb_find_interface - find usb_interface pointer for driver and device * @drv: the driver whose current configuration is considered * @minor: the minor number of the desired device * * This walks the bus device list and returns a pointer to the interface * with the matching minor and driver. Note, this only works for devices * that share the USB major number. * * Return: A pointer to the interface with the matching major and @minor. */ struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor) { struct find_interface_arg argb; struct device *dev; argb.minor = minor; argb.drv = &drv->drvwrap.driver; dev = bus_find_device(&usb_bus_type, NULL, &argb, __find_interface); /* Drop reference count from bus_find_device */ put_device(dev); return dev ? 
to_usb_interface(dev) : NULL; } EXPORT_SYMBOL_GPL(usb_find_interface); struct each_dev_arg { void *data; int (*fn)(struct usb_device *, void *); }; static int __each_dev(struct device *dev, void *data) { struct each_dev_arg *arg = (struct each_dev_arg *)data; /* There are struct usb_interface on the same bus, filter them out */ if (!is_usb_device(dev)) return 0; return arg->fn(to_usb_device(dev), arg->data); } /** * usb_for_each_dev - iterate over all USB devices in the system * @data: data pointer that will be handed to the callback function * @fn: callback function to be called for each USB device * * Iterate over all USB devices and call @fn for each, passing it @data. If it * returns anything other than 0, we break the iteration prematurely and return * that value. */ int usb_for_each_dev(void *data, int (*fn)(struct usb_device *, void *)) { struct each_dev_arg arg = {data, fn}; return bus_for_each_dev(&usb_bus_type, NULL, &arg, __each_dev); } EXPORT_SYMBOL_GPL(usb_for_each_dev); /** * usb_release_dev - free a usb device structure when all users of it are finished. * @dev: device that's been disconnected * * Will be called only by the device core when all users of this usb device are * done. */ static void usb_release_dev(struct device *dev) { struct usb_device *udev; struct usb_hcd *hcd; udev = to_usb_device(dev); hcd = bus_to_hcd(udev->bus); usb_destroy_configuration(udev); usb_release_bos_descriptor(udev); of_node_put(dev->of_node); usb_put_hcd(hcd); kfree(udev->product); kfree(udev->manufacturer); kfree(udev->serial); kfree(udev); } static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env) { struct usb_device *usb_dev; usb_dev = to_usb_device(dev); if (add_uevent_var(env, "BUSNUM=%03d", usb_dev->bus->busnum)) return -ENOMEM; if (add_uevent_var(env, "DEVNUM=%03d", usb_dev->devnum)) return -ENOMEM; return 0; } #ifdef CONFIG_PM /* USB device Power-Management thunks. * There's no need to distinguish here between quiescing a USB device * and powering it down; the generic_suspend() routine takes care of * it by skipping the usb_port_suspend() call for a quiesce. And for * USB interfaces there's no difference at all. */ static int usb_dev_prepare(struct device *dev) { return 0; /* Implement eventually? 
*/ } static void usb_dev_complete(struct device *dev) { /* Currently used only for rebinding interfaces */ usb_resume_complete(dev); } static int usb_dev_suspend(struct device *dev) { return usb_suspend(dev, PMSG_SUSPEND); } static int usb_dev_resume(struct device *dev) { return usb_resume(dev, PMSG_RESUME); } static int usb_dev_freeze(struct device *dev) { return usb_suspend(dev, PMSG_FREEZE); } static int usb_dev_thaw(struct device *dev) { return usb_resume(dev, PMSG_THAW); } static int usb_dev_poweroff(struct device *dev) { return usb_suspend(dev, PMSG_HIBERNATE); } static int usb_dev_restore(struct device *dev) { return usb_resume(dev, PMSG_RESTORE); } static const struct dev_pm_ops usb_device_pm_ops = { .prepare = usb_dev_prepare, .complete = usb_dev_complete, .suspend = usb_dev_suspend, .resume = usb_dev_resume, .freeze = usb_dev_freeze, .thaw = usb_dev_thaw, .poweroff = usb_dev_poweroff, .restore = usb_dev_restore, .runtime_suspend = usb_runtime_suspend, .runtime_resume = usb_runtime_resume, .runtime_idle = usb_runtime_idle, }; #endif /* CONFIG_PM */ static char *usb_devnode(struct device *dev, umode_t *mode, kuid_t *uid, kgid_t *gid) { struct usb_device *usb_dev; usb_dev = to_usb_device(dev); return kasprintf(GFP_KERNEL, "bus/usb/%03d/%03d", usb_dev->bus->busnum, usb_dev->devnum); } struct device_type usb_device_type = { .name = "usb_device", .release = usb_release_dev, .uevent = usb_dev_uevent, .devnode = usb_devnode, #ifdef CONFIG_PM .pm = &usb_device_pm_ops, #endif }; /* Returns 1 if @usb_bus is WUSB, 0 otherwise */ static unsigned usb_bus_is_wusb(struct usb_bus *bus) { struct usb_hcd *hcd = bus_to_hcd(bus); return hcd->wireless; } /** * usb_alloc_dev - usb device constructor (usbcore-internal) * @parent: hub to which device is connected; null to allocate a root hub * @bus: bus used to access the device * @port1: one-based index of port; ignored for root hubs * Context: !in_interrupt() * * Only hub drivers (including virtual root hub drivers for host * controllers) should ever call this. * * This call may not be used in a non-sleeping context. * * Return: On success, a pointer to the allocated usb device. %NULL on * failure. */ struct usb_device *usb_alloc_dev(struct usb_device *parent, struct usb_bus *bus, unsigned port1) { struct usb_device *dev; struct usb_hcd *usb_hcd = bus_to_hcd(bus); unsigned root_hub = 0; unsigned raw_port = port1; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return NULL; if (!usb_get_hcd(usb_hcd)) { kfree(dev); return NULL; } /* Root hubs aren't true devices, so don't allocate HCD resources */ if (usb_hcd->driver->alloc_dev && parent && !usb_hcd->driver->alloc_dev(usb_hcd, dev)) { usb_put_hcd(bus_to_hcd(bus)); kfree(dev); return NULL; } device_initialize(&dev->dev); dev->dev.bus = &usb_bus_type; dev->dev.type = &usb_device_type; dev->dev.groups = usb_device_groups; /* * Fake a dma_mask/offset for the USB device: * We cannot really use the dma-mapping API (dma_alloc_* and * dma_map_*) for USB devices but instead need to use * usb_alloc_coherent and pass data in 'urb's, but some subsystems * manually look into the mask/offset pair to determine whether * they need bounce buffers. * Note: calling dma_set_mask() on a USB device would set the * mask for the entire HCD, so don't do that. 
*/ dev->dev.dma_mask = bus->sysdev->dma_mask; dev->dev.dma_pfn_offset = bus->sysdev->dma_pfn_offset; set_dev_node(&dev->dev, dev_to_node(bus->sysdev)); dev->state = USB_STATE_ATTACHED; dev->lpm_disable_count = 1; atomic_set(&dev->urbnum, 0); INIT_LIST_HEAD(&dev->ep0.urb_list); dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE; dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT; /* ep0 maxpacket comes later, from device descriptor */ usb_enable_endpoint(dev, &dev->ep0, false); dev->can_submit = 1; /* Save readable and stable topology id, distinguishing devices * by location for diagnostics, tools, driver model, etc. The * string is a path along hub ports, from the root. Each device's * dev->devpath will be stable until USB is re-cabled, and hubs * are often labeled with these port numbers. The name isn't * as stable: bus->busnum changes easily from modprobe order, * cardbus or pci hotplugging, and so on. */ if (unlikely(!parent)) { dev->devpath[0] = '0'; dev->route = 0; dev->dev.parent = bus->controller; device_set_of_node_from_dev(&dev->dev, bus->sysdev); dev_set_name(&dev->dev, "usb%d", bus->busnum); root_hub = 1; } else { /* match any labeling on the hubs; it's one-based */ if (parent->devpath[0] == '0') { snprintf(dev->devpath, sizeof dev->devpath, "%d", port1); /* Root ports are not counted in route string */ dev->route = 0; } else { snprintf(dev->devpath, sizeof dev->devpath, "%s.%d", parent->devpath, port1); /* Route string assumes hubs have less than 16 ports */ if (port1 < 15) dev->route = parent->route + (port1 << ((parent->level - 1)*4)); else dev->route = parent->route + (15 << ((parent->level - 1)*4)); } dev->dev.parent = &parent->dev; dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath); if (!parent->parent) { /* device under root hub's port */ raw_port = usb_hcd_find_raw_port_number(usb_hcd, port1); } dev->dev.of_node = usb_of_get_device_node(parent, raw_port); /* hub driver sets up TT records */ } dev->portnum = port1; dev->bus = bus; dev->parent = parent; INIT_LIST_HEAD(&dev->filelist); #ifdef CONFIG_PM pm_runtime_set_autosuspend_delay(&dev->dev, usb_autosuspend_delay * 1000); dev->connect_time = jiffies; dev->active_duration = -jiffies; #endif if (root_hub) /* Root hub always ok [and always wired] */ dev->authorized = 1; else { dev->authorized = !!HCD_DEV_AUTHORIZED(usb_hcd); dev->wusb = usb_bus_is_wusb(bus) ? 1 : 0; } return dev; } EXPORT_SYMBOL_GPL(usb_alloc_dev); /** * usb_get_dev - increments the reference count of the usb device structure * @dev: the device being referenced * * Each live reference to a device should be refcounted. * * Drivers for USB interfaces should normally record such references in * their probe() methods, when they bind to an interface, and release * them by calling usb_put_dev(), in their disconnect() methods. * * Return: A pointer to the device with the incremented reference counter. */ struct usb_device *usb_get_dev(struct usb_device *dev) { if (dev) get_device(&dev->dev); return dev; } EXPORT_SYMBOL_GPL(usb_get_dev); /** * usb_put_dev - release a use of the usb device structure * @dev: device that's been disconnected * * Must be called when a user of a device is finished with it. When the last * user of the device calls this function, the memory of the device is freed. 
*/ void usb_put_dev(struct usb_device *dev) { if (dev) put_device(&dev->dev); } EXPORT_SYMBOL_GPL(usb_put_dev); /** * usb_get_intf - increments the reference count of the usb interface structure * @intf: the interface being referenced * * Each live reference to a interface must be refcounted. * * Drivers for USB interfaces should normally record such references in * their probe() methods, when they bind to an interface, and release * them by calling usb_put_intf(), in their disconnect() methods. * * Return: A pointer to the interface with the incremented reference counter. */ struct usb_interface *usb_get_intf(struct usb_interface *intf) { if (intf) get_device(&intf->dev); return intf; } EXPORT_SYMBOL_GPL(usb_get_intf); /** * usb_put_intf - release a use of the usb interface structure * @intf: interface that's been decremented * * Must be called when a user of an interface is finished with it. When the * last user of the interface calls this function, the memory of the interface * is freed. */ void usb_put_intf(struct usb_interface *intf) { if (intf) put_device(&intf->dev); } EXPORT_SYMBOL_GPL(usb_put_intf); /* USB device locking * * USB devices and interfaces are locked using the semaphore in their * embedded struct device. The hub driver guarantees that whenever a * device is connected or disconnected, drivers are called with the * USB device locked as well as their particular interface. * * Complications arise when several devices are to be locked at the same * time. Only hub-aware drivers that are part of usbcore ever have to * do this; nobody else needs to worry about it. The rule for locking * is simple: * * When locking both a device and its parent, always lock the * the parent first. */ /** * usb_lock_device_for_reset - cautiously acquire the lock for a usb device structure * @udev: device that's being locked * @iface: interface bound to the driver making the request (optional) * * Attempts to acquire the device lock, but fails if the device is * NOTATTACHED or SUSPENDED, or if iface is specified and the interface * is neither BINDING nor BOUND. Rather than sleeping to wait for the * lock, the routine polls repeatedly. This is to prevent deadlock with * disconnect; in some drivers (such as usb-storage) the disconnect() * or suspend() method will block waiting for a device reset to complete. * * Return: A negative error code for failure, otherwise 0. */ int usb_lock_device_for_reset(struct usb_device *udev, const struct usb_interface *iface) { unsigned long jiffies_expire = jiffies + HZ; if (udev->state == USB_STATE_NOTATTACHED) return -ENODEV; if (udev->state == USB_STATE_SUSPENDED) return -EHOSTUNREACH; if (iface && (iface->condition == USB_INTERFACE_UNBINDING || iface->condition == USB_INTERFACE_UNBOUND)) return -EINTR; while (!usb_trylock_device(udev)) { /* If we can't acquire the lock after waiting one second, * we're probably deadlocked */ if (time_after(jiffies, jiffies_expire)) return -EBUSY; msleep(15); if (udev->state == USB_STATE_NOTATTACHED) return -ENODEV; if (udev->state == USB_STATE_SUSPENDED) return -EHOSTUNREACH; if (iface && (iface->condition == USB_INTERFACE_UNBINDING || iface->condition == USB_INTERFACE_UNBOUND)) return -EINTR; } return 0; } EXPORT_SYMBOL_GPL(usb_lock_device_for_reset); /** * usb_get_current_frame_number - return current bus frame number * @dev: the device whose bus is being queried * * Return: The current frame number for the USB host controller used * with the given USB device. This can be used when scheduling * isochronous requests. 
* * Note: Different kinds of host controller have different "scheduling * horizons". While one type might support scheduling only 32 frames * into the future, others could support scheduling up to 1024 frames * into the future. * */ int usb_get_current_frame_number(struct usb_device *dev) { return usb_hcd_get_frame_number(dev); } EXPORT_SYMBOL_GPL(usb_get_current_frame_number); /*-------------------------------------------------------------------*/ /* * __usb_get_extra_descriptor() finds a descriptor of specific type in the * extra field of the interface and endpoint descriptor structs. */ int __usb_get_extra_descriptor(char *buffer, unsigned size, unsigned char type, void **ptr) { struct usb_descriptor_header *header; while (size >= sizeof(struct usb_descriptor_header)) { header = (struct usb_descriptor_header *)buffer; if (header->bLength < 2) { printk(KERN_ERR "%s: bogus descriptor, type %d length %d\n", usbcore_name, header->bDescriptorType, header->bLength); return -1; } if (header->bDescriptorType == type) { *ptr = header; return 0; } buffer += header->bLength; size -= header->bLength; } return -1; } EXPORT_SYMBOL_GPL(__usb_get_extra_descriptor); /** * usb_alloc_coherent - allocate dma-consistent buffer for URB_NO_xxx_DMA_MAP * @dev: device the buffer will be used with * @size: requested buffer size * @mem_flags: affect whether allocation may block * @dma: used to return DMA address of buffer * * Return: Either null (indicating no buffer could be allocated), or the * cpu-space pointer to a buffer that may be used to perform DMA to the * specified device. Such cpu-space buffers are returned along with the DMA * address (through the pointer provided). * * Note: * These buffers are used with URB_NO_xxx_DMA_MAP set in urb->transfer_flags * to avoid behaviors like using "DMA bounce buffers", or thrashing IOMMU * hardware during URB completion/resubmit. The implementation varies between * platforms, depending on details of how DMA will work to this device. * Using these buffers also eliminates cacheline sharing problems on * architectures where CPU caches are not DMA-coherent. On systems without * bus-snooping caches, these buffers are uncached. * * When the buffer is no longer used, free it with usb_free_coherent(). */ void *usb_alloc_coherent(struct usb_device *dev, size_t size, gfp_t mem_flags, dma_addr_t *dma) { if (!dev || !dev->bus) return NULL; return hcd_buffer_alloc(dev->bus, size, mem_flags, dma); } EXPORT_SYMBOL_GPL(usb_alloc_coherent); /** * usb_free_coherent - free memory allocated with usb_alloc_coherent() * @dev: device the buffer was used with * @size: requested buffer size * @addr: CPU address of buffer * @dma: DMA address of buffer * * This reclaims an I/O buffer, letting it be reused. The memory must have * been allocated using usb_alloc_coherent(), and the parameters must match * those provided in that allocation request. */ void usb_free_coherent(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma) { if (!dev || !dev->bus) return; if (!addr) return; hcd_buffer_free(dev->bus, size, addr, dma); } EXPORT_SYMBOL_GPL(usb_free_coherent); /** * usb_buffer_map - create DMA mapping(s) for an urb * @urb: urb whose transfer_buffer/setup_packet will be mapped * * URB_NO_TRANSFER_DMA_MAP is added to urb->transfer_flags if the operation * succeeds. If the device is connected to this system through a non-DMA * controller, this operation always succeeds. 
* * This call would normally be used for an urb which is reused, perhaps * as the target of a large periodic transfer, with usb_buffer_dmasync() * calls to synchronize memory and dma state. * * Reverse the effect of this call with usb_buffer_unmap(). * * Return: Either %NULL (indicating no buffer could be mapped), or @urb. * */ #if 0 struct urb *usb_buffer_map(struct urb *urb) { struct usb_bus *bus; struct device *controller; if (!urb || !urb->dev || !(bus = urb->dev->bus) || !(controller = bus->sysdev)) return NULL; if (controller->dma_mask) { urb->transfer_dma = dma_map_single(controller, urb->transfer_buffer, urb->transfer_buffer_length, usb_pipein(urb->pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); /* FIXME generic api broken like pci, can't report errors */ /* if (urb->transfer_dma == DMA_ADDR_INVALID) return 0; */ } else urb->transfer_dma = ~0; urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; return urb; } EXPORT_SYMBOL_GPL(usb_buffer_map); #endif /* 0 */ /* XXX DISABLED, no users currently. If you wish to re-enable this * XXX please determine whether the sync is to transfer ownership of * XXX the buffer from device to cpu or vice verse, and thusly use the * XXX appropriate _for_{cpu,device}() method. -DaveM */ #if 0 /** * usb_buffer_dmasync - synchronize DMA and CPU view of buffer(s) * @urb: urb whose transfer_buffer/setup_packet will be synchronized */ void usb_buffer_dmasync(struct urb *urb) { struct usb_bus *bus; struct device *controller; if (!urb || !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) || !urb->dev || !(bus = urb->dev->bus) || !(controller = bus->sysdev)) return; if (controller->dma_mask) { dma_sync_single_for_cpu(controller, urb->transfer_dma, urb->transfer_buffer_length, usb_pipein(urb->pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); if (usb_pipecontrol(urb->pipe)) dma_sync_single_for_cpu(controller, urb->setup_dma, sizeof(struct usb_ctrlrequest), DMA_TO_DEVICE); } } EXPORT_SYMBOL_GPL(usb_buffer_dmasync); #endif /** * usb_buffer_unmap - free DMA mapping(s) for an urb * @urb: urb whose transfer_buffer will be unmapped * * Reverses the effect of usb_buffer_map(). */ #if 0 void usb_buffer_unmap(struct urb *urb) { struct usb_bus *bus; struct device *controller; if (!urb || !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) || !urb->dev || !(bus = urb->dev->bus) || !(controller = bus->sysdev)) return; if (controller->dma_mask) { dma_unmap_single(controller, urb->transfer_dma, urb->transfer_buffer_length, usb_pipein(urb->pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP; } EXPORT_SYMBOL_GPL(usb_buffer_unmap); #endif /* 0 */ #if 0 /** * usb_buffer_map_sg - create scatterlist DMA mapping(s) for an endpoint * @dev: device to which the scatterlist will be mapped * @is_in: mapping transfer direction * @sg: the scatterlist to map * @nents: the number of entries in the scatterlist * * Return: Either < 0 (indicating no buffers could be mapped), or the * number of DMA mapping array entries in the scatterlist. * * Note: * The caller is responsible for placing the resulting DMA addresses from * the scatterlist into URB transfer buffer pointers, and for setting the * URB_NO_TRANSFER_DMA_MAP transfer flag in each of those URBs. * * Top I/O rates come from queuing URBs, instead of waiting for each one * to complete before starting the next I/O. This is particularly easy * to do with scatterlists. Just allocate and submit one URB for each DMA * mapping entry returned, stopping on the first error or when all succeed. 
* Better yet, use the usb_sg_*() calls, which do that (and more) for you. * * This call would normally be used when translating scatterlist requests, * rather than usb_buffer_map(), since on some hardware (with IOMMUs) it * may be able to coalesce mappings for improved I/O efficiency. * * Reverse the effect of this call with usb_buffer_unmap_sg(). */ int usb_buffer_map_sg(const struct usb_device *dev, int is_in, struct scatterlist *sg, int nents) { struct usb_bus *bus; struct device *controller; if (!dev || !(bus = dev->bus) || !(controller = bus->sysdev) || !controller->dma_mask) return -EINVAL; /* FIXME generic api broken like pci, can't report errors */ return dma_map_sg(controller, sg, nents, is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE) ? : -ENOMEM; } EXPORT_SYMBOL_GPL(usb_buffer_map_sg); #endif /* XXX DISABLED, no users currently. If you wish to re-enable this * XXX please determine whether the sync is to transfer ownership of * XXX the buffer from device to cpu or vice verse, and thusly use the * XXX appropriate _for_{cpu,device}() method. -DaveM */ #if 0 /** * usb_buffer_dmasync_sg - synchronize DMA and CPU view of scatterlist buffer(s) * @dev: device to which the scatterlist will be mapped * @is_in: mapping transfer direction * @sg: the scatterlist to synchronize * @n_hw_ents: the positive return value from usb_buffer_map_sg * * Use this when you are re-using a scatterlist's data buffers for * another USB request. */ void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in, struct scatterlist *sg, int n_hw_ents) { struct usb_bus *bus; struct device *controller; if (!dev || !(bus = dev->bus) || !(controller = bus->sysdev) || !controller->dma_mask) return; dma_sync_sg_for_cpu(controller, sg, n_hw_ents, is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } EXPORT_SYMBOL_GPL(usb_buffer_dmasync_sg); #endif #if 0 /** * usb_buffer_unmap_sg - free DMA mapping(s) for a scatterlist * @dev: device to which the scatterlist will be mapped * @is_in: mapping transfer direction * @sg: the scatterlist to unmap * @n_hw_ents: the positive return value from usb_buffer_map_sg * * Reverses the effect of usb_buffer_map_sg(). */ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in, struct scatterlist *sg, int n_hw_ents) { struct usb_bus *bus; struct device *controller; if (!dev || !(bus = dev->bus) || !(controller = bus->sysdev) || !controller->dma_mask) return; dma_unmap_sg(controller, sg, n_hw_ents, is_in ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE); } EXPORT_SYMBOL_GPL(usb_buffer_unmap_sg); #endif /* * Notifications of device and interface registration */ static int usb_bus_notify(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; switch (action) { case BUS_NOTIFY_ADD_DEVICE: if (dev->type == &usb_device_type) (void) usb_create_sysfs_dev_files(to_usb_device(dev)); else if (dev->type == &usb_if_device_type) usb_create_sysfs_intf_files(to_usb_interface(dev)); break; case BUS_NOTIFY_DEL_DEVICE: if (dev->type == &usb_device_type) usb_remove_sysfs_dev_files(to_usb_device(dev)); else if (dev->type == &usb_if_device_type) usb_remove_sysfs_intf_files(to_usb_interface(dev)); break; } return 0; } static struct notifier_block usb_bus_nb = { .notifier_call = usb_bus_notify, }; struct dentry *usb_debug_root; EXPORT_SYMBOL_GPL(usb_debug_root); static void usb_debugfs_init(void) { usb_debug_root = debugfs_create_dir("usb", NULL); debugfs_create_file("devices", 0444, usb_debug_root, NULL, &usbfs_devices_fops); } static void usb_debugfs_cleanup(void) { debugfs_remove_recursive(usb_debug_root); } /* * Init */ static int __init usb_init(void) { int retval; if (usb_disabled()) { pr_info("%s: USB support disabled\n", usbcore_name); return 0; } usb_init_pool_max(); usb_debugfs_init(); usb_acpi_register(); retval = bus_register(&usb_bus_type); if (retval) goto bus_register_failed; retval = bus_register_notifier(&usb_bus_type, &usb_bus_nb); if (retval) goto bus_notifier_failed; retval = usb_major_init(); if (retval) goto major_init_failed; retval = usb_register(&usbfs_driver); if (retval) goto driver_register_failed; retval = usb_devio_init(); if (retval) goto usb_devio_init_failed; retval = usb_hub_init(); if (retval) goto hub_init_failed; retval = usb_register_device_driver(&usb_generic_driver, THIS_MODULE); if (!retval) goto out; usb_hub_cleanup(); hub_init_failed: usb_devio_cleanup(); usb_devio_init_failed: usb_deregister(&usbfs_driver); driver_register_failed: usb_major_cleanup(); major_init_failed: bus_unregister_notifier(&usb_bus_type, &usb_bus_nb); bus_notifier_failed: bus_unregister(&usb_bus_type); bus_register_failed: usb_acpi_unregister(); usb_debugfs_cleanup(); out: return retval; } /* * Cleanup */ static void __exit usb_exit(void) { /* This will matter if shutdown/reboot does exitcalls. */ if (usb_disabled()) return; usb_release_quirk_list(); usb_deregister_device_driver(&usb_generic_driver); usb_major_cleanup(); usb_deregister(&usbfs_driver); usb_devio_cleanup(); usb_hub_cleanup(); bus_unregister_notifier(&usb_bus_type, &usb_bus_nb); bus_unregister(&usb_bus_type); usb_acpi_unregister(); usb_debugfs_cleanup(); idr_destroy(&usb_bus_idr); } subsys_initcall(usb_init); module_exit(usb_exit); MODULE_LICENSE("GPL");
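#if 0
/*
 * Illustrative sketch only, not part of the original file: the same
 * goto-based unwind pattern that usb_init() above uses, reduced to two
 * hypothetical registration steps.  register_step_one/two() and their
 * unregister counterparts do not exist in usbcore; only the error
 * handling shape is the point here.
 */
static int __init example_init(void)
{
	int retval;

	retval = register_step_one();
	if (retval)
		goto step_one_failed;

	retval = register_step_two();
	if (retval)
		goto step_two_failed;

	return 0;

step_two_failed:
	unregister_step_one();	/* undo completed steps in reverse order */
step_one_failed:
	return retval;
}
#endif /* 0 */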
./CrossVul/dataset_final_sorted/CWE-400/c/bad_485_1
crossvul-cpp_data_bad_5200_6
/* * sysctl.c: General linux system control interface * * Begun 24 March 1995, Stephen Tweedie * Added /proc support, Dec 1995 * Added bdflush entry and intvec min/max checking, 2/23/96, Tom Dyas. * Added hooks for /proc/sys/net (minor, minor patch), 96/4/1, Mike Shaver. * Added kernel/java-{interpreter,appletviewer}, 96/5/10, Mike Shaver. * Dynamic registration fixes, Stephen Tweedie. * Added kswapd-interval, ctrl-alt-del, printk stuff, 1/8/97, Chris Horn. * Made sysctl support optional via CONFIG_SYSCTL, 1/10/97, Chris * Horn. * Added proc_doulongvec_ms_jiffies_minmax, 09/08/99, Carlos H. Bauer. * Added proc_doulongvec_minmax, 09/08/99, Carlos H. Bauer. * Changed linked lists to use list.h instead of lists.h, 02/24/00, Bill * Wendling. * The list_for_each() macro wasn't appropriate for the sysctl loop. * Removed it and replaced it with older style, 03/23/00, Bill Wendling */ #include <linux/module.h> #include <linux/aio.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/slab.h> #include <linux/sysctl.h> #include <linux/bitmap.h> #include <linux/signal.h> #include <linux/printk.h> #include <linux/proc_fs.h> #include <linux/security.h> #include <linux/ctype.h> #include <linux/kmemcheck.h> #include <linux/kmemleak.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/kobject.h> #include <linux/net.h> #include <linux/sysrq.h> #include <linux/highuid.h> #include <linux/writeback.h> #include <linux/ratelimit.h> #include <linux/compaction.h> #include <linux/hugetlb.h> #include <linux/initrd.h> #include <linux/key.h> #include <linux/times.h> #include <linux/limits.h> #include <linux/dcache.h> #include <linux/dnotify.h> #include <linux/syscalls.h> #include <linux/vmstat.h> #include <linux/nfs_fs.h> #include <linux/acpi.h> #include <linux/reboot.h> #include <linux/ftrace.h> #include <linux/perf_event.h> #include <linux/kprobes.h> #include <linux/pipe_fs_i.h> #include <linux/oom.h> #include <linux/kmod.h> #include <linux/capability.h> #include <linux/binfmts.h> #include <linux/sched/sysctl.h> #include <linux/kexec.h> #include <linux/bpf.h> #include <asm/uaccess.h> #include <asm/processor.h> #ifdef CONFIG_X86 #include <asm/nmi.h> #include <asm/stacktrace.h> #include <asm/io.h> #endif #ifdef CONFIG_SPARC #include <asm/setup.h> #endif #ifdef CONFIG_BSD_PROCESS_ACCT #include <linux/acct.h> #endif #ifdef CONFIG_RT_MUTEXES #include <linux/rtmutex.h> #endif #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_LOCK_STAT) #include <linux/lockdep.h> #endif #ifdef CONFIG_CHR_DEV_SG #include <scsi/sg.h> #endif #ifdef CONFIG_LOCKUP_DETECTOR #include <linux/nmi.h> #endif #if defined(CONFIG_SYSCTL) /* External variables not in a header file. 
*/ extern int suid_dumpable; #ifdef CONFIG_COREDUMP extern int core_uses_pid; extern char core_pattern[]; extern unsigned int core_pipe_limit; #endif extern int pid_max; extern int pid_max_min, pid_max_max; extern int percpu_pagelist_fraction; extern int compat_log; extern int latencytop_enabled; extern int sysctl_nr_open_min, sysctl_nr_open_max; #ifndef CONFIG_MMU extern int sysctl_nr_trim_pages; #endif /* Constants used for minimum and maximum */ #ifdef CONFIG_LOCKUP_DETECTOR static int sixty = 60; #endif static int __maybe_unused neg_one = -1; static int zero; static int __maybe_unused one = 1; static int __maybe_unused two = 2; static int __maybe_unused four = 4; static unsigned long one_ul = 1; static int one_hundred = 100; static int one_thousand = 1000; #ifdef CONFIG_PRINTK static int ten_thousand = 10000; #endif #ifdef CONFIG_PERF_EVENTS static int six_hundred_forty_kb = 640 * 1024; #endif /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */ static unsigned long dirty_bytes_min = 2 * PAGE_SIZE; /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */ static int maxolduid = 65535; static int minolduid; static int ngroups_max = NGROUPS_MAX; static const int cap_last_cap = CAP_LAST_CAP; /*this is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs */ #ifdef CONFIG_DETECT_HUNG_TASK static unsigned long hung_task_timeout_max = (LONG_MAX/HZ); #endif #ifdef CONFIG_INOTIFY_USER #include <linux/inotify.h> #endif #ifdef CONFIG_SPARC #endif #ifdef __hppa__ extern int pwrsw_enabled; #endif #ifdef CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW extern int unaligned_enabled; #endif #ifdef CONFIG_IA64 extern int unaligned_dump_stack; #endif #ifdef CONFIG_SYSCTL_ARCH_UNALIGN_NO_WARN extern int no_unaligned_warning; #endif #ifdef CONFIG_PROC_SYSCTL #define SYSCTL_WRITES_LEGACY -1 #define SYSCTL_WRITES_WARN 0 #define SYSCTL_WRITES_STRICT 1 static int sysctl_writes_strict = SYSCTL_WRITES_STRICT; static int proc_do_cad_pid(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); static int proc_taint(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif #ifdef CONFIG_PRINTK static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #ifdef CONFIG_COREDUMP static int proc_dostring_coredump(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif #ifdef CONFIG_MAGIC_SYSRQ /* Note: sysrq code uses it's own private copy */ static int __sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE; static int sysrq_sysctl_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int error; error = proc_dointvec(table, write, buffer, lenp, ppos); if (error) return error; if (write) sysrq_toggle_support(__sysrq_enabled); return 0; } #endif static struct ctl_table kern_table[]; static struct ctl_table vm_table[]; static struct ctl_table fs_table[]; static struct ctl_table debug_table[]; static struct ctl_table dev_table[]; extern struct ctl_table random_table[]; #ifdef CONFIG_EPOLL extern struct ctl_table epoll_table[]; #endif #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT int sysctl_legacy_va_layout; #endif /* The default sysctl tables: */ static struct ctl_table sysctl_base_table[] = { { .procname = "kernel", .mode = 0555, .child = kern_table, }, { 
.procname = "vm", .mode = 0555, .child = vm_table, }, { .procname = "fs", .mode = 0555, .child = fs_table, }, { .procname = "debug", .mode = 0555, .child = debug_table, }, { .procname = "dev", .mode = 0555, .child = dev_table, }, { } }; #ifdef CONFIG_SCHED_DEBUG static int min_sched_granularity_ns = 100000; /* 100 usecs */ static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */ static int min_wakeup_granularity_ns; /* 0 usecs */ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */ #ifdef CONFIG_SMP static int min_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE; static int max_sched_tunable_scaling = SCHED_TUNABLESCALING_END-1; #endif /* CONFIG_SMP */ #endif /* CONFIG_SCHED_DEBUG */ #ifdef CONFIG_COMPACTION static int min_extfrag_threshold; static int max_extfrag_threshold = 1000; #endif static struct ctl_table kern_table[] = { { .procname = "sched_child_runs_first", .data = &sysctl_sched_child_runs_first, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, #ifdef CONFIG_SCHED_DEBUG { .procname = "sched_min_granularity_ns", .data = &sysctl_sched_min_granularity, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sched_proc_update_handler, .extra1 = &min_sched_granularity_ns, .extra2 = &max_sched_granularity_ns, }, { .procname = "sched_latency_ns", .data = &sysctl_sched_latency, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sched_proc_update_handler, .extra1 = &min_sched_granularity_ns, .extra2 = &max_sched_granularity_ns, }, { .procname = "sched_wakeup_granularity_ns", .data = &sysctl_sched_wakeup_granularity, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sched_proc_update_handler, .extra1 = &min_wakeup_granularity_ns, .extra2 = &max_wakeup_granularity_ns, }, #ifdef CONFIG_SMP { .procname = "sched_tunable_scaling", .data = &sysctl_sched_tunable_scaling, .maxlen = sizeof(enum sched_tunable_scaling), .mode = 0644, .proc_handler = sched_proc_update_handler, .extra1 = &min_sched_tunable_scaling, .extra2 = &max_sched_tunable_scaling, }, { .procname = "sched_migration_cost_ns", .data = &sysctl_sched_migration_cost, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "sched_nr_migrate", .data = &sysctl_sched_nr_migrate, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "sched_time_avg_ms", .data = &sysctl_sched_time_avg, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "sched_shares_window_ns", .data = &sysctl_sched_shares_window, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, #ifdef CONFIG_SCHEDSTATS { .procname = "sched_schedstats", .data = NULL, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sysctl_schedstats, .extra1 = &zero, .extra2 = &one, }, #endif /* CONFIG_SCHEDSTATS */ #endif /* CONFIG_SMP */ #ifdef CONFIG_NUMA_BALANCING { .procname = "numa_balancing_scan_delay_ms", .data = &sysctl_numa_balancing_scan_delay, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "numa_balancing_scan_period_min_ms", .data = &sysctl_numa_balancing_scan_period_min, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "numa_balancing_scan_period_max_ms", .data = &sysctl_numa_balancing_scan_period_max, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "numa_balancing_scan_size_mb", .data = 
&sysctl_numa_balancing_scan_size, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &one, }, { .procname = "numa_balancing", .data = NULL, /* filled in by handler */ .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sysctl_numa_balancing, .extra1 = &zero, .extra2 = &one, }, #endif /* CONFIG_NUMA_BALANCING */ #endif /* CONFIG_SCHED_DEBUG */ { .procname = "sched_rt_period_us", .data = &sysctl_sched_rt_period, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sched_rt_handler, }, { .procname = "sched_rt_runtime_us", .data = &sysctl_sched_rt_runtime, .maxlen = sizeof(int), .mode = 0644, .proc_handler = sched_rt_handler, }, { .procname = "sched_rr_timeslice_ms", .data = &sched_rr_timeslice, .maxlen = sizeof(int), .mode = 0644, .proc_handler = sched_rr_handler, }, #ifdef CONFIG_SCHED_AUTOGROUP { .procname = "sched_autogroup_enabled", .data = &sysctl_sched_autogroup_enabled, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #endif #ifdef CONFIG_CFS_BANDWIDTH { .procname = "sched_cfs_bandwidth_slice_us", .data = &sysctl_sched_cfs_bandwidth_slice, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &one, }, #endif #ifdef CONFIG_PROVE_LOCKING { .procname = "prove_locking", .data = &prove_locking, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_LOCK_STAT { .procname = "lock_stat", .data = &lock_stat, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "panic", .data = &panic_timeout, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #ifdef CONFIG_COREDUMP { .procname = "core_uses_pid", .data = &core_uses_pid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "core_pattern", .data = core_pattern, .maxlen = CORENAME_MAX_SIZE, .mode = 0644, .proc_handler = proc_dostring_coredump, }, { .procname = "core_pipe_limit", .data = &core_pipe_limit, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_PROC_SYSCTL { .procname = "tainted", .maxlen = sizeof(long), .mode = 0644, .proc_handler = proc_taint, }, { .procname = "sysctl_writes_strict", .data = &sysctl_writes_strict, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &neg_one, .extra2 = &one, }, #endif #ifdef CONFIG_LATENCYTOP { .procname = "latencytop", .data = &latencytop_enabled, .maxlen = sizeof(int), .mode = 0644, .proc_handler = sysctl_latencytop, }, #endif #ifdef CONFIG_BLK_DEV_INITRD { .procname = "real-root-dev", .data = &real_root_dev, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "print-fatal-signals", .data = &print_fatal_signals, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #ifdef CONFIG_SPARC { .procname = "reboot-cmd", .data = reboot_command, .maxlen = 256, .mode = 0644, .proc_handler = proc_dostring, }, { .procname = "stop-a", .data = &stop_a_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "scons-poweroff", .data = &scons_pwroff, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_SPARC64 { .procname = "tsb-ratio", .data = &sysctl_tsb_ratio, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef __hppa__ { .procname = "soft-power", .data = &pwrsw_enabled, 
.maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW { .procname = "unaligned-trap", .data = &unaligned_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "ctrl-alt-del", .data = &C_A_D, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #ifdef CONFIG_FUNCTION_TRACER { .procname = "ftrace_enabled", .data = &ftrace_enabled, .maxlen = sizeof(int), .mode = 0644, .proc_handler = ftrace_enable_sysctl, }, #endif #ifdef CONFIG_STACK_TRACER { .procname = "stack_tracer_enabled", .data = &stack_tracer_enabled, .maxlen = sizeof(int), .mode = 0644, .proc_handler = stack_trace_sysctl, }, #endif #ifdef CONFIG_TRACING { .procname = "ftrace_dump_on_oops", .data = &ftrace_dump_on_oops, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "traceoff_on_warning", .data = &__disable_trace_on_warning, .maxlen = sizeof(__disable_trace_on_warning), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "tracepoint_printk", .data = &tracepoint_printk, .maxlen = sizeof(tracepoint_printk), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_KEXEC_CORE { .procname = "kexec_load_disabled", .data = &kexec_load_disabled, .maxlen = sizeof(int), .mode = 0644, /* only handle a transition from default "0" to "1" */ .proc_handler = proc_dointvec_minmax, .extra1 = &one, .extra2 = &one, }, #endif #ifdef CONFIG_MODULES { .procname = "modprobe", .data = &modprobe_path, .maxlen = KMOD_PATH_LEN, .mode = 0644, .proc_handler = proc_dostring, }, { .procname = "modules_disabled", .data = &modules_disabled, .maxlen = sizeof(int), .mode = 0644, /* only handle a transition from default "0" to "1" */ .proc_handler = proc_dointvec_minmax, .extra1 = &one, .extra2 = &one, }, #endif #ifdef CONFIG_UEVENT_HELPER { .procname = "hotplug", .data = &uevent_helper, .maxlen = UEVENT_HELPER_PATH_LEN, .mode = 0644, .proc_handler = proc_dostring, }, #endif #ifdef CONFIG_CHR_DEV_SG { .procname = "sg-big-buff", .data = &sg_big_buff, .maxlen = sizeof (int), .mode = 0444, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_BSD_PROCESS_ACCT { .procname = "acct", .data = &acct_parm, .maxlen = 3*sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_MAGIC_SYSRQ { .procname = "sysrq", .data = &__sysrq_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = sysrq_sysctl_handler, }, #endif #ifdef CONFIG_PROC_SYSCTL { .procname = "cad_pid", .data = NULL, .maxlen = sizeof (int), .mode = 0600, .proc_handler = proc_do_cad_pid, }, #endif { .procname = "threads-max", .data = NULL, .maxlen = sizeof(int), .mode = 0644, .proc_handler = sysctl_max_threads, }, { .procname = "random", .mode = 0555, .child = random_table, }, { .procname = "usermodehelper", .mode = 0555, .child = usermodehelper_table, }, { .procname = "overflowuid", .data = &overflowuid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &minolduid, .extra2 = &maxolduid, }, { .procname = "overflowgid", .data = &overflowgid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &minolduid, .extra2 = &maxolduid, }, #ifdef CONFIG_S390 #ifdef CONFIG_MATHEMU { .procname = "ieee_emulation_warnings", .data = &sysctl_ieee_emulation_warnings, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "userprocess_debug", .data = &show_unhandled_signals, .maxlen = sizeof(int), .mode = 0644, 
.proc_handler = proc_dointvec, }, #endif { .procname = "pid_max", .data = &pid_max, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &pid_max_min, .extra2 = &pid_max_max, }, { .procname = "panic_on_oops", .data = &panic_on_oops, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #if defined CONFIG_PRINTK { .procname = "printk", .data = &console_loglevel, .maxlen = 4*sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "printk_ratelimit", .data = &printk_ratelimit_state.interval, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "printk_ratelimit_burst", .data = &printk_ratelimit_state.burst, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "printk_delay", .data = &printk_delay_msec, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &ten_thousand, }, { .procname = "printk_devkmsg", .data = devkmsg_log_str, .maxlen = DEVKMSG_STR_MAX_SIZE, .mode = 0644, .proc_handler = devkmsg_sysctl_set_loglvl, }, { .procname = "dmesg_restrict", .data = &dmesg_restrict, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax_sysadmin, .extra1 = &zero, .extra2 = &one, }, { .procname = "kptr_restrict", .data = &kptr_restrict, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax_sysadmin, .extra1 = &zero, .extra2 = &two, }, #endif { .procname = "ngroups_max", .data = &ngroups_max, .maxlen = sizeof (int), .mode = 0444, .proc_handler = proc_dointvec, }, { .procname = "cap_last_cap", .data = (void *)&cap_last_cap, .maxlen = sizeof(int), .mode = 0444, .proc_handler = proc_dointvec, }, #if defined(CONFIG_LOCKUP_DETECTOR) { .procname = "watchdog", .data = &watchdog_user_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_watchdog, .extra1 = &zero, .extra2 = &one, }, { .procname = "watchdog_thresh", .data = &watchdog_thresh, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_watchdog_thresh, .extra1 = &zero, .extra2 = &sixty, }, { .procname = "nmi_watchdog", .data = &nmi_watchdog_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_nmi_watchdog, .extra1 = &zero, #if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR) .extra2 = &one, #else .extra2 = &zero, #endif }, { .procname = "soft_watchdog", .data = &soft_watchdog_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_soft_watchdog, .extra1 = &zero, .extra2 = &one, }, { .procname = "watchdog_cpumask", .data = &watchdog_cpumask_bits, .maxlen = NR_CPUS, .mode = 0644, .proc_handler = proc_watchdog_cpumask, }, { .procname = "softlockup_panic", .data = &softlockup_panic, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #ifdef CONFIG_HARDLOCKUP_DETECTOR { .procname = "hardlockup_panic", .data = &hardlockup_panic, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #endif #ifdef CONFIG_SMP { .procname = "softlockup_all_cpu_backtrace", .data = &sysctl_softlockup_all_cpu_backtrace, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { .procname = "hardlockup_all_cpu_backtrace", .data = &sysctl_hardlockup_all_cpu_backtrace, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #endif /* CONFIG_SMP */ #endif #if 
defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) { .procname = "unknown_nmi_panic", .data = &unknown_nmi_panic, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #if defined(CONFIG_X86) { .procname = "panic_on_unrecovered_nmi", .data = &panic_on_unrecovered_nmi, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "panic_on_io_nmi", .data = &panic_on_io_nmi, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #ifdef CONFIG_DEBUG_STACKOVERFLOW { .procname = "panic_on_stackoverflow", .data = &sysctl_panic_on_stackoverflow, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "bootloader_type", .data = &bootloader_type, .maxlen = sizeof (int), .mode = 0444, .proc_handler = proc_dointvec, }, { .procname = "bootloader_version", .data = &bootloader_version, .maxlen = sizeof (int), .mode = 0444, .proc_handler = proc_dointvec, }, { .procname = "kstack_depth_to_print", .data = &kstack_depth_to_print, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "io_delay_type", .data = &io_delay_type, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #if defined(CONFIG_MMU) { .procname = "randomize_va_space", .data = &randomize_va_space, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #if defined(CONFIG_S390) && defined(CONFIG_SMP) { .procname = "spin_retry", .data = &spin_retry, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #if defined(CONFIG_ACPI_SLEEP) && defined(CONFIG_X86) { .procname = "acpi_video_flags", .data = &acpi_realmode_flags, .maxlen = sizeof (unsigned long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, #endif #ifdef CONFIG_SYSCTL_ARCH_UNALIGN_NO_WARN { .procname = "ignore-unaligned-usertrap", .data = &no_unaligned_warning, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_IA64 { .procname = "unaligned-dump-stack", .data = &unaligned_dump_stack, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_DETECT_HUNG_TASK { .procname = "hung_task_panic", .data = &sysctl_hung_task_panic, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { .procname = "hung_task_check_count", .data = &sysctl_hung_task_check_count, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, { .procname = "hung_task_timeout_secs", .data = &sysctl_hung_task_timeout_secs, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = proc_dohung_task_timeout_secs, .extra2 = &hung_task_timeout_max, }, { .procname = "hung_task_warnings", .data = &sysctl_hung_task_warnings, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &neg_one, }, #endif #ifdef CONFIG_COMPAT { .procname = "compat-log", .data = &compat_log, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_RT_MUTEXES { .procname = "max_lock_depth", .data = &max_lock_depth, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "poweroff_cmd", .data = &poweroff_cmd, .maxlen = POWEROFF_CMD_PATH_LEN, .mode = 0644, .proc_handler = proc_dostring, }, #ifdef CONFIG_KEYS { .procname = "keys", .mode = 0555, .child = key_sysctls, }, #endif #ifdef CONFIG_PERF_EVENTS /* * User-space scripts rely on the existence of this file * as a feature 
check for perf_events being enabled. * * So it's an ABI, do not remove! */ { .procname = "perf_event_paranoid", .data = &sysctl_perf_event_paranoid, .maxlen = sizeof(sysctl_perf_event_paranoid), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "perf_event_mlock_kb", .data = &sysctl_perf_event_mlock, .maxlen = sizeof(sysctl_perf_event_mlock), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "perf_event_max_sample_rate", .data = &sysctl_perf_event_sample_rate, .maxlen = sizeof(sysctl_perf_event_sample_rate), .mode = 0644, .proc_handler = perf_proc_update_handler, .extra1 = &one, }, { .procname = "perf_cpu_time_max_percent", .data = &sysctl_perf_cpu_time_max_percent, .maxlen = sizeof(sysctl_perf_cpu_time_max_percent), .mode = 0644, .proc_handler = perf_cpu_time_max_percent_handler, .extra1 = &zero, .extra2 = &one_hundred, }, { .procname = "perf_event_max_stack", .data = &sysctl_perf_event_max_stack, .maxlen = sizeof(sysctl_perf_event_max_stack), .mode = 0644, .proc_handler = perf_event_max_stack_handler, .extra1 = &zero, .extra2 = &six_hundred_forty_kb, }, { .procname = "perf_event_max_contexts_per_stack", .data = &sysctl_perf_event_max_contexts_per_stack, .maxlen = sizeof(sysctl_perf_event_max_contexts_per_stack), .mode = 0644, .proc_handler = perf_event_max_stack_handler, .extra1 = &zero, .extra2 = &one_thousand, }, #endif #ifdef CONFIG_KMEMCHECK { .procname = "kmemcheck", .data = &kmemcheck_enabled, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "panic_on_warn", .data = &panic_on_warn, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) { .procname = "timer_migration", .data = &sysctl_timer_migration, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = timer_migration_handler, }, #endif #ifdef CONFIG_BPF_SYSCALL { .procname = "unprivileged_bpf_disabled", .data = &sysctl_unprivileged_bpf_disabled, .maxlen = sizeof(sysctl_unprivileged_bpf_disabled), .mode = 0644, /* only handle a transition from default "0" to "1" */ .proc_handler = proc_dointvec_minmax, .extra1 = &one, .extra2 = &one, }, #endif #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) { .procname = "panic_on_rcu_stall", .data = &sysctl_panic_on_rcu_stall, .maxlen = sizeof(sysctl_panic_on_rcu_stall), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #endif { } }; static struct ctl_table vm_table[] = { { .procname = "overcommit_memory", .data = &sysctl_overcommit_memory, .maxlen = sizeof(sysctl_overcommit_memory), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &two, }, { .procname = "panic_on_oom", .data = &sysctl_panic_on_oom, .maxlen = sizeof(sysctl_panic_on_oom), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &two, }, { .procname = "oom_kill_allocating_task", .data = &sysctl_oom_kill_allocating_task, .maxlen = sizeof(sysctl_oom_kill_allocating_task), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "oom_dump_tasks", .data = &sysctl_oom_dump_tasks, .maxlen = sizeof(sysctl_oom_dump_tasks), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "overcommit_ratio", .data = &sysctl_overcommit_ratio, .maxlen = sizeof(sysctl_overcommit_ratio), .mode = 0644, .proc_handler = overcommit_ratio_handler, }, { .procname = "overcommit_kbytes", .data = &sysctl_overcommit_kbytes, .maxlen = sizeof(sysctl_overcommit_kbytes), 
.mode = 0644, .proc_handler = overcommit_kbytes_handler, }, { .procname = "page-cluster", .data = &page_cluster, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, { .procname = "dirty_background_ratio", .data = &dirty_background_ratio, .maxlen = sizeof(dirty_background_ratio), .mode = 0644, .proc_handler = dirty_background_ratio_handler, .extra1 = &zero, .extra2 = &one_hundred, }, { .procname = "dirty_background_bytes", .data = &dirty_background_bytes, .maxlen = sizeof(dirty_background_bytes), .mode = 0644, .proc_handler = dirty_background_bytes_handler, .extra1 = &one_ul, }, { .procname = "dirty_ratio", .data = &vm_dirty_ratio, .maxlen = sizeof(vm_dirty_ratio), .mode = 0644, .proc_handler = dirty_ratio_handler, .extra1 = &zero, .extra2 = &one_hundred, }, { .procname = "dirty_bytes", .data = &vm_dirty_bytes, .maxlen = sizeof(vm_dirty_bytes), .mode = 0644, .proc_handler = dirty_bytes_handler, .extra1 = &dirty_bytes_min, }, { .procname = "dirty_writeback_centisecs", .data = &dirty_writeback_interval, .maxlen = sizeof(dirty_writeback_interval), .mode = 0644, .proc_handler = dirty_writeback_centisecs_handler, }, { .procname = "dirty_expire_centisecs", .data = &dirty_expire_interval, .maxlen = sizeof(dirty_expire_interval), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, { .procname = "dirtytime_expire_seconds", .data = &dirtytime_expire_interval, .maxlen = sizeof(dirty_expire_interval), .mode = 0644, .proc_handler = dirtytime_interval_handler, .extra1 = &zero, }, { .procname = "nr_pdflush_threads", .mode = 0444 /* read-only */, .proc_handler = pdflush_proc_obsolete, }, { .procname = "swappiness", .data = &vm_swappiness, .maxlen = sizeof(vm_swappiness), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one_hundred, }, #ifdef CONFIG_HUGETLB_PAGE { .procname = "nr_hugepages", .data = NULL, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = hugetlb_sysctl_handler, }, #ifdef CONFIG_NUMA { .procname = "nr_hugepages_mempolicy", .data = NULL, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = &hugetlb_mempolicy_sysctl_handler, }, #endif { .procname = "hugetlb_shm_group", .data = &sysctl_hugetlb_shm_group, .maxlen = sizeof(gid_t), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "hugepages_treat_as_movable", .data = &hugepages_treat_as_movable, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "nr_overcommit_hugepages", .data = NULL, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = hugetlb_overcommit_handler, }, #endif { .procname = "lowmem_reserve_ratio", .data = &sysctl_lowmem_reserve_ratio, .maxlen = sizeof(sysctl_lowmem_reserve_ratio), .mode = 0644, .proc_handler = lowmem_reserve_ratio_sysctl_handler, }, { .procname = "drop_caches", .data = &sysctl_drop_caches, .maxlen = sizeof(int), .mode = 0644, .proc_handler = drop_caches_sysctl_handler, .extra1 = &one, .extra2 = &four, }, #ifdef CONFIG_COMPACTION { .procname = "compact_memory", .data = &sysctl_compact_memory, .maxlen = sizeof(int), .mode = 0200, .proc_handler = sysctl_compaction_handler, }, { .procname = "extfrag_threshold", .data = &sysctl_extfrag_threshold, .maxlen = sizeof(int), .mode = 0644, .proc_handler = sysctl_extfrag_handler, .extra1 = &min_extfrag_threshold, .extra2 = &max_extfrag_threshold, }, { .procname = "compact_unevictable_allowed", .data = &sysctl_compact_unevictable_allowed, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, 
.extra1 = &zero, .extra2 = &one, }, #endif /* CONFIG_COMPACTION */ { .procname = "min_free_kbytes", .data = &min_free_kbytes, .maxlen = sizeof(min_free_kbytes), .mode = 0644, .proc_handler = min_free_kbytes_sysctl_handler, .extra1 = &zero, }, { .procname = "watermark_scale_factor", .data = &watermark_scale_factor, .maxlen = sizeof(watermark_scale_factor), .mode = 0644, .proc_handler = watermark_scale_factor_sysctl_handler, .extra1 = &one, .extra2 = &one_thousand, }, { .procname = "percpu_pagelist_fraction", .data = &percpu_pagelist_fraction, .maxlen = sizeof(percpu_pagelist_fraction), .mode = 0644, .proc_handler = percpu_pagelist_fraction_sysctl_handler, .extra1 = &zero, }, #ifdef CONFIG_MMU { .procname = "max_map_count", .data = &sysctl_max_map_count, .maxlen = sizeof(sysctl_max_map_count), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, #else { .procname = "nr_trim_pages", .data = &sysctl_nr_trim_pages, .maxlen = sizeof(sysctl_nr_trim_pages), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, #endif { .procname = "laptop_mode", .data = &laptop_mode, .maxlen = sizeof(laptop_mode), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "block_dump", .data = &block_dump, .maxlen = sizeof(block_dump), .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, }, { .procname = "vfs_cache_pressure", .data = &sysctl_vfs_cache_pressure, .maxlen = sizeof(sysctl_vfs_cache_pressure), .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, }, #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT { .procname = "legacy_va_layout", .data = &sysctl_legacy_va_layout, .maxlen = sizeof(sysctl_legacy_va_layout), .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, }, #endif #ifdef CONFIG_NUMA { .procname = "zone_reclaim_mode", .data = &node_reclaim_mode, .maxlen = sizeof(node_reclaim_mode), .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, }, { .procname = "min_unmapped_ratio", .data = &sysctl_min_unmapped_ratio, .maxlen = sizeof(sysctl_min_unmapped_ratio), .mode = 0644, .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, .extra1 = &zero, .extra2 = &one_hundred, }, { .procname = "min_slab_ratio", .data = &sysctl_min_slab_ratio, .maxlen = sizeof(sysctl_min_slab_ratio), .mode = 0644, .proc_handler = sysctl_min_slab_ratio_sysctl_handler, .extra1 = &zero, .extra2 = &one_hundred, }, #endif #ifdef CONFIG_SMP { .procname = "stat_interval", .data = &sysctl_stat_interval, .maxlen = sizeof(sysctl_stat_interval), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "stat_refresh", .data = NULL, .maxlen = 0, .mode = 0600, .proc_handler = vmstat_refresh, }, #endif #ifdef CONFIG_MMU { .procname = "mmap_min_addr", .data = &dac_mmap_min_addr, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = mmap_min_addr_handler, }, #endif #ifdef CONFIG_NUMA { .procname = "numa_zonelist_order", .data = &numa_zonelist_order, .maxlen = NUMA_ZONELIST_ORDER_LEN, .mode = 0644, .proc_handler = numa_zonelist_order_handler, }, #endif #if (defined(CONFIG_X86_32) && !defined(CONFIG_UML))|| \ (defined(CONFIG_SUPERH) && defined(CONFIG_VSYSCALL)) { .procname = "vdso_enabled", #ifdef CONFIG_X86_32 .data = &vdso32_enabled, .maxlen = sizeof(vdso32_enabled), #else .data = &vdso_enabled, .maxlen = sizeof(vdso_enabled), #endif .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, }, #endif #ifdef CONFIG_HIGHMEM { .procname = "highmem_is_dirtyable", .data = &vm_highmem_is_dirtyable, .maxlen = sizeof(vm_highmem_is_dirtyable), .mode = 0644, 
.proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #endif #ifdef CONFIG_MEMORY_FAILURE { .procname = "memory_failure_early_kill", .data = &sysctl_memory_failure_early_kill, .maxlen = sizeof(sysctl_memory_failure_early_kill), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { .procname = "memory_failure_recovery", .data = &sysctl_memory_failure_recovery, .maxlen = sizeof(sysctl_memory_failure_recovery), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #endif { .procname = "user_reserve_kbytes", .data = &sysctl_user_reserve_kbytes, .maxlen = sizeof(sysctl_user_reserve_kbytes), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, { .procname = "admin_reserve_kbytes", .data = &sysctl_admin_reserve_kbytes, .maxlen = sizeof(sysctl_admin_reserve_kbytes), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS { .procname = "mmap_rnd_bits", .data = &mmap_rnd_bits, .maxlen = sizeof(mmap_rnd_bits), .mode = 0600, .proc_handler = proc_dointvec_minmax, .extra1 = (void *)&mmap_rnd_bits_min, .extra2 = (void *)&mmap_rnd_bits_max, }, #endif #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS { .procname = "mmap_rnd_compat_bits", .data = &mmap_rnd_compat_bits, .maxlen = sizeof(mmap_rnd_compat_bits), .mode = 0600, .proc_handler = proc_dointvec_minmax, .extra1 = (void *)&mmap_rnd_compat_bits_min, .extra2 = (void *)&mmap_rnd_compat_bits_max, }, #endif { } }; static struct ctl_table fs_table[] = { { .procname = "inode-nr", .data = &inodes_stat, .maxlen = 2*sizeof(long), .mode = 0444, .proc_handler = proc_nr_inodes, }, { .procname = "inode-state", .data = &inodes_stat, .maxlen = 7*sizeof(long), .mode = 0444, .proc_handler = proc_nr_inodes, }, { .procname = "file-nr", .data = &files_stat, .maxlen = sizeof(files_stat), .mode = 0444, .proc_handler = proc_nr_files, }, { .procname = "file-max", .data = &files_stat.max_files, .maxlen = sizeof(files_stat.max_files), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, { .procname = "nr_open", .data = &sysctl_nr_open, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &sysctl_nr_open_min, .extra2 = &sysctl_nr_open_max, }, { .procname = "dentry-state", .data = &dentry_stat, .maxlen = 6*sizeof(long), .mode = 0444, .proc_handler = proc_nr_dentry, }, { .procname = "overflowuid", .data = &fs_overflowuid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &minolduid, .extra2 = &maxolduid, }, { .procname = "overflowgid", .data = &fs_overflowgid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &minolduid, .extra2 = &maxolduid, }, #ifdef CONFIG_FILE_LOCKING { .procname = "leases-enable", .data = &leases_enable, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_DNOTIFY { .procname = "dir-notify-enable", .data = &dir_notify_enable, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_MMU #ifdef CONFIG_FILE_LOCKING { .procname = "lease-break-time", .data = &lease_break_time, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_AIO { .procname = "aio-nr", .data = &aio_nr, .maxlen = sizeof(aio_nr), .mode = 0444, .proc_handler = proc_doulongvec_minmax, }, { .procname = "aio-max-nr", .data = &aio_max_nr, .maxlen = sizeof(aio_max_nr), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, #endif /* CONFIG_AIO */ #ifdef 
CONFIG_INOTIFY_USER { .procname = "inotify", .mode = 0555, .child = inotify_table, }, #endif #ifdef CONFIG_EPOLL { .procname = "epoll", .mode = 0555, .child = epoll_table, }, #endif #endif { .procname = "protected_symlinks", .data = &sysctl_protected_symlinks, .maxlen = sizeof(int), .mode = 0600, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { .procname = "protected_hardlinks", .data = &sysctl_protected_hardlinks, .maxlen = sizeof(int), .mode = 0600, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { .procname = "suid_dumpable", .data = &suid_dumpable, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax_coredump, .extra1 = &zero, .extra2 = &two, }, #if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE) { .procname = "binfmt_misc", .mode = 0555, .child = sysctl_mount_point, }, #endif { .procname = "pipe-max-size", .data = &pipe_max_size, .maxlen = sizeof(int), .mode = 0644, .proc_handler = &pipe_proc_fn, .extra1 = &pipe_min_size, }, { .procname = "pipe-user-pages-hard", .data = &pipe_user_pages_hard, .maxlen = sizeof(pipe_user_pages_hard), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, { .procname = "pipe-user-pages-soft", .data = &pipe_user_pages_soft, .maxlen = sizeof(pipe_user_pages_soft), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, { } }; static struct ctl_table debug_table[] = { #ifdef CONFIG_SYSCTL_EXCEPTION_TRACE { .procname = "exception-trace", .data = &show_unhandled_signals, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, #endif #if defined(CONFIG_OPTPROBES) { .procname = "kprobes-optimization", .data = &sysctl_kprobes_optimization, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_kprobes_optimization_handler, .extra1 = &zero, .extra2 = &one, }, #endif { } }; static struct ctl_table dev_table[] = { { } }; int __init sysctl_init(void) { struct ctl_table_header *hdr; hdr = register_sysctl_table(sysctl_base_table); kmemleak_not_leak(hdr); return 0; } #endif /* CONFIG_SYSCTL */ /* * /proc/sys support */ #ifdef CONFIG_PROC_SYSCTL static int _proc_do_string(char *data, int maxlen, int write, char __user *buffer, size_t *lenp, loff_t *ppos) { size_t len; char __user *p; char c; if (!data || !maxlen || !*lenp) { *lenp = 0; return 0; } if (write) { if (sysctl_writes_strict == SYSCTL_WRITES_STRICT) { /* Only continue writes not past the end of buffer. */ len = strlen(data); if (len > maxlen - 1) len = maxlen - 1; if (*ppos > len) return 0; len = *ppos; } else { /* Start writing from beginning of buffer. */ len = 0; } *ppos += *lenp; p = buffer; while ((p - buffer) < *lenp && len < maxlen - 1) { if (get_user(c, p++)) return -EFAULT; if (c == 0 || c == '\n') break; data[len++] = c; } data[len] = 0; } else { len = strlen(data); if (len > maxlen) len = maxlen; if (*ppos > len) { *lenp = 0; return 0; } data += *ppos; len -= *ppos; if (len > *lenp) len = *lenp; if (len) if (copy_to_user(buffer, data, len)) return -EFAULT; if (len < *lenp) { if (put_user('\n', buffer + len)) return -EFAULT; len++; } *lenp = len; *ppos += len; } return 0; } static void warn_sysctl_write(struct ctl_table *table) { pr_warn_once("%s wrote to %s when file position was not 0!\n" "This will not be supported in the future. 
To silence this\n" "warning, set kernel.sysctl_writes_strict = -1\n", current->comm, table->procname); } /** * proc_dostring - read a string sysctl * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes a string from/to the user buffer. If the kernel * buffer provided is not large enough to hold the string, the * string is truncated. The copied string is %NULL-terminated. * If the string is being read by the user process, it is copied * and a newline '\n' is added. It is truncated if the buffer is * not large enough. * * Returns 0 on success. */ int proc_dostring(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { if (write && *ppos && sysctl_writes_strict == SYSCTL_WRITES_WARN) warn_sysctl_write(table); return _proc_do_string((char *)(table->data), table->maxlen, write, (char __user *)buffer, lenp, ppos); } static size_t proc_skip_spaces(char **buf) { size_t ret; char *tmp = skip_spaces(*buf); ret = tmp - *buf; *buf = tmp; return ret; } static void proc_skip_char(char **buf, size_t *size, const char v) { while (*size) { if (**buf != v) break; (*size)--; (*buf)++; } } #define TMPBUFLEN 22 /** * proc_get_long - reads an ASCII formatted integer from a user buffer * * @buf: a kernel buffer * @size: size of the kernel buffer * @val: this is where the number will be stored * @neg: set to %TRUE if number is negative * @perm_tr: a vector which contains the allowed trailers * @perm_tr_len: size of the perm_tr vector * @tr: pointer to store the trailer character * * In case of success %0 is returned and @buf and @size are updated with * the amount of bytes read. If @tr is non-NULL and a trailing * character exists (size is non-zero after returning from this * function), @tr is updated with the trailing character. */ static int proc_get_long(char **buf, size_t *size, unsigned long *val, bool *neg, const char *perm_tr, unsigned perm_tr_len, char *tr) { int len; char *p, tmp[TMPBUFLEN]; if (!*size) return -EINVAL; len = *size; if (len > TMPBUFLEN - 1) len = TMPBUFLEN - 1; memcpy(tmp, *buf, len); tmp[len] = 0; p = tmp; if (*p == '-' && *size > 1) { *neg = true; p++; } else *neg = false; if (!isdigit(*p)) return -EINVAL; *val = simple_strtoul(p, &p, 0); len = p - tmp; /* We don't know if the next char is whitespace thus we may accept * invalid integers (e.g. 1234...a) or two integers instead of one * (e.g. 123...1). So lets not allow such large numbers. */ if (len == TMPBUFLEN - 1) return -EINVAL; if (len < *size && perm_tr_len && !memchr(perm_tr, *p, perm_tr_len)) return -EINVAL; if (tr && (len < *size)) *tr = *p; *buf += len; *size -= len; return 0; } /** * proc_put_long - converts an integer to a decimal ASCII formatted string * * @buf: the user buffer * @size: the size of the user buffer * @val: the integer to be converted * @neg: sign of the number, %TRUE for negative * * In case of success %0 is returned and @buf and @size are updated with * the amount of bytes written. */ static int proc_put_long(void __user **buf, size_t *size, unsigned long val, bool neg) { int len; char tmp[TMPBUFLEN], *p = tmp; sprintf(p, "%s%lu", neg ? 
"-" : "", val); len = strlen(tmp); if (len > *size) len = *size; if (copy_to_user(*buf, tmp, len)) return -EFAULT; *size -= len; *buf += len; return 0; } #undef TMPBUFLEN static int proc_put_char(void __user **buf, size_t *size, char c) { if (*size) { char __user **buffer = (char __user **)buf; if (put_user(c, *buffer)) return -EFAULT; (*size)--, (*buffer)++; *buf = *buffer; } return 0; } static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { if (write) { if (*negp) { if (*lvalp > (unsigned long) INT_MAX + 1) return -EINVAL; *valp = -*lvalp; } else { if (*lvalp > (unsigned long) INT_MAX) return -EINVAL; *valp = *lvalp; } } else { int val = *valp; if (val < 0) { *negp = true; *lvalp = -(unsigned long)val; } else { *negp = false; *lvalp = (unsigned long)val; } } return 0; } static const char proc_wspace_sep[] = { ' ', '\t', '\n' }; static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos, int (*conv)(bool *negp, unsigned long *lvalp, int *valp, int write, void *data), void *data) { int *i, vleft, first = 1, err = 0; size_t left; char *kbuf = NULL, *p; if (!tbl_data || !table->maxlen || !*lenp || (*ppos && !write)) { *lenp = 0; return 0; } i = (int *) tbl_data; vleft = table->maxlen / sizeof(*i); left = *lenp; if (!conv) conv = do_proc_dointvec_conv; if (write) { if (*ppos) { switch (sysctl_writes_strict) { case SYSCTL_WRITES_STRICT: goto out; case SYSCTL_WRITES_WARN: warn_sysctl_write(table); break; default: break; } } if (left > PAGE_SIZE - 1) left = PAGE_SIZE - 1; p = kbuf = memdup_user_nul(buffer, left); if (IS_ERR(kbuf)) return PTR_ERR(kbuf); } for (; left && vleft--; i++, first=0) { unsigned long lval; bool neg; if (write) { left -= proc_skip_spaces(&p); if (!left) break; err = proc_get_long(&p, &left, &lval, &neg, proc_wspace_sep, sizeof(proc_wspace_sep), NULL); if (err) break; if (conv(&neg, &lval, i, 1, data)) { err = -EINVAL; break; } } else { if (conv(&neg, &lval, i, 0, data)) { err = -EINVAL; break; } if (!first) err = proc_put_char(&buffer, &left, '\t'); if (err) break; err = proc_put_long(&buffer, &left, lval, neg); if (err) break; } } if (!write && !first && left && !err) err = proc_put_char(&buffer, &left, '\n'); if (write && !err && left) left -= proc_skip_spaces(&p); if (write) { kfree(kbuf); if (first) return err ? : -EINVAL; } *lenp -= left; out: *ppos += *lenp; return err; } static int do_proc_dointvec(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos, int (*conv)(bool *negp, unsigned long *lvalp, int *valp, int write, void *data), void *data) { return __do_proc_dointvec(table->data, table, write, buffer, lenp, ppos, conv, data); } /** * proc_dointvec - read a vector of integers * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned int) integer * values from/to the user buffer, treated as an ASCII string. * * Returns 0 on success. */ int proc_dointvec(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table,write,buffer,lenp,ppos, NULL,NULL); } /* * Taint values can only be increased * This means we can safely use a temporary. 
*/ static int proc_taint(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table t; unsigned long tmptaint = get_taint(); int err; if (write && !capable(CAP_SYS_ADMIN)) return -EPERM; t = *table; t.data = &tmptaint; err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos); if (err < 0) return err; if (write) { /* * Poor man's atomic or. Not worth adding a primitive * to everyone's atomic.h for this */ int i; for (i = 0; i < BITS_PER_LONG && tmptaint >> i; i++) { if ((tmptaint >> i) & 1) add_taint(i, LOCKDEP_STILL_OK); } } return err; } #ifdef CONFIG_PRINTK static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { if (write && !capable(CAP_SYS_ADMIN)) return -EPERM; return proc_dointvec_minmax(table, write, buffer, lenp, ppos); } #endif struct do_proc_dointvec_minmax_conv_param { int *min; int *max; }; static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { struct do_proc_dointvec_minmax_conv_param *param = data; if (write) { int val = *negp ? -*lvalp : *lvalp; if ((param->min && *param->min > val) || (param->max && *param->max < val)) return -EINVAL; *valp = val; } else { int val = *valp; if (val < 0) { *negp = true; *lvalp = -(unsigned long)val; } else { *negp = false; *lvalp = (unsigned long)val; } } return 0; } /** * proc_dointvec_minmax - read a vector of integers with min/max values * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned int) integer * values from/to the user buffer, treated as an ASCII string. * * This routine will ensure the values are within the range specified by * table->extra1 (min) and table->extra2 (max). * * Returns 0 on success. */ int proc_dointvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct do_proc_dointvec_minmax_conv_param param = { .min = (int *) table->extra1, .max = (int *) table->extra2, }; return do_proc_dointvec(table, write, buffer, lenp, ppos, do_proc_dointvec_minmax_conv, &param); } static void validate_coredump_safety(void) { #ifdef CONFIG_COREDUMP if (suid_dumpable == SUID_DUMP_ROOT && core_pattern[0] != '/' && core_pattern[0] != '|') { printk(KERN_WARNING "Unsafe core_pattern used with "\ "suid_dumpable=2. 
Pipe handler or fully qualified "\ "core dump path required.\n"); } #endif } static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (!error) validate_coredump_safety(); return error; } #ifdef CONFIG_COREDUMP static int proc_dostring_coredump(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int error = proc_dostring(table, write, buffer, lenp, ppos); if (!error) validate_coredump_safety(); return error; } #endif static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos, unsigned long convmul, unsigned long convdiv) { unsigned long *i, *min, *max; int vleft, first = 1, err = 0; size_t left; char *kbuf = NULL, *p; if (!data || !table->maxlen || !*lenp || (*ppos && !write)) { *lenp = 0; return 0; } i = (unsigned long *) data; min = (unsigned long *) table->extra1; max = (unsigned long *) table->extra2; vleft = table->maxlen / sizeof(unsigned long); left = *lenp; if (write) { if (*ppos) { switch (sysctl_writes_strict) { case SYSCTL_WRITES_STRICT: goto out; case SYSCTL_WRITES_WARN: warn_sysctl_write(table); break; default: break; } } if (left > PAGE_SIZE - 1) left = PAGE_SIZE - 1; p = kbuf = memdup_user_nul(buffer, left); if (IS_ERR(kbuf)) return PTR_ERR(kbuf); } for (; left && vleft--; i++, first = 0) { unsigned long val; if (write) { bool neg; left -= proc_skip_spaces(&p); err = proc_get_long(&p, &left, &val, &neg, proc_wspace_sep, sizeof(proc_wspace_sep), NULL); if (err) break; if (neg) continue; if ((min && val < *min) || (max && val > *max)) continue; *i = val; } else { val = convdiv * (*i) / convmul; if (!first) { err = proc_put_char(&buffer, &left, '\t'); if (err) break; } err = proc_put_long(&buffer, &left, val, false); if (err) break; } } if (!write && !first && left && !err) err = proc_put_char(&buffer, &left, '\n'); if (write && !err) left -= proc_skip_spaces(&p); if (write) { kfree(kbuf); if (first) return err ? : -EINVAL; } *lenp -= left; out: *ppos += *lenp; return err; } static int do_proc_doulongvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos, unsigned long convmul, unsigned long convdiv) { return __do_proc_doulongvec_minmax(table->data, table, write, buffer, lenp, ppos, convmul, convdiv); } /** * proc_doulongvec_minmax - read a vector of long integers with min/max values * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned long) unsigned long * values from/to the user buffer, treated as an ASCII string. * * This routine will ensure the values are within the range specified by * table->extra1 (min) and table->extra2 (max). * * Returns 0 on success. 
*/ int proc_doulongvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_doulongvec_minmax(table, write, buffer, lenp, ppos, 1l, 1l); } /** * proc_doulongvec_ms_jiffies_minmax - read a vector of millisecond values with min/max values * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned long) unsigned long * values from/to the user buffer, treated as an ASCII string. The values * are treated as milliseconds, and converted to jiffies when they are stored. * * This routine will ensure the values are within the range specified by * table->extra1 (min) and table->extra2 (max). * * Returns 0 on success. */ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_doulongvec_minmax(table, write, buffer, lenp, ppos, HZ, 1000l); } static int do_proc_dointvec_jiffies_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { if (write) { if (*lvalp > LONG_MAX / HZ) return 1; *valp = *negp ? -(*lvalp*HZ) : (*lvalp*HZ); } else { int val = *valp; unsigned long lval; if (val < 0) { *negp = true; lval = -(unsigned long)val; } else { *negp = false; lval = (unsigned long)val; } *lvalp = lval / HZ; } return 0; } static int do_proc_dointvec_userhz_jiffies_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { if (write) { if (USER_HZ < HZ && *lvalp > (LONG_MAX / HZ) * USER_HZ) return 1; *valp = clock_t_to_jiffies(*negp ? -*lvalp : *lvalp); } else { int val = *valp; unsigned long lval; if (val < 0) { *negp = true; lval = -(unsigned long)val; } else { *negp = false; lval = (unsigned long)val; } *lvalp = jiffies_to_clock_t(lval); } return 0; } static int do_proc_dointvec_ms_jiffies_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { if (write) { unsigned long jif = msecs_to_jiffies(*negp ? -*lvalp : *lvalp); if (jif > INT_MAX) return 1; *valp = (int)jif; } else { int val = *valp; unsigned long lval; if (val < 0) { *negp = true; lval = -(unsigned long)val; } else { *negp = false; lval = (unsigned long)val; } *lvalp = jiffies_to_msecs(lval); } return 0; } /** * proc_dointvec_jiffies - read a vector of integers as seconds * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned int) integer * values from/to the user buffer, treated as an ASCII string. * The values read are assumed to be in seconds, and are converted into * jiffies. * * Returns 0 on success. */ int proc_dointvec_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table,write,buffer,lenp,ppos, do_proc_dointvec_jiffies_conv,NULL); } /** * proc_dointvec_userhz_jiffies - read a vector of integers as 1/USER_HZ seconds * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: pointer to the file position * * Reads/writes up to table->maxlen/sizeof(unsigned int) integer * values from/to the user buffer, treated as an ASCII string. * The values read are assumed to be in 1/USER_HZ seconds, and * are converted into jiffies. * * Returns 0 on success. 
*/ int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table,write,buffer,lenp,ppos, do_proc_dointvec_userhz_jiffies_conv,NULL); } /** * proc_dointvec_ms_jiffies - read a vector of integers as 1 milliseconds * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * @ppos: the current position in the file * * Reads/writes up to table->maxlen/sizeof(unsigned int) integer * values from/to the user buffer, treated as an ASCII string. * The values read are assumed to be in 1/1000 seconds, and * are converted into jiffies. * * Returns 0 on success. */ int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table, write, buffer, lenp, ppos, do_proc_dointvec_ms_jiffies_conv, NULL); } static int proc_do_cad_pid(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct pid *new_pid; pid_t tmp; int r; tmp = pid_vnr(cad_pid); r = __do_proc_dointvec(&tmp, table, write, buffer, lenp, ppos, NULL, NULL); if (r || !write) return r; new_pid = find_get_pid(tmp); if (!new_pid) return -ESRCH; put_pid(xchg(&cad_pid, new_pid)); return 0; } /** * proc_do_large_bitmap - read/write from/to a large bitmap * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * The bitmap is stored at table->data and the bitmap length (in bits) * in table->maxlen. * * We use a range comma separated format (e.g. 1,3-4,10-10) so that * large bitmaps may be represented in a compact manner. Writing into * the file will clear the bitmap then update it with the given input. * * Returns 0 on success. 
*/ int proc_do_large_bitmap(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int err = 0; bool first = 1; size_t left = *lenp; unsigned long bitmap_len = table->maxlen; unsigned long *bitmap = *(unsigned long **) table->data; unsigned long *tmp_bitmap = NULL; char tr_a[] = { '-', ',', '\n' }, tr_b[] = { ',', '\n', 0 }, c; if (!bitmap || !bitmap_len || !left || (*ppos && !write)) { *lenp = 0; return 0; } if (write) { char *kbuf, *p; if (left > PAGE_SIZE - 1) left = PAGE_SIZE - 1; p = kbuf = memdup_user_nul(buffer, left); if (IS_ERR(kbuf)) return PTR_ERR(kbuf); tmp_bitmap = kzalloc(BITS_TO_LONGS(bitmap_len) * sizeof(unsigned long), GFP_KERNEL); if (!tmp_bitmap) { kfree(kbuf); return -ENOMEM; } proc_skip_char(&p, &left, '\n'); while (!err && left) { unsigned long val_a, val_b; bool neg; err = proc_get_long(&p, &left, &val_a, &neg, tr_a, sizeof(tr_a), &c); if (err) break; if (val_a >= bitmap_len || neg) { err = -EINVAL; break; } val_b = val_a; if (left) { p++; left--; } if (c == '-') { err = proc_get_long(&p, &left, &val_b, &neg, tr_b, sizeof(tr_b), &c); if (err) break; if (val_b >= bitmap_len || neg || val_a > val_b) { err = -EINVAL; break; } if (left) { p++; left--; } } bitmap_set(tmp_bitmap, val_a, val_b - val_a + 1); first = 0; proc_skip_char(&p, &left, '\n'); } kfree(kbuf); } else { unsigned long bit_a, bit_b = 0; while (left) { bit_a = find_next_bit(bitmap, bitmap_len, bit_b); if (bit_a >= bitmap_len) break; bit_b = find_next_zero_bit(bitmap, bitmap_len, bit_a + 1) - 1; if (!first) { err = proc_put_char(&buffer, &left, ','); if (err) break; } err = proc_put_long(&buffer, &left, bit_a, false); if (err) break; if (bit_a != bit_b) { err = proc_put_char(&buffer, &left, '-'); if (err) break; err = proc_put_long(&buffer, &left, bit_b, false); if (err) break; } first = 0; bit_b++; } if (!err) err = proc_put_char(&buffer, &left, '\n'); } if (!err) { if (write) { if (*ppos) bitmap_or(bitmap, bitmap, tmp_bitmap, bitmap_len); else bitmap_copy(bitmap, tmp_bitmap, bitmap_len); } kfree(tmp_bitmap); *lenp -= left; *ppos += *lenp; return 0; } else { kfree(tmp_bitmap); return err; } } #else /* CONFIG_PROC_SYSCTL */ int proc_dostring(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_dointvec(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_dointvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_dointvec_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_doulongvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } #endif /* CONFIG_PROC_SYSCTL */ /* * No sense putting this after each symbol definition, twice, * exception granted :-) */ EXPORT_SYMBOL(proc_dointvec); EXPORT_SYMBOL(proc_dointvec_jiffies); EXPORT_SYMBOL(proc_dointvec_minmax); EXPORT_SYMBOL(proc_dointvec_userhz_jiffies); EXPORT_SYMBOL(proc_dointvec_ms_jiffies); 
EXPORT_SYMBOL(proc_dostring);
EXPORT_SYMBOL(proc_doulongvec_minmax);
EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
./CrossVul/dataset_final_sorted/CWE-400/c/bad_5200_6
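The proc handlers in the file above (proc_dointvec_minmax() and friends) are only reachable through a registered ctl_table. As a minimal sketch of how a single integer knob would be wired up against this kernel version, using the same .child chaining and register_sysctl_table() entry point that sysctl_init() uses above; the names example_value, example_table and example_root are hypothetical and do not appear in the file:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>

static int example_value;
static int example_min;		/* lower bound handed to .extra1 */
static int example_max = 100;	/* upper bound handed to .extra2 */

static struct ctl_table example_table[] = {
	{
		.procname	= "example_value",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &example_min,
		.extra2		= &example_max,
	},
	{ }
};

static struct ctl_table example_root[] = {
	{
		.procname	= "example",
		.mode		= 0555,
		.child		= example_table,	/* same .child style as the tables above */
	},
	{ }
};

static struct ctl_table_header *example_hdr;

static int __init example_sysctl_init(void)
{
	example_hdr = register_sysctl_table(example_root);
	return example_hdr ? 0 : -ENOMEM;
}

static void __exit example_sysctl_exit(void)
{
	unregister_sysctl_table(example_hdr);
}

module_init(example_sysctl_init);
module_exit(example_sysctl_exit);
MODULE_LICENSE("GPL");

With this in place, writes to /proc/sys/example/example_value outside [0, 100] are rejected with -EINVAL by do_proc_dointvec_minmax_conv() shown earlier, while in-range writes land in example_value.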
crossvul-cpp_data_bad_1256_0
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014- QLogic Corporation. * All rights reserved * www.qlogic.com * * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. */ /* * bfa_attr.c Linux driver configuration interface module. */ #include "bfad_drv.h" #include "bfad_im.h" /* * FC transport template entry, get SCSI target port ID. */ static void bfad_im_get_starget_port_id(struct scsi_target *starget) { struct Scsi_Host *shost; struct bfad_im_port_s *im_port; struct bfad_s *bfad; struct bfad_itnim_s *itnim = NULL; u32 fc_id = -1; unsigned long flags; shost = dev_to_shost(starget->dev.parent); im_port = (struct bfad_im_port_s *) shost->hostdata[0]; bfad = im_port->bfad; spin_lock_irqsave(&bfad->bfad_lock, flags); itnim = bfad_get_itnim(im_port, starget->id); if (itnim) fc_id = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim); fc_starget_port_id(starget) = fc_id; spin_unlock_irqrestore(&bfad->bfad_lock, flags); } /* * FC transport template entry, get SCSI target nwwn. */ static void bfad_im_get_starget_node_name(struct scsi_target *starget) { struct Scsi_Host *shost; struct bfad_im_port_s *im_port; struct bfad_s *bfad; struct bfad_itnim_s *itnim = NULL; u64 node_name = 0; unsigned long flags; shost = dev_to_shost(starget->dev.parent); im_port = (struct bfad_im_port_s *) shost->hostdata[0]; bfad = im_port->bfad; spin_lock_irqsave(&bfad->bfad_lock, flags); itnim = bfad_get_itnim(im_port, starget->id); if (itnim) node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim); fc_starget_node_name(starget) = cpu_to_be64(node_name); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } /* * FC transport template entry, get SCSI target pwwn. */ static void bfad_im_get_starget_port_name(struct scsi_target *starget) { struct Scsi_Host *shost; struct bfad_im_port_s *im_port; struct bfad_s *bfad; struct bfad_itnim_s *itnim = NULL; u64 port_name = 0; unsigned long flags; shost = dev_to_shost(starget->dev.parent); im_port = (struct bfad_im_port_s *) shost->hostdata[0]; bfad = im_port->bfad; spin_lock_irqsave(&bfad->bfad_lock, flags); itnim = bfad_get_itnim(im_port, starget->id); if (itnim) port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim); fc_starget_port_name(starget) = cpu_to_be64(port_name); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } /* * FC transport template entry, get SCSI host port ID. */ static void bfad_im_get_host_port_id(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_port_s *port = im_port->port; fc_host_port_id(shost) = bfa_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port)); } /* * FC transport template entry, get SCSI host port type. */ static void bfad_im_get_host_port_type(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfa_lport_attr_s port_attr; bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); switch (port_attr.port_type) { case BFA_PORT_TYPE_NPORT: fc_host_port_type(shost) = FC_PORTTYPE_NPORT; break; case BFA_PORT_TYPE_NLPORT: fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; break; case BFA_PORT_TYPE_P2P: fc_host_port_type(shost) = FC_PORTTYPE_PTP; break; case BFA_PORT_TYPE_LPORT: fc_host_port_type(shost) = FC_PORTTYPE_LPORT; break; default: fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; break; } } /* * FC transport template entry, get SCSI host port state. 
*/ static void bfad_im_get_host_port_state(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfa_port_attr_s attr; bfa_fcport_get_attr(&bfad->bfa, &attr); switch (attr.port_state) { case BFA_PORT_ST_LINKDOWN: fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; break; case BFA_PORT_ST_LINKUP: fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; break; case BFA_PORT_ST_DISABLED: case BFA_PORT_ST_STOPPED: case BFA_PORT_ST_IOCDOWN: case BFA_PORT_ST_IOCDIS: fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; break; case BFA_PORT_ST_UNINIT: case BFA_PORT_ST_ENABLING_QWAIT: case BFA_PORT_ST_ENABLING: case BFA_PORT_ST_DISABLING_QWAIT: case BFA_PORT_ST_DISABLING: default: fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; break; } } /* * FC transport template entry, get SCSI host active fc4s. */ static void bfad_im_get_host_active_fc4s(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_port_s *port = im_port->port; memset(fc_host_active_fc4s(shost), 0, sizeof(fc_host_active_fc4s(shost))); if (port->supported_fc4s & BFA_LPORT_ROLE_FCP_IM) fc_host_active_fc4s(shost)[2] = 1; fc_host_active_fc4s(shost)[7] = 1; } /* * FC transport template entry, get SCSI host link speed. */ static void bfad_im_get_host_speed(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfa_port_attr_s attr; bfa_fcport_get_attr(&bfad->bfa, &attr); switch (attr.speed) { case BFA_PORT_SPEED_10GBPS: fc_host_speed(shost) = FC_PORTSPEED_10GBIT; break; case BFA_PORT_SPEED_16GBPS: fc_host_speed(shost) = FC_PORTSPEED_16GBIT; break; case BFA_PORT_SPEED_8GBPS: fc_host_speed(shost) = FC_PORTSPEED_8GBIT; break; case BFA_PORT_SPEED_4GBPS: fc_host_speed(shost) = FC_PORTSPEED_4GBIT; break; case BFA_PORT_SPEED_2GBPS: fc_host_speed(shost) = FC_PORTSPEED_2GBIT; break; case BFA_PORT_SPEED_1GBPS: fc_host_speed(shost) = FC_PORTSPEED_1GBIT; break; default: fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; } } /* * FC transport template entry, get SCSI host port type. */ static void bfad_im_get_host_fabric_name(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_port_s *port = im_port->port; wwn_t fabric_nwwn = 0; fabric_nwwn = bfa_fcs_lport_get_fabric_name(port->fcs_port); fc_host_fabric_name(shost) = cpu_to_be64(fabric_nwwn); } /* * FC transport template entry, get BFAD statistics. 
*/ static struct fc_host_statistics * bfad_im_get_stats(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfad_hal_comp fcomp; union bfa_port_stats_u *fcstats; struct fc_host_statistics *hstats; bfa_status_t rc; unsigned long flags; fcstats = kzalloc(sizeof(union bfa_port_stats_u), GFP_KERNEL); if (fcstats == NULL) return NULL; hstats = &bfad->link_stats; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); memset(hstats, 0, sizeof(struct fc_host_statistics)); rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa), fcstats, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (rc != BFA_STATUS_OK) return NULL; wait_for_completion(&fcomp.comp); /* Fill the fc_host_statistics structure */ hstats->seconds_since_last_reset = fcstats->fc.secs_reset; hstats->tx_frames = fcstats->fc.tx_frames; hstats->tx_words = fcstats->fc.tx_words; hstats->rx_frames = fcstats->fc.rx_frames; hstats->rx_words = fcstats->fc.rx_words; hstats->lip_count = fcstats->fc.lip_count; hstats->nos_count = fcstats->fc.nos_count; hstats->error_frames = fcstats->fc.error_frames; hstats->dumped_frames = fcstats->fc.dropped_frames; hstats->link_failure_count = fcstats->fc.link_failures; hstats->loss_of_sync_count = fcstats->fc.loss_of_syncs; hstats->loss_of_signal_count = fcstats->fc.loss_of_signals; hstats->prim_seq_protocol_err_count = fcstats->fc.primseq_errs; hstats->invalid_crc_count = fcstats->fc.invalid_crcs; kfree(fcstats); return hstats; } /* * FC transport template entry, reset BFAD statistics. */ static void bfad_im_reset_stats(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfad_hal_comp fcomp; unsigned long flags; bfa_status_t rc; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); rc = bfa_port_clear_stats(BFA_FCPORT(&bfad->bfa), bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (rc != BFA_STATUS_OK) return; wait_for_completion(&fcomp.comp); return; } /* * FC transport template entry, set rport loss timeout. 
* Update dev_loss_tmo based on the value pushed down by the stack * In case it is lesser than path_tov of driver, set it to path_tov + 1 * to ensure that the driver times out before the application */ static void bfad_im_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) { struct bfad_itnim_data_s *itnim_data = rport->dd_data; struct bfad_itnim_s *itnim = itnim_data->itnim; struct bfad_s *bfad = itnim->im->bfad; uint16_t path_tov = bfa_fcpim_path_tov_get(&bfad->bfa); rport->dev_loss_tmo = timeout; if (timeout < path_tov) rport->dev_loss_tmo = path_tov + 1; } static int bfad_im_vport_create(struct fc_vport *fc_vport, bool disable) { char *vname = fc_vport->symbolic_name; struct Scsi_Host *shost = fc_vport->shost; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfa_lport_cfg_s port_cfg; struct bfad_vport_s *vp; int status = 0, rc; unsigned long flags; memset(&port_cfg, 0, sizeof(port_cfg)); u64_to_wwn(fc_vport->node_name, (u8 *)&port_cfg.nwwn); u64_to_wwn(fc_vport->port_name, (u8 *)&port_cfg.pwwn); if (strlen(vname) > 0) strcpy((char *)&port_cfg.sym_name, vname); port_cfg.roles = BFA_LPORT_ROLE_FCP_IM; spin_lock_irqsave(&bfad->bfad_lock, flags); list_for_each_entry(vp, &bfad->pbc_vport_list, list_entry) { if (port_cfg.pwwn == vp->fcs_vport.lport.port_cfg.pwwn) { port_cfg.preboot_vp = vp->fcs_vport.lport.port_cfg.preboot_vp; break; } } spin_unlock_irqrestore(&bfad->bfad_lock, flags); rc = bfad_vport_create(bfad, 0, &port_cfg, &fc_vport->dev); if (rc == BFA_STATUS_OK) { struct bfad_vport_s *vport; struct bfa_fcs_vport_s *fcs_vport; struct Scsi_Host *vshost; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, port_cfg.pwwn); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (fcs_vport == NULL) return VPCERR_BAD_WWN; fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE); if (disable) { spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcs_vport_stop(fcs_vport); spin_unlock_irqrestore(&bfad->bfad_lock, flags); fc_vport_set_state(fc_vport, FC_VPORT_DISABLED); } vport = fcs_vport->vport_drv; vshost = vport->drv_port.im_port->shost; fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn); fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn); fc_host_supported_classes(vshost) = FC_COS_CLASS3; memset(fc_host_supported_fc4s(vshost), 0, sizeof(fc_host_supported_fc4s(vshost))); /* For FCP type 0x08 */ if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM) fc_host_supported_fc4s(vshost)[2] = 1; /* For fibre channel services type 0x20 */ fc_host_supported_fc4s(vshost)[7] = 1; fc_host_supported_speeds(vshost) = bfad_im_supported_speeds(&bfad->bfa); fc_host_maxframe_size(vshost) = bfa_fcport_get_maxfrsize(&bfad->bfa); fc_vport->dd_data = vport; vport->drv_port.im_port->fc_vport = fc_vport; } else if (rc == BFA_STATUS_INVALID_WWN) return VPCERR_BAD_WWN; else if (rc == BFA_STATUS_VPORT_EXISTS) return VPCERR_BAD_WWN; else if (rc == BFA_STATUS_VPORT_MAX) return VPCERR_NO_FABRIC_SUPP; else if (rc == BFA_STATUS_VPORT_WWN_BP) return VPCERR_BAD_WWN; else return FC_VPORT_FAILED; return status; } int bfad_im_issue_fc_host_lip(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfad_hal_comp fcomp; unsigned long flags; uint32_t status; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); status = bfa_port_disable(&bfad->bfa.modules.port, bfad_hcb_comp, &fcomp); 
spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (status != BFA_STATUS_OK) return -EIO; wait_for_completion(&fcomp.comp); if (fcomp.status != BFA_STATUS_OK) return -EIO; spin_lock_irqsave(&bfad->bfad_lock, flags); status = bfa_port_enable(&bfad->bfa.modules.port, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (status != BFA_STATUS_OK) return -EIO; wait_for_completion(&fcomp.comp); if (fcomp.status != BFA_STATUS_OK) return -EIO; return 0; } static int bfad_im_vport_delete(struct fc_vport *fc_vport) { struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) vport->drv_port.im_port; struct bfad_s *bfad = im_port->bfad; struct bfa_fcs_vport_s *fcs_vport; struct Scsi_Host *vshost; wwn_t pwwn; int rc; unsigned long flags; struct completion fcomp; if (im_port->flags & BFAD_PORT_DELETE) { bfad_scsi_host_free(bfad, im_port); list_del(&vport->list_entry); kfree(vport); return 0; } vshost = vport->drv_port.im_port->shost; u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn); spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (fcs_vport == NULL) return VPCERR_BAD_WWN; vport->drv_port.flags |= BFAD_PORT_DELETE; vport->comp_del = &fcomp; init_completion(vport->comp_del); spin_lock_irqsave(&bfad->bfad_lock, flags); rc = bfa_fcs_vport_delete(&vport->fcs_vport); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (rc == BFA_STATUS_PBC) { vport->drv_port.flags &= ~BFAD_PORT_DELETE; vport->comp_del = NULL; return -1; } wait_for_completion(vport->comp_del); bfad_scsi_host_free(bfad, im_port); list_del(&vport->list_entry); kfree(vport); return 0; } static int bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable) { struct bfad_vport_s *vport; struct bfad_s *bfad; struct bfa_fcs_vport_s *fcs_vport; struct Scsi_Host *vshost; wwn_t pwwn; unsigned long flags; vport = (struct bfad_vport_s *)fc_vport->dd_data; bfad = vport->drv_port.bfad; vshost = vport->drv_port.im_port->shost; u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn); spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (fcs_vport == NULL) return VPCERR_BAD_WWN; if (disable) { bfa_fcs_vport_stop(fcs_vport); fc_vport_set_state(fc_vport, FC_VPORT_DISABLED); } else { bfa_fcs_vport_start(fcs_vport); fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE); } return 0; } void bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport) { struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *)vport->drv_port.im_port; struct bfad_s *bfad = im_port->bfad; struct Scsi_Host *vshost = vport->drv_port.im_port->shost; char *sym_name = fc_vport->symbolic_name; struct bfa_fcs_vport_s *fcs_vport; wwn_t pwwn; unsigned long flags; u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn); spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (fcs_vport == NULL) return; spin_lock_irqsave(&bfad->bfad_lock, flags); if (strlen(sym_name) > 0) bfa_fcs_lport_set_symname(&fcs_vport->lport, sym_name); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } struct fc_function_template bfad_im_fc_function_template = { /* Target dynamic attributes */ .get_starget_port_id = bfad_im_get_starget_port_id, .show_starget_port_id = 
1, .get_starget_node_name = bfad_im_get_starget_node_name, .show_starget_node_name = 1, .get_starget_port_name = bfad_im_get_starget_port_name, .show_starget_port_name = 1, /* Host dynamic attribute */ .get_host_port_id = bfad_im_get_host_port_id, .show_host_port_id = 1, /* Host fixed attributes */ .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_supported_fc4s = 1, .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, /* More host dynamic attributes */ .show_host_port_type = 1, .get_host_port_type = bfad_im_get_host_port_type, .show_host_port_state = 1, .get_host_port_state = bfad_im_get_host_port_state, .show_host_active_fc4s = 1, .get_host_active_fc4s = bfad_im_get_host_active_fc4s, .show_host_speed = 1, .get_host_speed = bfad_im_get_host_speed, .show_host_fabric_name = 1, .get_host_fabric_name = bfad_im_get_host_fabric_name, .show_host_symbolic_name = 1, /* Statistics */ .get_fc_host_stats = bfad_im_get_stats, .reset_fc_host_stats = bfad_im_reset_stats, /* Allocation length for host specific data */ .dd_fcrport_size = sizeof(struct bfad_itnim_data_s *), /* Remote port fixed attributes */ .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .show_rport_dev_loss_tmo = 1, .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo, .issue_fc_host_lip = bfad_im_issue_fc_host_lip, .vport_create = bfad_im_vport_create, .vport_delete = bfad_im_vport_delete, .vport_disable = bfad_im_vport_disable, .set_vport_symbolic_name = bfad_im_vport_set_symbolic_name, .bsg_request = bfad_im_bsg_request, .bsg_timeout = bfad_im_bsg_timeout, }; struct fc_function_template bfad_im_vport_fc_function_template = { /* Target dynamic attributes */ .get_starget_port_id = bfad_im_get_starget_port_id, .show_starget_port_id = 1, .get_starget_node_name = bfad_im_get_starget_node_name, .show_starget_node_name = 1, .get_starget_port_name = bfad_im_get_starget_port_name, .show_starget_port_name = 1, /* Host dynamic attribute */ .get_host_port_id = bfad_im_get_host_port_id, .show_host_port_id = 1, /* Host fixed attributes */ .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_supported_fc4s = 1, .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, /* More host dynamic attributes */ .show_host_port_type = 1, .get_host_port_type = bfad_im_get_host_port_type, .show_host_port_state = 1, .get_host_port_state = bfad_im_get_host_port_state, .show_host_active_fc4s = 1, .get_host_active_fc4s = bfad_im_get_host_active_fc4s, .show_host_speed = 1, .get_host_speed = bfad_im_get_host_speed, .show_host_fabric_name = 1, .get_host_fabric_name = bfad_im_get_host_fabric_name, .show_host_symbolic_name = 1, /* Statistics */ .get_fc_host_stats = bfad_im_get_stats, .reset_fc_host_stats = bfad_im_reset_stats, /* Allocation length for host specific data */ .dd_fcrport_size = sizeof(struct bfad_itnim_data_s *), /* Remote port fixed attributes */ .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .show_rport_dev_loss_tmo = 1, .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo, }; /* * Scsi_Host_attrs SCSI host attributes */ static ssize_t bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; bfa_get_adapter_serial_num(&bfad->bfa, serial_num); return snprintf(buf, PAGE_SIZE, 
"%s\n", serial_num); } static ssize_t bfad_im_model_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; char model[BFA_ADAPTER_MODEL_NAME_LEN]; bfa_get_adapter_model(&bfad->bfa, model); return snprintf(buf, PAGE_SIZE, "%s\n", model); } static ssize_t bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; char model[BFA_ADAPTER_MODEL_NAME_LEN]; char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN]; int nports = 0; bfa_get_adapter_model(&bfad->bfa, model); nports = bfa_get_nports(&bfad->bfa); if (!strcmp(model, "QLogic-425")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 4Gbps PCIe dual port FC HBA"); else if (!strcmp(model, "QLogic-825")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 8Gbps PCIe dual port FC HBA"); else if (!strcmp(model, "QLogic-42B")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 4Gbps PCIe dual port FC HBA for HP"); else if (!strcmp(model, "QLogic-82B")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 8Gbps PCIe dual port FC HBA for HP"); else if (!strcmp(model, "QLogic-1010")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 10Gbps single port CNA"); else if (!strcmp(model, "QLogic-1020")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 10Gbps dual port CNA"); else if (!strcmp(model, "QLogic-1007")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 10Gbps CNA for IBM Blade Center"); else if (!strcmp(model, "QLogic-415")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 4Gbps PCIe single port FC HBA"); else if (!strcmp(model, "QLogic-815")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 8Gbps PCIe single port FC HBA"); else if (!strcmp(model, "QLogic-41B")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 4Gbps PCIe single port FC HBA for HP"); else if (!strcmp(model, "QLogic-81B")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 8Gbps PCIe single port FC HBA for HP"); else if (!strcmp(model, "QLogic-804")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 8Gbps FC HBA for HP Bladesystem C-class"); else if (!strcmp(model, "QLogic-1741")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 10Gbps CNA for Dell M-Series Blade Servers"); else if (strstr(model, "QLogic-1860")) { if (nports == 1 && bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 10Gbps single port CNA"); else if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 16Gbps PCIe single port FC HBA"); else if (nports == 2 && bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 10Gbps dual port CNA"); else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 16Gbps PCIe dual port FC HBA"); } else if (!strcmp(model, "QLogic-1867")) { if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 16Gbps 
PCIe single port FC HBA for IBM"); else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 16Gbps PCIe dual port FC HBA for IBM"); } else snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "Invalid Model"); return snprintf(buf, PAGE_SIZE, "%s\n", model_descr); } static ssize_t bfad_im_node_name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_port_s *port = im_port->port; u64 nwwn; nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port); return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn)); } static ssize_t bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfa_lport_attr_s port_attr; char symname[BFA_SYMNAME_MAXLEN]; bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); strlcpy(symname, port_attr.port_cfg.sym_name.symname, BFA_SYMNAME_MAXLEN); return snprintf(buf, PAGE_SIZE, "%s\n", symname); } static ssize_t bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; char hw_ver[BFA_VERSION_LEN]; bfa_get_pci_chip_rev(&bfad->bfa, hw_ver); return snprintf(buf, PAGE_SIZE, "%s\n", hw_ver); } static ssize_t bfad_im_drv_version_show(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_VERSION); } static ssize_t bfad_im_optionrom_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; char optrom_ver[BFA_VERSION_LEN]; bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver); return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver); } static ssize_t bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; char fw_ver[BFA_VERSION_LEN]; bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); return snprintf(buf, PAGE_SIZE, "%s\n", fw_ver); } static ssize_t bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; return snprintf(buf, PAGE_SIZE, "%d\n", bfa_get_nports(&bfad->bfa)); } static ssize_t bfad_im_drv_name_show(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_NAME); } static ssize_t bfad_im_num_of_discovered_ports_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_port_s *port = im_port->port; struct bfad_s *bfad = im_port->bfad; int nrports = 2048; struct bfa_rport_qualifier_s *rports = NULL; unsigned long flags; rports = kcalloc(nrports, 
sizeof(struct bfa_rport_qualifier_s), GFP_ATOMIC); if (rports == NULL) return snprintf(buf, PAGE_SIZE, "Failed\n"); spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcs_lport_get_rport_quals(port->fcs_port, rports, &nrports); spin_unlock_irqrestore(&bfad->bfad_lock, flags); kfree(rports); return snprintf(buf, PAGE_SIZE, "%d\n", nrports); } static DEVICE_ATTR(serial_number, S_IRUGO, bfad_im_serial_num_show, NULL); static DEVICE_ATTR(model, S_IRUGO, bfad_im_model_show, NULL); static DEVICE_ATTR(model_description, S_IRUGO, bfad_im_model_desc_show, NULL); static DEVICE_ATTR(node_name, S_IRUGO, bfad_im_node_name_show, NULL); static DEVICE_ATTR(symbolic_name, S_IRUGO, bfad_im_symbolic_name_show, NULL); static DEVICE_ATTR(hardware_version, S_IRUGO, bfad_im_hw_version_show, NULL); static DEVICE_ATTR(driver_version, S_IRUGO, bfad_im_drv_version_show, NULL); static DEVICE_ATTR(option_rom_version, S_IRUGO, bfad_im_optionrom_version_show, NULL); static DEVICE_ATTR(firmware_version, S_IRUGO, bfad_im_fw_version_show, NULL); static DEVICE_ATTR(number_of_ports, S_IRUGO, bfad_im_num_of_ports_show, NULL); static DEVICE_ATTR(driver_name, S_IRUGO, bfad_im_drv_name_show, NULL); static DEVICE_ATTR(number_of_discovered_ports, S_IRUGO, bfad_im_num_of_discovered_ports_show, NULL); struct device_attribute *bfad_im_host_attrs[] = { &dev_attr_serial_number, &dev_attr_model, &dev_attr_model_description, &dev_attr_node_name, &dev_attr_symbolic_name, &dev_attr_hardware_version, &dev_attr_driver_version, &dev_attr_option_rom_version, &dev_attr_firmware_version, &dev_attr_number_of_ports, &dev_attr_driver_name, &dev_attr_number_of_discovered_ports, NULL, }; struct device_attribute *bfad_im_vport_attrs[] = { &dev_attr_serial_number, &dev_attr_model, &dev_attr_model_description, &dev_attr_node_name, &dev_attr_symbolic_name, &dev_attr_hardware_version, &dev_attr_driver_version, &dev_attr_option_rom_version, &dev_attr_firmware_version, &dev_attr_number_of_ports, &dev_attr_driver_name, &dev_attr_number_of_discovered_ports, NULL, };
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1256_0
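One point worth flagging in bfad_im_get_stats() above: fcstats is allocated with kzalloc(), but the early failure path after bfa_port_get_stats() returns NULL without freeing it, so every failed read of the fc_host statistics attribute leaks one buffer (the resource-exhaustion pattern this sample is filed under). A hedged sketch of that error path with the leak closed, not necessarily the upstream patch, reusing the exact calls from the function above with only the bail-out changed:

	/* fragment of bfad_im_get_stats(); only the error handling differs */
	rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa), fcstats,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (rc != BFA_STATUS_OK) {
		kfree(fcstats);		/* do not leak the per-call statistics buffer */
		return NULL;
	}

	wait_for_completion(&fcomp.comp);

The same audit applies to bfad_im_num_of_discovered_ports_show(), which does free its rports allocation on every path and can serve as the reference pattern.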
crossvul-cpp_data_good_1272_3
/* * Copyright 2012-15 Advanced Micro Devices, Inc.cls * * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/slab.h> #include "dm_services.h" #include "stream_encoder.h" #include "resource.h" #include "include/irq_service_interface.h" #include "dce120_resource.h" #include "dce112/dce112_resource.h" #include "dce110/dce110_resource.h" #include "../virtual/virtual_stream_encoder.h" #include "dce120_timing_generator.h" #include "irq/dce120/irq_service_dce120.h" #include "dce/dce_opp.h" #include "dce/dce_clock_source.h" #include "dce/dce_ipp.h" #include "dce/dce_mem_input.h" #include "dce110/dce110_hw_sequencer.h" #include "dce120/dce120_hw_sequencer.h" #include "dce/dce_transform.h" #include "clk_mgr.h" #include "dce/dce_audio.h" #include "dce/dce_link_encoder.h" #include "dce/dce_stream_encoder.h" #include "dce/dce_hwseq.h" #include "dce/dce_abm.h" #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" #include "dce/dce_12_0_offset.h" #include "dce/dce_12_0_sh_mask.h" #include "soc15_hw_ip.h" #include "vega10_ip_offset.h" #include "nbio/nbio_6_1_offset.h" #include "mmhub/mmhub_9_4_0_offset.h" #include "mmhub/mmhub_9_4_0_sh_mask.h" #include "reg_helper.h" #include "dce100/dce100_resource.h" #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #endif enum dce120_clk_src_array_id { DCE120_CLK_SRC_PLL0, DCE120_CLK_SRC_PLL1, DCE120_CLK_SRC_PLL2, DCE120_CLK_SRC_PLL3, DCE120_CLK_SRC_PLL4, DCE120_CLK_SRC_PLL5, DCE120_CLK_SRC_TOTAL }; static const struct dce110_timing_generator_offsets dce120_tg_offsets[] = { { .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), }, { .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), }, { .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), }, { .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), }, { .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), }, { .crtc = (mmCRTC5_CRTC_CONTROL - 
mmCRTC0_CRTC_CONTROL), } }; /* begin ********************* * macros to expend register list macro defined in HW object header file */ #define BASE_INNER(seg) \ DCE_BASE__INST0_SEG ## seg #define NBIO_BASE_INNER(seg) \ NBIF_BASE__INST0_SEG ## seg #define NBIO_BASE(seg) \ NBIO_BASE_INNER(seg) /* compile time expand base address. */ #define BASE(seg) \ BASE_INNER(seg) #define SR(reg_name)\ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name #define SRI(reg_name, block, id)\ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name /* MMHUB */ #define MMHUB_BASE_INNER(seg) \ MMHUB_BASE__INST0_SEG ## seg #define MMHUB_BASE(seg) \ MMHUB_BASE_INNER(seg) #define MMHUB_SR(reg_name)\ .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name /* macros to expend register list macro defined in HW object header file * end *********************/ static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCE110_COMMON_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { DMCU_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_dmcu_mask dmcu_mask = { DMCU_MASK_SH_LIST_DCE110(_MASK) }; static const struct dce_abm_registers abm_regs = { ABM_DCE110_COMMON_REG_LIST() }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCE110(_MASK) }; #define ipp_regs(id)\ [id] = {\ IPP_DCE110_REG_LIST_DCE_BASE(id)\ } static const struct dce_ipp_registers ipp_regs[] = { ipp_regs(0), ipp_regs(1), ipp_regs(2), ipp_regs(3), ipp_regs(4), ipp_regs(5) }; static const struct dce_ipp_shift ipp_shift = { IPP_DCE120_MASK_SH_LIST_SOC_BASE(__SHIFT) }; static const struct dce_ipp_mask ipp_mask = { IPP_DCE120_MASK_SH_LIST_SOC_BASE(_MASK) }; #define transform_regs(id)\ [id] = {\ XFM_COMMON_REG_LIST_DCE110(id)\ } static const struct dce_transform_registers xfm_regs[] = { transform_regs(0), transform_regs(1), transform_regs(2), transform_regs(3), transform_regs(4), transform_regs(5) }; static const struct dce_transform_shift xfm_shift = { XFM_COMMON_MASK_SH_LIST_SOC_BASE(__SHIFT) }; static const struct dce_transform_mask xfm_mask = { XFM_COMMON_MASK_SH_LIST_SOC_BASE(_MASK) }; #define aux_regs(id)\ [id] = {\ AUX_REG_LIST(id)\ } static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3), aux_regs(4), aux_regs(5) }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3), hpd_regs(4), hpd_regs(5) }; #define link_regs(id)\ [id] = {\ LE_DCE120_REG_LIST(id), \ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ } static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(0), link_regs(1), link_regs(2), link_regs(3), link_regs(4), link_regs(5), link_regs(6), }; #define stream_enc_regs(id)\ [id] = {\ SE_COMMON_REG_LIST(id),\ .TMDS_CNTL = 0,\ } static const struct dce110_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), stream_enc_regs(3), stream_enc_regs(4), stream_enc_regs(5) }; static const struct dce_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCE120(__SHIFT) }; static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE120(_MASK) }; #define opp_regs(id)\ [id] = {\ OPP_DCE_120_REG_LIST(id),\ } static const struct dce_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), 
opp_regs(3), opp_regs(4), opp_regs(5) }; static const struct dce_opp_shift opp_shift = { OPP_COMMON_MASK_SH_LIST_DCE_120(__SHIFT) }; static const struct dce_opp_mask opp_mask = { OPP_COMMON_MASK_SH_LIST_DCE_120(_MASK) }; #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ .AUX_RESET_MASK = 0 \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4), aux_engine_regs(5) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), audio_regs(2), audio_regs(3), audio_regs(4), audio_regs(5) }; #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) static const struct dce_audio_shift audio_shift = { DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) }; #define clk_src_regs(index, id)\ [index] = {\ CS_COMMON_REG_LIST_DCE_112(id),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0, A), clk_src_regs(1, B), clk_src_regs(2, C), clk_src_regs(3, D), clk_src_regs(4, E), clk_src_regs(5, F) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCE_112(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCE_112(_MASK) }; struct output_pixel_processor *dce120_opp_create( struct dc_context *ctx, uint32_t inst) { struct dce110_opp *opp = kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); if (!opp) return NULL; dce110_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } struct dce_aux *dce120_aux_engine_create( struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst]); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5), i2c_inst_regs(6), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) }; struct dce_i2c_hw *dce120_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dce112_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } static const struct bios_registers bios_regs = { .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3 + NBIO_BASE(mmBIOS_SCRATCH_3_BASE_IDX), .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX) }; static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 7, .num_stream_encoder = 6, .num_pll = 6, .num_ddc = 6, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCE_RGB, .pixel_format_support = { .argb8888 = true, .nv12 = false, .fp16 = false }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 1, .fp16 = 1 }, 
.max_downscale_factor = { .argb8888 = 250, .nv12 = 1, .fp16 = 1 } }; static const struct dc_debug_options debug_defaults = { .disable_clock_gate = true, }; static struct clock_source *dce120_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(*clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dce112_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } BREAK_TO_DEBUGGER(); return NULL; } static void dce120_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); *clk_src = NULL; } static bool dce120_hw_sequencer_create(struct dc *dc) { /* All registers used by dce11.2 match those in dce11 in offset and * structure */ dce120_hw_sequencer_construct(dc); /*TODO Move to separate file and Override what is needed */ return true; } static struct timing_generator *dce120_timing_generator_create( struct dc_context *ctx, uint32_t instance, const struct dce110_timing_generator_offsets *offsets) { struct dce110_timing_generator *tg110 = kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); if (!tg110) return NULL; dce120_timing_generator_construct(tg110, ctx, instance, offsets); return &tg110->base; } static void dce120_transform_destroy(struct transform **xfm) { kfree(TO_DCE_TRANSFORM(*xfm)); *xfm = NULL; } static void destruct(struct dce110_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.opps[i] != NULL) dce110_opp_destroy(&pool->base.opps[i]); if (pool->base.transforms[i] != NULL) dce120_transform_destroy(&pool->base.transforms[i]); if (pool->base.ipps[i] != NULL) dce_ipp_destroy(&pool->base.ipps[i]); if (pool->base.mis[i] != NULL) { kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); pool->base.mis[i] = NULL; } if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); } if (pool->base.timing_generators[i] != NULL) { kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i]) dce_aud_destroy(&pool->base.audios[i]); } for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) dce120_clock_source_destroy( &pool->base.clock_sources[i]); } if (pool->base.dp_clock_source != NULL) dce120_clock_source_destroy(&pool->base.dp_clock_source); if (pool->base.abm != NULL) dce_abm_destroy(&pool->base.abm); if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); } static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { uint32_t reg_val = dm_read_reg_soc15(ctx, mmCC_DC_MISC_STRAPS, 0); straps->audio_stream_number = get_reg_field_value(reg_val, CC_DC_MISC_STRAPS, AUDIO_STREAM_NUMBER); straps->hdmi_disable = get_reg_field_value(reg_val, CC_DC_MISC_STRAPS, HDMI_DISABLE); reg_val = dm_read_reg_soc15(ctx, mmDC_PINSTRAPS, 0); 
straps->dc_pinstraps_audio = get_reg_field_value(reg_val, DC_PINSTRAPS, DC_PINSTRAPS_AUDIO); } static struct audio *create_audio( struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 600000, .hdmi_ycbcr420_supported = true, .dp_ycbcr420_supported = false, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, .flags.bits.IS_TPS4_CAPABLE = true, }; static struct link_encoder *dce120_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); if (!enc110) return NULL; dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; } static struct input_pixel_processor *dce120_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); return NULL; } dce_ipp_construct(ipp, ctx, inst, &ipp_regs[inst], &ipp_shift, &ipp_mask); return &ipp->base; } static struct stream_encoder *dce120_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dce110_stream_encoder *enc110 = kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); if (!enc110) return NULL; dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc110->base; } #define SRII(reg_name, block, id)\ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCE120_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCE12_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCE12_MASK_SH_LIST(_MASK) }; /* HWSEQ regs for VG20 */ static const struct dce_hwseq_registers dce121_hwseq_reg = { HWSEQ_VG20_REG_LIST() }; static const struct dce_hwseq_shift dce121_hwseq_shift = { HWSEQ_VG20_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask dce121_hwseq_mask = { HWSEQ_VG20_MASK_SH_LIST(_MASK) }; static struct dce_hwseq *dce120_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; } return hws; } static struct dce_hwseq *dce121_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &dce121_hwseq_reg; hws->shifts = &dce121_hwseq_shift; hws->masks = &dce121_hwseq_mask; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, .create_stream_encoder = dce120_stream_encoder_create, .create_hwseq = dce120_hwseq_create, }; static const struct resource_create_funcs dce121_res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, .create_stream_encoder = dce120_stream_encoder_create, .create_hwseq = dce121_hwseq_create, }; #define mi_inst_regs(id) { MI_DCE12_REG_LIST(id) } static const struct dce_mem_input_registers 
mi_regs[] = { mi_inst_regs(0), mi_inst_regs(1), mi_inst_regs(2), mi_inst_regs(3), mi_inst_regs(4), mi_inst_regs(5), }; static const struct dce_mem_input_shift mi_shifts = { MI_DCE12_MASK_SH_LIST(__SHIFT) }; static const struct dce_mem_input_mask mi_masks = { MI_DCE12_MASK_SH_LIST(_MASK) }; static struct mem_input *dce120_mem_input_create( struct dc_context *ctx, uint32_t inst) { struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), GFP_KERNEL); if (!dce_mi) { BREAK_TO_DEBUGGER(); return NULL; } dce120_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); return &dce_mi->base; } static struct transform *dce120_transform_create( struct dc_context *ctx, uint32_t inst) { struct dce_transform *transform = kzalloc(sizeof(struct dce_transform), GFP_KERNEL); if (!transform) return NULL; dce_transform_construct(transform, ctx, inst, &xfm_regs[inst], &xfm_shift, &xfm_mask); transform->lb_memory_size = 0x1404; /*5124*/ return &transform->base; } static void dce120_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } static const struct resource_funcs dce120_res_pool_funcs = { .destroy = dce120_destroy_resource_pool, .link_enc_create = dce120_link_encoder_create, .validate_bandwidth = dce112_validate_bandwidth, .validate_plane = dce100_validate_plane, .add_stream_to_ctx = dce112_add_stream_to_ctx, .find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link }; static void bw_calcs_data_update_from_pplib(struct dc *dc) { struct dm_pp_clock_levels_with_latency eng_clks = {0}; struct dm_pp_clock_levels_with_latency mem_clks = {0}; struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0}; int i; unsigned int clk; unsigned int latency; /*original logic in dal3*/ int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ; /*do system clock*/ if (!dm_pp_get_clock_levels_by_type_with_latency( dc->ctx, DM_PP_CLOCK_TYPE_ENGINE_CLK, &eng_clks) || eng_clks.num_levels == 0) { eng_clks.num_levels = 8; clk = 300000; for (i = 0; i < eng_clks.num_levels; i++) { eng_clks.data[i].clocks_in_khz = clk; clk += 100000; } } /* convert all the clock fro kHz to fix point mHz TODO: wloop data */ dc->bw_vbios->high_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels-1].clocks_in_khz, 1000); dc->bw_vbios->mid1_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels/8].clocks_in_khz, 1000); dc->bw_vbios->mid2_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*2/8].clocks_in_khz, 1000); dc->bw_vbios->mid3_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz, 1000); dc->bw_vbios->mid4_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*4/8].clocks_in_khz, 1000); dc->bw_vbios->mid5_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*5/8].clocks_in_khz, 1000); dc->bw_vbios->mid6_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*6/8].clocks_in_khz, 1000); dc->bw_vbios->low_sclk = bw_frc_to_fixed( eng_clks.data[0].clocks_in_khz, 1000); /*do memory clock*/ if (!dm_pp_get_clock_levels_by_type_with_latency( dc->ctx, DM_PP_CLOCK_TYPE_MEMORY_CLK, &mem_clks) || mem_clks.num_levels == 0) { mem_clks.num_levels = 3; clk = 250000; latency = 45; for (i = 0; i < eng_clks.num_levels; i++) { mem_clks.data[i].clocks_in_khz = clk; mem_clks.data[i].latency_in_us = latency; clk += 500000; latency -= 5; } } /* we don't need to call PPLIB for validation clock since they * also give us the highest sclk and 
highest mclk (UMA clock). * ALSO always convert UMA clock (from PPLIB) to YCLK (HW formula): * YCLK = UMACLK*m_memoryTypeMultiplier */ if (dc->bw_vbios->memory_type == bw_def_hbm) memory_type_multiplier = MEMORY_TYPE_HBM; dc->bw_vbios->low_yclk = bw_frc_to_fixed( mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier, 1000); /* Now notify PPLib/SMU about which Watermarks sets they should select * depending on DPM state they are in. And update BW MGR GFX Engine and * Memory clock member variables for Watermarks calculations for each * Watermark Set */ clk_ranges.num_wm_sets = 4; clk_ranges.wm_clk_ranges[0].wm_set_id = WM_SET_A; clk_ranges.wm_clk_ranges[0].wm_min_eng_clk_in_khz = eng_clks.data[0].clocks_in_khz; clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz = eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1; clk_ranges.wm_clk_ranges[0].wm_min_mem_clk_in_khz = mem_clks.data[0].clocks_in_khz; clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz = mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1; clk_ranges.wm_clk_ranges[1].wm_set_id = WM_SET_B; clk_ranges.wm_clk_ranges[1].wm_min_eng_clk_in_khz = eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz; /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */ clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000; clk_ranges.wm_clk_ranges[1].wm_min_mem_clk_in_khz = mem_clks.data[0].clocks_in_khz; clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz = mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1; clk_ranges.wm_clk_ranges[2].wm_set_id = WM_SET_C; clk_ranges.wm_clk_ranges[2].wm_min_eng_clk_in_khz = eng_clks.data[0].clocks_in_khz; clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz = eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1; clk_ranges.wm_clk_ranges[2].wm_min_mem_clk_in_khz = mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz; /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */ clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000; clk_ranges.wm_clk_ranges[3].wm_set_id = WM_SET_D; clk_ranges.wm_clk_ranges[3].wm_min_eng_clk_in_khz = eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz; /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */ clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000; clk_ranges.wm_clk_ranges[3].wm_min_mem_clk_in_khz = mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz; /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */ clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000; /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges); } static uint32_t read_pipe_fuses(struct dc_context *ctx) { uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0); /* VG20 support max 6 pipes */ value = value & 0x3f; return value; } static bool construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) { unsigned int i; int j; struct dc_context *ctx = dc->ctx; struct irq_service_init_data irq_init_data; static const struct resource_create_funcs *res_funcs; bool is_vg20 = ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev); uint32_t pipe_fuses; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap; pool->base.funcs = &dce120_res_pool_funcs; /* TODO: Fill more data from GreenlandAsicCapability.cpp */ 
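	/* fixed per-ASIC capabilities first: pipe/TG counts come straight from
	 * res_cap, while the downscale, i2c and cursor limits are hard-coded
	 * for this DCE 12.x family */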
	pool->base.pipe_count = res_cap.num_timing_generator;
	pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
	dc->caps.max_downscale_ratio = 200;
	dc->caps.i2c_speed_in_khz = 100;
	dc->caps.max_cursor_size = 128;
	dc->caps.dual_link_dvi = true;
	dc->caps.psp_setup_panel_mode = true;
	dc->debug = debug_defaults;

	/************************************************* * Create resources * *************************************************/

	pool->base.clock_sources[DCE120_CLK_SRC_PLL0] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL0, &clk_src_regs[0], false);
	pool->base.clock_sources[DCE120_CLK_SRC_PLL1] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL1, &clk_src_regs[1], false);
	pool->base.clock_sources[DCE120_CLK_SRC_PLL2] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL2, &clk_src_regs[2], false);
	pool->base.clock_sources[DCE120_CLK_SRC_PLL3] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL3, &clk_src_regs[3], false);
	pool->base.clock_sources[DCE120_CLK_SRC_PLL4] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL4, &clk_src_regs[4], false);
	pool->base.clock_sources[DCE120_CLK_SRC_PLL5] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL5, &clk_src_regs[5], false);
	pool->base.clk_src_count = DCE120_CLK_SRC_TOTAL;
	pool->base.dp_clock_source = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_ID_DP_DTO, &clk_src_regs[0], true);

	for (i = 0; i < pool->base.clk_src_count; i++) {
		if (pool->base.clock_sources[i] == NULL) {
			dm_error("DC: failed to create clock sources!\n");
			BREAK_TO_DEBUGGER();
			goto clk_src_create_fail;
		}
	}

	pool->base.dmcu = dce_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask);
	if (pool->base.dmcu == NULL) {
		dm_error("DC: failed to create dmcu!\n");
		BREAK_TO_DEBUGGER();
		goto res_create_fail;
	}

	pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask);
	if (pool->base.abm == NULL) {
		dm_error("DC: failed to create abm!\n");
		BREAK_TO_DEBUGGER();
		goto res_create_fail;
	}

	irq_init_data.ctx = dc->ctx;
	pool->base.irqs = dal_irq_service_dce120_create(&irq_init_data);
	if (!pool->base.irqs)
		goto irqs_create_fail;

	/* VG20: Pipe harvesting enabled, retrieve valid pipe fuses */
	if (is_vg20)
		pipe_fuses = read_pipe_fuses(ctx);

	/* index to valid pipe resource */
	j = 0;
	for (i = 0; i < pool->base.pipe_count; i++) {
		if (is_vg20) {
			if ((pipe_fuses & (1 << i)) != 0) {
				dm_error("DC: skip invalid pipe %d!\n", i);
				continue;
			}
		}

		pool->base.timing_generators[j] = dce120_timing_generator_create(ctx, i, &dce120_tg_offsets[i]);
		if (pool->base.timing_generators[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create tg!\n");
			goto controller_create_fail;
		}

		pool->base.mis[j] = dce120_mem_input_create(ctx, i);
		if (pool->base.mis[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create memory input!\n");
			goto controller_create_fail;
		}

		pool->base.ipps[j] = dce120_ipp_create(ctx, i);
		if (pool->base.ipps[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create input pixel processor!\n");
			goto controller_create_fail;
		}

		pool->base.transforms[j] = dce120_transform_create(ctx, i);
		if (pool->base.transforms[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create transform!\n");
			goto res_create_fail;
		}

		pool->base.opps[j] = dce120_opp_create(ctx, i);
		if (pool->base.opps[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error( "DC: failed to create output
pixel processor!\n"); } /* check next valid pipe */ j++; } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce120_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto res_create_fail; } pool->base.hw_i2cs[i] = dce120_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create i2c engine!!\n"); goto res_create_fail; } pool->base.sw_i2cs[i] = NULL; } /* valid pipe num */ pool->base.pipe_count = j; pool->base.timing_generator_count = j; if (is_vg20) res_funcs = &dce121_res_create_funcs; else res_funcs = &res_create_funcs; if (!resource_construct(num_virtual_links, dc, &pool->base, res_funcs)) goto res_create_fail; /* Create hardware sequencer */ if (!dce120_hw_sequencer_create(dc)) goto controller_create_fail; dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id); bw_calcs_data_update_from_pplib(dc); return true; irqs_create_fail: controller_create_fail: clk_src_create_fail: res_create_fail: destruct(pool); return false; } struct resource_pool *dce120_create_resource_pool( uint8_t num_virtual_links, struct dc *dc) { struct dce110_resource_pool *pool = kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (construct(num_virtual_links, dc, pool)) return &pool->base; kfree(pool); BREAK_TO_DEBUGGER(); return NULL; }
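/*
 * Hedged usage sketch, not part of the original file: how a display-manager
 * front end might drive the create/destroy pair defined above. Only
 * dce120_create_resource_pool() and the funcs->destroy() callback are taken
 * from this file; the value 2 for num_virtual_links and the example_* naming
 * are illustrative assumptions.
 */
static bool example_dce120_pool_lifecycle(struct dc *dc)
{
	struct resource_pool *pool = dce120_create_resource_pool(2, dc);

	if (!pool)
		return false;	/* construct() already logged and cleaned up */

	/* ... hand the pool to the core (dc->res_pool) and use it via the
	 * resource_funcs table it carries ... */

	pool->funcs->destroy(&pool);	/* frees sub-objects and NULLs the pointer */
	return true;
}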
./CrossVul/dataset_final_sorted/CWE-400/c/good_1272_3
crossvul-cpp_data_bad_1255_0
/* * Copyright(c) 2015 - 2018 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * BSD LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * - Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #include <linux/spinlock.h> #include <linux/seqlock.h> #include <linux/netdevice.h> #include <linux/moduleparam.h> #include <linux/bitops.h> #include <linux/timer.h> #include <linux/vmalloc.h> #include <linux/highmem.h> #include "hfi.h" #include "common.h" #include "qp.h" #include "sdma.h" #include "iowait.h" #include "trace.h" /* must be a power of 2 >= 64 <= 32768 */ #define SDMA_DESCQ_CNT 2048 #define SDMA_DESC_INTR 64 #define INVALID_TAIL 0xffff static uint sdma_descq_cnt = SDMA_DESCQ_CNT; module_param(sdma_descq_cnt, uint, S_IRUGO); MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries"); static uint sdma_idle_cnt = 250; module_param(sdma_idle_cnt, uint, S_IRUGO); MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns,default 250)"); uint mod_num_sdma; module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO); MODULE_PARM_DESC(num_sdma, "Set max number SDMA engines to use"); static uint sdma_desct_intr = SDMA_DESC_INTR; module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptor before interrupt"); #define SDMA_WAIT_BATCH_SIZE 20 /* max wait time for a SDMA engine to indicate it has halted */ #define SDMA_ERR_HALT_TIMEOUT 10 /* ms */ /* all SDMA engine errors that cause a halt */ #define SD(name) SEND_DMA_##name #define ALL_SDMA_ENG_HALT_ERRS \ (SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \ | SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK)) /* sdma_sendctrl operations */ #define SDMA_SENDCTRL_OP_ENABLE BIT(0) #define SDMA_SENDCTRL_OP_INTENABLE BIT(1) #define SDMA_SENDCTRL_OP_HALT BIT(2) #define SDMA_SENDCTRL_OP_CLEANUP BIT(3) /* handle long defines */ #define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \ SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK #define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \ SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT static const char * const sdma_state_names[] = { [sdma_state_s00_hw_down] = "s00_HwDown", [sdma_state_s10_hw_start_up_halt_wait] = "s10_HwStartUpHaltWait", [sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait", [sdma_state_s20_idle] = "s20_Idle", [sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait", [sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait", [sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait", [sdma_state_s60_idle_halt_wait] = "s60_IdleHaltWait", [sdma_state_s80_hw_freeze] = "s80_HwFreeze", [sdma_state_s82_freeze_sw_clean] = "s82_FreezeSwClean", [sdma_state_s99_running] = "s99_Running", }; #ifdef CONFIG_SDMA_VERBOSITY static const char * const sdma_event_names[] = { [sdma_event_e00_go_hw_down] = "e00_GoHwDown", [sdma_event_e10_go_hw_start] = "e10_GoHwStart", [sdma_event_e15_hw_halt_done] 
= "e15_HwHaltDone", [sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone", [sdma_event_e30_go_running] = "e30_GoRunning", [sdma_event_e40_sw_cleaned] = "e40_SwCleaned", [sdma_event_e50_hw_cleaned] = "e50_HwCleaned", [sdma_event_e60_hw_halted] = "e60_HwHalted", [sdma_event_e70_go_idle] = "e70_GoIdle", [sdma_event_e80_hw_freeze] = "e80_HwFreeze", [sdma_event_e81_hw_frozen] = "e81_HwFrozen", [sdma_event_e82_hw_unfreeze] = "e82_HwUnfreeze", [sdma_event_e85_link_down] = "e85_LinkDown", [sdma_event_e90_sw_halted] = "e90_SwHalted", }; #endif static const struct sdma_set_state_action sdma_action_table[] = { [sdma_state_s00_hw_down] = { .go_s99_running_tofalse = 1, .op_enable = 0, .op_intenable = 0, .op_halt = 0, .op_cleanup = 0, }, [sdma_state_s10_hw_start_up_halt_wait] = { .op_enable = 0, .op_intenable = 0, .op_halt = 1, .op_cleanup = 0, }, [sdma_state_s15_hw_start_up_clean_wait] = { .op_enable = 0, .op_intenable = 1, .op_halt = 0, .op_cleanup = 1, }, [sdma_state_s20_idle] = { .op_enable = 0, .op_intenable = 1, .op_halt = 0, .op_cleanup = 0, }, [sdma_state_s30_sw_clean_up_wait] = { .op_enable = 0, .op_intenable = 0, .op_halt = 0, .op_cleanup = 0, }, [sdma_state_s40_hw_clean_up_wait] = { .op_enable = 0, .op_intenable = 0, .op_halt = 0, .op_cleanup = 1, }, [sdma_state_s50_hw_halt_wait] = { .op_enable = 0, .op_intenable = 0, .op_halt = 0, .op_cleanup = 0, }, [sdma_state_s60_idle_halt_wait] = { .go_s99_running_tofalse = 1, .op_enable = 0, .op_intenable = 0, .op_halt = 1, .op_cleanup = 0, }, [sdma_state_s80_hw_freeze] = { .op_enable = 0, .op_intenable = 0, .op_halt = 0, .op_cleanup = 0, }, [sdma_state_s82_freeze_sw_clean] = { .op_enable = 0, .op_intenable = 0, .op_halt = 0, .op_cleanup = 0, }, [sdma_state_s99_running] = { .op_enable = 1, .op_intenable = 1, .op_halt = 0, .op_cleanup = 0, .go_s99_running_totrue = 1, }, }; #define SDMA_TAIL_UPDATE_THRESH 0x1F /* declare all statics here rather than keep sorting */ static void sdma_complete(struct kref *); static void sdma_finalput(struct sdma_state *); static void sdma_get(struct sdma_state *); static void sdma_hw_clean_up_task(unsigned long); static void sdma_put(struct sdma_state *); static void sdma_set_state(struct sdma_engine *, enum sdma_states); static void sdma_start_hw_clean_up(struct sdma_engine *); static void sdma_sw_clean_up_task(unsigned long); static void sdma_sendctrl(struct sdma_engine *, unsigned); static void init_sdma_regs(struct sdma_engine *, u32, uint); static void sdma_process_event( struct sdma_engine *sde, enum sdma_events event); static void __sdma_process_event( struct sdma_engine *sde, enum sdma_events event); static void dump_sdma_state(struct sdma_engine *sde); static void sdma_make_progress(struct sdma_engine *sde, u64 status); static void sdma_desc_avail(struct sdma_engine *sde, uint avail); static void sdma_flush_descq(struct sdma_engine *sde); /** * sdma_state_name() - return state string from enum * @state: state */ static const char *sdma_state_name(enum sdma_states state) { return sdma_state_names[state]; } static void sdma_get(struct sdma_state *ss) { kref_get(&ss->kref); } static void sdma_complete(struct kref *kref) { struct sdma_state *ss = container_of(kref, struct sdma_state, kref); complete(&ss->comp); } static void sdma_put(struct sdma_state *ss) { kref_put(&ss->kref, sdma_complete); } static void sdma_finalput(struct sdma_state *ss) { sdma_put(ss); wait_for_completion(&ss->comp); } static inline void write_sde_csr( struct sdma_engine *sde, u32 offset0, u64 value) { write_kctxt_csr(sde->dd, sde->this_idx, 
offset0, value); } static inline u64 read_sde_csr( struct sdma_engine *sde, u32 offset0) { return read_kctxt_csr(sde->dd, sde->this_idx, offset0); } /* * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for * sdma engine 'sde' to drop to 0. */ static void sdma_wait_for_packet_egress(struct sdma_engine *sde, int pause) { u64 off = 8 * sde->this_idx; struct hfi1_devdata *dd = sde->dd; int lcnt = 0; u64 reg_prev; u64 reg = 0; while (1) { reg_prev = reg; reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS); reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK; reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT; if (reg == 0) break; /* counter is reest if accupancy count changes */ if (reg != reg_prev) lcnt = 0; if (lcnt++ > 500) { /* timed out - bounce the link */ dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n", __func__, sde->this_idx, (u32)reg); queue_work(dd->pport->link_wq, &dd->pport->link_bounce_work); break; } udelay(1); } } /* * sdma_wait() - wait for packet egress to complete for all SDMA engines, * and pause for credit return. */ void sdma_wait(struct hfi1_devdata *dd) { int i; for (i = 0; i < dd->num_sdma; i++) { struct sdma_engine *sde = &dd->per_sdma[i]; sdma_wait_for_packet_egress(sde, 0); } } static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt) { u64 reg; if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT)) return; reg = cnt; reg &= SD(DESC_CNT_CNT_MASK); reg <<= SD(DESC_CNT_CNT_SHIFT); write_sde_csr(sde, SD(DESC_CNT), reg); } static inline void complete_tx(struct sdma_engine *sde, struct sdma_txreq *tx, int res) { /* protect against complete modifying */ struct iowait *wait = tx->wait; callback_t complete = tx->complete; #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER trace_hfi1_sdma_out_sn(sde, tx->sn); if (WARN_ON_ONCE(sde->head_sn != tx->sn)) dd_dev_err(sde->dd, "expected %llu got %llu\n", sde->head_sn, tx->sn); sde->head_sn++; #endif __sdma_txclean(sde->dd, tx); if (complete) (*complete)(tx, res); if (iowait_sdma_dec(wait)) iowait_drain_wakeup(wait); } /* * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status * * Depending on timing there can be txreqs in two places: * - in the descq ring * - in the flush list * * To avoid ordering issues the descq ring needs to be flushed * first followed by the flush list. 
* * This routine is called from two places * - From a work queue item * - Directly from the state machine just before setting the * state to running * * Must be called with head_lock held * */ static void sdma_flush(struct sdma_engine *sde) { struct sdma_txreq *txp, *txp_next; LIST_HEAD(flushlist); unsigned long flags; uint seq; /* flush from head to tail */ sdma_flush_descq(sde); spin_lock_irqsave(&sde->flushlist_lock, flags); /* copy flush list */ list_splice_init(&sde->flushlist, &flushlist); spin_unlock_irqrestore(&sde->flushlist_lock, flags); /* flush from flush list */ list_for_each_entry_safe(txp, txp_next, &flushlist, list) complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED); /* wakeup QPs orphaned on the dmawait list */ do { struct iowait *w, *nw; seq = read_seqbegin(&sde->waitlock); if (!list_empty(&sde->dmawait)) { write_seqlock(&sde->waitlock); list_for_each_entry_safe(w, nw, &sde->dmawait, list) { if (w->wakeup) { w->wakeup(w, SDMA_AVAIL_REASON); list_del_init(&w->list); } } write_sequnlock(&sde->waitlock); } } while (read_seqretry(&sde->waitlock, seq)); } /* * Fields a work request for flushing the descq ring * and the flush list * * If the engine has been brought to running during * the scheduling delay, the flush is ignored, assuming * that the process of bringing the engine to running * would have done this flush prior to going to running. * */ static void sdma_field_flush(struct work_struct *work) { unsigned long flags; struct sdma_engine *sde = container_of(work, struct sdma_engine, flush_worker); write_seqlock_irqsave(&sde->head_lock, flags); if (!__sdma_running(sde)) sdma_flush(sde); write_sequnlock_irqrestore(&sde->head_lock, flags); } static void sdma_err_halt_wait(struct work_struct *work) { struct sdma_engine *sde = container_of(work, struct sdma_engine, err_halt_worker); u64 statuscsr; unsigned long timeout; timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT); while (1) { statuscsr = read_sde_csr(sde, SD(STATUS)); statuscsr &= SD(STATUS_ENG_HALTED_SMASK); if (statuscsr) break; if (time_after(jiffies, timeout)) { dd_dev_err(sde->dd, "SDMA engine %d - timeout waiting for engine to halt\n", sde->this_idx); /* * Continue anyway. This could happen if there was * an uncorrectable error in the wrong spot. 
*/ break; } usleep_range(80, 120); } sdma_process_event(sde, sdma_event_e15_hw_halt_done); } static void sdma_err_progress_check_schedule(struct sdma_engine *sde) { if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) { unsigned index; struct hfi1_devdata *dd = sde->dd; for (index = 0; index < dd->num_sdma; index++) { struct sdma_engine *curr_sdma = &dd->per_sdma[index]; if (curr_sdma != sde) curr_sdma->progress_check_head = curr_sdma->descq_head; } dd_dev_err(sde->dd, "SDMA engine %d - check scheduled\n", sde->this_idx); mod_timer(&sde->err_progress_check_timer, jiffies + 10); } } static void sdma_err_progress_check(struct timer_list *t) { unsigned index; struct sdma_engine *sde = from_timer(sde, t, err_progress_check_timer); dd_dev_err(sde->dd, "SDE progress check event\n"); for (index = 0; index < sde->dd->num_sdma; index++) { struct sdma_engine *curr_sde = &sde->dd->per_sdma[index]; unsigned long flags; /* check progress on each engine except the current one */ if (curr_sde == sde) continue; /* * We must lock interrupts when acquiring sde->lock, * to avoid a deadlock if interrupt triggers and spins on * the same lock on same CPU */ spin_lock_irqsave(&curr_sde->tail_lock, flags); write_seqlock(&curr_sde->head_lock); /* skip non-running queues */ if (curr_sde->state.current_state != sdma_state_s99_running) { write_sequnlock(&curr_sde->head_lock); spin_unlock_irqrestore(&curr_sde->tail_lock, flags); continue; } if ((curr_sde->descq_head != curr_sde->descq_tail) && (curr_sde->descq_head == curr_sde->progress_check_head)) __sdma_process_event(curr_sde, sdma_event_e90_sw_halted); write_sequnlock(&curr_sde->head_lock); spin_unlock_irqrestore(&curr_sde->tail_lock, flags); } schedule_work(&sde->err_halt_worker); } static void sdma_hw_clean_up_task(unsigned long opaque) { struct sdma_engine *sde = (struct sdma_engine *)opaque; u64 statuscsr; while (1) { #ifdef CONFIG_SDMA_VERBOSITY dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); #endif statuscsr = read_sde_csr(sde, SD(STATUS)); statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK); if (statuscsr) break; udelay(10); } sdma_process_event(sde, sdma_event_e25_hw_clean_up_done); } static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde) { return sde->tx_ring[sde->tx_head & sde->sdma_mask]; } /* * flush ring for recovery */ static void sdma_flush_descq(struct sdma_engine *sde) { u16 head, tail; int progress = 0; struct sdma_txreq *txp = get_txhead(sde); /* The reason for some of the complexity of this code is that * not all descriptors have corresponding txps. So, we have to * be able to skip over descs until we wander into the range of * the next txp on the list. 
*/ head = sde->descq_head & sde->sdma_mask; tail = sde->descq_tail & sde->sdma_mask; while (head != tail) { /* advance head, wrap if needed */ head = ++sde->descq_head & sde->sdma_mask; /* if now past this txp's descs, do the callback */ if (txp && txp->next_descq_idx == head) { /* remove from list */ sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL; complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED); trace_hfi1_sdma_progress(sde, head, tail, txp); txp = get_txhead(sde); } progress++; } if (progress) sdma_desc_avail(sde, sdma_descq_freecnt(sde)); } static void sdma_sw_clean_up_task(unsigned long opaque) { struct sdma_engine *sde = (struct sdma_engine *)opaque; unsigned long flags; spin_lock_irqsave(&sde->tail_lock, flags); write_seqlock(&sde->head_lock); /* * At this point, the following should always be true: * - We are halted, so no more descriptors are getting retired. * - We are not running, so no one is submitting new work. * - Only we can send the e40_sw_cleaned, so we can't start * running again until we say so. So, the active list and * descq are ours to play with. */ /* * In the error clean up sequence, software clean must be called * before the hardware clean so we can use the hardware head in * the progress routine. A hardware clean or SPC unfreeze will * reset the hardware head. * * Process all retired requests. The progress routine will use the * latest physical hardware head - we are not running so speed does * not matter. */ sdma_make_progress(sde, 0); sdma_flush(sde); /* * Reset our notion of head and tail. * Note that the HW registers have been reset via an earlier * clean up. */ sde->descq_tail = 0; sde->descq_head = 0; sde->desc_avail = sdma_descq_freecnt(sde); *sde->head_dma = 0; __sdma_process_event(sde, sdma_event_e40_sw_cleaned); write_sequnlock(&sde->head_lock); spin_unlock_irqrestore(&sde->tail_lock, flags); } static void sdma_sw_tear_down(struct sdma_engine *sde) { struct sdma_state *ss = &sde->state; /* Releasing this reference means the state machine has stopped. */ sdma_put(ss); /* stop waiting for all unfreeze events to complete */ atomic_set(&sde->dd->sdma_unfreeze_count, -1); wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); } static void sdma_start_hw_clean_up(struct sdma_engine *sde) { tasklet_hi_schedule(&sde->sdma_hw_clean_up_task); } static void sdma_set_state(struct sdma_engine *sde, enum sdma_states next_state) { struct sdma_state *ss = &sde->state; const struct sdma_set_state_action *action = sdma_action_table; unsigned op = 0; trace_hfi1_sdma_state( sde, sdma_state_names[ss->current_state], sdma_state_names[next_state]); /* debugging bookkeeping */ ss->previous_state = ss->current_state; ss->previous_op = ss->current_op; ss->current_state = next_state; if (ss->previous_state != sdma_state_s99_running && next_state == sdma_state_s99_running) sdma_flush(sde); if (action[next_state].op_enable) op |= SDMA_SENDCTRL_OP_ENABLE; if (action[next_state].op_intenable) op |= SDMA_SENDCTRL_OP_INTENABLE; if (action[next_state].op_halt) op |= SDMA_SENDCTRL_OP_HALT; if (action[next_state].op_cleanup) op |= SDMA_SENDCTRL_OP_CLEANUP; if (action[next_state].go_s99_running_tofalse) ss->go_s99_running = 0; if (action[next_state].go_s99_running_totrue) ss->go_s99_running = 1; ss->current_op = op; sdma_sendctrl(sde, ss->current_op); } /** * sdma_get_descq_cnt() - called when device probed * * Return a validated descq count. * * This is currently only used in the verbs initialization to build the tx * list. 
* * This will probably be deleted in favor of a more scalable approach to * alloc tx's. * */ u16 sdma_get_descq_cnt(void) { u16 count = sdma_descq_cnt; if (!count) return SDMA_DESCQ_CNT; /* count must be a power of 2 greater than 64 and less than * 32768. Otherwise return default. */ if (!is_power_of_2(count)) return SDMA_DESCQ_CNT; if (count < 64 || count > 32768) return SDMA_DESCQ_CNT; return count; } /** * sdma_engine_get_vl() - return vl for a given sdma engine * @sde: sdma engine * * This function returns the vl mapped to a given engine, or an error if * the mapping can't be found. The mapping fields are protected by RCU. */ int sdma_engine_get_vl(struct sdma_engine *sde) { struct hfi1_devdata *dd = sde->dd; struct sdma_vl_map *m; u8 vl; if (sde->this_idx >= TXE_NUM_SDMA_ENGINES) return -EINVAL; rcu_read_lock(); m = rcu_dereference(dd->sdma_map); if (unlikely(!m)) { rcu_read_unlock(); return -EINVAL; } vl = m->engine_to_vl[sde->this_idx]; rcu_read_unlock(); return vl; } /** * sdma_select_engine_vl() - select sdma engine * @dd: devdata * @selector: a spreading factor * @vl: this vl * * * This function returns an engine based on the selector and a vl. The * mapping fields are protected by RCU. */ struct sdma_engine *sdma_select_engine_vl( struct hfi1_devdata *dd, u32 selector, u8 vl) { struct sdma_vl_map *m; struct sdma_map_elem *e; struct sdma_engine *rval; /* NOTE This should only happen if SC->VL changed after the initial * checks on the QP/AH * Default will return engine 0 below */ if (vl >= num_vls) { rval = NULL; goto done; } rcu_read_lock(); m = rcu_dereference(dd->sdma_map); if (unlikely(!m)) { rcu_read_unlock(); return &dd->per_sdma[0]; } e = m->map[vl & m->mask]; rval = e->sde[selector & e->mask]; rcu_read_unlock(); done: rval = !rval ? &dd->per_sdma[0] : rval; trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx); return rval; } /** * sdma_select_engine_sc() - select sdma engine * @dd: devdata * @selector: a spreading factor * @sc5: the 5 bit sc * * * This function returns an engine based on the selector and an sc. */ struct sdma_engine *sdma_select_engine_sc( struct hfi1_devdata *dd, u32 selector, u8 sc5) { u8 vl = sc_to_vlt(dd, sc5); return sdma_select_engine_vl(dd, selector, vl); } struct sdma_rht_map_elem { u32 mask; u8 ctr; struct sdma_engine *sde[0]; }; struct sdma_rht_node { unsigned long cpu_id; struct sdma_rht_map_elem *map[HFI1_MAX_VLS_SUPPORTED]; struct rhash_head node; }; #define NR_CPUS_HINT 192 static const struct rhashtable_params sdma_rht_params = { .nelem_hint = NR_CPUS_HINT, .head_offset = offsetof(struct sdma_rht_node, node), .key_offset = offsetof(struct sdma_rht_node, cpu_id), .key_len = FIELD_SIZEOF(struct sdma_rht_node, cpu_id), .max_size = NR_CPUS, .min_size = 8, .automatic_shrinking = true, }; /* * sdma_select_user_engine() - select sdma engine based on user setup * @dd: devdata * @selector: a spreading factor * @vl: this vl * * This function returns an sdma engine for a user sdma request. * User defined sdma engine affinity setting is honored when applicable, * otherwise system default sdma engine mapping is used. To ensure correct * ordering, the mapping from <selector, vl> to sde must remain unchanged. */ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd, u32 selector, u8 vl) { struct sdma_rht_node *rht_node; struct sdma_engine *sde = NULL; unsigned long cpu_id; /* * To ensure that always the same sdma engine(s) will be * selected make sure the process is pinned to this CPU only. 
*/ if (current->nr_cpus_allowed != 1) goto out; cpu_id = smp_processor_id(); rcu_read_lock(); rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu_id, sdma_rht_params); if (rht_node && rht_node->map[vl]) { struct sdma_rht_map_elem *map = rht_node->map[vl]; sde = map->sde[selector & map->mask]; } rcu_read_unlock(); if (sde) return sde; out: return sdma_select_engine_vl(dd, selector, vl); } static void sdma_populate_sde_map(struct sdma_rht_map_elem *map) { int i; for (i = 0; i < roundup_pow_of_two(map->ctr ? : 1) - map->ctr; i++) map->sde[map->ctr + i] = map->sde[i]; } static void sdma_cleanup_sde_map(struct sdma_rht_map_elem *map, struct sdma_engine *sde) { unsigned int i, pow; /* only need to check the first ctr entries for a match */ for (i = 0; i < map->ctr; i++) { if (map->sde[i] == sde) { memmove(&map->sde[i], &map->sde[i + 1], (map->ctr - i - 1) * sizeof(map->sde[0])); map->ctr--; pow = roundup_pow_of_two(map->ctr ? : 1); map->mask = pow - 1; sdma_populate_sde_map(map); break; } } } /* * Prevents concurrent reads and writes of the sdma engine cpu_mask */ static DEFINE_MUTEX(process_to_sde_mutex); ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf, size_t count) { struct hfi1_devdata *dd = sde->dd; cpumask_var_t mask, new_mask; unsigned long cpu; int ret, vl, sz; struct sdma_rht_node *rht_node; vl = sdma_engine_get_vl(sde); if (unlikely(vl < 0 || vl >= ARRAY_SIZE(rht_node->map))) return -EINVAL; ret = zalloc_cpumask_var(&mask, GFP_KERNEL); if (!ret) return -ENOMEM; ret = zalloc_cpumask_var(&new_mask, GFP_KERNEL); if (!ret) { free_cpumask_var(mask); return -ENOMEM; } ret = cpulist_parse(buf, mask); if (ret) goto out_free; if (!cpumask_subset(mask, cpu_online_mask)) { dd_dev_warn(sde->dd, "Invalid CPU mask\n"); ret = -EINVAL; goto out_free; } sz = sizeof(struct sdma_rht_map_elem) + (TXE_NUM_SDMA_ENGINES * sizeof(struct sdma_engine *)); mutex_lock(&process_to_sde_mutex); for_each_cpu(cpu, mask) { /* Check if we have this already mapped */ if (cpumask_test_cpu(cpu, &sde->cpu_mask)) { cpumask_set_cpu(cpu, new_mask); continue; } rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu, sdma_rht_params); if (!rht_node) { rht_node = kzalloc(sizeof(*rht_node), GFP_KERNEL); if (!rht_node) { ret = -ENOMEM; goto out; } rht_node->map[vl] = kzalloc(sz, GFP_KERNEL); if (!rht_node->map[vl]) { kfree(rht_node); ret = -ENOMEM; goto out; } rht_node->cpu_id = cpu; rht_node->map[vl]->mask = 0; rht_node->map[vl]->ctr = 1; rht_node->map[vl]->sde[0] = sde; ret = rhashtable_insert_fast(dd->sdma_rht, &rht_node->node, sdma_rht_params); if (ret) { kfree(rht_node->map[vl]); kfree(rht_node); dd_dev_err(sde->dd, "Failed to set process to sde affinity for cpu %lu\n", cpu); goto out; } } else { int ctr, pow; /* Add new user mappings */ if (!rht_node->map[vl]) rht_node->map[vl] = kzalloc(sz, GFP_KERNEL); if (!rht_node->map[vl]) { ret = -ENOMEM; goto out; } rht_node->map[vl]->ctr++; ctr = rht_node->map[vl]->ctr; rht_node->map[vl]->sde[ctr - 1] = sde; pow = roundup_pow_of_two(ctr); rht_node->map[vl]->mask = pow - 1; /* Populate the sde map table */ sdma_populate_sde_map(rht_node->map[vl]); } cpumask_set_cpu(cpu, new_mask); } /* Clean up old mappings */ for_each_cpu(cpu, cpu_online_mask) { struct sdma_rht_node *rht_node; /* Don't cleanup sdes that are set in the new mask */ if (cpumask_test_cpu(cpu, mask)) continue; rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu, sdma_rht_params); if (rht_node) { bool empty = true; int i; /* Remove mappings for old sde */ for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) 
if (rht_node->map[i]) sdma_cleanup_sde_map(rht_node->map[i], sde); /* Free empty hash table entries */ for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) { if (!rht_node->map[i]) continue; if (rht_node->map[i]->ctr) { empty = false; break; } } if (empty) { ret = rhashtable_remove_fast(dd->sdma_rht, &rht_node->node, sdma_rht_params); WARN_ON(ret); for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) kfree(rht_node->map[i]); kfree(rht_node); } } } cpumask_copy(&sde->cpu_mask, new_mask); out: mutex_unlock(&process_to_sde_mutex); out_free: free_cpumask_var(mask); free_cpumask_var(new_mask); return ret ? : strnlen(buf, PAGE_SIZE); } ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf) { mutex_lock(&process_to_sde_mutex); if (cpumask_empty(&sde->cpu_mask)) snprintf(buf, PAGE_SIZE, "%s\n", "empty"); else cpumap_print_to_pagebuf(true, buf, &sde->cpu_mask); mutex_unlock(&process_to_sde_mutex); return strnlen(buf, PAGE_SIZE); } static void sdma_rht_free(void *ptr, void *arg) { struct sdma_rht_node *rht_node = ptr; int i; for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) kfree(rht_node->map[i]); kfree(rht_node); } /** * sdma_seqfile_dump_cpu_list() - debugfs dump the cpu to sdma mappings * @s: seq file * @dd: hfi1_devdata * @cpuid: cpu id * * This routine dumps the process to sde mappings per cpu */ void sdma_seqfile_dump_cpu_list(struct seq_file *s, struct hfi1_devdata *dd, unsigned long cpuid) { struct sdma_rht_node *rht_node; int i, j; rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpuid, sdma_rht_params); if (!rht_node) return; seq_printf(s, "cpu%3lu: ", cpuid); for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) { if (!rht_node->map[i] || !rht_node->map[i]->ctr) continue; seq_printf(s, " vl%d: [", i); for (j = 0; j < rht_node->map[i]->ctr; j++) { if (!rht_node->map[i]->sde[j]) continue; if (j > 0) seq_puts(s, ","); seq_printf(s, " sdma%2d", rht_node->map[i]->sde[j]->this_idx); } seq_puts(s, " ]"); } seq_puts(s, "\n"); } /* * Free the indicated map struct */ static void sdma_map_free(struct sdma_vl_map *m) { int i; for (i = 0; m && i < m->actual_vls; i++) kfree(m->map[i]); kfree(m); } /* * Handle RCU callback */ static void sdma_map_rcu_callback(struct rcu_head *list) { struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list); sdma_map_free(m); } /** * sdma_map_init - called when # vls change * @dd: hfi1_devdata * @port: port number * @num_vls: number of vls * @vl_engines: per vl engine mapping (optional) * * This routine changes the mapping based on the number of vls. * * vl_engines is used to specify a non-uniform vl/engine loading. NULL * implies auto computing the loading and giving each VLs a uniform * distribution of engines per VL. * * The auto algorithm computes the sde_per_vl and the number of extra * engines. Any extra engines are added from the last VL on down. * * rcu locking is used here to control access to the mapping fields. * * If either the num_vls or num_sdma are non-power of 2, the array sizes * in the struct sdma_vl_map and the struct sdma_map_elem are rounded * up to the next highest power of 2 and the first entry is reused * in a round robin fashion. * * If an error occurs the map change is not done and the mapping is * not changed. 
* */ int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines) { int i, j; int extra, sde_per_vl; int engine = 0; u8 lvl_engines[OPA_MAX_VLS]; struct sdma_vl_map *oldmap, *newmap; if (!(dd->flags & HFI1_HAS_SEND_DMA)) return 0; if (!vl_engines) { /* truncate divide */ sde_per_vl = dd->num_sdma / num_vls; /* extras */ extra = dd->num_sdma % num_vls; vl_engines = lvl_engines; /* add extras from last vl down */ for (i = num_vls - 1; i >= 0; i--, extra--) vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0); } /* build new map */ newmap = kzalloc( sizeof(struct sdma_vl_map) + roundup_pow_of_two(num_vls) * sizeof(struct sdma_map_elem *), GFP_KERNEL); if (!newmap) goto bail; newmap->actual_vls = num_vls; newmap->vls = roundup_pow_of_two(num_vls); newmap->mask = (1 << ilog2(newmap->vls)) - 1; /* initialize back-map */ for (i = 0; i < TXE_NUM_SDMA_ENGINES; i++) newmap->engine_to_vl[i] = -1; for (i = 0; i < newmap->vls; i++) { /* save for wrap around */ int first_engine = engine; if (i < newmap->actual_vls) { int sz = roundup_pow_of_two(vl_engines[i]); /* only allocate once */ newmap->map[i] = kzalloc( sizeof(struct sdma_map_elem) + sz * sizeof(struct sdma_engine *), GFP_KERNEL); if (!newmap->map[i]) goto bail; newmap->map[i]->mask = (1 << ilog2(sz)) - 1; /* assign engines */ for (j = 0; j < sz; j++) { newmap->map[i]->sde[j] = &dd->per_sdma[engine]; if (++engine >= first_engine + vl_engines[i]) /* wrap back to first engine */ engine = first_engine; } /* assign back-map */ for (j = 0; j < vl_engines[i]; j++) newmap->engine_to_vl[first_engine + j] = i; } else { /* just re-use entry without allocating */ newmap->map[i] = newmap->map[i % num_vls]; } engine = first_engine + vl_engines[i]; } /* newmap in hand, save old map */ spin_lock_irq(&dd->sde_map_lock); oldmap = rcu_dereference_protected(dd->sdma_map, lockdep_is_held(&dd->sde_map_lock)); /* publish newmap */ rcu_assign_pointer(dd->sdma_map, newmap); spin_unlock_irq(&dd->sde_map_lock); /* success, free any old map after grace period */ if (oldmap) call_rcu(&oldmap->list, sdma_map_rcu_callback); return 0; bail: /* free any partial allocation */ sdma_map_free(newmap); return -ENOMEM; } /** * sdma_clean() Clean up allocated memory * @dd: struct hfi1_devdata * @num_engines: num sdma engines * * This routine can be called regardless of the success of * sdma_init() */ void sdma_clean(struct hfi1_devdata *dd, size_t num_engines) { size_t i; struct sdma_engine *sde; if (dd->sdma_pad_dma) { dma_free_coherent(&dd->pcidev->dev, 4, (void *)dd->sdma_pad_dma, dd->sdma_pad_phys); dd->sdma_pad_dma = NULL; dd->sdma_pad_phys = 0; } if (dd->sdma_heads_dma) { dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size, (void *)dd->sdma_heads_dma, dd->sdma_heads_phys); dd->sdma_heads_dma = NULL; dd->sdma_heads_phys = 0; } for (i = 0; dd->per_sdma && i < num_engines; ++i) { sde = &dd->per_sdma[i]; sde->head_dma = NULL; sde->head_phys = 0; if (sde->descq) { dma_free_coherent( &dd->pcidev->dev, sde->descq_cnt * sizeof(u64[2]), sde->descq, sde->descq_phys ); sde->descq = NULL; sde->descq_phys = 0; } kvfree(sde->tx_ring); sde->tx_ring = NULL; } spin_lock_irq(&dd->sde_map_lock); sdma_map_free(rcu_access_pointer(dd->sdma_map)); RCU_INIT_POINTER(dd->sdma_map, NULL); spin_unlock_irq(&dd->sde_map_lock); synchronize_rcu(); kfree(dd->per_sdma); dd->per_sdma = NULL; if (dd->sdma_rht) { rhashtable_free_and_destroy(dd->sdma_rht, sdma_rht_free, NULL); kfree(dd->sdma_rht); dd->sdma_rht = NULL; } } /** * sdma_init() - called when device probed * @dd: hfi1_devdata * @port: 
port number (currently only zero) * * Initializes each sde and its csrs. * Interrupts are not required to be enabled. * * Returns: * 0 - success, -errno on failure */ int sdma_init(struct hfi1_devdata *dd, u8 port) { unsigned this_idx; struct sdma_engine *sde; struct rhashtable *tmp_sdma_rht; u16 descq_cnt; void *curr_head; struct hfi1_pportdata *ppd = dd->pport + port; u32 per_sdma_credits; uint idle_cnt = sdma_idle_cnt; size_t num_engines = chip_sdma_engines(dd); int ret = -ENOMEM; if (!HFI1_CAP_IS_KSET(SDMA)) { HFI1_CAP_CLEAR(SDMA_AHG); return 0; } if (mod_num_sdma && /* can't exceed chip support */ mod_num_sdma <= chip_sdma_engines(dd) && /* count must be >= vls */ mod_num_sdma >= num_vls) num_engines = mod_num_sdma; dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma); dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", chip_sdma_engines(dd)); dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n", chip_sdma_mem_size(dd)); per_sdma_credits = chip_sdma_mem_size(dd) / (num_engines * SDMA_BLOCK_SIZE); /* set up freeze waitqueue */ init_waitqueue_head(&dd->sdma_unfreeze_wq); atomic_set(&dd->sdma_unfreeze_count, 0); descq_cnt = sdma_get_descq_cnt(); dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n", num_engines, descq_cnt); /* alloc memory for array of send engines */ dd->per_sdma = kcalloc_node(num_engines, sizeof(*dd->per_sdma), GFP_KERNEL, dd->node); if (!dd->per_sdma) return ret; idle_cnt = ns_to_cclock(dd, idle_cnt); if (idle_cnt) dd->default_desc1 = SDMA_DESC1_HEAD_TO_HOST_FLAG; else dd->default_desc1 = SDMA_DESC1_INT_REQ_FLAG; if (!sdma_desct_intr) sdma_desct_intr = SDMA_DESC_INTR; /* Allocate memory for SendDMA descriptor FIFOs */ for (this_idx = 0; this_idx < num_engines; ++this_idx) { sde = &dd->per_sdma[this_idx]; sde->dd = dd; sde->ppd = ppd; sde->this_idx = this_idx; sde->descq_cnt = descq_cnt; sde->desc_avail = sdma_descq_freecnt(sde); sde->sdma_shift = ilog2(descq_cnt); sde->sdma_mask = (1 << sde->sdma_shift) - 1; /* Create a mask specifically for each interrupt source */ sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES + this_idx); sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES + this_idx); sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES + this_idx); /* Create a combined mask to cover all 3 interrupt sources */ sde->imask = sde->int_mask | sde->progress_mask | sde->idle_mask; spin_lock_init(&sde->tail_lock); seqlock_init(&sde->head_lock); spin_lock_init(&sde->senddmactrl_lock); spin_lock_init(&sde->flushlist_lock); seqlock_init(&sde->waitlock); /* insure there is always a zero bit */ sde->ahg_bits = 0xfffffffe00000000ULL; sdma_set_state(sde, sdma_state_s00_hw_down); /* set up reference counting */ kref_init(&sde->state.kref); init_completion(&sde->state.comp); INIT_LIST_HEAD(&sde->flushlist); INIT_LIST_HEAD(&sde->dmawait); sde->tail_csr = get_kctxt_csr_addr(dd, this_idx, SD(TAIL)); tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task, (unsigned long)sde); tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task, (unsigned long)sde); INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait); INIT_WORK(&sde->flush_worker, sdma_field_flush); sde->progress_check_head = 0; timer_setup(&sde->err_progress_check_timer, sdma_err_progress_check, 0); sde->descq = dma_alloc_coherent(&dd->pcidev->dev, descq_cnt * sizeof(u64[2]), &sde->descq_phys, GFP_KERNEL); if (!sde->descq) goto bail; sde->tx_ring = kvzalloc_node(array_size(descq_cnt, sizeof(struct sdma_txreq *)), GFP_KERNEL, dd->node); if (!sde->tx_ring) goto bail; } dd->sdma_heads_size = L1_CACHE_BYTES * 
num_engines; /* Allocate memory for DMA of head registers to memory */ dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev, dd->sdma_heads_size, &dd->sdma_heads_phys, GFP_KERNEL); if (!dd->sdma_heads_dma) { dd_dev_err(dd, "failed to allocate SendDMA head memory\n"); goto bail; } /* Allocate memory for pad */ dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32), &dd->sdma_pad_phys, GFP_KERNEL); if (!dd->sdma_pad_dma) { dd_dev_err(dd, "failed to allocate SendDMA pad memory\n"); goto bail; } /* assign each engine to different cacheline and init registers */ curr_head = (void *)dd->sdma_heads_dma; for (this_idx = 0; this_idx < num_engines; ++this_idx) { unsigned long phys_offset; sde = &dd->per_sdma[this_idx]; sde->head_dma = curr_head; curr_head += L1_CACHE_BYTES; phys_offset = (unsigned long)sde->head_dma - (unsigned long)dd->sdma_heads_dma; sde->head_phys = dd->sdma_heads_phys + phys_offset; init_sdma_regs(sde, per_sdma_credits, idle_cnt); } dd->flags |= HFI1_HAS_SEND_DMA; dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0; dd->num_sdma = num_engines; ret = sdma_map_init(dd, port, ppd->vls_operational, NULL); if (ret < 0) goto bail; tmp_sdma_rht = kzalloc(sizeof(*tmp_sdma_rht), GFP_KERNEL); if (!tmp_sdma_rht) { ret = -ENOMEM; goto bail; } ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params); if (ret < 0) goto bail; dd->sdma_rht = tmp_sdma_rht; dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma); return 0; bail: sdma_clean(dd, num_engines); return ret; } /** * sdma_all_running() - called when the link goes up * @dd: hfi1_devdata * * This routine moves all engines to the running state. */ void sdma_all_running(struct hfi1_devdata *dd) { struct sdma_engine *sde; unsigned int i; /* move all engines to running */ for (i = 0; i < dd->num_sdma; ++i) { sde = &dd->per_sdma[i]; sdma_process_event(sde, sdma_event_e30_go_running); } } /** * sdma_all_idle() - called when the link goes down * @dd: hfi1_devdata * * This routine moves all engines to the idle state. */ void sdma_all_idle(struct hfi1_devdata *dd) { struct sdma_engine *sde; unsigned int i; /* idle all engines */ for (i = 0; i < dd->num_sdma; ++i) { sde = &dd->per_sdma[i]; sdma_process_event(sde, sdma_event_e70_go_idle); } } /** * sdma_start() - called to kick off state processing for all engines * @dd: hfi1_devdata * * This routine is for kicking off the state processing for all required * sdma engines. Interrupts need to be working at this point. * */ void sdma_start(struct hfi1_devdata *dd) { unsigned i; struct sdma_engine *sde; /* kick off the engines state processing */ for (i = 0; i < dd->num_sdma; ++i) { sde = &dd->per_sdma[i]; sdma_process_event(sde, sdma_event_e10_go_hw_start); } } /** * sdma_exit() - used when module is removed * @dd: hfi1_devdata */ void sdma_exit(struct hfi1_devdata *dd) { unsigned this_idx; struct sdma_engine *sde; for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma; ++this_idx) { sde = &dd->per_sdma[this_idx]; if (!list_empty(&sde->dmawait)) dd_dev_err(dd, "sde %u: dmawait list not empty!\n", sde->this_idx); sdma_process_event(sde, sdma_event_e00_go_hw_down); del_timer_sync(&sde->err_progress_check_timer); /* * This waits for the state machine to exit so it is not * necessary to kill the sdma_sw_clean_up_task to make sure * it is not running. 
*/ sdma_finalput(&sde->state); } } /* * unmap the indicated descriptor */ static inline void sdma_unmap_desc( struct hfi1_devdata *dd, struct sdma_desc *descp) { switch (sdma_mapping_type(descp)) { case SDMA_MAP_SINGLE: dma_unmap_single( &dd->pcidev->dev, sdma_mapping_addr(descp), sdma_mapping_len(descp), DMA_TO_DEVICE); break; case SDMA_MAP_PAGE: dma_unmap_page( &dd->pcidev->dev, sdma_mapping_addr(descp), sdma_mapping_len(descp), DMA_TO_DEVICE); break; } } /* * return the mode as indicated by the first * descriptor in the tx. */ static inline u8 ahg_mode(struct sdma_txreq *tx) { return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK) >> SDMA_DESC1_HEADER_MODE_SHIFT; } /** * __sdma_txclean() - clean tx of mappings, descp *kmalloc's * @dd: hfi1_devdata for unmapping * @tx: tx request to clean * * This is used in the progress routine to clean the tx or * by the ULP to toss an in-process tx build. * * The code can be called multiple times without issue. * */ void __sdma_txclean( struct hfi1_devdata *dd, struct sdma_txreq *tx) { u16 i; if (tx->num_desc) { u8 skip = 0, mode = ahg_mode(tx); /* unmap first */ sdma_unmap_desc(dd, &tx->descp[0]); /* determine number of AHG descriptors to skip */ if (mode > SDMA_AHG_APPLY_UPDATE1) skip = mode >> 1; for (i = 1 + skip; i < tx->num_desc; i++) sdma_unmap_desc(dd, &tx->descp[i]); tx->num_desc = 0; } kfree(tx->coalesce_buf); tx->coalesce_buf = NULL; /* kmalloc'ed descp */ if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) { tx->desc_limit = ARRAY_SIZE(tx->descs); kfree(tx->descp); } } static inline u16 sdma_gethead(struct sdma_engine *sde) { struct hfi1_devdata *dd = sde->dd; int use_dmahead; u16 hwhead; #ifdef CONFIG_SDMA_VERBOSITY dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); #endif retry: use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) && (dd->flags & HFI1_HAS_SDMA_TIMEOUT); hwhead = use_dmahead ? (u16)le64_to_cpu(*sde->head_dma) : (u16)read_sde_csr(sde, SD(HEAD)); if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) { u16 cnt; u16 swtail; u16 swhead; int sane; swhead = sde->descq_head & sde->sdma_mask; /* this code is really bad for cache line trading */ swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; cnt = sde->descq_cnt; if (swhead < swtail) /* not wrapped */ sane = (hwhead >= swhead) & (hwhead <= swtail); else if (swhead > swtail) /* wrapped around */ sane = ((hwhead >= swhead) && (hwhead < cnt)) || (hwhead <= swtail); else /* empty */ sane = (hwhead == swhead); if (unlikely(!sane)) { dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n", sde->this_idx, use_dmahead ? "dma" : "kreg", hwhead, swhead, swtail, cnt); if (use_dmahead) { /* try one more time, using csr */ use_dmahead = 0; goto retry; } /* proceed as if no progress */ hwhead = swhead; } } return hwhead; } /* * This is called when there are send DMA descriptors that might be * available. * * This is called with head_lock held. 
*/ static void sdma_desc_avail(struct sdma_engine *sde, uint avail) { struct iowait *wait, *nw, *twait; struct iowait *waits[SDMA_WAIT_BATCH_SIZE]; uint i, n = 0, seq, tidx = 0; #ifdef CONFIG_SDMA_VERBOSITY dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); dd_dev_err(sde->dd, "avail: %u\n", avail); #endif do { seq = read_seqbegin(&sde->waitlock); if (!list_empty(&sde->dmawait)) { /* at least one item */ write_seqlock(&sde->waitlock); /* Harvest waiters wanting DMA descriptors */ list_for_each_entry_safe( wait, nw, &sde->dmawait, list) { u32 num_desc; if (!wait->wakeup) continue; if (n == ARRAY_SIZE(waits)) break; iowait_init_priority(wait); num_desc = iowait_get_all_desc(wait); if (num_desc > avail) break; avail -= num_desc; /* Find the top-priority wait memeber */ if (n) { twait = waits[tidx]; tidx = iowait_priority_update_top(wait, twait, n, tidx); } list_del_init(&wait->list); waits[n++] = wait; } write_sequnlock(&sde->waitlock); break; } } while (read_seqretry(&sde->waitlock, seq)); /* Schedule the top-priority entry first */ if (n) waits[tidx]->wakeup(waits[tidx], SDMA_AVAIL_REASON); for (i = 0; i < n; i++) if (i != tidx) waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON); } /* head_lock must be held */ static void sdma_make_progress(struct sdma_engine *sde, u64 status) { struct sdma_txreq *txp = NULL; int progress = 0; u16 hwhead, swhead; int idle_check_done = 0; hwhead = sdma_gethead(sde); /* The reason for some of the complexity of this code is that * not all descriptors have corresponding txps. So, we have to * be able to skip over descs until we wander into the range of * the next txp on the list. */ retry: txp = get_txhead(sde); swhead = sde->descq_head & sde->sdma_mask; trace_hfi1_sdma_progress(sde, hwhead, swhead, txp); while (swhead != hwhead) { /* advance head, wrap if needed */ swhead = ++sde->descq_head & sde->sdma_mask; /* if now past this txp's descs, do the callback */ if (txp && txp->next_descq_idx == swhead) { /* remove from list */ sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL; complete_tx(sde, txp, SDMA_TXREQ_S_OK); /* see if there is another txp */ txp = get_txhead(sde); } trace_hfi1_sdma_progress(sde, hwhead, swhead, txp); progress++; } /* * The SDMA idle interrupt is not guaranteed to be ordered with respect * to updates to the the dma_head location in host memory. The head * value read might not be fully up to date. If there are pending * descriptors and the SDMA idle interrupt fired then read from the * CSR SDMA head instead to get the latest value from the hardware. * The hardware SDMA head should be read at most once in this invocation * of sdma_make_progress(..) which is ensured by idle_check_done flag */ if ((status & sde->idle_mask) && !idle_check_done) { u16 swtail; swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; if (swtail != hwhead) { hwhead = (u16)read_sde_csr(sde, SD(HEAD)); idle_check_done = 1; goto retry; } } sde->last_status = status; if (progress) sdma_desc_avail(sde, sdma_descq_freecnt(sde)); } /* * sdma_engine_interrupt() - interrupt handler for engine * @sde: sdma engine * @status: sdma interrupt reason * * Status is a mask of the 3 possible interrupts for this engine. It will * contain bits _only_ for this SDMA engine. It will contain at least one * bit, it may contain more. 
*/ void sdma_engine_interrupt(struct sdma_engine *sde, u64 status) { trace_hfi1_sdma_engine_interrupt(sde, status); write_seqlock(&sde->head_lock); sdma_set_desc_cnt(sde, sdma_desct_intr); if (status & sde->idle_mask) sde->idle_int_cnt++; else if (status & sde->progress_mask) sde->progress_int_cnt++; else if (status & sde->int_mask) sde->sdma_int_cnt++; sdma_make_progress(sde, status); write_sequnlock(&sde->head_lock); } /** * sdma_engine_error() - error handler for engine * @sde: sdma engine * @status: sdma interrupt reason */ void sdma_engine_error(struct sdma_engine *sde, u64 status) { unsigned long flags; #ifdef CONFIG_SDMA_VERBOSITY dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n", sde->this_idx, (unsigned long long)status, sdma_state_names[sde->state.current_state]); #endif spin_lock_irqsave(&sde->tail_lock, flags); write_seqlock(&sde->head_lock); if (status & ALL_SDMA_ENG_HALT_ERRS) __sdma_process_event(sde, sdma_event_e60_hw_halted); if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) { dd_dev_err(sde->dd, "SDMA (%u) engine error: 0x%llx state %s\n", sde->this_idx, (unsigned long long)status, sdma_state_names[sde->state.current_state]); dump_sdma_state(sde); } write_sequnlock(&sde->head_lock); spin_unlock_irqrestore(&sde->tail_lock, flags); } static void sdma_sendctrl(struct sdma_engine *sde, unsigned op) { u64 set_senddmactrl = 0; u64 clr_senddmactrl = 0; unsigned long flags; #ifdef CONFIG_SDMA_VERBOSITY dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n", sde->this_idx, (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0, (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0, (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0, (op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0); #endif if (op & SDMA_SENDCTRL_OP_ENABLE) set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK); else clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK); if (op & SDMA_SENDCTRL_OP_INTENABLE) set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK); else clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK); if (op & SDMA_SENDCTRL_OP_HALT) set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK); else clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK); spin_lock_irqsave(&sde->senddmactrl_lock, flags); sde->p_senddmactrl |= set_senddmactrl; sde->p_senddmactrl &= ~clr_senddmactrl; if (op & SDMA_SENDCTRL_OP_CLEANUP) write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl | SD(CTRL_SDMA_CLEANUP_SMASK)); else write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl); spin_unlock_irqrestore(&sde->senddmactrl_lock, flags); #ifdef CONFIG_SDMA_VERBOSITY sdma_dumpstate(sde); #endif } static void sdma_setlengen(struct sdma_engine *sde) { #ifdef CONFIG_SDMA_VERBOSITY dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); #endif /* * Set SendDmaLenGen and clear-then-set the MSB of the generation * count to enable generation checking and load the internal * generation counter. */ write_sde_csr(sde, SD(LEN_GEN), (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)); write_sde_csr(sde, SD(LEN_GEN), ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) | (4ULL << SD(LEN_GEN_GENERATION_SHIFT))); } static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail) { /* Commit writes to memory and advance the tail on the chip */ smp_wmb(); /* see get_txhead() */ writeq(tail, sde->tail_csr); } /* * This is called when changing to state s10_hw_start_up_halt_wait as * a result of send buffer errors or send DMA descriptor errors. 
*/ static void sdma_hw_start_up(struct sdma_engine *sde) { u64 reg; #ifdef CONFIG_SDMA_VERBOSITY dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); #endif sdma_setlengen(sde); sdma_update_tail(sde, 0); /* Set SendDmaTail */ *sde->head_dma = 0; reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) << SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT); write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg); } /* * set_sdma_integrity * * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'. */ static void set_sdma_integrity(struct sdma_engine *sde) { struct hfi1_devdata *dd = sde->dd; write_sde_csr(sde, SD(CHECK_ENABLE), hfi1_pkt_base_sdma_integrity(dd)); } static void init_sdma_regs( struct sdma_engine *sde, u32 credits, uint idle_cnt) { u8 opval, opmask; #ifdef CONFIG_SDMA_VERBOSITY struct hfi1_devdata *dd = sde->dd; dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); #endif write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys); sdma_setlengen(sde); sdma_update_tail(sde, 0); /* Set SendDmaTail */ write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt); write_sde_csr(sde, SD(DESC_CNT), 0); write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys); write_sde_csr(sde, SD(MEMORY), ((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) | ((u64)(credits * sde->this_idx) << SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT))); write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull); set_sdma_integrity(sde); opmask = OPCODE_CHECK_MASK_DISABLED; opval = OPCODE_CHECK_VAL_DISABLED; write_sde_csr(sde, SD(CHECK_OPCODE), (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) | (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT)); } #ifdef CONFIG_SDMA_VERBOSITY #define sdma_dumpstate_helper0(reg) do { \ csr = read_csr(sde->dd, reg); \ dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \ } while (0) #define sdma_dumpstate_helper(reg) do { \ csr = read_sde_csr(sde, reg); \ dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \ #reg, sde->this_idx, csr); \ } while (0) #define sdma_dumpstate_helper2(reg) do { \ csr = read_csr(sde->dd, reg + (8 * i)); \ dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \ #reg, i, csr); \ } while (0) void sdma_dumpstate(struct sdma_engine *sde) { u64 csr; unsigned i; sdma_dumpstate_helper(SD(CTRL)); sdma_dumpstate_helper(SD(STATUS)); sdma_dumpstate_helper0(SD(ERR_STATUS)); sdma_dumpstate_helper0(SD(ERR_MASK)); sdma_dumpstate_helper(SD(ENG_ERR_STATUS)); sdma_dumpstate_helper(SD(ENG_ERR_MASK)); for (i = 0; i < CCE_NUM_INT_CSRS; ++i) { sdma_dumpstate_helper2(CCE_INT_STATUS); sdma_dumpstate_helper2(CCE_INT_MASK); sdma_dumpstate_helper2(CCE_INT_BLOCKED); } sdma_dumpstate_helper(SD(TAIL)); sdma_dumpstate_helper(SD(HEAD)); sdma_dumpstate_helper(SD(PRIORITY_THLD)); sdma_dumpstate_helper(SD(IDLE_CNT)); sdma_dumpstate_helper(SD(RELOAD_CNT)); sdma_dumpstate_helper(SD(DESC_CNT)); sdma_dumpstate_helper(SD(DESC_FETCHED_CNT)); sdma_dumpstate_helper(SD(MEMORY)); sdma_dumpstate_helper0(SD(ENGINES)); sdma_dumpstate_helper0(SD(MEM_SIZE)); /* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */ sdma_dumpstate_helper(SD(BASE_ADDR)); sdma_dumpstate_helper(SD(LEN_GEN)); sdma_dumpstate_helper(SD(HEAD_ADDR)); sdma_dumpstate_helper(SD(CHECK_ENABLE)); sdma_dumpstate_helper(SD(CHECK_VL)); sdma_dumpstate_helper(SD(CHECK_JOB_KEY)); sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY)); sdma_dumpstate_helper(SD(CHECK_SLID)); sdma_dumpstate_helper(SD(CHECK_OPCODE)); } #endif static void dump_sdma_state(struct sdma_engine *sde) { struct hw_sdma_desc *descqp; u64 
desc[2]; u64 addr; u8 gen; u16 len; u16 head, tail, cnt; head = sde->descq_head & sde->sdma_mask; tail = sde->descq_tail & sde->sdma_mask; cnt = sdma_descq_freecnt(sde); dd_dev_err(sde->dd, "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n", sde->this_idx, head, tail, cnt, !list_empty(&sde->flushlist)); /* print info for each entry in the descriptor queue */ while (head != tail) { char flags[6] = { 'x', 'x', 'x', 'x', 0 }; descqp = &sde->descq[head]; desc[0] = le64_to_cpu(descqp->qw[0]); desc[1] = le64_to_cpu(descqp->qw[1]); flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-'; flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ? 'H' : '-'; flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-'; flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-'; addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT) & SDMA_DESC0_PHY_ADDR_MASK; gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT) & SDMA_DESC1_GENERATION_MASK; len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT) & SDMA_DESC0_BYTE_COUNT_MASK; dd_dev_err(sde->dd, "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n", head, flags, addr, gen, len); dd_dev_err(sde->dd, "\tdesc0:0x%016llx desc1 0x%016llx\n", desc[0], desc[1]); if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) dd_dev_err(sde->dd, "\taidx: %u amode: %u alen: %u\n", (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK) >> SDMA_DESC1_HEADER_INDEX_SHIFT), (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK) >> SDMA_DESC1_HEADER_MODE_SHIFT), (u8)((desc[1] & SDMA_DESC1_HEADER_DWS_SMASK) >> SDMA_DESC1_HEADER_DWS_SHIFT)); head++; head &= sde->sdma_mask; } } #define SDE_FMT \ "SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n" /** * sdma_seqfile_dump_sde() - debugfs dump of sde * @s: seq file * @sde: send dma engine to dump * * This routine dumps the sde to the indicated seq file. */ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde) { u16 head, tail; struct hw_sdma_desc *descqp; u64 desc[2]; u64 addr; u8 gen; u16 len; head = sde->descq_head & sde->sdma_mask; tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; seq_printf(s, SDE_FMT, sde->this_idx, sde->cpu, sdma_state_name(sde->state.current_state), (unsigned long long)read_sde_csr(sde, SD(CTRL)), (unsigned long long)read_sde_csr(sde, SD(STATUS)), (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)), (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail, (unsigned long long)read_sde_csr(sde, SD(HEAD)), head, (unsigned long long)le64_to_cpu(*sde->head_dma), (unsigned long long)read_sde_csr(sde, SD(MEMORY)), (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)), (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)), (unsigned long long)sde->last_status, (unsigned long long)sde->ahg_bits, sde->tx_tail, sde->tx_head, sde->descq_tail, sde->descq_head, !list_empty(&sde->flushlist), sde->descq_full_count, (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID)); /* print info for each entry in the descriptor queue */ while (head != tail) { char flags[6] = { 'x', 'x', 'x', 'x', 0 }; descqp = &sde->descq[head]; desc[0] = le64_to_cpu(descqp->qw[0]); desc[1] = le64_to_cpu(descqp->qw[1]); flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-'; flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ? 'H' : '-'; flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-'; flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 
'L' : '-'; addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT) & SDMA_DESC0_PHY_ADDR_MASK; gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT) & SDMA_DESC1_GENERATION_MASK; len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT) & SDMA_DESC0_BYTE_COUNT_MASK; seq_printf(s, "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n", head, flags, addr, gen, len); if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n", (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK) >> SDMA_DESC1_HEADER_INDEX_SHIFT), (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK) >> SDMA_DESC1_HEADER_MODE_SHIFT)); head = (head + 1) & sde->sdma_mask; } } /* * add the generation number into * the qw1 and return */ static inline u64 add_gen(struct sdma_engine *sde, u64 qw1) { u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3; qw1 &= ~SDMA_DESC1_GENERATION_SMASK; qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK) << SDMA_DESC1_GENERATION_SHIFT; return qw1; } /* * This routine submits the indicated tx * * Space has already been guaranteed and * tail side of ring is locked. * * The hardware tail update is done * in the caller and that is facilitated * by returning the new tail. * * There is special case logic for ahg * to not add the generation number for * up to 2 descriptors that follow the * first descriptor. * */ static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx) { int i; u16 tail; struct sdma_desc *descp = tx->descp; u8 skip = 0, mode = ahg_mode(tx); tail = sde->descq_tail & sde->sdma_mask; sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]); sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1])); trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1], tail, &sde->descq[tail]); tail = ++sde->descq_tail & sde->sdma_mask; descp++; if (mode > SDMA_AHG_APPLY_UPDATE1) skip = mode >> 1; for (i = 1; i < tx->num_desc; i++, descp++) { u64 qw1; sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]); if (skip) { /* edits don't have generation */ qw1 = descp->qw[1]; skip--; } else { /* replace generation with real one for non-edits */ qw1 = add_gen(sde, descp->qw[1]); } sde->descq[tail].qw[1] = cpu_to_le64(qw1); trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1, tail, &sde->descq[tail]); tail = ++sde->descq_tail & sde->sdma_mask; } tx->next_descq_idx = tail; #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER tx->sn = sde->tail_sn++; trace_hfi1_sdma_in_sn(sde, tx->sn); WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]); #endif sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx; sde->desc_avail -= tx->num_desc; return tail; } /* * Check for progress */ static int sdma_check_progress( struct sdma_engine *sde, struct iowait_work *wait, struct sdma_txreq *tx, bool pkts_sent) { int ret; sde->desc_avail = sdma_descq_freecnt(sde); if (tx->num_desc <= sde->desc_avail) return -EAGAIN; /* pulse the head_lock */ if (wait && iowait_ioww_to_iow(wait)->sleep) { unsigned seq; seq = raw_seqcount_begin( (const seqcount_t *)&sde->head_lock.seqcount); ret = wait->iow->sleep(sde, wait, tx, seq, pkts_sent); if (ret == -EAGAIN) sde->desc_avail = sdma_descq_freecnt(sde); } else { ret = -EBUSY; } return ret; } /** * sdma_send_txreq() - submit a tx req to ring * @sde: sdma engine to use * @wait: SE wait structure to use when full (may be NULL) * @tx: sdma_txreq to submit * @pkts_sent: has any packet been sent yet? * * The call submits the tx into the ring. If a iowait structure is non-NULL * the packet will be queued to the list in wait. 
* * Return: * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in * ring (wait == NULL) * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state */ int sdma_send_txreq(struct sdma_engine *sde, struct iowait_work *wait, struct sdma_txreq *tx, bool pkts_sent) { int ret = 0; u16 tail; unsigned long flags; /* user should have supplied entire packet */ if (unlikely(tx->tlen)) return -EINVAL; tx->wait = iowait_ioww_to_iow(wait); spin_lock_irqsave(&sde->tail_lock, flags); retry: if (unlikely(!__sdma_running(sde))) goto unlock_noconn; if (unlikely(tx->num_desc > sde->desc_avail)) goto nodesc; tail = submit_tx(sde, tx); if (wait) iowait_sdma_inc(iowait_ioww_to_iow(wait)); sdma_update_tail(sde, tail); unlock: spin_unlock_irqrestore(&sde->tail_lock, flags); return ret; unlock_noconn: if (wait) iowait_sdma_inc(iowait_ioww_to_iow(wait)); tx->next_descq_idx = 0; #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER tx->sn = sde->tail_sn++; trace_hfi1_sdma_in_sn(sde, tx->sn); #endif spin_lock(&sde->flushlist_lock); list_add_tail(&tx->list, &sde->flushlist); spin_unlock(&sde->flushlist_lock); iowait_inc_wait_count(wait, tx->num_desc); queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker); ret = -ECOMM; goto unlock; nodesc: ret = sdma_check_progress(sde, wait, tx, pkts_sent); if (ret == -EAGAIN) { ret = 0; goto retry; } sde->descq_full_count++; goto unlock; } /** * sdma_send_txlist() - submit a list of tx req to ring * @sde: sdma engine to use * @wait: SE wait structure to use when full (may be NULL) * @tx_list: list of sdma_txreqs to submit * @count: pointer to a u16 which, after return will contain the total number of * sdma_txreqs removed from the tx_list. This will include sdma_txreqs * whose SDMA descriptors are submitted to the ring and the sdma_txreqs * which are added to SDMA engine flush list if the SDMA engine state is * not running. * * The call submits the list into the ring. * * If the iowait structure is non-NULL and not equal to the iowait list * the unprocessed part of the list will be appended to the list in wait. * * In all cases, the tx_list will be updated so the head of the tx_list is * the list of descriptors that have yet to be transmitted. * * The intent of this call is to provide a more efficient * way of submitting multiple packets to SDMA while holding the tail * side locking. 
* * Return: * 0 - Success, * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL) * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state */ int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait, struct list_head *tx_list, u16 *count_out) { struct sdma_txreq *tx, *tx_next; int ret = 0; unsigned long flags; u16 tail = INVALID_TAIL; u32 submit_count = 0, flush_count = 0, total_count; spin_lock_irqsave(&sde->tail_lock, flags); retry: list_for_each_entry_safe(tx, tx_next, tx_list, list) { tx->wait = iowait_ioww_to_iow(wait); if (unlikely(!__sdma_running(sde))) goto unlock_noconn; if (unlikely(tx->num_desc > sde->desc_avail)) goto nodesc; if (unlikely(tx->tlen)) { ret = -EINVAL; goto update_tail; } list_del_init(&tx->list); tail = submit_tx(sde, tx); submit_count++; if (tail != INVALID_TAIL && (submit_count & SDMA_TAIL_UPDATE_THRESH) == 0) { sdma_update_tail(sde, tail); tail = INVALID_TAIL; } } update_tail: total_count = submit_count + flush_count; if (wait) { iowait_sdma_add(iowait_ioww_to_iow(wait), total_count); iowait_starve_clear(submit_count > 0, iowait_ioww_to_iow(wait)); } if (tail != INVALID_TAIL) sdma_update_tail(sde, tail); spin_unlock_irqrestore(&sde->tail_lock, flags); *count_out = total_count; return ret; unlock_noconn: spin_lock(&sde->flushlist_lock); list_for_each_entry_safe(tx, tx_next, tx_list, list) { tx->wait = iowait_ioww_to_iow(wait); list_del_init(&tx->list); tx->next_descq_idx = 0; #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER tx->sn = sde->tail_sn++; trace_hfi1_sdma_in_sn(sde, tx->sn); #endif list_add_tail(&tx->list, &sde->flushlist); flush_count++; iowait_inc_wait_count(wait, tx->num_desc); } spin_unlock(&sde->flushlist_lock); queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker); ret = -ECOMM; goto update_tail; nodesc: ret = sdma_check_progress(sde, wait, tx, submit_count > 0); if (ret == -EAGAIN) { ret = 0; goto retry; } sde->descq_full_count++; goto update_tail; } static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event) { unsigned long flags; spin_lock_irqsave(&sde->tail_lock, flags); write_seqlock(&sde->head_lock); __sdma_process_event(sde, event); if (sde->state.current_state == sdma_state_s99_running) sdma_desc_avail(sde, sdma_descq_freecnt(sde)); write_sequnlock(&sde->head_lock); spin_unlock_irqrestore(&sde->tail_lock, flags); } static void __sdma_process_event(struct sdma_engine *sde, enum sdma_events event) { struct sdma_state *ss = &sde->state; int need_progress = 0; /* CONFIG SDMA temporary */ #ifdef CONFIG_SDMA_VERBOSITY dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx, sdma_state_names[ss->current_state], sdma_event_names[event]); #endif switch (ss->current_state) { case sdma_state_s00_hw_down: switch (event) { case sdma_event_e00_go_hw_down: break; case sdma_event_e30_go_running: /* * If down, but running requested (usually result * of link up, then we need to start up. * This can happen when hw down is requested while * bringing the link up with traffic active on * 7220, e.g. 
*/ ss->go_s99_running = 1; /* fall through -- and start dma engine */ case sdma_event_e10_go_hw_start: /* This reference means the state machine is started */ sdma_get(&sde->state); sdma_set_state(sde, sdma_state_s10_hw_start_up_halt_wait); break; case sdma_event_e15_hw_halt_done: break; case sdma_event_e25_hw_clean_up_done: break; case sdma_event_e40_sw_cleaned: sdma_sw_tear_down(sde); break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: break; case sdma_event_e70_go_idle: break; case sdma_event_e80_hw_freeze: break; case sdma_event_e81_hw_frozen: break; case sdma_event_e82_hw_unfreeze: break; case sdma_event_e85_link_down: break; case sdma_event_e90_sw_halted: break; } break; case sdma_state_s10_hw_start_up_halt_wait: switch (event) { case sdma_event_e00_go_hw_down: sdma_set_state(sde, sdma_state_s00_hw_down); sdma_sw_tear_down(sde); break; case sdma_event_e10_go_hw_start: break; case sdma_event_e15_hw_halt_done: sdma_set_state(sde, sdma_state_s15_hw_start_up_clean_wait); sdma_start_hw_clean_up(sde); break; case sdma_event_e25_hw_clean_up_done: break; case sdma_event_e30_go_running: ss->go_s99_running = 1; break; case sdma_event_e40_sw_cleaned: break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: schedule_work(&sde->err_halt_worker); break; case sdma_event_e70_go_idle: ss->go_s99_running = 0; break; case sdma_event_e80_hw_freeze: break; case sdma_event_e81_hw_frozen: break; case sdma_event_e82_hw_unfreeze: break; case sdma_event_e85_link_down: break; case sdma_event_e90_sw_halted: break; } break; case sdma_state_s15_hw_start_up_clean_wait: switch (event) { case sdma_event_e00_go_hw_down: sdma_set_state(sde, sdma_state_s00_hw_down); sdma_sw_tear_down(sde); break; case sdma_event_e10_go_hw_start: break; case sdma_event_e15_hw_halt_done: break; case sdma_event_e25_hw_clean_up_done: sdma_hw_start_up(sde); sdma_set_state(sde, ss->go_s99_running ? 
sdma_state_s99_running : sdma_state_s20_idle); break; case sdma_event_e30_go_running: ss->go_s99_running = 1; break; case sdma_event_e40_sw_cleaned: break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: break; case sdma_event_e70_go_idle: ss->go_s99_running = 0; break; case sdma_event_e80_hw_freeze: break; case sdma_event_e81_hw_frozen: break; case sdma_event_e82_hw_unfreeze: break; case sdma_event_e85_link_down: break; case sdma_event_e90_sw_halted: break; } break; case sdma_state_s20_idle: switch (event) { case sdma_event_e00_go_hw_down: sdma_set_state(sde, sdma_state_s00_hw_down); sdma_sw_tear_down(sde); break; case sdma_event_e10_go_hw_start: break; case sdma_event_e15_hw_halt_done: break; case sdma_event_e25_hw_clean_up_done: break; case sdma_event_e30_go_running: sdma_set_state(sde, sdma_state_s99_running); ss->go_s99_running = 1; break; case sdma_event_e40_sw_cleaned: break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: sdma_set_state(sde, sdma_state_s50_hw_halt_wait); schedule_work(&sde->err_halt_worker); break; case sdma_event_e70_go_idle: break; case sdma_event_e85_link_down: /* fall through */ case sdma_event_e80_hw_freeze: sdma_set_state(sde, sdma_state_s80_hw_freeze); atomic_dec(&sde->dd->sdma_unfreeze_count); wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); break; case sdma_event_e81_hw_frozen: break; case sdma_event_e82_hw_unfreeze: break; case sdma_event_e90_sw_halted: break; } break; case sdma_state_s30_sw_clean_up_wait: switch (event) { case sdma_event_e00_go_hw_down: sdma_set_state(sde, sdma_state_s00_hw_down); break; case sdma_event_e10_go_hw_start: break; case sdma_event_e15_hw_halt_done: break; case sdma_event_e25_hw_clean_up_done: break; case sdma_event_e30_go_running: ss->go_s99_running = 1; break; case sdma_event_e40_sw_cleaned: sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait); sdma_start_hw_clean_up(sde); break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: break; case sdma_event_e70_go_idle: ss->go_s99_running = 0; break; case sdma_event_e80_hw_freeze: break; case sdma_event_e81_hw_frozen: break; case sdma_event_e82_hw_unfreeze: break; case sdma_event_e85_link_down: ss->go_s99_running = 0; break; case sdma_event_e90_sw_halted: break; } break; case sdma_state_s40_hw_clean_up_wait: switch (event) { case sdma_event_e00_go_hw_down: sdma_set_state(sde, sdma_state_s00_hw_down); tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); break; case sdma_event_e10_go_hw_start: break; case sdma_event_e15_hw_halt_done: break; case sdma_event_e25_hw_clean_up_done: sdma_hw_start_up(sde); sdma_set_state(sde, ss->go_s99_running ? 
sdma_state_s99_running : sdma_state_s20_idle); break; case sdma_event_e30_go_running: ss->go_s99_running = 1; break; case sdma_event_e40_sw_cleaned: break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: break; case sdma_event_e70_go_idle: ss->go_s99_running = 0; break; case sdma_event_e80_hw_freeze: break; case sdma_event_e81_hw_frozen: break; case sdma_event_e82_hw_unfreeze: break; case sdma_event_e85_link_down: ss->go_s99_running = 0; break; case sdma_event_e90_sw_halted: break; } break; case sdma_state_s50_hw_halt_wait: switch (event) { case sdma_event_e00_go_hw_down: sdma_set_state(sde, sdma_state_s00_hw_down); tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); break; case sdma_event_e10_go_hw_start: break; case sdma_event_e15_hw_halt_done: sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait); tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); break; case sdma_event_e25_hw_clean_up_done: break; case sdma_event_e30_go_running: ss->go_s99_running = 1; break; case sdma_event_e40_sw_cleaned: break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: schedule_work(&sde->err_halt_worker); break; case sdma_event_e70_go_idle: ss->go_s99_running = 0; break; case sdma_event_e80_hw_freeze: break; case sdma_event_e81_hw_frozen: break; case sdma_event_e82_hw_unfreeze: break; case sdma_event_e85_link_down: ss->go_s99_running = 0; break; case sdma_event_e90_sw_halted: break; } break; case sdma_state_s60_idle_halt_wait: switch (event) { case sdma_event_e00_go_hw_down: sdma_set_state(sde, sdma_state_s00_hw_down); tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); break; case sdma_event_e10_go_hw_start: break; case sdma_event_e15_hw_halt_done: sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait); tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); break; case sdma_event_e25_hw_clean_up_done: break; case sdma_event_e30_go_running: ss->go_s99_running = 1; break; case sdma_event_e40_sw_cleaned: break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: schedule_work(&sde->err_halt_worker); break; case sdma_event_e70_go_idle: ss->go_s99_running = 0; break; case sdma_event_e80_hw_freeze: break; case sdma_event_e81_hw_frozen: break; case sdma_event_e82_hw_unfreeze: break; case sdma_event_e85_link_down: break; case sdma_event_e90_sw_halted: break; } break; case sdma_state_s80_hw_freeze: switch (event) { case sdma_event_e00_go_hw_down: sdma_set_state(sde, sdma_state_s00_hw_down); tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); break; case sdma_event_e10_go_hw_start: break; case sdma_event_e15_hw_halt_done: break; case sdma_event_e25_hw_clean_up_done: break; case sdma_event_e30_go_running: ss->go_s99_running = 1; break; case sdma_event_e40_sw_cleaned: break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: break; case sdma_event_e70_go_idle: ss->go_s99_running = 0; break; case sdma_event_e80_hw_freeze: break; case sdma_event_e81_hw_frozen: sdma_set_state(sde, sdma_state_s82_freeze_sw_clean); tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); break; case sdma_event_e82_hw_unfreeze: break; case sdma_event_e85_link_down: break; case sdma_event_e90_sw_halted: break; } break; case sdma_state_s82_freeze_sw_clean: switch (event) { case sdma_event_e00_go_hw_down: sdma_set_state(sde, sdma_state_s00_hw_down); tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); break; case sdma_event_e10_go_hw_start: break; case sdma_event_e15_hw_halt_done: break; case sdma_event_e25_hw_clean_up_done: break; case sdma_event_e30_go_running: ss->go_s99_running = 1; 
break; case sdma_event_e40_sw_cleaned: /* notify caller this engine is done cleaning */ atomic_dec(&sde->dd->sdma_unfreeze_count); wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: break; case sdma_event_e70_go_idle: ss->go_s99_running = 0; break; case sdma_event_e80_hw_freeze: break; case sdma_event_e81_hw_frozen: break; case sdma_event_e82_hw_unfreeze: sdma_hw_start_up(sde); sdma_set_state(sde, ss->go_s99_running ? sdma_state_s99_running : sdma_state_s20_idle); break; case sdma_event_e85_link_down: break; case sdma_event_e90_sw_halted: break; } break; case sdma_state_s99_running: switch (event) { case sdma_event_e00_go_hw_down: sdma_set_state(sde, sdma_state_s00_hw_down); tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); break; case sdma_event_e10_go_hw_start: break; case sdma_event_e15_hw_halt_done: break; case sdma_event_e25_hw_clean_up_done: break; case sdma_event_e30_go_running: break; case sdma_event_e40_sw_cleaned: break; case sdma_event_e50_hw_cleaned: break; case sdma_event_e60_hw_halted: need_progress = 1; sdma_err_progress_check_schedule(sde); /* fall through */ case sdma_event_e90_sw_halted: /* * SW initiated halt does not perform engines * progress check */ sdma_set_state(sde, sdma_state_s50_hw_halt_wait); schedule_work(&sde->err_halt_worker); break; case sdma_event_e70_go_idle: sdma_set_state(sde, sdma_state_s60_idle_halt_wait); break; case sdma_event_e85_link_down: ss->go_s99_running = 0; /* fall through */ case sdma_event_e80_hw_freeze: sdma_set_state(sde, sdma_state_s80_hw_freeze); atomic_dec(&sde->dd->sdma_unfreeze_count); wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); break; case sdma_event_e81_hw_frozen: break; case sdma_event_e82_hw_unfreeze: break; } break; } ss->last_event = event; if (need_progress) sdma_make_progress(sde, 0); } /* * _extend_sdma_tx_descs() - helper to extend txreq * * This is called once the initial nominal allocation * of descriptors in the sdma_txreq is exhausted. * * The code will bump the allocation up to the max * of MAX_DESC (64) descriptors. There doesn't seem * much point in an interim step. The last descriptor * is reserved for coalesce buffer in order to support * cases where input packet has >MAX_DESC iovecs. * */ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) { int i; /* Handle last descriptor */ if (unlikely((tx->num_desc == (MAX_DESC - 1)))) { /* if tlen is 0, it is for padding, release last descriptor */ if (!tx->tlen) { tx->desc_limit = MAX_DESC; } else if (!tx->coalesce_buf) { /* allocate coalesce buffer with space for padding */ tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32), GFP_ATOMIC); if (!tx->coalesce_buf) goto enomem; tx->coalesce_idx = 0; } return 0; } if (unlikely(tx->num_desc == MAX_DESC)) goto enomem; tx->descp = kmalloc_array( MAX_DESC, sizeof(struct sdma_desc), GFP_ATOMIC); if (!tx->descp) goto enomem; /* reserve last descriptor for coalescing */ tx->desc_limit = MAX_DESC - 1; /* copy ones already built */ for (i = 0; i < tx->num_desc; i++) tx->descp[i] = tx->descs[i]; return 0; enomem: __sdma_txclean(dd, tx); return -ENOMEM; } /* * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors * * This is called once the initial nominal allocation of descriptors * in the sdma_txreq is exhausted. * * This function calls _extend_sdma_tx_descs to extend or allocate * coalesce buffer. If there is a allocated coalesce buffer, it will * copy the input packet data into the coalesce buffer. 
It also adds * coalesce buffer descriptor once when whole packet is received. * * Return: * <0 - error * 0 - coalescing, don't populate descriptor * 1 - continue with populating descriptor */ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx, int type, void *kvaddr, struct page *page, unsigned long offset, u16 len) { int pad_len, rval; dma_addr_t addr; rval = _extend_sdma_tx_descs(dd, tx); if (rval) { __sdma_txclean(dd, tx); return rval; } /* If coalesce buffer is allocated, copy data into it */ if (tx->coalesce_buf) { if (type == SDMA_MAP_NONE) { __sdma_txclean(dd, tx); return -EINVAL; } if (type == SDMA_MAP_PAGE) { kvaddr = kmap(page); kvaddr += offset; } else if (WARN_ON(!kvaddr)) { __sdma_txclean(dd, tx); return -EINVAL; } memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len); tx->coalesce_idx += len; if (type == SDMA_MAP_PAGE) kunmap(page); /* If there is more data, return */ if (tx->tlen - tx->coalesce_idx) return 0; /* Whole packet is received; add any padding */ pad_len = tx->packet_len & (sizeof(u32) - 1); if (pad_len) { pad_len = sizeof(u32) - pad_len; memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len); /* padding is taken care of for coalescing case */ tx->packet_len += pad_len; tx->tlen += pad_len; } /* dma map the coalesce buffer */ addr = dma_map_single(&dd->pcidev->dev, tx->coalesce_buf, tx->tlen, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) { __sdma_txclean(dd, tx); return -ENOSPC; } /* Add descriptor for coalesce buffer */ tx->desc_limit = MAX_DESC; return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx, addr, tx->tlen); } return 1; } /* Update sdes when the lmc changes */ void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid) { struct sdma_engine *sde; int i; u64 sreg; sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) << SD(CHECK_SLID_MASK_SHIFT)) | (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) << SD(CHECK_SLID_VALUE_SHIFT)); for (i = 0; i < dd->num_sdma; i++) { hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x", i, (u32)sreg); sde = &dd->per_sdma[i]; write_sde_csr(sde, SD(CHECK_SLID), sreg); } } /* tx not dword sized - pad */ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) { int rval = 0; tx->num_desc++; if ((unlikely(tx->num_desc == tx->desc_limit))) { rval = _extend_sdma_tx_descs(dd, tx); if (rval) { __sdma_txclean(dd, tx); return rval; } } /* finish the one just added */ make_tx_sdma_desc( tx, SDMA_MAP_NONE, dd->sdma_pad_phys, sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1))); _sdma_close_tx(dd, tx); return rval; } /* * Add ahg to the sdma_txreq * * The logic will consume up to 3 * descriptors at the beginning of * sdma_txreq. 
*/ void _sdma_txreq_ahgadd( struct sdma_txreq *tx, u8 num_ahg, u8 ahg_entry, u32 *ahg, u8 ahg_hlen) { u32 i, shift = 0, desc = 0; u8 mode; WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4); /* compute mode */ if (num_ahg == 1) mode = SDMA_AHG_APPLY_UPDATE1; else if (num_ahg <= 5) mode = SDMA_AHG_APPLY_UPDATE2; else mode = SDMA_AHG_APPLY_UPDATE3; tx->num_desc++; /* initialize to consumed descriptors to zero */ switch (mode) { case SDMA_AHG_APPLY_UPDATE3: tx->num_desc++; tx->descs[2].qw[0] = 0; tx->descs[2].qw[1] = 0; /* FALLTHROUGH */ case SDMA_AHG_APPLY_UPDATE2: tx->num_desc++; tx->descs[1].qw[0] = 0; tx->descs[1].qw[1] = 0; break; } ahg_hlen >>= 2; tx->descs[0].qw[1] |= (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK) << SDMA_DESC1_HEADER_INDEX_SHIFT) | (((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK) << SDMA_DESC1_HEADER_DWS_SHIFT) | (((u64)mode & SDMA_DESC1_HEADER_MODE_MASK) << SDMA_DESC1_HEADER_MODE_SHIFT) | (((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK) << SDMA_DESC1_HEADER_UPDATE1_SHIFT); for (i = 0; i < (num_ahg - 1); i++) { if (!shift && !(i & 2)) desc++; tx->descs[desc].qw[!!(i & 2)] |= (((u64)ahg[i + 1]) << shift); shift = (shift + 32) & 63; } } /** * sdma_ahg_alloc - allocate an AHG entry * @sde: engine to allocate from * * Return: * 0-31 when successful, -EOPNOTSUPP if AHG is not enabled, * -ENOSPC if an entry is not available */ int sdma_ahg_alloc(struct sdma_engine *sde) { int nr; int oldbit; if (!sde) { trace_hfi1_ahg_allocate(sde, -EINVAL); return -EINVAL; } while (1) { nr = ffz(READ_ONCE(sde->ahg_bits)); if (nr > 31) { trace_hfi1_ahg_allocate(sde, -ENOSPC); return -ENOSPC; } oldbit = test_and_set_bit(nr, &sde->ahg_bits); if (!oldbit) break; cpu_relax(); } trace_hfi1_ahg_allocate(sde, nr); return nr; } /** * sdma_ahg_free - free an AHG entry * @sde: engine to return AHG entry * @ahg_index: index to free * * This routine frees the indicate AHG entry. */ void sdma_ahg_free(struct sdma_engine *sde, int ahg_index) { if (!sde) return; trace_hfi1_ahg_deallocate(sde, ahg_index); if (ahg_index < 0 || ahg_index > 31) return; clear_bit(ahg_index, &sde->ahg_bits); } /* * SPC freeze handling for SDMA engines. Called when the driver knows * the SPC is going into a freeze but before the freeze is fully * settled. Generally an error interrupt. * * This event will pull the engine out of running so no more entries can be * added to the engine's queue. */ void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down) { int i; enum sdma_events event = link_down ? sdma_event_e85_link_down : sdma_event_e80_hw_freeze; /* set up the wait but do not wait here */ atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma); /* tell all engines to stop running and wait */ for (i = 0; i < dd->num_sdma; i++) sdma_process_event(&dd->per_sdma[i], event); /* sdma_freeze() will wait for all engines to have stopped */ } /* * SPC freeze handling for SDMA engines. Called when the driver knows * the SPC is fully frozen. */ void sdma_freeze(struct hfi1_devdata *dd) { int i; int ret; /* * Make sure all engines have moved out of the running state before * continuing. 
*/ ret = wait_event_interruptible(dd->sdma_unfreeze_wq, atomic_read(&dd->sdma_unfreeze_count) <= 0); /* interrupted or count is negative, then unloading - just exit */ if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0) return; /* set up the count for the next wait */ atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma); /* tell all engines that the SPC is frozen, they can start cleaning */ for (i = 0; i < dd->num_sdma; i++) sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen); /* * Wait for everyone to finish software clean before exiting. The * software clean will read engine CSRs, so must be completed before * the next step, which will clear the engine CSRs. */ (void)wait_event_interruptible(dd->sdma_unfreeze_wq, atomic_read(&dd->sdma_unfreeze_count) <= 0); /* no need to check results - done no matter what */ } /* * SPC freeze handling for the SDMA engines. Called after the SPC is unfrozen. * * The SPC freeze acts like a SDMA halt and a hardware clean combined. All * that is left is a software clean. We could do it after the SPC is fully * frozen, but then we'd have to add another state to wait for the unfreeze. * Instead, just defer the software clean until the unfreeze step. */ void sdma_unfreeze(struct hfi1_devdata *dd) { int i; /* tell all engines start freeze clean up */ for (i = 0; i < dd->num_sdma; i++) sdma_process_event(&dd->per_sdma[i], sdma_event_e82_hw_unfreeze); } /** * _sdma_engine_progress_schedule() - schedule progress on engine * @sde: sdma_engine to schedule progress * */ void _sdma_engine_progress_schedule( struct sdma_engine *sde) { trace_hfi1_sdma_engine_progress(sde, sde->progress_mask); /* assume we have selected a good cpu */ write_csr(sde->dd, CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)), sde->progress_mask); }
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1255_0
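The sdma.c sample that ends above is filed under CWE-400 (uncontrolled resource consumption). The candidate most visible in this copy sits in sdma_init(): tmp_sdma_rht is allocated with kzalloc(), but when rhashtable_init() fails the code takes the bail path without freeing it, so every failed initialization leaks the allocation. The sketch below shows the leak-free shape of that error path. It is an illustration, not the upstream patch: the helper name sdma_rht_setup() is invented for the example, while struct hfi1_devdata, sdma_rht_params and the sdma_rht field come from the sample, and the snippet assumes the sample's kernel headers are in scope.

/*
 * Sketch only: leak-free error handling for the SDMA rhashtable setup,
 * mirroring the allocation sequence seen in sdma_init() above.
 */
static int sdma_rht_setup(struct hfi1_devdata *dd)
{
	struct rhashtable *tmp_sdma_rht;
	int ret;

	tmp_sdma_rht = kzalloc(sizeof(*tmp_sdma_rht), GFP_KERNEL);
	if (!tmp_sdma_rht)
		return -ENOMEM;

	ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params);
	if (ret < 0) {
		/* the sample above jumps to bail here and never frees the table */
		kfree(tmp_sdma_rht);
		return ret;
	}

	dd->sdma_rht = tmp_sdma_rht;
	return 0;
}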
crossvul-cpp_data_bad_812_0
#include "jsi.h" #include "jsvalue.h" #include "jsbuiltin.h" #include "regexp.h" void js_newregexp(js_State *J, const char *pattern, int flags) { const char *error; js_Object *obj; Reprog *prog; int opts; obj = jsV_newobject(J, JS_CREGEXP, J->RegExp_prototype); opts = 0; if (flags & JS_REGEXP_I) opts |= REG_ICASE; if (flags & JS_REGEXP_M) opts |= REG_NEWLINE; prog = js_regcompx(J->alloc, J->actx, pattern, opts, &error); if (!prog) js_syntaxerror(J, "regular expression: %s", error); obj->u.r.prog = prog; obj->u.r.source = js_strdup(J, pattern); obj->u.r.flags = flags; obj->u.r.last = 0; js_pushobject(J, obj); } void js_RegExp_prototype_exec(js_State *J, js_Regexp *re, const char *text) { int i; int opts; Resub m; opts = 0; if (re->flags & JS_REGEXP_G) { if (re->last > strlen(text)) { re->last = 0; js_pushnull(J); return; } if (re->last > 0) { text += re->last; opts |= REG_NOTBOL; } } if (!js_regexec(re->prog, text, &m, opts)) { js_newarray(J); js_pushstring(J, text); js_setproperty(J, -2, "input"); js_pushnumber(J, js_utfptrtoidx(text, m.sub[0].sp)); js_setproperty(J, -2, "index"); for (i = 0; i < m.nsub; ++i) { js_pushlstring(J, m.sub[i].sp, m.sub[i].ep - m.sub[i].sp); js_setindex(J, -2, i); } if (re->flags & JS_REGEXP_G) re->last = re->last + (m.sub[0].ep - text); return; } if (re->flags & JS_REGEXP_G) re->last = 0; js_pushnull(J); } static void Rp_test(js_State *J) { js_Regexp *re; const char *text; int opts; Resub m; re = js_toregexp(J, 0); text = js_tostring(J, 1); opts = 0; if (re->flags & JS_REGEXP_G) { if (re->last > strlen(text)) { re->last = 0; js_pushboolean(J, 0); return; } if (re->last > 0) { text += re->last; opts |= REG_NOTBOL; } } if (!js_regexec(re->prog, text, &m, opts)) { if (re->flags & JS_REGEXP_G) re->last = re->last + (m.sub[0].ep - text); js_pushboolean(J, 1); return; } if (re->flags & JS_REGEXP_G) re->last = 0; js_pushboolean(J, 0); } static void jsB_new_RegExp(js_State *J) { js_Regexp *old; const char *pattern; int flags; if (js_isregexp(J, 1)) { if (js_isdefined(J, 2)) js_typeerror(J, "cannot supply flags when creating one RegExp from another"); old = js_toregexp(J, 1); pattern = old->source; flags = old->flags; } else if (js_isundefined(J, 1)) { pattern = "(?:)"; flags = 0; } else { pattern = js_tostring(J, 1); flags = 0; } if (strlen(pattern) == 0) pattern = "(?:)"; if (js_isdefined(J, 2)) { const char *s = js_tostring(J, 2); int g = 0, i = 0, m = 0; while (*s) { if (*s == 'g') ++g; else if (*s == 'i') ++i; else if (*s == 'm') ++m; else js_syntaxerror(J, "invalid regular expression flag: '%c'", *s); ++s; } if (g > 1) js_syntaxerror(J, "invalid regular expression flag: 'g'"); if (i > 1) js_syntaxerror(J, "invalid regular expression flag: 'i'"); if (m > 1) js_syntaxerror(J, "invalid regular expression flag: 'm'"); if (g) flags |= JS_REGEXP_G; if (i) flags |= JS_REGEXP_I; if (m) flags |= JS_REGEXP_M; } js_newregexp(J, pattern, flags); } static void jsB_RegExp(js_State *J) { if (js_isregexp(J, 1)) return; jsB_new_RegExp(J); } static void Rp_toString(js_State *J) { js_Regexp *re; char *out; re = js_toregexp(J, 0); out = js_malloc(J, strlen(re->source) + 6); /* extra space for //gim */ strcpy(out, "/"); strcat(out, re->source); strcat(out, "/"); if (re->flags & JS_REGEXP_G) strcat(out, "g"); if (re->flags & JS_REGEXP_I) strcat(out, "i"); if (re->flags & JS_REGEXP_M) strcat(out, "m"); if (js_try(J)) { js_free(J, out); js_throw(J); } js_pop(J, 0); js_pushstring(J, out); js_endtry(J); js_free(J, out); } static void Rp_exec(js_State *J) { js_RegExp_prototype_exec(J, 
		js_toregexp(J, 0), js_tostring(J, 1));
}

void jsB_initregexp(js_State *J)
{
	js_pushobject(J, J->RegExp_prototype);
	{
		jsB_propf(J, "RegExp.prototype.toString", Rp_toString, 0);
		jsB_propf(J, "RegExp.prototype.test", Rp_test, 0);
		jsB_propf(J, "RegExp.prototype.exec", Rp_exec, 0);
	}
	js_newcconstructor(J, jsB_RegExp, jsB_new_RegExp, "RegExp", 1);
	js_defglobal(J, "RegExp", JS_DONTENUM);
}
./CrossVul/dataset_final_sorted/CWE-400/c/bad_812_0
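Which specific change earned the jsregexp.c copy above its bad_812_0 label cannot be read off this file alone; what is visible is that js_newregexp() duplicates the pattern with js_strdup() and compiles a Reprog with js_regcompx() with no bound applied in this copy, and that Rp_test()/Rp_exec() advance re->last across the subject string when the global flag is set. The driver below shows how a host program reaches those paths through the public mujs API. It is illustrative only and assumes a build linked against libmujs; js_newstate(), js_dostring() and js_freestate() are the library's standard entry points, and the embedded script is plain ECMAScript.

#include <mujs.h>

/* Illustrative driver: the RegExp literal goes through jsB_new_RegExp,
 * and the test()/exec() calls land in Rp_test and Rp_exec above.
 * With the 'g' flag, re->last advances between the two calls. */
int main(void)
{
	js_State *J = js_newstate(NULL, NULL, JS_STRICT);
	if (!J)
		return 1;

	if (js_dostring(J,
		"var re = /ab+c/g;"
		"if (!re.test('xxabbbc xxabc')) throw new Error('no first match');"
		"if (!re.exec('xxabbbc xxabc')) throw new Error('no second match');")) {
		js_freestate(J);
		return 1;
	}

	js_freestate(J);
	return 0;
}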
crossvul-cpp_data_good_1269_0
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2018, Linaro Ltd */ #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/poll.h> #include <linux/skbuff.h> #include <linux/uaccess.h> #include "qrtr.h" struct qrtr_tun { struct qrtr_endpoint ep; struct sk_buff_head queue; wait_queue_head_t readq; }; static int qrtr_tun_send(struct qrtr_endpoint *ep, struct sk_buff *skb) { struct qrtr_tun *tun = container_of(ep, struct qrtr_tun, ep); skb_queue_tail(&tun->queue, skb); /* wake up any blocking processes, waiting for new data */ wake_up_interruptible(&tun->readq); return 0; } static int qrtr_tun_open(struct inode *inode, struct file *filp) { struct qrtr_tun *tun; tun = kzalloc(sizeof(*tun), GFP_KERNEL); if (!tun) return -ENOMEM; skb_queue_head_init(&tun->queue); init_waitqueue_head(&tun->readq); tun->ep.xmit = qrtr_tun_send; filp->private_data = tun; return qrtr_endpoint_register(&tun->ep, QRTR_EP_NID_AUTO); } static ssize_t qrtr_tun_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *filp = iocb->ki_filp; struct qrtr_tun *tun = filp->private_data; struct sk_buff *skb; int count; while (!(skb = skb_dequeue(&tun->queue))) { if (filp->f_flags & O_NONBLOCK) return -EAGAIN; /* Wait until we get data or the endpoint goes away */ if (wait_event_interruptible(tun->readq, !skb_queue_empty(&tun->queue))) return -ERESTARTSYS; } count = min_t(size_t, iov_iter_count(to), skb->len); if (copy_to_iter(skb->data, count, to) != count) count = -EFAULT; kfree_skb(skb); return count; } static ssize_t qrtr_tun_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *filp = iocb->ki_filp; struct qrtr_tun *tun = filp->private_data; size_t len = iov_iter_count(from); ssize_t ret; void *kbuf; kbuf = kzalloc(len, GFP_KERNEL); if (!kbuf) return -ENOMEM; if (!copy_from_iter_full(kbuf, len, from)) { kfree(kbuf); return -EFAULT; } ret = qrtr_endpoint_post(&tun->ep, kbuf, len); kfree(kbuf); return ret < 0 ? ret : len; } static __poll_t qrtr_tun_poll(struct file *filp, poll_table *wait) { struct qrtr_tun *tun = filp->private_data; __poll_t mask = 0; poll_wait(filp, &tun->readq, wait); if (!skb_queue_empty(&tun->queue)) mask |= EPOLLIN | EPOLLRDNORM; return mask; } static int qrtr_tun_release(struct inode *inode, struct file *filp) { struct qrtr_tun *tun = filp->private_data; struct sk_buff *skb; qrtr_endpoint_unregister(&tun->ep); /* Discard all SKBs */ while (!skb_queue_empty(&tun->queue)) { skb = skb_dequeue(&tun->queue); kfree_skb(skb); } kfree(tun); return 0; } static const struct file_operations qrtr_tun_ops = { .owner = THIS_MODULE, .open = qrtr_tun_open, .poll = qrtr_tun_poll, .read_iter = qrtr_tun_read_iter, .write_iter = qrtr_tun_write_iter, .release = qrtr_tun_release, }; static struct miscdevice qrtr_tun_miscdev = { MISC_DYNAMIC_MINOR, "qrtr-tun", &qrtr_tun_ops, }; static int __init qrtr_tun_init(void) { int ret; ret = misc_register(&qrtr_tun_miscdev); if (ret) pr_err("failed to register Qualcomm IPC Router tun device\n"); return ret; } static void __exit qrtr_tun_exit(void) { misc_deregister(&qrtr_tun_miscdev); } module_init(qrtr_tun_init); module_exit(qrtr_tun_exit); MODULE_DESCRIPTION("Qualcomm IPC Router TUN device"); MODULE_LICENSE("GPL v2");
./CrossVul/dataset_final_sorted/CWE-400/c/good_1269_0
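The qrtr_tun.c copy above is the patched half of sample 1269: qrtr_tun_write_iter() releases kbuf both when copy_from_iter_full() fails and once qrtr_endpoint_post() has consumed the data, so the staging buffer never outlives a single write. The unpatched copy of the same file follows as bad_1269_0.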
crossvul-cpp_data_bad_1269_0
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2018, Linaro Ltd */ #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/poll.h> #include <linux/skbuff.h> #include <linux/uaccess.h> #include "qrtr.h" struct qrtr_tun { struct qrtr_endpoint ep; struct sk_buff_head queue; wait_queue_head_t readq; }; static int qrtr_tun_send(struct qrtr_endpoint *ep, struct sk_buff *skb) { struct qrtr_tun *tun = container_of(ep, struct qrtr_tun, ep); skb_queue_tail(&tun->queue, skb); /* wake up any blocking processes, waiting for new data */ wake_up_interruptible(&tun->readq); return 0; } static int qrtr_tun_open(struct inode *inode, struct file *filp) { struct qrtr_tun *tun; tun = kzalloc(sizeof(*tun), GFP_KERNEL); if (!tun) return -ENOMEM; skb_queue_head_init(&tun->queue); init_waitqueue_head(&tun->readq); tun->ep.xmit = qrtr_tun_send; filp->private_data = tun; return qrtr_endpoint_register(&tun->ep, QRTR_EP_NID_AUTO); } static ssize_t qrtr_tun_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *filp = iocb->ki_filp; struct qrtr_tun *tun = filp->private_data; struct sk_buff *skb; int count; while (!(skb = skb_dequeue(&tun->queue))) { if (filp->f_flags & O_NONBLOCK) return -EAGAIN; /* Wait until we get data or the endpoint goes away */ if (wait_event_interruptible(tun->readq, !skb_queue_empty(&tun->queue))) return -ERESTARTSYS; } count = min_t(size_t, iov_iter_count(to), skb->len); if (copy_to_iter(skb->data, count, to) != count) count = -EFAULT; kfree_skb(skb); return count; } static ssize_t qrtr_tun_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *filp = iocb->ki_filp; struct qrtr_tun *tun = filp->private_data; size_t len = iov_iter_count(from); ssize_t ret; void *kbuf; kbuf = kzalloc(len, GFP_KERNEL); if (!kbuf) return -ENOMEM; if (!copy_from_iter_full(kbuf, len, from)) return -EFAULT; ret = qrtr_endpoint_post(&tun->ep, kbuf, len); return ret < 0 ? ret : len; } static __poll_t qrtr_tun_poll(struct file *filp, poll_table *wait) { struct qrtr_tun *tun = filp->private_data; __poll_t mask = 0; poll_wait(filp, &tun->readq, wait); if (!skb_queue_empty(&tun->queue)) mask |= EPOLLIN | EPOLLRDNORM; return mask; } static int qrtr_tun_release(struct inode *inode, struct file *filp) { struct qrtr_tun *tun = filp->private_data; struct sk_buff *skb; qrtr_endpoint_unregister(&tun->ep); /* Discard all SKBs */ while (!skb_queue_empty(&tun->queue)) { skb = skb_dequeue(&tun->queue); kfree_skb(skb); } kfree(tun); return 0; } static const struct file_operations qrtr_tun_ops = { .owner = THIS_MODULE, .open = qrtr_tun_open, .poll = qrtr_tun_poll, .read_iter = qrtr_tun_read_iter, .write_iter = qrtr_tun_write_iter, .release = qrtr_tun_release, }; static struct miscdevice qrtr_tun_miscdev = { MISC_DYNAMIC_MINOR, "qrtr-tun", &qrtr_tun_ops, }; static int __init qrtr_tun_init(void) { int ret; ret = misc_register(&qrtr_tun_miscdev); if (ret) pr_err("failed to register Qualcomm IPC Router tun device\n"); return ret; } static void __exit qrtr_tun_exit(void) { misc_deregister(&qrtr_tun_miscdev); } module_init(qrtr_tun_init); module_exit(qrtr_tun_exit); MODULE_DESCRIPTION("Qualcomm IPC Router TUN device"); MODULE_LICENSE("GPL v2");
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1269_0
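Compared with the fixed copy earlier (good_1269_0), the visible difference in this bad_1269_0 copy is confined to qrtr_tun_write_iter(): kbuf is not freed when copy_from_iter_full() fails, and not freed after qrtr_endpoint_post() either, so every write leaks the kzalloc'd staging buffer, which is the uncontrolled memory growth that places the sample under CWE-400. A condensed sketch of the leak-free write path follows; the calls are taken from the fixed sample, while the function name tun_write_sketch() and the bare qrtr_endpoint pointer parameter are simplifications for the example.

/* Condensed from the fixed sample: the staging buffer is freed on every
 * exit once it has been allocated. */
static ssize_t tun_write_sketch(struct qrtr_endpoint *ep, struct iov_iter *from)
{
	size_t len = iov_iter_count(from);
	ssize_t ret;
	void *kbuf;

	kbuf = kzalloc(len, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	if (!copy_from_iter_full(kbuf, len, from)) {
		kfree(kbuf);	/* missing in the copy above */
		return -EFAULT;
	}

	ret = qrtr_endpoint_post(ep, kbuf, len);

	kfree(kbuf);		/* also missing in the copy above */
	return ret < 0 ? ret : len;
}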
crossvul-cpp_data_bad_1272_1
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/slab.h> #include "dm_services.h" #include "link_encoder.h" #include "stream_encoder.h" #include "resource.h" #include "dce110/dce110_resource.h" #include "include/irq_service_interface.h" #include "dce/dce_audio.h" #include "dce110/dce110_timing_generator.h" #include "irq/dce110/irq_service_dce110.h" #include "dce110/dce110_timing_generator_v.h" #include "dce/dce_link_encoder.h" #include "dce/dce_stream_encoder.h" #include "dce/dce_mem_input.h" #include "dce110/dce110_mem_input_v.h" #include "dce/dce_ipp.h" #include "dce/dce_transform.h" #include "dce110/dce110_transform_v.h" #include "dce/dce_opp.h" #include "dce110/dce110_opp_v.h" #include "dce/dce_clock_source.h" #include "dce/dce_hwseq.h" #include "dce110/dce110_hw_sequencer.h" #include "dce/dce_aux.h" #include "dce/dce_abm.h" #include "dce/dce_dmcu.h" #include "dce/dce_i2c.h" #define DC_LOGGER \ dc->ctx->logger #include "dce110/dce110_compressor.h" #include "reg_helper.h" #include "dce/dce_11_0_d.h" #include "dce/dce_11_0_sh_mask.h" #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT #include "gmc/gmc_8_2_d.h" #include "gmc/gmc_8_2_sh_mask.h" #endif #ifndef mmDP_DPHY_INTERNAL_CTRL #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7 #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7 #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7 #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7 #endif #ifndef mmBIOS_SCRATCH_2 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF #endif #ifndef mmDP_DPHY_BS_SR_SWAP_CNTL #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC #endif #ifndef mmDP_DPHY_FAST_TRAINING #define mmDP_DPHY_FAST_TRAINING 0x4ABC #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC #define 
mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC #endif #ifndef DPHY_RX_FAST_TRAINING_CAPABLE #define DPHY_RX_FAST_TRAINING_CAPABLE 0x1 #endif static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = { { .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL), } }; /* set register offset */ #define SR(reg_name)\ .reg_name = mm ## reg_name /* set register offset with instance */ #define SRI(reg_name, block, id)\ .reg_name = mm ## block ## id ## _ ## reg_name static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCE110_COMMON_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { DMCU_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_dmcu_mask dmcu_mask = { DMCU_MASK_SH_LIST_DCE110(_MASK) }; static const struct dce_abm_registers abm_regs = { ABM_DCE110_COMMON_REG_LIST() }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCE110(_MASK) }; #define ipp_regs(id)\ [id] = {\ IPP_DCE110_REG_LIST_DCE_BASE(id)\ } static const struct dce_ipp_registers ipp_regs[] = { ipp_regs(0), ipp_regs(1), ipp_regs(2) }; static const struct dce_ipp_shift ipp_shift = { IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce_ipp_mask ipp_mask = { IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; #define transform_regs(id)\ [id] = {\ XFM_COMMON_REG_LIST_DCE110(id)\ } static const struct dce_transform_registers xfm_regs[] = { transform_regs(0), transform_regs(1), transform_regs(2) }; static const struct dce_transform_shift xfm_shift = { XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_transform_mask xfm_mask = { XFM_COMMON_MASK_SH_LIST_DCE110(_MASK) }; #define aux_regs(id)\ [id] = {\ AUX_REG_LIST(id)\ } static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3), aux_regs(4), aux_regs(5) }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3), hpd_regs(4), hpd_regs(5) }; #define link_regs(id)\ [id] = {\ LE_DCE110_REG_LIST(id)\ } static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(0), link_regs(1), link_regs(2), link_regs(3), link_regs(4), link_regs(5), link_regs(6), }; #define stream_enc_regs(id)\ [id] = {\ SE_COMMON_REG_LIST(id),\ .TMDS_CNTL = 0,\ } static const struct dce110_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2) }; static const struct dce_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE110(_MASK) }; #define opp_regs(id)\ [id] = {\ OPP_DCE_110_REG_LIST(id),\ } static const struct dce_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), opp_regs(3), opp_regs(4), opp_regs(5) }; static const 
struct dce_opp_shift opp_shift = { OPP_COMMON_MASK_SH_LIST_DCE_110(__SHIFT) }; static const struct dce_opp_mask opp_mask = { OPP_COMMON_MASK_SH_LIST_DCE_110(_MASK) }; #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ .AUX_RESET_MASK = 0 \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4), aux_engine_regs(5) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), audio_regs(2), audio_regs(3), audio_regs(4), audio_regs(5), audio_regs(6), }; static const struct dce_audio_shift audio_shift = { AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { AUD_COMMON_MASK_SH_LIST(_MASK) }; /* AG TBD Needs to be reduced back to 3 pipes once dce10 hw sequencer implemented. */ #define clk_src_regs(id)\ [id] = {\ CS_COMMON_REG_LIST_DCE_100_110(id),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0), clk_src_regs(1), clk_src_regs(2) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; static const struct bios_registers bios_regs = { .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; static const struct resource_caps carrizo_resource_cap = { .num_timing_generator = 3, .num_video_plane = 1, .num_audio = 3, .num_stream_encoder = 3, .num_pll = 2, .num_ddc = 3, }; static const struct resource_caps stoney_resource_cap = { .num_timing_generator = 2, .num_video_plane = 1, .num_audio = 3, .num_stream_encoder = 3, .num_pll = 2, .num_ddc = 3, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCE_RGB, .blends_with_below = true, .blends_with_above = true, .per_pixel_alpha = 1, .pixel_format_support = { .argb8888 = true, .nv12 = false, .fp16 = false }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 1, .fp16 = 1 }, .max_downscale_factor = { .argb8888 = 250, .nv12 = 1, .fp16 = 1 } }; static const struct dc_plane_cap underlay_plane_cap = { .type = DC_PLANE_TYPE_DCE_UNDERLAY, .blends_with_above = true, .per_pixel_alpha = 1, .pixel_format_support = { .argb8888 = false, .nv12 = true, .fp16 = false }, .max_upscale_factor = { .argb8888 = 1, .nv12 = 16000, .fp16 = 1 }, .max_downscale_factor = { .argb8888 = 1, .nv12 = 250, .fp16 = 1 } }; #define CTX ctx #define REG(reg) mm ## reg #ifndef mmCC_DC_HDMI_STRAPS #define mmCC_DC_HDMI_STRAPS 0x4819 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { REG_GET_2(CC_DC_HDMI_STRAPS, HDMI_DISABLE, &straps->hdmi_disable, AUDIO_STREAM_NUMBER, &straps->audio_stream_number); REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio); } static struct audio *create_audio( struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } static struct timing_generator *dce110_timing_generator_create( struct dc_context *ctx, uint32_t instance, const struct dce110_timing_generator_offsets *offsets) { struct dce110_timing_generator *tg110 = kzalloc(sizeof(struct dce110_timing_generator), 
GFP_KERNEL); if (!tg110) return NULL; dce110_timing_generator_construct(tg110, ctx, instance, offsets); return &tg110->base; } static struct stream_encoder *dce110_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dce110_stream_encoder *enc110 = kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); if (!enc110) return NULL; dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc110->base; } #define SRII(reg_name, block, id)\ .reg_name[id] = mm ## block ## id ## _ ## reg_name static const struct dce_hwseq_registers hwseq_stoney_reg = { HWSEQ_ST_REG_LIST() }; static const struct dce_hwseq_registers hwseq_cz_reg = { HWSEQ_CZ_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCE11_MASK_SH_LIST(__SHIFT), }; static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCE11_MASK_SH_LIST(_MASK), }; static struct dce_hwseq *dce110_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = ASIC_REV_IS_STONEY(ctx->asic_id.hw_internal_rev) ? &hwseq_stoney_reg : &hwseq_cz_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; hws->wa.blnd_crtc_trigger = true; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, .create_stream_encoder = dce110_stream_encoder_create, .create_hwseq = dce110_hwseq_create, }; #define mi_inst_regs(id) { \ MI_DCE11_REG_LIST(id), \ .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \ } static const struct dce_mem_input_registers mi_regs[] = { mi_inst_regs(0), mi_inst_regs(1), mi_inst_regs(2), }; static const struct dce_mem_input_shift mi_shifts = { MI_DCE11_MASK_SH_LIST(__SHIFT), .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT }; static const struct dce_mem_input_mask mi_masks = { MI_DCE11_MASK_SH_LIST(_MASK), .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK }; static struct mem_input *dce110_mem_input_create( struct dc_context *ctx, uint32_t inst) { struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), GFP_KERNEL); if (!dce_mi) { BREAK_TO_DEBUGGER(); return NULL; } dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); dce_mi->wa.single_head_rdreq_dmif_limit = 3; return &dce_mi->base; } static void dce110_transform_destroy(struct transform **xfm) { kfree(TO_DCE_TRANSFORM(*xfm)); *xfm = NULL; } static struct transform *dce110_transform_create( struct dc_context *ctx, uint32_t inst) { struct dce_transform *transform = kzalloc(sizeof(struct dce_transform), GFP_KERNEL); if (!transform) return NULL; dce_transform_construct(transform, ctx, inst, &xfm_regs[inst], &xfm_shift, &xfm_mask); return &transform->base; } static struct input_pixel_processor *dce110_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); return NULL; } dce_ipp_construct(ipp, ctx, inst, &ipp_regs[inst], &ipp_shift, &ipp_mask); return &ipp->base; } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 300000, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true }; static struct link_encoder *dce110_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); if (!enc110) 
return NULL; dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; } static struct output_pixel_processor *dce110_opp_create( struct dc_context *ctx, uint32_t inst) { struct dce110_opp *opp = kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); if (!opp) return NULL; dce110_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } struct dce_aux *dce110_aux_engine_create( struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst]); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5), i2c_inst_regs(6), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) }; struct dce_i2c_hw *dce110_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dce100_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } struct clock_source *dce110_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dce110_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } BREAK_TO_DEBUGGER(); return NULL; } void dce110_clock_source_destroy(struct clock_source **clk_src) { struct dce110_clk_src *dce110_clk_src; if (!clk_src) return; dce110_clk_src = TO_DCE110_CLK_SRC(*clk_src); kfree(dce110_clk_src->dp_ss_params); kfree(dce110_clk_src->hdmi_ss_params); kfree(dce110_clk_src->dvi_ss_params); kfree(dce110_clk_src); *clk_src = NULL; } static void destruct(struct dce110_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.opps[i] != NULL) dce110_opp_destroy(&pool->base.opps[i]); if (pool->base.transforms[i] != NULL) dce110_transform_destroy(&pool->base.transforms[i]); if (pool->base.ipps[i] != NULL) dce_ipp_destroy(&pool->base.ipps[i]); if (pool->base.mis[i] != NULL) { kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); pool->base.mis[i] = NULL; } if (pool->base.timing_generators[i] != NULL) { kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); } for (i = 0; i < 
pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) { dce110_clock_source_destroy(&pool->base.clock_sources[i]); } } if (pool->base.dp_clock_source != NULL) dce110_clock_source_destroy(&pool->base.dp_clock_source); for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i] != NULL) { dce_aud_destroy(&pool->base.audios[i]); } } if (pool->base.abm != NULL) dce_abm_destroy(&pool->base.abm); if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); } } static void get_pixel_clock_parameters( const struct pipe_ctx *pipe_ctx, struct pixel_clk_params *pixel_clk_params) { const struct dc_stream_state *stream = pipe_ctx->stream; /*TODO: is this halved for YCbCr 420? in that case we might want to move * the pixel clock normalization for hdmi up to here instead of doing it * in pll_adjust_pix_clk */ pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; pixel_clk_params->encoder_object_id = stream->link->link_enc->id; pixel_clk_params->signal_type = pipe_ctx->stream->signal; pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; /* TODO: un-hardcode*/ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * LINK_RATE_REF_FREQ_IN_KHZ; pixel_clk_params->flags.ENABLE_SS = 0; pixel_clk_params->color_depth = stream->timing.display_color_depth; pixel_clk_params->flags.DISPLAY_BLANKED = 1; pixel_clk_params->flags.SUPPORT_YCBCR420 = (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420); pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding; if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) { pixel_clk_params->color_depth = COLOR_DEPTH_888; } if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) { pixel_clk_params->requested_pix_clk_100hz = pixel_clk_params->requested_pix_clk_100hz / 2; } if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) pixel_clk_params->requested_pix_clk_100hz *= 2; } void dce110_resource_build_pipe_hw_param(struct pipe_ctx *pipe_ctx) { get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params); pipe_ctx->clock_source->funcs->get_pix_clk_dividers( pipe_ctx->clock_source, &pipe_ctx->stream_res.pix_clk_params, &pipe_ctx->pll_settings); resource_build_bit_depth_reduction_params(pipe_ctx->stream, &pipe_ctx->stream->bit_depth_params); pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding; } static bool is_surface_pixel_format_supported(struct pipe_ctx *pipe_ctx, unsigned int underlay_idx) { if (pipe_ctx->pipe_idx != underlay_idx) return true; if (!pipe_ctx->plane_state) return false; if (pipe_ctx->plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) return false; return true; } static enum dc_status build_mapped_resource( const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) { struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream); if (!pipe_ctx) return DC_ERROR_UNEXPECTED; if (!is_surface_pixel_format_supported(pipe_ctx, dc->res_pool->underlay_pipe_index)) return DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED; dce110_resource_build_pipe_hw_param(pipe_ctx); /* TODO: validate audio ASIC caps, encoder */ resource_build_info_frame(pipe_ctx); return DC_OK; } static bool dce110_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate) { bool result = false; DC_LOG_BANDWIDTH_CALCS( "%s: start", __func__); if (bw_calcs( dc->ctx, dc->bw_dceip, dc->bw_vbios, context->res_ctx.pipe_ctx, 
dc->res_pool->pipe_count, &context->bw_ctx.bw.dce)) result = true; if (!result) DC_LOG_BANDWIDTH_VALIDATION("%s: %dx%d@%d Bandwidth validation failed!\n", __func__, context->streams[0]->timing.h_addressable, context->streams[0]->timing.v_addressable, context->streams[0]->timing.pix_clk_100hz / 10); if (memcmp(&dc->current_state->bw_ctx.bw.dce, &context->bw_ctx.bw.dce, sizeof(context->bw_ctx.bw.dce))) { DC_LOG_BANDWIDTH_CALCS( "%s: finish,\n" "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" "stutMark_b: %d stutMark_a: %d\n" "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" "stutMark_b: %d stutMark_a: %d\n" "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n" "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n" "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n" , __func__, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].b_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].a_mark, context->bw_ctx.bw.dce.urgent_wm_ns[0].b_mark, context->bw_ctx.bw.dce.urgent_wm_ns[0].a_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].b_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].a_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].b_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].a_mark, context->bw_ctx.bw.dce.urgent_wm_ns[1].b_mark, context->bw_ctx.bw.dce.urgent_wm_ns[1].a_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].b_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].a_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].b_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].a_mark, context->bw_ctx.bw.dce.urgent_wm_ns[2].b_mark, context->bw_ctx.bw.dce.urgent_wm_ns[2].a_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].b_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].a_mark, context->bw_ctx.bw.dce.stutter_mode_enable, context->bw_ctx.bw.dce.cpuc_state_change_enable, context->bw_ctx.bw.dce.cpup_state_change_enable, context->bw_ctx.bw.dce.nbp_state_change_enable, context->bw_ctx.bw.dce.all_displays_in_sync, context->bw_ctx.bw.dce.dispclk_khz, context->bw_ctx.bw.dce.sclk_khz, context->bw_ctx.bw.dce.sclk_deep_sleep_khz, context->bw_ctx.bw.dce.yclk_khz, context->bw_ctx.bw.dce.blackout_recovery_time_us); } return result; } enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps) { if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) || ((plane_state->dst_rect.height * 2) < plane_state->src_rect.height)) return DC_FAIL_SURFACE_VALIDATE; return DC_OK; } static bool dce110_validate_surface_sets( struct dc_state *context) { int i, j; for (i = 0; i < context->stream_count; i++) { if (context->stream_status[i].plane_count == 0) continue; if (context->stream_status[i].plane_count > 2) return false; for (j = 0; j < context->stream_status[i].plane_count; j++) { struct dc_plane_state *plane = context->stream_status[i].plane_states[j]; /* underlay validation */ if (plane->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { if ((plane->src_rect.width > 1920 || plane->src_rect.height > 1080)) return false; /* we don't have the logic to support underlay * only yet so block the use case where we get * NV12 plane as top layer */ if (j == 0) return false; /* irrespective of plane format, * stream should be RGB encoded */ if (context->streams[i]->timing.pixel_encoding != PIXEL_ENCODING_RGB) return false; } } } return true; } enum dc_status dce110_validate_global( struct dc *dc, struct dc_state *context) { if 
(!dce110_validate_surface_sets(context)) return DC_FAIL_SURFACE_VALIDATE; return DC_OK; } static enum dc_status dce110_add_stream_to_ctx( struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) { enum dc_status result = DC_ERROR_UNEXPECTED; result = resource_map_pool_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = resource_map_clock_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = build_mapped_resource(dc, new_ctx, dc_stream); return result; } static struct pipe_ctx *dce110_acquire_underlay( struct dc_state *context, const struct resource_pool *pool, struct dc_stream_state *stream) { struct dc *dc = stream->ctx->dc; struct resource_context *res_ctx = &context->res_ctx; unsigned int underlay_idx = pool->underlay_pipe_index; struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx]; if (res_ctx->pipe_ctx[underlay_idx].stream) return NULL; pipe_ctx->stream_res.tg = pool->timing_generators[underlay_idx]; pipe_ctx->plane_res.mi = pool->mis[underlay_idx]; /*pipe_ctx->plane_res.ipp = res_ctx->pool->ipps[underlay_idx];*/ pipe_ctx->plane_res.xfm = pool->transforms[underlay_idx]; pipe_ctx->stream_res.opp = pool->opps[underlay_idx]; pipe_ctx->pipe_idx = underlay_idx; pipe_ctx->stream = stream; if (!dc->current_state->res_ctx.pipe_ctx[underlay_idx].stream) { struct tg_color black_color = {0}; struct dc_bios *dcb = dc->ctx->dc_bios; dc->hwss.enable_display_power_gating( dc, pipe_ctx->stream_res.tg->inst, dcb, PIPE_GATING_CONTROL_DISABLE); /* * This is for powering on underlay, so crtc does not * need to be enabled */ pipe_ctx->stream_res.tg->funcs->program_timing(pipe_ctx->stream_res.tg, &stream->timing, 0, 0, 0, 0, pipe_ctx->stream->signal, false); pipe_ctx->stream_res.tg->funcs->enable_advanced_request( pipe_ctx->stream_res.tg, true, &stream->timing); pipe_ctx->plane_res.mi->funcs->allocate_mem_input(pipe_ctx->plane_res.mi, stream->timing.h_total, stream->timing.v_total, stream->timing.pix_clk_100hz / 10, context->stream_count); color_space_to_black_color(dc, COLOR_SPACE_YCBCR601, &black_color); pipe_ctx->stream_res.tg->funcs->set_blank_color( pipe_ctx->stream_res.tg, &black_color); } return pipe_ctx; } static void dce110_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } struct stream_encoder *dce110_find_first_free_match_stream_enc_for_link( struct resource_context *res_ctx, const struct resource_pool *pool, struct dc_stream_state *stream) { int i; int j = -1; struct dc_link *link = stream->link; for (i = 0; i < pool->stream_enc_count; i++) { if (!res_ctx->is_stream_enc_acquired[i] && pool->stream_enc[i]) { /* Store first available for MST second display * in daisy chain use case */ j = i; if (pool->stream_enc[i]->id == link->link_enc->preferred_engine) return pool->stream_enc[i]; } } /* * For CZ and later, we can allow DIG FE and BE to differ for all display types */ if (j >= 0) return pool->stream_enc[j]; return NULL; } static const struct resource_funcs dce110_res_pool_funcs = { .destroy = dce110_destroy_resource_pool, .link_enc_create = dce110_link_encoder_create, .validate_bandwidth = dce110_validate_bandwidth, .validate_plane = dce110_validate_plane, .acquire_idle_pipe_for_layer = dce110_acquire_underlay, .add_stream_to_ctx = dce110_add_stream_to_ctx, .validate_global = dce110_validate_global, .find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link }; static bool 
underlay_create(struct dc_context *ctx, struct resource_pool *pool) { struct dce110_timing_generator *dce110_tgv = kzalloc(sizeof(*dce110_tgv), GFP_KERNEL); struct dce_transform *dce110_xfmv = kzalloc(sizeof(*dce110_xfmv), GFP_KERNEL); struct dce_mem_input *dce110_miv = kzalloc(sizeof(*dce110_miv), GFP_KERNEL); struct dce110_opp *dce110_oppv = kzalloc(sizeof(*dce110_oppv), GFP_KERNEL); if (!dce110_tgv || !dce110_xfmv || !dce110_miv || !dce110_oppv) { kfree(dce110_tgv); kfree(dce110_xfmv); kfree(dce110_miv); kfree(dce110_oppv); return false; } dce110_opp_v_construct(dce110_oppv, ctx); dce110_timing_generator_v_construct(dce110_tgv, ctx); dce110_mem_input_v_construct(dce110_miv, ctx); dce110_transform_v_construct(dce110_xfmv, ctx); pool->opps[pool->pipe_count] = &dce110_oppv->base; pool->timing_generators[pool->pipe_count] = &dce110_tgv->base; pool->mis[pool->pipe_count] = &dce110_miv->base; pool->transforms[pool->pipe_count] = &dce110_xfmv->base; pool->pipe_count++; /* update the public caps to indicate an underlay is available */ ctx->dc->caps.max_slave_planes = 1; ctx->dc->caps.max_slave_planes = 1; return true; } static void bw_calcs_data_update_from_pplib(struct dc *dc) { struct dm_pp_clock_levels clks = {0}; /*do system clock*/ dm_pp_get_clock_levels_by_type( dc->ctx, DM_PP_CLOCK_TYPE_ENGINE_CLK, &clks); /* convert all the clock fro kHz to fix point mHz */ dc->bw_vbios->high_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels-1], 1000); dc->bw_vbios->mid1_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels/8], 1000); dc->bw_vbios->mid2_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*2/8], 1000); dc->bw_vbios->mid3_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*3/8], 1000); dc->bw_vbios->mid4_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*4/8], 1000); dc->bw_vbios->mid5_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*5/8], 1000); dc->bw_vbios->mid6_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*6/8], 1000); dc->bw_vbios->low_sclk = bw_frc_to_fixed( clks.clocks_in_khz[0], 1000); dc->sclk_lvls = clks; /*do display clock*/ dm_pp_get_clock_levels_by_type( dc->ctx, DM_PP_CLOCK_TYPE_DISPLAY_CLK, &clks); dc->bw_vbios->high_voltage_max_dispclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels-1], 1000); dc->bw_vbios->mid_voltage_max_dispclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels>>1], 1000); dc->bw_vbios->low_voltage_max_dispclk = bw_frc_to_fixed( clks.clocks_in_khz[0], 1000); /*do memory clock*/ dm_pp_get_clock_levels_by_type( dc->ctx, DM_PP_CLOCK_TYPE_MEMORY_CLK, &clks); dc->bw_vbios->low_yclk = bw_frc_to_fixed( clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); } const struct resource_caps *dce110_resource_cap( struct hw_asic_id *asic_id) { if (ASIC_REV_IS_STONEY(asic_id->hw_internal_rev)) return &stoney_resource_cap; else return &carrizo_resource_cap; } static bool construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool, struct hw_asic_id asic_id) { unsigned int i; struct dc_context *ctx = dc->ctx; struct dc_bios *bp; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = dce110_resource_cap(&ctx->asic_id); pool->base.funcs = &dce110_res_pool_funcs; /************************************************* * Resource + asic cap harcoding * 
*************************************************/ pool->base.pipe_count = pool->base.res_cap->num_timing_generator; pool->base.underlay_pipe_index = pool->base.pipe_count; pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; dc->caps.max_downscale_ratio = 150; dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 128; dc->caps.is_apu = true; /************************************************* * Create resources * *************************************************/ bp = ctx->dc_bios; if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); pool->base.clock_sources[0] = dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[1] = dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clk_src_count = 2; /* TODO: find out if CZ support 3 PLLs */ } if (pool->base.dp_clock_source == NULL) { dm_error("DC: failed to create dp clock source!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } } pool->base.dmcu = dce_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dce110_create(&init_data); if (!pool->base.irqs) goto res_create_fail; } for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = dce110_timing_generator_create( ctx, i, &dce110_tg_offsets[i]); if (pool->base.timing_generators[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto res_create_fail; } pool->base.mis[i] = dce110_mem_input_create(ctx, i); if (pool->base.mis[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create memory input!\n"); goto res_create_fail; } pool->base.ipps[i] = dce110_ipp_create(ctx, i); if (pool->base.ipps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create input pixel processor!\n"); goto res_create_fail; } pool->base.transforms[i] = dce110_transform_create(ctx, i); if (pool->base.transforms[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create transform!\n"); goto res_create_fail; } pool->base.opps[i] = dce110_opp_create(ctx, i); if (pool->base.opps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create output pixel processor!\n"); goto res_create_fail; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce110_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto res_create_fail; } pool->base.hw_i2cs[i] = dce110_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create i2c engine!!\n"); goto res_create_fail; } pool->base.sw_i2cs[i] = NULL; } if (dc->config.fbc_support) dc->fbc_compressor = dce110_compressor_create(ctx); if (!underlay_create(ctx, &pool->base)) goto res_create_fail; if 
(!resource_construct(num_virtual_links, dc, &pool->base,
			&res_create_funcs))
		goto res_create_fail;

	/* Create hardware sequencer */
	dce110_hw_sequencer_construct(dc);

	dc->caps.max_planes = pool->base.pipe_count;

	for (i = 0; i < pool->base.underlay_pipe_index; ++i)
		dc->caps.planes[i] = plane_cap;

	dc->caps.planes[pool->base.underlay_pipe_index] = underlay_plane_cap;

	bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);

	bw_calcs_data_update_from_pplib(dc);

	return true;

res_create_fail:
	destruct(pool);
	return false;
}

struct resource_pool *dce110_create_resource_pool(
	uint8_t num_virtual_links,
	struct dc *dc,
	struct hw_asic_id asic_id)
{
	struct dce110_resource_pool *pool =
		kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);

	if (!pool)
		return NULL;

	if (construct(num_virtual_links, dc, pool, asic_id))
		return &pool->base;

	BREAK_TO_DEBUGGER();
	return NULL;
}
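/*
 * Illustrative sketch only, not part of the original driver source: a
 * minimal caller-side view of the pool lifecycle implemented above.
 * example_dce110_pool_lifecycle() is a hypothetical helper name; the real
 * entry points it relies on are dce110_create_resource_pool() and the
 * funcs->destroy() callback (dce110_destroy_resource_pool) defined in this
 * file.
 */
static void __maybe_unused example_dce110_pool_lifecycle(struct dc *dc,
		struct hw_asic_id asic_id)
{
	struct resource_pool *pool;

	/* No virtual links in this minimal example. */
	pool = dce110_create_resource_pool(0, dc, asic_id);
	if (!pool)
		return;

	/*
	 * A real caller would keep the pool (typically in dc->res_pool) for
	 * the lifetime of the dc instance before tearing it down.
	 */

	/* Teardown is routed through the funcs table set up in construct(). */
	pool->funcs->destroy(&pool);
}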
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M OOO DDDD U U L EEEEE % % MM MM O O D D U U L E % % M M M O O D D U U L EEE % % M M O O D D U U L E % % M M OOO DDDD UUU LLLLL EEEEE % % % % % % MagickCore Module Methods % % % % Software Design % % Bob Friesenhahn % % March 2000 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/blob.h" #include "magick/coder.h" #include "magick/client.h" #include "magick/configure.h" #include "magick/deprecate.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/log.h" #include "magick/hashmap.h" #include "magick/magic.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/nt-base-private.h" #include "magick/policy.h" #include "magick/semaphore.h" #include "magick/splay-tree.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/token.h" #include "magick/utility.h" #if defined(MAGICKCORE_MODULES_SUPPORT) #if defined(MAGICKCORE_LTDL_DELEGATE) #include "ltdl.h" typedef lt_dlhandle ModuleHandle; #else typedef void *ModuleHandle; #endif /* Define declarations. */ #if defined(MAGICKCORE_LTDL_DELEGATE) # define ModuleGlobExpression "*.la" #else # if defined(_DEBUG) # define ModuleGlobExpression "IM_MOD_DB_*.dll" # else # define ModuleGlobExpression "IM_MOD_RL_*.dll" # endif #endif /* Global declarations. */ static SemaphoreInfo *module_semaphore = (SemaphoreInfo *) NULL; static SplayTreeInfo *module_list = (SplayTreeInfo *) NULL; /* Forward declarations. */ static const ModuleInfo *RegisterModule(const ModuleInfo *,ExceptionInfo *); static MagickBooleanType GetMagickModulePath(const char *,MagickModuleType,char *,ExceptionInfo *), IsModuleTreeInstantiated(ExceptionInfo *), UnregisterModule(const ModuleInfo *,ExceptionInfo *); static void TagToCoderModuleName(const char *,char *), TagToFilterModuleName(const char *,char *), TagToModuleName(const char *,const char *,char *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e M o d u l e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireModuleInfo() allocates the ModuleInfo structure. % % The format of the AcquireModuleInfo method is: % % ModuleInfo *AcquireModuleInfo(const char *path,const char *tag) % % A description of each parameter follows: % % o path: the path associated with the tag. % % o tag: a character string that represents the image format we are % looking for. 
% */ MagickExport ModuleInfo *AcquireModuleInfo(const char *path,const char *tag) { ModuleInfo *module_info; module_info=(ModuleInfo *) AcquireMagickMemory(sizeof(*module_info)); if (module_info == (ModuleInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(module_info,0,sizeof(*module_info)); if (path != (const char *) NULL) module_info->path=ConstantString(path); if (tag != (const char *) NULL) module_info->tag=ConstantString(tag); module_info->timestamp=time(0); module_info->signature=MagickSignature; return(module_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y M o d u l e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyModuleList() unregisters any previously loaded modules and exits % the module loaded environment. % % The format of the DestroyModuleList module is: % % void DestroyModuleList(void) % */ MagickExport void DestroyModuleList(void) { /* Destroy magick modules. */ LockSemaphoreInfo(module_semaphore); #if defined(MAGICKCORE_MODULES_SUPPORT) if (module_list != (SplayTreeInfo *) NULL) module_list=DestroySplayTree(module_list); #endif UnlockSemaphoreInfo(module_semaphore); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M o d u l e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetModuleInfo() returns a pointer to a ModuleInfo structure that matches the % specified tag. If tag is NULL, the head of the module list is returned. If % no modules are loaded, or the requested module is not found, NULL is % returned. % % The format of the GetModuleInfo module is: % % ModuleInfo *GetModuleInfo(const char *tag,ExceptionInfo *exception) % % A description of each parameter follows: % % o tag: a character string that represents the image format we are % looking for. % % o exception: return any errors or warnings in this structure. % */ MagickExport ModuleInfo *GetModuleInfo(const char *tag,ExceptionInfo *exception) { ModuleInfo *module_info; if (IsModuleTreeInstantiated(exception) == MagickFalse) return((ModuleInfo *) NULL); LockSemaphoreInfo(module_semaphore); ResetSplayTreeIterator(module_list); if ((tag == (const char *) NULL) || (LocaleCompare(tag,"*") == 0)) { #if defined(MAGICKCORE_MODULES_SUPPORT) if (LocaleCompare(tag,"*") == 0) (void) OpenModules(exception); #endif module_info=(ModuleInfo *) GetNextValueInSplayTree(module_list); UnlockSemaphoreInfo(module_semaphore); return(module_info); } module_info=(ModuleInfo *) GetValueFromSplayTree(module_list,tag); UnlockSemaphoreInfo(module_semaphore); return(module_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M o d u l e I n f o L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetModuleInfoList() returns any modules that match the specified pattern. % % The format of the GetModuleInfoList function is: % % const ModuleInfo **GetModuleInfoList(const char *pattern, % size_t *number_modules,ExceptionInfo *exception) % % A description of each parameter follows: % % o pattern: Specifies a pointer to a text string containing a pattern. % % o number_modules: This integer returns the number of modules in the list. % % o exception: return any errors or warnings in this structure. 
% */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int ModuleInfoCompare(const void *x,const void *y) { const ModuleInfo **p, **q; p=(const ModuleInfo **) x, q=(const ModuleInfo **) y; if (LocaleCompare((*p)->path,(*q)->path) == 0) return(LocaleCompare((*p)->tag,(*q)->tag)); return(LocaleCompare((*p)->path,(*q)->path)); } #if defined(__cplusplus) || defined(c_plusplus) } #endif MagickExport const ModuleInfo **GetModuleInfoList(const char *pattern, size_t *number_modules,ExceptionInfo *exception) { const ModuleInfo **modules; register const ModuleInfo *p; register ssize_t i; /* Allocate module list. */ assert(pattern != (char *) NULL); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",pattern); assert(number_modules != (size_t *) NULL); *number_modules=0; p=GetModuleInfo("*",exception); if (p == (const ModuleInfo *) NULL) return((const ModuleInfo **) NULL); modules=(const ModuleInfo **) AcquireQuantumMemory((size_t) GetNumberOfNodesInSplayTree(module_list)+1UL,sizeof(*modules)); if (modules == (const ModuleInfo **) NULL) return((const ModuleInfo **) NULL); /* Generate module list. */ LockSemaphoreInfo(module_semaphore); ResetSplayTreeIterator(module_list); p=(const ModuleInfo *) GetNextValueInSplayTree(module_list); for (i=0; p != (const ModuleInfo *) NULL; ) { if ((p->stealth == MagickFalse) && (GlobExpression(p->tag,pattern,MagickFalse) != MagickFalse)) modules[i++]=p; p=(const ModuleInfo *) GetNextValueInSplayTree(module_list); } UnlockSemaphoreInfo(module_semaphore); qsort((void *) modules,(size_t) i,sizeof(*modules),ModuleInfoCompare); modules[i]=(ModuleInfo *) NULL; *number_modules=(size_t) i; return(modules); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M o d u l e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetModuleList() returns any image format modules that match the specified % pattern. % % The format of the GetModuleList function is: % % char **GetModuleList(const char *pattern,const MagickModuleType type, % size_t *number_modules,ExceptionInfo *exception) % % A description of each parameter follows: % % o pattern: Specifies a pointer to a text string containing a pattern. % % o type: choose from MagickImageCoderModule or MagickImageFilterModule. % % o number_modules: This integer returns the number of modules in the % list. % % o exception: return any errors or warnings in this structure. % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int ModuleCompare(const void *x,const void *y) { register const char **p, **q; p=(const char **) x; q=(const char **) y; return(LocaleCompare(*p,*q)); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static inline int MagickReadDirectory(DIR *directory,struct dirent *entry, struct dirent **result) { #if defined(MAGICKCORE_HAVE_READDIR_R) return(readdir_r(directory,entry,result)); #else (void) entry; errno=0; *result=readdir(directory); return(errno); #endif } MagickExport char **GetModuleList(const char *pattern, const MagickModuleType type,size_t *number_modules,ExceptionInfo *exception) { #define MaxModules 511 char **modules, filename[MaxTextExtent], module_path[MaxTextExtent], path[MaxTextExtent]; DIR *directory; MagickBooleanType status; register ssize_t i; size_t max_entries; struct dirent *buffer, *entry; /* Locate all modules in the image coder or filter path. 
*/ switch (type) { case MagickImageCoderModule: default: { TagToCoderModuleName("magick",filename); status=GetMagickModulePath(filename,MagickImageCoderModule,module_path, exception); break; } case MagickImageFilterModule: { TagToFilterModuleName("analyze",filename); status=GetMagickModulePath(filename,MagickImageFilterModule,module_path, exception); break; } } if (status == MagickFalse) return((char **) NULL); GetPathComponent(module_path,HeadPath,path); max_entries=MaxModules; modules=(char **) AcquireQuantumMemory((size_t) max_entries+1UL, sizeof(*modules)); if (modules == (char **) NULL) return((char **) NULL); *modules=(char *) NULL; directory=opendir(path); if (directory == (DIR *) NULL) { modules=(char **) RelinquishMagickMemory(modules); return((char **) NULL); } buffer=(struct dirent *) AcquireMagickMemory(sizeof(*buffer)+FILENAME_MAX+1); if (buffer == (struct dirent *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); i=0; while ((MagickReadDirectory(directory,buffer,&entry) == 0) && (entry != (struct dirent *) NULL)) { status=GlobExpression(entry->d_name,ModuleGlobExpression,MagickFalse); if (status == MagickFalse) continue; if (GlobExpression(entry->d_name,pattern,MagickFalse) == MagickFalse) continue; if (i >= (ssize_t) max_entries) { modules=(char **) NULL; if (~max_entries > max_entries) modules=(char **) ResizeQuantumMemory(modules,(size_t) (max_entries << 1),sizeof(*modules)); max_entries<<=1; if (modules == (char **) NULL) break; } /* Add new module name to list. */ modules[i]=AcquireString((char *) NULL); GetPathComponent(entry->d_name,BasePath,modules[i]); if (LocaleNCompare("IM_MOD_",modules[i],7) == 0) { (void) CopyMagickString(modules[i],modules[i]+10,MaxTextExtent); modules[i][strlen(modules[i])-1]='\0'; } i++; } buffer=(struct dirent *) RelinquishMagickMemory(buffer); (void) closedir(directory); if (modules == (char **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ConfigureError, "MemoryAllocationFailed","`%s'",pattern); return((char **) NULL); } qsort((void *) modules,(size_t) i,sizeof(*modules),ModuleCompare); modules[i]=(char *) NULL; *number_modules=(size_t) i; return(modules); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M a g i c k M o d u l e P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMagickModulePath() finds a module with the specified module type and % filename. % % The format of the GetMagickModulePath module is: % % MagickBooleanType GetMagickModulePath(const char *filename, % MagickModuleType module_type,char *path,ExceptionInfo *exception) % % A description of each parameter follows: % % o filename: the module file name. % % o module_type: the module type: MagickImageCoderModule or % MagickImageFilterModule. % % o path: the path associated with the filename. % % o exception: return any errors or warnings in this structure. 
% */ static MagickBooleanType GetMagickModulePath(const char *filename, MagickModuleType module_type,char *path,ExceptionInfo *exception) { char *module_path; assert(filename != (const char *) NULL); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",filename); assert(path != (char *) NULL); assert(exception != (ExceptionInfo *) NULL); (void) CopyMagickString(path,filename,MaxTextExtent); module_path=(char *) NULL; switch (module_type) { case MagickImageCoderModule: default: { (void) LogMagickEvent(ModuleEvent,GetMagickModule(), "Searching for coder module file \"%s\" ...",filename); module_path=GetEnvironmentValue("MAGICK_CODER_MODULE_PATH"); #if defined(MAGICKCORE_CODER_PATH) if (module_path == (char *) NULL) module_path=AcquireString(MAGICKCORE_CODER_PATH); #endif break; } case MagickImageFilterModule: { (void) LogMagickEvent(ModuleEvent,GetMagickModule(), "Searching for filter module file \"%s\" ...",filename); module_path=GetEnvironmentValue("MAGICK_CODER_FILTER_PATH"); #if defined(MAGICKCORE_FILTER_PATH) if (module_path == (char *) NULL) module_path=AcquireString(MAGICKCORE_FILTER_PATH); #endif break; } } if (module_path != (char *) NULL) { register char *p, *q; for (p=module_path-1; p != (char *) NULL; ) { (void) CopyMagickString(path,p+1,MaxTextExtent); q=strchr(path,DirectoryListSeparator); if (q != (char *) NULL) *q='\0'; q=path+strlen(path)-1; if ((q >= path) && (*q != *DirectorySeparator)) (void) ConcatenateMagickString(path,DirectorySeparator,MaxTextExtent); (void) ConcatenateMagickString(path,filename,MaxTextExtent); if (IsPathAccessible(path) != MagickFalse) { module_path=DestroyString(module_path); return(MagickTrue); } p=strchr(p+1,DirectoryListSeparator); } module_path=DestroyString(module_path); } #if defined(MAGICKCORE_INSTALLED_SUPPORT) else #if defined(MAGICKCORE_CODER_PATH) { const char *directory; /* Search hard coded paths. */ switch (module_type) { case MagickImageCoderModule: default: { directory=MAGICKCORE_CODER_PATH; break; } case MagickImageFilterModule: { directory=MAGICKCORE_FILTER_PATH; break; } } (void) FormatLocaleString(path,MaxTextExtent,"%s%s",directory,filename); if (IsPathAccessible(path) == MagickFalse) { ThrowFileException(exception,ConfigureWarning, "UnableToOpenModuleFile",path); return(MagickFalse); } return(MagickTrue); } #else #if defined(MAGICKCORE_WINDOWS_SUPPORT) { const char *registery_key; unsigned char *key_value; /* Locate path via registry key. */ switch (module_type) { case MagickImageCoderModule: default: { registery_key="CoderModulesPath"; break; } case MagickImageFilterModule: { registery_key="FilterModulesPath"; break; } } key_value=NTRegistryKeyLookup(registery_key); if (key_value == (unsigned char *) NULL) { ThrowMagickException(exception,GetMagickModule(),ConfigureError, "RegistryKeyLookupFailed","`%s'",registery_key); return(MagickFalse); } (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",(char *) key_value, DirectorySeparator,filename); key_value=(unsigned char *) RelinquishMagickMemory(key_value); if (IsPathAccessible(path) == MagickFalse) { ThrowFileException(exception,ConfigureWarning, "UnableToOpenModuleFile",path); return(MagickFalse); } return(MagickTrue); } #endif #endif #if !defined(MAGICKCORE_CODER_PATH) && !defined(MAGICKCORE_WINDOWS_SUPPORT) # error MAGICKCORE_CODER_PATH or MAGICKCORE_WINDOWS_SUPPORT must be defined when MAGICKCORE_INSTALLED_SUPPORT is defined #endif #else { char *home; home=GetEnvironmentValue("MAGICK_HOME"); if (home != (char *) NULL) { /* Search MAGICK_HOME. 
*/ #if !defined(MAGICKCORE_POSIX_SUPPORT) (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",home, DirectorySeparator,filename); #else const char *directory; switch (module_type) { case MagickImageCoderModule: default: { directory=MAGICKCORE_CODER_RELATIVE_PATH; break; } case MagickImageFilterModule: { directory=MAGICKCORE_FILTER_RELATIVE_PATH; break; } } (void) FormatLocaleString(path,MaxTextExtent,"%s/lib/%s/%s",home, directory,filename); #endif home=DestroyString(home); if (IsPathAccessible(path) != MagickFalse) return(MagickTrue); } } if (*GetClientPath() != '\0') { /* Search based on executable directory. */ #if !defined(MAGICKCORE_POSIX_SUPPORT) (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",GetClientPath(), DirectorySeparator,filename); #else char prefix[MaxTextExtent]; const char *directory; switch (module_type) { case MagickImageCoderModule: default: { directory="coders"; break; } case MagickImageFilterModule: { directory="filters"; break; } } (void) CopyMagickString(prefix,GetClientPath(),MaxTextExtent); ChopPathComponents(prefix,1); (void) FormatLocaleString(path,MaxTextExtent,"%s/lib/%s/%s/%s",prefix, MAGICKCORE_MODULES_RELATIVE_PATH,directory,filename); #endif if (IsPathAccessible(path) != MagickFalse) return(MagickTrue); } #if defined(MAGICKCORE_WINDOWS_SUPPORT) { /* Search module path. */ if ((NTGetModulePath("CORE_RL_magick_.dll",path) != MagickFalse) || (NTGetModulePath("CORE_DB_magick_.dll",path) != MagickFalse) || (NTGetModulePath("Magick.dll",path) != MagickFalse)) { (void) ConcatenateMagickString(path,DirectorySeparator,MaxTextExtent); (void) ConcatenateMagickString(path,filename,MaxTextExtent); if (IsPathAccessible(path) != MagickFalse) return(MagickTrue); } } #endif { char *home; home=GetEnvironmentValue("XDG_CONFIG_HOME"); if (home == (char *) NULL) home=GetEnvironmentValue("LOCALAPPDATA"); if (home == (char *) NULL) home=GetEnvironmentValue("APPDATA"); if (home == (char *) NULL) home=GetEnvironmentValue("USERPROFILE"); if (home != (char *) NULL) { /* Search $XDG_CONFIG_HOME/ImageMagick. */ (void) FormatLocaleString(path,MaxTextExtent,"%s%sImageMagick%s%s", home,DirectorySeparator,DirectorySeparator,filename); home=DestroyString(home); if (IsPathAccessible(path) != MagickFalse) return(MagickTrue); } home=GetEnvironmentValue("HOME"); if (home != (char *) NULL) { /* Search $HOME/.config/ImageMagick. */ (void) FormatLocaleString(path,MaxTextExtent, "%s%s.config%sImageMagick%s%s",home,DirectorySeparator, DirectorySeparator,DirectorySeparator,filename); if (IsPathAccessible(path) != MagickFalse) { home=DestroyString(home); return(MagickTrue); } /* Search $HOME/.magick. */ (void) FormatLocaleString(path,MaxTextExtent,"%s%s.magick%s%s",home, DirectorySeparator,DirectorySeparator,filename); home=DestroyString(home); if (IsPathAccessible(path) != MagickFalse) return(MagickTrue); } } /* Search current directory. */ if (IsPathAccessible(path) != MagickFalse) return(MagickTrue); if (exception->severity < ConfigureError) ThrowFileException(exception,ConfigureWarning,"UnableToOpenModuleFile", path); #endif return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s M o d u l e T r e e I n s t a n t i a t e d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsModuleTreeInstantiated() determines if the module tree is instantiated. % If not, it instantiates the tree and returns it. 
% % The format of the IsModuleTreeInstantiated() method is: % % MagickBooleanType IsModuleTreeInstantiated(Exceptioninfo *exception) % % A description of each parameter follows. % % o exception: return any errors or warnings in this structure. % */ static void *DestroyModuleNode(void *module_info) { ExceptionInfo *exception; register ModuleInfo *p; exception=AcquireExceptionInfo(); p=(ModuleInfo *) module_info; if (UnregisterModule(p,exception) == MagickFalse) CatchException(exception); if (p->tag != (char *) NULL) p->tag=DestroyString(p->tag); if (p->path != (char *) NULL) p->path=DestroyString(p->path); exception=DestroyExceptionInfo(exception); return(RelinquishMagickMemory(p)); } static MagickBooleanType IsModuleTreeInstantiated( ExceptionInfo *magick_unused(exception)) { magick_unreferenced(exception); if (module_list == (SplayTreeInfo *) NULL) { if (module_semaphore == (SemaphoreInfo *) NULL) ActivateSemaphoreInfo(&module_semaphore); LockSemaphoreInfo(module_semaphore); if (module_list == (SplayTreeInfo *) NULL) { MagickBooleanType status; ModuleInfo *module_info; module_list=NewSplayTree(CompareSplayTreeString, (void *(*)(void *)) NULL,DestroyModuleNode); if (module_list == (SplayTreeInfo *) NULL) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); module_info=AcquireModuleInfo((const char *) NULL,"[boot-strap]"); module_info->stealth=MagickTrue; status=AddValueToSplayTree(module_list,module_info->tag,module_info); if (status == MagickFalse) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); if (lt_dlinit() != 0) ThrowFatalException(ModuleFatalError, "UnableToInitializeModuleLoader"); } UnlockSemaphoreInfo(module_semaphore); } return(module_list != (SplayTreeInfo *) NULL ? MagickTrue : MagickFalse); } MagickExport MagickBooleanType InitializeModuleList(ExceptionInfo *exception) { return(IsModuleTreeInstantiated(exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n v o k e D y n a m i c I m a g e F i l t e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InvokeDynamicImageFilter() invokes a dynamic image filter. % % The format of the InvokeDynamicImageFilter module is: % % MagickBooleanType InvokeDynamicImageFilter(const char *tag,Image **image, % const int argc,const char **argv,ExceptionInfo *exception) % % A description of each parameter follows: % % o tag: a character string that represents the name of the particular % module. % % o image: the image. % % o argc: a pointer to an integer describing the number of elements in the % argument vector. % % o argv: a pointer to a text array containing the command line arguments. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType InvokeDynamicImageFilter(const char *tag, Image **images,const int argc,const char **argv,ExceptionInfo *exception) { char name[MaxTextExtent], path[MaxTextExtent]; ImageFilterHandler *image_filter; MagickBooleanType status; ModuleHandle handle; PolicyRights rights; /* Find the module. 
*/ assert(images != (Image **) NULL); assert((*images)->signature == MagickSignature); if ((*images)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", (*images)->filename); #if !defined(MAGICKCORE_BUILD_MODULES) { MagickBooleanType status; status=InvokeStaticImageFilter(tag,images,argc,argv,exception); if (status != MagickFalse) return(status); } #endif rights=ReadPolicyRights; if (IsRightsAuthorized(FilterPolicyDomain,rights,tag) == MagickFalse) { errno=EPERM; (void) ThrowMagickException(exception,GetMagickModule(),PolicyError, "NotAuthorized","`%s'",tag); return(MagickFalse); } TagToFilterModuleName(tag,name); status=GetMagickModulePath(name,MagickImageFilterModule,path,exception); if (status == MagickFalse) { (void) ThrowMagickException(exception,GetMagickModule(),ModuleError, "UnableToLoadModule","`%s': %s",name,path); return(MagickFalse); } /* Open the module. */ handle=(ModuleHandle) lt_dlopen(path); if (handle == (ModuleHandle) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ModuleError, "UnableToLoadModule","`%s': %s",name,lt_dlerror()); return(MagickFalse); } /* Locate the module. */ #if !defined(MAGICKCORE_NAMESPACE_PREFIX) (void) FormatLocaleString(name,MaxTextExtent,"%sImage",tag); #else (void) FormatLocaleString(name,MaxTextExtent,"%s%sImage", MAGICKCORE_NAMESPACE_PREFIX,tag); #endif /* Execute the module. */ ClearMagickException(exception); image_filter=(ImageFilterHandler *) lt_dlsym(handle,name); if (image_filter == (ImageFilterHandler *) NULL) (void) ThrowMagickException(exception,GetMagickModule(),ModuleError, "UnableToLoadModule","`%s': %s",name,lt_dlerror()); else { size_t signature; if ((*images)->debug != MagickFalse) (void) LogMagickEvent(ModuleEvent,GetMagickModule(), "Invoking \"%s\" dynamic image filter",tag); signature=image_filter(images,argc,argv,exception); if ((*images)->debug != MagickFalse) (void) LogMagickEvent(ModuleEvent,GetMagickModule(),"\"%s\" completes", tag); if (signature != MagickImageFilterSignature) (void) ThrowMagickException(exception,GetMagickModule(),ModuleError, "ImageFilterSignatureMismatch","`%s': %8lx != %8lx",tag, (unsigned long) signature,(unsigned long) MagickImageFilterSignature); } /* Close the module. */ if (lt_dlclose(handle) != 0) (void) ThrowMagickException(exception,GetMagickModule(),ModuleWarning, "UnableToCloseModule","`%s': %s",name,lt_dlerror()); return(exception->severity < ErrorException ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i s t M o d u l e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ListModuleInfo() lists the module info to a file. % % The format of the ListModuleInfo module is: % % MagickBooleanType ListModuleInfo(FILE *file,ExceptionInfo *exception) % % A description of each parameter follows. % % o file: An pointer to a FILE. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ListModuleInfo(FILE *file, ExceptionInfo *exception) { char filename[MaxTextExtent], module_path[MaxTextExtent], **modules, path[MaxTextExtent]; register ssize_t i; size_t number_modules; if (file == (const FILE *) NULL) file=stdout; /* List image coders. 
*/ modules=GetModuleList("*",MagickImageCoderModule,&number_modules,exception); if (modules == (char **) NULL) return(MagickFalse); TagToCoderModuleName("magick",filename); (void) GetMagickModulePath(filename,MagickImageCoderModule,module_path, exception); GetPathComponent(module_path,HeadPath,path); (void) FormatLocaleFile(file,"\nPath: %s\n\n",path); (void) FormatLocaleFile(file,"Image Coder\n"); (void) FormatLocaleFile(file, "-------------------------------------------------" "------------------------------\n"); for (i=0; i < (ssize_t) number_modules; i++) { (void) FormatLocaleFile(file,"%s",modules[i]); (void) FormatLocaleFile(file,"\n"); } (void) fflush(file); /* Relinquish resources. */ for (i=0; i < (ssize_t) number_modules; i++) modules[i]=DestroyString(modules[i]); modules=(char **) RelinquishMagickMemory(modules); /* List image filters. */ modules=GetModuleList("*",MagickImageFilterModule,&number_modules,exception); if (modules == (char **) NULL) return(MagickFalse); TagToFilterModuleName("analyze",filename); (void) GetMagickModulePath(filename,MagickImageFilterModule,module_path, exception); GetPathComponent(module_path,HeadPath,path); (void) FormatLocaleFile(file,"\nPath: %s\n\n",path); (void) FormatLocaleFile(file,"Image Filter\n"); (void) FormatLocaleFile(file, "-------------------------------------------------" "------------------------------\n"); for (i=0; i < (ssize_t) number_modules; i++) { (void) FormatLocaleFile(file,"%s",modules[i]); (void) FormatLocaleFile(file,"\n"); } (void) fflush(file); /* Relinquish resources. */ for (i=0; i < (ssize_t) number_modules; i++) modules[i]=DestroyString(modules[i]); modules=(char **) RelinquishMagickMemory(modules); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o d u l e C o m p o n e n t G e n e s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModuleComponentGenesis() instantiates the module component. % % The format of the ModuleComponentGenesis method is: % % MagickBooleanType ModuleComponentGenesis(void) % */ MagickExport MagickBooleanType ModuleComponentGenesis(void) { ExceptionInfo *exception; MagickBooleanType status; if (module_semaphore == (SemaphoreInfo *) NULL) module_semaphore=AllocateSemaphoreInfo(); exception=AcquireExceptionInfo(); status=IsModuleTreeInstantiated(exception); exception=DestroyExceptionInfo(exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o d u l e C o m p o n e n t T e r m i n u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModuleComponentTerminus() destroys the module component. % % The format of the ModuleComponentTerminus method is: % % ModuleComponentTerminus(void) % */ MagickExport void ModuleComponentTerminus(void) { if (module_semaphore == (SemaphoreInfo *) NULL) ActivateSemaphoreInfo(&module_semaphore); DestroyModuleList(); DestroySemaphoreInfo(&module_semaphore); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O p e n M o d u l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpenModule() loads a module, and invokes its registration module. It % returns MagickTrue on success, and MagickFalse if there is an error. 
% % The format of the OpenModule module is: % % MagickBooleanType OpenModule(const char *module,ExceptionInfo *exception) % % A description of each parameter follows: % % o module: a character string that indicates the module to load. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType OpenModule(const char *module, ExceptionInfo *exception) { char filename[MaxTextExtent], module_name[MaxTextExtent], name[MaxTextExtent], path[MaxTextExtent]; MagickBooleanType status; ModuleHandle handle; ModuleInfo *module_info; register const CoderInfo *p; size_t signature; /* Assign module name from alias. */ assert(module != (const char *) NULL); module_info=(ModuleInfo *) GetModuleInfo(module,exception); if (module_info != (ModuleInfo *) NULL) return(MagickTrue); (void) CopyMagickString(module_name,module,MaxTextExtent); p=GetCoderInfo(module,exception); if (p != (CoderInfo *) NULL) (void) CopyMagickString(module_name,p->name,MaxTextExtent); if (GetValueFromSplayTree(module_list,module_name) != (void *) NULL) return(MagickTrue); /* module already opened, return */ /* Locate module. */ handle=(ModuleHandle) NULL; TagToCoderModuleName(module_name,filename); (void) LogMagickEvent(ModuleEvent,GetMagickModule(), "Searching for module \"%s\" using filename \"%s\"",module_name,filename); *path='\0'; status=GetMagickModulePath(filename,MagickImageCoderModule,path,exception); if (status == MagickFalse) return(MagickFalse); /* Load module */ (void) LogMagickEvent(ModuleEvent,GetMagickModule(), "Opening module at path \"%s\"",path); handle=(ModuleHandle) lt_dlopen(path); if (handle == (ModuleHandle) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ModuleError, "UnableToLoadModule","`%s': %s",path,lt_dlerror()); return(MagickFalse); } /* Register module. */ module_info=AcquireModuleInfo(path,module_name); module_info->handle=handle; if (RegisterModule(module_info,exception) == (ModuleInfo *) NULL) return(MagickFalse); /* Define RegisterFORMATImage method. */ TagToModuleName(module_name,"Register%sImage",name); module_info->register_module=(size_t (*)(void)) lt_dlsym(handle,name); if (module_info->register_module == (size_t (*)(void)) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ModuleError, "UnableToRegisterImageFormat","`%s': %s",module_name,lt_dlerror()); return(MagickFalse); } (void) LogMagickEvent(ModuleEvent,GetMagickModule(), "Method \"%s\" in module \"%s\" at address %p",name,module_name, (void *) module_info->register_module); /* Define UnregisterFORMATImage method. 
*/ TagToModuleName(module_name,"Unregister%sImage",name); module_info->unregister_module=(void (*)(void)) lt_dlsym(handle,name); if (module_info->unregister_module == (void (*)(void)) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ModuleError, "UnableToRegisterImageFormat","`%s': %s",module_name,lt_dlerror()); return(MagickFalse); } (void) LogMagickEvent(ModuleEvent,GetMagickModule(), "Method \"%s\" in module \"%s\" at address %p",name,module_name, (void *) module_info->unregister_module); signature=module_info->register_module(); if (signature != MagickImageCoderSignature) { (void) ThrowMagickException(exception,GetMagickModule(),ModuleError, "ImageCoderSignatureMismatch","`%s': %8lx != %8lx",module_name, (unsigned long) signature,(unsigned long) MagickImageCoderSignature); return(MagickFalse); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O p e n M o d u l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpenModules() loads all available modules. % % The format of the OpenModules module is: % % MagickBooleanType OpenModules(ExceptionInfo *exception) % % A description of each parameter follows: % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType OpenModules(ExceptionInfo *exception) { char **modules; register ssize_t i; size_t number_modules; /* Load all modules. */ (void) GetMagickInfo((char *) NULL,exception); number_modules=0; modules=GetModuleList("*",MagickImageCoderModule,&number_modules,exception); if ((modules == (char **) NULL) || (*modules == (char *) NULL)) { if (modules != (char **) NULL) modules=(char **) RelinquishMagickMemory(modules); return(MagickFalse); } for (i=0; i < (ssize_t) number_modules; i++) (void) OpenModule(modules[i],exception); /* Relinquish resources. */ for (i=0; i < (ssize_t) number_modules; i++) modules[i]=DestroyString(modules[i]); modules=(char **) RelinquishMagickMemory(modules); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r M o d u l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterModule() adds an entry to the module list. It returns a pointer to % the registered entry on success. % % The format of the RegisterModule module is: % % ModuleInfo *RegisterModule(const ModuleInfo *module_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o info: a pointer to the registered entry is returned. % % o module_info: a pointer to the ModuleInfo structure to register. % % o exception: return any errors or warnings in this structure. 
% */ static const ModuleInfo *RegisterModule(const ModuleInfo *module_info, ExceptionInfo *exception) { MagickBooleanType status; assert(module_info != (ModuleInfo *) NULL); assert(module_info->signature == MagickSignature); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",module_info->tag); if (module_list == (SplayTreeInfo *) NULL) return((const ModuleInfo *) NULL); status=AddValueToSplayTree(module_list,module_info->tag,module_info); if (status == MagickFalse) (void) ThrowMagickException(exception,GetMagickModule(),ResourceLimitError, "MemoryAllocationFailed","`%s'",module_info->tag); return(module_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T a g T o C o d e r M o d u l e N a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TagToCoderModuleName() munges a module tag and obtains the filename of the % corresponding module. % % The format of the TagToCoderModuleName module is: % % char *TagToCoderModuleName(const char *tag,char *name) % % A description of each parameter follows: % % o tag: a character string representing the module tag. % % o name: return the module name here. % */ static void TagToCoderModuleName(const char *tag,char *name) { assert(tag != (char *) NULL); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",tag); assert(name != (char *) NULL); #if defined(MAGICKCORE_LTDL_DELEGATE) (void) FormatLocaleString(name,MaxTextExtent,"%s.la",tag); (void) LocaleLower(name); #else #if defined(MAGICKCORE_WINDOWS_SUPPORT) if (LocaleNCompare("IM_MOD_",tag,7) == 0) (void) CopyMagickString(name,tag,MaxTextExtent); else { #if defined(_DEBUG) (void) FormatLocaleString(name,MaxTextExtent,"IM_MOD_DB_%s_.dll",tag); #else (void) FormatLocaleString(name,MaxTextExtent,"IM_MOD_RL_%s_.dll",tag); #endif } #endif #endif } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T a g T o F i l t e r M o d u l e N a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TagToFilterModuleName() munges a module tag and returns the filename of the % corresponding filter module. % % The format of the TagToFilterModuleName module is: % % void TagToFilterModuleName(const char *tag,char name) % % A description of each parameter follows: % % o tag: a character string representing the module tag. % % o name: return the filter name here. % */ static void TagToFilterModuleName(const char *tag,char *name) { assert(tag != (char *) NULL); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",tag); assert(name != (char *) NULL); #if !defined(MAGICKCORE_LTDL_DELEGATE) (void) FormatLocaleString(name,MaxTextExtent,"%s.dll",tag); #else (void) FormatLocaleString(name,MaxTextExtent,"%s.la",tag); #endif } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T a g T o M o d u l e N a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TagToModuleName() munges the module tag name and returns an upper-case tag % name as the input string, and a user-provided format. % % The format of the TagToModuleName module is: % % TagToModuleName(const char *tag,const char *format,char *module) % % A description of each parameter follows: % % o tag: the module tag. % % o format: a sprintf-compatible format string containing %s where the % upper-case tag name is to be inserted. 
% % o module: pointer to a destination buffer for the formatted result. % */ static void TagToModuleName(const char *tag,const char *format,char *module) { char name[MaxTextExtent]; assert(tag != (const char *) NULL); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",tag); assert(format != (const char *) NULL); assert(module != (char *) NULL); (void) CopyMagickString(name,tag,MaxTextExtent); LocaleUpper(name); #if !defined(MAGICKCORE_NAMESPACE_PREFIX) (void) FormatLocaleString(module,MaxTextExtent,format,name); #else { char prefix_format[MaxTextExtent]; (void) FormatLocaleString(prefix_format,MaxTextExtent,"%s%s", MAGICKCORE_NAMESPACE_PREFIX,format); (void) FormatLocaleString(module,MaxTextExtent,prefix_format,name); } #endif } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r M o d u l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterModule() unloads a module, and invokes its de-registration module. % Returns MagickTrue on success, and MagickFalse if there is an error. % % The format of the UnregisterModule module is: % % MagickBooleanType UnregisterModule(const ModuleInfo *module_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o module_info: the module info. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType UnregisterModule(const ModuleInfo *module_info, ExceptionInfo *exception) { /* Locate and execute UnregisterFORMATImage module. */ assert(module_info != (const ModuleInfo *) NULL); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",module_info->tag); assert(exception != (ExceptionInfo *) NULL); if (module_info->unregister_module == NULL) return(MagickTrue); module_info->unregister_module(); if (lt_dlclose((ModuleHandle) module_info->handle) != 0) { (void) ThrowMagickException(exception,GetMagickModule(),ModuleWarning, "UnableToCloseModule","`%s': %s",module_info->tag,lt_dlerror()); return(MagickFalse); } return(MagickTrue); } #else #if !defined(MAGICKCORE_BUILD_MODULES) extern size_t analyzeImage(Image **,const int,const char **,ExceptionInfo *); #endif MagickExport MagickBooleanType ListModuleInfo(FILE *magick_unused(file), ExceptionInfo *magick_unused(exception)) { magick_unreferenced(file); magick_unreferenced(exception); return(MagickTrue); } MagickExport MagickBooleanType InvokeDynamicImageFilter(const char *tag, Image **image,const int argc,const char **argv,ExceptionInfo *exception) { PolicyRights rights; assert(image != (Image **) NULL); assert((*image)->signature == MagickSignature); if ((*image)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename); rights=ReadPolicyRights; if (IsRightsAuthorized(FilterPolicyDomain,rights,tag) == MagickFalse) { errno=EPERM; (void) ThrowMagickException(exception,GetMagickModule(),PolicyError, "NotAuthorized","`%s'",tag); return(MagickFalse); } #if defined(MAGICKCORE_BUILD_MODULES) (void) tag; (void) argc; (void) argv; (void) exception; #else { ImageFilterHandler *image_filter; image_filter=(ImageFilterHandler *) NULL; if (LocaleCompare("analyze",tag) == 0) image_filter=(ImageFilterHandler *) analyzeImage; if (image_filter == (ImageFilterHandler *) NULL) (void) ThrowMagickException(exception,GetMagickModule(),ModuleError, "UnableToLoadModule","`%s'",tag); else { size_t signature; if ((*image)->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Invoking \"%s\" 
static image filter",tag); signature=image_filter(image,argc,argv,exception); if ((*image)->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"\"%s\" completes", tag); if (signature != MagickImageFilterSignature) { (void) ThrowMagickException(exception,GetMagickModule(),ModuleError, "ImageFilterSignatureMismatch","`%s': %8lx != %8lx",tag, (unsigned long) signature,(unsigned long) MagickImageFilterSignature); return(MagickFalse); } } } #endif return(MagickTrue); } #endif
./CrossVul/dataset_final_sorted/CWE-400/c/bad_4773_1
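For context, here is a minimal sketch of how a client program might drive the filter-module machinery shown above. It assumes an ImageMagick 6 style build (the <magick/MagickCore.h> header, MaxTextExtent) with the stock "analyze" filter module installed; the source file name, the input path handling and the filter argv contents are illustrative placeholders, not part of the module source, and whether argv[0] repeats the tag is a convention of the individual filter.

/* analyze_demo.c - hypothetical caller of InvokeDynamicImageFilter() */
#include <stdio.h>
#include <magick/MagickCore.h>

int main(int argc, char **argv)
{
  ExceptionInfo *exception;
  Image *image;
  ImageInfo *image_info;
  MagickBooleanType status;
  const char *filter_argv[] = { "analyze" };

  if (argc < 2)
    {
      (void) fprintf(stderr, "usage: %s image\n", argv[0]);
      return 1;
    }
  MagickCoreGenesis(*argv, MagickFalse);
  exception = AcquireExceptionInfo();
  image_info = AcquireImageInfo();
  (void) CopyMagickString(image_info->filename, argv[1], MaxTextExtent);
  image = ReadImage(image_info, exception);
  if (image != (Image *) NULL)
    {
      /* Loads analyze.{la,dll} on demand via GetMagickModulePath()/lt_dlopen(),
       * or falls back to the static filter when modules are not built. */
      status = InvokeDynamicImageFilter("analyze", &image, 1, filter_argv,
        exception);
      (void) fprintf(stdout, "filter %s\n",
        status != MagickFalse ? "succeeded" : "failed");
      image = DestroyImage(image);
    }
  image_info = DestroyImageInfo(image_info);
  exception = DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return 0;
}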
crossvul-cpp_data_good_1252_0
// SPDX-License-Identifier: GPL-2.0-only /* * Crypto user configuration API. * * Copyright (C) 2011 secunet Security Networks AG * Copyright (C) 2011 Steffen Klassert <steffen.klassert@secunet.com> */ #include <linux/module.h> #include <linux/crypto.h> #include <linux/cryptouser.h> #include <linux/sched.h> #include <linux/security.h> #include <net/netlink.h> #include <net/net_namespace.h> #include <net/sock.h> #include <crypto/internal/skcipher.h> #include <crypto/internal/rng.h> #include <crypto/akcipher.h> #include <crypto/kpp.h> #include <crypto/internal/cryptouser.h> #include "internal.h" #define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x)) static DEFINE_MUTEX(crypto_cfg_mutex); struct crypto_dump_info { struct sk_buff *in_skb; struct sk_buff *out_skb; u32 nlmsg_seq; u16 nlmsg_flags; }; struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact) { struct crypto_alg *q, *alg = NULL; down_read(&crypto_alg_sem); list_for_each_entry(q, &crypto_alg_list, cra_list) { int match = 0; if (crypto_is_larval(q)) continue; if ((q->cra_flags ^ p->cru_type) & p->cru_mask) continue; if (strlen(p->cru_driver_name)) match = !strcmp(q->cra_driver_name, p->cru_driver_name); else if (!exact) match = !strcmp(q->cra_name, p->cru_name); if (!match) continue; if (unlikely(!crypto_mod_get(q))) continue; alg = q; break; } up_read(&crypto_alg_sem); return alg; } static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_cipher rcipher; memset(&rcipher, 0, sizeof(rcipher)); strscpy(rcipher.type, "cipher", sizeof(rcipher.type)); rcipher.blocksize = alg->cra_blocksize; rcipher.min_keysize = alg->cra_cipher.cia_min_keysize; rcipher.max_keysize = alg->cra_cipher.cia_max_keysize; return nla_put(skb, CRYPTOCFGA_REPORT_CIPHER, sizeof(rcipher), &rcipher); } static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_comp rcomp; memset(&rcomp, 0, sizeof(rcomp)); strscpy(rcomp.type, "compression", sizeof(rcomp.type)); return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(rcomp), &rcomp); } static int crypto_report_one(struct crypto_alg *alg, struct crypto_user_alg *ualg, struct sk_buff *skb) { memset(ualg, 0, sizeof(*ualg)); strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name)); strscpy(ualg->cru_driver_name, alg->cra_driver_name, sizeof(ualg->cru_driver_name)); strscpy(ualg->cru_module_name, module_name(alg->cra_module), sizeof(ualg->cru_module_name)); ualg->cru_type = 0; ualg->cru_mask = 0; ualg->cru_flags = alg->cra_flags; ualg->cru_refcnt = refcount_read(&alg->cra_refcnt); if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority)) goto nla_put_failure; if (alg->cra_flags & CRYPTO_ALG_LARVAL) { struct crypto_report_larval rl; memset(&rl, 0, sizeof(rl)); strscpy(rl.type, "larval", sizeof(rl.type)); if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL, sizeof(rl), &rl)) goto nla_put_failure; goto out; } if (alg->cra_type && alg->cra_type->report) { if (alg->cra_type->report(skb, alg)) goto nla_put_failure; goto out; } switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) { case CRYPTO_ALG_TYPE_CIPHER: if (crypto_report_cipher(skb, alg)) goto nla_put_failure; break; case CRYPTO_ALG_TYPE_COMPRESS: if (crypto_report_comp(skb, alg)) goto nla_put_failure; break; } out: return 0; nla_put_failure: return -EMSGSIZE; } static int crypto_report_alg(struct crypto_alg *alg, struct crypto_dump_info *info) { struct sk_buff *in_skb = info->in_skb; struct sk_buff *skb = info->out_skb; struct nlmsghdr *nlh; struct 
crypto_user_alg *ualg; int err = 0; nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq, CRYPTO_MSG_GETALG, sizeof(*ualg), info->nlmsg_flags); if (!nlh) { err = -EMSGSIZE; goto out; } ualg = nlmsg_data(nlh); err = crypto_report_one(alg, ualg, skb); if (err) { nlmsg_cancel(skb, nlh); goto out; } nlmsg_end(skb, nlh); out: return err; } static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs) { struct net *net = sock_net(in_skb->sk); struct crypto_user_alg *p = nlmsg_data(in_nlh); struct crypto_alg *alg; struct sk_buff *skb; struct crypto_dump_info info; int err; if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name)) return -EINVAL; alg = crypto_alg_match(p, 0); if (!alg) return -ENOENT; err = -ENOMEM; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) goto drop_alg; info.in_skb = in_skb; info.out_skb = skb; info.nlmsg_seq = in_nlh->nlmsg_seq; info.nlmsg_flags = 0; err = crypto_report_alg(alg, &info); drop_alg: crypto_mod_put(alg); if (err) { kfree_skb(skb); return err; } return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid); } static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb) { const size_t start_pos = cb->args[0]; size_t pos = 0; struct crypto_dump_info info; struct crypto_alg *alg; int res; info.in_skb = cb->skb; info.out_skb = skb; info.nlmsg_seq = cb->nlh->nlmsg_seq; info.nlmsg_flags = NLM_F_MULTI; down_read(&crypto_alg_sem); list_for_each_entry(alg, &crypto_alg_list, cra_list) { if (pos >= start_pos) { res = crypto_report_alg(alg, &info); if (res == -EMSGSIZE) break; if (res) goto out; } pos++; } cb->args[0] = pos; res = skb->len; out: up_read(&crypto_alg_sem); return res; } static int crypto_dump_report_done(struct netlink_callback *cb) { return 0; } static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs) { struct crypto_alg *alg; struct crypto_user_alg *p = nlmsg_data(nlh); struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL]; LIST_HEAD(list); if (!netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name)) return -EINVAL; if (priority && !strlen(p->cru_driver_name)) return -EINVAL; alg = crypto_alg_match(p, 1); if (!alg) return -ENOENT; down_write(&crypto_alg_sem); crypto_remove_spawns(alg, &list, NULL); if (priority) alg->cra_priority = nla_get_u32(priority); up_write(&crypto_alg_sem); crypto_mod_put(alg); crypto_remove_final(&list); return 0; } static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs) { struct crypto_alg *alg; struct crypto_user_alg *p = nlmsg_data(nlh); int err; if (!netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name)) return -EINVAL; alg = crypto_alg_match(p, 1); if (!alg) return -ENOENT; /* We can not unregister core algorithms such as aes-generic. * We would loose the reference in the crypto_alg_list to this algorithm * if we try to unregister. Unregistering such an algorithm without * removing the module is not possible, so we restrict to crypto * instances that are build from templates. 
*/ err = -EINVAL; if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE)) goto drop_alg; err = -EBUSY; if (refcount_read(&alg->cra_refcnt) > 2) goto drop_alg; err = crypto_unregister_instance((struct crypto_instance *)alg); drop_alg: crypto_mod_put(alg); return err; } static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs) { int exact = 0; const char *name; struct crypto_alg *alg; struct crypto_user_alg *p = nlmsg_data(nlh); struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL]; if (!netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name)) return -EINVAL; if (strlen(p->cru_driver_name)) exact = 1; if (priority && !exact) return -EINVAL; alg = crypto_alg_match(p, exact); if (alg) { crypto_mod_put(alg); return -EEXIST; } if (strlen(p->cru_driver_name)) name = p->cru_driver_name; else name = p->cru_name; alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask); if (IS_ERR(alg)) return PTR_ERR(alg); down_write(&crypto_alg_sem); if (priority) alg->cra_priority = nla_get_u32(priority); up_write(&crypto_alg_sem); crypto_mod_put(alg); return 0; } static int crypto_del_rng(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs) { if (!netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; return crypto_del_default_rng(); } #define MSGSIZE(type) sizeof(struct type) static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = { [CRYPTO_MSG_NEWALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), [CRYPTO_MSG_DELALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), [CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), [CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), [CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = 0, [CRYPTO_MSG_GETSTAT - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), }; static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = { [CRYPTOCFGA_PRIORITY_VAL] = { .type = NLA_U32}, }; #undef MSGSIZE static const struct crypto_link { int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **); int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); } crypto_dispatch[CRYPTO_NR_MSGTYPES] = { [CRYPTO_MSG_NEWALG - CRYPTO_MSG_BASE] = { .doit = crypto_add_alg}, [CRYPTO_MSG_DELALG - CRYPTO_MSG_BASE] = { .doit = crypto_del_alg}, [CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = { .doit = crypto_update_alg}, [CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = { .doit = crypto_report, .dump = crypto_dump_report, .done = crypto_dump_report_done}, [CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = { .doit = crypto_del_rng }, [CRYPTO_MSG_GETSTAT - CRYPTO_MSG_BASE] = { .doit = crypto_reportstat}, }; static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct nlattr *attrs[CRYPTOCFGA_MAX+1]; const struct crypto_link *link; int type, err; type = nlh->nlmsg_type; if (type > CRYPTO_MSG_MAX) return -EINVAL; type -= CRYPTO_MSG_BASE; link = &crypto_dispatch[type]; if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) && (nlh->nlmsg_flags & NLM_F_DUMP))) { struct crypto_alg *alg; unsigned long dump_alloc = 0; if (link->dump == NULL) return -EINVAL; down_read(&crypto_alg_sem); list_for_each_entry(alg, &crypto_alg_list, cra_list) dump_alloc += CRYPTO_REPORT_MAXSIZE; up_read(&crypto_alg_sem); { struct netlink_dump_control c = { .dump = link->dump, .done = link->done, .min_dump_alloc = min(dump_alloc, 65535UL), }; err = netlink_dump_start(net->crypto_nlsk, skb, nlh, &c); } return err; } err 
= nlmsg_parse_deprecated(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX, crypto_policy, extack); if (err < 0) return err; if (link->doit == NULL) return -EINVAL; return link->doit(skb, nlh, attrs); } static void crypto_netlink_rcv(struct sk_buff *skb) { mutex_lock(&crypto_cfg_mutex); netlink_rcv_skb(skb, &crypto_user_rcv_msg); mutex_unlock(&crypto_cfg_mutex); } static int __net_init crypto_netlink_init(struct net *net) { struct netlink_kernel_cfg cfg = { .input = crypto_netlink_rcv, }; net->crypto_nlsk = netlink_kernel_create(net, NETLINK_CRYPTO, &cfg); return net->crypto_nlsk == NULL ? -ENOMEM : 0; } static void __net_exit crypto_netlink_exit(struct net *net) { netlink_kernel_release(net->crypto_nlsk); net->crypto_nlsk = NULL; } static struct pernet_operations crypto_netlink_net_ops = { .init = crypto_netlink_init, .exit = crypto_netlink_exit, }; static int __init crypto_user_init(void) { return register_pernet_subsys(&crypto_netlink_net_ops); } static void __exit crypto_user_exit(void) { unregister_pernet_subsys(&crypto_netlink_net_ops); } module_init(crypto_user_init); module_exit(crypto_user_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>"); MODULE_DESCRIPTION("Crypto userspace configuration API"); MODULE_ALIAS("net-pf-16-proto-21");
./CrossVul/dataset_final_sorted/CWE-400/c/good_1252_0
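A small userspace sketch of the GETALG path handled by crypto_report() above. It assumes a kernel built with CONFIG_CRYPTO_USER and an algorithm registered under the name "cbc(aes)" (both assumptions, swap in any name you know is present); it sends a single CRYPTO_MSG_GETALG request, skips the NLM_F_DUMP multi-part case, and does not decode NLMSG_ERROR replies or the CRYPTOCFGA_* attributes that follow the fixed struct.

/* getalg_demo.c - query one algorithm over NETLINK_CRYPTO */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/cryptouser.h>

int main(void)
{
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr hdr;
		struct crypto_user_alg alg;
	} req;
	char buf[8192];
	ssize_t len;
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);
	if (fd < 0) {
		perror("socket(NETLINK_CRYPTO)");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.hdr.nlmsg_len = NLMSG_LENGTH(sizeof(req.alg));
	req.hdr.nlmsg_type = CRYPTO_MSG_GETALG;
	req.hdr.nlmsg_flags = NLM_F_REQUEST;
	/* cru_name must stay NUL terminated, cf. null_terminated() above */
	strncpy(req.alg.cru_name, "cbc(aes)", sizeof(req.alg.cru_name) - 1);

	if (sendto(fd, &req, req.hdr.nlmsg_len, 0,
		   (struct sockaddr *)&kernel, sizeof(kernel)) < 0) {
		perror("sendto");
		return 1;
	}

	len = recv(fd, buf, sizeof(buf), 0);
	if (len > 0) {
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		if (NLMSG_OK(nlh, (size_t)len) &&
		    nlh->nlmsg_type == CRYPTO_MSG_GETALG) {
			struct crypto_user_alg *ualg = NLMSG_DATA(nlh);

			printf("name %s, driver %s, refcnt %u\n",
			       ualg->cru_name, ualg->cru_driver_name,
			       ualg->cru_refcnt);
		}
	}
	close(fd);
	return 0;
}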
crossvul-cpp_data_bad_5356_5
/* * IPV6 GSO/GRO offload support * Linux INET6 implementation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/socket.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/printk.h> #include <net/protocol.h> #include <net/ipv6.h> #include "ip6_offload.h" static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto) { const struct net_offload *ops = NULL; for (;;) { struct ipv6_opt_hdr *opth; int len; if (proto != NEXTHDR_HOP) { ops = rcu_dereference(inet6_offloads[proto]); if (unlikely(!ops)) break; if (!(ops->flags & INET6_PROTO_GSO_EXTHDR)) break; } if (unlikely(!pskb_may_pull(skb, 8))) break; opth = (void *)skb->data; len = ipv6_optlen(opth); if (unlikely(!pskb_may_pull(skb, len))) break; opth = (void *)skb->data; proto = opth->nexthdr; __skb_pull(skb, len); } return proto; } static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); struct ipv6hdr *ipv6h; const struct net_offload *ops; int proto; struct frag_hdr *fptr; unsigned int unfrag_ip6hlen; u8 *prevhdr; int offset = 0; bool encap, udpfrag; int nhoff; if (unlikely(skb_shinfo(skb)->gso_type & ~(SKB_GSO_TCPV4 | SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_TCP_ECN | SKB_GSO_GRE | SKB_GSO_GRE_CSUM | SKB_GSO_IPIP | SKB_GSO_SIT | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM | SKB_GSO_TUNNEL_REMCSUM | SKB_GSO_TCPV6 | 0))) goto out; skb_reset_network_header(skb); nhoff = skb_network_header(skb) - skb_mac_header(skb); if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) goto out; encap = SKB_GSO_CB(skb)->encap_level > 0; if (encap) features &= skb->dev->hw_enc_features; SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h); ipv6h = ipv6_hdr(skb); __skb_pull(skb, sizeof(*ipv6h)); segs = ERR_PTR(-EPROTONOSUPPORT); proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); if (skb->encapsulation && skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP)) udpfrag = proto == IPPROTO_UDP && encap; else udpfrag = proto == IPPROTO_UDP && !skb->encapsulation; ops = rcu_dereference(inet6_offloads[proto]); if (likely(ops && ops->callbacks.gso_segment)) { skb_reset_transport_header(skb); segs = ops->callbacks.gso_segment(skb, features); } if (IS_ERR(segs)) goto out; for (skb = segs; skb; skb = skb->next) { ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff); ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h)); skb->network_header = (u8 *)ipv6h - skb->head; if (udpfrag) { unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen); fptr->frag_off = htons(offset); if (skb->next) fptr->frag_off |= htons(IP6_MF); offset += (ntohs(ipv6h->payload_len) - sizeof(struct frag_hdr)); } if (encap) skb_reset_inner_headers(skb); } out: return segs; } /* Return the total length of all the extension hdrs, following the same * logic in ipv6_gso_pull_exthdrs() when parsing ext-hdrs. 
*/ static int ipv6_exthdrs_len(struct ipv6hdr *iph, const struct net_offload **opps) { struct ipv6_opt_hdr *opth = (void *)iph; int len = 0, proto, optlen = sizeof(*iph); proto = iph->nexthdr; for (;;) { if (proto != NEXTHDR_HOP) { *opps = rcu_dereference(inet6_offloads[proto]); if (unlikely(!(*opps))) break; if (!((*opps)->flags & INET6_PROTO_GSO_EXTHDR)) break; } opth = (void *)opth + optlen; optlen = ipv6_optlen(opth); len += optlen; proto = opth->nexthdr; } return len; } static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, struct sk_buff *skb) { const struct net_offload *ops; struct sk_buff **pp = NULL; struct sk_buff *p; struct ipv6hdr *iph; unsigned int nlen; unsigned int hlen; unsigned int off; u16 flush = 1; int proto; off = skb_gro_offset(skb); hlen = off + sizeof(*iph); iph = skb_gro_header_fast(skb, off); if (skb_gro_header_hard(skb, hlen)) { iph = skb_gro_header_slow(skb, hlen, off); if (unlikely(!iph)) goto out; } skb_set_network_header(skb, off); skb_gro_pull(skb, sizeof(*iph)); skb_set_transport_header(skb, skb_gro_offset(skb)); flush += ntohs(iph->payload_len) != skb_gro_len(skb); rcu_read_lock(); proto = iph->nexthdr; ops = rcu_dereference(inet6_offloads[proto]); if (!ops || !ops->callbacks.gro_receive) { __pskb_pull(skb, skb_gro_offset(skb)); proto = ipv6_gso_pull_exthdrs(skb, proto); skb_gro_pull(skb, -skb_transport_offset(skb)); skb_reset_transport_header(skb); __skb_push(skb, skb_gro_offset(skb)); ops = rcu_dereference(inet6_offloads[proto]); if (!ops || !ops->callbacks.gro_receive) goto out_unlock; iph = ipv6_hdr(skb); } NAPI_GRO_CB(skb)->proto = proto; flush--; nlen = skb_network_header_len(skb); for (p = *head; p; p = p->next) { const struct ipv6hdr *iph2; __be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */ if (!NAPI_GRO_CB(p)->same_flow) continue; iph2 = (struct ipv6hdr *)(p->data + off); first_word = *(__be32 *)iph ^ *(__be32 *)iph2; /* All fields must match except length and Traffic Class. * XXX skbs on the gro_list have all been parsed and pulled * already so we don't need to compare nlen * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops))) * memcmp() alone below is suffcient, right? */ if ((first_word & htonl(0xF00FFFFF)) || memcmp(&iph->nexthdr, &iph2->nexthdr, nlen - offsetof(struct ipv6hdr, nexthdr))) { NAPI_GRO_CB(p)->same_flow = 0; continue; } /* flush if Traffic Class fields are different */ NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000)); NAPI_GRO_CB(p)->flush |= flush; /* Clear flush_id, there's really no concept of ID in IPv6. 
*/ NAPI_GRO_CB(p)->flush_id = 0; } NAPI_GRO_CB(skb)->flush |= flush; skb_gro_postpull_rcsum(skb, iph, nlen); pp = ops->callbacks.gro_receive(head, skb); out_unlock: rcu_read_unlock(); out: NAPI_GRO_CB(skb)->flush |= flush; return pp; } static int ipv6_gro_complete(struct sk_buff *skb, int nhoff) { const struct net_offload *ops; struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff); int err = -ENOSYS; if (skb->encapsulation) skb_set_inner_network_header(skb, nhoff); iph->payload_len = htons(skb->len - nhoff - sizeof(*iph)); rcu_read_lock(); nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops); if (WARN_ON(!ops || !ops->callbacks.gro_complete)) goto out_unlock; err = ops->callbacks.gro_complete(skb, nhoff); out_unlock: rcu_read_unlock(); return err; } static int sit_gro_complete(struct sk_buff *skb, int nhoff) { skb->encapsulation = 1; skb_shinfo(skb)->gso_type |= SKB_GSO_SIT; return ipv6_gro_complete(skb, nhoff); } static struct packet_offload ipv6_packet_offload __read_mostly = { .type = cpu_to_be16(ETH_P_IPV6), .callbacks = { .gso_segment = ipv6_gso_segment, .gro_receive = ipv6_gro_receive, .gro_complete = ipv6_gro_complete, }, }; static const struct net_offload sit_offload = { .callbacks = { .gso_segment = ipv6_gso_segment, .gro_receive = ipv6_gro_receive, .gro_complete = sit_gro_complete, }, }; static int __init ipv6_offload_init(void) { if (tcpv6_offload_init() < 0) pr_crit("%s: Cannot add TCP protocol offload\n", __func__); if (udp_offload_init() < 0) pr_crit("%s: Cannot add UDP protocol offload\n", __func__); if (ipv6_exthdrs_offload_init() < 0) pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__); dev_add_offload(&ipv6_packet_offload); inet_add_offload(&sit_offload, IPPROTO_IPV6); return 0; } fs_initcall(ipv6_offload_init);
./CrossVul/dataset_final_sorted/CWE-400/c/bad_5356_5
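As a plain userspace illustration of the extension-header walk that ipv6_gso_pull_exthdrs() and ipv6_exthdrs_len() perform above, the sketch below applies the same (hdrlen + 1) * 8 rule used by ipv6_optlen(). It deliberately simplifies the kernel logic: it only follows hop-by-hop and destination options (the kernel keys on NEXTHDR_HOP and the INET6_PROTO_GSO_EXTHDR offload flag), it adds explicit bounds checks the kernel loop does not have, and the sample packet bytes are made up.

/* exthdr_walk_demo.c - illustrative only, not kernel code */
#include <stdint.h>
#include <stdio.h>
#include <netinet/in.h>   /* IPPROTO_HOPOPTS, IPPROTO_DSTOPTS, IPPROTO_TCP */

struct opt_hdr {                  /* mirrors struct ipv6_opt_hdr */
	uint8_t nexthdr;
	uint8_t hdrlen;           /* 8-octet units, not counting the first 8 */
};

static int exthdrs_len(const uint8_t *buf, size_t buflen, int proto)
{
	size_t off = 0;

	while (proto == IPPROTO_HOPOPTS || proto == IPPROTO_DSTOPTS) {
		const struct opt_hdr *opth;
		size_t optlen;

		if (off + sizeof(*opth) > buflen)
			return -1;                    /* truncated input */
		opth = (const struct opt_hdr *)(buf + off);
		optlen = (opth->hdrlen + 1) * 8;      /* ipv6_optlen() rule */
		if (off + optlen > buflen)
			return -1;
		proto = opth->nexthdr;
		off += optlen;
	}
	return (int)off;                              /* bytes of ext headers */
}

int main(void)
{
	/* hop-by-hop (8 bytes) followed by destination options (16 bytes),
	 * then TCP; option TLV contents are ignored by the walk. */
	uint8_t pkt[24] = {
		IPPROTO_DSTOPTS, 0, 0, 0, 0, 0, 0, 0,
		IPPROTO_TCP,     1, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0
	};

	printf("extension header bytes: %d\n",
	       exthdrs_len(pkt, sizeof(pkt), IPPROTO_HOPOPTS));
	return 0;
}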
crossvul-cpp_data_bad_1273_1
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/slab.h> #include "dm_services.h" #include "link_encoder.h" #include "stream_encoder.h" #include "resource.h" #include "dce110/dce110_resource.h" #include "include/irq_service_interface.h" #include "dce/dce_audio.h" #include "dce110/dce110_timing_generator.h" #include "irq/dce110/irq_service_dce110.h" #include "dce110/dce110_timing_generator_v.h" #include "dce/dce_link_encoder.h" #include "dce/dce_stream_encoder.h" #include "dce/dce_mem_input.h" #include "dce110/dce110_mem_input_v.h" #include "dce/dce_ipp.h" #include "dce/dce_transform.h" #include "dce110/dce110_transform_v.h" #include "dce/dce_opp.h" #include "dce110/dce110_opp_v.h" #include "dce/dce_clock_source.h" #include "dce/dce_hwseq.h" #include "dce110/dce110_hw_sequencer.h" #include "dce/dce_aux.h" #include "dce/dce_abm.h" #include "dce/dce_dmcu.h" #include "dce/dce_i2c.h" #define DC_LOGGER \ dc->ctx->logger #include "dce110/dce110_compressor.h" #include "reg_helper.h" #include "dce/dce_11_0_d.h" #include "dce/dce_11_0_sh_mask.h" #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT #include "gmc/gmc_8_2_d.h" #include "gmc/gmc_8_2_sh_mask.h" #endif #ifndef mmDP_DPHY_INTERNAL_CTRL #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7 #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7 #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7 #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7 #endif #ifndef mmBIOS_SCRATCH_2 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF #endif #ifndef mmDP_DPHY_BS_SR_SWAP_CNTL #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC #endif #ifndef mmDP_DPHY_FAST_TRAINING #define mmDP_DPHY_FAST_TRAINING 0x4ABC #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC #define 
mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC #endif #ifndef DPHY_RX_FAST_TRAINING_CAPABLE #define DPHY_RX_FAST_TRAINING_CAPABLE 0x1 #endif static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = { { .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL), } }; /* set register offset */ #define SR(reg_name)\ .reg_name = mm ## reg_name /* set register offset with instance */ #define SRI(reg_name, block, id)\ .reg_name = mm ## block ## id ## _ ## reg_name static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCE110_COMMON_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { DMCU_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_dmcu_mask dmcu_mask = { DMCU_MASK_SH_LIST_DCE110(_MASK) }; static const struct dce_abm_registers abm_regs = { ABM_DCE110_COMMON_REG_LIST() }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCE110(_MASK) }; #define ipp_regs(id)\ [id] = {\ IPP_DCE110_REG_LIST_DCE_BASE(id)\ } static const struct dce_ipp_registers ipp_regs[] = { ipp_regs(0), ipp_regs(1), ipp_regs(2) }; static const struct dce_ipp_shift ipp_shift = { IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce_ipp_mask ipp_mask = { IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; #define transform_regs(id)\ [id] = {\ XFM_COMMON_REG_LIST_DCE110(id)\ } static const struct dce_transform_registers xfm_regs[] = { transform_regs(0), transform_regs(1), transform_regs(2) }; static const struct dce_transform_shift xfm_shift = { XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_transform_mask xfm_mask = { XFM_COMMON_MASK_SH_LIST_DCE110(_MASK) }; #define aux_regs(id)\ [id] = {\ AUX_REG_LIST(id)\ } static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3), aux_regs(4), aux_regs(5) }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3), hpd_regs(4), hpd_regs(5) }; #define link_regs(id)\ [id] = {\ LE_DCE110_REG_LIST(id)\ } static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(0), link_regs(1), link_regs(2), link_regs(3), link_regs(4), link_regs(5), link_regs(6), }; #define stream_enc_regs(id)\ [id] = {\ SE_COMMON_REG_LIST(id),\ .TMDS_CNTL = 0,\ } static const struct dce110_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2) }; static const struct dce_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE110(_MASK) }; #define opp_regs(id)\ [id] = {\ OPP_DCE_110_REG_LIST(id),\ } static const struct dce_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), opp_regs(3), opp_regs(4), opp_regs(5) }; static const 
struct dce_opp_shift opp_shift = { OPP_COMMON_MASK_SH_LIST_DCE_110(__SHIFT) }; static const struct dce_opp_mask opp_mask = { OPP_COMMON_MASK_SH_LIST_DCE_110(_MASK) }; #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ .AUX_RESET_MASK = 0 \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4), aux_engine_regs(5) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), audio_regs(2), audio_regs(3), audio_regs(4), audio_regs(5), audio_regs(6), }; static const struct dce_audio_shift audio_shift = { AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { AUD_COMMON_MASK_SH_LIST(_MASK) }; /* AG TBD Needs to be reduced back to 3 pipes once dce10 hw sequencer implemented. */ #define clk_src_regs(id)\ [id] = {\ CS_COMMON_REG_LIST_DCE_100_110(id),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0), clk_src_regs(1), clk_src_regs(2) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; static const struct bios_registers bios_regs = { .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; static const struct resource_caps carrizo_resource_cap = { .num_timing_generator = 3, .num_video_plane = 1, .num_audio = 3, .num_stream_encoder = 3, .num_pll = 2, .num_ddc = 3, }; static const struct resource_caps stoney_resource_cap = { .num_timing_generator = 2, .num_video_plane = 1, .num_audio = 3, .num_stream_encoder = 3, .num_pll = 2, .num_ddc = 3, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCE_RGB, .blends_with_below = true, .blends_with_above = true, .per_pixel_alpha = 1, .pixel_format_support = { .argb8888 = true, .nv12 = false, .fp16 = false }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 1, .fp16 = 1 }, .max_downscale_factor = { .argb8888 = 250, .nv12 = 1, .fp16 = 1 } }; static const struct dc_plane_cap underlay_plane_cap = { .type = DC_PLANE_TYPE_DCE_UNDERLAY, .blends_with_above = true, .per_pixel_alpha = 1, .pixel_format_support = { .argb8888 = false, .nv12 = true, .fp16 = false }, .max_upscale_factor = { .argb8888 = 1, .nv12 = 16000, .fp16 = 1 }, .max_downscale_factor = { .argb8888 = 1, .nv12 = 250, .fp16 = 1 } }; #define CTX ctx #define REG(reg) mm ## reg #ifndef mmCC_DC_HDMI_STRAPS #define mmCC_DC_HDMI_STRAPS 0x4819 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { REG_GET_2(CC_DC_HDMI_STRAPS, HDMI_DISABLE, &straps->hdmi_disable, AUDIO_STREAM_NUMBER, &straps->audio_stream_number); REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio); } static struct audio *create_audio( struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } static struct timing_generator *dce110_timing_generator_create( struct dc_context *ctx, uint32_t instance, const struct dce110_timing_generator_offsets *offsets) { struct dce110_timing_generator *tg110 = kzalloc(sizeof(struct dce110_timing_generator), 
GFP_KERNEL); if (!tg110) return NULL; dce110_timing_generator_construct(tg110, ctx, instance, offsets); return &tg110->base; } static struct stream_encoder *dce110_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dce110_stream_encoder *enc110 = kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); if (!enc110) return NULL; dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc110->base; } #define SRII(reg_name, block, id)\ .reg_name[id] = mm ## block ## id ## _ ## reg_name static const struct dce_hwseq_registers hwseq_stoney_reg = { HWSEQ_ST_REG_LIST() }; static const struct dce_hwseq_registers hwseq_cz_reg = { HWSEQ_CZ_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCE11_MASK_SH_LIST(__SHIFT), }; static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCE11_MASK_SH_LIST(_MASK), }; static struct dce_hwseq *dce110_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = ASIC_REV_IS_STONEY(ctx->asic_id.hw_internal_rev) ? &hwseq_stoney_reg : &hwseq_cz_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; hws->wa.blnd_crtc_trigger = true; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, .create_stream_encoder = dce110_stream_encoder_create, .create_hwseq = dce110_hwseq_create, }; #define mi_inst_regs(id) { \ MI_DCE11_REG_LIST(id), \ .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \ } static const struct dce_mem_input_registers mi_regs[] = { mi_inst_regs(0), mi_inst_regs(1), mi_inst_regs(2), }; static const struct dce_mem_input_shift mi_shifts = { MI_DCE11_MASK_SH_LIST(__SHIFT), .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT }; static const struct dce_mem_input_mask mi_masks = { MI_DCE11_MASK_SH_LIST(_MASK), .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK }; static struct mem_input *dce110_mem_input_create( struct dc_context *ctx, uint32_t inst) { struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), GFP_KERNEL); if (!dce_mi) { BREAK_TO_DEBUGGER(); return NULL; } dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); dce_mi->wa.single_head_rdreq_dmif_limit = 3; return &dce_mi->base; } static void dce110_transform_destroy(struct transform **xfm) { kfree(TO_DCE_TRANSFORM(*xfm)); *xfm = NULL; } static struct transform *dce110_transform_create( struct dc_context *ctx, uint32_t inst) { struct dce_transform *transform = kzalloc(sizeof(struct dce_transform), GFP_KERNEL); if (!transform) return NULL; dce_transform_construct(transform, ctx, inst, &xfm_regs[inst], &xfm_shift, &xfm_mask); return &transform->base; } static struct input_pixel_processor *dce110_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); return NULL; } dce_ipp_construct(ipp, ctx, inst, &ipp_regs[inst], &ipp_shift, &ipp_mask); return &ipp->base; } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 300000, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true }; static struct link_encoder *dce110_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); if (!enc110) 
return NULL; dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; } static struct output_pixel_processor *dce110_opp_create( struct dc_context *ctx, uint32_t inst) { struct dce110_opp *opp = kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); if (!opp) return NULL; dce110_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } struct dce_aux *dce110_aux_engine_create( struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst]); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5), i2c_inst_regs(6), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) }; struct dce_i2c_hw *dce110_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dce100_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } struct clock_source *dce110_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dce110_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } BREAK_TO_DEBUGGER(); return NULL; } void dce110_clock_source_destroy(struct clock_source **clk_src) { struct dce110_clk_src *dce110_clk_src; if (!clk_src) return; dce110_clk_src = TO_DCE110_CLK_SRC(*clk_src); kfree(dce110_clk_src->dp_ss_params); kfree(dce110_clk_src->hdmi_ss_params); kfree(dce110_clk_src->dvi_ss_params); kfree(dce110_clk_src); *clk_src = NULL; } static void destruct(struct dce110_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.opps[i] != NULL) dce110_opp_destroy(&pool->base.opps[i]); if (pool->base.transforms[i] != NULL) dce110_transform_destroy(&pool->base.transforms[i]); if (pool->base.ipps[i] != NULL) dce_ipp_destroy(&pool->base.ipps[i]); if (pool->base.mis[i] != NULL) { kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); pool->base.mis[i] = NULL; } if (pool->base.timing_generators[i] != NULL) { kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); } for (i = 0; i < 
pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) { dce110_clock_source_destroy(&pool->base.clock_sources[i]); } } if (pool->base.dp_clock_source != NULL) dce110_clock_source_destroy(&pool->base.dp_clock_source); for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i] != NULL) { dce_aud_destroy(&pool->base.audios[i]); } } if (pool->base.abm != NULL) dce_abm_destroy(&pool->base.abm); if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); } } static void get_pixel_clock_parameters( const struct pipe_ctx *pipe_ctx, struct pixel_clk_params *pixel_clk_params) { const struct dc_stream_state *stream = pipe_ctx->stream; /*TODO: is this halved for YCbCr 420? in that case we might want to move * the pixel clock normalization for hdmi up to here instead of doing it * in pll_adjust_pix_clk */ pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; pixel_clk_params->encoder_object_id = stream->link->link_enc->id; pixel_clk_params->signal_type = pipe_ctx->stream->signal; pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; /* TODO: un-hardcode*/ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * LINK_RATE_REF_FREQ_IN_KHZ; pixel_clk_params->flags.ENABLE_SS = 0; pixel_clk_params->color_depth = stream->timing.display_color_depth; pixel_clk_params->flags.DISPLAY_BLANKED = 1; pixel_clk_params->flags.SUPPORT_YCBCR420 = (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420); pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding; if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) { pixel_clk_params->color_depth = COLOR_DEPTH_888; } if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) { pixel_clk_params->requested_pix_clk_100hz = pixel_clk_params->requested_pix_clk_100hz / 2; } if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) pixel_clk_params->requested_pix_clk_100hz *= 2; } void dce110_resource_build_pipe_hw_param(struct pipe_ctx *pipe_ctx) { get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params); pipe_ctx->clock_source->funcs->get_pix_clk_dividers( pipe_ctx->clock_source, &pipe_ctx->stream_res.pix_clk_params, &pipe_ctx->pll_settings); resource_build_bit_depth_reduction_params(pipe_ctx->stream, &pipe_ctx->stream->bit_depth_params); pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding; } static bool is_surface_pixel_format_supported(struct pipe_ctx *pipe_ctx, unsigned int underlay_idx) { if (pipe_ctx->pipe_idx != underlay_idx) return true; if (!pipe_ctx->plane_state) return false; if (pipe_ctx->plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) return false; return true; } static enum dc_status build_mapped_resource( const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) { struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream); if (!pipe_ctx) return DC_ERROR_UNEXPECTED; if (!is_surface_pixel_format_supported(pipe_ctx, dc->res_pool->underlay_pipe_index)) return DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED; dce110_resource_build_pipe_hw_param(pipe_ctx); /* TODO: validate audio ASIC caps, encoder */ resource_build_info_frame(pipe_ctx); return DC_OK; } static bool dce110_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate) { bool result = false; DC_LOG_BANDWIDTH_CALCS( "%s: start", __func__); if (bw_calcs( dc->ctx, dc->bw_dceip, dc->bw_vbios, context->res_ctx.pipe_ctx, 
dc->res_pool->pipe_count, &context->bw_ctx.bw.dce)) result = true; if (!result) DC_LOG_BANDWIDTH_VALIDATION("%s: %dx%d@%d Bandwidth validation failed!\n", __func__, context->streams[0]->timing.h_addressable, context->streams[0]->timing.v_addressable, context->streams[0]->timing.pix_clk_100hz / 10); if (memcmp(&dc->current_state->bw_ctx.bw.dce, &context->bw_ctx.bw.dce, sizeof(context->bw_ctx.bw.dce))) { DC_LOG_BANDWIDTH_CALCS( "%s: finish,\n" "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" "stutMark_b: %d stutMark_a: %d\n" "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" "stutMark_b: %d stutMark_a: %d\n" "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n" "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n" "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n" , __func__, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].b_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].a_mark, context->bw_ctx.bw.dce.urgent_wm_ns[0].b_mark, context->bw_ctx.bw.dce.urgent_wm_ns[0].a_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].b_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].a_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].b_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].a_mark, context->bw_ctx.bw.dce.urgent_wm_ns[1].b_mark, context->bw_ctx.bw.dce.urgent_wm_ns[1].a_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].b_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].a_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].b_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].a_mark, context->bw_ctx.bw.dce.urgent_wm_ns[2].b_mark, context->bw_ctx.bw.dce.urgent_wm_ns[2].a_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].b_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].a_mark, context->bw_ctx.bw.dce.stutter_mode_enable, context->bw_ctx.bw.dce.cpuc_state_change_enable, context->bw_ctx.bw.dce.cpup_state_change_enable, context->bw_ctx.bw.dce.nbp_state_change_enable, context->bw_ctx.bw.dce.all_displays_in_sync, context->bw_ctx.bw.dce.dispclk_khz, context->bw_ctx.bw.dce.sclk_khz, context->bw_ctx.bw.dce.sclk_deep_sleep_khz, context->bw_ctx.bw.dce.yclk_khz, context->bw_ctx.bw.dce.blackout_recovery_time_us); } return result; } enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps) { if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) || ((plane_state->dst_rect.height * 2) < plane_state->src_rect.height)) return DC_FAIL_SURFACE_VALIDATE; return DC_OK; } static bool dce110_validate_surface_sets( struct dc_state *context) { int i, j; for (i = 0; i < context->stream_count; i++) { if (context->stream_status[i].plane_count == 0) continue; if (context->stream_status[i].plane_count > 2) return false; for (j = 0; j < context->stream_status[i].plane_count; j++) { struct dc_plane_state *plane = context->stream_status[i].plane_states[j]; /* underlay validation */ if (plane->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { if ((plane->src_rect.width > 1920 || plane->src_rect.height > 1080)) return false; /* we don't have the logic to support underlay * only yet so block the use case where we get * NV12 plane as top layer */ if (j == 0) return false; /* irrespective of plane format, * stream should be RGB encoded */ if (context->streams[i]->timing.pixel_encoding != PIXEL_ENCODING_RGB) return false; } } } return true; } enum dc_status dce110_validate_global( struct dc *dc, struct dc_state *context) { if 
(!dce110_validate_surface_sets(context)) return DC_FAIL_SURFACE_VALIDATE; return DC_OK; } static enum dc_status dce110_add_stream_to_ctx( struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) { enum dc_status result = DC_ERROR_UNEXPECTED; result = resource_map_pool_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = resource_map_clock_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = build_mapped_resource(dc, new_ctx, dc_stream); return result; } static struct pipe_ctx *dce110_acquire_underlay( struct dc_state *context, const struct resource_pool *pool, struct dc_stream_state *stream) { struct dc *dc = stream->ctx->dc; struct resource_context *res_ctx = &context->res_ctx; unsigned int underlay_idx = pool->underlay_pipe_index; struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx]; if (res_ctx->pipe_ctx[underlay_idx].stream) return NULL; pipe_ctx->stream_res.tg = pool->timing_generators[underlay_idx]; pipe_ctx->plane_res.mi = pool->mis[underlay_idx]; /*pipe_ctx->plane_res.ipp = res_ctx->pool->ipps[underlay_idx];*/ pipe_ctx->plane_res.xfm = pool->transforms[underlay_idx]; pipe_ctx->stream_res.opp = pool->opps[underlay_idx]; pipe_ctx->pipe_idx = underlay_idx; pipe_ctx->stream = stream; if (!dc->current_state->res_ctx.pipe_ctx[underlay_idx].stream) { struct tg_color black_color = {0}; struct dc_bios *dcb = dc->ctx->dc_bios; dc->hwss.enable_display_power_gating( dc, pipe_ctx->stream_res.tg->inst, dcb, PIPE_GATING_CONTROL_DISABLE); /* * This is for powering on underlay, so crtc does not * need to be enabled */ pipe_ctx->stream_res.tg->funcs->program_timing(pipe_ctx->stream_res.tg, &stream->timing, 0, 0, 0, 0, pipe_ctx->stream->signal, false); pipe_ctx->stream_res.tg->funcs->enable_advanced_request( pipe_ctx->stream_res.tg, true, &stream->timing); pipe_ctx->plane_res.mi->funcs->allocate_mem_input(pipe_ctx->plane_res.mi, stream->timing.h_total, stream->timing.v_total, stream->timing.pix_clk_100hz / 10, context->stream_count); color_space_to_black_color(dc, COLOR_SPACE_YCBCR601, &black_color); pipe_ctx->stream_res.tg->funcs->set_blank_color( pipe_ctx->stream_res.tg, &black_color); } return pipe_ctx; } static void dce110_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } struct stream_encoder *dce110_find_first_free_match_stream_enc_for_link( struct resource_context *res_ctx, const struct resource_pool *pool, struct dc_stream_state *stream) { int i; int j = -1; struct dc_link *link = stream->link; for (i = 0; i < pool->stream_enc_count; i++) { if (!res_ctx->is_stream_enc_acquired[i] && pool->stream_enc[i]) { /* Store first available for MST second display * in daisy chain use case */ j = i; if (pool->stream_enc[i]->id == link->link_enc->preferred_engine) return pool->stream_enc[i]; } } /* * For CZ and later, we can allow DIG FE and BE to differ for all display types */ if (j >= 0) return pool->stream_enc[j]; return NULL; } static const struct resource_funcs dce110_res_pool_funcs = { .destroy = dce110_destroy_resource_pool, .link_enc_create = dce110_link_encoder_create, .validate_bandwidth = dce110_validate_bandwidth, .validate_plane = dce110_validate_plane, .acquire_idle_pipe_for_layer = dce110_acquire_underlay, .add_stream_to_ctx = dce110_add_stream_to_ctx, .validate_global = dce110_validate_global, .find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link }; static bool 
underlay_create(struct dc_context *ctx, struct resource_pool *pool) { struct dce110_timing_generator *dce110_tgv = kzalloc(sizeof(*dce110_tgv), GFP_KERNEL); struct dce_transform *dce110_xfmv = kzalloc(sizeof(*dce110_xfmv), GFP_KERNEL); struct dce_mem_input *dce110_miv = kzalloc(sizeof(*dce110_miv), GFP_KERNEL); struct dce110_opp *dce110_oppv = kzalloc(sizeof(*dce110_oppv), GFP_KERNEL); if (!dce110_tgv || !dce110_xfmv || !dce110_miv || !dce110_oppv) { kfree(dce110_tgv); kfree(dce110_xfmv); kfree(dce110_miv); kfree(dce110_oppv); return false; } dce110_opp_v_construct(dce110_oppv, ctx); dce110_timing_generator_v_construct(dce110_tgv, ctx); dce110_mem_input_v_construct(dce110_miv, ctx); dce110_transform_v_construct(dce110_xfmv, ctx); pool->opps[pool->pipe_count] = &dce110_oppv->base; pool->timing_generators[pool->pipe_count] = &dce110_tgv->base; pool->mis[pool->pipe_count] = &dce110_miv->base; pool->transforms[pool->pipe_count] = &dce110_xfmv->base; pool->pipe_count++; /* update the public caps to indicate an underlay is available */ ctx->dc->caps.max_slave_planes = 1; return true; } static void bw_calcs_data_update_from_pplib(struct dc *dc) { struct dm_pp_clock_levels clks = {0}; /*do system clock*/ dm_pp_get_clock_levels_by_type( dc->ctx, DM_PP_CLOCK_TYPE_ENGINE_CLK, &clks); /* convert all the clocks from kHz to fixed point MHz */ dc->bw_vbios->high_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels-1], 1000); dc->bw_vbios->mid1_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels/8], 1000); dc->bw_vbios->mid2_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*2/8], 1000); dc->bw_vbios->mid3_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*3/8], 1000); dc->bw_vbios->mid4_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*4/8], 1000); dc->bw_vbios->mid5_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*5/8], 1000); dc->bw_vbios->mid6_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*6/8], 1000); dc->bw_vbios->low_sclk = bw_frc_to_fixed( clks.clocks_in_khz[0], 1000); dc->sclk_lvls = clks; /*do display clock*/ dm_pp_get_clock_levels_by_type( dc->ctx, DM_PP_CLOCK_TYPE_DISPLAY_CLK, &clks); dc->bw_vbios->high_voltage_max_dispclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels-1], 1000); dc->bw_vbios->mid_voltage_max_dispclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels>>1], 1000); dc->bw_vbios->low_voltage_max_dispclk = bw_frc_to_fixed( clks.clocks_in_khz[0], 1000); /*do memory clock*/ dm_pp_get_clock_levels_by_type( dc->ctx, DM_PP_CLOCK_TYPE_MEMORY_CLK, &clks); dc->bw_vbios->low_yclk = bw_frc_to_fixed( clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); } const struct resource_caps *dce110_resource_cap( struct hw_asic_id *asic_id) { if (ASIC_REV_IS_STONEY(asic_id->hw_internal_rev)) return &stoney_resource_cap; else return &carrizo_resource_cap; } static bool construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool, struct hw_asic_id asic_id) { unsigned int i; struct dc_context *ctx = dc->ctx; struct dc_bios *bp; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = dce110_resource_cap(&ctx->asic_id); pool->base.funcs = &dce110_res_pool_funcs; /************************************************* * Resource + asic cap hardcoding *
*************************************************/ pool->base.pipe_count = pool->base.res_cap->num_timing_generator; pool->base.underlay_pipe_index = pool->base.pipe_count; pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; dc->caps.max_downscale_ratio = 150; dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 128; dc->caps.is_apu = true; /************************************************* * Create resources * *************************************************/ bp = ctx->dc_bios; if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); pool->base.clock_sources[0] = dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[1] = dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clk_src_count = 2; /* TODO: find out if CZ support 3 PLLs */ } if (pool->base.dp_clock_source == NULL) { dm_error("DC: failed to create dp clock source!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } } pool->base.dmcu = dce_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dce110_create(&init_data); if (!pool->base.irqs) goto res_create_fail; } for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = dce110_timing_generator_create( ctx, i, &dce110_tg_offsets[i]); if (pool->base.timing_generators[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto res_create_fail; } pool->base.mis[i] = dce110_mem_input_create(ctx, i); if (pool->base.mis[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create memory input!\n"); goto res_create_fail; } pool->base.ipps[i] = dce110_ipp_create(ctx, i); if (pool->base.ipps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create input pixel processor!\n"); goto res_create_fail; } pool->base.transforms[i] = dce110_transform_create(ctx, i); if (pool->base.transforms[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create transform!\n"); goto res_create_fail; } pool->base.opps[i] = dce110_opp_create(ctx, i); if (pool->base.opps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create output pixel processor!\n"); goto res_create_fail; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce110_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto res_create_fail; } pool->base.hw_i2cs[i] = dce110_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create i2c engine!!\n"); goto res_create_fail; } pool->base.sw_i2cs[i] = NULL; } if (dc->config.fbc_support) dc->fbc_compressor = dce110_compressor_create(ctx); if (!underlay_create(ctx, &pool->base)) goto res_create_fail; if 
(!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) goto res_create_fail; /* Create hardware sequencer */ dce110_hw_sequencer_construct(dc); dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < pool->base.underlay_pipe_index; ++i) dc->caps.planes[i] = plane_cap; dc->caps.planes[pool->base.underlay_pipe_index] = underlay_plane_cap; bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id); bw_calcs_data_update_from_pplib(dc); return true; res_create_fail: destruct(pool); return false; } struct resource_pool *dce110_create_resource_pool( uint8_t num_virtual_links, struct dc *dc, struct hw_asic_id asic_id) { struct dce110_resource_pool *pool = kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (construct(num_virtual_links, dc, pool, asic_id)) return &pool->base; kfree(pool); BREAK_TO_DEBUGGER(); return NULL; }
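/*
 * Lifecycle sketch (illustrative only; the real call site lives in the DC
 * core, not in this file): a pool built by dce110_create_resource_pool() is
 * released through its funcs->destroy callback, which is wired to
 * dce110_destroy_resource_pool() above and in turn frees the per-pipe
 * objects, clock sources, audio endpoints and irq service created by
 * construct().
 *
 *	struct resource_pool *pool =
 *		dce110_create_resource_pool(num_virtual_links, dc, asic_id);
 *	if (!pool)
 *		return false;
 *	...
 *	pool->funcs->destroy(&pool);
 */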
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1273_1
crossvul-cpp_data_bad_1272_4
/* * Copyright 2016 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/slab.h> #include "dm_services.h" #include "dc.h" #include "resource.h" #include "include/irq_service_interface.h" #include "dcn10_resource.h" #include "dcn10_ipp.h" #include "dcn10_mpc.h" #include "irq/dcn10/irq_service_dcn10.h" #include "dcn10_dpp.h" #include "dcn10_optc.h" #include "dcn10_hw_sequencer.h" #include "dce110/dce110_hw_sequencer.h" #include "dcn10_opp.h" #include "dcn10_link_encoder.h" #include "dcn10_stream_encoder.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dce112/dce112_resource.h" #include "dcn10_hubp.h" #include "dcn10_hubbub.h" #include "soc15_hw_ip.h" #include "vega10_ip_offset.h" #include "dcn/dcn_1_0_offset.h" #include "dcn/dcn_1_0_sh_mask.h" #include "nbio/nbio_7_0_offset.h" #include "mmhub/mmhub_9_1_offset.h" #include "mmhub/mmhub_9_1_sh_mask.h" #include "reg_helper.h" #include "dce/dce_abm.h" #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" const struct _vcs_dpi_ip_params_st dcn1_0_ip = { .rob_buffer_size_kbytes = 64, .det_buffer_size_kbytes = 164, .dpte_buffer_size_in_pte_reqs_luma = 42, .dpp_output_buffer_pixels = 2560, .opp_output_buffer_lines = 1, .pixel_chunk_size_kbytes = 8, .pte_enable = 1, .pte_chunk_size_kbytes = 2, .meta_chunk_size_kbytes = 2, .writeback_chunk_size_kbytes = 2, .line_buffer_size_bits = 589824, .max_line_buffer_lines = 12, .IsLineBufferBppFixed = 0, .LineBufferFixedBpp = -1, .writeback_luma_buffer_size_kbytes = 12, .writeback_chroma_buffer_size_kbytes = 8, .max_num_dpp = 4, .max_num_wb = 2, .max_dchub_pscl_bw_pix_per_clk = 4, .max_pscl_lb_bw_pix_per_clk = 2, .max_lb_vscl_bw_pix_per_clk = 4, .max_vscl_hscl_bw_pix_per_clk = 4, .max_hscl_ratio = 4, .max_vscl_ratio = 4, .hscl_mults = 4, .vscl_mults = 4, .max_hscl_taps = 8, .max_vscl_taps = 8, .dispclk_ramp_margin_percent = 1, .underscan_factor = 1.10, .min_vblank_lines = 14, .dppclk_delay_subtotal = 90, .dispclk_delay_subtotal = 42, .dcfclk_cstate_latency = 10, .max_inter_dcn_tile_repeaters = 8, .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0, .bug_forcing_LC_req_same_size_fixed = 0, }; const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = { .sr_exit_time_us = 9.0, .sr_enter_plus_exit_time_us = 11.0, .urgent_latency_us = 4.0, .writeback_latency_us = 12.0, 
.ideal_dram_bw_after_urgent_percent = 80.0, .max_request_size_bytes = 256, .downspread_percent = 0.5, .dram_page_open_time_ns = 50.0, .dram_rw_turnaround_time_ns = 17.5, .dram_return_buffer_per_channel_bytes = 8192, .round_trip_ping_latency_dcfclk_cycles = 128, .urgent_out_of_order_return_per_channel_bytes = 256, .channel_interleave_bytes = 256, .num_banks = 8, .num_chans = 2, .vmm_page_size_bytes = 4096, .dram_clock_change_latency_us = 17.0, .writeback_dram_clock_change_latency_us = 23.0, .return_bus_width_bytes = 64, }; #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #endif enum dcn10_clk_src_array_id { DCN10_CLK_SRC_PLL0, DCN10_CLK_SRC_PLL1, DCN10_CLK_SRC_PLL2, DCN10_CLK_SRC_PLL3, DCN10_CLK_SRC_TOTAL, DCN101_CLK_SRC_TOTAL = DCN10_CLK_SRC_PLL3 }; /* begin ********************* * macros to expend register list macro defined in HW object header file */ /* DCN */ #define BASE_INNER(seg) \ DCE_BASE__INST0_SEG ## seg #define BASE(seg) \ BASE_INNER(seg) #define SR(reg_name)\ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name #define SRI(reg_name, block, id)\ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name #define SRII(reg_name, block, id)\ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name /* NBIO */ #define NBIO_BASE_INNER(seg) \ NBIF_BASE__INST0_SEG ## seg #define NBIO_BASE(seg) \ NBIO_BASE_INNER(seg) #define NBIO_SR(reg_name)\ .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name /* MMHUB */ #define MMHUB_BASE_INNER(seg) \ MMHUB_BASE__INST0_SEG ## seg #define MMHUB_BASE(seg) \ MMHUB_BASE_INNER(seg) #define MMHUB_SR(reg_name)\ .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name /* macros to expend register list macro defined in HW object header file * end *********************/ static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCN10_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { DMCU_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dce_dmcu_mask dmcu_mask = { DMCU_MASK_SH_LIST_DCN10(_MASK) }; static const struct dce_abm_registers abm_regs = { ABM_DCN10_REG_LIST(0) }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCN10(_MASK) }; #define stream_enc_regs(id)\ [id] = {\ SE_DCN_REG_LIST(id)\ } static const struct dcn10_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), stream_enc_regs(3), }; static const struct dcn10_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dcn10_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCN10(_MASK) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), audio_regs(2), 
audio_regs(3), }; #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) static const struct dce_audio_shift audio_shift = { DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) }; #define aux_regs(id)\ [id] = {\ AUX_REG_LIST(id)\ } static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3) }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3) }; #define link_regs(id)\ [id] = {\ LE_DCN10_REG_LIST(id), \ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ } static const struct dcn10_link_enc_registers link_enc_regs[] = { link_regs(0), link_regs(1), link_regs(2), link_regs(3) }; static const struct dcn10_link_enc_shift le_shift = { LINK_ENCODER_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dcn10_link_enc_mask le_mask = { LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK) }; #define ipp_regs(id)\ [id] = {\ IPP_REG_LIST_DCN10(id),\ } static const struct dcn10_ipp_registers ipp_regs[] = { ipp_regs(0), ipp_regs(1), ipp_regs(2), ipp_regs(3), }; static const struct dcn10_ipp_shift ipp_shift = { IPP_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dcn10_ipp_mask ipp_mask = { IPP_MASK_SH_LIST_DCN10(_MASK), }; #define opp_regs(id)\ [id] = {\ OPP_REG_LIST_DCN10(id),\ } static const struct dcn10_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), opp_regs(3), }; static const struct dcn10_opp_shift opp_shift = { OPP_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dcn10_opp_mask opp_mask = { OPP_MASK_SH_LIST_DCN10(_MASK), }; #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ .AUX_RESET_MASK = 0 \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4), aux_engine_regs(5) }; #define tf_regs(id)\ [id] = {\ TF_REG_LIST_DCN10(id),\ } static const struct dcn_dpp_registers tf_regs[] = { tf_regs(0), tf_regs(1), tf_regs(2), tf_regs(3), }; static const struct dcn_dpp_shift tf_shift = { TF_REG_LIST_SH_MASK_DCN10(__SHIFT), TF_DEBUG_REG_LIST_SH_DCN10 }; static const struct dcn_dpp_mask tf_mask = { TF_REG_LIST_SH_MASK_DCN10(_MASK), TF_DEBUG_REG_LIST_MASK_DCN10 }; static const struct dcn_mpc_registers mpc_regs = { MPC_COMMON_REG_LIST_DCN1_0(0), MPC_COMMON_REG_LIST_DCN1_0(1), MPC_COMMON_REG_LIST_DCN1_0(2), MPC_COMMON_REG_LIST_DCN1_0(3), MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(0), MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(1), MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(2), MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(3) }; static const struct dcn_mpc_shift mpc_shift = { MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT) }; static const struct dcn_mpc_mask mpc_mask = { MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK), }; #define tg_regs(id)\ [id] = {TG_COMMON_REG_LIST_DCN1_0(id)} static const struct dcn_optc_registers tg_regs[] = { tg_regs(0), tg_regs(1), tg_regs(2), tg_regs(3), }; static const struct dcn_optc_shift tg_shift = { TG_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT) }; static const struct dcn_optc_mask tg_mask = { TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK) }; static const struct bios_registers bios_regs = { NBIO_SR(BIOS_SCRATCH_3), NBIO_SR(BIOS_SCRATCH_6) }; #define hubp_regs(id)\ [id] = {\ 
HUBP_REG_LIST_DCN10(id)\ } static const struct dcn_mi_registers hubp_regs[] = { hubp_regs(0), hubp_regs(1), hubp_regs(2), hubp_regs(3), }; static const struct dcn_mi_shift hubp_shift = { HUBP_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dcn_mi_mask hubp_mask = { HUBP_MASK_SH_LIST_DCN10(_MASK) }; static const struct dcn_hubbub_registers hubbub_reg = { HUBBUB_REG_LIST_DCN10(0) }; static const struct dcn_hubbub_shift hubbub_shift = { HUBBUB_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dcn_hubbub_mask hubbub_mask = { HUBBUB_MASK_SH_LIST_DCN10(_MASK) }; #define clk_src_regs(index, pllid)\ [index] = {\ CS_COMMON_REG_LIST_DCN1_0(index, pllid),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0, A), clk_src_regs(1, B), clk_src_regs(2, C), clk_src_regs(3, D) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK) }; static const struct resource_caps res_cap = { .num_timing_generator = 4, .num_opp = 4, .num_video_plane = 4, .num_audio = 4, .num_stream_encoder = 4, .num_pll = 4, .num_ddc = 4, }; static const struct resource_caps rv2_res_cap = { .num_timing_generator = 3, .num_opp = 3, .num_video_plane = 3, .num_audio = 3, .num_stream_encoder = 3, .num_pll = 3, .num_ddc = 4, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCN_UNIVERSAL, .blends_with_above = true, .blends_with_below = true, .per_pixel_alpha = true, .pixel_format_support = { .argb8888 = true, .nv12 = true, .fp16 = true }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 16000, .fp16 = 1 }, .max_downscale_factor = { .argb8888 = 250, .nv12 = 250, .fp16 = 1 } }; static const struct dc_debug_options debug_defaults_drv = { .sanity_checks = true, .disable_dmcu = true, .force_abm_enable = false, .timing_trace = false, .clock_trace = true, /* raven smu dones't allow 0 disp clk, * smu min disp clk limit is 50Mhz * keep min disp clk 100Mhz avoid smu hang */ .min_disp_clk_khz = 100000, .disable_pplib_clock_request = false, .disable_pplib_wm_range = false, .pplib_wm_report_mode = WM_REPORT_DEFAULT, .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, .force_single_disp_pipe_split = true, .disable_dcc = DCC_ENABLE, .voltage_align_fclk = true, .disable_stereo_support = true, .vsr_support = true, .performance_trace = false, .az_endpoint_mute_only = true, .recovery_enabled = false, /*enable this by default after testing.*/ .max_downscale_src_width = 3840, .underflow_assert_delay_us = 0xFFFFFFFF, }; static const struct dc_debug_options debug_defaults_diags = { .disable_dmcu = true, .force_abm_enable = false, .timing_trace = true, .clock_trace = true, .disable_stutter = true, .disable_pplib_clock_request = true, .disable_pplib_wm_range = true, .underflow_assert_delay_us = 0xFFFFFFFF, }; static void dcn10_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN10_DPP(*dpp)); *dpp = NULL; } static struct dpp *dcn10_dpp_create( struct dc_context *ctx, uint32_t inst) { struct dcn10_dpp *dpp = kzalloc(sizeof(struct dcn10_dpp), GFP_KERNEL); if (!dpp) return NULL; dpp1_construct(dpp, ctx, inst, &tf_regs[inst], &tf_shift, &tf_mask); return &dpp->base; } static struct input_pixel_processor *dcn10_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dcn10_ipp *ipp = kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); return NULL; } dcn10_ipp_construct(ipp, ctx, inst, &ipp_regs[inst], &ipp_shift, &ipp_mask); return &ipp->base; } static struct 
output_pixel_processor *dcn10_opp_create( struct dc_context *ctx, uint32_t inst) { struct dcn10_opp *opp = kzalloc(sizeof(struct dcn10_opp), GFP_KERNEL); if (!opp) { BREAK_TO_DEBUGGER(); return NULL; } dcn10_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } struct dce_aux *dcn10_aux_engine_create( struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst]); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5), i2c_inst_regs(6), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) }; struct dce_i2c_hw *dcn10_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dcn1_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } static struct mpc *dcn10_mpc_create(struct dc_context *ctx) { struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc), GFP_KERNEL); if (!mpc10) return NULL; dcn10_mpc_construct(mpc10, ctx, &mpc_regs, &mpc_shift, &mpc_mask, 4); return &mpc10->base; } static struct hubbub *dcn10_hubbub_create(struct dc_context *ctx) { struct dcn10_hubbub *dcn10_hubbub = kzalloc(sizeof(struct dcn10_hubbub), GFP_KERNEL); if (!dcn10_hubbub) return NULL; hubbub1_construct(&dcn10_hubbub->base, ctx, &hubbub_reg, &hubbub_shift, &hubbub_mask); return &dcn10_hubbub->base; } static struct timing_generator *dcn10_timing_generator_create( struct dc_context *ctx, uint32_t instance) { struct optc *tgn10 = kzalloc(sizeof(struct optc), GFP_KERNEL); if (!tgn10) return NULL; tgn10->base.inst = instance; tgn10->base.ctx = ctx; tgn10->tg_regs = &tg_regs[instance]; tgn10->tg_shift = &tg_shift; tgn10->tg_mask = &tg_mask; dcn10_timing_generator_init(tgn10); return &tgn10->base; } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 600000, .hdmi_ycbcr420_supported = true, .dp_ycbcr420_supported = false, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, .flags.bits.IS_TPS4_CAPABLE = true }; struct link_encoder *dcn10_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dcn10_link_encoder *enc10 = kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL); if (!enc10) return NULL; dcn10_link_encoder_construct(enc10, enc_init_data, &link_enc_feature, &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, &le_mask); return &enc10->base; } struct clock_source *dcn10_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dce112_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = 
dp_clk_src; return &clk_src->base; } BREAK_TO_DEBUGGER(); return NULL; } static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); } static struct audio *create_audio( struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } static struct stream_encoder *dcn10_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dcn10_stream_encoder *enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); if (!enc1) return NULL; dcn10_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc1->base; } static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCN1_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCN1_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCN1_MASK_SH_LIST(_MASK) }; static struct dce_hwseq *dcn10_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; hws->wa.DEGVIDCN10_253 = true; hws->wa.false_optc_underflow = true; hws->wa.DEGVIDCN10_254 = true; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, .create_stream_encoder = dcn10_stream_encoder_create, .create_hwseq = dcn10_hwseq_create, }; static const struct resource_create_funcs res_create_maximus_funcs = { .read_dce_straps = NULL, .create_audio = NULL, .create_stream_encoder = NULL, .create_hwseq = dcn10_hwseq_create, }; void dcn10_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); *clk_src = NULL; } static struct pp_smu_funcs *dcn10_pp_smu_create(struct dc_context *ctx) { struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL); if (!pp_smu) return pp_smu; dm_pp_get_funcs(ctx, pp_smu); return pp_smu; } static void destruct(struct dcn10_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) { kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); pool->base.stream_enc[i] = NULL; } } if (pool->base.mpc != NULL) { kfree(TO_DCN10_MPC(pool->base.mpc)); pool->base.mpc = NULL; } if (pool->base.hubbub != NULL) { kfree(pool->base.hubbub); pool->base.hubbub = NULL; } for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.opps[i] != NULL) pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); if (pool->base.dpps[i] != NULL) dcn10_dpp_destroy(&pool->base.dpps[i]); if (pool->base.ipps[i] != NULL) pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); if (pool->base.hubps[i] != NULL) { kfree(TO_DCN10_HUBP(pool->base.hubps[i])); pool->base.hubps[i] = NULL; } if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); } if (pool->base.timing_generators[i] != NULL) { kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { 
kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i]) dce_aud_destroy(&pool->base.audios[i]); } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) { dcn10_clock_source_destroy(&pool->base.clock_sources[i]); pool->base.clock_sources[i] = NULL; } } if (pool->base.dp_clock_source != NULL) { dcn10_clock_source_destroy(&pool->base.dp_clock_source); pool->base.dp_clock_source = NULL; } if (pool->base.abm != NULL) dce_abm_destroy(&pool->base.abm); if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); kfree(pool->base.pp_smu); } static struct hubp *dcn10_hubp_create( struct dc_context *ctx, uint32_t inst) { struct dcn10_hubp *hubp1 = kzalloc(sizeof(struct dcn10_hubp), GFP_KERNEL); if (!hubp1) return NULL; dcn10_hubp_construct(hubp1, ctx, inst, &hubp_regs[inst], &hubp_shift, &hubp_mask); return &hubp1->base; } static void get_pixel_clock_parameters( const struct pipe_ctx *pipe_ctx, struct pixel_clk_params *pixel_clk_params) { const struct dc_stream_state *stream = pipe_ctx->stream; pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; pixel_clk_params->encoder_object_id = stream->link->link_enc->id; pixel_clk_params->signal_type = pipe_ctx->stream->signal; pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; /* TODO: un-hardcode*/ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * LINK_RATE_REF_FREQ_IN_KHZ; pixel_clk_params->flags.ENABLE_SS = 0; pixel_clk_params->color_depth = stream->timing.display_color_depth; pixel_clk_params->flags.DISPLAY_BLANKED = 1; pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding; if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) pixel_clk_params->color_depth = COLOR_DEPTH_888; if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) pixel_clk_params->requested_pix_clk_100hz /= 2; if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) pixel_clk_params->requested_pix_clk_100hz *= 2; } static void build_clamping_params(struct dc_stream_state *stream) { stream->clamping.clamping_level = CLAMPING_FULL_RANGE; stream->clamping.c_depth = stream->timing.display_color_depth; stream->clamping.pixel_encoding = stream->timing.pixel_encoding; } static void build_pipe_hw_param(struct pipe_ctx *pipe_ctx) { get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params); pipe_ctx->clock_source->funcs->get_pix_clk_dividers( pipe_ctx->clock_source, &pipe_ctx->stream_res.pix_clk_params, &pipe_ctx->pll_settings); pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding; resource_build_bit_depth_reduction_params(pipe_ctx->stream, &pipe_ctx->stream->bit_depth_params); build_clamping_params(pipe_ctx->stream); } static enum dc_status build_mapped_resource( const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) { struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream); /*TODO Seems unneeded anymore */ /* if (old_context && resource_is_stream_unchanged(old_context, stream)) { if (stream != NULL && old_context->streams[i] != NULL) { todo: shouldn't have to copy missing parameter here resource_build_bit_depth_reduction_params(stream, &stream->bit_depth_params); stream->clamping.pixel_encoding = stream->timing.pixel_encoding; resource_build_bit_depth_reduction_params(stream, &stream->bit_depth_params); build_clamping_params(stream); continue; } } */ if (!pipe_ctx) return 
DC_ERROR_UNEXPECTED; build_pipe_hw_param(pipe_ctx); return DC_OK; } enum dc_status dcn10_add_stream_to_ctx( struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) { enum dc_status result = DC_ERROR_UNEXPECTED; result = resource_map_pool_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = build_mapped_resource(dc, new_ctx, dc_stream); return result; } static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer( struct dc_state *context, const struct resource_pool *pool, struct dc_stream_state *stream) { struct resource_context *res_ctx = &context->res_ctx; struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream); struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe); if (!head_pipe) { ASSERT(0); return NULL; } if (!idle_pipe) return NULL; idle_pipe->stream = head_pipe->stream; idle_pipe->stream_res.tg = head_pipe->stream_res.tg; idle_pipe->stream_res.abm = head_pipe->stream_res.abm; idle_pipe->stream_res.opp = head_pipe->stream_res.opp; idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx]; idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx]; idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx]; idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst; return idle_pipe; } static bool dcn10_get_dcc_compression_cap(const struct dc *dc, const struct dc_dcc_surface_param *input, struct dc_surface_dcc_cap *output) { return dc->res_pool->hubbub->funcs->get_dcc_compression_cap( dc->res_pool->hubbub, input, output); } static void dcn10_destroy_resource_pool(struct resource_pool **pool) { struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool); destruct(dcn10_pool); kfree(dcn10_pool); *pool = NULL; } static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps) { if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN && caps->max_video_width != 0 && plane_state->src_rect.width > caps->max_video_width) return DC_FAIL_SURFACE_VALIDATE; return DC_OK; } static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *context) { int i, j; bool video_down_scaled = false; bool video_large = false; bool desktop_large = false; bool dcc_disabled = false; for (i = 0; i < context->stream_count; i++) { if (context->stream_status[i].plane_count == 0) continue; if (context->stream_status[i].plane_count > 2) return DC_FAIL_UNSUPPORTED_1; for (j = 0; j < context->stream_status[i].plane_count; j++) { struct dc_plane_state *plane = context->stream_status[i].plane_states[j]; if (plane->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { if (plane->src_rect.width > plane->dst_rect.width || plane->src_rect.height > plane->dst_rect.height) video_down_scaled = true; if (plane->src_rect.width >= 3840) video_large = true; } else { if (plane->src_rect.width >= 3840) desktop_large = true; if (!plane->dcc.enable) dcc_disabled = true; } } } /* * Workaround: On DCN10 there is UMC issue that causes underflow when * playing 4k video on 4k desktop with video downscaled and single channel * memory */ if (video_large && desktop_large && video_down_scaled && dcc_disabled && dc->dcn_soc->number_of_channels == 1) return DC_FAIL_SURFACE_VALIDATE; return DC_OK; } static enum dc_status dcn10_get_default_swizzle_mode(struct dc_plane_state *plane_state) { enum dc_status result = DC_OK; enum surface_pixel_format surf_pix_format = plane_state->format; unsigned int bpp = 
resource_pixel_format_to_bpp(surf_pix_format); enum swizzle_mode_values swizzle = DC_SW_LINEAR; if (bpp == 64) swizzle = DC_SW_64KB_D; else swizzle = DC_SW_64KB_S; plane_state->tiling_info.gfx9.swizzle = swizzle; return result; } struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link( struct resource_context *res_ctx, const struct resource_pool *pool, struct dc_stream_state *stream) { int i; int j = -1; struct dc_link *link = stream->link; for (i = 0; i < pool->stream_enc_count; i++) { if (!res_ctx->is_stream_enc_acquired[i] && pool->stream_enc[i]) { /* Store first available for MST second display * in daisy chain use case */ j = i; if (pool->stream_enc[i]->id == link->link_enc->preferred_engine) return pool->stream_enc[i]; } } /* * For CZ and later, we can allow DIG FE and BE to differ for all display types */ if (j >= 0) return pool->stream_enc[j]; return NULL; } static const struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn10_get_dcc_compression_cap }; static const struct resource_funcs dcn10_res_pool_funcs = { .destroy = dcn10_destroy_resource_pool, .link_enc_create = dcn10_link_encoder_create, .validate_bandwidth = dcn_validate_bandwidth, .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer, .validate_plane = dcn10_validate_plane, .validate_global = dcn10_validate_global, .add_stream_to_ctx = dcn10_add_stream_to_ctx, .get_default_swizzle_mode = dcn10_get_default_swizzle_mode, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link }; static uint32_t read_pipe_fuses(struct dc_context *ctx) { uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0); /* RV1 supports max 4 pipes */ value = value & 0xf; return value; } static bool construct( uint8_t num_virtual_links, struct dc *dc, struct dcn10_resource_pool *pool) { int i; int j; struct dc_context *ctx = dc->ctx; uint32_t pipe_fuses = read_pipe_fuses(ctx); ctx->dc_bios->regs = &bios_regs; if (ctx->dce_version == DCN_VERSION_1_01) pool->base.res_cap = &rv2_res_cap; else pool->base.res_cap = &res_cap; pool->base.funcs = &dcn10_res_pool_funcs; /* * TODO fill in from actual raven resource when we create * more than virtual encoder */ /************************************************* * Resource + asic cap hardcoding * *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; /* max pipe num for ASIC before checking pipe fuses */ pool->base.pipe_count = pool->base.res_cap->num_timing_generator; if (dc->ctx->dce_version == DCN_VERSION_1_01) pool->base.pipe_count = 3; dc->caps.max_video_width = 3840; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 256; dc->caps.max_slave_planes = 1; dc->caps.is_apu = true; dc->caps.post_blend_color_processing = false; /* Raven DP PHY HBR2 eye diagram pattern is not stable.
Use TP4 */ dc->caps.force_dp_tps4_for_cp2520 = true; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; else dc->debug = debug_defaults_diags; /************************************************* * Create resources * *************************************************/ pool->base.clock_sources[DCN10_CLK_SRC_PLL0] = dcn10_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[DCN10_CLK_SRC_PLL1] = dcn10_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[DCN10_CLK_SRC_PLL2] = dcn10_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL2, &clk_src_regs[2], false); if (dc->ctx->dce_version == DCN_VERSION_1_0) { pool->base.clock_sources[DCN10_CLK_SRC_PLL3] = dcn10_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL3, &clk_src_regs[3], false); } pool->base.clk_src_count = DCN10_CLK_SRC_TOTAL; if (dc->ctx->dce_version == DCN_VERSION_1_01) pool->base.clk_src_count = DCN101_CLK_SRC_TOTAL; pool->base.dp_clock_source = dcn10_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_ID_DP_DTO, /* todo: not reuse phy_pll registers */ &clk_src_regs[0], true); for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto fail; } } pool->base.dmcu = dcn10_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto fail; } dml_init_instance(&dc->dml, &dcn1_0_soc, &dcn1_0_ip, DML_PROJECT_RAVEN1); memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults)); memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults)); if (dc->ctx->dce_version == DCN_VERSION_1_01) { struct dcn_soc_bounding_box *dcn_soc = dc->dcn_soc; struct dcn_ip_params *dcn_ip = dc->dcn_ip; struct display_mode_lib *dml = &dc->dml; dml->ip.max_num_dpp = 3; /* TODO how to handle 23.84? */ dcn_soc->dram_clock_change_latency = 23; dcn_ip->max_num_dpp = 3; } if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) { dc->dcn_soc->urgent_latency = 3; dc->debug.disable_dmcu = true; dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 41.60f; } dc->dcn_soc->number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width; ASSERT(dc->dcn_soc->number_of_channels < 3); if (dc->dcn_soc->number_of_channels == 0)/*old sbios bug*/ dc->dcn_soc->number_of_channels = 2; if (dc->dcn_soc->number_of_channels == 1) { dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 19.2f; dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = 17.066f; dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = 14.933f; dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 12.8f; if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) { dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 20.80f; } } pool->base.pp_smu = dcn10_pp_smu_create(ctx); /* * Right now SMU/PPLIB and DAL all have the AZ D3 force PME notification * * implemented. So AZ D3 should work.For issue 197007. 
* */ if (pool->base.pp_smu != NULL && pool->base.pp_smu->rv_funcs.set_pme_wa_enable != NULL) dc->debug.az_endpoint_mute_only = false; if (!dc->debug.disable_pplib_clock_request) dcn_bw_update_from_pplib(dc); dcn_bw_sync_calcs_and_dml(dc); if (!dc->debug.disable_pplib_wm_range) { dc->res_pool = &pool->base; dcn_bw_notify_pplib_of_wm_ranges(dc); } { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dcn10_create(&init_data); if (!pool->base.irqs) goto fail; } /* index to valid pipe resource */ j = 0; /* mem input -> ipp -> dpp -> opp -> TG */ for (i = 0; i < pool->base.pipe_count; i++) { /* if pipe is disabled, skip instance of HW pipe, * i.e, skip ASIC register instance */ if ((pipe_fuses & (1 << i)) != 0) continue; pool->base.hubps[j] = dcn10_hubp_create(ctx, i); if (pool->base.hubps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create memory input!\n"); goto fail; } pool->base.ipps[j] = dcn10_ipp_create(ctx, i); if (pool->base.ipps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create input pixel processor!\n"); goto fail; } pool->base.dpps[j] = dcn10_dpp_create(ctx, i); if (pool->base.dpps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create dpp!\n"); goto fail; } pool->base.opps[j] = dcn10_opp_create(ctx, i); if (pool->base.opps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create output pixel processor!\n"); goto fail; } pool->base.timing_generators[j] = dcn10_timing_generator_create( ctx, i); if (pool->base.timing_generators[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto fail; } /* check next valid pipe */ j++; } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dcn10_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto fail; } pool->base.hw_i2cs[i] = dcn10_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create hw i2c!!\n"); goto fail; } pool->base.sw_i2cs[i] = NULL; } /* valid pipe num */ pool->base.pipe_count = j; pool->base.timing_generator_count = j; /* within dml lib, it is hard code to 4. If ASIC pipe is fused, * the value may be changed */ dc->dml.ip.max_num_dpp = pool->base.pipe_count; dc->dcn_ip->max_num_dpp = pool->base.pipe_count; pool->base.mpc = dcn10_mpc_create(ctx); if (pool->base.mpc == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create mpc!\n"); goto fail; } pool->base.hubbub = dcn10_hubbub_create(ctx); if (pool->base.hubbub == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create hubbub!\n"); goto fail; } if (!resource_construct(num_virtual_links, dc, &pool->base, (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? &res_create_funcs : &res_create_maximus_funcs))) goto fail; dcn10_hw_sequencer_construct(dc); dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; dc->cap_funcs = cap_funcs; return true; fail: destruct(pool); return false; } struct resource_pool *dcn10_create_resource_pool( const struct dc_init_data *init_data, struct dc *dc) { struct dcn10_resource_pool *pool = kzalloc(sizeof(struct dcn10_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (construct(init_data->num_virtual_links, dc, pool)) return &pool->base; BREAK_TO_DEBUGGER(); return NULL; }
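/*
 * Lifecycle sketch (illustrative only; the real call site lives in the DC
 * core): dcn10_create_resource_pool() consumes the dc_init_data, skips any
 * fused-off pipes reported by read_pipe_fuses(), and hands back the base
 * resource_pool. Teardown goes through the funcs->destroy callback, wired
 * to dcn10_destroy_resource_pool() above.
 *
 *	struct resource_pool *pool = dcn10_create_resource_pool(init_data, dc);
 *	if (!pool)
 *		return false;
 *	...
 *	pool->funcs->destroy(&pool);
 */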
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1272_4
crossvul-cpp_data_good_4773_1
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M OOO DDDD U U L EEEEE % % MM MM O O D D U U L E % % M M M O O D D U U L EEE % % M M O O D D U U L E % % M M OOO DDDD UUU LLLLL EEEEE % % % % % % MagickCore Module Methods % % % % Software Design % % Bob Friesenhahn % % March 2000 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/blob.h" #include "magick/coder.h" #include "magick/client.h" #include "magick/configure.h" #include "magick/deprecate.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/log.h" #include "magick/hashmap.h" #include "magick/magic.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/nt-base-private.h" #include "magick/policy.h" #include "magick/semaphore.h" #include "magick/splay-tree.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/token.h" #include "magick/utility.h" #if defined(MAGICKCORE_MODULES_SUPPORT) #if defined(MAGICKCORE_LTDL_DELEGATE) #include "ltdl.h" typedef lt_dlhandle ModuleHandle; #else typedef void *ModuleHandle; #endif /* Define declarations. */ #if defined(MAGICKCORE_LTDL_DELEGATE) # define ModuleGlobExpression "*.la" #else # if defined(_DEBUG) # define ModuleGlobExpression "IM_MOD_DB_*.dll" # else # define ModuleGlobExpression "IM_MOD_RL_*.dll" # endif #endif /* Global declarations. */ static SemaphoreInfo *module_semaphore = (SemaphoreInfo *) NULL; static SplayTreeInfo *module_list = (SplayTreeInfo *) NULL; /* Forward declarations. */ static const ModuleInfo *RegisterModule(const ModuleInfo *,ExceptionInfo *); static MagickBooleanType GetMagickModulePath(const char *,MagickModuleType,char *,ExceptionInfo *), IsModuleTreeInstantiated(ExceptionInfo *), UnregisterModule(const ModuleInfo *,ExceptionInfo *); static void TagToCoderModuleName(const char *,char *), TagToFilterModuleName(const char *,char *), TagToModuleName(const char *,const char *,char *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e M o d u l e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireModuleInfo() allocates the ModuleInfo structure. % % The format of the AcquireModuleInfo method is: % % ModuleInfo *AcquireModuleInfo(const char *path,const char *tag) % % A description of each parameter follows: % % o path: the path associated with the tag. % % o tag: a character string that represents the image format we are % looking for. 
% */ MagickExport ModuleInfo *AcquireModuleInfo(const char *path,const char *tag) { ModuleInfo *module_info; module_info=(ModuleInfo *) AcquireMagickMemory(sizeof(*module_info)); if (module_info == (ModuleInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(module_info,0,sizeof(*module_info)); if (path != (const char *) NULL) module_info->path=ConstantString(path); if (tag != (const char *) NULL) module_info->tag=ConstantString(tag); module_info->timestamp=time(0); module_info->signature=MagickSignature; return(module_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y M o d u l e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyModuleList() unregisters any previously loaded modules and exits % the module loaded environment. % % The format of the DestroyModuleList module is: % % void DestroyModuleList(void) % */ MagickExport void DestroyModuleList(void) { /* Destroy magick modules. */ LockSemaphoreInfo(module_semaphore); #if defined(MAGICKCORE_MODULES_SUPPORT) if (module_list != (SplayTreeInfo *) NULL) module_list=DestroySplayTree(module_list); #endif UnlockSemaphoreInfo(module_semaphore); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M o d u l e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetModuleInfo() returns a pointer to a ModuleInfo structure that matches the % specified tag. If tag is NULL, the head of the module list is returned. If % no modules are loaded, or the requested module is not found, NULL is % returned. % % The format of the GetModuleInfo module is: % % ModuleInfo *GetModuleInfo(const char *tag,ExceptionInfo *exception) % % A description of each parameter follows: % % o tag: a character string that represents the image format we are % looking for. % % o exception: return any errors or warnings in this structure. % */ MagickExport ModuleInfo *GetModuleInfo(const char *tag,ExceptionInfo *exception) { ModuleInfo *module_info; if (IsModuleTreeInstantiated(exception) == MagickFalse) return((ModuleInfo *) NULL); LockSemaphoreInfo(module_semaphore); ResetSplayTreeIterator(module_list); if ((tag == (const char *) NULL) || (LocaleCompare(tag,"*") == 0)) { #if defined(MAGICKCORE_MODULES_SUPPORT) if (LocaleCompare(tag,"*") == 0) (void) OpenModules(exception); #endif module_info=(ModuleInfo *) GetNextValueInSplayTree(module_list); UnlockSemaphoreInfo(module_semaphore); return(module_info); } module_info=(ModuleInfo *) GetValueFromSplayTree(module_list,tag); UnlockSemaphoreInfo(module_semaphore); return(module_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M o d u l e I n f o L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetModuleInfoList() returns any modules that match the specified pattern. % % The format of the GetModuleInfoList function is: % % const ModuleInfo **GetModuleInfoList(const char *pattern, % size_t *number_modules,ExceptionInfo *exception) % % A description of each parameter follows: % % o pattern: Specifies a pointer to a text string containing a pattern. % % o number_modules: This integer returns the number of modules in the list. % % o exception: return any errors or warnings in this structure. 
% */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int ModuleInfoCompare(const void *x,const void *y) { const ModuleInfo **p, **q; p=(const ModuleInfo **) x, q=(const ModuleInfo **) y; if (LocaleCompare((*p)->path,(*q)->path) == 0) return(LocaleCompare((*p)->tag,(*q)->tag)); return(LocaleCompare((*p)->path,(*q)->path)); } #if defined(__cplusplus) || defined(c_plusplus) } #endif MagickExport const ModuleInfo **GetModuleInfoList(const char *pattern, size_t *number_modules,ExceptionInfo *exception) { const ModuleInfo **modules; register const ModuleInfo *p; register ssize_t i; /* Allocate module list. */ assert(pattern != (char *) NULL); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",pattern); assert(number_modules != (size_t *) NULL); *number_modules=0; p=GetModuleInfo("*",exception); if (p == (const ModuleInfo *) NULL) return((const ModuleInfo **) NULL); modules=(const ModuleInfo **) AcquireQuantumMemory((size_t) GetNumberOfNodesInSplayTree(module_list)+1UL,sizeof(*modules)); if (modules == (const ModuleInfo **) NULL) return((const ModuleInfo **) NULL); /* Generate module list. */ LockSemaphoreInfo(module_semaphore); ResetSplayTreeIterator(module_list); p=(const ModuleInfo *) GetNextValueInSplayTree(module_list); for (i=0; p != (const ModuleInfo *) NULL; ) { if ((p->stealth == MagickFalse) && (GlobExpression(p->tag,pattern,MagickFalse) != MagickFalse)) modules[i++]=p; p=(const ModuleInfo *) GetNextValueInSplayTree(module_list); } UnlockSemaphoreInfo(module_semaphore); qsort((void *) modules,(size_t) i,sizeof(*modules),ModuleInfoCompare); modules[i]=(ModuleInfo *) NULL; *number_modules=(size_t) i; return(modules); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M o d u l e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetModuleList() returns any image format modules that match the specified % pattern. % % The format of the GetModuleList function is: % % char **GetModuleList(const char *pattern,const MagickModuleType type, % size_t *number_modules,ExceptionInfo *exception) % % A description of each parameter follows: % % o pattern: Specifies a pointer to a text string containing a pattern. % % o type: choose from MagickImageCoderModule or MagickImageFilterModule. % % o number_modules: This integer returns the number of modules in the % list. % % o exception: return any errors or warnings in this structure. % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int ModuleCompare(const void *x,const void *y) { register const char **p, **q; p=(const char **) x; q=(const char **) y; return(LocaleCompare(*p,*q)); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static inline int MagickReadDirectory(DIR *directory,struct dirent *entry, struct dirent **result) { #if defined(MAGICKCORE_HAVE_READDIR_R) return(readdir_r(directory,entry,result)); #else (void) entry; errno=0; *result=readdir(directory); return(errno); #endif } MagickExport char **GetModuleList(const char *pattern, const MagickModuleType type,size_t *number_modules,ExceptionInfo *exception) { #define MaxModules 511 char **modules, filename[MaxTextExtent], module_path[MaxTextExtent], path[MaxTextExtent]; DIR *directory; MagickBooleanType status; register ssize_t i; size_t max_entries; struct dirent *buffer, *entry; /* Locate all modules in the image coder or filter path. 
*/ switch (type) { case MagickImageCoderModule: default: { TagToCoderModuleName("magick",filename); status=GetMagickModulePath(filename,MagickImageCoderModule,module_path, exception); break; } case MagickImageFilterModule: { TagToFilterModuleName("analyze",filename); status=GetMagickModulePath(filename,MagickImageFilterModule,module_path, exception); break; } } if (status == MagickFalse) return((char **) NULL); GetPathComponent(module_path,HeadPath,path); max_entries=MaxModules; modules=(char **) AcquireQuantumMemory((size_t) max_entries+1UL, sizeof(*modules)); if (modules == (char **) NULL) return((char **) NULL); *modules=(char *) NULL; directory=opendir(path); if (directory == (DIR *) NULL) { modules=(char **) RelinquishMagickMemory(modules); return((char **) NULL); } buffer=(struct dirent *) AcquireMagickMemory(sizeof(*buffer)+FILENAME_MAX+1); if (buffer == (struct dirent *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); i=0; while ((MagickReadDirectory(directory,buffer,&entry) == 0) && (entry != (struct dirent *) NULL)) { status=GlobExpression(entry->d_name,ModuleGlobExpression,MagickFalse); if (status == MagickFalse) continue; if (GlobExpression(entry->d_name,pattern,MagickFalse) == MagickFalse) continue; if (i >= (ssize_t) max_entries) { modules=(char **) NULL; if (~max_entries > max_entries) modules=(char **) ResizeQuantumMemory(modules,(size_t) (max_entries << 1),sizeof(*modules)); max_entries<<=1; if (modules == (char **) NULL) break; } /* Add new module name to list. */ modules[i]=AcquireString((char *) NULL); GetPathComponent(entry->d_name,BasePath,modules[i]); if (LocaleNCompare("IM_MOD_",modules[i],7) == 0) { (void) CopyMagickString(modules[i],modules[i]+10,MaxTextExtent); modules[i][strlen(modules[i])-1]='\0'; } i++; } buffer=(struct dirent *) RelinquishMagickMemory(buffer); (void) closedir(directory); if (modules == (char **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ConfigureError, "MemoryAllocationFailed","`%s'",pattern); return((char **) NULL); } qsort((void *) modules,(size_t) i,sizeof(*modules),ModuleCompare); modules[i]=(char *) NULL; *number_modules=(size_t) i; return(modules); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M a g i c k M o d u l e P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMagickModulePath() finds a module with the specified module type and % filename. % % The format of the GetMagickModulePath module is: % % MagickBooleanType GetMagickModulePath(const char *filename, % MagickModuleType module_type,char *path,ExceptionInfo *exception) % % A description of each parameter follows: % % o filename: the module file name. % % o module_type: the module type: MagickImageCoderModule or % MagickImageFilterModule. % % o path: the path associated with the filename. % % o exception: return any errors or warnings in this structure. 
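%
%  Illustrative internal call (a sketch only; "png.la" is the hypothetical
%  LTDL filename that TagToCoderModuleName() would produce for the tag "png",
%  not a value taken from this file):
%
%    char path[MaxTextExtent];
%    if (GetMagickModulePath("png.la",MagickImageCoderModule,path,
%        exception) != MagickFalse)
%      (void) LogMagickEvent(ModuleEvent,GetMagickModule(),
%        "coder module found at \"%s\"",path);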
% */ static MagickBooleanType GetMagickModulePath(const char *filename, MagickModuleType module_type,char *path,ExceptionInfo *exception) { char *module_path; assert(filename != (const char *) NULL); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",filename); assert(path != (char *) NULL); assert(exception != (ExceptionInfo *) NULL); (void) CopyMagickString(path,filename,MaxTextExtent); #if defined(MAGICKCORE_INSTALLED_SUPPORT) if (strstr(path,"../") != (char *) NULL) { errno=EPERM; (void) ThrowMagickException(exception,GetMagickModule(),PolicyError, "NotAuthorized","`%s'",path); return(MagickFalse); } #endif module_path=(char *) NULL; switch (module_type) { case MagickImageCoderModule: default: { (void) LogMagickEvent(ModuleEvent,GetMagickModule(), "Searching for coder module file \"%s\" ...",filename); module_path=GetEnvironmentValue("MAGICK_CODER_MODULE_PATH"); #if defined(MAGICKCORE_CODER_PATH) if (module_path == (char *) NULL) module_path=AcquireString(MAGICKCORE_CODER_PATH); #endif break; } case MagickImageFilterModule: { (void) LogMagickEvent(ModuleEvent,GetMagickModule(), "Searching for filter module file \"%s\" ...",filename); module_path=GetEnvironmentValue("MAGICK_CODER_FILTER_PATH"); #if defined(MAGICKCORE_FILTER_PATH) if (module_path == (char *) NULL) module_path=AcquireString(MAGICKCORE_FILTER_PATH); #endif break; } } if (module_path != (char *) NULL) { register char *p, *q; for (p=module_path-1; p != (char *) NULL; ) { (void) CopyMagickString(path,p+1,MaxTextExtent); q=strchr(path,DirectoryListSeparator); if (q != (char *) NULL) *q='\0'; q=path+strlen(path)-1; if ((q >= path) && (*q != *DirectorySeparator)) (void) ConcatenateMagickString(path,DirectorySeparator,MaxTextExtent); (void) ConcatenateMagickString(path,filename,MaxTextExtent); if (IsPathAccessible(path) != MagickFalse) { module_path=DestroyString(module_path); return(MagickTrue); } p=strchr(p+1,DirectoryListSeparator); } module_path=DestroyString(module_path); } #if defined(MAGICKCORE_INSTALLED_SUPPORT) else #if defined(MAGICKCORE_CODER_PATH) { const char *directory; /* Search hard coded paths. */ switch (module_type) { case MagickImageCoderModule: default: { directory=MAGICKCORE_CODER_PATH; break; } case MagickImageFilterModule: { directory=MAGICKCORE_FILTER_PATH; break; } } (void) FormatLocaleString(path,MaxTextExtent,"%s%s",directory,filename); if (IsPathAccessible(path) == MagickFalse) { ThrowFileException(exception,ConfigureWarning, "UnableToOpenModuleFile",path); return(MagickFalse); } return(MagickTrue); } #else #if defined(MAGICKCORE_WINDOWS_SUPPORT) { const char *registery_key; unsigned char *key_value; /* Locate path via registry key. 
*/ switch (module_type) { case MagickImageCoderModule: default: { registery_key="CoderModulesPath"; break; } case MagickImageFilterModule: { registery_key="FilterModulesPath"; break; } } key_value=NTRegistryKeyLookup(registery_key); if (key_value == (unsigned char *) NULL) { ThrowMagickException(exception,GetMagickModule(),ConfigureError, "RegistryKeyLookupFailed","`%s'",registery_key); return(MagickFalse); } (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",(char *) key_value, DirectorySeparator,filename); key_value=(unsigned char *) RelinquishMagickMemory(key_value); if (IsPathAccessible(path) == MagickFalse) { ThrowFileException(exception,ConfigureWarning, "UnableToOpenModuleFile",path); return(MagickFalse); } return(MagickTrue); } #endif #endif #if !defined(MAGICKCORE_CODER_PATH) && !defined(MAGICKCORE_WINDOWS_SUPPORT) # error MAGICKCORE_CODER_PATH or MAGICKCORE_WINDOWS_SUPPORT must be defined when MAGICKCORE_INSTALLED_SUPPORT is defined #endif #else { char *home; home=GetEnvironmentValue("MAGICK_HOME"); if (home != (char *) NULL) { /* Search MAGICK_HOME. */ #if !defined(MAGICKCORE_POSIX_SUPPORT) (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",home, DirectorySeparator,filename); #else const char *directory; switch (module_type) { case MagickImageCoderModule: default: { directory=MAGICKCORE_CODER_RELATIVE_PATH; break; } case MagickImageFilterModule: { directory=MAGICKCORE_FILTER_RELATIVE_PATH; break; } } (void) FormatLocaleString(path,MaxTextExtent,"%s/lib/%s/%s",home, directory,filename); #endif home=DestroyString(home); if (IsPathAccessible(path) != MagickFalse) return(MagickTrue); } } if (*GetClientPath() != '\0') { /* Search based on executable directory. */ #if !defined(MAGICKCORE_POSIX_SUPPORT) (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",GetClientPath(), DirectorySeparator,filename); #else char prefix[MaxTextExtent]; const char *directory; switch (module_type) { case MagickImageCoderModule: default: { directory="coders"; break; } case MagickImageFilterModule: { directory="filters"; break; } } (void) CopyMagickString(prefix,GetClientPath(),MaxTextExtent); ChopPathComponents(prefix,1); (void) FormatLocaleString(path,MaxTextExtent,"%s/lib/%s/%s/%s",prefix, MAGICKCORE_MODULES_RELATIVE_PATH,directory,filename); #endif if (IsPathAccessible(path) != MagickFalse) return(MagickTrue); } #if defined(MAGICKCORE_WINDOWS_SUPPORT) { /* Search module path. */ if ((NTGetModulePath("CORE_RL_magick_.dll",path) != MagickFalse) || (NTGetModulePath("CORE_DB_magick_.dll",path) != MagickFalse) || (NTGetModulePath("Magick.dll",path) != MagickFalse)) { (void) ConcatenateMagickString(path,DirectorySeparator,MaxTextExtent); (void) ConcatenateMagickString(path,filename,MaxTextExtent); if (IsPathAccessible(path) != MagickFalse) return(MagickTrue); } } #endif { char *home; home=GetEnvironmentValue("XDG_CONFIG_HOME"); if (home == (char *) NULL) home=GetEnvironmentValue("LOCALAPPDATA"); if (home == (char *) NULL) home=GetEnvironmentValue("APPDATA"); if (home == (char *) NULL) home=GetEnvironmentValue("USERPROFILE"); if (home != (char *) NULL) { /* Search $XDG_CONFIG_HOME/ImageMagick. */ (void) FormatLocaleString(path,MaxTextExtent,"%s%sImageMagick%s%s", home,DirectorySeparator,DirectorySeparator,filename); home=DestroyString(home); if (IsPathAccessible(path) != MagickFalse) return(MagickTrue); } home=GetEnvironmentValue("HOME"); if (home != (char *) NULL) { /* Search $HOME/.config/ImageMagick. 
*/ (void) FormatLocaleString(path,MaxTextExtent, "%s%s.config%sImageMagick%s%s",home,DirectorySeparator, DirectorySeparator,DirectorySeparator,filename); if (IsPathAccessible(path) != MagickFalse) { home=DestroyString(home); return(MagickTrue); } /* Search $HOME/.magick. */ (void) FormatLocaleString(path,MaxTextExtent,"%s%s.magick%s%s",home, DirectorySeparator,DirectorySeparator,filename); home=DestroyString(home); if (IsPathAccessible(path) != MagickFalse) return(MagickTrue); } } /* Search current directory. */ if (IsPathAccessible(path) != MagickFalse) return(MagickTrue); if (exception->severity < ConfigureError) ThrowFileException(exception,ConfigureWarning,"UnableToOpenModuleFile", path); #endif return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s M o d u l e T r e e I n s t a n t i a t e d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsModuleTreeInstantiated() determines if the module tree is instantiated. % If not, it instantiates the tree and returns it. % % The format of the IsModuleTreeInstantiated() method is: % % MagickBooleanType IsModuleTreeInstantiated(Exceptioninfo *exception) % % A description of each parameter follows. % % o exception: return any errors or warnings in this structure. % */ static void *DestroyModuleNode(void *module_info) { ExceptionInfo *exception; register ModuleInfo *p; exception=AcquireExceptionInfo(); p=(ModuleInfo *) module_info; if (UnregisterModule(p,exception) == MagickFalse) CatchException(exception); if (p->tag != (char *) NULL) p->tag=DestroyString(p->tag); if (p->path != (char *) NULL) p->path=DestroyString(p->path); exception=DestroyExceptionInfo(exception); return(RelinquishMagickMemory(p)); } static MagickBooleanType IsModuleTreeInstantiated( ExceptionInfo *magick_unused(exception)) { magick_unreferenced(exception); if (module_list == (SplayTreeInfo *) NULL) { if (module_semaphore == (SemaphoreInfo *) NULL) ActivateSemaphoreInfo(&module_semaphore); LockSemaphoreInfo(module_semaphore); if (module_list == (SplayTreeInfo *) NULL) { MagickBooleanType status; ModuleInfo *module_info; module_list=NewSplayTree(CompareSplayTreeString, (void *(*)(void *)) NULL,DestroyModuleNode); if (module_list == (SplayTreeInfo *) NULL) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); module_info=AcquireModuleInfo((const char *) NULL,"[boot-strap]"); module_info->stealth=MagickTrue; status=AddValueToSplayTree(module_list,module_info->tag,module_info); if (status == MagickFalse) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); if (lt_dlinit() != 0) ThrowFatalException(ModuleFatalError, "UnableToInitializeModuleLoader"); } UnlockSemaphoreInfo(module_semaphore); } return(module_list != (SplayTreeInfo *) NULL ? MagickTrue : MagickFalse); } MagickExport MagickBooleanType InitializeModuleList(ExceptionInfo *exception) { return(IsModuleTreeInstantiated(exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n v o k e D y n a m i c I m a g e F i l t e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InvokeDynamicImageFilter() invokes a dynamic image filter. 
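%
%  A hedged calling sketch (illustrative only; the "analyze" filter tag and
%  the single-element argument vector are assumptions, not taken from this
%  header):
%
%    const char *argv[1] = { "analyze" };
%    MagickBooleanType ok = InvokeDynamicImageFilter("analyze",&images,1,argv,
%      exception);
%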
% % The format of the InvokeDynamicImageFilter module is: % % MagickBooleanType InvokeDynamicImageFilter(const char *tag,Image **image, % const int argc,const char **argv,ExceptionInfo *exception) % % A description of each parameter follows: % % o tag: a character string that represents the name of the particular % module. % % o image: the image. % % o argc: a pointer to an integer describing the number of elements in the % argument vector. % % o argv: a pointer to a text array containing the command line arguments. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType InvokeDynamicImageFilter(const char *tag, Image **images,const int argc,const char **argv,ExceptionInfo *exception) { char name[MaxTextExtent], path[MaxTextExtent]; ImageFilterHandler *image_filter; MagickBooleanType status; ModuleHandle handle; PolicyRights rights; /* Find the module. */ assert(images != (Image **) NULL); assert((*images)->signature == MagickSignature); if ((*images)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", (*images)->filename); #if !defined(MAGICKCORE_BUILD_MODULES) { MagickBooleanType status; status=InvokeStaticImageFilter(tag,images,argc,argv,exception); if (status != MagickFalse) return(status); } #endif rights=ReadPolicyRights; if (IsRightsAuthorized(FilterPolicyDomain,rights,tag) == MagickFalse) { errno=EPERM; (void) ThrowMagickException(exception,GetMagickModule(),PolicyError, "NotAuthorized","`%s'",tag); return(MagickFalse); } TagToFilterModuleName(tag,name); status=GetMagickModulePath(name,MagickImageFilterModule,path,exception); if (status == MagickFalse) { (void) ThrowMagickException(exception,GetMagickModule(),ModuleError, "UnableToLoadModule","`%s': %s",name,path); return(MagickFalse); } /* Open the module. */ handle=(ModuleHandle) lt_dlopen(path); if (handle == (ModuleHandle) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ModuleError, "UnableToLoadModule","`%s': %s",name,lt_dlerror()); return(MagickFalse); } /* Locate the module. */ #if !defined(MAGICKCORE_NAMESPACE_PREFIX) (void) FormatLocaleString(name,MaxTextExtent,"%sImage",tag); #else (void) FormatLocaleString(name,MaxTextExtent,"%s%sImage", MAGICKCORE_NAMESPACE_PREFIX,tag); #endif /* Execute the module. */ ClearMagickException(exception); image_filter=(ImageFilterHandler *) lt_dlsym(handle,name); if (image_filter == (ImageFilterHandler *) NULL) (void) ThrowMagickException(exception,GetMagickModule(),ModuleError, "UnableToLoadModule","`%s': %s",name,lt_dlerror()); else { size_t signature; if ((*images)->debug != MagickFalse) (void) LogMagickEvent(ModuleEvent,GetMagickModule(), "Invoking \"%s\" dynamic image filter",tag); signature=image_filter(images,argc,argv,exception); if ((*images)->debug != MagickFalse) (void) LogMagickEvent(ModuleEvent,GetMagickModule(),"\"%s\" completes", tag); if (signature != MagickImageFilterSignature) (void) ThrowMagickException(exception,GetMagickModule(),ModuleError, "ImageFilterSignatureMismatch","`%s': %8lx != %8lx",tag, (unsigned long) signature,(unsigned long) MagickImageFilterSignature); } /* Close the module. */ if (lt_dlclose(handle) != 0) (void) ThrowMagickException(exception,GetMagickModule(),ModuleWarning, "UnableToCloseModule","`%s': %s",name,lt_dlerror()); return(exception->severity < ErrorException ? 
MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i s t M o d u l e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ListModuleInfo() lists the module info to a file. % % The format of the ListModuleInfo module is: % % MagickBooleanType ListModuleInfo(FILE *file,ExceptionInfo *exception) % % A description of each parameter follows. % % o file: An pointer to a FILE. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ListModuleInfo(FILE *file, ExceptionInfo *exception) { char filename[MaxTextExtent], module_path[MaxTextExtent], **modules, path[MaxTextExtent]; register ssize_t i; size_t number_modules; if (file == (const FILE *) NULL) file=stdout; /* List image coders. */ modules=GetModuleList("*",MagickImageCoderModule,&number_modules,exception); if (modules == (char **) NULL) return(MagickFalse); TagToCoderModuleName("magick",filename); (void) GetMagickModulePath(filename,MagickImageCoderModule,module_path, exception); GetPathComponent(module_path,HeadPath,path); (void) FormatLocaleFile(file,"\nPath: %s\n\n",path); (void) FormatLocaleFile(file,"Image Coder\n"); (void) FormatLocaleFile(file, "-------------------------------------------------" "------------------------------\n"); for (i=0; i < (ssize_t) number_modules; i++) { (void) FormatLocaleFile(file,"%s",modules[i]); (void) FormatLocaleFile(file,"\n"); } (void) fflush(file); /* Relinquish resources. */ for (i=0; i < (ssize_t) number_modules; i++) modules[i]=DestroyString(modules[i]); modules=(char **) RelinquishMagickMemory(modules); /* List image filters. */ modules=GetModuleList("*",MagickImageFilterModule,&number_modules,exception); if (modules == (char **) NULL) return(MagickFalse); TagToFilterModuleName("analyze",filename); (void) GetMagickModulePath(filename,MagickImageFilterModule,module_path, exception); GetPathComponent(module_path,HeadPath,path); (void) FormatLocaleFile(file,"\nPath: %s\n\n",path); (void) FormatLocaleFile(file,"Image Filter\n"); (void) FormatLocaleFile(file, "-------------------------------------------------" "------------------------------\n"); for (i=0; i < (ssize_t) number_modules; i++) { (void) FormatLocaleFile(file,"%s",modules[i]); (void) FormatLocaleFile(file,"\n"); } (void) fflush(file); /* Relinquish resources. */ for (i=0; i < (ssize_t) number_modules; i++) modules[i]=DestroyString(modules[i]); modules=(char **) RelinquishMagickMemory(modules); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o d u l e C o m p o n e n t G e n e s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModuleComponentGenesis() instantiates the module component. 
% % The format of the ModuleComponentGenesis method is: % % MagickBooleanType ModuleComponentGenesis(void) % */ MagickExport MagickBooleanType ModuleComponentGenesis(void) { ExceptionInfo *exception; MagickBooleanType status; if (module_semaphore == (SemaphoreInfo *) NULL) module_semaphore=AllocateSemaphoreInfo(); exception=AcquireExceptionInfo(); status=IsModuleTreeInstantiated(exception); exception=DestroyExceptionInfo(exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o d u l e C o m p o n e n t T e r m i n u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModuleComponentTerminus() destroys the module component. % % The format of the ModuleComponentTerminus method is: % % ModuleComponentTerminus(void) % */ MagickExport void ModuleComponentTerminus(void) { if (module_semaphore == (SemaphoreInfo *) NULL) ActivateSemaphoreInfo(&module_semaphore); DestroyModuleList(); DestroySemaphoreInfo(&module_semaphore); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O p e n M o d u l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpenModule() loads a module, and invokes its registration module. It % returns MagickTrue on success, and MagickFalse if there is an error. % % The format of the OpenModule module is: % % MagickBooleanType OpenModule(const char *module,ExceptionInfo *exception) % % A description of each parameter follows: % % o module: a character string that indicates the module to load. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType OpenModule(const char *module, ExceptionInfo *exception) { char filename[MaxTextExtent], module_name[MaxTextExtent], name[MaxTextExtent], path[MaxTextExtent]; MagickBooleanType status; ModuleHandle handle; ModuleInfo *module_info; register const CoderInfo *p; size_t signature; /* Assign module name from alias. */ assert(module != (const char *) NULL); module_info=(ModuleInfo *) GetModuleInfo(module,exception); if (module_info != (ModuleInfo *) NULL) return(MagickTrue); (void) CopyMagickString(module_name,module,MaxTextExtent); p=GetCoderInfo(module,exception); if (p != (CoderInfo *) NULL) (void) CopyMagickString(module_name,p->name,MaxTextExtent); if (GetValueFromSplayTree(module_list,module_name) != (void *) NULL) return(MagickTrue); /* module already opened, return */ /* Locate module. */ handle=(ModuleHandle) NULL; TagToCoderModuleName(module_name,filename); (void) LogMagickEvent(ModuleEvent,GetMagickModule(), "Searching for module \"%s\" using filename \"%s\"",module_name,filename); *path='\0'; status=GetMagickModulePath(filename,MagickImageCoderModule,path,exception); if (status == MagickFalse) return(MagickFalse); /* Load module */ (void) LogMagickEvent(ModuleEvent,GetMagickModule(), "Opening module at path \"%s\"",path); handle=(ModuleHandle) lt_dlopen(path); if (handle == (ModuleHandle) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ModuleError, "UnableToLoadModule","`%s': %s",path,lt_dlerror()); return(MagickFalse); } /* Register module. */ module_info=AcquireModuleInfo(path,module_name); module_info->handle=handle; if (RegisterModule(module_info,exception) == (ModuleInfo *) NULL) return(MagickFalse); /* Define RegisterFORMATImage method. 
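    For a hypothetical tag "png" (and no MAGICKCORE_NAMESPACE_PREFIX) the
    symbol resolved below is "RegisterPNGImage"; TagToModuleName() upper-cases
    the tag before formatting it into the supplied pattern.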
*/ TagToModuleName(module_name,"Register%sImage",name); module_info->register_module=(size_t (*)(void)) lt_dlsym(handle,name); if (module_info->register_module == (size_t (*)(void)) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ModuleError, "UnableToRegisterImageFormat","`%s': %s",module_name,lt_dlerror()); return(MagickFalse); } (void) LogMagickEvent(ModuleEvent,GetMagickModule(), "Method \"%s\" in module \"%s\" at address %p",name,module_name, (void *) module_info->register_module); /* Define UnregisterFORMATImage method. */ TagToModuleName(module_name,"Unregister%sImage",name); module_info->unregister_module=(void (*)(void)) lt_dlsym(handle,name); if (module_info->unregister_module == (void (*)(void)) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ModuleError, "UnableToRegisterImageFormat","`%s': %s",module_name,lt_dlerror()); return(MagickFalse); } (void) LogMagickEvent(ModuleEvent,GetMagickModule(), "Method \"%s\" in module \"%s\" at address %p",name,module_name, (void *) module_info->unregister_module); signature=module_info->register_module(); if (signature != MagickImageCoderSignature) { (void) ThrowMagickException(exception,GetMagickModule(),ModuleError, "ImageCoderSignatureMismatch","`%s': %8lx != %8lx",module_name, (unsigned long) signature,(unsigned long) MagickImageCoderSignature); return(MagickFalse); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O p e n M o d u l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpenModules() loads all available modules. % % The format of the OpenModules module is: % % MagickBooleanType OpenModules(ExceptionInfo *exception) % % A description of each parameter follows: % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType OpenModules(ExceptionInfo *exception) { char **modules; register ssize_t i; size_t number_modules; /* Load all modules. */ (void) GetMagickInfo((char *) NULL,exception); number_modules=0; modules=GetModuleList("*",MagickImageCoderModule,&number_modules,exception); if ((modules == (char **) NULL) || (*modules == (char *) NULL)) { if (modules != (char **) NULL) modules=(char **) RelinquishMagickMemory(modules); return(MagickFalse); } for (i=0; i < (ssize_t) number_modules; i++) (void) OpenModule(modules[i],exception); /* Relinquish resources. */ for (i=0; i < (ssize_t) number_modules; i++) modules[i]=DestroyString(modules[i]); modules=(char **) RelinquishMagickMemory(modules); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r M o d u l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterModule() adds an entry to the module list. It returns a pointer to % the registered entry on success. % % The format of the RegisterModule module is: % % ModuleInfo *RegisterModule(const ModuleInfo *module_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o info: a pointer to the registered entry is returned. % % o module_info: a pointer to the ModuleInfo structure to register. % % o exception: return any errors or warnings in this structure. 
% */ static const ModuleInfo *RegisterModule(const ModuleInfo *module_info, ExceptionInfo *exception) { MagickBooleanType status; assert(module_info != (ModuleInfo *) NULL); assert(module_info->signature == MagickSignature); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",module_info->tag); if (module_list == (SplayTreeInfo *) NULL) return((const ModuleInfo *) NULL); status=AddValueToSplayTree(module_list,module_info->tag,module_info); if (status == MagickFalse) (void) ThrowMagickException(exception,GetMagickModule(),ResourceLimitError, "MemoryAllocationFailed","`%s'",module_info->tag); return(module_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T a g T o C o d e r M o d u l e N a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TagToCoderModuleName() munges a module tag and obtains the filename of the % corresponding module. % % The format of the TagToCoderModuleName module is: % % char *TagToCoderModuleName(const char *tag,char *name) % % A description of each parameter follows: % % o tag: a character string representing the module tag. % % o name: return the module name here. % */ static void TagToCoderModuleName(const char *tag,char *name) { assert(tag != (char *) NULL); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",tag); assert(name != (char *) NULL); #if defined(MAGICKCORE_LTDL_DELEGATE) (void) FormatLocaleString(name,MaxTextExtent,"%s.la",tag); (void) LocaleLower(name); #else #if defined(MAGICKCORE_WINDOWS_SUPPORT) if (LocaleNCompare("IM_MOD_",tag,7) == 0) (void) CopyMagickString(name,tag,MaxTextExtent); else { #if defined(_DEBUG) (void) FormatLocaleString(name,MaxTextExtent,"IM_MOD_DB_%s_.dll",tag); #else (void) FormatLocaleString(name,MaxTextExtent,"IM_MOD_RL_%s_.dll",tag); #endif } #endif #endif } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T a g T o F i l t e r M o d u l e N a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TagToFilterModuleName() munges a module tag and returns the filename of the % corresponding filter module. % % The format of the TagToFilterModuleName module is: % % void TagToFilterModuleName(const char *tag,char name) % % A description of each parameter follows: % % o tag: a character string representing the module tag. % % o name: return the filter name here. % */ static void TagToFilterModuleName(const char *tag,char *name) { assert(tag != (char *) NULL); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",tag); assert(name != (char *) NULL); #if !defined(MAGICKCORE_LTDL_DELEGATE) (void) FormatLocaleString(name,MaxTextExtent,"%s.dll",tag); #else (void) FormatLocaleString(name,MaxTextExtent,"%s.la",tag); #endif } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T a g T o M o d u l e N a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TagToModuleName() munges the module tag name and returns an upper-case tag % name as the input string, and a user-provided format. % % The format of the TagToModuleName module is: % % TagToModuleName(const char *tag,const char *format,char *module) % % A description of each parameter follows: % % o tag: the module tag. % % o format: a sprintf-compatible format string containing %s where the % upper-case tag name is to be inserted. 
% % o module: pointer to a destination buffer for the formatted result. % */ static void TagToModuleName(const char *tag,const char *format,char *module) { char name[MaxTextExtent]; assert(tag != (const char *) NULL); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",tag); assert(format != (const char *) NULL); assert(module != (char *) NULL); (void) CopyMagickString(name,tag,MaxTextExtent); LocaleUpper(name); #if !defined(MAGICKCORE_NAMESPACE_PREFIX) (void) FormatLocaleString(module,MaxTextExtent,format,name); #else { char prefix_format[MaxTextExtent]; (void) FormatLocaleString(prefix_format,MaxTextExtent,"%s%s", MAGICKCORE_NAMESPACE_PREFIX,format); (void) FormatLocaleString(module,MaxTextExtent,prefix_format,name); } #endif } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r M o d u l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterModule() unloads a module, and invokes its de-registration module. % Returns MagickTrue on success, and MagickFalse if there is an error. % % The format of the UnregisterModule module is: % % MagickBooleanType UnregisterModule(const ModuleInfo *module_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o module_info: the module info. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType UnregisterModule(const ModuleInfo *module_info, ExceptionInfo *exception) { /* Locate and execute UnregisterFORMATImage module. */ assert(module_info != (const ModuleInfo *) NULL); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",module_info->tag); assert(exception != (ExceptionInfo *) NULL); if (module_info->unregister_module == NULL) return(MagickTrue); module_info->unregister_module(); if (lt_dlclose((ModuleHandle) module_info->handle) != 0) { (void) ThrowMagickException(exception,GetMagickModule(),ModuleWarning, "UnableToCloseModule","`%s': %s",module_info->tag,lt_dlerror()); return(MagickFalse); } return(MagickTrue); } #else #if !defined(MAGICKCORE_BUILD_MODULES) extern size_t analyzeImage(Image **,const int,const char **,ExceptionInfo *); #endif MagickExport MagickBooleanType ListModuleInfo(FILE *magick_unused(file), ExceptionInfo *magick_unused(exception)) { magick_unreferenced(file); magick_unreferenced(exception); return(MagickTrue); } MagickExport MagickBooleanType InvokeDynamicImageFilter(const char *tag, Image **image,const int argc,const char **argv,ExceptionInfo *exception) { PolicyRights rights; assert(image != (Image **) NULL); assert((*image)->signature == MagickSignature); if ((*image)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename); rights=ReadPolicyRights; if (IsRightsAuthorized(FilterPolicyDomain,rights,tag) == MagickFalse) { errno=EPERM; (void) ThrowMagickException(exception,GetMagickModule(),PolicyError, "NotAuthorized","`%s'",tag); return(MagickFalse); } #if defined(MAGICKCORE_BUILD_MODULES) (void) tag; (void) argc; (void) argv; (void) exception; #else { ImageFilterHandler *image_filter; image_filter=(ImageFilterHandler *) NULL; if (LocaleCompare("analyze",tag) == 0) image_filter=(ImageFilterHandler *) analyzeImage; if (image_filter == (ImageFilterHandler *) NULL) (void) ThrowMagickException(exception,GetMagickModule(),ModuleError, "UnableToLoadModule","`%s'",tag); else { size_t signature; if ((*image)->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Invoking \"%s\" 
static image filter",tag); signature=image_filter(image,argc,argv,exception); if ((*image)->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"\"%s\" completes", tag); if (signature != MagickImageFilterSignature) { (void) ThrowMagickException(exception,GetMagickModule(),ModuleError, "ImageFilterSignatureMismatch","`%s': %8lx != %8lx",tag, (unsigned long) signature,(unsigned long) MagickImageFilterSignature); return(MagickFalse); } } } #endif return(MagickTrue); } #endif
./CrossVul/dataset_final_sorted/CWE-400/c/good_4773_1
crossvul-cpp_data_good_5356_5
/* * IPV6 GSO/GRO offload support * Linux INET6 implementation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/socket.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/printk.h> #include <net/protocol.h> #include <net/ipv6.h> #include "ip6_offload.h" static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto) { const struct net_offload *ops = NULL; for (;;) { struct ipv6_opt_hdr *opth; int len; if (proto != NEXTHDR_HOP) { ops = rcu_dereference(inet6_offloads[proto]); if (unlikely(!ops)) break; if (!(ops->flags & INET6_PROTO_GSO_EXTHDR)) break; } if (unlikely(!pskb_may_pull(skb, 8))) break; opth = (void *)skb->data; len = ipv6_optlen(opth); if (unlikely(!pskb_may_pull(skb, len))) break; opth = (void *)skb->data; proto = opth->nexthdr; __skb_pull(skb, len); } return proto; } static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); struct ipv6hdr *ipv6h; const struct net_offload *ops; int proto; struct frag_hdr *fptr; unsigned int unfrag_ip6hlen; u8 *prevhdr; int offset = 0; bool encap, udpfrag; int nhoff; if (unlikely(skb_shinfo(skb)->gso_type & ~(SKB_GSO_TCPV4 | SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_TCP_ECN | SKB_GSO_GRE | SKB_GSO_GRE_CSUM | SKB_GSO_IPIP | SKB_GSO_SIT | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM | SKB_GSO_TUNNEL_REMCSUM | SKB_GSO_TCPV6 | 0))) goto out; skb_reset_network_header(skb); nhoff = skb_network_header(skb) - skb_mac_header(skb); if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) goto out; encap = SKB_GSO_CB(skb)->encap_level > 0; if (encap) features &= skb->dev->hw_enc_features; SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h); ipv6h = ipv6_hdr(skb); __skb_pull(skb, sizeof(*ipv6h)); segs = ERR_PTR(-EPROTONOSUPPORT); proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); if (skb->encapsulation && skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP)) udpfrag = proto == IPPROTO_UDP && encap; else udpfrag = proto == IPPROTO_UDP && !skb->encapsulation; ops = rcu_dereference(inet6_offloads[proto]); if (likely(ops && ops->callbacks.gso_segment)) { skb_reset_transport_header(skb); segs = ops->callbacks.gso_segment(skb, features); } if (IS_ERR(segs)) goto out; for (skb = segs; skb; skb = skb->next) { ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff); ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h)); skb->network_header = (u8 *)ipv6h - skb->head; if (udpfrag) { unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen); fptr->frag_off = htons(offset); if (skb->next) fptr->frag_off |= htons(IP6_MF); offset += (ntohs(ipv6h->payload_len) - sizeof(struct frag_hdr)); } if (encap) skb_reset_inner_headers(skb); } out: return segs; } /* Return the total length of all the extension hdrs, following the same * logic in ipv6_gso_pull_exthdrs() when parsing ext-hdrs. 
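 *
 * Illustrative walk (an assumed typical packet, not code from this file):
 * for a header chain of NEXTHDR_HOP -> NEXTHDR_DEST -> IPPROTO_TCP, with the
 * destination-options offload registered by ipv6_exthdrs_offload_init(), the
 * loop sums ipv6_optlen() of the two extension headers and leaves *opps
 * pointing at the TCP net_offload entry.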
*/ static int ipv6_exthdrs_len(struct ipv6hdr *iph, const struct net_offload **opps) { struct ipv6_opt_hdr *opth = (void *)iph; int len = 0, proto, optlen = sizeof(*iph); proto = iph->nexthdr; for (;;) { if (proto != NEXTHDR_HOP) { *opps = rcu_dereference(inet6_offloads[proto]); if (unlikely(!(*opps))) break; if (!((*opps)->flags & INET6_PROTO_GSO_EXTHDR)) break; } opth = (void *)opth + optlen; optlen = ipv6_optlen(opth); len += optlen; proto = opth->nexthdr; } return len; } static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, struct sk_buff *skb) { const struct net_offload *ops; struct sk_buff **pp = NULL; struct sk_buff *p; struct ipv6hdr *iph; unsigned int nlen; unsigned int hlen; unsigned int off; u16 flush = 1; int proto; off = skb_gro_offset(skb); hlen = off + sizeof(*iph); iph = skb_gro_header_fast(skb, off); if (skb_gro_header_hard(skb, hlen)) { iph = skb_gro_header_slow(skb, hlen, off); if (unlikely(!iph)) goto out; } skb_set_network_header(skb, off); skb_gro_pull(skb, sizeof(*iph)); skb_set_transport_header(skb, skb_gro_offset(skb)); flush += ntohs(iph->payload_len) != skb_gro_len(skb); rcu_read_lock(); proto = iph->nexthdr; ops = rcu_dereference(inet6_offloads[proto]); if (!ops || !ops->callbacks.gro_receive) { __pskb_pull(skb, skb_gro_offset(skb)); proto = ipv6_gso_pull_exthdrs(skb, proto); skb_gro_pull(skb, -skb_transport_offset(skb)); skb_reset_transport_header(skb); __skb_push(skb, skb_gro_offset(skb)); ops = rcu_dereference(inet6_offloads[proto]); if (!ops || !ops->callbacks.gro_receive) goto out_unlock; iph = ipv6_hdr(skb); } NAPI_GRO_CB(skb)->proto = proto; flush--; nlen = skb_network_header_len(skb); for (p = *head; p; p = p->next) { const struct ipv6hdr *iph2; __be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */ if (!NAPI_GRO_CB(p)->same_flow) continue; iph2 = (struct ipv6hdr *)(p->data + off); first_word = *(__be32 *)iph ^ *(__be32 *)iph2; /* All fields must match except length and Traffic Class. * XXX skbs on the gro_list have all been parsed and pulled * already so we don't need to compare nlen * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops))) * memcmp() alone below is suffcient, right? */ if ((first_word & htonl(0xF00FFFFF)) || memcmp(&iph->nexthdr, &iph2->nexthdr, nlen - offsetof(struct ipv6hdr, nexthdr))) { NAPI_GRO_CB(p)->same_flow = 0; continue; } /* flush if Traffic Class fields are different */ NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000)); NAPI_GRO_CB(p)->flush |= flush; /* Clear flush_id, there's really no concept of ID in IPv6. 
*/ NAPI_GRO_CB(p)->flush_id = 0; } NAPI_GRO_CB(skb)->flush |= flush; skb_gro_postpull_rcsum(skb, iph, nlen); pp = ops->callbacks.gro_receive(head, skb); out_unlock: rcu_read_unlock(); out: NAPI_GRO_CB(skb)->flush |= flush; return pp; } static struct sk_buff **sit_gro_receive(struct sk_buff **head, struct sk_buff *skb) { if (NAPI_GRO_CB(skb)->encap_mark) { NAPI_GRO_CB(skb)->flush = 1; return NULL; } NAPI_GRO_CB(skb)->encap_mark = 1; return ipv6_gro_receive(head, skb); } static int ipv6_gro_complete(struct sk_buff *skb, int nhoff) { const struct net_offload *ops; struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff); int err = -ENOSYS; if (skb->encapsulation) skb_set_inner_network_header(skb, nhoff); iph->payload_len = htons(skb->len - nhoff - sizeof(*iph)); rcu_read_lock(); nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops); if (WARN_ON(!ops || !ops->callbacks.gro_complete)) goto out_unlock; err = ops->callbacks.gro_complete(skb, nhoff); out_unlock: rcu_read_unlock(); return err; } static int sit_gro_complete(struct sk_buff *skb, int nhoff) { skb->encapsulation = 1; skb_shinfo(skb)->gso_type |= SKB_GSO_SIT; return ipv6_gro_complete(skb, nhoff); } static struct packet_offload ipv6_packet_offload __read_mostly = { .type = cpu_to_be16(ETH_P_IPV6), .callbacks = { .gso_segment = ipv6_gso_segment, .gro_receive = ipv6_gro_receive, .gro_complete = ipv6_gro_complete, }, }; static const struct net_offload sit_offload = { .callbacks = { .gso_segment = ipv6_gso_segment, .gro_receive = sit_gro_receive, .gro_complete = sit_gro_complete, }, }; static int __init ipv6_offload_init(void) { if (tcpv6_offload_init() < 0) pr_crit("%s: Cannot add TCP protocol offload\n", __func__); if (udp_offload_init() < 0) pr_crit("%s: Cannot add UDP protocol offload\n", __func__); if (ipv6_exthdrs_offload_init() < 0) pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__); dev_add_offload(&ipv6_packet_offload); inet_add_offload(&sit_offload, IPPROTO_IPV6); return 0; } fs_initcall(ipv6_offload_init);
./CrossVul/dataset_final_sorted/CWE-400/c/good_5356_5
crossvul-cpp_data_bad_1234_0
// SPDX-License-Identifier: GPL-2.0+ /* Copyright (C) 2014-2018 Broadcom */ #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <linux/sched/signal.h> #include <linux/uaccess.h> #include <drm/drm_syncobj.h> #include <uapi/drm/v3d_drm.h> #include "v3d_drv.h" #include "v3d_regs.h" #include "v3d_trace.h" static void v3d_init_core(struct v3d_dev *v3d, int core) { /* Set OVRTMUOUT, which means that the texture sampler uniform * configuration's tmu output type field is used, instead of * using the hardware default behavior based on the texture * type. If you want the default behavior, you can still put * "2" in the indirect texture state's output_type field. */ if (v3d->ver < 40) V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT); /* Whenever we flush the L2T cache, we always want to flush * the whole thing. */ V3D_CORE_WRITE(core, V3D_CTL_L2TFLSTA, 0); V3D_CORE_WRITE(core, V3D_CTL_L2TFLEND, ~0); } /* Sets invariant state for the HW. */ static void v3d_init_hw_state(struct v3d_dev *v3d) { v3d_init_core(v3d, 0); } static void v3d_idle_axi(struct v3d_dev *v3d, int core) { V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ); if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) & (V3D_GMP_STATUS_RD_COUNT_MASK | V3D_GMP_STATUS_WR_COUNT_MASK | V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) { DRM_ERROR("Failed to wait for safe GMP shutdown\n"); } } static void v3d_idle_gca(struct v3d_dev *v3d) { if (v3d->ver >= 41) return; V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN); if (wait_for((V3D_GCA_READ(V3D_GCA_SAFE_SHUTDOWN_ACK) & V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED) == V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED, 100)) { DRM_ERROR("Failed to wait for safe GCA shutdown\n"); } } static void v3d_reset_by_bridge(struct v3d_dev *v3d) { int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION); if (V3D_GET_FIELD(version, V3D_TOP_GR_BRIDGE_MAJOR) == 2) { V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT); V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 0); /* GFXH-1383: The SW_INIT may cause a stray write to address 0 * of the unit, so reset it to its power-on value here. */ V3D_WRITE(V3D_HUB_AXICFG, V3D_HUB_AXICFG_MAX_LEN_MASK); } else { WARN_ON_ONCE(V3D_GET_FIELD(version, V3D_TOP_GR_BRIDGE_MAJOR) != 7); V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT); V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0); } } static void v3d_reset_v3d(struct v3d_dev *v3d) { if (v3d->reset) reset_control_reset(v3d->reset); else v3d_reset_by_bridge(v3d); v3d_init_hw_state(v3d); } void v3d_reset(struct v3d_dev *v3d) { struct drm_device *dev = &v3d->drm; DRM_DEV_ERROR(dev->dev, "Resetting GPU for hang.\n"); DRM_DEV_ERROR(dev->dev, "V3D_ERR_STAT: 0x%08x\n", V3D_CORE_READ(0, V3D_ERR_STAT)); trace_v3d_reset_begin(dev); /* XXX: only needed for safe powerdown, not reset. */ if (false) v3d_idle_axi(v3d, 0); v3d_idle_gca(v3d); v3d_reset_v3d(v3d); v3d_mmu_set_page_table(v3d); v3d_irq_reset(v3d); trace_v3d_reset_end(dev); } static void v3d_flush_l3(struct v3d_dev *v3d) { if (v3d->ver < 41) { u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL); V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL, gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH); if (v3d->ver < 33) { V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL, gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH); } } } /* Invalidates the (read-only) L2C cache. 
This was the L2 cache for * uniforms and instructions on V3D 3.2. */ static void v3d_invalidate_l2c(struct v3d_dev *v3d, int core) { if (v3d->ver > 32) return; V3D_CORE_WRITE(core, V3D_CTL_L2CACTL, V3D_L2CACTL_L2CCLR | V3D_L2CACTL_L2CENA); } /* Invalidates texture L2 cachelines */ static void v3d_flush_l2t(struct v3d_dev *v3d, int core) { /* While there is a busy bit (V3D_L2TCACTL_L2TFLS), we don't * need to wait for completion before dispatching the job -- * L2T accesses will be stalled until the flush has completed. * However, we do need to make sure we don't try to trigger a * new flush while the L2_CLEAN queue is trying to * synchronously clean after a job. */ mutex_lock(&v3d->cache_clean_lock); V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_L2TFLS | V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM)); mutex_unlock(&v3d->cache_clean_lock); } /* Cleans texture L1 and L2 cachelines (writing back dirty data). * * For cleaning, which happens from the CACHE_CLEAN queue after CSD has * executed, we need to make sure that the clean is done before * signaling job completion. So, we synchronously wait before * returning, and we make sure that L2 invalidates don't happen in the * meantime to confuse our are-we-done checks. */ void v3d_clean_caches(struct v3d_dev *v3d) { struct drm_device *dev = &v3d->drm; int core = 0; trace_v3d_cache_clean_begin(dev); V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF); if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) & V3D_L2TCACTL_L2TFLS), 100)) { DRM_ERROR("Timeout waiting for L1T write combiner flush\n"); } mutex_lock(&v3d->cache_clean_lock); V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_L2TFLS | V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAN, V3D_L2TCACTL_FLM)); if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) & V3D_L2TCACTL_L2TFLS), 100)) { DRM_ERROR("Timeout waiting for L2T clean\n"); } mutex_unlock(&v3d->cache_clean_lock); trace_v3d_cache_clean_end(dev); } /* Invalidates the slice caches. These are read-only caches. */ static void v3d_invalidate_slices(struct v3d_dev *v3d, int core) { V3D_CORE_WRITE(core, V3D_CTL_SLCACTL, V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) | V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) | V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) | V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC)); } void v3d_invalidate_caches(struct v3d_dev *v3d) { /* Invalidate the caches from the outside in. That way if * another CL's concurrent use of nearby memory were to pull * an invalidated cacheline back in, we wouldn't leave stale * data in the inner cache. */ v3d_flush_l3(v3d); v3d_invalidate_l2c(v3d, 0); v3d_flush_l2t(v3d, 0); v3d_invalidate_slices(v3d, 0); } /* Takes the reservation lock on all the BOs being referenced, so that * at queue submit time we can update the reservations. * * We don't lock the RCL the tile alloc/state BOs, or overflow memory * (all of which are on exec->unref_list). They're entirely private * to v3d, so we don't attach dma-buf fences to them. */ static int v3d_lock_bo_reservations(struct v3d_job *job, struct ww_acquire_ctx *acquire_ctx) { int i, ret; ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx); if (ret) return ret; for (i = 0; i < job->bo_count; i++) { ret = drm_gem_fence_array_add_implicit(&job->deps, job->bo[i], true); if (ret) { drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx); return ret; } } return 0; } /** * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects * referenced by the job. 
* @dev: DRM device * @file_priv: DRM file for this fd * @job: V3D job being set up * * The command validator needs to reference BOs by their index within * the submitted job's BO list. This does the validation of the job's * BO list and reference counting for the lifetime of the job. * * Note that this function doesn't need to unreference the BOs on * failure, because that will happen at v3d_exec_cleanup() time. */ static int v3d_lookup_bos(struct drm_device *dev, struct drm_file *file_priv, struct v3d_job *job, u64 bo_handles, u32 bo_count) { u32 *handles; int ret = 0; int i; job->bo_count = bo_count; if (!job->bo_count) { /* See comment on bo_index for why we have to check * this. */ DRM_DEBUG("Rendering requires BOs\n"); return -EINVAL; } job->bo = kvmalloc_array(job->bo_count, sizeof(struct drm_gem_cma_object *), GFP_KERNEL | __GFP_ZERO); if (!job->bo) { DRM_DEBUG("Failed to allocate validated BO pointers\n"); return -ENOMEM; } handles = kvmalloc_array(job->bo_count, sizeof(u32), GFP_KERNEL); if (!handles) { ret = -ENOMEM; DRM_DEBUG("Failed to allocate incoming GEM handles\n"); goto fail; } if (copy_from_user(handles, (void __user *)(uintptr_t)bo_handles, job->bo_count * sizeof(u32))) { ret = -EFAULT; DRM_DEBUG("Failed to copy in GEM handles\n"); goto fail; } spin_lock(&file_priv->table_lock); for (i = 0; i < job->bo_count; i++) { struct drm_gem_object *bo = idr_find(&file_priv->object_idr, handles[i]); if (!bo) { DRM_DEBUG("Failed to look up GEM BO %d: %d\n", i, handles[i]); ret = -ENOENT; spin_unlock(&file_priv->table_lock); goto fail; } drm_gem_object_get(bo); job->bo[i] = bo; } spin_unlock(&file_priv->table_lock); fail: kvfree(handles); return ret; } static void v3d_job_free(struct kref *ref) { struct v3d_job *job = container_of(ref, struct v3d_job, refcount); unsigned long index; struct dma_fence *fence; int i; for (i = 0; i < job->bo_count; i++) { if (job->bo[i]) drm_gem_object_put_unlocked(job->bo[i]); } kvfree(job->bo); xa_for_each(&job->deps, index, fence) { dma_fence_put(fence); } xa_destroy(&job->deps); dma_fence_put(job->irq_fence); dma_fence_put(job->done_fence); pm_runtime_mark_last_busy(job->v3d->dev); pm_runtime_put_autosuspend(job->v3d->dev); kfree(job); } static void v3d_render_job_free(struct kref *ref) { struct v3d_render_job *job = container_of(ref, struct v3d_render_job, base.refcount); struct v3d_bo *bo, *save; list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) { drm_gem_object_put_unlocked(&bo->base.base); } v3d_job_free(ref); } void v3d_job_put(struct v3d_job *job) { kref_put(&job->refcount, job->free); } int v3d_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { int ret; struct drm_v3d_wait_bo *args = data; ktime_t start = ktime_get(); u64 delta_ns; unsigned long timeout_jiffies = nsecs_to_jiffies_timeout(args->timeout_ns); if (args->pad != 0) return -EINVAL; ret = drm_gem_dma_resv_wait(file_priv, args->handle, true, timeout_jiffies); /* Decrement the user's timeout, in case we got interrupted * such that the ioctl will be restarted. */ delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start)); if (delta_ns < args->timeout_ns) args->timeout_ns -= delta_ns; else args->timeout_ns = 0; /* Asked to wait beyond the jiffie/scheduler precision? 
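	 * If so, the reservation wait returned -ETIME only because of timer
	 * granularity; report -EAGAIN so the caller retries with the
	 * remaining timeout_ns computed above.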
*/ if (ret == -ETIME && args->timeout_ns) ret = -EAGAIN; return ret; } static int v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, struct v3d_job *job, void (*free)(struct kref *ref), u32 in_sync) { struct dma_fence *in_fence = NULL; int ret; job->v3d = v3d; job->free = free; ret = pm_runtime_get_sync(v3d->dev); if (ret < 0) return ret; xa_init_flags(&job->deps, XA_FLAGS_ALLOC); ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &in_fence); if (ret == -EINVAL) goto fail; ret = drm_gem_fence_array_add(&job->deps, in_fence); if (ret) goto fail; kref_init(&job->refcount); return 0; fail: xa_destroy(&job->deps); pm_runtime_put_autosuspend(v3d->dev); return ret; } static int v3d_push_job(struct v3d_file_priv *v3d_priv, struct v3d_job *job, enum v3d_queue queue) { int ret; ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue], v3d_priv); if (ret) return ret; job->done_fence = dma_fence_get(&job->base.s_fence->finished); /* put by scheduler job completion */ kref_get(&job->refcount); drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[queue]); return 0; } static void v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv, struct v3d_job *job, struct ww_acquire_ctx *acquire_ctx, u32 out_sync, struct dma_fence *done_fence) { struct drm_syncobj *sync_out; int i; for (i = 0; i < job->bo_count; i++) { /* XXX: Use shared fences for read-only objects. */ dma_resv_add_excl_fence(job->bo[i]->resv, job->done_fence); } drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx); /* Update the return sync object for the job */ sync_out = drm_syncobj_find(file_priv, out_sync); if (sync_out) { drm_syncobj_replace_fence(sync_out, done_fence); drm_syncobj_put(sync_out); } } /** * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D. * @dev: DRM device * @data: ioctl argument * @file_priv: DRM file for this fd * * This is the main entrypoint for userspace to submit a 3D frame to * the GPU. Userspace provides the binner command list (if * applicable), and the kernel sets up the render command list to draw * to the framebuffer described in the ioctl, using the command lists * that the 3D engine's binner will produce. 
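 *
 * An illustrative userspace submission might look like the sketch below
 * (the handles, GPU addresses and syncobj are placeholders, not values
 * defined by this driver):
 *
 *	struct drm_v3d_submit_cl submit = {
 *		.bcl_start = bcl_start, .bcl_end = bcl_end,
 *		.rcl_start = rcl_start, .rcl_end = rcl_end,
 *		.qma = tile_alloc_addr, .qms = tile_alloc_size,
 *		.qts = tile_state_addr,
 *		.bo_handles = (uintptr_t)bo_handles,
 *		.bo_handle_count = bo_handle_count,
 *		.out_sync = out_syncobj,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CL, &submit);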
*/ int v3d_submit_cl_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct v3d_dev *v3d = to_v3d_dev(dev); struct v3d_file_priv *v3d_priv = file_priv->driver_priv; struct drm_v3d_submit_cl *args = data; struct v3d_bin_job *bin = NULL; struct v3d_render_job *render; struct ww_acquire_ctx acquire_ctx; int ret = 0; trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end); if (args->pad != 0) { DRM_INFO("pad must be zero: %d\n", args->pad); return -EINVAL; } render = kcalloc(1, sizeof(*render), GFP_KERNEL); if (!render) return -ENOMEM; render->start = args->rcl_start; render->end = args->rcl_end; INIT_LIST_HEAD(&render->unref_list); ret = v3d_job_init(v3d, file_priv, &render->base, v3d_render_job_free, args->in_sync_rcl); if (ret) { kfree(render); return ret; } if (args->bcl_start != args->bcl_end) { bin = kcalloc(1, sizeof(*bin), GFP_KERNEL); if (!bin) return -ENOMEM; ret = v3d_job_init(v3d, file_priv, &bin->base, v3d_job_free, args->in_sync_bcl); if (ret) { v3d_job_put(&render->base); return ret; } bin->start = args->bcl_start; bin->end = args->bcl_end; bin->qma = args->qma; bin->qms = args->qms; bin->qts = args->qts; bin->render = render; } ret = v3d_lookup_bos(dev, file_priv, &render->base, args->bo_handles, args->bo_handle_count); if (ret) goto fail; ret = v3d_lock_bo_reservations(&render->base, &acquire_ctx); if (ret) goto fail; mutex_lock(&v3d->sched_lock); if (bin) { ret = v3d_push_job(v3d_priv, &bin->base, V3D_BIN); if (ret) goto fail_unreserve; ret = drm_gem_fence_array_add(&render->base.deps, dma_fence_get(bin->base.done_fence)); if (ret) goto fail_unreserve; } ret = v3d_push_job(v3d_priv, &render->base, V3D_RENDER); if (ret) goto fail_unreserve; mutex_unlock(&v3d->sched_lock); v3d_attach_fences_and_unlock_reservation(file_priv, &render->base, &acquire_ctx, args->out_sync, render->base.done_fence); if (bin) v3d_job_put(&bin->base); v3d_job_put(&render->base); return 0; fail_unreserve: mutex_unlock(&v3d->sched_lock); drm_gem_unlock_reservations(render->base.bo, render->base.bo_count, &acquire_ctx); fail: if (bin) v3d_job_put(&bin->base); v3d_job_put(&render->base); return ret; } /** * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D. * @dev: DRM device * @data: ioctl argument * @file_priv: DRM file for this fd * * Userspace provides the register setup for the TFU, which we don't * need to validate since the TFU is behind the MMU. 
*/ int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct v3d_dev *v3d = to_v3d_dev(dev); struct v3d_file_priv *v3d_priv = file_priv->driver_priv; struct drm_v3d_submit_tfu *args = data; struct v3d_tfu_job *job; struct ww_acquire_ctx acquire_ctx; int ret = 0; trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia); job = kcalloc(1, sizeof(*job), GFP_KERNEL); if (!job) return -ENOMEM; ret = v3d_job_init(v3d, file_priv, &job->base, v3d_job_free, args->in_sync); if (ret) { kfree(job); return ret; } job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles), sizeof(*job->base.bo), GFP_KERNEL); if (!job->base.bo) { v3d_job_put(&job->base); return -ENOMEM; } job->args = *args; spin_lock(&file_priv->table_lock); for (job->base.bo_count = 0; job->base.bo_count < ARRAY_SIZE(args->bo_handles); job->base.bo_count++) { struct drm_gem_object *bo; if (!args->bo_handles[job->base.bo_count]) break; bo = idr_find(&file_priv->object_idr, args->bo_handles[job->base.bo_count]); if (!bo) { DRM_DEBUG("Failed to look up GEM BO %d: %d\n", job->base.bo_count, args->bo_handles[job->base.bo_count]); ret = -ENOENT; spin_unlock(&file_priv->table_lock); goto fail; } drm_gem_object_get(bo); job->base.bo[job->base.bo_count] = bo; } spin_unlock(&file_priv->table_lock); ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx); if (ret) goto fail; mutex_lock(&v3d->sched_lock); ret = v3d_push_job(v3d_priv, &job->base, V3D_TFU); if (ret) goto fail_unreserve; mutex_unlock(&v3d->sched_lock); v3d_attach_fences_and_unlock_reservation(file_priv, &job->base, &acquire_ctx, args->out_sync, job->base.done_fence); v3d_job_put(&job->base); return 0; fail_unreserve: mutex_unlock(&v3d->sched_lock); drm_gem_unlock_reservations(job->base.bo, job->base.bo_count, &acquire_ctx); fail: v3d_job_put(&job->base); return ret; } /** * v3d_submit_csd_ioctl() - Submits a CSD (texture formatting) job to the V3D. * @dev: DRM device * @data: ioctl argument * @file_priv: DRM file for this fd * * Userspace provides the register setup for the CSD, which we don't * need to validate since the CSD is behind the MMU. 
*/ int v3d_submit_csd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct v3d_dev *v3d = to_v3d_dev(dev); struct v3d_file_priv *v3d_priv = file_priv->driver_priv; struct drm_v3d_submit_csd *args = data; struct v3d_csd_job *job; struct v3d_job *clean_job; struct ww_acquire_ctx acquire_ctx; int ret; trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]); if (!v3d_has_csd(v3d)) { DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n"); return -EINVAL; } job = kcalloc(1, sizeof(*job), GFP_KERNEL); if (!job) return -ENOMEM; ret = v3d_job_init(v3d, file_priv, &job->base, v3d_job_free, args->in_sync); if (ret) { kfree(job); return ret; } clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL); if (!clean_job) { v3d_job_put(&job->base); kfree(job); return -ENOMEM; } ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0); if (ret) { v3d_job_put(&job->base); kfree(clean_job); return ret; } job->args = *args; ret = v3d_lookup_bos(dev, file_priv, clean_job, args->bo_handles, args->bo_handle_count); if (ret) goto fail; ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx); if (ret) goto fail; mutex_lock(&v3d->sched_lock); ret = v3d_push_job(v3d_priv, &job->base, V3D_CSD); if (ret) goto fail_unreserve; ret = drm_gem_fence_array_add(&clean_job->deps, dma_fence_get(job->base.done_fence)); if (ret) goto fail_unreserve; ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN); if (ret) goto fail_unreserve; mutex_unlock(&v3d->sched_lock); v3d_attach_fences_and_unlock_reservation(file_priv, clean_job, &acquire_ctx, args->out_sync, clean_job->done_fence); v3d_job_put(&job->base); v3d_job_put(clean_job); return 0; fail_unreserve: mutex_unlock(&v3d->sched_lock); drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count, &acquire_ctx); fail: v3d_job_put(&job->base); v3d_job_put(clean_job); return ret; } int v3d_gem_init(struct drm_device *dev) { struct v3d_dev *v3d = to_v3d_dev(dev); u32 pt_size = 4096 * 1024; int ret, i; for (i = 0; i < V3D_MAX_QUEUES; i++) v3d->queue[i].fence_context = dma_fence_context_alloc(1); spin_lock_init(&v3d->mm_lock); spin_lock_init(&v3d->job_lock); mutex_init(&v3d->bo_lock); mutex_init(&v3d->reset_lock); mutex_init(&v3d->sched_lock); mutex_init(&v3d->cache_clean_lock); /* Note: We don't allocate address 0. Various bits of HW * treat 0 as special, such as the occlusion query counters * where 0 means "disabled". */ drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1); v3d->pt = dma_alloc_wc(v3d->dev, pt_size, &v3d->pt_paddr, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); if (!v3d->pt) { drm_mm_takedown(&v3d->mm); dev_err(v3d->dev, "Failed to allocate page tables. " "Please ensure you have CMA enabled.\n"); return -ENOMEM; } v3d_init_hw_state(v3d); v3d_mmu_set_page_table(v3d); ret = v3d_sched_init(v3d); if (ret) { drm_mm_takedown(&v3d->mm); dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, v3d->pt_paddr); } return 0; } void v3d_gem_destroy(struct drm_device *dev) { struct v3d_dev *v3d = to_v3d_dev(dev); v3d_sched_fini(v3d); /* Waiting for jobs to finish would need to be done before * unregistering V3D. */ WARN_ON(v3d->bin_job); WARN_ON(v3d->render_job); drm_mm_takedown(&v3d->mm); dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, v3d->pt_paddr); }
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1234_0
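A point worth keeping in mind when reading the three submit ioctls above: once v3d_job_init() succeeds, the job is reference-counted, so every later early return has to unwind through v3d_job_put(); plain kfree() is only correct before init has taken its reference, and anything pushed to the scheduler is torn down via the fail_unreserve/fail labels in reverse order of acquisition. The sketch below is illustrative only, not part of the driver: the function name is made up, the ellipsis stands for the BO lookup/reservation/push steps, and only v3d_dev, v3d_job, v3d_job_init(), v3d_job_put() and v3d_job_free() are taken from the code above.

/* Minimal sketch of the unwind discipline the refcounted jobs call for. */
static int submit_two_jobs_sketch(struct v3d_dev *v3d,
				  struct drm_file *file_priv)
{
	struct v3d_job *first, *second;
	int ret;

	first = kcalloc(1, sizeof(*first), GFP_KERNEL);
	if (!first)
		return -ENOMEM;

	ret = v3d_job_init(v3d, file_priv, first, v3d_job_free, 0);
	if (ret) {
		kfree(first);		/* init failed: no reference was taken */
		return ret;
	}

	second = kcalloc(1, sizeof(*second), GFP_KERNEL);
	if (!second) {
		v3d_job_put(first);	/* drop the reference init took above */
		return -ENOMEM;
	}

	ret = v3d_job_init(v3d, file_priv, second, v3d_job_free, 0);
	if (ret) {
		v3d_job_put(first);
		kfree(second);		/* second's init failed, so kfree is right here */
		return ret;
	}

	/* ... look up BOs, lock reservations, push both jobs ... */

	v3d_job_put(first);
	v3d_job_put(second);
	return 0;
}

The same rule explains the ordering of the fail_unreserve and fail labels in the ioctls above: the scheduler lock and reservations are dropped first, then each job's last reference is put.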
crossvul-cpp_data_bad_1876_2
/* $Id: fpm_stdio.c,v 1.22.2.2 2008/12/13 03:32:24 anight Exp $ */ /* (c) 2007,2008 Andrei Nigmatulin */ #include "fpm_config.h" #include <sys/types.h> #include <sys/stat.h> #include <string.h> #include <fcntl.h> #include <unistd.h> #include <errno.h> #include "php_syslog.h" #include "fpm.h" #include "fpm_children.h" #include "fpm_events.h" #include "fpm_sockets.h" #include "fpm_stdio.h" #include "zlog.h" static int fd_stdout[2]; static int fd_stderr[2]; int fpm_stdio_init_main() /* {{{ */ { int fd = open("/dev/null", O_RDWR); if (0 > fd) { zlog(ZLOG_SYSERROR, "failed to init stdio: open(\"/dev/null\")"); return -1; } if (0 > dup2(fd, STDIN_FILENO) || 0 > dup2(fd, STDOUT_FILENO)) { zlog(ZLOG_SYSERROR, "failed to init stdio: dup2()"); close(fd); return -1; } close(fd); return 0; } /* }}} */ static inline int fpm_use_error_log() { /* {{{ */ /* * the error_log is NOT used when running in foreground * and from a tty (user looking at output). * So, error_log is used by * - SysV init launch php-fpm as a daemon * - Systemd launch php-fpm in foreground */ #if HAVE_UNISTD_H if (fpm_global_config.daemonize || (!isatty(STDERR_FILENO) && !fpm_globals.force_stderr)) { #else if (fpm_global_config.daemonize) { #endif return 1; } return 0; } /* }}} */ int fpm_stdio_init_final() /* {{{ */ { if (fpm_use_error_log()) { /* prevent duping if logging to syslog */ if (fpm_globals.error_log_fd > 0 && fpm_globals.error_log_fd != STDERR_FILENO) { /* there might be messages to stderr from other parts of the code, we need to log them all */ if (0 > dup2(fpm_globals.error_log_fd, STDERR_FILENO)) { zlog(ZLOG_SYSERROR, "failed to init stdio: dup2()"); return -1; } } #ifdef HAVE_SYSLOG_H else if (fpm_globals.error_log_fd == ZLOG_SYSLOG) { /* dup to /dev/null when using syslog */ dup2(STDOUT_FILENO, STDERR_FILENO); } #endif } zlog_set_launched(); return 0; } /* }}} */ int fpm_stdio_init_child(struct fpm_worker_pool_s *wp) /* {{{ */ { #ifdef HAVE_SYSLOG_H if (fpm_globals.error_log_fd == ZLOG_SYSLOG) { closelog(); /* ensure to close syslog not to interrupt with PHP syslog code */ } else #endif /* Notice: child cannot use master error_log * because not aware when being reopen * else, should use if (!fpm_use_error_log()) */ if (fpm_globals.error_log_fd > 0) { close(fpm_globals.error_log_fd); } fpm_globals.error_log_fd = -1; zlog_set_fd(-1); if (wp->listening_socket != STDIN_FILENO) { if (0 > dup2(wp->listening_socket, STDIN_FILENO)) { zlog(ZLOG_SYSERROR, "failed to init child stdio: dup2()"); return -1; } } return 0; } /* }}} */ static void fpm_stdio_child_said(struct fpm_event_s *ev, short which, void *arg) /* {{{ */ { static const int max_buf_size = 1024; int fd = ev->fd; char buf[max_buf_size]; struct fpm_child_s *child; int is_stdout; struct fpm_event_s *event; int fifo_in = 1, fifo_out = 1; int is_last_message = 0; int in_buf = 0; int res; if (!arg) { return; } child = (struct fpm_child_s *)arg; is_stdout = (fd == child->fd_stdout); if (is_stdout) { event = &child->ev_stdout; } else { event = &child->ev_stderr; } while (fifo_in || fifo_out) { if (fifo_in) { res = read(fd, buf + in_buf, max_buf_size - 1 - in_buf); if (res <= 0) { /* no data */ fifo_in = 0; if (res < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) { /* just no more data ready */ } else { /* error or pipe is closed */ if (res < 0) { /* error */ zlog(ZLOG_SYSERROR, "unable to read what child say"); } fpm_event_del(event); is_last_message = 1; if (is_stdout) { close(child->fd_stdout); child->fd_stdout = -1; } else { close(child->fd_stderr); child->fd_stderr = 
-1; } } } else { in_buf += res; } } if (fifo_out) { if (in_buf == 0) { fifo_out = 0; } else { char *nl; int should_print = 0; buf[in_buf] = '\0'; /* FIXME: there might be binary data */ /* we should print if no more space in the buffer */ if (in_buf == max_buf_size - 1) { should_print = 1; } /* we should print if no more data to come */ if (!fifo_in) { should_print = 1; } nl = strchr(buf, '\n'); if (nl || should_print) { if (nl) { *nl = '\0'; } zlog(ZLOG_WARNING, "[pool %s] child %d said into %s: \"%s\"%s", child->wp->config->name, (int) child->pid, is_stdout ? "stdout" : "stderr", buf, is_last_message ? ", pipe is closed" : ""); if (nl) { int out_buf = 1 + nl - buf; memmove(buf, buf + out_buf, in_buf - out_buf); in_buf -= out_buf; } else { in_buf = 0; } } } } } } /* }}} */ int fpm_stdio_prepare_pipes(struct fpm_child_s *child) /* {{{ */ { if (0 == child->wp->config->catch_workers_output) { /* not required */ return 0; } if (0 > pipe(fd_stdout)) { zlog(ZLOG_SYSERROR, "failed to prepare the stdout pipe"); return -1; } if (0 > pipe(fd_stderr)) { zlog(ZLOG_SYSERROR, "failed to prepare the stderr pipe"); close(fd_stdout[0]); close(fd_stdout[1]); return -1; } if (0 > fd_set_blocked(fd_stdout[0], 0) || 0 > fd_set_blocked(fd_stderr[0], 0)) { zlog(ZLOG_SYSERROR, "failed to unblock pipes"); close(fd_stdout[0]); close(fd_stdout[1]); close(fd_stderr[0]); close(fd_stderr[1]); return -1; } return 0; } /* }}} */ int fpm_stdio_parent_use_pipes(struct fpm_child_s *child) /* {{{ */ { if (0 == child->wp->config->catch_workers_output) { /* not required */ return 0; } close(fd_stdout[1]); close(fd_stderr[1]); child->fd_stdout = fd_stdout[0]; child->fd_stderr = fd_stderr[0]; fpm_event_set(&child->ev_stdout, child->fd_stdout, FPM_EV_READ, fpm_stdio_child_said, child); fpm_event_add(&child->ev_stdout, 0); fpm_event_set(&child->ev_stderr, child->fd_stderr, FPM_EV_READ, fpm_stdio_child_said, child); fpm_event_add(&child->ev_stderr, 0); return 0; } /* }}} */ int fpm_stdio_discard_pipes(struct fpm_child_s *child) /* {{{ */ { if (0 == child->wp->config->catch_workers_output) { /* not required */ return 0; } close(fd_stdout[1]); close(fd_stderr[1]); close(fd_stdout[0]); close(fd_stderr[0]); return 0; } /* }}} */ void fpm_stdio_child_use_pipes(struct fpm_child_s *child) /* {{{ */ { if (child->wp->config->catch_workers_output) { dup2(fd_stdout[1], STDOUT_FILENO); dup2(fd_stderr[1], STDERR_FILENO); close(fd_stdout[0]); close(fd_stdout[1]); close(fd_stderr[0]); close(fd_stderr[1]); } else { /* stdout of parent is always /dev/null */ dup2(STDOUT_FILENO, STDERR_FILENO); } } /* }}} */ int fpm_stdio_open_error_log(int reopen) /* {{{ */ { int fd; #ifdef HAVE_SYSLOG_H if (!strcasecmp(fpm_global_config.error_log, "syslog")) { openlog(fpm_global_config.syslog_ident, LOG_PID | LOG_CONS, fpm_global_config.syslog_facility); fpm_globals.error_log_fd = ZLOG_SYSLOG; if (fpm_use_error_log()) { zlog_set_fd(fpm_globals.error_log_fd); } return 0; } #endif fd = open(fpm_global_config.error_log, O_WRONLY | O_APPEND | O_CREAT, S_IRUSR | S_IWUSR); if (0 > fd) { zlog(ZLOG_SYSERROR, "failed to open error_log (%s)", fpm_global_config.error_log); return -1; } if (reopen) { if (fpm_use_error_log()) { dup2(fd, STDERR_FILENO); } dup2(fd, fpm_globals.error_log_fd); close(fd); fd = fpm_globals.error_log_fd; /* for FD_CLOSEXEC to work */ } else { fpm_globals.error_log_fd = fd; if (fpm_use_error_log()) { zlog_set_fd(fpm_globals.error_log_fd); } } if (0 > fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC)) { zlog(ZLOG_WARNING, "failed to change attribute 
of error_log"); } return 0; } /* }}} */
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1876_2
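fpm_stdio_prepare_pipes(), fpm_stdio_child_use_pipes() and fpm_stdio_child_said() above implement a pipe-per-stream capture: the master creates the pipes, the worker dup2()s the write ends onto its own stdout/stderr, and the master reads and logs whatever arrives on the read ends. A minimal standalone sketch of that pattern follows; it uses a single blocking pipe and a toy log line, not FPM's non-blocking event-driven reader, and every call in it is plain POSIX.

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int fd_err[2];
	char buf[1024];
	ssize_t n;
	pid_t pid;

	if (pipe(fd_err) < 0)
		return 1;

	pid = fork();
	if (pid < 0)
		return 1;

	if (pid == 0) {
		/* child: route stderr into the write end, like fpm_stdio_child_use_pipes() */
		dup2(fd_err[1], STDERR_FILENO);
		close(fd_err[0]);
		close(fd_err[1]);
		fprintf(stderr, "something went wrong\n");
		_exit(0);
	}

	/* parent: keep only the read end and log what the child said */
	close(fd_err[1]);
	while ((n = read(fd_err[0], buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		printf("child %d said into stderr: %s", (int)pid, buf);
	}
	close(fd_err[0]);
	waitpid(pid, NULL, 0);
	return 0;
}

The real code differs mainly in that the read end is made non-blocking and hooked into the fpm event loop, and that lines are split on '\n' before being passed to zlog().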
crossvul-cpp_data_good_1262_0
// SPDX-License-Identifier: GPL-2.0 /* * trace_events_filter - generic event filtering * * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> */ #include <linux/module.h> #include <linux/ctype.h> #include <linux/mutex.h> #include <linux/perf_event.h> #include <linux/slab.h> #include "trace.h" #include "trace_output.h" #define DEFAULT_SYS_FILTER_MESSAGE \ "### global filter ###\n" \ "# Use this to set filters for multiple events.\n" \ "# Only events with the given fields will be affected.\n" \ "# If no events are modified, an error message will be displayed here" /* Due to token parsing '<=' must be before '<' and '>=' must be before '>' */ #define OPS \ C( OP_GLOB, "~" ), \ C( OP_NE, "!=" ), \ C( OP_EQ, "==" ), \ C( OP_LE, "<=" ), \ C( OP_LT, "<" ), \ C( OP_GE, ">=" ), \ C( OP_GT, ">" ), \ C( OP_BAND, "&" ), \ C( OP_MAX, NULL ) #undef C #define C(a, b) a enum filter_op_ids { OPS }; #undef C #define C(a, b) b static const char * ops[] = { OPS }; /* * pred functions are OP_LE, OP_LT, OP_GE, OP_GT, and OP_BAND * pred_funcs_##type below must match the order of them above. */ #define PRED_FUNC_START OP_LE #define PRED_FUNC_MAX (OP_BAND - PRED_FUNC_START) #define ERRORS \ C(NONE, "No error"), \ C(INVALID_OP, "Invalid operator"), \ C(TOO_MANY_OPEN, "Too many '('"), \ C(TOO_MANY_CLOSE, "Too few '('"), \ C(MISSING_QUOTE, "Missing matching quote"), \ C(OPERAND_TOO_LONG, "Operand too long"), \ C(EXPECT_STRING, "Expecting string field"), \ C(EXPECT_DIGIT, "Expecting numeric field"), \ C(ILLEGAL_FIELD_OP, "Illegal operation for field type"), \ C(FIELD_NOT_FOUND, "Field not found"), \ C(ILLEGAL_INTVAL, "Illegal integer value"), \ C(BAD_SUBSYS_FILTER, "Couldn't find or set field in one of a subsystem's events"), \ C(TOO_MANY_PREDS, "Too many terms in predicate expression"), \ C(INVALID_FILTER, "Meaningless filter expression"), \ C(IP_FIELD_ONLY, "Only 'ip' field is supported for function trace"), \ C(INVALID_VALUE, "Invalid value (did you forget quotes)?"), \ C(ERRNO, "Error"), \ C(NO_FILTER, "No filter found") #undef C #define C(a, b) FILT_ERR_##a enum { ERRORS }; #undef C #define C(a, b) b static const char *err_text[] = { ERRORS }; /* Called after a '!' character but "!=" and "!~" are not "not"s */ static bool is_not(const char *str) { switch (str[1]) { case '=': case '~': return false; } return true; } /** * prog_entry - a singe entry in the filter program * @target: Index to jump to on a branch (actually one minus the index) * @when_to_branch: The value of the result of the predicate to do a branch * @pred: The predicate to execute. */ struct prog_entry { int target; int when_to_branch; struct filter_pred *pred; }; /** * update_preds- assign a program entry a label target * @prog: The program array * @N: The index of the current entry in @prog * @when_to_branch: What to assign a program entry for its branch condition * * The program entry at @N has a target that points to the index of a program * entry that can have its target and when_to_branch fields updated. * Update the current program entry denoted by index @N target field to be * that of the updated entry. 
This will denote the entry to update if * we are processing an "||" after an "&&" */ static void update_preds(struct prog_entry *prog, int N, int invert) { int t, s; t = prog[N].target; s = prog[t].target; prog[t].when_to_branch = invert; prog[t].target = N; prog[N].target = s; } struct filter_parse_error { int lasterr; int lasterr_pos; }; static void parse_error(struct filter_parse_error *pe, int err, int pos) { pe->lasterr = err; pe->lasterr_pos = pos; } typedef int (*parse_pred_fn)(const char *str, void *data, int pos, struct filter_parse_error *pe, struct filter_pred **pred); enum { INVERT = 1, PROCESS_AND = 2, PROCESS_OR = 4, }; /* * Without going into a formal proof, this explains the method that is used in * parsing the logical expressions. * * For example, if we have: "a && !(!b || (c && g)) || d || e && !f" * The first pass will convert it into the following program: * * n1: r=a; l1: if (!r) goto l4; * n2: r=b; l2: if (!r) goto l4; * n3: r=c; r=!r; l3: if (r) goto l4; * n4: r=g; r=!r; l4: if (r) goto l5; * n5: r=d; l5: if (r) goto T * n6: r=e; l6: if (!r) goto l7; * n7: r=f; r=!r; l7: if (!r) goto F * T: return TRUE * F: return FALSE * * To do this, we use a data structure to represent each of the above * predicate and conditions that has: * * predicate, when_to_branch, invert, target * * The "predicate" will hold the function to determine the result "r". * The "when_to_branch" denotes what "r" should be if a branch is to be taken * "&&" would contain "!r" or (0) and "||" would contain "r" or (1). * The "invert" holds whether the value should be reversed before testing. * The "target" contains the label "l#" to jump to. * * A stack is created to hold values when parentheses are used. * * To simplify the logic, the labels will start at 0 and not 1. * * The possible invert values are 1 and 0. The number of "!"s that are in scope * before the predicate determines the invert value, if the number is odd then * the invert value is 1 and 0 otherwise. This means the invert value only * needs to be toggled when a new "!" is introduced compared to what is stored * on the stack, where parentheses were used. * * The top of the stack and "invert" are initialized to zero. * * ** FIRST PASS ** * * #1 A loop through all the tokens is done: * * #2 If the token is an "(", the stack is push, and the current stack value * gets the current invert value, and the loop continues to the next token. * The top of the stack saves the "invert" value to keep track of what * the current inversion is. As "!(a && !b || c)" would require all * predicates being affected separately by the "!" before the parentheses. * And that would end up being equivalent to "(!a || b) && !c" * * #3 If the token is an "!", the current "invert" value gets inverted, and * the loop continues. Note, if the next token is a predicate, then * this "invert" value is only valid for the current program entry, * and does not affect other predicates later on. * * The only other acceptable token is the predicate string. * * #4 A new entry into the program is added saving: the predicate and the * current value of "invert". The target is currently assigned to the * previous program index (this will not be its final value). * * #5 We now enter another loop and look at the next token. The only valid * tokens are ")", "&&", "||" or end of the input string "\0". * * #6 The invert variable is reset to the current value saved on the top of * the stack. 
* * #7 The top of the stack holds not only the current invert value, but also * if a "&&" or "||" needs to be processed. Note, the "&&" takes higher * precedence than "||". That is "a && b || c && d" is equivalent to * "(a && b) || (c && d)". Thus the first thing to do is to see if "&&" needs * to be processed. This is the case if an "&&" was the last token. If it was * then we call update_preds(). This takes the program, the current index in * the program, and the current value of "invert". More will be described * below about this function. * * #8 If the next token is "&&" then we set a flag in the top of the stack * that denotes that "&&" needs to be processed, break out of this loop * and continue with the outer loop. * * #9 Otherwise, if a "||" needs to be processed then update_preds() is called. * This is called with the program, the current index in the program, but * this time with an inverted value of "invert" (that is !invert). This is * because the value taken will become the "when_to_branch" value of the * program. * Note, this is called when the next token is not an "&&". As stated before, * "&&" takes higher precedence, and "||" should not be processed yet if the * next logical operation is "&&". * * #10 If the next token is "||" then we set a flag in the top of the stack * that denotes that "||" needs to be processed, break out of this loop * and continue with the outer loop. * * #11 If this is the end of the input string "\0" then we break out of both * loops. * * #12 Otherwise, the next token is ")", where we pop the stack and continue * this inner loop. * * Now to discuss the update_pred() function, as that is key to the setting up * of the program. Remember the "target" of the program is initialized to the * previous index and not the "l" label. The target holds the index into the * program that gets affected by the operand. Thus if we have something like * "a || b && c", when we process "a" the target will be "-1" (undefined). * When we process "b", its target is "0", which is the index of "a", as that's * the predicate that is affected by "||". But because the next token after "b" * is "&&" we don't call update_preds(). Instead continue to "c". As the * next token after "c" is not "&&" but the end of input, we first process the * "&&" by calling update_preds() for the "&&" then we process the "||" by * callin updates_preds() with the values for processing "||". * * What does that mean? What update_preds() does is to first save the "target" * of the program entry indexed by the current program entry's "target" * (remember the "target" is initialized to previous program entry), and then * sets that "target" to the current index which represents the label "l#". * That entry's "when_to_branch" is set to the value passed in (the "invert" * or "!invert"). Then it sets the current program entry's target to the saved * "target" value (the old value of the program that had its "target" updated * to the label). 
* * Looking back at "a || b && c", we have the following steps: * "a" - prog[0] = { "a", X, -1 } // pred, when_to_branch, target * "||" - flag that we need to process "||"; continue outer loop * "b" - prog[1] = { "b", X, 0 } * "&&" - flag that we need to process "&&"; continue outer loop * (Notice we did not process "||") * "c" - prog[2] = { "c", X, 1 } * update_preds(prog, 2, 0); // invert = 0 as we are processing "&&" * t = prog[2].target; // t = 1 * s = prog[t].target; // s = 0 * prog[t].target = 2; // Set target to "l2" * prog[t].when_to_branch = 0; * prog[2].target = s; * update_preds(prog, 2, 1); // invert = 1 as we are now processing "||" * t = prog[2].target; // t = 0 * s = prog[t].target; // s = -1 * prog[t].target = 2; // Set target to "l2" * prog[t].when_to_branch = 1; * prog[2].target = s; * * #13 Which brings us to the final step of the first pass, which is to set * the last program entry's when_to_branch and target, which will be * when_to_branch = 0; target = N; ( the label after the program entry after * the last program entry processed above). * * If we denote "TRUE" to be the entry after the last program entry processed, * and "FALSE" the program entry after that, we are now done with the first * pass. * * Making the above "a || b && c" have a progam of: * prog[0] = { "a", 1, 2 } * prog[1] = { "b", 0, 2 } * prog[2] = { "c", 0, 3 } * * Which translates into: * n0: r = a; l0: if (r) goto l2; * n1: r = b; l1: if (!r) goto l2; * n2: r = c; l2: if (!r) goto l3; // Which is the same as "goto F;" * T: return TRUE; l3: * F: return FALSE * * Although, after the first pass, the program is correct, it is * inefficient. The simple sample of "a || b && c" could be easily been * converted into: * n0: r = a; if (r) goto T * n1: r = b; if (!r) goto F * n2: r = c; if (!r) goto F * T: return TRUE; * F: return FALSE; * * The First Pass is over the input string. The next too passes are over * the program itself. * * ** SECOND PASS ** * * Which brings us to the second pass. If a jump to a label has the * same condition as that label, it can instead jump to its target. * The original example of "a && !(!b || (c && g)) || d || e && !f" * where the first pass gives us: * * n1: r=a; l1: if (!r) goto l4; * n2: r=b; l2: if (!r) goto l4; * n3: r=c; r=!r; l3: if (r) goto l4; * n4: r=g; r=!r; l4: if (r) goto l5; * n5: r=d; l5: if (r) goto T * n6: r=e; l6: if (!r) goto l7; * n7: r=f; r=!r; l7: if (!r) goto F: * T: return TRUE; * F: return FALSE * * We can see that "l3: if (r) goto l4;" and at l4, we have "if (r) goto l5;". * And "l5: if (r) goto T", we could optimize this by converting l3 and l4 * to go directly to T. To accomplish this, we start from the last * entry in the program and work our way back. If the target of the entry * has the same "when_to_branch" then we could use that entry's target. * Doing this, the above would end up as: * * n1: r=a; l1: if (!r) goto l4; * n2: r=b; l2: if (!r) goto l4; * n3: r=c; r=!r; l3: if (r) goto T; * n4: r=g; r=!r; l4: if (r) goto T; * n5: r=d; l5: if (r) goto T; * n6: r=e; l6: if (!r) goto F; * n7: r=f; r=!r; l7: if (!r) goto F; * T: return TRUE * F: return FALSE * * In that same pass, if the "when_to_branch" doesn't match, we can simply * go to the program entry after the label. That is, "l2: if (!r) goto l4;" * where "l4: if (r) goto T;", then we can convert l2 to be: * "l2: if (!r) goto n5;". 
* * This will have the second pass give us: * n1: r=a; l1: if (!r) goto n5; * n2: r=b; l2: if (!r) goto n5; * n3: r=c; r=!r; l3: if (r) goto T; * n4: r=g; r=!r; l4: if (r) goto T; * n5: r=d; l5: if (r) goto T * n6: r=e; l6: if (!r) goto F; * n7: r=f; r=!r; l7: if (!r) goto F * T: return TRUE * F: return FALSE * * Notice, all the "l#" labels are no longer used, and they can now * be discarded. * * ** THIRD PASS ** * * For the third pass we deal with the inverts. As they simply just * make the "when_to_branch" get inverted, a simple loop over the * program to that does: "when_to_branch ^= invert;" will do the * job, leaving us with: * n1: r=a; if (!r) goto n5; * n2: r=b; if (!r) goto n5; * n3: r=c: if (!r) goto T; * n4: r=g; if (!r) goto T; * n5: r=d; if (r) goto T * n6: r=e; if (!r) goto F; * n7: r=f; if (r) goto F * T: return TRUE * F: return FALSE * * As "r = a; if (!r) goto n5;" is obviously the same as * "if (!a) goto n5;" without doing anything we can interperate the * program as: * n1: if (!a) goto n5; * n2: if (!b) goto n5; * n3: if (!c) goto T; * n4: if (!g) goto T; * n5: if (d) goto T * n6: if (!e) goto F; * n7: if (f) goto F * T: return TRUE * F: return FALSE * * Since the inverts are discarded at the end, there's no reason to store * them in the program array (and waste memory). A separate array to hold * the inverts is used and freed at the end. */ static struct prog_entry * predicate_parse(const char *str, int nr_parens, int nr_preds, parse_pred_fn parse_pred, void *data, struct filter_parse_error *pe) { struct prog_entry *prog_stack; struct prog_entry *prog; const char *ptr = str; char *inverts = NULL; int *op_stack; int *top; int invert = 0; int ret = -ENOMEM; int len; int N = 0; int i; nr_preds += 2; /* For TRUE and FALSE */ op_stack = kmalloc_array(nr_parens, sizeof(*op_stack), GFP_KERNEL); if (!op_stack) return ERR_PTR(-ENOMEM); prog_stack = kcalloc(nr_preds, sizeof(*prog_stack), GFP_KERNEL); if (!prog_stack) { parse_error(pe, -ENOMEM, 0); goto out_free; } inverts = kmalloc_array(nr_preds, sizeof(*inverts), GFP_KERNEL); if (!inverts) { parse_error(pe, -ENOMEM, 0); goto out_free; } top = op_stack; prog = prog_stack; *top = 0; /* First pass */ while (*ptr) { /* #1 */ const char *next = ptr++; if (isspace(*next)) continue; switch (*next) { case '(': /* #2 */ if (top - op_stack > nr_parens) { ret = -EINVAL; goto out_free; } *(++top) = invert; continue; case '!': /* #3 */ if (!is_not(next)) break; invert = !invert; continue; } if (N >= nr_preds) { parse_error(pe, FILT_ERR_TOO_MANY_PREDS, next - str); goto out_free; } inverts[N] = invert; /* #4 */ prog[N].target = N-1; len = parse_pred(next, data, ptr - str, pe, &prog[N].pred); if (len < 0) { ret = len; goto out_free; } ptr = next + len; N++; ret = -1; while (1) { /* #5 */ next = ptr++; if (isspace(*next)) continue; switch (*next) { case ')': case '\0': break; case '&': case '|': /* accepting only "&&" or "||" */ if (next[1] == next[0]) { ptr++; break; } /* fall through */ default: parse_error(pe, FILT_ERR_TOO_MANY_PREDS, next - str); goto out_free; } invert = *top & INVERT; if (*top & PROCESS_AND) { /* #7 */ update_preds(prog, N - 1, invert); *top &= ~PROCESS_AND; } if (*next == '&') { /* #8 */ *top |= PROCESS_AND; break; } if (*top & PROCESS_OR) { /* #9 */ update_preds(prog, N - 1, !invert); *top &= ~PROCESS_OR; } if (*next == '|') { /* #10 */ *top |= PROCESS_OR; break; } if (!*next) /* #11 */ goto out; if (top == op_stack) { ret = -1; /* Too few '(' */ parse_error(pe, FILT_ERR_TOO_MANY_CLOSE, ptr - str); goto out_free; } 
top--; /* #12 */ } } out: if (top != op_stack) { /* Too many '(' */ parse_error(pe, FILT_ERR_TOO_MANY_OPEN, ptr - str); goto out_free; } if (!N) { /* No program? */ ret = -EINVAL; parse_error(pe, FILT_ERR_NO_FILTER, ptr - str); goto out_free; } prog[N].pred = NULL; /* #13 */ prog[N].target = 1; /* TRUE */ prog[N+1].pred = NULL; prog[N+1].target = 0; /* FALSE */ prog[N-1].target = N; prog[N-1].when_to_branch = false; /* Second Pass */ for (i = N-1 ; i--; ) { int target = prog[i].target; if (prog[i].when_to_branch == prog[target].when_to_branch) prog[i].target = prog[target].target; } /* Third Pass */ for (i = 0; i < N; i++) { invert = inverts[i] ^ prog[i].when_to_branch; prog[i].when_to_branch = invert; /* Make sure the program always moves forward */ if (WARN_ON(prog[i].target <= i)) { ret = -EINVAL; goto out_free; } } kfree(op_stack); kfree(inverts); return prog; out_free: kfree(op_stack); kfree(inverts); if (prog_stack) { for (i = 0; prog_stack[i].pred; i++) kfree(prog_stack[i].pred); kfree(prog_stack); } return ERR_PTR(ret); } #define DEFINE_COMPARISON_PRED(type) \ static int filter_pred_LT_##type(struct filter_pred *pred, void *event) \ { \ type *addr = (type *)(event + pred->offset); \ type val = (type)pred->val; \ return *addr < val; \ } \ static int filter_pred_LE_##type(struct filter_pred *pred, void *event) \ { \ type *addr = (type *)(event + pred->offset); \ type val = (type)pred->val; \ return *addr <= val; \ } \ static int filter_pred_GT_##type(struct filter_pred *pred, void *event) \ { \ type *addr = (type *)(event + pred->offset); \ type val = (type)pred->val; \ return *addr > val; \ } \ static int filter_pred_GE_##type(struct filter_pred *pred, void *event) \ { \ type *addr = (type *)(event + pred->offset); \ type val = (type)pred->val; \ return *addr >= val; \ } \ static int filter_pred_BAND_##type(struct filter_pred *pred, void *event) \ { \ type *addr = (type *)(event + pred->offset); \ type val = (type)pred->val; \ return !!(*addr & val); \ } \ static const filter_pred_fn_t pred_funcs_##type[] = { \ filter_pred_LE_##type, \ filter_pred_LT_##type, \ filter_pred_GE_##type, \ filter_pred_GT_##type, \ filter_pred_BAND_##type, \ }; #define DEFINE_EQUALITY_PRED(size) \ static int filter_pred_##size(struct filter_pred *pred, void *event) \ { \ u##size *addr = (u##size *)(event + pred->offset); \ u##size val = (u##size)pred->val; \ int match; \ \ match = (val == *addr) ^ pred->not; \ \ return match; \ } DEFINE_COMPARISON_PRED(s64); DEFINE_COMPARISON_PRED(u64); DEFINE_COMPARISON_PRED(s32); DEFINE_COMPARISON_PRED(u32); DEFINE_COMPARISON_PRED(s16); DEFINE_COMPARISON_PRED(u16); DEFINE_COMPARISON_PRED(s8); DEFINE_COMPARISON_PRED(u8); DEFINE_EQUALITY_PRED(64); DEFINE_EQUALITY_PRED(32); DEFINE_EQUALITY_PRED(16); DEFINE_EQUALITY_PRED(8); /* Filter predicate for fixed sized arrays of characters */ static int filter_pred_string(struct filter_pred *pred, void *event) { char *addr = (char *)(event + pred->offset); int cmp, match; cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len); match = cmp ^ pred->not; return match; } /* Filter predicate for char * pointers */ static int filter_pred_pchar(struct filter_pred *pred, void *event) { char **addr = (char **)(event + pred->offset); int cmp, match; int len = strlen(*addr) + 1; /* including tailing '\0' */ cmp = pred->regex.match(*addr, &pred->regex, len); match = cmp ^ pred->not; return match; } /* * Filter predicate for dynamic sized arrays of characters. 
* These are implemented through a list of strings at the end * of the entry. * Also each of these strings have a field in the entry which * contains its offset from the beginning of the entry. * We have then first to get this field, dereference it * and add it to the address of the entry, and at last we have * the address of the string. */ static int filter_pred_strloc(struct filter_pred *pred, void *event) { u32 str_item = *(u32 *)(event + pred->offset); int str_loc = str_item & 0xffff; int str_len = str_item >> 16; char *addr = (char *)(event + str_loc); int cmp, match; cmp = pred->regex.match(addr, &pred->regex, str_len); match = cmp ^ pred->not; return match; } /* Filter predicate for CPUs. */ static int filter_pred_cpu(struct filter_pred *pred, void *event) { int cpu, cmp; cpu = raw_smp_processor_id(); cmp = pred->val; switch (pred->op) { case OP_EQ: return cpu == cmp; case OP_NE: return cpu != cmp; case OP_LT: return cpu < cmp; case OP_LE: return cpu <= cmp; case OP_GT: return cpu > cmp; case OP_GE: return cpu >= cmp; default: return 0; } } /* Filter predicate for COMM. */ static int filter_pred_comm(struct filter_pred *pred, void *event) { int cmp; cmp = pred->regex.match(current->comm, &pred->regex, TASK_COMM_LEN); return cmp ^ pred->not; } static int filter_pred_none(struct filter_pred *pred, void *event) { return 0; } /* * regex_match_foo - Basic regex callbacks * * @str: the string to be searched * @r: the regex structure containing the pattern string * @len: the length of the string to be searched (including '\0') * * Note: * - @str might not be NULL-terminated if it's of type DYN_STRING * or STATIC_STRING, unless @len is zero. */ static int regex_match_full(char *str, struct regex *r, int len) { /* len of zero means str is dynamic and ends with '\0' */ if (!len) return strcmp(str, r->pattern) == 0; return strncmp(str, r->pattern, len) == 0; } static int regex_match_front(char *str, struct regex *r, int len) { if (len && len < r->len) return 0; return strncmp(str, r->pattern, r->len) == 0; } static int regex_match_middle(char *str, struct regex *r, int len) { if (!len) return strstr(str, r->pattern) != NULL; return strnstr(str, r->pattern, len) != NULL; } static int regex_match_end(char *str, struct regex *r, int len) { int strlen = len - 1; if (strlen >= r->len && memcmp(str + strlen - r->len, r->pattern, r->len) == 0) return 1; return 0; } static int regex_match_glob(char *str, struct regex *r, int len __maybe_unused) { if (glob_match(r->pattern, str)) return 1; return 0; } /** * filter_parse_regex - parse a basic regex * @buff: the raw regex * @len: length of the regex * @search: will point to the beginning of the string to compare * @not: tell whether the match will have to be inverted * * This passes in a buffer containing a regex and this function will * set search to point to the search part of the buffer and * return the type of search it is (see enum above). * This does modify buff. * * Returns enum type. * search returns the pointer to use for comparison. * not returns 1 if buff started with a '!' * 0 otherwise. 
*/ enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not) { int type = MATCH_FULL; int i; if (buff[0] == '!') { *not = 1; buff++; len--; } else *not = 0; *search = buff; if (isdigit(buff[0])) return MATCH_INDEX; for (i = 0; i < len; i++) { if (buff[i] == '*') { if (!i) { type = MATCH_END_ONLY; } else if (i == len - 1) { if (type == MATCH_END_ONLY) type = MATCH_MIDDLE_ONLY; else type = MATCH_FRONT_ONLY; buff[i] = 0; break; } else { /* pattern continues, use full glob */ return MATCH_GLOB; } } else if (strchr("[?\\", buff[i])) { return MATCH_GLOB; } } if (buff[0] == '*') *search = buff + 1; return type; } static void filter_build_regex(struct filter_pred *pred) { struct regex *r = &pred->regex; char *search; enum regex_type type = MATCH_FULL; if (pred->op == OP_GLOB) { type = filter_parse_regex(r->pattern, r->len, &search, &pred->not); r->len = strlen(search); memmove(r->pattern, search, r->len+1); } switch (type) { /* MATCH_INDEX should not happen, but if it does, match full */ case MATCH_INDEX: case MATCH_FULL: r->match = regex_match_full; break; case MATCH_FRONT_ONLY: r->match = regex_match_front; break; case MATCH_MIDDLE_ONLY: r->match = regex_match_middle; break; case MATCH_END_ONLY: r->match = regex_match_end; break; case MATCH_GLOB: r->match = regex_match_glob; break; } } /* return 1 if event matches, 0 otherwise (discard) */ int filter_match_preds(struct event_filter *filter, void *rec) { struct prog_entry *prog; int i; /* no filter is considered a match */ if (!filter) return 1; /* Protected by either SRCU(tracepoint_srcu) or preempt_disable */ prog = rcu_dereference_raw(filter->prog); if (!prog) return 1; for (i = 0; prog[i].pred; i++) { struct filter_pred *pred = prog[i].pred; int match = pred->fn(pred, rec); if (match == prog[i].when_to_branch) i = prog[i].target; } return prog[i].target; } EXPORT_SYMBOL_GPL(filter_match_preds); static void remove_filter_string(struct event_filter *filter) { if (!filter) return; kfree(filter->filter_string); filter->filter_string = NULL; } static void append_filter_err(struct trace_array *tr, struct filter_parse_error *pe, struct event_filter *filter) { struct trace_seq *s; int pos = pe->lasterr_pos; char *buf; int len; if (WARN_ON(!filter->filter_string)) return; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return; trace_seq_init(s); len = strlen(filter->filter_string); if (pos > len) pos = len; /* indexing is off by one */ if (pos) pos++; trace_seq_puts(s, filter->filter_string); if (pe->lasterr > 0) { trace_seq_printf(s, "\n%*s", pos, "^"); trace_seq_printf(s, "\nparse_error: %s\n", err_text[pe->lasterr]); tracing_log_err(tr, "event filter parse error", filter->filter_string, err_text, pe->lasterr, pe->lasterr_pos); } else { trace_seq_printf(s, "\nError: (%d)\n", pe->lasterr); tracing_log_err(tr, "event filter parse error", filter->filter_string, err_text, FILT_ERR_ERRNO, 0); } trace_seq_putc(s, 0); buf = kmemdup_nul(s->buffer, s->seq.len, GFP_KERNEL); if (buf) { kfree(filter->filter_string); filter->filter_string = buf; } kfree(s); } static inline struct event_filter *event_filter(struct trace_event_file *file) { return file->filter; } /* caller must hold event_mutex */ void print_event_filter(struct trace_event_file *file, struct trace_seq *s) { struct event_filter *filter = event_filter(file); if (filter && filter->filter_string) trace_seq_printf(s, "%s\n", filter->filter_string); else trace_seq_puts(s, "none\n"); } void print_subsystem_event_filter(struct event_subsystem *system, struct trace_seq *s) { struct 
event_filter *filter; mutex_lock(&event_mutex); filter = system->filter; if (filter && filter->filter_string) trace_seq_printf(s, "%s\n", filter->filter_string); else trace_seq_puts(s, DEFAULT_SYS_FILTER_MESSAGE "\n"); mutex_unlock(&event_mutex); } static void free_prog(struct event_filter *filter) { struct prog_entry *prog; int i; prog = rcu_access_pointer(filter->prog); if (!prog) return; for (i = 0; prog[i].pred; i++) kfree(prog[i].pred); kfree(prog); } static void filter_disable(struct trace_event_file *file) { unsigned long old_flags = file->flags; file->flags &= ~EVENT_FILE_FL_FILTERED; if (old_flags != file->flags) trace_buffered_event_disable(); } static void __free_filter(struct event_filter *filter) { if (!filter) return; free_prog(filter); kfree(filter->filter_string); kfree(filter); } void free_event_filter(struct event_filter *filter) { __free_filter(filter); } static inline void __remove_filter(struct trace_event_file *file) { filter_disable(file); remove_filter_string(file->filter); } static void filter_free_subsystem_preds(struct trace_subsystem_dir *dir, struct trace_array *tr) { struct trace_event_file *file; list_for_each_entry(file, &tr->events, list) { if (file->system != dir) continue; __remove_filter(file); } } static inline void __free_subsystem_filter(struct trace_event_file *file) { __free_filter(file->filter); file->filter = NULL; } static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir, struct trace_array *tr) { struct trace_event_file *file; list_for_each_entry(file, &tr->events, list) { if (file->system != dir) continue; __free_subsystem_filter(file); } } int filter_assign_type(const char *type) { if (strstr(type, "__data_loc") && strstr(type, "char")) return FILTER_DYN_STRING; if (strchr(type, '[') && strstr(type, "char")) return FILTER_STATIC_STRING; if (strcmp(type, "char *") == 0 || strcmp(type, "const char *") == 0) return FILTER_PTR_STRING; return FILTER_OTHER; } static filter_pred_fn_t select_comparison_fn(enum filter_op_ids op, int field_size, int field_is_signed) { filter_pred_fn_t fn = NULL; int pred_func_index = -1; switch (op) { case OP_EQ: case OP_NE: break; default: if (WARN_ON_ONCE(op < PRED_FUNC_START)) return NULL; pred_func_index = op - PRED_FUNC_START; if (WARN_ON_ONCE(pred_func_index > PRED_FUNC_MAX)) return NULL; } switch (field_size) { case 8: if (pred_func_index < 0) fn = filter_pred_64; else if (field_is_signed) fn = pred_funcs_s64[pred_func_index]; else fn = pred_funcs_u64[pred_func_index]; break; case 4: if (pred_func_index < 0) fn = filter_pred_32; else if (field_is_signed) fn = pred_funcs_s32[pred_func_index]; else fn = pred_funcs_u32[pred_func_index]; break; case 2: if (pred_func_index < 0) fn = filter_pred_16; else if (field_is_signed) fn = pred_funcs_s16[pred_func_index]; else fn = pred_funcs_u16[pred_func_index]; break; case 1: if (pred_func_index < 0) fn = filter_pred_8; else if (field_is_signed) fn = pred_funcs_s8[pred_func_index]; else fn = pred_funcs_u8[pred_func_index]; break; } return fn; } /* Called when a predicate is encountered by predicate_parse() */ static int parse_pred(const char *str, void *data, int pos, struct filter_parse_error *pe, struct filter_pred **pred_ptr) { struct trace_event_call *call = data; struct ftrace_event_field *field; struct filter_pred *pred = NULL; char num_buf[24]; /* Big enough to hold an address */ char *field_name; char q; u64 val; int len; int ret; int op; int s; int i = 0; /* First find the field to associate to */ while (isspace(str[i])) i++; s = i; while 
(isalnum(str[i]) || str[i] == '_') i++; len = i - s; if (!len) return -1; field_name = kmemdup_nul(str + s, len, GFP_KERNEL); if (!field_name) return -ENOMEM; /* Make sure that the field exists */ field = trace_find_event_field(call, field_name); kfree(field_name); if (!field) { parse_error(pe, FILT_ERR_FIELD_NOT_FOUND, pos + i); return -EINVAL; } while (isspace(str[i])) i++; /* Make sure this op is supported */ for (op = 0; ops[op]; op++) { /* This is why '<=' must come before '<' in ops[] */ if (strncmp(str + i, ops[op], strlen(ops[op])) == 0) break; } if (!ops[op]) { parse_error(pe, FILT_ERR_INVALID_OP, pos + i); goto err_free; } i += strlen(ops[op]); while (isspace(str[i])) i++; s = i; pred = kzalloc(sizeof(*pred), GFP_KERNEL); if (!pred) return -ENOMEM; pred->field = field; pred->offset = field->offset; pred->op = op; if (ftrace_event_is_function(call)) { /* * Perf does things different with function events. * It only allows an "ip" field, and expects a string. * But the string does not need to be surrounded by quotes. * If it is a string, the assigned function as a nop, * (perf doesn't use it) and grab everything. */ if (strcmp(field->name, "ip") != 0) { parse_error(pe, FILT_ERR_IP_FIELD_ONLY, pos + i); goto err_free; } pred->fn = filter_pred_none; /* * Quotes are not required, but if they exist then we need * to read them till we hit a matching one. */ if (str[i] == '\'' || str[i] == '"') q = str[i]; else q = 0; for (i++; str[i]; i++) { if (q && str[i] == q) break; if (!q && (str[i] == ')' || str[i] == '&' || str[i] == '|')) break; } /* Skip quotes */ if (q) s++; len = i - s; if (len >= MAX_FILTER_STR_VAL) { parse_error(pe, FILT_ERR_OPERAND_TOO_LONG, pos + i); goto err_free; } pred->regex.len = len; strncpy(pred->regex.pattern, str + s, len); pred->regex.pattern[len] = 0; /* This is either a string, or an integer */ } else if (str[i] == '\'' || str[i] == '"') { char q = str[i]; /* Make sure the op is OK for strings */ switch (op) { case OP_NE: pred->not = 1; /* Fall through */ case OP_GLOB: case OP_EQ: break; default: parse_error(pe, FILT_ERR_ILLEGAL_FIELD_OP, pos + i); goto err_free; } /* Make sure the field is OK for strings */ if (!is_string_field(field)) { parse_error(pe, FILT_ERR_EXPECT_DIGIT, pos + i); goto err_free; } for (i++; str[i]; i++) { if (str[i] == q) break; } if (!str[i]) { parse_error(pe, FILT_ERR_MISSING_QUOTE, pos + i); goto err_free; } /* Skip quotes */ s++; len = i - s; if (len >= MAX_FILTER_STR_VAL) { parse_error(pe, FILT_ERR_OPERAND_TOO_LONG, pos + i); goto err_free; } pred->regex.len = len; strncpy(pred->regex.pattern, str + s, len); pred->regex.pattern[len] = 0; filter_build_regex(pred); if (field->filter_type == FILTER_COMM) { pred->fn = filter_pred_comm; } else if (field->filter_type == FILTER_STATIC_STRING) { pred->fn = filter_pred_string; pred->regex.field_len = field->size; } else if (field->filter_type == FILTER_DYN_STRING) pred->fn = filter_pred_strloc; else pred->fn = filter_pred_pchar; /* go past the last quote */ i++; } else if (isdigit(str[i]) || str[i] == '-') { /* Make sure the field is not a string */ if (is_string_field(field)) { parse_error(pe, FILT_ERR_EXPECT_STRING, pos + i); goto err_free; } if (op == OP_GLOB) { parse_error(pe, FILT_ERR_ILLEGAL_FIELD_OP, pos + i); goto err_free; } if (str[i] == '-') i++; /* We allow 0xDEADBEEF */ while (isalnum(str[i])) i++; len = i - s; /* 0xfeedfacedeadbeef is 18 chars max */ if (len >= sizeof(num_buf)) { parse_error(pe, FILT_ERR_OPERAND_TOO_LONG, pos + i); goto err_free; } strncpy(num_buf, str + s, 
len); num_buf[len] = 0; /* Make sure it is a value */ if (field->is_signed) ret = kstrtoll(num_buf, 0, &val); else ret = kstrtoull(num_buf, 0, &val); if (ret) { parse_error(pe, FILT_ERR_ILLEGAL_INTVAL, pos + s); goto err_free; } pred->val = val; if (field->filter_type == FILTER_CPU) pred->fn = filter_pred_cpu; else { pred->fn = select_comparison_fn(pred->op, field->size, field->is_signed); if (pred->op == OP_NE) pred->not = 1; } } else { parse_error(pe, FILT_ERR_INVALID_VALUE, pos + i); goto err_free; } *pred_ptr = pred; return i; err_free: kfree(pred); return -EINVAL; } enum { TOO_MANY_CLOSE = -1, TOO_MANY_OPEN = -2, MISSING_QUOTE = -3, }; /* * Read the filter string once to calculate the number of predicates * as well as how deep the parentheses go. * * Returns: * 0 - everything is fine (err is undefined) * -1 - too many ')' * -2 - too many '(' * -3 - No matching quote */ static int calc_stack(const char *str, int *parens, int *preds, int *err) { bool is_pred = false; int nr_preds = 0; int open = 1; /* Count the expression as "(E)" */ int last_quote = 0; int max_open = 1; int quote = 0; int i; *err = 0; for (i = 0; str[i]; i++) { if (isspace(str[i])) continue; if (quote) { if (str[i] == quote) quote = 0; continue; } switch (str[i]) { case '\'': case '"': quote = str[i]; last_quote = i; break; case '|': case '&': if (str[i+1] != str[i]) break; is_pred = false; continue; case '(': is_pred = false; open++; if (open > max_open) max_open = open; continue; case ')': is_pred = false; if (open == 1) { *err = i; return TOO_MANY_CLOSE; } open--; continue; } if (!is_pred) { nr_preds++; is_pred = true; } } if (quote) { *err = last_quote; return MISSING_QUOTE; } if (open != 1) { int level = open; /* find the bad open */ for (i--; i; i--) { if (quote) { if (str[i] == quote) quote = 0; continue; } switch (str[i]) { case '(': if (level == open) { *err = i; return TOO_MANY_OPEN; } level--; break; case ')': level++; break; case '\'': case '"': quote = str[i]; break; } } /* First character is the '(' with missing ')' */ *err = 0; return TOO_MANY_OPEN; } /* Set the size of the required stacks */ *parens = max_open; *preds = nr_preds; return 0; } static int process_preds(struct trace_event_call *call, const char *filter_string, struct event_filter *filter, struct filter_parse_error *pe) { struct prog_entry *prog; int nr_parens; int nr_preds; int index; int ret; ret = calc_stack(filter_string, &nr_parens, &nr_preds, &index); if (ret < 0) { switch (ret) { case MISSING_QUOTE: parse_error(pe, FILT_ERR_MISSING_QUOTE, index); break; case TOO_MANY_OPEN: parse_error(pe, FILT_ERR_TOO_MANY_OPEN, index); break; default: parse_error(pe, FILT_ERR_TOO_MANY_CLOSE, index); } return ret; } if (!nr_preds) return -EINVAL; prog = predicate_parse(filter_string, nr_parens, nr_preds, parse_pred, call, pe); if (IS_ERR(prog)) return PTR_ERR(prog); rcu_assign_pointer(filter->prog, prog); return 0; } static inline void event_set_filtered_flag(struct trace_event_file *file) { unsigned long old_flags = file->flags; file->flags |= EVENT_FILE_FL_FILTERED; if (old_flags != file->flags) trace_buffered_event_enable(); } static inline void event_set_filter(struct trace_event_file *file, struct event_filter *filter) { rcu_assign_pointer(file->filter, filter); } static inline void event_clear_filter(struct trace_event_file *file) { RCU_INIT_POINTER(file->filter, NULL); } static inline void event_set_no_set_filter_flag(struct trace_event_file *file) { file->flags |= EVENT_FILE_FL_NO_SET_FILTER; } static inline void 
event_clear_no_set_filter_flag(struct trace_event_file *file) { file->flags &= ~EVENT_FILE_FL_NO_SET_FILTER; } static inline bool event_no_set_filter_flag(struct trace_event_file *file) { if (file->flags & EVENT_FILE_FL_NO_SET_FILTER) return true; return false; } struct filter_list { struct list_head list; struct event_filter *filter; }; static int process_system_preds(struct trace_subsystem_dir *dir, struct trace_array *tr, struct filter_parse_error *pe, char *filter_string) { struct trace_event_file *file; struct filter_list *filter_item; struct event_filter *filter = NULL; struct filter_list *tmp; LIST_HEAD(filter_list); bool fail = true; int err; list_for_each_entry(file, &tr->events, list) { if (file->system != dir) continue; filter = kzalloc(sizeof(*filter), GFP_KERNEL); if (!filter) goto fail_mem; filter->filter_string = kstrdup(filter_string, GFP_KERNEL); if (!filter->filter_string) goto fail_mem; err = process_preds(file->event_call, filter_string, filter, pe); if (err) { filter_disable(file); parse_error(pe, FILT_ERR_BAD_SUBSYS_FILTER, 0); append_filter_err(tr, pe, filter); } else event_set_filtered_flag(file); filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL); if (!filter_item) goto fail_mem; list_add_tail(&filter_item->list, &filter_list); /* * Regardless of if this returned an error, we still * replace the filter for the call. */ filter_item->filter = event_filter(file); event_set_filter(file, filter); filter = NULL; fail = false; } if (fail) goto fail; /* * The calls can still be using the old filters. * Do a synchronize_rcu() and to ensure all calls are * done with them before we free them. */ tracepoint_synchronize_unregister(); list_for_each_entry_safe(filter_item, tmp, &filter_list, list) { __free_filter(filter_item->filter); list_del(&filter_item->list); kfree(filter_item); } return 0; fail: /* No call succeeded */ list_for_each_entry_safe(filter_item, tmp, &filter_list, list) { list_del(&filter_item->list); kfree(filter_item); } parse_error(pe, FILT_ERR_BAD_SUBSYS_FILTER, 0); return -EINVAL; fail_mem: kfree(filter); /* If any call succeeded, we still need to sync */ if (!fail) tracepoint_synchronize_unregister(); list_for_each_entry_safe(filter_item, tmp, &filter_list, list) { __free_filter(filter_item->filter); list_del(&filter_item->list); kfree(filter_item); } return -ENOMEM; } static int create_filter_start(char *filter_string, bool set_str, struct filter_parse_error **pse, struct event_filter **filterp) { struct event_filter *filter; struct filter_parse_error *pe = NULL; int err = 0; if (WARN_ON_ONCE(*pse || *filterp)) return -EINVAL; filter = kzalloc(sizeof(*filter), GFP_KERNEL); if (filter && set_str) { filter->filter_string = kstrdup(filter_string, GFP_KERNEL); if (!filter->filter_string) err = -ENOMEM; } pe = kzalloc(sizeof(*pe), GFP_KERNEL); if (!filter || !pe || err) { kfree(pe); __free_filter(filter); return -ENOMEM; } /* we're committed to creating a new filter */ *filterp = filter; *pse = pe; return 0; } static void create_filter_finish(struct filter_parse_error *pe) { kfree(pe); } /** * create_filter - create a filter for a trace_event_call * @call: trace_event_call to create a filter for * @filter_str: filter string * @set_str: remember @filter_str and enable detailed error in filter * @filterp: out param for created filter (always updated on return) * Must be a pointer that references a NULL pointer. * * Creates a filter for @call with @filter_str. If @set_str is %true, * @filter_str is copied and recorded in the new filter. 
* * On success, returns 0 and *@filterp points to the new filter. On * failure, returns -errno and *@filterp may point to %NULL or to a new * filter. In the latter case, the returned filter contains error * information if @set_str is %true and the caller is responsible for * freeing it. */ static int create_filter(struct trace_array *tr, struct trace_event_call *call, char *filter_string, bool set_str, struct event_filter **filterp) { struct filter_parse_error *pe = NULL; int err; /* filterp must point to NULL */ if (WARN_ON(*filterp)) *filterp = NULL; err = create_filter_start(filter_string, set_str, &pe, filterp); if (err) return err; err = process_preds(call, filter_string, *filterp, pe); if (err && set_str) append_filter_err(tr, pe, *filterp); create_filter_finish(pe); return err; } int create_event_filter(struct trace_array *tr, struct trace_event_call *call, char *filter_str, bool set_str, struct event_filter **filterp) { return create_filter(tr, call, filter_str, set_str, filterp); } /** * create_system_filter - create a filter for an event_subsystem * @system: event_subsystem to create a filter for * @filter_str: filter string * @filterp: out param for created filter (always updated on return) * * Identical to create_filter() except that it creates a subsystem filter * and always remembers @filter_str. */ static int create_system_filter(struct trace_subsystem_dir *dir, struct trace_array *tr, char *filter_str, struct event_filter **filterp) { struct filter_parse_error *pe = NULL; int err; err = create_filter_start(filter_str, true, &pe, filterp); if (!err) { err = process_system_preds(dir, tr, pe, filter_str); if (!err) { /* System filters just show a default message */ kfree((*filterp)->filter_string); (*filterp)->filter_string = NULL; } else { append_filter_err(tr, pe, *filterp); } } create_filter_finish(pe); return err; } /* caller must hold event_mutex */ int apply_event_filter(struct trace_event_file *file, char *filter_string) { struct trace_event_call *call = file->event_call; struct event_filter *filter = NULL; int err; if (!strcmp(strstrip(filter_string), "0")) { filter_disable(file); filter = event_filter(file); if (!filter) return 0; event_clear_filter(file); /* Make sure the filter is not being used */ tracepoint_synchronize_unregister(); __free_filter(filter); return 0; } err = create_filter(file->tr, call, filter_string, true, &filter); /* * Always swap the call filter with the new filter * even if there was an error. 
If there was an error * in the filter, we disable the filter and show the error * string */ if (filter) { struct event_filter *tmp; tmp = event_filter(file); if (!err) event_set_filtered_flag(file); else filter_disable(file); event_set_filter(file, filter); if (tmp) { /* Make sure the call is done with the filter */ tracepoint_synchronize_unregister(); __free_filter(tmp); } } return err; } int apply_subsystem_event_filter(struct trace_subsystem_dir *dir, char *filter_string) { struct event_subsystem *system = dir->subsystem; struct trace_array *tr = dir->tr; struct event_filter *filter = NULL; int err = 0; mutex_lock(&event_mutex); /* Make sure the system still has events */ if (!dir->nr_events) { err = -ENODEV; goto out_unlock; } if (!strcmp(strstrip(filter_string), "0")) { filter_free_subsystem_preds(dir, tr); remove_filter_string(system->filter); filter = system->filter; system->filter = NULL; /* Ensure all filters are no longer used */ tracepoint_synchronize_unregister(); filter_free_subsystem_filters(dir, tr); __free_filter(filter); goto out_unlock; } err = create_system_filter(dir, tr, filter_string, &filter); if (filter) { /* * No event actually uses the system filter * we can free it without synchronize_rcu(). */ __free_filter(system->filter); system->filter = filter; } out_unlock: mutex_unlock(&event_mutex); return err; } #ifdef CONFIG_PERF_EVENTS void ftrace_profile_free_filter(struct perf_event *event) { struct event_filter *filter = event->filter; event->filter = NULL; __free_filter(filter); } struct function_filter_data { struct ftrace_ops *ops; int first_filter; int first_notrace; }; #ifdef CONFIG_FUNCTION_TRACER static char ** ftrace_function_filter_re(char *buf, int len, int *count) { char *str, **re; str = kstrndup(buf, len, GFP_KERNEL); if (!str) return NULL; /* * The argv_split function takes white space * as a separator, so convert ',' into spaces. */ strreplace(str, ',', ' '); re = argv_split(GFP_KERNEL, str, count); kfree(str); return re; } static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter, int reset, char *re, int len) { int ret; if (filter) ret = ftrace_set_filter(ops, re, len, reset); else ret = ftrace_set_notrace(ops, re, len, reset); return ret; } static int __ftrace_function_set_filter(int filter, char *buf, int len, struct function_filter_data *data) { int i, re_cnt, ret = -EINVAL; int *reset; char **re; reset = filter ? &data->first_filter : &data->first_notrace; /* * The 'ip' field could have multiple filters set, separated * either by space or comma. We first cut the filter and apply * all pieces separatelly. */ re = ftrace_function_filter_re(buf, len, &re_cnt); if (!re) return -EINVAL; for (i = 0; i < re_cnt; i++) { ret = ftrace_function_set_regexp(data->ops, filter, *reset, re[i], strlen(re[i])); if (ret) break; if (*reset) *reset = 0; } argv_free(re); return ret; } static int ftrace_function_check_pred(struct filter_pred *pred) { struct ftrace_event_field *field = pred->field; /* * Check the predicate for function trace, verify: * - only '==' and '!=' is used * - the 'ip' field is used */ if ((pred->op != OP_EQ) && (pred->op != OP_NE)) return -EINVAL; if (strcmp(field->name, "ip")) return -EINVAL; return 0; } static int ftrace_function_set_filter_pred(struct filter_pred *pred, struct function_filter_data *data) { int ret; /* Checking the node is valid for function trace. 
*/ ret = ftrace_function_check_pred(pred); if (ret) return ret; return __ftrace_function_set_filter(pred->op == OP_EQ, pred->regex.pattern, pred->regex.len, data); } static bool is_or(struct prog_entry *prog, int i) { int target; /* * Only "||" is allowed for function events, thus, * all true branches should jump to true, and any * false branch should jump to false. */ target = prog[i].target + 1; /* True and false have NULL preds (all prog entries should jump to one */ if (prog[target].pred) return false; /* prog[target].target is 1 for TRUE, 0 for FALSE */ return prog[i].when_to_branch == prog[target].target; } static int ftrace_function_set_filter(struct perf_event *event, struct event_filter *filter) { struct prog_entry *prog = rcu_dereference_protected(filter->prog, lockdep_is_held(&event_mutex)); struct function_filter_data data = { .first_filter = 1, .first_notrace = 1, .ops = &event->ftrace_ops, }; int i; for (i = 0; prog[i].pred; i++) { struct filter_pred *pred = prog[i].pred; if (!is_or(prog, i)) return -EINVAL; if (ftrace_function_set_filter_pred(pred, &data) < 0) return -EINVAL; } return 0; } #else static int ftrace_function_set_filter(struct perf_event *event, struct event_filter *filter) { return -ENODEV; } #endif /* CONFIG_FUNCTION_TRACER */ int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str) { int err; struct event_filter *filter = NULL; struct trace_event_call *call; mutex_lock(&event_mutex); call = event->tp_event; err = -EINVAL; if (!call) goto out_unlock; err = -EEXIST; if (event->filter) goto out_unlock; err = create_filter(NULL, call, filter_str, false, &filter); if (err) goto free_filter; if (ftrace_event_is_function(call)) err = ftrace_function_set_filter(event, filter); else event->filter = filter; free_filter: if (err || ftrace_event_is_function(call)) __free_filter(filter); out_unlock: mutex_unlock(&event_mutex); return err; } #endif /* CONFIG_PERF_EVENTS */ #ifdef CONFIG_FTRACE_STARTUP_TEST #include <linux/types.h> #include <linux/tracepoint.h> #define CREATE_TRACE_POINTS #include "trace_events_filter_test.h" #define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \ { \ .filter = FILTER, \ .rec = { .a = va, .b = vb, .c = vc, .d = vd, \ .e = ve, .f = vf, .g = vg, .h = vh }, \ .match = m, \ .not_visited = nvisit, \ } #define YES 1 #define NO 0 static struct test_filter_data_t { char *filter; struct trace_event_raw_ftrace_test_filter rec; int match; char *not_visited; } test_filter_data[] = { #define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \ "e == 1 && f == 1 && g == 1 && h == 1" DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""), DATA_REC(NO, 0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"), DATA_REC(NO, 1, 1, 1, 1, 1, 1, 1, 0, ""), #undef FILTER #define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \ "e == 1 || f == 1 || g == 1 || h == 1" DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""), DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""), DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"), #undef FILTER #define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \ "(e == 1 || f == 1) && (g == 1 || h == 1)" DATA_REC(NO, 0, 0, 1, 1, 1, 1, 1, 1, "dfh"), DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""), DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"), DATA_REC(NO, 1, 0, 1, 0, 0, 1, 0, 0, "bd"), #undef FILTER #define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \ "(e == 1 && f == 1) || (g == 1 && h == 1)" DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"), DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""), DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""), #undef FILTER #define 
FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \ "(e == 1 && f == 1) || (g == 1 && h == 1)" DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"), DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""), DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""), #undef FILTER #define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \ "(e == 1 || f == 1)) && (g == 1 || h == 1)" DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"), DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""), DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"), #undef FILTER #define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \ "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))" DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"), DATA_REC(NO, 0, 1, 0, 1, 0, 1, 0, 1, ""), DATA_REC(NO, 1, 0, 1, 0, 1, 0, 1, 0, ""), #undef FILTER #define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \ "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))" DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"), DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""), DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"), }; #undef DATA_REC #undef FILTER #undef YES #undef NO #define DATA_CNT ARRAY_SIZE(test_filter_data) static int test_pred_visited; static int test_pred_visited_fn(struct filter_pred *pred, void *event) { struct ftrace_event_field *field = pred->field; test_pred_visited = 1; printk(KERN_INFO "\npred visited %s\n", field->name); return 1; } static void update_pred_fn(struct event_filter *filter, char *fields) { struct prog_entry *prog = rcu_dereference_protected(filter->prog, lockdep_is_held(&event_mutex)); int i; for (i = 0; prog[i].pred; i++) { struct filter_pred *pred = prog[i].pred; struct ftrace_event_field *field = pred->field; WARN_ON_ONCE(!pred->fn); if (!field) { WARN_ONCE(1, "all leafs should have field defined %d", i); continue; } if (!strchr(fields, *field->name)) continue; pred->fn = test_pred_visited_fn; } } static __init int ftrace_test_event_filter(void) { int i; printk(KERN_INFO "Testing ftrace filter: "); for (i = 0; i < DATA_CNT; i++) { struct event_filter *filter = NULL; struct test_filter_data_t *d = &test_filter_data[i]; int err; err = create_filter(NULL, &event_ftrace_test_filter, d->filter, false, &filter); if (err) { printk(KERN_INFO "Failed to get filter for '%s', err %d\n", d->filter, err); __free_filter(filter); break; } /* Needed to dereference filter->prog */ mutex_lock(&event_mutex); /* * The preemption disabling is not really needed for self * tests, but the rcu dereference will complain without it. */ preempt_disable(); if (*d->not_visited) update_pred_fn(filter, d->not_visited); test_pred_visited = 0; err = filter_match_preds(filter, &d->rec); preempt_enable(); mutex_unlock(&event_mutex); __free_filter(filter); if (test_pred_visited) { printk(KERN_INFO "Failed, unwanted pred visited for filter %s\n", d->filter); break; } if (err != d->match) { printk(KERN_INFO "Failed to match filter '%s', expected %d\n", d->filter, d->match); break; } } if (i == DATA_CNT) printk(KERN_CONT "OK\n"); return 0; } late_initcall(ftrace_test_event_filter); #endif /* CONFIG_FTRACE_STARTUP_TEST */
/** * Copyright (c) 2014 Redpine Signals Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/etherdevice.h> #include <linux/timer.h> #include "rsi_mgmt.h" #include "rsi_common.h" #include "rsi_ps.h" #include "rsi_hal.h" static struct bootup_params boot_params_20 = { .magic_number = cpu_to_le16(0x5aa5), .crystal_good_time = 0x0, .valid = cpu_to_le32(VALID_20), .reserved_for_valids = 0x0, .bootup_mode_info = 0x0, .digital_loop_back_params = 0x0, .rtls_timestamp_en = 0x0, .host_spi_intr_cfg = 0x0, .device_clk_info = {{ .pll_config_g = { .tapll_info_g = { .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_20 << 8)| (TA_PLL_M_VAL_20)), .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_20), }, .pll960_info_g = { .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_20 << 8)| (PLL960_N_VAL_20)), .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_20), .pll_reg_3 = 0x0, }, .afepll_info_g = { .pll_reg = cpu_to_le16(0x9f0), } }, .switch_clk_g = { .switch_clk_info = cpu_to_le16(0xb), .bbp_lmac_clk_reg_val = cpu_to_le16(0x111), .umac_clock_reg_config = cpu_to_le16(0x48), .qspi_uart_clock_reg_config = cpu_to_le16(0x1211) } }, { .pll_config_g = { .tapll_info_g = { .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_20 << 8)| (TA_PLL_M_VAL_20)), .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_20), }, .pll960_info_g = { .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_20 << 8)| (PLL960_N_VAL_20)), .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_20), .pll_reg_3 = 0x0, }, .afepll_info_g = { .pll_reg = cpu_to_le16(0x9f0), } }, .switch_clk_g = { .switch_clk_info = 0x0, .bbp_lmac_clk_reg_val = 0x0, .umac_clock_reg_config = 0x0, .qspi_uart_clock_reg_config = 0x0 } }, { .pll_config_g = { .tapll_info_g = { .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_20 << 8)| (TA_PLL_M_VAL_20)), .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_20), }, .pll960_info_g = { .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_20 << 8)| (PLL960_N_VAL_20)), .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_20), .pll_reg_3 = 0x0, }, .afepll_info_g = { .pll_reg = cpu_to_le16(0x9f0), } }, .switch_clk_g = { .switch_clk_info = 0x0, .bbp_lmac_clk_reg_val = 0x0, .umac_clock_reg_config = 0x0, .qspi_uart_clock_reg_config = 0x0 } } }, .buckboost_wakeup_cnt = 0x0, .pmu_wakeup_wait = 0x0, .shutdown_wait_time = 0x0, .pmu_slp_clkout_sel = 0x0, .wdt_prog_value = 0x0, .wdt_soc_rst_delay = 0x0, .dcdc_operation_mode = 0x0, .soc_reset_wait_cnt = 0x0, .waiting_time_at_fresh_sleep = 0x0, .max_threshold_to_avoid_sleep = 0x0, .beacon_resedue_alg_en = 0, }; static struct bootup_params boot_params_40 = { .magic_number = cpu_to_le16(0x5aa5), .crystal_good_time = 0x0, .valid = cpu_to_le32(VALID_40), .reserved_for_valids = 0x0, .bootup_mode_info = 0x0, .digital_loop_back_params = 0x0, .rtls_timestamp_en = 0x0, .host_spi_intr_cfg = 0x0, .device_clk_info = {{ .pll_config_g = { .tapll_info_g = { .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_40 << 8)| (TA_PLL_M_VAL_40)), .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_40), }, .pll960_info_g 
= { .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_40 << 8)| (PLL960_N_VAL_40)), .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_40), .pll_reg_3 = 0x0, }, .afepll_info_g = { .pll_reg = cpu_to_le16(0x9f0), } }, .switch_clk_g = { .switch_clk_info = cpu_to_le16(0x09), .bbp_lmac_clk_reg_val = cpu_to_le16(0x1121), .umac_clock_reg_config = cpu_to_le16(0x48), .qspi_uart_clock_reg_config = cpu_to_le16(0x1211) } }, { .pll_config_g = { .tapll_info_g = { .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_40 << 8)| (TA_PLL_M_VAL_40)), .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_40), }, .pll960_info_g = { .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_40 << 8)| (PLL960_N_VAL_40)), .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_40), .pll_reg_3 = 0x0, }, .afepll_info_g = { .pll_reg = cpu_to_le16(0x9f0), } }, .switch_clk_g = { .switch_clk_info = 0x0, .bbp_lmac_clk_reg_val = 0x0, .umac_clock_reg_config = 0x0, .qspi_uart_clock_reg_config = 0x0 } }, { .pll_config_g = { .tapll_info_g = { .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_40 << 8)| (TA_PLL_M_VAL_40)), .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_40), }, .pll960_info_g = { .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_40 << 8)| (PLL960_N_VAL_40)), .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_40), .pll_reg_3 = 0x0, }, .afepll_info_g = { .pll_reg = cpu_to_le16(0x9f0), } }, .switch_clk_g = { .switch_clk_info = 0x0, .bbp_lmac_clk_reg_val = 0x0, .umac_clock_reg_config = 0x0, .qspi_uart_clock_reg_config = 0x0 } } }, .buckboost_wakeup_cnt = 0x0, .pmu_wakeup_wait = 0x0, .shutdown_wait_time = 0x0, .pmu_slp_clkout_sel = 0x0, .wdt_prog_value = 0x0, .wdt_soc_rst_delay = 0x0, .dcdc_operation_mode = 0x0, .soc_reset_wait_cnt = 0x0, .waiting_time_at_fresh_sleep = 0x0, .max_threshold_to_avoid_sleep = 0x0, .beacon_resedue_alg_en = 0, }; static struct bootup_params_9116 boot_params_9116_20 = { .magic_number = cpu_to_le16(LOADED_TOKEN), .valid = cpu_to_le32(VALID_20), .device_clk_info_9116 = {{ .pll_config_9116_g = { .pll_ctrl_set_reg = cpu_to_le16(0xd518), .pll_ctrl_clr_reg = cpu_to_le16(0x2ae7), .pll_modem_conig_reg = cpu_to_le16(0x2000), .soc_clk_config_reg = cpu_to_le16(0x0c18), .adc_dac_strm1_config_reg = cpu_to_le16(0x1100), .adc_dac_strm2_config_reg = cpu_to_le16(0x6600), }, .switch_clk_9116_g = { .switch_clk_info = cpu_to_le32((RSI_SWITCH_TASS_CLK | RSI_SWITCH_WLAN_BBP_LMAC_CLK_REG | RSI_SWITCH_BBP_LMAC_CLK_REG)), .tass_clock_reg = cpu_to_le32(0x083C0503), .wlan_bbp_lmac_clk_reg_val = cpu_to_le32(0x01042001), .zbbt_bbp_lmac_clk_reg_val = cpu_to_le32(0x02010001), .bbp_lmac_clk_en_val = cpu_to_le32(0x0000003b), } }, }, }; static struct bootup_params_9116 boot_params_9116_40 = { .magic_number = cpu_to_le16(LOADED_TOKEN), .valid = cpu_to_le32(VALID_40), .device_clk_info_9116 = {{ .pll_config_9116_g = { .pll_ctrl_set_reg = cpu_to_le16(0xd518), .pll_ctrl_clr_reg = cpu_to_le16(0x2ae7), .pll_modem_conig_reg = cpu_to_le16(0x3000), .soc_clk_config_reg = cpu_to_le16(0x0c18), .adc_dac_strm1_config_reg = cpu_to_le16(0x0000), .adc_dac_strm2_config_reg = cpu_to_le16(0x6600), }, .switch_clk_9116_g = { .switch_clk_info = cpu_to_le32((RSI_SWITCH_TASS_CLK | RSI_SWITCH_WLAN_BBP_LMAC_CLK_REG | RSI_SWITCH_BBP_LMAC_CLK_REG | RSI_MODEM_CLK_160MHZ)), .tass_clock_reg = cpu_to_le32(0x083C0503), .wlan_bbp_lmac_clk_reg_val = cpu_to_le32(0x01042002), .zbbt_bbp_lmac_clk_reg_val = cpu_to_le32(0x04010002), .bbp_lmac_clk_en_val = cpu_to_le32(0x0000003b), } }, }, }; static u16 mcs[] = {13, 26, 39, 52, 78, 104, 117, 130}; /** * rsi_set_default_parameters() - This function sets default parameters. * @common: Pointer to the driver private structure. 
* * Return: none */ static void rsi_set_default_parameters(struct rsi_common *common) { common->band = NL80211_BAND_2GHZ; common->channel_width = BW_20MHZ; common->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; common->channel = 1; common->min_rate = 0xffff; common->fsm_state = FSM_CARD_NOT_READY; common->iface_down = true; common->endpoint = EP_2GHZ_20MHZ; common->driver_mode = 1; /* End to end mode */ common->lp_ps_handshake_mode = 0; /* Default no handShake mode*/ common->ulp_ps_handshake_mode = 2; /* Default PKT handShake mode*/ common->rf_power_val = 0; /* Default 1.9V */ common->wlan_rf_power_mode = 0; common->obm_ant_sel_val = 2; common->beacon_interval = RSI_BEACON_INTERVAL; common->dtim_cnt = RSI_DTIM_COUNT; common->w9116_features.pll_mode = 0x0; common->w9116_features.rf_type = 1; common->w9116_features.wireless_mode = 0; common->w9116_features.enable_ppe = 0; common->w9116_features.afe_type = 1; common->w9116_features.dpd = 0; common->w9116_features.sifs_tx_enable = 0; common->w9116_features.ps_options = 0; } void init_bgscan_params(struct rsi_common *common) { memset((u8 *)&common->bgscan, 0, sizeof(struct rsi_bgscan_params)); common->bgscan.bgscan_threshold = RSI_DEF_BGSCAN_THRLD; common->bgscan.roam_threshold = RSI_DEF_ROAM_THRLD; common->bgscan.bgscan_periodicity = RSI_BGSCAN_PERIODICITY; common->bgscan.num_bgscan_channels = 0; common->bgscan.two_probe = 1; common->bgscan.active_scan_duration = RSI_ACTIVE_SCAN_TIME; common->bgscan.passive_scan_duration = RSI_PASSIVE_SCAN_TIME; } /** * rsi_set_contention_vals() - This function sets the contention values for the * backoff procedure. * @common: Pointer to the driver private structure. * * Return: None. */ static void rsi_set_contention_vals(struct rsi_common *common) { u8 ii = 0; for (; ii < NUM_EDCA_QUEUES; ii++) { common->tx_qinfo[ii].wme_params = (((common->edca_params[ii].cw_min / 2) + (common->edca_params[ii].aifs)) * WMM_SHORT_SLOT_TIME + SIFS_DURATION); common->tx_qinfo[ii].weight = common->tx_qinfo[ii].wme_params; common->tx_qinfo[ii].pkt_contended = 0; } } /** * rsi_send_internal_mgmt_frame() - This function sends management frames to * firmware.Also schedules packet to queue * for transmission. * @common: Pointer to the driver private structure. * @skb: Pointer to the socket buffer structure. * * Return: 0 on success, -1 on failure. */ static int rsi_send_internal_mgmt_frame(struct rsi_common *common, struct sk_buff *skb) { struct skb_info *tx_params; struct rsi_cmd_desc *desc; if (skb == NULL) { rsi_dbg(ERR_ZONE, "%s: Unable to allocate skb\n", __func__); return -ENOMEM; } desc = (struct rsi_cmd_desc *)skb->data; desc->desc_dword0.len_qno |= cpu_to_le16(DESC_IMMEDIATE_WAKEUP); skb->priority = MGMT_SOFT_Q; tx_params = (struct skb_info *)&IEEE80211_SKB_CB(skb)->driver_data; tx_params->flags |= INTERNAL_MGMT_PKT; skb_queue_tail(&common->tx_queue[MGMT_SOFT_Q], skb); rsi_set_event(&common->tx_thread.event); return 0; } /** * rsi_load_radio_caps() - This function is used to send radio capabilities * values to firmware. * @common: Pointer to the driver private structure. * * Return: 0 on success, corresponding negative error code on failure. 
*/ static int rsi_load_radio_caps(struct rsi_common *common) { struct rsi_radio_caps *radio_caps; struct rsi_hw *adapter = common->priv; u16 inx = 0; u8 ii; u8 radio_id = 0; u16 gc[20] = {0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0}; struct sk_buff *skb; u16 frame_len = sizeof(struct rsi_radio_caps); rsi_dbg(INFO_ZONE, "%s: Sending rate symbol req frame\n", __func__); skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, frame_len); radio_caps = (struct rsi_radio_caps *)skb->data; radio_caps->desc_dword0.frame_type = RADIO_CAPABILITIES; radio_caps->channel_num = common->channel; radio_caps->rf_model = RSI_RF_TYPE; radio_caps->radio_cfg_info = RSI_LMAC_CLOCK_80MHZ; if (common->channel_width == BW_40MHZ) { radio_caps->radio_cfg_info |= RSI_ENABLE_40MHZ; if (common->fsm_state == FSM_MAC_INIT_DONE) { struct ieee80211_hw *hw = adapter->hw; struct ieee80211_conf *conf = &hw->conf; if (conf_is_ht40_plus(conf)) { radio_caps->ppe_ack_rate = cpu_to_le16(LOWER_20_ENABLE | (LOWER_20_ENABLE >> 12)); } else if (conf_is_ht40_minus(conf)) { radio_caps->ppe_ack_rate = cpu_to_le16(UPPER_20_ENABLE | (UPPER_20_ENABLE >> 12)); } else { radio_caps->ppe_ack_rate = cpu_to_le16((BW_40MHZ << 12) | FULL40M_ENABLE); } } } radio_caps->radio_info |= radio_id; if (adapter->device_model == RSI_DEV_9116 && common->channel_width == BW_20MHZ) radio_caps->radio_cfg_info &= ~0x3; radio_caps->sifs_tx_11n = cpu_to_le16(SIFS_TX_11N_VALUE); radio_caps->sifs_tx_11b = cpu_to_le16(SIFS_TX_11B_VALUE); radio_caps->slot_rx_11n = cpu_to_le16(SHORT_SLOT_VALUE); radio_caps->ofdm_ack_tout = cpu_to_le16(OFDM_ACK_TOUT_VALUE); radio_caps->cck_ack_tout = cpu_to_le16(CCK_ACK_TOUT_VALUE); radio_caps->preamble_type = cpu_to_le16(LONG_PREAMBLE); for (ii = 0; ii < MAX_HW_QUEUES; ii++) { radio_caps->qos_params[ii].cont_win_min_q = cpu_to_le16(3); radio_caps->qos_params[ii].cont_win_max_q = cpu_to_le16(0x3f); radio_caps->qos_params[ii].aifsn_val_q = cpu_to_le16(2); radio_caps->qos_params[ii].txop_q = 0; } for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) { if (common->edca_params[ii].cw_max > 0) { radio_caps->qos_params[ii].cont_win_min_q = cpu_to_le16(common->edca_params[ii].cw_min); radio_caps->qos_params[ii].cont_win_max_q = cpu_to_le16(common->edca_params[ii].cw_max); radio_caps->qos_params[ii].aifsn_val_q = cpu_to_le16(common->edca_params[ii].aifs << 8); radio_caps->qos_params[ii].txop_q = cpu_to_le16(common->edca_params[ii].txop); } } radio_caps->qos_params[BROADCAST_HW_Q].txop_q = cpu_to_le16(0xffff); radio_caps->qos_params[MGMT_HW_Q].txop_q = 0; radio_caps->qos_params[BEACON_HW_Q].txop_q = cpu_to_le16(0xffff); memcpy(&common->rate_pwr[0], &gc[0], 40); for (ii = 0; ii < 20; ii++) radio_caps->gcpd_per_rate[inx++] = cpu_to_le16(common->rate_pwr[ii] & 0x00FF); rsi_set_len_qno(&radio_caps->desc_dword0.len_qno, (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_mgmt_pkt_to_core() - This function is the entry point for Mgmt module. * @common: Pointer to the driver private structure. * @msg: Pointer to received packet. * @msg_len: Length of the received packet. * @type: Type of received packet. * * Return: 0 on success, -1 on failure. 
*/ static int rsi_mgmt_pkt_to_core(struct rsi_common *common, u8 *msg, s32 msg_len) { struct rsi_hw *adapter = common->priv; struct ieee80211_tx_info *info; struct skb_info *rx_params; u8 pad_bytes = msg[4]; struct sk_buff *skb; if (!adapter->sc_nvifs) return -ENOLINK; msg_len -= pad_bytes; if (msg_len <= 0) { rsi_dbg(MGMT_RX_ZONE, "%s: Invalid rx msg of len = %d\n", __func__, msg_len); return -EINVAL; } skb = dev_alloc_skb(msg_len); if (!skb) return -ENOMEM; skb_put_data(skb, (u8 *)(msg + FRAME_DESC_SZ + pad_bytes), msg_len); info = IEEE80211_SKB_CB(skb); rx_params = (struct skb_info *)info->driver_data; rx_params->rssi = rsi_get_rssi(msg); rx_params->channel = rsi_get_channel(msg); rsi_indicate_pkt_to_os(common, skb); return 0; } /** * rsi_hal_send_sta_notify_frame() - This function sends the station notify * frame to firmware. * @common: Pointer to the driver private structure. * @opmode: Operating mode of device. * @notify_event: Notification about station connection. * @bssid: bssid. * @qos_enable: Qos is enabled. * @aid: Aid (unique for all STA). * * Return: status: 0 on success, corresponding negative error code on failure. */ int rsi_hal_send_sta_notify_frame(struct rsi_common *common, enum opmode opmode, u8 notify_event, const unsigned char *bssid, u8 qos_enable, u16 aid, u16 sta_id, struct ieee80211_vif *vif) { struct sk_buff *skb = NULL; struct rsi_peer_notify *peer_notify; u16 vap_id = ((struct vif_priv *)vif->drv_priv)->vap_id; int status; u16 frame_len = sizeof(struct rsi_peer_notify); rsi_dbg(MGMT_TX_ZONE, "%s: Sending sta notify frame\n", __func__); skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, frame_len); peer_notify = (struct rsi_peer_notify *)skb->data; if (opmode == RSI_OPMODE_STA) peer_notify->command = cpu_to_le16(PEER_TYPE_AP << 1); else if (opmode == RSI_OPMODE_AP) peer_notify->command = cpu_to_le16(PEER_TYPE_STA << 1); switch (notify_event) { case STA_CONNECTED: peer_notify->command |= cpu_to_le16(RSI_ADD_PEER); break; case STA_DISCONNECTED: peer_notify->command |= cpu_to_le16(RSI_DELETE_PEER); break; default: break; } peer_notify->command |= cpu_to_le16((aid & 0xfff) << 4); ether_addr_copy(peer_notify->mac_addr, bssid); peer_notify->mpdu_density = cpu_to_le16(RSI_MPDU_DENSITY); peer_notify->sta_flags = cpu_to_le32((qos_enable) ? 1 : 0); rsi_set_len_qno(&peer_notify->desc.desc_dword0.len_qno, (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); peer_notify->desc.desc_dword0.frame_type = PEER_NOTIFY; peer_notify->desc.desc_dword3.qid_tid = sta_id; peer_notify->desc.desc_dword3.sta_id = vap_id; skb_put(skb, frame_len); status = rsi_send_internal_mgmt_frame(common, skb); if ((vif->type == NL80211_IFTYPE_STATION) && (!status && qos_enable)) { rsi_set_contention_vals(common); status = rsi_load_radio_caps(common); } return status; } /** * rsi_send_aggregation_params_frame() - This function sends the ampdu * indication frame to firmware. * @common: Pointer to the driver private structure. * @tid: traffic identifier. * @ssn: ssn. * @buf_size: buffer size. * @event: notification about station connection. * * Return: 0 on success, corresponding negative error code on failure. 
*/ int rsi_send_aggregation_params_frame(struct rsi_common *common, u16 tid, u16 ssn, u8 buf_size, u8 event, u8 sta_id) { struct sk_buff *skb = NULL; struct rsi_aggr_params *aggr_params; u16 frame_len = sizeof(struct rsi_aggr_params); skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, frame_len); aggr_params = (struct rsi_aggr_params *)skb->data; rsi_dbg(MGMT_TX_ZONE, "%s: Sending AMPDU indication frame\n", __func__); rsi_set_len_qno(&aggr_params->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q); aggr_params->desc_dword0.frame_type = AMPDU_IND; aggr_params->aggr_params = tid & RSI_AGGR_PARAMS_TID_MASK; aggr_params->peer_id = sta_id; if (event == STA_TX_ADDBA_DONE) { aggr_params->seq_start = cpu_to_le16(ssn); aggr_params->baw_size = cpu_to_le16(buf_size); aggr_params->aggr_params |= RSI_AGGR_PARAMS_START; } else if (event == STA_RX_ADDBA_DONE) { aggr_params->seq_start = cpu_to_le16(ssn); aggr_params->aggr_params |= (RSI_AGGR_PARAMS_START | RSI_AGGR_PARAMS_RX_AGGR); } else if (event == STA_RX_DELBA) { aggr_params->aggr_params |= RSI_AGGR_PARAMS_RX_AGGR; } skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_program_bb_rf() - This function starts base band and RF programming. * This is called after initial configurations are done. * @common: Pointer to the driver private structure. * * Return: 0 on success, corresponding negative error code on failure. */ static int rsi_program_bb_rf(struct rsi_common *common) { struct sk_buff *skb; struct rsi_bb_rf_prog *bb_rf_prog; u16 frame_len = sizeof(struct rsi_bb_rf_prog); rsi_dbg(MGMT_TX_ZONE, "%s: Sending program BB/RF frame\n", __func__); skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, frame_len); bb_rf_prog = (struct rsi_bb_rf_prog *)skb->data; rsi_set_len_qno(&bb_rf_prog->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q); bb_rf_prog->desc_dword0.frame_type = BBP_PROG_IN_TA; bb_rf_prog->endpoint = common->endpoint; bb_rf_prog->rf_power_mode = common->wlan_rf_power_mode; if (common->rf_reset) { bb_rf_prog->flags = cpu_to_le16(RF_RESET_ENABLE); rsi_dbg(MGMT_TX_ZONE, "%s: ===> RF RESET REQUEST SENT <===\n", __func__); common->rf_reset = 0; } common->bb_rf_prog_count = 1; bb_rf_prog->flags |= cpu_to_le16(PUT_BBP_RESET | BBP_REG_WRITE | (RSI_RF_TYPE << 4)); skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_set_vap_capabilities() - This function send vap capability to firmware. * @common: Pointer to the driver private structure. * @opmode: Operating mode of device. * * Return: 0 on success, corresponding negative error code on failure. 
*/ int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode, u8 *mac_addr, u8 vap_id, u8 vap_status) { struct sk_buff *skb = NULL; struct rsi_vap_caps *vap_caps; struct rsi_hw *adapter = common->priv; struct ieee80211_hw *hw = adapter->hw; struct ieee80211_conf *conf = &hw->conf; u16 frame_len = sizeof(struct rsi_vap_caps); rsi_dbg(MGMT_TX_ZONE, "%s: Sending VAP capabilities frame\n", __func__); skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, frame_len); vap_caps = (struct rsi_vap_caps *)skb->data; rsi_set_len_qno(&vap_caps->desc_dword0.len_qno, (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); vap_caps->desc_dword0.frame_type = VAP_CAPABILITIES; vap_caps->status = vap_status; vap_caps->vif_type = mode; vap_caps->channel_bw = common->channel_width; vap_caps->vap_id = vap_id; vap_caps->radioid_macid = ((common->mac_id & 0xf) << 4) | (common->radio_id & 0xf); memcpy(vap_caps->mac_addr, mac_addr, IEEE80211_ADDR_LEN); vap_caps->keep_alive_period = cpu_to_le16(90); vap_caps->frag_threshold = cpu_to_le16(IEEE80211_MAX_FRAG_THRESHOLD); vap_caps->rts_threshold = cpu_to_le16(common->rts_threshold); if (common->band == NL80211_BAND_5GHZ) { vap_caps->default_ctrl_rate = cpu_to_le16(RSI_RATE_6); vap_caps->default_mgmt_rate = cpu_to_le32(RSI_RATE_6); } else { vap_caps->default_ctrl_rate = cpu_to_le16(RSI_RATE_1); vap_caps->default_mgmt_rate = cpu_to_le32(RSI_RATE_1); } if (conf_is_ht40(conf)) { if (conf_is_ht40_minus(conf)) vap_caps->ctrl_rate_flags = cpu_to_le16(UPPER_20_ENABLE); else if (conf_is_ht40_plus(conf)) vap_caps->ctrl_rate_flags = cpu_to_le16(LOWER_20_ENABLE); else vap_caps->ctrl_rate_flags = cpu_to_le16(FULL40M_ENABLE); } vap_caps->default_data_rate = 0; vap_caps->beacon_interval = cpu_to_le16(common->beacon_interval); vap_caps->dtim_period = cpu_to_le16(common->dtim_cnt); skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_hal_load_key() - This function is used to load keys within the firmware. * @common: Pointer to the driver private structure. * @data: Pointer to the key data. * @key_len: Key length to be loaded. * @key_type: Type of key: GROUP/PAIRWISE. * @key_id: Key index. * @cipher: Type of cipher used. * * Return: 0 on success, -1 on failure. 
*/ int rsi_hal_load_key(struct rsi_common *common, u8 *data, u16 key_len, u8 key_type, u8 key_id, u32 cipher, s16 sta_id, struct ieee80211_vif *vif) { struct sk_buff *skb = NULL; struct rsi_set_key *set_key; u16 key_descriptor = 0; u16 frame_len = sizeof(struct rsi_set_key); rsi_dbg(MGMT_TX_ZONE, "%s: Sending load key frame\n", __func__); skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, frame_len); set_key = (struct rsi_set_key *)skb->data; if (key_type == RSI_GROUP_KEY) { key_descriptor = RSI_KEY_TYPE_BROADCAST; if (vif->type == NL80211_IFTYPE_AP) key_descriptor |= RSI_KEY_MODE_AP; } if ((cipher == WLAN_CIPHER_SUITE_WEP40) || (cipher == WLAN_CIPHER_SUITE_WEP104)) { key_id = 0; key_descriptor |= RSI_WEP_KEY; if (key_len >= 13) key_descriptor |= RSI_WEP_KEY_104; } else if (cipher != KEY_TYPE_CLEAR) { key_descriptor |= RSI_CIPHER_WPA; if (cipher == WLAN_CIPHER_SUITE_TKIP) key_descriptor |= RSI_CIPHER_TKIP; } key_descriptor |= RSI_PROTECT_DATA_FRAMES; key_descriptor |= (key_id << RSI_KEY_ID_OFFSET); rsi_set_len_qno(&set_key->desc_dword0.len_qno, (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); set_key->desc_dword0.frame_type = SET_KEY_REQ; set_key->key_desc = cpu_to_le16(key_descriptor); set_key->sta_id = sta_id; if (data) { if ((cipher == WLAN_CIPHER_SUITE_WEP40) || (cipher == WLAN_CIPHER_SUITE_WEP104)) { memcpy(&set_key->key[key_id][1], data, key_len * 2); } else { memcpy(&set_key->key[0][0], data, key_len); } memcpy(set_key->tx_mic_key, &data[16], 8); memcpy(set_key->rx_mic_key, &data[24], 8); } else { memset(&set_key[FRAME_DESC_SZ], 0, frame_len - FRAME_DESC_SZ); } skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } /* * This function sends the common device configuration parameters to device. * This frame includes the useful information to make device works on * specific operating mode. */ static int rsi_send_common_dev_params(struct rsi_common *common) { struct sk_buff *skb; u16 frame_len; struct rsi_config_vals *dev_cfgs; frame_len = sizeof(struct rsi_config_vals); rsi_dbg(MGMT_TX_ZONE, "Sending common device config params\n"); skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Unable to allocate skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, frame_len); dev_cfgs = (struct rsi_config_vals *)skb->data; memset(dev_cfgs, 0, (sizeof(struct rsi_config_vals))); rsi_set_len_qno(&dev_cfgs->len_qno, (frame_len - FRAME_DESC_SZ), RSI_COEX_Q); dev_cfgs->pkt_type = COMMON_DEV_CONFIG; dev_cfgs->lp_ps_handshake = common->lp_ps_handshake_mode; dev_cfgs->ulp_ps_handshake = common->ulp_ps_handshake_mode; dev_cfgs->unused_ulp_gpio = RSI_UNUSED_ULP_GPIO_BITMAP; dev_cfgs->unused_soc_gpio_bitmap = cpu_to_le32(RSI_UNUSED_SOC_GPIO_BITMAP); dev_cfgs->opermode = common->oper_mode; dev_cfgs->wlan_rf_pwr_mode = common->wlan_rf_power_mode; dev_cfgs->driver_mode = common->driver_mode; dev_cfgs->region_code = NL80211_DFS_FCC; dev_cfgs->antenna_sel_val = common->obm_ant_sel_val; skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } /* * rsi_load_bootup_params() - This function send bootup params to the firmware. * @common: Pointer to the driver private structure. * * Return: 0 on success, corresponding error code on failure. 
*/ static int rsi_load_bootup_params(struct rsi_common *common) { struct sk_buff *skb; struct rsi_boot_params *boot_params; rsi_dbg(MGMT_TX_ZONE, "%s: Sending boot params frame\n", __func__); skb = dev_alloc_skb(sizeof(struct rsi_boot_params)); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, sizeof(struct rsi_boot_params)); boot_params = (struct rsi_boot_params *)skb->data; rsi_dbg(MGMT_TX_ZONE, "%s:\n", __func__); if (common->channel_width == BW_40MHZ) { memcpy(&boot_params->bootup_params, &boot_params_40, sizeof(struct bootup_params)); rsi_dbg(MGMT_TX_ZONE, "%s: Packet 40MHZ <=== %d\n", __func__, UMAC_CLK_40BW); boot_params->desc_word[7] = cpu_to_le16(UMAC_CLK_40BW); } else { memcpy(&boot_params->bootup_params, &boot_params_20, sizeof(struct bootup_params)); if (boot_params_20.valid != cpu_to_le32(VALID_20)) { boot_params->desc_word[7] = cpu_to_le16(UMAC_CLK_20BW); rsi_dbg(MGMT_TX_ZONE, "%s: Packet 20MHZ <=== %d\n", __func__, UMAC_CLK_20BW); } else { boot_params->desc_word[7] = cpu_to_le16(UMAC_CLK_40MHZ); rsi_dbg(MGMT_TX_ZONE, "%s: Packet 20MHZ <=== %d\n", __func__, UMAC_CLK_40MHZ); } } /** * Bit{0:11} indicates length of the Packet * Bit{12:15} indicates host queue number */ boot_params->desc_word[0] = cpu_to_le16(sizeof(struct bootup_params) | (RSI_WIFI_MGMT_Q << 12)); boot_params->desc_word[1] = cpu_to_le16(BOOTUP_PARAMS_REQUEST); skb_put(skb, sizeof(struct rsi_boot_params)); return rsi_send_internal_mgmt_frame(common, skb); } static int rsi_load_9116_bootup_params(struct rsi_common *common) { struct sk_buff *skb; struct rsi_boot_params_9116 *boot_params; rsi_dbg(MGMT_TX_ZONE, "%s: Sending boot params frame\n", __func__); skb = dev_alloc_skb(sizeof(struct rsi_boot_params_9116)); if (!skb) return -ENOMEM; memset(skb->data, 0, sizeof(struct rsi_boot_params)); boot_params = (struct rsi_boot_params_9116 *)skb->data; if (common->channel_width == BW_40MHZ) { memcpy(&boot_params->bootup_params, &boot_params_9116_40, sizeof(struct bootup_params_9116)); rsi_dbg(MGMT_TX_ZONE, "%s: Packet 40MHZ <=== %d\n", __func__, UMAC_CLK_40BW); boot_params->umac_clk = cpu_to_le16(UMAC_CLK_40BW); } else { memcpy(&boot_params->bootup_params, &boot_params_9116_20, sizeof(struct bootup_params_9116)); if (boot_params_20.valid != cpu_to_le32(VALID_20)) { boot_params->umac_clk = cpu_to_le16(UMAC_CLK_20BW); rsi_dbg(MGMT_TX_ZONE, "%s: Packet 20MHZ <=== %d\n", __func__, UMAC_CLK_20BW); } else { boot_params->umac_clk = cpu_to_le16(UMAC_CLK_40MHZ); rsi_dbg(MGMT_TX_ZONE, "%s: Packet 20MHZ <=== %d\n", __func__, UMAC_CLK_40MHZ); } } rsi_set_len_qno(&boot_params->desc_dword0.len_qno, sizeof(struct bootup_params_9116), RSI_WIFI_MGMT_Q); boot_params->desc_dword0.frame_type = BOOTUP_PARAMS_REQUEST; skb_put(skb, sizeof(struct rsi_boot_params_9116)); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_send_reset_mac() - This function prepares reset MAC request and sends an * internal management frame to indicate it to firmware. * @common: Pointer to the driver private structure. * * Return: 0 on success, corresponding error code on failure. 
*/ static int rsi_send_reset_mac(struct rsi_common *common) { struct sk_buff *skb; struct rsi_mac_frame *mgmt_frame; rsi_dbg(MGMT_TX_ZONE, "%s: Sending reset MAC frame\n", __func__); skb = dev_alloc_skb(FRAME_DESC_SZ); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, FRAME_DESC_SZ); mgmt_frame = (struct rsi_mac_frame *)skb->data; mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); mgmt_frame->desc_word[1] = cpu_to_le16(RESET_MAC_REQ); mgmt_frame->desc_word[4] = cpu_to_le16(RETRY_COUNT << 8); #define RSI_9116_DEF_TA_AGGR 3 if (common->priv->device_model == RSI_DEV_9116) mgmt_frame->desc_word[3] |= cpu_to_le16(RSI_9116_DEF_TA_AGGR << 8); skb_put(skb, FRAME_DESC_SZ); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_band_check() - This function programs the band * @common: Pointer to the driver private structure. * * Return: 0 on success, corresponding error code on failure. */ int rsi_band_check(struct rsi_common *common, struct ieee80211_channel *curchan) { struct rsi_hw *adapter = common->priv; struct ieee80211_hw *hw = adapter->hw; u8 prev_bw = common->channel_width; u8 prev_ep = common->endpoint; int status = 0; if (common->band != curchan->band) { common->rf_reset = 1; common->band = curchan->band; } if ((hw->conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) || (hw->conf.chandef.width == NL80211_CHAN_WIDTH_20)) common->channel_width = BW_20MHZ; else common->channel_width = BW_40MHZ; if (common->band == NL80211_BAND_2GHZ) { if (common->channel_width) common->endpoint = EP_2GHZ_40MHZ; else common->endpoint = EP_2GHZ_20MHZ; } else { if (common->channel_width) common->endpoint = EP_5GHZ_40MHZ; else common->endpoint = EP_5GHZ_20MHZ; } if (common->endpoint != prev_ep) { status = rsi_program_bb_rf(common); if (status) return status; } if (common->channel_width != prev_bw) { if (adapter->device_model == RSI_DEV_9116) status = rsi_load_9116_bootup_params(common); else status = rsi_load_bootup_params(common); if (status) return status; status = rsi_load_radio_caps(common); if (status) return status; } return status; } /** * rsi_set_channel() - This function programs the channel. * @common: Pointer to the driver private structure. * @channel: Channel value to be set. * * Return: 0 on success, corresponding error code on failure. 
*/ int rsi_set_channel(struct rsi_common *common, struct ieee80211_channel *channel) { struct sk_buff *skb = NULL; struct rsi_chan_config *chan_cfg; u16 frame_len = sizeof(struct rsi_chan_config); rsi_dbg(MGMT_TX_ZONE, "%s: Sending scan req frame\n", __func__); skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } if (!channel) { dev_kfree_skb(skb); return 0; } memset(skb->data, 0, frame_len); chan_cfg = (struct rsi_chan_config *)skb->data; rsi_set_len_qno(&chan_cfg->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q); chan_cfg->desc_dword0.frame_type = SCAN_REQUEST; chan_cfg->channel_number = channel->hw_value; chan_cfg->antenna_gain_offset_2g = channel->max_antenna_gain; chan_cfg->antenna_gain_offset_5g = channel->max_antenna_gain; chan_cfg->region_rftype = (RSI_RF_TYPE & 0xf) << 4; if ((channel->flags & IEEE80211_CHAN_NO_IR) || (channel->flags & IEEE80211_CHAN_RADAR)) { chan_cfg->antenna_gain_offset_2g |= RSI_CHAN_RADAR; } else { if (common->tx_power < channel->max_power) chan_cfg->tx_power = cpu_to_le16(common->tx_power); else chan_cfg->tx_power = cpu_to_le16(channel->max_power); } chan_cfg->region_rftype |= (common->priv->dfs_region & 0xf); if (common->channel_width == BW_40MHZ) chan_cfg->channel_width = 0x1; common->channel = channel->hw_value; skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_send_radio_params_update() - This function sends the radio * parameters update to device * @common: Pointer to the driver private structure. * @channel: Channel value to be set. * * Return: 0 on success, corresponding error code on failure. */ int rsi_send_radio_params_update(struct rsi_common *common) { struct rsi_mac_frame *cmd_frame; struct sk_buff *skb = NULL; rsi_dbg(MGMT_TX_ZONE, "%s: Sending Radio Params update frame\n", __func__); skb = dev_alloc_skb(FRAME_DESC_SZ); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, FRAME_DESC_SZ); cmd_frame = (struct rsi_mac_frame *)skb->data; cmd_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); cmd_frame->desc_word[1] = cpu_to_le16(RADIO_PARAMS_UPDATE); cmd_frame->desc_word[3] = cpu_to_le16(BIT(0)); cmd_frame->desc_word[3] |= cpu_to_le16(common->tx_power << 8); skb_put(skb, FRAME_DESC_SZ); return rsi_send_internal_mgmt_frame(common, skb); } /* This function programs the threshold. 
*/ int rsi_send_vap_dynamic_update(struct rsi_common *common) { struct sk_buff *skb; struct rsi_dynamic_s *dynamic_frame; rsi_dbg(MGMT_TX_ZONE, "%s: Sending vap update indication frame\n", __func__); skb = dev_alloc_skb(sizeof(struct rsi_dynamic_s)); if (!skb) return -ENOMEM; memset(skb->data, 0, sizeof(struct rsi_dynamic_s)); dynamic_frame = (struct rsi_dynamic_s *)skb->data; rsi_set_len_qno(&dynamic_frame->desc_dword0.len_qno, sizeof(dynamic_frame->frame_body), RSI_WIFI_MGMT_Q); dynamic_frame->desc_dword0.frame_type = VAP_DYNAMIC_UPDATE; dynamic_frame->desc_dword2.pkt_info = cpu_to_le32(common->rts_threshold); if (common->wow_flags & RSI_WOW_ENABLED) { /* Beacon miss threshold */ dynamic_frame->desc_dword3.token = cpu_to_le16(RSI_BCN_MISS_THRESHOLD); dynamic_frame->frame_body.keep_alive_period = cpu_to_le16(RSI_WOW_KEEPALIVE); } else { dynamic_frame->frame_body.keep_alive_period = cpu_to_le16(RSI_DEF_KEEPALIVE); } dynamic_frame->desc_dword3.sta_id = 0; /* vap id */ skb_put(skb, sizeof(struct rsi_dynamic_s)); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_compare() - This function is used to compare two integers * @a: pointer to the first integer * @b: pointer to the second integer * * Return: 0 if both are equal, -1 if the first is smaller, else 1 */ static int rsi_compare(const void *a, const void *b) { u16 _a = *(const u16 *)(a); u16 _b = *(const u16 *)(b); if (_a > _b) return -1; if (_a < _b) return 1; return 0; } /** * rsi_map_rates() - This function is used to map selected rates to hw rates. * @rate: The standard rate to be mapped. * @offset: Offset that will be returned. * * Return: 0 if it is a mcs rate, else 1 */ static bool rsi_map_rates(u16 rate, int *offset) { int kk; for (kk = 0; kk < ARRAY_SIZE(rsi_mcsrates); kk++) { if (rate == mcs[kk]) { *offset = kk; return false; } } for (kk = 0; kk < ARRAY_SIZE(rsi_rates); kk++) { if (rate == rsi_rates[kk].bitrate / 5) { *offset = kk; break; } } return true; } /** * rsi_send_auto_rate_request() - This function is to set rates for connection * and send autorate request to firmware. * @common: Pointer to the driver private structure. * * Return: 0 on success, corresponding error code on failure. 
*/ static int rsi_send_auto_rate_request(struct rsi_common *common, struct ieee80211_sta *sta, u16 sta_id, struct ieee80211_vif *vif) { struct sk_buff *skb; struct rsi_auto_rate *auto_rate; int ii = 0, jj = 0, kk = 0; struct ieee80211_hw *hw = common->priv->hw; u8 band = hw->conf.chandef.chan->band; u8 num_supported_rates = 0; u8 rate_table_offset, rate_offset = 0; u32 rate_bitmap; u16 *selected_rates, min_rate; bool is_ht = false, is_sgi = false; u16 frame_len = sizeof(struct rsi_auto_rate); rsi_dbg(MGMT_TX_ZONE, "%s: Sending auto rate request frame\n", __func__); skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, frame_len); selected_rates = kzalloc(2 * RSI_TBL_SZ, GFP_KERNEL); if (!selected_rates) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of mem\n", __func__); dev_kfree_skb(skb); return -ENOMEM; } auto_rate = (struct rsi_auto_rate *)skb->data; auto_rate->aarf_rssi = cpu_to_le16(((u16)3 << 6) | (u16)(18 & 0x3f)); auto_rate->collision_tolerance = cpu_to_le16(3); auto_rate->failure_limit = cpu_to_le16(3); auto_rate->initial_boundary = cpu_to_le16(3); auto_rate->max_threshold_limt = cpu_to_le16(27); auto_rate->desc.desc_dword0.frame_type = AUTO_RATE_IND; if (common->channel_width == BW_40MHZ) auto_rate->desc.desc_dword3.qid_tid = BW_40MHZ; auto_rate->desc.desc_dword3.sta_id = sta_id; if (vif->type == NL80211_IFTYPE_STATION) { rate_bitmap = common->bitrate_mask[band]; is_ht = common->vif_info[0].is_ht; is_sgi = common->vif_info[0].sgi; } else { rate_bitmap = sta->supp_rates[band]; is_ht = sta->ht_cap.ht_supported; if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) || (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) is_sgi = true; } if (band == NL80211_BAND_2GHZ) { if ((rate_bitmap == 0) && (is_ht)) min_rate = RSI_RATE_MCS0; else min_rate = RSI_RATE_1; rate_table_offset = 0; } else { if ((rate_bitmap == 0) && (is_ht)) min_rate = RSI_RATE_MCS0; else min_rate = RSI_RATE_6; rate_table_offset = 4; } for (ii = 0, jj = 0; ii < (ARRAY_SIZE(rsi_rates) - rate_table_offset); ii++) { if (rate_bitmap & BIT(ii)) { selected_rates[jj++] = (rsi_rates[ii + rate_table_offset].bitrate / 5); rate_offset++; } } num_supported_rates = jj; if (is_ht) { for (ii = 0; ii < ARRAY_SIZE(mcs); ii++) selected_rates[jj++] = mcs[ii]; num_supported_rates += ARRAY_SIZE(mcs); rate_offset += ARRAY_SIZE(mcs); } sort(selected_rates, jj, sizeof(u16), &rsi_compare, NULL); /* mapping the rates to RSI rates */ for (ii = 0; ii < jj; ii++) { if (rsi_map_rates(selected_rates[ii], &kk)) { auto_rate->supported_rates[ii] = cpu_to_le16(rsi_rates[kk].hw_value); } else { auto_rate->supported_rates[ii] = cpu_to_le16(rsi_mcsrates[kk]); } } /* loading HT rates in the bottom half of the auto rate table */ if (is_ht) { for (ii = rate_offset, kk = ARRAY_SIZE(rsi_mcsrates) - 1; ii < rate_offset + 2 * ARRAY_SIZE(rsi_mcsrates); ii++) { if (is_sgi || conf_is_ht40(&common->priv->hw->conf)) auto_rate->supported_rates[ii++] = cpu_to_le16(rsi_mcsrates[kk] | BIT(9)); else auto_rate->supported_rates[ii++] = cpu_to_le16(rsi_mcsrates[kk]); auto_rate->supported_rates[ii] = cpu_to_le16(rsi_mcsrates[kk--]); } for (; ii < (RSI_TBL_SZ - 1); ii++) { auto_rate->supported_rates[ii] = cpu_to_le16(rsi_mcsrates[0]); } } for (; ii < RSI_TBL_SZ; ii++) auto_rate->supported_rates[ii] = cpu_to_le16(min_rate); auto_rate->num_supported_rates = cpu_to_le16(num_supported_rates * 2); auto_rate->moderate_rate_inx = cpu_to_le16(num_supported_rates / 2); num_supported_rates *= 2; 
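	/*
	 * Added note: the count handed to the firmware is doubled and
	 * moderate_rate_inx is set to the midpoint of the sorted table,
	 * presumably because each supported_rates[] slot is a 16-bit word;
	 * the doubled local copy is not referenced again in this function.
	 */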
rsi_set_len_qno(&auto_rate->desc.desc_dword0.len_qno, (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); skb_put(skb, frame_len); kfree(selected_rates); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_inform_bss_status() - This function informs about bss status with the * help of sta notify params by sending an internal * management frame to firmware. * @common: Pointer to the driver private structure. * @status: Bss status type. * @bssid: Bssid. * @qos_enable: Qos is enabled. * @aid: Aid (unique for all STAs). * * Return: None. */ void rsi_inform_bss_status(struct rsi_common *common, enum opmode opmode, u8 status, const u8 *addr, u8 qos_enable, u16 aid, struct ieee80211_sta *sta, u16 sta_id, u16 assoc_cap, struct ieee80211_vif *vif) { if (status) { if (opmode == RSI_OPMODE_STA) common->hw_data_qs_blocked = true; rsi_hal_send_sta_notify_frame(common, opmode, STA_CONNECTED, addr, qos_enable, aid, sta_id, vif); if (common->min_rate == 0xffff) rsi_send_auto_rate_request(common, sta, sta_id, vif); if (opmode == RSI_OPMODE_STA && !(assoc_cap & WLAN_CAPABILITY_PRIVACY) && !rsi_send_block_unblock_frame(common, false)) common->hw_data_qs_blocked = false; } else { if (opmode == RSI_OPMODE_STA) common->hw_data_qs_blocked = true; if (!(common->wow_flags & RSI_WOW_ENABLED)) rsi_hal_send_sta_notify_frame(common, opmode, STA_DISCONNECTED, addr, qos_enable, aid, sta_id, vif); if (opmode == RSI_OPMODE_STA) rsi_send_block_unblock_frame(common, true); } } /** * rsi_eeprom_read() - This function sends a frame to read the mac address * from the eeprom. * @common: Pointer to the driver private structure. * * Return: 0 on success, -1 on failure. */ static int rsi_eeprom_read(struct rsi_common *common) { struct rsi_eeprom_read_frame *mgmt_frame; struct rsi_hw *adapter = common->priv; struct sk_buff *skb; rsi_dbg(MGMT_TX_ZONE, "%s: Sending EEPROM read req frame\n", __func__); skb = dev_alloc_skb(FRAME_DESC_SZ); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, FRAME_DESC_SZ); mgmt_frame = (struct rsi_eeprom_read_frame *)skb->data; /* FrameType */ rsi_set_len_qno(&mgmt_frame->len_qno, 0, RSI_WIFI_MGMT_Q); mgmt_frame->pkt_type = EEPROM_READ; /* Number of bytes to read */ mgmt_frame->pkt_info = cpu_to_le32((adapter->eeprom.length << RSI_EEPROM_LEN_OFFSET) & RSI_EEPROM_LEN_MASK); mgmt_frame->pkt_info |= cpu_to_le32((3 << RSI_EEPROM_HDR_SIZE_OFFSET) & RSI_EEPROM_HDR_SIZE_MASK); /* Address to read */ mgmt_frame->eeprom_offset = cpu_to_le32(adapter->eeprom.offset); skb_put(skb, FRAME_DESC_SZ); return rsi_send_internal_mgmt_frame(common, skb); } /** * This function sends a frame to block/unblock * data queues in the firmware * * @param common Pointer to the driver private structure. * @param block event - block if true, unblock if false * @return 0 on success, -1 on failure. 
*/ int rsi_send_block_unblock_frame(struct rsi_common *common, bool block_event) { struct rsi_block_unblock_data *mgmt_frame; struct sk_buff *skb; rsi_dbg(MGMT_TX_ZONE, "%s: Sending block/unblock frame\n", __func__); skb = dev_alloc_skb(FRAME_DESC_SZ); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, FRAME_DESC_SZ); mgmt_frame = (struct rsi_block_unblock_data *)skb->data; rsi_set_len_qno(&mgmt_frame->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q); mgmt_frame->desc_dword0.frame_type = BLOCK_HW_QUEUE; mgmt_frame->host_quiet_info = QUIET_INFO_VALID; if (block_event) { rsi_dbg(INFO_ZONE, "blocking the data qs\n"); mgmt_frame->block_q_bitmap = cpu_to_le16(0xf); mgmt_frame->block_q_bitmap |= cpu_to_le16(0xf << 4); } else { rsi_dbg(INFO_ZONE, "unblocking the data qs\n"); mgmt_frame->unblock_q_bitmap = cpu_to_le16(0xf); mgmt_frame->unblock_q_bitmap |= cpu_to_le16(0xf << 4); } skb_put(skb, FRAME_DESC_SZ); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_send_rx_filter_frame() - Sends a frame to filter the RX packets * * @common: Pointer to the driver private structure. * @rx_filter_word: Flags of filter packets * * @Return: 0 on success, -1 on failure. */ int rsi_send_rx_filter_frame(struct rsi_common *common, u16 rx_filter_word) { struct rsi_mac_frame *cmd_frame; struct sk_buff *skb; rsi_dbg(MGMT_TX_ZONE, "Sending RX filter frame\n"); skb = dev_alloc_skb(FRAME_DESC_SZ); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, FRAME_DESC_SZ); cmd_frame = (struct rsi_mac_frame *)skb->data; cmd_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); cmd_frame->desc_word[1] = cpu_to_le16(SET_RX_FILTER); cmd_frame->desc_word[4] = cpu_to_le16(rx_filter_word); skb_put(skb, FRAME_DESC_SZ); return rsi_send_internal_mgmt_frame(common, skb); } int rsi_send_ps_request(struct rsi_hw *adapter, bool enable, struct ieee80211_vif *vif) { struct rsi_common *common = adapter->priv; struct ieee80211_bss_conf *bss = &vif->bss_conf; struct rsi_request_ps *ps; struct rsi_ps_info *ps_info; struct sk_buff *skb; int frame_len = sizeof(*ps); skb = dev_alloc_skb(frame_len); if (!skb) return -ENOMEM; memset(skb->data, 0, frame_len); ps = (struct rsi_request_ps *)skb->data; ps_info = &adapter->ps_info; rsi_set_len_qno(&ps->desc.desc_dword0.len_qno, (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); ps->desc.desc_dword0.frame_type = WAKEUP_SLEEP_REQUEST; if (enable) { ps->ps_sleep.enable = RSI_PS_ENABLE; ps->desc.desc_dword3.token = cpu_to_le16(RSI_SLEEP_REQUEST); } else { ps->ps_sleep.enable = RSI_PS_DISABLE; ps->desc.desc_dword0.len_qno |= cpu_to_le16(RSI_PS_DISABLE_IND); ps->desc.desc_dword3.token = cpu_to_le16(RSI_WAKEUP_REQUEST); } ps->ps_uapsd_acs = common->uapsd_bitmap; ps->ps_sleep.sleep_type = ps_info->sleep_type; ps->ps_sleep.num_bcns_per_lis_int = cpu_to_le16(ps_info->num_bcns_per_lis_int); ps->ps_sleep.sleep_duration = cpu_to_le32(ps_info->deep_sleep_wakeup_period); if (bss->assoc) ps->ps_sleep.connected_sleep = RSI_CONNECTED_SLEEP; else ps->ps_sleep.connected_sleep = RSI_DEEP_SLEEP; ps->ps_listen_interval = cpu_to_le32(ps_info->listen_interval); ps->ps_dtim_interval_duration = cpu_to_le32(ps_info->dtim_interval_duration); if (ps_info->listen_interval > ps_info->dtim_interval_duration) ps->ps_listen_interval = cpu_to_le32(RSI_PS_DISABLE); ps->ps_num_dtim_intervals = cpu_to_le16(ps_info->num_dtims_per_sleep); skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } 
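
/*
 * Illustrative sketch (added commentary, not part of the driver): the frame
 * builders above and below all follow the same pattern: allocate an skb
 * sized for the command, zero it, fill in the descriptor fields, skb_put()
 * the final length and queue it with rsi_send_internal_mgmt_frame().  A
 * hypothetical helper capturing the common prologue is sketched below and
 * kept compiled out.
 */
#if 0
static struct sk_buff *rsi_alloc_mgmt_skb(u16 frame_len)
{
	struct sk_buff *skb;

	/* Allocate and clear a buffer for one management command frame. */
	skb = dev_alloc_skb(frame_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, frame_len);
	return skb;
}
#endif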
static int rsi_send_w9116_features(struct rsi_common *common) { struct rsi_wlan_9116_features *w9116_features; u16 frame_len = sizeof(struct rsi_wlan_9116_features); struct sk_buff *skb; rsi_dbg(MGMT_TX_ZONE, "%s: Sending wlan 9116 features\n", __func__); skb = dev_alloc_skb(frame_len); if (!skb) return -ENOMEM; memset(skb->data, 0, frame_len); w9116_features = (struct rsi_wlan_9116_features *)skb->data; w9116_features->pll_mode = common->w9116_features.pll_mode; w9116_features->rf_type = common->w9116_features.rf_type; w9116_features->wireless_mode = common->w9116_features.wireless_mode; w9116_features->enable_ppe = common->w9116_features.enable_ppe; w9116_features->afe_type = common->w9116_features.afe_type; if (common->w9116_features.dpd) w9116_features->feature_enable |= cpu_to_le32(RSI_DPD); if (common->w9116_features.sifs_tx_enable) w9116_features->feature_enable |= cpu_to_le32(RSI_SIFS_TX_ENABLE); if (common->w9116_features.ps_options & RSI_DUTY_CYCLING) w9116_features->feature_enable |= cpu_to_le32(RSI_DUTY_CYCLING); if (common->w9116_features.ps_options & RSI_END_OF_FRAME) w9116_features->feature_enable |= cpu_to_le32(RSI_END_OF_FRAME); w9116_features->feature_enable |= cpu_to_le32((common->w9116_features.ps_options & ~0x3) << 2); rsi_set_len_qno(&w9116_features->desc.desc_dword0.len_qno, frame_len - FRAME_DESC_SZ, RSI_WIFI_MGMT_Q); w9116_features->desc.desc_dword0.frame_type = FEATURES_ENABLE; skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_set_antenna() - This function send antenna configuration request * to device * * @common: Pointer to the driver private structure. * @antenna: bitmap for tx antenna selection * * Return: 0 on Success, negative error code on failure */ int rsi_set_antenna(struct rsi_common *common, u8 antenna) { struct rsi_ant_sel_frame *ant_sel_frame; struct sk_buff *skb; skb = dev_alloc_skb(FRAME_DESC_SZ); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, FRAME_DESC_SZ); ant_sel_frame = (struct rsi_ant_sel_frame *)skb->data; ant_sel_frame->desc_dword0.frame_type = ANT_SEL_FRAME; ant_sel_frame->sub_frame_type = ANTENNA_SEL_TYPE; ant_sel_frame->ant_value = cpu_to_le16(antenna & ANTENNA_MASK_VALUE); rsi_set_len_qno(&ant_sel_frame->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q); skb_put(skb, FRAME_DESC_SZ); return rsi_send_internal_mgmt_frame(common, skb); } static int rsi_send_beacon(struct rsi_common *common) { struct sk_buff *skb = NULL; u8 dword_align_bytes = 0; skb = dev_alloc_skb(MAX_MGMT_PKT_SIZE); if (!skb) return -ENOMEM; memset(skb->data, 0, MAX_MGMT_PKT_SIZE); dword_align_bytes = ((unsigned long)skb->data & 0x3f); if (dword_align_bytes) skb_pull(skb, (64 - dword_align_bytes)); if (rsi_prepare_beacon(common, skb)) { rsi_dbg(ERR_ZONE, "Failed to prepare beacon\n"); dev_kfree_skb(skb); return -EINVAL; } skb_queue_tail(&common->tx_queue[MGMT_BEACON_Q], skb); rsi_set_event(&common->tx_thread.event); rsi_dbg(DATA_TX_ZONE, "%s: Added to beacon queue\n", __func__); return 0; } #ifdef CONFIG_PM int rsi_send_wowlan_request(struct rsi_common *common, u16 flags, u16 sleep_status) { struct rsi_wowlan_req *cmd_frame; struct sk_buff *skb; u8 length; rsi_dbg(ERR_ZONE, "%s: Sending wowlan request frame\n", __func__); length = sizeof(*cmd_frame); skb = dev_alloc_skb(length); if (!skb) return -ENOMEM; memset(skb->data, 0, length); cmd_frame = (struct rsi_wowlan_req *)skb->data; rsi_set_len_qno(&cmd_frame->desc.desc_dword0.len_qno, (length - FRAME_DESC_SZ), 
RSI_WIFI_MGMT_Q); cmd_frame->desc.desc_dword0.frame_type = WOWLAN_CONFIG_PARAMS; cmd_frame->host_sleep_status = sleep_status; if (common->secinfo.security_enable && common->secinfo.gtk_cipher) flags |= RSI_WOW_GTK_REKEY; if (sleep_status) cmd_frame->wow_flags = flags; rsi_dbg(INFO_ZONE, "Host_Sleep_Status : %d Flags : %d\n", cmd_frame->host_sleep_status, cmd_frame->wow_flags); skb_put(skb, length); return rsi_send_internal_mgmt_frame(common, skb); } #endif int rsi_send_bgscan_params(struct rsi_common *common, int enable) { struct rsi_bgscan_params *params = &common->bgscan; struct cfg80211_scan_request *scan_req = common->hwscan; struct rsi_bgscan_config *bgscan; struct sk_buff *skb; u16 frame_len = sizeof(*bgscan); u8 i; rsi_dbg(MGMT_TX_ZONE, "%s: Sending bgscan params frame\n", __func__); skb = dev_alloc_skb(frame_len); if (!skb) return -ENOMEM; memset(skb->data, 0, frame_len); bgscan = (struct rsi_bgscan_config *)skb->data; rsi_set_len_qno(&bgscan->desc_dword0.len_qno, (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); bgscan->desc_dword0.frame_type = BG_SCAN_PARAMS; bgscan->bgscan_threshold = cpu_to_le16(params->bgscan_threshold); bgscan->roam_threshold = cpu_to_le16(params->roam_threshold); if (enable) bgscan->bgscan_periodicity = cpu_to_le16(params->bgscan_periodicity); bgscan->active_scan_duration = cpu_to_le16(params->active_scan_duration); bgscan->passive_scan_duration = cpu_to_le16(params->passive_scan_duration); bgscan->two_probe = params->two_probe; bgscan->num_bgscan_channels = scan_req->n_channels; for (i = 0; i < bgscan->num_bgscan_channels; i++) bgscan->channels2scan[i] = cpu_to_le16(scan_req->channels[i]->hw_value); skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } /* This function sends the probe request to be used by firmware in * background scan */ int rsi_send_bgscan_probe_req(struct rsi_common *common, struct ieee80211_vif *vif) { struct cfg80211_scan_request *scan_req = common->hwscan; struct rsi_bgscan_probe *bgscan; struct sk_buff *skb; struct sk_buff *probereq_skb; u16 frame_len = sizeof(*bgscan); size_t ssid_len = 0; u8 *ssid = NULL; rsi_dbg(MGMT_TX_ZONE, "%s: Sending bgscan probe req frame\n", __func__); if (common->priv->sc_nvifs <= 0) return -ENODEV; if (scan_req->n_ssids) { ssid = scan_req->ssids[0].ssid; ssid_len = scan_req->ssids[0].ssid_len; } skb = dev_alloc_skb(frame_len + MAX_BGSCAN_PROBE_REQ_LEN); if (!skb) return -ENOMEM; memset(skb->data, 0, frame_len + MAX_BGSCAN_PROBE_REQ_LEN); bgscan = (struct rsi_bgscan_probe *)skb->data; bgscan->desc_dword0.frame_type = BG_SCAN_PROBE_REQ; bgscan->flags = cpu_to_le16(HOST_BG_SCAN_TRIG); if (common->band == NL80211_BAND_5GHZ) { bgscan->mgmt_rate = cpu_to_le16(RSI_RATE_6); bgscan->def_chan = cpu_to_le16(40); } else { bgscan->mgmt_rate = cpu_to_le16(RSI_RATE_1); bgscan->def_chan = cpu_to_le16(11); } bgscan->channel_scan_time = cpu_to_le16(RSI_CHANNEL_SCAN_TIME); probereq_skb = ieee80211_probereq_get(common->priv->hw, vif->addr, ssid, ssid_len, scan_req->ie_len); if (!probereq_skb) { dev_kfree_skb(skb); return -ENOMEM; } memcpy(&skb->data[frame_len], probereq_skb->data, probereq_skb->len); bgscan->probe_req_length = cpu_to_le16(probereq_skb->len); rsi_set_len_qno(&bgscan->desc_dword0.len_qno, (frame_len - FRAME_DESC_SZ + probereq_skb->len), RSI_WIFI_MGMT_Q); skb_put(skb, frame_len + probereq_skb->len); dev_kfree_skb(probereq_skb); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_handle_ta_confirm_type() - This function handles the confirm frames. 
* @common: Pointer to the driver private structure. * @msg: Pointer to received packet. * * Return: 0 on success, -1 on failure. */ static int rsi_handle_ta_confirm_type(struct rsi_common *common, u8 *msg) { struct rsi_hw *adapter = common->priv; u8 sub_type = (msg[15] & 0xff); u16 msg_len = ((u16 *)msg)[0] & 0xfff; u8 offset; switch (sub_type) { case BOOTUP_PARAMS_REQUEST: rsi_dbg(FSM_ZONE, "%s: Boot up params confirm received\n", __func__); if (common->fsm_state == FSM_BOOT_PARAMS_SENT) { if (adapter->device_model == RSI_DEV_9116) { common->band = NL80211_BAND_5GHZ; common->num_supp_bands = 2; if (rsi_send_reset_mac(common)) goto out; else common->fsm_state = FSM_RESET_MAC_SENT; } else { adapter->eeprom.length = (IEEE80211_ADDR_LEN + WLAN_MAC_MAGIC_WORD_LEN + WLAN_HOST_MODE_LEN); adapter->eeprom.offset = WLAN_MAC_EEPROM_ADDR; if (rsi_eeprom_read(common)) { common->fsm_state = FSM_CARD_NOT_READY; goto out; } common->fsm_state = FSM_EEPROM_READ_MAC_ADDR; } } else { rsi_dbg(INFO_ZONE, "%s: Received bootup params cfm in %d state\n", __func__, common->fsm_state); return 0; } break; case EEPROM_READ: rsi_dbg(FSM_ZONE, "EEPROM READ confirm received\n"); if (msg_len <= 0) { rsi_dbg(FSM_ZONE, "%s: [EEPROM_READ] Invalid len %d\n", __func__, msg_len); goto out; } if (msg[16] != MAGIC_WORD) { rsi_dbg(FSM_ZONE, "%s: [EEPROM_READ] Invalid token\n", __func__); common->fsm_state = FSM_CARD_NOT_READY; goto out; } if (common->fsm_state == FSM_EEPROM_READ_MAC_ADDR) { offset = (FRAME_DESC_SZ + WLAN_HOST_MODE_LEN + WLAN_MAC_MAGIC_WORD_LEN); memcpy(common->mac_addr, &msg[offset], ETH_ALEN); adapter->eeprom.length = ((WLAN_MAC_MAGIC_WORD_LEN + 3) & (~3)); adapter->eeprom.offset = WLAN_EEPROM_RFTYPE_ADDR; if (rsi_eeprom_read(common)) { rsi_dbg(ERR_ZONE, "%s: Failed reading RF band\n", __func__); common->fsm_state = FSM_CARD_NOT_READY; goto out; } common->fsm_state = FSM_EEPROM_READ_RF_TYPE; } else if (common->fsm_state == FSM_EEPROM_READ_RF_TYPE) { if ((msg[17] & 0x3) == 0x3) { rsi_dbg(INIT_ZONE, "Dual band supported\n"); common->band = NL80211_BAND_5GHZ; common->num_supp_bands = 2; } else if ((msg[17] & 0x3) == 0x1) { rsi_dbg(INIT_ZONE, "Only 2.4Ghz band supported\n"); common->band = NL80211_BAND_2GHZ; common->num_supp_bands = 1; } if (rsi_send_reset_mac(common)) goto out; common->fsm_state = FSM_RESET_MAC_SENT; } else { rsi_dbg(ERR_ZONE, "%s: Invalid EEPROM read type\n", __func__); return 0; } break; case RESET_MAC_REQ: if (common->fsm_state == FSM_RESET_MAC_SENT) { rsi_dbg(FSM_ZONE, "%s: Reset MAC cfm received\n", __func__); if (rsi_load_radio_caps(common)) goto out; else common->fsm_state = FSM_RADIO_CAPS_SENT; } else { rsi_dbg(ERR_ZONE, "%s: Received reset mac cfm in %d state\n", __func__, common->fsm_state); return 0; } break; case RADIO_CAPABILITIES: if (common->fsm_state == FSM_RADIO_CAPS_SENT) { common->rf_reset = 1; if (adapter->device_model == RSI_DEV_9116 && rsi_send_w9116_features(common)) { rsi_dbg(ERR_ZONE, "Failed to send 9116 features\n"); goto out; } if (rsi_program_bb_rf(common)) { goto out; } else { common->fsm_state = FSM_BB_RF_PROG_SENT; rsi_dbg(FSM_ZONE, "%s: Radio cap cfm received\n", __func__); } } else { rsi_dbg(INFO_ZONE, "%s: Received radio caps cfm in %d state\n", __func__, common->fsm_state); return 0; } break; case BB_PROG_VALUES_REQUEST: case RF_PROG_VALUES_REQUEST: case BBP_PROG_IN_TA: rsi_dbg(FSM_ZONE, "%s: BB/RF cfm received\n", __func__); if (common->fsm_state == FSM_BB_RF_PROG_SENT) { common->bb_rf_prog_count--; if (!common->bb_rf_prog_count) { common->fsm_state = 
FSM_MAC_INIT_DONE; if (common->reinit_hw) { complete(&common->wlan_init_completion); } else { return rsi_mac80211_attach(common); } } } else { rsi_dbg(INFO_ZONE, "%s: Received bbb_rf cfm in %d state\n", __func__, common->fsm_state); return 0; } break; case SCAN_REQUEST: rsi_dbg(INFO_ZONE, "Set channel confirm\n"); break; case WAKEUP_SLEEP_REQUEST: rsi_dbg(INFO_ZONE, "Wakeup/Sleep confirmation.\n"); return rsi_handle_ps_confirm(adapter, msg); case BG_SCAN_PROBE_REQ: rsi_dbg(INFO_ZONE, "BG scan complete event\n"); if (common->bgscan_en) { struct cfg80211_scan_info info; if (!rsi_send_bgscan_params(common, RSI_STOP_BGSCAN)) common->bgscan_en = 0; info.aborted = false; ieee80211_scan_completed(adapter->hw, &info); } rsi_dbg(INFO_ZONE, "Background scan completed\n"); break; default: rsi_dbg(INFO_ZONE, "%s: Invalid TA confirm pkt received\n", __func__); break; } return 0; out: rsi_dbg(ERR_ZONE, "%s: Unable to send pkt/Invalid frame received\n", __func__); return -EINVAL; } int rsi_handle_card_ready(struct rsi_common *common, u8 *msg) { int status; switch (common->fsm_state) { case FSM_CARD_NOT_READY: rsi_dbg(INIT_ZONE, "Card ready indication from Common HAL\n"); rsi_set_default_parameters(common); if (rsi_send_common_dev_params(common) < 0) return -EINVAL; common->fsm_state = FSM_COMMON_DEV_PARAMS_SENT; break; case FSM_COMMON_DEV_PARAMS_SENT: rsi_dbg(INIT_ZONE, "Card ready indication from WLAN HAL\n"); if (common->priv->device_model == RSI_DEV_9116) { if (msg[16] != MAGIC_WORD) { rsi_dbg(FSM_ZONE, "%s: [EEPROM_READ] Invalid token\n", __func__); common->fsm_state = FSM_CARD_NOT_READY; return -EINVAL; } memcpy(common->mac_addr, &msg[20], ETH_ALEN); rsi_dbg(INIT_ZONE, "MAC Addr %pM", common->mac_addr); } /* Get usb buffer status register address */ common->priv->usb_buffer_status_reg = *(u32 *)&msg[8]; rsi_dbg(INFO_ZONE, "USB buffer status register = %x\n", common->priv->usb_buffer_status_reg); if (common->priv->device_model == RSI_DEV_9116) status = rsi_load_9116_bootup_params(common); else status = rsi_load_bootup_params(common); if (status < 0) { common->fsm_state = FSM_CARD_NOT_READY; return status; } common->fsm_state = FSM_BOOT_PARAMS_SENT; break; default: rsi_dbg(ERR_ZONE, "%s: card ready indication in invalid state %d.\n", __func__, common->fsm_state); return -EINVAL; } return 0; } /** * rsi_mgmt_pkt_recv() - This function processes the management packets * received from the hardware. * @common: Pointer to the driver private structure. * @msg: Pointer to the received packet. * * Return: 0 on success, -1 on failure. 
*/ int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg) { s32 msg_len = (le16_to_cpu(*(__le16 *)&msg[0]) & 0x0fff); u16 msg_type = (msg[2]); rsi_dbg(FSM_ZONE, "%s: Msg Len: %d, Msg Type: %4x\n", __func__, msg_len, msg_type); switch (msg_type) { case TA_CONFIRM_TYPE: return rsi_handle_ta_confirm_type(common, msg); case CARD_READY_IND: common->hibernate_resume = false; rsi_dbg(FSM_ZONE, "%s: Card ready indication received\n", __func__); return rsi_handle_card_ready(common, msg); case TX_STATUS_IND: switch (msg[RSI_TX_STATUS_TYPE]) { case PROBEREQ_CONFIRM: common->mgmt_q_block = false; rsi_dbg(FSM_ZONE, "%s: Probe confirm received\n", __func__); break; case EAPOL4_CONFIRM: if (msg[RSI_TX_STATUS]) { common->eapol4_confirm = true; if (!rsi_send_block_unblock_frame(common, false)) common->hw_data_qs_blocked = false; } } break; case BEACON_EVENT_IND: rsi_dbg(INFO_ZONE, "Beacon event\n"); if (common->fsm_state != FSM_MAC_INIT_DONE) return -1; if (common->iface_down) return -1; if (!common->beacon_enabled) return -1; rsi_send_beacon(common); break; case WOWLAN_WAKEUP_REASON: rsi_dbg(ERR_ZONE, "\n\nWakeup Type: %x\n", msg[15]); switch (msg[15]) { case RSI_UNICAST_MAGIC_PKT: rsi_dbg(ERR_ZONE, "*** Wakeup for Unicast magic packet ***\n"); break; case RSI_BROADCAST_MAGICPKT: rsi_dbg(ERR_ZONE, "*** Wakeup for Broadcast magic packet ***\n"); break; case RSI_EAPOL_PKT: rsi_dbg(ERR_ZONE, "*** Wakeup for GTK renewal ***\n"); break; case RSI_DISCONNECT_PKT: rsi_dbg(ERR_ZONE, "*** Wakeup for Disconnect ***\n"); break; case RSI_HW_BMISS_PKT: rsi_dbg(ERR_ZONE, "*** Wakeup for HW Beacon miss ***\n"); break; default: rsi_dbg(ERR_ZONE, "##### Un-intentional Wakeup #####\n"); break; } break; case RX_DOT11_MGMT: return rsi_mgmt_pkt_to_core(common, msg, msg_len); default: rsi_dbg(INFO_ZONE, "Received packet type: 0x%x\n", msg_type); } return 0; }
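/*
 * Illustrative sketch (not part of the driver above): rsi_mgmt_pkt_recv()
 * extracts a 12-bit frame length from the first little-endian 16-bit word of
 * the descriptor and a type byte from msg[2] before dispatching.  The helper
 * below mirrors that unpacking in plain, self-contained C so the layout and a
 * possible bounds check are easy to see.  rsi_desc_hdr, RSI_MAX_MGMT_LEN and
 * rsi_sketch_parse_desc are hypothetical names invented for this example;
 * only the 0x0fff mask and the byte offsets come from the code above.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct rsi_desc_hdr {          /* hypothetical view of the first bytes */
	uint16_t len_qno_le;   /* little-endian: low 12 bits carry the length */
	uint8_t  reserved;
	uint8_t  frame_type;   /* corresponds to msg[2] in the driver */
};

#define RSI_MAX_MGMT_LEN 1536  /* assumed upper bound, for the sketch only */

/* Returns true and fills *len / *type only when the length looks sane. */
static bool rsi_sketch_parse_desc(const uint8_t *msg, size_t buf_len,
				  uint16_t *len, uint8_t *type)
{
	uint16_t word0;

	if (buf_len < sizeof(struct rsi_desc_hdr))
		return false;

	/* byte-wise read avoids alignment and host-endianness surprises */
	word0 = (uint16_t)msg[0] | ((uint16_t)msg[1] << 8);

	*len  = word0 & 0x0fff;   /* same 12-bit mask as the driver */
	*type = msg[2];

	return *len <= RSI_MAX_MGMT_LEN && *len <= buf_len;
}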
./CrossVul/dataset_final_sorted/CWE-400/c/good_1261_0
crossvul-cpp_data_bad_1257_0
/* * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/irqdomain.h> #include <linux/pci.h> #include <linux/pm_domain.h> #include <linux/platform_device.h> #include <sound/designware_i2s.h> #include <sound/pcm.h> #include "amdgpu.h" #include "atom.h" #include "amdgpu_acp.h" #include "acp_gfx_if.h" #define ACP_TILE_ON_MASK 0x03 #define ACP_TILE_OFF_MASK 0x02 #define ACP_TILE_ON_RETAIN_REG_MASK 0x1f #define ACP_TILE_OFF_RETAIN_REG_MASK 0x20 #define ACP_TILE_P1_MASK 0x3e #define ACP_TILE_P2_MASK 0x3d #define ACP_TILE_DSP0_MASK 0x3b #define ACP_TILE_DSP1_MASK 0x37 #define ACP_TILE_DSP2_MASK 0x2f #define ACP_DMA_REGS_END 0x146c0 #define ACP_I2S_PLAY_REGS_START 0x14840 #define ACP_I2S_PLAY_REGS_END 0x148b4 #define ACP_I2S_CAP_REGS_START 0x148b8 #define ACP_I2S_CAP_REGS_END 0x1496c #define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac #define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8 #define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c #define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68 #define ACP_BT_PLAY_REGS_START 0x14970 #define ACP_BT_PLAY_REGS_END 0x14a24 #define ACP_BT_COMP1_REG_OFFSET 0xac #define ACP_BT_COMP2_REG_OFFSET 0xa8 #define mmACP_PGFSM_RETAIN_REG 0x51c9 #define mmACP_PGFSM_CONFIG_REG 0x51ca #define mmACP_PGFSM_READ_REG_0 0x51cc #define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8 #define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9 #define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa #define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb #define mmACP_CONTROL 0x5131 #define mmACP_STATUS 0x5133 #define mmACP_SOFT_RESET 0x5134 #define ACP_CONTROL__ClkEn_MASK 0x1 #define ACP_SOFT_RESET__SoftResetAud_MASK 0x100 #define ACP_SOFT_RESET__SoftResetAudDone_MASK 0x1000000 #define ACP_CLOCK_EN_TIME_OUT_VALUE 0x000000FF #define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF #define ACP_TIMEOUT_LOOP 0x000000FF #define ACP_DEVS 4 #define ACP_SRC_ID 162 enum { ACP_TILE_P1 = 0, ACP_TILE_P2, ACP_TILE_DSP0, ACP_TILE_DSP1, ACP_TILE_DSP2, }; static int acp_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->acp.parent = adev->dev; adev->acp.cgs_device = amdgpu_cgs_create_device(adev); if (!adev->acp.cgs_device) return -EINVAL; return 0; } static int acp_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->acp.cgs_device) amdgpu_cgs_destroy_device(adev->acp.cgs_device); return 0; } struct acp_pm_domain { void *adev; struct generic_pm_domain gpd; }; static int acp_poweroff(struct generic_pm_domain *genpd) { struct acp_pm_domain *apd; 
struct amdgpu_device *adev; apd = container_of(genpd, struct acp_pm_domain, gpd); if (apd != NULL) { adev = apd->adev; /* call smu to POWER GATE ACP block * smu will * 1. turn off the acp clock * 2. power off the acp tiles * 3. check and enter ulv state */ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true); } return 0; } static int acp_poweron(struct generic_pm_domain *genpd) { struct acp_pm_domain *apd; struct amdgpu_device *adev; apd = container_of(genpd, struct acp_pm_domain, gpd); if (apd != NULL) { adev = apd->adev; /* call smu to UNGATE ACP block * smu will * 1. exit ulv * 2. turn on acp clock * 3. power on acp tiles */ if (adev->powerplay.pp_funcs->set_powergating_by_smu) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false); } return 0; } static struct device *get_mfd_cell_dev(const char *device_name, int r) { char auto_dev_name[25]; struct device *dev; snprintf(auto_dev_name, sizeof(auto_dev_name), "%s.%d.auto", device_name, r); dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name); dev_info(dev, "device %s added to pm domain\n", auto_dev_name); return dev; } /** * acp_hw_init - start and test ACP block * * @adev: amdgpu_device pointer * */ static int acp_hw_init(void *handle) { int r, i; uint64_t acp_base; u32 val = 0; u32 count = 0; struct device *dev; struct i2s_platform_data *i2s_pdata; struct amdgpu_device *adev = (struct amdgpu_device *)handle; const struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP); if (!ip_block) return -EINVAL; r = amd_acp_hw_init(adev->acp.cgs_device, ip_block->version->major, ip_block->version->minor); /* -ENODEV means board uses AZ rather than ACP */ if (r == -ENODEV) { amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true); return 0; } else if (r) { return r; } if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289) return -EINVAL; acp_base = adev->rmmio_base; adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL); if (adev->acp.acp_genpd == NULL) return -ENOMEM; adev->acp.acp_genpd->gpd.name = "ACP_AUDIO"; adev->acp.acp_genpd->gpd.power_off = acp_poweroff; adev->acp.acp_genpd->gpd.power_on = acp_poweron; adev->acp.acp_genpd->adev = adev; pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false); adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell), GFP_KERNEL); if (adev->acp.acp_cell == NULL) return -ENOMEM; adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL); if (adev->acp.acp_res == NULL) { kfree(adev->acp.acp_cell); return -ENOMEM; } i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL); if (i2s_pdata == NULL) { kfree(adev->acp.acp_res); kfree(adev->acp.acp_cell); return -ENOMEM; } switch (adev->asic_type) { case CHIP_STONEY: i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | DW_I2S_QUIRK_16BIT_IDX_OVERRIDE; break; default: i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET; } i2s_pdata[0].cap = DWC_I2S_PLAY; i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000; i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET; i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET; switch (adev->asic_type) { case CHIP_STONEY: i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | DW_I2S_QUIRK_COMP_PARAM1 | DW_I2S_QUIRK_16BIT_IDX_OVERRIDE; break; default: i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | DW_I2S_QUIRK_COMP_PARAM1; } i2s_pdata[1].cap = DWC_I2S_RECORD; i2s_pdata[1].snd_rates = 
SNDRV_PCM_RATE_8000_96000; i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET; i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET; i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET; switch (adev->asic_type) { case CHIP_STONEY: i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE; break; default: break; } i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD; i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000; i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET; i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET; adev->acp.acp_res[0].name = "acp2x_dma"; adev->acp.acp_res[0].flags = IORESOURCE_MEM; adev->acp.acp_res[0].start = acp_base; adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END; adev->acp.acp_res[1].name = "acp2x_dw_i2s_play"; adev->acp.acp_res[1].flags = IORESOURCE_MEM; adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START; adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END; adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap"; adev->acp.acp_res[2].flags = IORESOURCE_MEM; adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START; adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END; adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap"; adev->acp.acp_res[3].flags = IORESOURCE_MEM; adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START; adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END; adev->acp.acp_res[4].name = "acp2x_dma_irq"; adev->acp.acp_res[4].flags = IORESOURCE_IRQ; adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162); adev->acp.acp_res[4].end = adev->acp.acp_res[4].start; adev->acp.acp_cell[0].name = "acp_audio_dma"; adev->acp.acp_cell[0].num_resources = 5; adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0]; adev->acp.acp_cell[0].platform_data = &adev->asic_type; adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type); adev->acp.acp_cell[1].name = "designware-i2s"; adev->acp.acp_cell[1].num_resources = 1; adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1]; adev->acp.acp_cell[1].platform_data = &i2s_pdata[0]; adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data); adev->acp.acp_cell[2].name = "designware-i2s"; adev->acp.acp_cell[2].num_resources = 1; adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2]; adev->acp.acp_cell[2].platform_data = &i2s_pdata[1]; adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data); adev->acp.acp_cell[3].name = "designware-i2s"; adev->acp.acp_cell[3].num_resources = 1; adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3]; adev->acp.acp_cell[3].platform_data = &i2s_pdata[2]; adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data); r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, ACP_DEVS); if (r) return r; for (i = 0; i < ACP_DEVS ; i++) { dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); if (r) { dev_err(dev, "Failed to add dev to genpd\n"); return r; } } /* Assert Soft reset of ACP */ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET); val |= ACP_SOFT_RESET__SoftResetAud_MASK; cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val); count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE; while (true) { val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET); if (ACP_SOFT_RESET__SoftResetAudDone_MASK == (val & ACP_SOFT_RESET__SoftResetAudDone_MASK)) break; if (--count == 0) { dev_err(&adev->pdev->dev, "Failed to reset ACP\n"); return -ETIMEDOUT; } udelay(100); } /* Enable clock to ACP and wait until the clock is enabled */ 
val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL); val = val | ACP_CONTROL__ClkEn_MASK; cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val); count = ACP_CLOCK_EN_TIME_OUT_VALUE; while (true) { val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS); if (val & (u32) 0x1) break; if (--count == 0) { dev_err(&adev->pdev->dev, "Failed to reset ACP\n"); return -ETIMEDOUT; } udelay(100); } /* Deassert the SOFT RESET flags */ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET); val &= ~ACP_SOFT_RESET__SoftResetAud_MASK; cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val); return 0; } /** * acp_hw_fini - stop the hardware block * * @adev: amdgpu_device pointer * */ static int acp_hw_fini(void *handle) { int i, ret; u32 val = 0; u32 count = 0; struct device *dev; struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* return early if no ACP */ if (!adev->acp.acp_genpd) { amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false); return 0; } /* Assert Soft reset of ACP */ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET); val |= ACP_SOFT_RESET__SoftResetAud_MASK; cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val); count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE; while (true) { val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET); if (ACP_SOFT_RESET__SoftResetAudDone_MASK == (val & ACP_SOFT_RESET__SoftResetAudDone_MASK)) break; if (--count == 0) { dev_err(&adev->pdev->dev, "Failed to reset ACP\n"); return -ETIMEDOUT; } udelay(100); } /* Disable ACP clock */ val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL); val &= ~ACP_CONTROL__ClkEn_MASK; cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val); count = ACP_CLOCK_EN_TIME_OUT_VALUE; while (true) { val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS); if (val & (u32) 0x1) break; if (--count == 0) { dev_err(&adev->pdev->dev, "Failed to reset ACP\n"); return -ETIMEDOUT; } udelay(100); } for (i = 0; i < ACP_DEVS ; i++) { dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); ret = pm_genpd_remove_device(dev); /* If removal fails, dont giveup and try rest */ if (ret) dev_err(dev, "remove dev from genpd failed\n"); } mfd_remove_devices(adev->acp.parent); kfree(adev->acp.acp_res); kfree(adev->acp.acp_genpd); kfree(adev->acp.acp_cell); return 0; } static int acp_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* power up on suspend */ if (!adev->acp.acp_cell) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false); return 0; } static int acp_resume(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* power down again on resume */ if (!adev->acp.acp_cell) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true); return 0; } static int acp_early_init(void *handle) { return 0; } static bool acp_is_idle(void *handle) { return true; } static int acp_wait_for_idle(void *handle) { return 0; } static int acp_soft_reset(void *handle) { return 0; } static int acp_set_clockgating_state(void *handle, enum amd_clockgating_state state) { return 0; } static int acp_set_powergating_state(void *handle, enum amd_powergating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = state == AMD_PG_STATE_GATE ? 
true : false; if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable); return 0; } static const struct amd_ip_funcs acp_ip_funcs = { .name = "acp_ip", .early_init = acp_early_init, .late_init = NULL, .sw_init = acp_sw_init, .sw_fini = acp_sw_fini, .hw_init = acp_hw_init, .hw_fini = acp_hw_fini, .suspend = acp_suspend, .resume = acp_resume, .is_idle = acp_is_idle, .wait_for_idle = acp_wait_for_idle, .soft_reset = acp_soft_reset, .set_clockgating_state = acp_set_clockgating_state, .set_powergating_state = acp_set_powergating_state, }; const struct amdgpu_ip_block_version acp_ip_block = { .type = AMD_IP_BLOCK_TYPE_ACP, .major = 2, .minor = 2, .rev = 0, .funcs = &acp_ip_funcs, };
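/*
 * Illustrative sketch (not part of the AMD code above): acp_hw_init() makes
 * several heap allocations (acp_genpd, acp_cell, acp_res, i2s_pdata) and has
 * many early-return error paths.  The snippet below shows, in self-contained
 * userspace C, the conventional goto-unwind shape that keeps such paths
 * leak-free.  All names here (demo_ctx, demo_init, N_CELLS, N_RES) are made
 * up for the example; this is a pattern sketch, not the driver's actual
 * cleanup code.
 */
#include <stdlib.h>

#define N_CELLS 4
#define N_RES   5

struct demo_ctx {
	void *genpd;
	void *cells;
	void *res;
	void *pdata;
};

static int demo_init(struct demo_ctx *ctx)
{
	int ret = -1;	/* stand-in for -ENOMEM */

	ctx->genpd = calloc(1, 64);
	if (!ctx->genpd)
		return ret;

	ctx->cells = calloc(N_CELLS, 128);
	if (!ctx->cells)
		goto free_genpd;

	ctx->res = calloc(N_RES, 64);
	if (!ctx->res)
		goto free_cells;

	ctx->pdata = calloc(3, 96);
	if (!ctx->pdata)
		goto free_res;

	return 0;	/* success: caller now owns everything */

free_res:
	free(ctx->res);
free_cells:
	free(ctx->cells);
free_genpd:
	free(ctx->genpd);
	return ret;	/* every failure path unwinds what was already taken */
}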
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1257_0
crossvul-cpp_data_bad_1254_0
// SPDX-License-Identifier: GPL-2.0+ // // Freescale i.MX7ULP LPSPI driver // // Copyright 2016 Freescale Semiconductor, Inc. // Copyright 2018 NXP Semiconductors #include <linux/clk.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/platform_data/dma-imx.h> #include <linux/platform_data/spi-imx.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> #include <linux/types.h> #define DRIVER_NAME "fsl_lpspi" #define FSL_LPSPI_RPM_TIMEOUT 50 /* 50ms */ /* The maximum bytes that edma can transfer once.*/ #define FSL_LPSPI_MAX_EDMA_BYTES ((1 << 15) - 1) /* i.MX7ULP LPSPI registers */ #define IMX7ULP_VERID 0x0 #define IMX7ULP_PARAM 0x4 #define IMX7ULP_CR 0x10 #define IMX7ULP_SR 0x14 #define IMX7ULP_IER 0x18 #define IMX7ULP_DER 0x1c #define IMX7ULP_CFGR0 0x20 #define IMX7ULP_CFGR1 0x24 #define IMX7ULP_DMR0 0x30 #define IMX7ULP_DMR1 0x34 #define IMX7ULP_CCR 0x40 #define IMX7ULP_FCR 0x58 #define IMX7ULP_FSR 0x5c #define IMX7ULP_TCR 0x60 #define IMX7ULP_TDR 0x64 #define IMX7ULP_RSR 0x70 #define IMX7ULP_RDR 0x74 /* General control register field define */ #define CR_RRF BIT(9) #define CR_RTF BIT(8) #define CR_RST BIT(1) #define CR_MEN BIT(0) #define SR_MBF BIT(24) #define SR_TCF BIT(10) #define SR_FCF BIT(9) #define SR_RDF BIT(1) #define SR_TDF BIT(0) #define IER_TCIE BIT(10) #define IER_FCIE BIT(9) #define IER_RDIE BIT(1) #define IER_TDIE BIT(0) #define DER_RDDE BIT(1) #define DER_TDDE BIT(0) #define CFGR1_PCSCFG BIT(27) #define CFGR1_PINCFG (BIT(24)|BIT(25)) #define CFGR1_PCSPOL BIT(8) #define CFGR1_NOSTALL BIT(3) #define CFGR1_MASTER BIT(0) #define FSR_TXCOUNT (0xFF) #define RSR_RXEMPTY BIT(1) #define TCR_CPOL BIT(31) #define TCR_CPHA BIT(30) #define TCR_CONT BIT(21) #define TCR_CONTC BIT(20) #define TCR_RXMSK BIT(19) #define TCR_TXMSK BIT(18) static int clkdivs[] = {1, 2, 4, 8, 16, 32, 64, 128}; struct lpspi_config { u8 bpw; u8 chip_select; u8 prescale; u16 mode; u32 speed_hz; }; struct fsl_lpspi_data { struct device *dev; void __iomem *base; unsigned long base_phys; struct clk *clk_ipg; struct clk *clk_per; bool is_slave; bool is_first_byte; void *rx_buf; const void *tx_buf; void (*tx)(struct fsl_lpspi_data *); void (*rx)(struct fsl_lpspi_data *); u32 remain; u8 watermark; u8 txfifosize; u8 rxfifosize; struct lpspi_config config; struct completion xfer_done; bool slave_aborted; /* DMA */ bool usedma; struct completion dma_rx_completion; struct completion dma_tx_completion; int chipselect[0]; }; static const struct of_device_id fsl_lpspi_dt_ids[] = { { .compatible = "fsl,imx7ulp-spi", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids); #define LPSPI_BUF_RX(type) \ static void fsl_lpspi_buf_rx_##type(struct fsl_lpspi_data *fsl_lpspi) \ { \ unsigned int val = readl(fsl_lpspi->base + IMX7ULP_RDR); \ \ if (fsl_lpspi->rx_buf) { \ *(type *)fsl_lpspi->rx_buf = val; \ fsl_lpspi->rx_buf += sizeof(type); \ } \ } #define LPSPI_BUF_TX(type) \ static void fsl_lpspi_buf_tx_##type(struct fsl_lpspi_data *fsl_lpspi) \ { \ type val = 0; \ \ if (fsl_lpspi->tx_buf) { \ val = *(type *)fsl_lpspi->tx_buf; \ fsl_lpspi->tx_buf += sizeof(type); \ 
} \ \ fsl_lpspi->remain -= sizeof(type); \ writel(val, fsl_lpspi->base + IMX7ULP_TDR); \ } LPSPI_BUF_RX(u8) LPSPI_BUF_TX(u8) LPSPI_BUF_RX(u16) LPSPI_BUF_TX(u16) LPSPI_BUF_RX(u32) LPSPI_BUF_TX(u32) static void fsl_lpspi_intctrl(struct fsl_lpspi_data *fsl_lpspi, unsigned int enable) { writel(enable, fsl_lpspi->base + IMX7ULP_IER); } static int fsl_lpspi_bytes_per_word(const int bpw) { return DIV_ROUND_UP(bpw, BITS_PER_BYTE); } static bool fsl_lpspi_can_dma(struct spi_controller *controller, struct spi_device *spi, struct spi_transfer *transfer) { unsigned int bytes_per_word; if (!controller->dma_rx) return false; bytes_per_word = fsl_lpspi_bytes_per_word(transfer->bits_per_word); switch (bytes_per_word) { case 1: case 2: case 4: break; default: return false; } return true; } static int lpspi_prepare_xfer_hardware(struct spi_controller *controller) { struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(controller); int ret; ret = pm_runtime_get_sync(fsl_lpspi->dev); if (ret < 0) { dev_err(fsl_lpspi->dev, "failed to enable clock\n"); return ret; } return 0; } static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller) { struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(controller); pm_runtime_mark_last_busy(fsl_lpspi->dev); pm_runtime_put_autosuspend(fsl_lpspi->dev); return 0; } static int fsl_lpspi_prepare_message(struct spi_controller *controller, struct spi_message *msg) { struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(controller); struct spi_device *spi = msg->spi; int gpio = fsl_lpspi->chipselect[spi->chip_select]; if (gpio_is_valid(gpio)) gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1); return 0; } static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi) { u8 txfifo_cnt; u32 temp; txfifo_cnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff; while (txfifo_cnt < fsl_lpspi->txfifosize) { if (!fsl_lpspi->remain) break; fsl_lpspi->tx(fsl_lpspi); txfifo_cnt++; } if (txfifo_cnt < fsl_lpspi->txfifosize) { if (!fsl_lpspi->is_slave) { temp = readl(fsl_lpspi->base + IMX7ULP_TCR); temp &= ~TCR_CONTC; writel(temp, fsl_lpspi->base + IMX7ULP_TCR); } fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE); } else fsl_lpspi_intctrl(fsl_lpspi, IER_TDIE); } static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi) { while (!(readl(fsl_lpspi->base + IMX7ULP_RSR) & RSR_RXEMPTY)) fsl_lpspi->rx(fsl_lpspi); } static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi) { u32 temp = 0; temp |= fsl_lpspi->config.bpw - 1; temp |= (fsl_lpspi->config.mode & 0x3) << 30; if (!fsl_lpspi->is_slave) { temp |= fsl_lpspi->config.prescale << 27; temp |= (fsl_lpspi->config.chip_select & 0x3) << 24; /* * Set TCR_CONT will keep SS asserted after current transfer. * For the first transfer, clear TCR_CONTC to assert SS. * For subsequent transfer, set TCR_CONTC to keep SS asserted. 
*/ if (!fsl_lpspi->usedma) { temp |= TCR_CONT; if (fsl_lpspi->is_first_byte) temp &= ~TCR_CONTC; else temp |= TCR_CONTC; } } writel(temp, fsl_lpspi->base + IMX7ULP_TCR); dev_dbg(fsl_lpspi->dev, "TCR=0x%x\n", temp); } static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi) { u32 temp; if (!fsl_lpspi->usedma) temp = fsl_lpspi->watermark >> 1 | (fsl_lpspi->watermark >> 1) << 16; else temp = fsl_lpspi->watermark >> 1; writel(temp, fsl_lpspi->base + IMX7ULP_FCR); dev_dbg(fsl_lpspi->dev, "FCR=0x%x\n", temp); } static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi) { struct lpspi_config config = fsl_lpspi->config; unsigned int perclk_rate, scldiv; u8 prescale; perclk_rate = clk_get_rate(fsl_lpspi->clk_per); if (config.speed_hz > perclk_rate / 2) { dev_err(fsl_lpspi->dev, "per-clk should be at least two times of transfer speed"); return -EINVAL; } for (prescale = 0; prescale < 8; prescale++) { scldiv = perclk_rate / (clkdivs[prescale] * config.speed_hz) - 2; if (scldiv < 256) { fsl_lpspi->config.prescale = prescale; break; } } if (prescale == 8 && scldiv >= 256) return -EINVAL; writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16), fsl_lpspi->base + IMX7ULP_CCR); dev_dbg(fsl_lpspi->dev, "perclk=%d, speed=%d, prescale=%d, scldiv=%d\n", perclk_rate, config.speed_hz, prescale, scldiv); return 0; } static int fsl_lpspi_dma_configure(struct spi_controller *controller) { int ret; enum dma_slave_buswidth buswidth; struct dma_slave_config rx = {}, tx = {}; struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(controller); switch (fsl_lpspi_bytes_per_word(fsl_lpspi->config.bpw)) { case 4: buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; break; case 2: buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; break; case 1: buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; break; default: return -EINVAL; } tx.direction = DMA_MEM_TO_DEV; tx.dst_addr = fsl_lpspi->base_phys + IMX7ULP_TDR; tx.dst_addr_width = buswidth; tx.dst_maxburst = 1; ret = dmaengine_slave_config(controller->dma_tx, &tx); if (ret) { dev_err(fsl_lpspi->dev, "TX dma configuration failed with %d\n", ret); return ret; } rx.direction = DMA_DEV_TO_MEM; rx.src_addr = fsl_lpspi->base_phys + IMX7ULP_RDR; rx.src_addr_width = buswidth; rx.src_maxburst = 1; ret = dmaengine_slave_config(controller->dma_rx, &rx); if (ret) { dev_err(fsl_lpspi->dev, "RX dma configuration failed with %d\n", ret); return ret; } return 0; } static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi) { u32 temp; int ret; if (!fsl_lpspi->is_slave) { ret = fsl_lpspi_set_bitrate(fsl_lpspi); if (ret) return ret; } fsl_lpspi_set_watermark(fsl_lpspi); if (!fsl_lpspi->is_slave) temp = CFGR1_MASTER; else temp = CFGR1_PINCFG; if (fsl_lpspi->config.mode & SPI_CS_HIGH) temp |= CFGR1_PCSPOL; writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1); temp = readl(fsl_lpspi->base + IMX7ULP_CR); temp |= CR_RRF | CR_RTF | CR_MEN; writel(temp, fsl_lpspi->base + IMX7ULP_CR); temp = 0; if (fsl_lpspi->usedma) temp = DER_TDDE | DER_RDDE; writel(temp, fsl_lpspi->base + IMX7ULP_DER); return 0; } static int fsl_lpspi_setup_transfer(struct spi_controller *controller, struct spi_device *spi, struct spi_transfer *t) { struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(spi->controller); if (t == NULL) return -EINVAL; fsl_lpspi->config.mode = spi->mode; fsl_lpspi->config.bpw = t->bits_per_word; fsl_lpspi->config.speed_hz = t->speed_hz; fsl_lpspi->config.chip_select = spi->chip_select; if (!fsl_lpspi->config.speed_hz) fsl_lpspi->config.speed_hz = spi->max_speed_hz; if (!fsl_lpspi->config.bpw) 
fsl_lpspi->config.bpw = spi->bits_per_word; /* Initialize the functions for transfer */ if (fsl_lpspi->config.bpw <= 8) { fsl_lpspi->rx = fsl_lpspi_buf_rx_u8; fsl_lpspi->tx = fsl_lpspi_buf_tx_u8; } else if (fsl_lpspi->config.bpw <= 16) { fsl_lpspi->rx = fsl_lpspi_buf_rx_u16; fsl_lpspi->tx = fsl_lpspi_buf_tx_u16; } else { fsl_lpspi->rx = fsl_lpspi_buf_rx_u32; fsl_lpspi->tx = fsl_lpspi_buf_tx_u32; } if (t->len <= fsl_lpspi->txfifosize) fsl_lpspi->watermark = t->len; else fsl_lpspi->watermark = fsl_lpspi->txfifosize; if (fsl_lpspi_can_dma(controller, spi, t)) fsl_lpspi->usedma = 1; else fsl_lpspi->usedma = 0; return fsl_lpspi_config(fsl_lpspi); } static int fsl_lpspi_slave_abort(struct spi_controller *controller) { struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(controller); fsl_lpspi->slave_aborted = true; if (!fsl_lpspi->usedma) complete(&fsl_lpspi->xfer_done); else { complete(&fsl_lpspi->dma_tx_completion); complete(&fsl_lpspi->dma_rx_completion); } return 0; } static int fsl_lpspi_wait_for_completion(struct spi_controller *controller) { struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(controller); if (fsl_lpspi->is_slave) { if (wait_for_completion_interruptible(&fsl_lpspi->xfer_done) || fsl_lpspi->slave_aborted) { dev_dbg(fsl_lpspi->dev, "interrupted\n"); return -EINTR; } } else { if (!wait_for_completion_timeout(&fsl_lpspi->xfer_done, HZ)) { dev_dbg(fsl_lpspi->dev, "wait for completion timeout\n"); return -ETIMEDOUT; } } return 0; } static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi) { u32 temp; if (!fsl_lpspi->usedma) { /* Disable all interrupt */ fsl_lpspi_intctrl(fsl_lpspi, 0); } /* W1C for all flags in SR */ temp = 0x3F << 8; writel(temp, fsl_lpspi->base + IMX7ULP_SR); /* Clear FIFO and disable module */ temp = CR_RRF | CR_RTF; writel(temp, fsl_lpspi->base + IMX7ULP_CR); return 0; } static void fsl_lpspi_dma_rx_callback(void *cookie) { struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie; complete(&fsl_lpspi->dma_rx_completion); } static void fsl_lpspi_dma_tx_callback(void *cookie) { struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie; complete(&fsl_lpspi->dma_tx_completion); } static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi, int size) { unsigned long timeout = 0; /* Time with actual data transfer and CS change delay related to HW */ timeout = (8 + 4) * size / fsl_lpspi->config.speed_hz; /* Add extra second for scheduler related activities */ timeout += 1; /* Double calculated timeout */ return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC); } static int fsl_lpspi_dma_transfer(struct spi_controller *controller, struct fsl_lpspi_data *fsl_lpspi, struct spi_transfer *transfer) { struct dma_async_tx_descriptor *desc_tx, *desc_rx; unsigned long transfer_timeout; unsigned long timeout; struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg; int ret; ret = fsl_lpspi_dma_configure(controller); if (ret) return ret; desc_rx = dmaengine_prep_slave_sg(controller->dma_rx, rx->sgl, rx->nents, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_rx) return -EINVAL; desc_rx->callback = fsl_lpspi_dma_rx_callback; desc_rx->callback_param = (void *)fsl_lpspi; dmaengine_submit(desc_rx); reinit_completion(&fsl_lpspi->dma_rx_completion); dma_async_issue_pending(controller->dma_rx); desc_tx = dmaengine_prep_slave_sg(controller->dma_tx, tx->sgl, tx->nents, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_tx) { dmaengine_terminate_all(controller->dma_tx); return -EINVAL; } desc_tx->callback = 
fsl_lpspi_dma_tx_callback; desc_tx->callback_param = (void *)fsl_lpspi; dmaengine_submit(desc_tx); reinit_completion(&fsl_lpspi->dma_tx_completion); dma_async_issue_pending(controller->dma_tx); fsl_lpspi->slave_aborted = false; if (!fsl_lpspi->is_slave) { transfer_timeout = fsl_lpspi_calculate_timeout(fsl_lpspi, transfer->len); /* Wait eDMA to finish the data transfer.*/ timeout = wait_for_completion_timeout(&fsl_lpspi->dma_tx_completion, transfer_timeout); if (!timeout) { dev_err(fsl_lpspi->dev, "I/O Error in DMA TX\n"); dmaengine_terminate_all(controller->dma_tx); dmaengine_terminate_all(controller->dma_rx); fsl_lpspi_reset(fsl_lpspi); return -ETIMEDOUT; } timeout = wait_for_completion_timeout(&fsl_lpspi->dma_rx_completion, transfer_timeout); if (!timeout) { dev_err(fsl_lpspi->dev, "I/O Error in DMA RX\n"); dmaengine_terminate_all(controller->dma_tx); dmaengine_terminate_all(controller->dma_rx); fsl_lpspi_reset(fsl_lpspi); return -ETIMEDOUT; } } else { if (wait_for_completion_interruptible(&fsl_lpspi->dma_tx_completion) || fsl_lpspi->slave_aborted) { dev_dbg(fsl_lpspi->dev, "I/O Error in DMA TX interrupted\n"); dmaengine_terminate_all(controller->dma_tx); dmaengine_terminate_all(controller->dma_rx); fsl_lpspi_reset(fsl_lpspi); return -EINTR; } if (wait_for_completion_interruptible(&fsl_lpspi->dma_rx_completion) || fsl_lpspi->slave_aborted) { dev_dbg(fsl_lpspi->dev, "I/O Error in DMA RX interrupted\n"); dmaengine_terminate_all(controller->dma_tx); dmaengine_terminate_all(controller->dma_rx); fsl_lpspi_reset(fsl_lpspi); return -EINTR; } } fsl_lpspi_reset(fsl_lpspi); return 0; } static void fsl_lpspi_dma_exit(struct spi_controller *controller) { if (controller->dma_rx) { dma_release_channel(controller->dma_rx); controller->dma_rx = NULL; } if (controller->dma_tx) { dma_release_channel(controller->dma_tx); controller->dma_tx = NULL; } } static int fsl_lpspi_dma_init(struct device *dev, struct fsl_lpspi_data *fsl_lpspi, struct spi_controller *controller) { int ret; /* Prepare for TX DMA: */ controller->dma_tx = dma_request_slave_channel_reason(dev, "tx"); if (IS_ERR(controller->dma_tx)) { ret = PTR_ERR(controller->dma_tx); dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret); controller->dma_tx = NULL; goto err; } /* Prepare for RX DMA: */ controller->dma_rx = dma_request_slave_channel_reason(dev, "rx"); if (IS_ERR(controller->dma_rx)) { ret = PTR_ERR(controller->dma_rx); dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret); controller->dma_rx = NULL; goto err; } init_completion(&fsl_lpspi->dma_rx_completion); init_completion(&fsl_lpspi->dma_tx_completion); controller->can_dma = fsl_lpspi_can_dma; controller->max_dma_len = FSL_LPSPI_MAX_EDMA_BYTES; return 0; err: fsl_lpspi_dma_exit(controller); return ret; } static int fsl_lpspi_pio_transfer(struct spi_controller *controller, struct spi_transfer *t) { struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(controller); int ret; fsl_lpspi->tx_buf = t->tx_buf; fsl_lpspi->rx_buf = t->rx_buf; fsl_lpspi->remain = t->len; reinit_completion(&fsl_lpspi->xfer_done); fsl_lpspi->slave_aborted = false; fsl_lpspi_write_tx_fifo(fsl_lpspi); ret = fsl_lpspi_wait_for_completion(controller); if (ret) return ret; fsl_lpspi_reset(fsl_lpspi); return 0; } static int fsl_lpspi_transfer_one(struct spi_controller *controller, struct spi_device *spi, struct spi_transfer *t) { struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(controller); int ret; fsl_lpspi->is_first_byte = true; ret = fsl_lpspi_setup_transfer(controller, spi, t); 
if (ret < 0) return ret; fsl_lpspi_set_cmd(fsl_lpspi); fsl_lpspi->is_first_byte = false; if (fsl_lpspi->usedma) ret = fsl_lpspi_dma_transfer(controller, fsl_lpspi, t); else ret = fsl_lpspi_pio_transfer(controller, t); if (ret < 0) return ret; return 0; } static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id) { u32 temp_SR, temp_IER; struct fsl_lpspi_data *fsl_lpspi = dev_id; temp_IER = readl(fsl_lpspi->base + IMX7ULP_IER); fsl_lpspi_intctrl(fsl_lpspi, 0); temp_SR = readl(fsl_lpspi->base + IMX7ULP_SR); fsl_lpspi_read_rx_fifo(fsl_lpspi); if ((temp_SR & SR_TDF) && (temp_IER & IER_TDIE)) { fsl_lpspi_write_tx_fifo(fsl_lpspi); return IRQ_HANDLED; } if (temp_SR & SR_MBF || readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_TXCOUNT) { writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR); fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE); return IRQ_HANDLED; } if (temp_SR & SR_FCF && (temp_IER & IER_FCIE)) { writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR); complete(&fsl_lpspi->xfer_done); return IRQ_HANDLED; } return IRQ_NONE; } #ifdef CONFIG_PM static int fsl_lpspi_runtime_resume(struct device *dev) { struct spi_controller *controller = dev_get_drvdata(dev); struct fsl_lpspi_data *fsl_lpspi; int ret; fsl_lpspi = spi_controller_get_devdata(controller); ret = clk_prepare_enable(fsl_lpspi->clk_per); if (ret) return ret; ret = clk_prepare_enable(fsl_lpspi->clk_ipg); if (ret) { clk_disable_unprepare(fsl_lpspi->clk_per); return ret; } return 0; } static int fsl_lpspi_runtime_suspend(struct device *dev) { struct spi_controller *controller = dev_get_drvdata(dev); struct fsl_lpspi_data *fsl_lpspi; fsl_lpspi = spi_controller_get_devdata(controller); clk_disable_unprepare(fsl_lpspi->clk_per); clk_disable_unprepare(fsl_lpspi->clk_ipg); return 0; } #endif static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi) { struct device *dev = fsl_lpspi->dev; pm_runtime_enable(dev); pm_runtime_set_autosuspend_delay(dev, FSL_LPSPI_RPM_TIMEOUT); pm_runtime_use_autosuspend(dev); return 0; } static int fsl_lpspi_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct fsl_lpspi_data *fsl_lpspi; struct spi_controller *controller; struct spi_imx_master *lpspi_platform_info = dev_get_platdata(&pdev->dev); struct resource *res; int i, ret, irq; u32 temp; bool is_slave; is_slave = of_property_read_bool((&pdev->dev)->of_node, "spi-slave"); if (is_slave) controller = spi_alloc_slave(&pdev->dev, sizeof(struct fsl_lpspi_data)); else controller = spi_alloc_master(&pdev->dev, sizeof(struct fsl_lpspi_data)); if (!controller) return -ENOMEM; platform_set_drvdata(pdev, controller); fsl_lpspi = spi_controller_get_devdata(controller); fsl_lpspi->dev = &pdev->dev; fsl_lpspi->is_slave = is_slave; if (!fsl_lpspi->is_slave) { for (i = 0; i < controller->num_chipselect; i++) { int cs_gpio = of_get_named_gpio(np, "cs-gpios", i); if (!gpio_is_valid(cs_gpio) && lpspi_platform_info) cs_gpio = lpspi_platform_info->chipselect[i]; fsl_lpspi->chipselect[i] = cs_gpio; if (!gpio_is_valid(cs_gpio)) continue; ret = devm_gpio_request(&pdev->dev, fsl_lpspi->chipselect[i], DRIVER_NAME); if (ret) { dev_err(&pdev->dev, "can't get cs gpios\n"); goto out_controller_put; } } controller->cs_gpios = fsl_lpspi->chipselect; controller->prepare_message = fsl_lpspi_prepare_message; } controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32); controller->transfer_one = fsl_lpspi_transfer_one; controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware; controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware; controller->mode_bits = 
SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX; controller->dev.of_node = pdev->dev.of_node; controller->bus_num = pdev->id; controller->slave_abort = fsl_lpspi_slave_abort; init_completion(&fsl_lpspi->xfer_done); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); fsl_lpspi->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(fsl_lpspi->base)) { ret = PTR_ERR(fsl_lpspi->base); goto out_controller_put; } fsl_lpspi->base_phys = res->start; irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = irq; goto out_controller_put; } ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, 0, dev_name(&pdev->dev), fsl_lpspi); if (ret) { dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret); goto out_controller_put; } fsl_lpspi->clk_per = devm_clk_get(&pdev->dev, "per"); if (IS_ERR(fsl_lpspi->clk_per)) { ret = PTR_ERR(fsl_lpspi->clk_per); goto out_controller_put; } fsl_lpspi->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); if (IS_ERR(fsl_lpspi->clk_ipg)) { ret = PTR_ERR(fsl_lpspi->clk_ipg); goto out_controller_put; } /* enable the clock */ ret = fsl_lpspi_init_rpm(fsl_lpspi); if (ret) goto out_controller_put; ret = pm_runtime_get_sync(fsl_lpspi->dev); if (ret < 0) { dev_err(fsl_lpspi->dev, "failed to enable clock\n"); return ret; } temp = readl(fsl_lpspi->base + IMX7ULP_PARAM); fsl_lpspi->txfifosize = 1 << (temp & 0x0f); fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f); ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller); if (ret == -EPROBE_DEFER) goto out_controller_put; if (ret < 0) dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret); ret = devm_spi_register_controller(&pdev->dev, controller); if (ret < 0) { dev_err(&pdev->dev, "spi_register_controller error.\n"); goto out_controller_put; } return 0; out_controller_put: spi_controller_put(controller); return ret; } static int fsl_lpspi_remove(struct platform_device *pdev) { struct spi_controller *controller = platform_get_drvdata(pdev); struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(controller); pm_runtime_disable(fsl_lpspi->dev); spi_master_put(controller); return 0; } #ifdef CONFIG_PM_SLEEP static int fsl_lpspi_suspend(struct device *dev) { int ret; pinctrl_pm_select_sleep_state(dev); ret = pm_runtime_force_suspend(dev); return ret; } static int fsl_lpspi_resume(struct device *dev) { int ret; ret = pm_runtime_force_resume(dev); if (ret) { dev_err(dev, "Error in resume: %d\n", ret); return ret; } pinctrl_pm_select_default_state(dev); return 0; } #endif /* CONFIG_PM_SLEEP */ static const struct dev_pm_ops fsl_lpspi_pm_ops = { SET_RUNTIME_PM_OPS(fsl_lpspi_runtime_suspend, fsl_lpspi_runtime_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(fsl_lpspi_suspend, fsl_lpspi_resume) }; static struct platform_driver fsl_lpspi_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = fsl_lpspi_dt_ids, .pm = &fsl_lpspi_pm_ops, }, .probe = fsl_lpspi_probe, .remove = fsl_lpspi_remove, }; module_platform_driver(fsl_lpspi_driver); MODULE_DESCRIPTION("LPSPI Controller driver"); MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>"); MODULE_LICENSE("GPL");
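/*
 * Illustrative sketch (not part of the driver above): fsl_lpspi_set_bitrate()
 * walks the fixed prescaler table {1, 2, 4, ..., 128} and picks the first
 * entry whose SCK divider fits the 8-bit CCR field
 * (scldiv = perclk / (prescale * speed) - 2, scldiv < 256).  The standalone
 * helper below reproduces that search so the selection logic can be exercised
 * in userspace; lpspi_pick_divider and its return convention are invented for
 * this example, and only the arithmetic follows the code above.
 */
#include <stdint.h>

static const unsigned int sketch_clkdivs[] = { 1, 2, 4, 8, 16, 32, 64, 128 };

/* Returns 0 and fills *prescale / *scldiv on success, -1 if nothing fits. */
static int lpspi_pick_divider(uint32_t perclk_hz, uint32_t speed_hz,
			      uint8_t *prescale, uint8_t *scldiv)
{
	unsigned int i;

	if (!speed_hz || speed_hz > perclk_hz / 2)
		return -1;	/* per-clk must be at least 2x transfer speed */

	for (i = 0; i < 8; i++) {
		uint32_t div = perclk_hz / (sketch_clkdivs[i] * speed_hz);

		if (div < 2)	/* would underflow the "- 2" below */
			continue;

		if (div - 2 < 256) {
			*prescale = (uint8_t)i;
			*scldiv = (uint8_t)(div - 2);
			return 0;
		}
	}

	return -1;
}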
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1254_0
crossvul-cpp_data_bad_1273_5
/* * Copyright 2016 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/slab.h> #include "dm_services.h" #include "dc.h" #include "resource.h" #include "include/irq_service_interface.h" #include "dcn10_resource.h" #include "dcn10_ipp.h" #include "dcn10_mpc.h" #include "irq/dcn10/irq_service_dcn10.h" #include "dcn10_dpp.h" #include "dcn10_optc.h" #include "dcn10_hw_sequencer.h" #include "dce110/dce110_hw_sequencer.h" #include "dcn10_opp.h" #include "dcn10_link_encoder.h" #include "dcn10_stream_encoder.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dce112/dce112_resource.h" #include "dcn10_hubp.h" #include "dcn10_hubbub.h" #include "soc15_hw_ip.h" #include "vega10_ip_offset.h" #include "dcn/dcn_1_0_offset.h" #include "dcn/dcn_1_0_sh_mask.h" #include "nbio/nbio_7_0_offset.h" #include "mmhub/mmhub_9_1_offset.h" #include "mmhub/mmhub_9_1_sh_mask.h" #include "reg_helper.h" #include "dce/dce_abm.h" #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" const struct _vcs_dpi_ip_params_st dcn1_0_ip = { .rob_buffer_size_kbytes = 64, .det_buffer_size_kbytes = 164, .dpte_buffer_size_in_pte_reqs_luma = 42, .dpp_output_buffer_pixels = 2560, .opp_output_buffer_lines = 1, .pixel_chunk_size_kbytes = 8, .pte_enable = 1, .pte_chunk_size_kbytes = 2, .meta_chunk_size_kbytes = 2, .writeback_chunk_size_kbytes = 2, .line_buffer_size_bits = 589824, .max_line_buffer_lines = 12, .IsLineBufferBppFixed = 0, .LineBufferFixedBpp = -1, .writeback_luma_buffer_size_kbytes = 12, .writeback_chroma_buffer_size_kbytes = 8, .max_num_dpp = 4, .max_num_wb = 2, .max_dchub_pscl_bw_pix_per_clk = 4, .max_pscl_lb_bw_pix_per_clk = 2, .max_lb_vscl_bw_pix_per_clk = 4, .max_vscl_hscl_bw_pix_per_clk = 4, .max_hscl_ratio = 4, .max_vscl_ratio = 4, .hscl_mults = 4, .vscl_mults = 4, .max_hscl_taps = 8, .max_vscl_taps = 8, .dispclk_ramp_margin_percent = 1, .underscan_factor = 1.10, .min_vblank_lines = 14, .dppclk_delay_subtotal = 90, .dispclk_delay_subtotal = 42, .dcfclk_cstate_latency = 10, .max_inter_dcn_tile_repeaters = 8, .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0, .bug_forcing_LC_req_same_size_fixed = 0, }; const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = { .sr_exit_time_us = 9.0, .sr_enter_plus_exit_time_us = 11.0, .urgent_latency_us = 4.0, .writeback_latency_us = 12.0, 
.ideal_dram_bw_after_urgent_percent = 80.0, .max_request_size_bytes = 256, .downspread_percent = 0.5, .dram_page_open_time_ns = 50.0, .dram_rw_turnaround_time_ns = 17.5, .dram_return_buffer_per_channel_bytes = 8192, .round_trip_ping_latency_dcfclk_cycles = 128, .urgent_out_of_order_return_per_channel_bytes = 256, .channel_interleave_bytes = 256, .num_banks = 8, .num_chans = 2, .vmm_page_size_bytes = 4096, .dram_clock_change_latency_us = 17.0, .writeback_dram_clock_change_latency_us = 23.0, .return_bus_width_bytes = 64, }; #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #endif enum dcn10_clk_src_array_id { DCN10_CLK_SRC_PLL0, DCN10_CLK_SRC_PLL1, DCN10_CLK_SRC_PLL2, DCN10_CLK_SRC_PLL3, DCN10_CLK_SRC_TOTAL, DCN101_CLK_SRC_TOTAL = DCN10_CLK_SRC_PLL3 }; /* begin ********************* * macros to expend register list macro defined in HW object header file */ /* DCN */ #define BASE_INNER(seg) \ DCE_BASE__INST0_SEG ## seg #define BASE(seg) \ BASE_INNER(seg) #define SR(reg_name)\ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name #define SRI(reg_name, block, id)\ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name #define SRII(reg_name, block, id)\ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name /* NBIO */ #define NBIO_BASE_INNER(seg) \ NBIF_BASE__INST0_SEG ## seg #define NBIO_BASE(seg) \ NBIO_BASE_INNER(seg) #define NBIO_SR(reg_name)\ .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name /* MMHUB */ #define MMHUB_BASE_INNER(seg) \ MMHUB_BASE__INST0_SEG ## seg #define MMHUB_BASE(seg) \ MMHUB_BASE_INNER(seg) #define MMHUB_SR(reg_name)\ .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name /* macros to expend register list macro defined in HW object header file * end *********************/ static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCN10_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { DMCU_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dce_dmcu_mask dmcu_mask = { DMCU_MASK_SH_LIST_DCN10(_MASK) }; static const struct dce_abm_registers abm_regs = { ABM_DCN10_REG_LIST(0) }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCN10(_MASK) }; #define stream_enc_regs(id)\ [id] = {\ SE_DCN_REG_LIST(id)\ } static const struct dcn10_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), stream_enc_regs(3), }; static const struct dcn10_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dcn10_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCN10(_MASK) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), audio_regs(2), 
audio_regs(3), }; #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) static const struct dce_audio_shift audio_shift = { DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) }; #define aux_regs(id)\ [id] = {\ AUX_REG_LIST(id)\ } static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3) }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3) }; #define link_regs(id)\ [id] = {\ LE_DCN10_REG_LIST(id), \ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ } static const struct dcn10_link_enc_registers link_enc_regs[] = { link_regs(0), link_regs(1), link_regs(2), link_regs(3) }; static const struct dcn10_link_enc_shift le_shift = { LINK_ENCODER_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dcn10_link_enc_mask le_mask = { LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK) }; #define ipp_regs(id)\ [id] = {\ IPP_REG_LIST_DCN10(id),\ } static const struct dcn10_ipp_registers ipp_regs[] = { ipp_regs(0), ipp_regs(1), ipp_regs(2), ipp_regs(3), }; static const struct dcn10_ipp_shift ipp_shift = { IPP_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dcn10_ipp_mask ipp_mask = { IPP_MASK_SH_LIST_DCN10(_MASK), }; #define opp_regs(id)\ [id] = {\ OPP_REG_LIST_DCN10(id),\ } static const struct dcn10_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), opp_regs(3), }; static const struct dcn10_opp_shift opp_shift = { OPP_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dcn10_opp_mask opp_mask = { OPP_MASK_SH_LIST_DCN10(_MASK), }; #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ .AUX_RESET_MASK = 0 \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4), aux_engine_regs(5) }; #define tf_regs(id)\ [id] = {\ TF_REG_LIST_DCN10(id),\ } static const struct dcn_dpp_registers tf_regs[] = { tf_regs(0), tf_regs(1), tf_regs(2), tf_regs(3), }; static const struct dcn_dpp_shift tf_shift = { TF_REG_LIST_SH_MASK_DCN10(__SHIFT), TF_DEBUG_REG_LIST_SH_DCN10 }; static const struct dcn_dpp_mask tf_mask = { TF_REG_LIST_SH_MASK_DCN10(_MASK), TF_DEBUG_REG_LIST_MASK_DCN10 }; static const struct dcn_mpc_registers mpc_regs = { MPC_COMMON_REG_LIST_DCN1_0(0), MPC_COMMON_REG_LIST_DCN1_0(1), MPC_COMMON_REG_LIST_DCN1_0(2), MPC_COMMON_REG_LIST_DCN1_0(3), MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(0), MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(1), MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(2), MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(3) }; static const struct dcn_mpc_shift mpc_shift = { MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT) }; static const struct dcn_mpc_mask mpc_mask = { MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK), }; #define tg_regs(id)\ [id] = {TG_COMMON_REG_LIST_DCN1_0(id)} static const struct dcn_optc_registers tg_regs[] = { tg_regs(0), tg_regs(1), tg_regs(2), tg_regs(3), }; static const struct dcn_optc_shift tg_shift = { TG_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT) }; static const struct dcn_optc_mask tg_mask = { TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK) }; static const struct bios_registers bios_regs = { NBIO_SR(BIOS_SCRATCH_3), NBIO_SR(BIOS_SCRATCH_6) }; #define hubp_regs(id)\ [id] = {\ 
HUBP_REG_LIST_DCN10(id)\ } static const struct dcn_mi_registers hubp_regs[] = { hubp_regs(0), hubp_regs(1), hubp_regs(2), hubp_regs(3), }; static const struct dcn_mi_shift hubp_shift = { HUBP_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dcn_mi_mask hubp_mask = { HUBP_MASK_SH_LIST_DCN10(_MASK) }; static const struct dcn_hubbub_registers hubbub_reg = { HUBBUB_REG_LIST_DCN10(0) }; static const struct dcn_hubbub_shift hubbub_shift = { HUBBUB_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dcn_hubbub_mask hubbub_mask = { HUBBUB_MASK_SH_LIST_DCN10(_MASK) }; #define clk_src_regs(index, pllid)\ [index] = {\ CS_COMMON_REG_LIST_DCN1_0(index, pllid),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0, A), clk_src_regs(1, B), clk_src_regs(2, C), clk_src_regs(3, D) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK) }; static const struct resource_caps res_cap = { .num_timing_generator = 4, .num_opp = 4, .num_video_plane = 4, .num_audio = 4, .num_stream_encoder = 4, .num_pll = 4, .num_ddc = 4, }; static const struct resource_caps rv2_res_cap = { .num_timing_generator = 3, .num_opp = 3, .num_video_plane = 3, .num_audio = 3, .num_stream_encoder = 3, .num_pll = 3, .num_ddc = 4, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCN_UNIVERSAL, .blends_with_above = true, .blends_with_below = true, .per_pixel_alpha = true, .pixel_format_support = { .argb8888 = true, .nv12 = true, .fp16 = true }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 16000, .fp16 = 1 }, .max_downscale_factor = { .argb8888 = 250, .nv12 = 250, .fp16 = 1 } }; static const struct dc_debug_options debug_defaults_drv = { .sanity_checks = true, .disable_dmcu = true, .force_abm_enable = false, .timing_trace = false, .clock_trace = true, /* raven smu dones't allow 0 disp clk, * smu min disp clk limit is 50Mhz * keep min disp clk 100Mhz avoid smu hang */ .min_disp_clk_khz = 100000, .disable_pplib_clock_request = false, .disable_pplib_wm_range = false, .pplib_wm_report_mode = WM_REPORT_DEFAULT, .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, .force_single_disp_pipe_split = true, .disable_dcc = DCC_ENABLE, .voltage_align_fclk = true, .disable_stereo_support = true, .vsr_support = true, .performance_trace = false, .az_endpoint_mute_only = true, .recovery_enabled = false, /*enable this by default after testing.*/ .max_downscale_src_width = 3840, .underflow_assert_delay_us = 0xFFFFFFFF, }; static const struct dc_debug_options debug_defaults_diags = { .disable_dmcu = true, .force_abm_enable = false, .timing_trace = true, .clock_trace = true, .disable_stutter = true, .disable_pplib_clock_request = true, .disable_pplib_wm_range = true, .underflow_assert_delay_us = 0xFFFFFFFF, }; static void dcn10_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN10_DPP(*dpp)); *dpp = NULL; } static struct dpp *dcn10_dpp_create( struct dc_context *ctx, uint32_t inst) { struct dcn10_dpp *dpp = kzalloc(sizeof(struct dcn10_dpp), GFP_KERNEL); if (!dpp) return NULL; dpp1_construct(dpp, ctx, inst, &tf_regs[inst], &tf_shift, &tf_mask); return &dpp->base; } static struct input_pixel_processor *dcn10_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dcn10_ipp *ipp = kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); return NULL; } dcn10_ipp_construct(ipp, ctx, inst, &ipp_regs[inst], &ipp_shift, &ipp_mask); return &ipp->base; } static struct 
output_pixel_processor *dcn10_opp_create( struct dc_context *ctx, uint32_t inst) { struct dcn10_opp *opp = kzalloc(sizeof(struct dcn10_opp), GFP_KERNEL); if (!opp) { BREAK_TO_DEBUGGER(); return NULL; } dcn10_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } struct dce_aux *dcn10_aux_engine_create( struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst]); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5), i2c_inst_regs(6), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) }; struct dce_i2c_hw *dcn10_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dcn1_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } static struct mpc *dcn10_mpc_create(struct dc_context *ctx) { struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc), GFP_KERNEL); if (!mpc10) return NULL; dcn10_mpc_construct(mpc10, ctx, &mpc_regs, &mpc_shift, &mpc_mask, 4); return &mpc10->base; } static struct hubbub *dcn10_hubbub_create(struct dc_context *ctx) { struct dcn10_hubbub *dcn10_hubbub = kzalloc(sizeof(struct dcn10_hubbub), GFP_KERNEL); if (!dcn10_hubbub) return NULL; hubbub1_construct(&dcn10_hubbub->base, ctx, &hubbub_reg, &hubbub_shift, &hubbub_mask); return &dcn10_hubbub->base; } static struct timing_generator *dcn10_timing_generator_create( struct dc_context *ctx, uint32_t instance) { struct optc *tgn10 = kzalloc(sizeof(struct optc), GFP_KERNEL); if (!tgn10) return NULL; tgn10->base.inst = instance; tgn10->base.ctx = ctx; tgn10->tg_regs = &tg_regs[instance]; tgn10->tg_shift = &tg_shift; tgn10->tg_mask = &tg_mask; dcn10_timing_generator_init(tgn10); return &tgn10->base; } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 600000, .hdmi_ycbcr420_supported = true, .dp_ycbcr420_supported = false, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, .flags.bits.IS_TPS4_CAPABLE = true }; struct link_encoder *dcn10_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dcn10_link_encoder *enc10 = kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL); if (!enc10) return NULL; dcn10_link_encoder_construct(enc10, enc_init_data, &link_enc_feature, &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, &le_mask); return &enc10->base; } struct clock_source *dcn10_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dce112_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = 
dp_clk_src; return &clk_src->base; } BREAK_TO_DEBUGGER(); return NULL; } static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); } static struct audio *create_audio( struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } static struct stream_encoder *dcn10_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dcn10_stream_encoder *enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); if (!enc1) return NULL; dcn10_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc1->base; } static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCN1_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCN1_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCN1_MASK_SH_LIST(_MASK) }; static struct dce_hwseq *dcn10_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; hws->wa.DEGVIDCN10_253 = true; hws->wa.false_optc_underflow = true; hws->wa.DEGVIDCN10_254 = true; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, .create_stream_encoder = dcn10_stream_encoder_create, .create_hwseq = dcn10_hwseq_create, }; static const struct resource_create_funcs res_create_maximus_funcs = { .read_dce_straps = NULL, .create_audio = NULL, .create_stream_encoder = NULL, .create_hwseq = dcn10_hwseq_create, }; void dcn10_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); *clk_src = NULL; } static struct pp_smu_funcs *dcn10_pp_smu_create(struct dc_context *ctx) { struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL); if (!pp_smu) return pp_smu; dm_pp_get_funcs(ctx, pp_smu); return pp_smu; } static void destruct(struct dcn10_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) { kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); pool->base.stream_enc[i] = NULL; } } if (pool->base.mpc != NULL) { kfree(TO_DCN10_MPC(pool->base.mpc)); pool->base.mpc = NULL; } if (pool->base.hubbub != NULL) { kfree(pool->base.hubbub); pool->base.hubbub = NULL; } for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.opps[i] != NULL) pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); if (pool->base.dpps[i] != NULL) dcn10_dpp_destroy(&pool->base.dpps[i]); if (pool->base.ipps[i] != NULL) pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); if (pool->base.hubps[i] != NULL) { kfree(TO_DCN10_HUBP(pool->base.hubps[i])); pool->base.hubps[i] = NULL; } if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); } if (pool->base.timing_generators[i] != NULL) { kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { 
kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i]) dce_aud_destroy(&pool->base.audios[i]); } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) { dcn10_clock_source_destroy(&pool->base.clock_sources[i]); pool->base.clock_sources[i] = NULL; } } if (pool->base.dp_clock_source != NULL) { dcn10_clock_source_destroy(&pool->base.dp_clock_source); pool->base.dp_clock_source = NULL; } if (pool->base.abm != NULL) dce_abm_destroy(&pool->base.abm); if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); kfree(pool->base.pp_smu); } static struct hubp *dcn10_hubp_create( struct dc_context *ctx, uint32_t inst) { struct dcn10_hubp *hubp1 = kzalloc(sizeof(struct dcn10_hubp), GFP_KERNEL); if (!hubp1) return NULL; dcn10_hubp_construct(hubp1, ctx, inst, &hubp_regs[inst], &hubp_shift, &hubp_mask); return &hubp1->base; } static void get_pixel_clock_parameters( const struct pipe_ctx *pipe_ctx, struct pixel_clk_params *pixel_clk_params) { const struct dc_stream_state *stream = pipe_ctx->stream; pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; pixel_clk_params->encoder_object_id = stream->link->link_enc->id; pixel_clk_params->signal_type = pipe_ctx->stream->signal; pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; /* TODO: un-hardcode*/ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * LINK_RATE_REF_FREQ_IN_KHZ; pixel_clk_params->flags.ENABLE_SS = 0; pixel_clk_params->color_depth = stream->timing.display_color_depth; pixel_clk_params->flags.DISPLAY_BLANKED = 1; pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding; if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) pixel_clk_params->color_depth = COLOR_DEPTH_888; if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) pixel_clk_params->requested_pix_clk_100hz /= 2; if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) pixel_clk_params->requested_pix_clk_100hz *= 2; } static void build_clamping_params(struct dc_stream_state *stream) { stream->clamping.clamping_level = CLAMPING_FULL_RANGE; stream->clamping.c_depth = stream->timing.display_color_depth; stream->clamping.pixel_encoding = stream->timing.pixel_encoding; } static void build_pipe_hw_param(struct pipe_ctx *pipe_ctx) { get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params); pipe_ctx->clock_source->funcs->get_pix_clk_dividers( pipe_ctx->clock_source, &pipe_ctx->stream_res.pix_clk_params, &pipe_ctx->pll_settings); pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding; resource_build_bit_depth_reduction_params(pipe_ctx->stream, &pipe_ctx->stream->bit_depth_params); build_clamping_params(pipe_ctx->stream); } static enum dc_status build_mapped_resource( const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) { struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream); /*TODO Seems unneeded anymore */ /* if (old_context && resource_is_stream_unchanged(old_context, stream)) { if (stream != NULL && old_context->streams[i] != NULL) { todo: shouldn't have to copy missing parameter here resource_build_bit_depth_reduction_params(stream, &stream->bit_depth_params); stream->clamping.pixel_encoding = stream->timing.pixel_encoding; resource_build_bit_depth_reduction_params(stream, &stream->bit_depth_params); build_clamping_params(stream); continue; } } */ if (!pipe_ctx) return 
DC_ERROR_UNEXPECTED; build_pipe_hw_param(pipe_ctx); return DC_OK; } enum dc_status dcn10_add_stream_to_ctx( struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) { enum dc_status result = DC_ERROR_UNEXPECTED; result = resource_map_pool_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = build_mapped_resource(dc, new_ctx, dc_stream); return result; } static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer( struct dc_state *context, const struct resource_pool *pool, struct dc_stream_state *stream) { struct resource_context *res_ctx = &context->res_ctx; struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream); struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe); if (!head_pipe) { ASSERT(0); return NULL; } if (!idle_pipe) return NULL; idle_pipe->stream = head_pipe->stream; idle_pipe->stream_res.tg = head_pipe->stream_res.tg; idle_pipe->stream_res.abm = head_pipe->stream_res.abm; idle_pipe->stream_res.opp = head_pipe->stream_res.opp; idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx]; idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx]; idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx]; idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst; return idle_pipe; } static bool dcn10_get_dcc_compression_cap(const struct dc *dc, const struct dc_dcc_surface_param *input, struct dc_surface_dcc_cap *output) { return dc->res_pool->hubbub->funcs->get_dcc_compression_cap( dc->res_pool->hubbub, input, output); } static void dcn10_destroy_resource_pool(struct resource_pool **pool) { struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool); destruct(dcn10_pool); kfree(dcn10_pool); *pool = NULL; } static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps) { if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN && caps->max_video_width != 0 && plane_state->src_rect.width > caps->max_video_width) return DC_FAIL_SURFACE_VALIDATE; return DC_OK; } static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *context) { int i, j; bool video_down_scaled = false; bool video_large = false; bool desktop_large = false; bool dcc_disabled = false; for (i = 0; i < context->stream_count; i++) { if (context->stream_status[i].plane_count == 0) continue; if (context->stream_status[i].plane_count > 2) return DC_FAIL_UNSUPPORTED_1; for (j = 0; j < context->stream_status[i].plane_count; j++) { struct dc_plane_state *plane = context->stream_status[i].plane_states[j]; if (plane->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { if (plane->src_rect.width > plane->dst_rect.width || plane->src_rect.height > plane->dst_rect.height) video_down_scaled = true; if (plane->src_rect.width >= 3840) video_large = true; } else { if (plane->src_rect.width >= 3840) desktop_large = true; if (!plane->dcc.enable) dcc_disabled = true; } } } /* * Workaround: On DCN10 there is UMC issue that causes underflow when * playing 4k video on 4k desktop with video downscaled and single channel * memory */ if (video_large && desktop_large && video_down_scaled && dcc_disabled && dc->dcn_soc->number_of_channels == 1) return DC_FAIL_SURFACE_VALIDATE; return DC_OK; } static enum dc_status dcn10_get_default_swizzle_mode(struct dc_plane_state *plane_state) { enum dc_status result = DC_OK; enum surface_pixel_format surf_pix_format = plane_state->format; unsigned int bpp = 
resource_pixel_format_to_bpp(surf_pix_format); enum swizzle_mode_values swizzle = DC_SW_LINEAR; if (bpp == 64) swizzle = DC_SW_64KB_D; else swizzle = DC_SW_64KB_S; plane_state->tiling_info.gfx9.swizzle = swizzle; return result; } struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link( struct resource_context *res_ctx, const struct resource_pool *pool, struct dc_stream_state *stream) { int i; int j = -1; struct dc_link *link = stream->link; for (i = 0; i < pool->stream_enc_count; i++) { if (!res_ctx->is_stream_enc_acquired[i] && pool->stream_enc[i]) { /* Store first available for MST second display * in daisy chain use case */ j = i; if (pool->stream_enc[i]->id == link->link_enc->preferred_engine) return pool->stream_enc[i]; } } /* * For CZ and later, we can allow DIG FE and BE to differ for all display types */ if (j >= 0) return pool->stream_enc[j]; return NULL; } static const struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn10_get_dcc_compression_cap }; static const struct resource_funcs dcn10_res_pool_funcs = { .destroy = dcn10_destroy_resource_pool, .link_enc_create = dcn10_link_encoder_create, .validate_bandwidth = dcn_validate_bandwidth, .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer, .validate_plane = dcn10_validate_plane, .validate_global = dcn10_validate_global, .add_stream_to_ctx = dcn10_add_stream_to_ctx, .get_default_swizzle_mode = dcn10_get_default_swizzle_mode, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link }; static uint32_t read_pipe_fuses(struct dc_context *ctx) { uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0); /* RV1 support max 4 pipes */ value = value & 0xf; return value; } static bool construct( uint8_t num_virtual_links, struct dc *dc, struct dcn10_resource_pool *pool) { int i; int j; struct dc_context *ctx = dc->ctx; uint32_t pipe_fuses = read_pipe_fuses(ctx); ctx->dc_bios->regs = &bios_regs; if (ctx->dce_version == DCN_VERSION_1_01) pool->base.res_cap = &rv2_res_cap; else pool->base.res_cap = &res_cap; pool->base.funcs = &dcn10_res_pool_funcs; /* * TODO fill in from actual raven resource when we create * more than virtual encoder */ /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; /* max pipe num for ASIC before check pipe fuses */ pool->base.pipe_count = pool->base.res_cap->num_timing_generator; if (dc->ctx->dce_version == DCN_VERSION_1_01) pool->base.pipe_count = 3; dc->caps.max_video_width = 3840; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 256; dc->caps.max_slave_planes = 1; dc->caps.is_apu = true; dc->caps.post_blend_color_processing = false; /* Raven DP PHY HBR2 eye diagram pattern is not stable. 
Use TP4 */ dc->caps.force_dp_tps4_for_cp2520 = true; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; else dc->debug = debug_defaults_diags; /************************************************* * Create resources * *************************************************/ pool->base.clock_sources[DCN10_CLK_SRC_PLL0] = dcn10_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[DCN10_CLK_SRC_PLL1] = dcn10_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[DCN10_CLK_SRC_PLL2] = dcn10_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL2, &clk_src_regs[2], false); if (dc->ctx->dce_version == DCN_VERSION_1_0) { pool->base.clock_sources[DCN10_CLK_SRC_PLL3] = dcn10_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL3, &clk_src_regs[3], false); } pool->base.clk_src_count = DCN10_CLK_SRC_TOTAL; if (dc->ctx->dce_version == DCN_VERSION_1_01) pool->base.clk_src_count = DCN101_CLK_SRC_TOTAL; pool->base.dp_clock_source = dcn10_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_ID_DP_DTO, /* todo: not reuse phy_pll registers */ &clk_src_regs[0], true); for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto fail; } } pool->base.dmcu = dcn10_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto fail; } dml_init_instance(&dc->dml, &dcn1_0_soc, &dcn1_0_ip, DML_PROJECT_RAVEN1); memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults)); memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults)); if (dc->ctx->dce_version == DCN_VERSION_1_01) { struct dcn_soc_bounding_box *dcn_soc = dc->dcn_soc; struct dcn_ip_params *dcn_ip = dc->dcn_ip; struct display_mode_lib *dml = &dc->dml; dml->ip.max_num_dpp = 3; /* TODO how to handle 23.84? */ dcn_soc->dram_clock_change_latency = 23; dcn_ip->max_num_dpp = 3; } if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) { dc->dcn_soc->urgent_latency = 3; dc->debug.disable_dmcu = true; dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 41.60f; } dc->dcn_soc->number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width; ASSERT(dc->dcn_soc->number_of_channels < 3); if (dc->dcn_soc->number_of_channels == 0)/*old sbios bug*/ dc->dcn_soc->number_of_channels = 2; if (dc->dcn_soc->number_of_channels == 1) { dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 19.2f; dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = 17.066f; dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = 14.933f; dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 12.8f; if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) { dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 20.80f; } } pool->base.pp_smu = dcn10_pp_smu_create(ctx); /* * Right now SMU/PPLIB and DAL all have the AZ D3 force PME notification * * implemented. So AZ D3 should work.For issue 197007. 
* */ if (pool->base.pp_smu != NULL && pool->base.pp_smu->rv_funcs.set_pme_wa_enable != NULL) dc->debug.az_endpoint_mute_only = false; if (!dc->debug.disable_pplib_clock_request) dcn_bw_update_from_pplib(dc); dcn_bw_sync_calcs_and_dml(dc); if (!dc->debug.disable_pplib_wm_range) { dc->res_pool = &pool->base; dcn_bw_notify_pplib_of_wm_ranges(dc); } { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dcn10_create(&init_data); if (!pool->base.irqs) goto fail; } /* index to valid pipe resource */ j = 0; /* mem input -> ipp -> dpp -> opp -> TG */ for (i = 0; i < pool->base.pipe_count; i++) { /* if pipe is disabled, skip instance of HW pipe, * i.e, skip ASIC register instance */ if ((pipe_fuses & (1 << i)) != 0) continue; pool->base.hubps[j] = dcn10_hubp_create(ctx, i); if (pool->base.hubps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create memory input!\n"); goto fail; } pool->base.ipps[j] = dcn10_ipp_create(ctx, i); if (pool->base.ipps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create input pixel processor!\n"); goto fail; } pool->base.dpps[j] = dcn10_dpp_create(ctx, i); if (pool->base.dpps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create dpp!\n"); goto fail; } pool->base.opps[j] = dcn10_opp_create(ctx, i); if (pool->base.opps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create output pixel processor!\n"); goto fail; } pool->base.timing_generators[j] = dcn10_timing_generator_create( ctx, i); if (pool->base.timing_generators[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto fail; } /* check next valid pipe */ j++; } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dcn10_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto fail; } pool->base.hw_i2cs[i] = dcn10_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create hw i2c!!\n"); goto fail; } pool->base.sw_i2cs[i] = NULL; } /* valid pipe num */ pool->base.pipe_count = j; pool->base.timing_generator_count = j; /* within dml lib, it is hard code to 4. If ASIC pipe is fused, * the value may be changed */ dc->dml.ip.max_num_dpp = pool->base.pipe_count; dc->dcn_ip->max_num_dpp = pool->base.pipe_count; pool->base.mpc = dcn10_mpc_create(ctx); if (pool->base.mpc == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create mpc!\n"); goto fail; } pool->base.hubbub = dcn10_hubbub_create(ctx); if (pool->base.hubbub == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create hubbub!\n"); goto fail; } if (!resource_construct(num_virtual_links, dc, &pool->base, (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? &res_create_funcs : &res_create_maximus_funcs))) goto fail; dcn10_hw_sequencer_construct(dc); dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; dc->cap_funcs = cap_funcs; return true; fail: destruct(pool); return false; } struct resource_pool *dcn10_create_resource_pool( const struct dc_init_data *init_data, struct dc *dc) { struct dcn10_resource_pool *pool = kzalloc(sizeof(struct dcn10_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (construct(init_data->num_virtual_links, dc, pool)) return &pool->base; kfree(pool); BREAK_TO_DEBUGGER(); return NULL; }
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1273_5
crossvul-cpp_data_bad_2563_1
/* +----------------------------------------------------------------------+ | PHP Version 7 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2017 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Authors: Rasmus Lerdorf <rasmus@lerdorf.on.ca> | | Zeev Suraski <zeev@zend.com> | +----------------------------------------------------------------------+ */ /* $Id$ */ #include <stdio.h> #include "php.h" #include "ext/standard/php_standard.h" #include "ext/standard/credits.h" #include "zend_smart_str.h" #include "php_variables.h" #include "php_globals.h" #include "php_content_types.h" #include "SAPI.h" #include "zend_globals.h" #ifdef PHP_WIN32 # include "win32/php_inttypes.h" #endif /* for systems that need to override reading of environment variables */ void _php_import_environment_variables(zval *array_ptr); PHPAPI void (*php_import_environment_variables)(zval *array_ptr) = _php_import_environment_variables; PHPAPI void php_register_variable(char *var, char *strval, zval *track_vars_array) { php_register_variable_safe(var, strval, strlen(strval), track_vars_array); } /* binary-safe version */ PHPAPI void php_register_variable_safe(char *var, char *strval, size_t str_len, zval *track_vars_array) { zval new_entry; assert(strval != NULL); /* Prepare value */ ZVAL_NEW_STR(&new_entry, zend_string_init(strval, str_len, 0)); php_register_variable_ex(var, &new_entry, track_vars_array); } PHPAPI void php_register_variable_ex(char *var_name, zval *val, zval *track_vars_array) { char *p = NULL; char *ip = NULL; /* index pointer */ char *index; char *var, *var_orig; size_t var_len, index_len; zval gpc_element, *gpc_element_p; zend_bool is_array = 0; HashTable *symtable1 = NULL; ALLOCA_FLAG(use_heap) assert(var_name != NULL); if (track_vars_array && Z_TYPE_P(track_vars_array) == IS_ARRAY) { symtable1 = Z_ARRVAL_P(track_vars_array); } if (!symtable1) { /* Nothing to do */ zval_dtor(val); return; } /* ignore leading spaces in the variable name */ while (*var_name && *var_name==' ') { var_name++; } /* * Prepare variable name */ var_len = strlen(var_name); var = var_orig = do_alloca(var_len + 1, use_heap); memcpy(var_orig, var_name, var_len + 1); /* ensure that we don't have spaces or dots in the variable name (not binary safe) */ for (p = var; *p; p++) { if (*p == ' ' || *p == '.') { *p='_'; } else if (*p == '[') { is_array = 1; ip = p; *p = 0; break; } } var_len = p - var; if (var_len==0) { /* empty variable name, or variable name with a space in it */ zval_dtor(val); free_alloca(var_orig, use_heap); return; } /* GLOBALS hijack attempt, reject parameter */ if (symtable1 == &EG(symbol_table) && var_len == sizeof("GLOBALS")-1 && !memcmp(var, "GLOBALS", sizeof("GLOBALS")-1)) { zval_dtor(val); free_alloca(var_orig, use_heap); return; } index = var; index_len = var_len; if (is_array) { int nest_level = 0; while (1) { char *index_s; size_t new_idx_len = 0; if(++nest_level > PG(max_input_nesting_level)) { HashTable *ht; /* too many 
levels of nesting */ if (track_vars_array) { ht = Z_ARRVAL_P(track_vars_array); zend_symtable_str_del(ht, var, var_len); } zval_dtor(val); /* do not output the error message to the screen, this helps us to to avoid "information disclosure" */ if (!PG(display_errors)) { php_error_docref(NULL, E_WARNING, "Input variable nesting level exceeded " ZEND_LONG_FMT ". To increase the limit change max_input_nesting_level in php.ini.", PG(max_input_nesting_level)); } free_alloca(var_orig, use_heap); return; } ip++; index_s = ip; if (isspace(*ip)) { ip++; } if (*ip==']') { index_s = NULL; } else { ip = strchr(ip, ']'); if (!ip) { /* PHP variables cannot contain '[' in their names, so we replace the character with a '_' */ *(index_s - 1) = '_'; index_len = 0; if (index) { index_len = strlen(index); } goto plain_var; return; } *ip = 0; new_idx_len = strlen(index_s); } if (!index) { array_init(&gpc_element); if ((gpc_element_p = zend_hash_next_index_insert(symtable1, &gpc_element)) == NULL) { zval_ptr_dtor(&gpc_element); zval_dtor(val); free_alloca(var_orig, use_heap); return; } } else { gpc_element_p = zend_symtable_str_find(symtable1, index, index_len); if (!gpc_element_p) { zval tmp; array_init(&tmp); gpc_element_p = zend_symtable_str_update_ind(symtable1, index, index_len, &tmp); } else { if (Z_TYPE_P(gpc_element_p) == IS_INDIRECT) { gpc_element_p = Z_INDIRECT_P(gpc_element_p); } if (Z_TYPE_P(gpc_element_p) != IS_ARRAY) { zval_ptr_dtor(gpc_element_p); array_init(gpc_element_p); } } } symtable1 = Z_ARRVAL_P(gpc_element_p); /* ip pointed to the '[' character, now obtain the key */ index = index_s; index_len = new_idx_len; ip++; if (*ip == '[') { is_array = 1; *ip = 0; } else { goto plain_var; } } } else { plain_var: ZVAL_COPY_VALUE(&gpc_element, val); if (!index) { if ((gpc_element_p = zend_hash_next_index_insert(symtable1, &gpc_element)) == NULL) { zval_ptr_dtor(&gpc_element); } } else { /* * According to rfc2965, more specific paths are listed above the less specific ones. * If we encounter a duplicate cookie name, we should skip it, since it is not possible * to have the same (plain text) cookie name for the same path and we should not overwrite * more specific cookies with the less specific ones. 
*/ if (Z_TYPE(PG(http_globals)[TRACK_VARS_COOKIE]) != IS_UNDEF && symtable1 == Z_ARRVAL(PG(http_globals)[TRACK_VARS_COOKIE]) && zend_symtable_str_exists(symtable1, index, index_len)) { zval_ptr_dtor(&gpc_element); } else { gpc_element_p = zend_symtable_str_update_ind(symtable1, index, index_len, &gpc_element); } } } free_alloca(var_orig, use_heap); } typedef struct post_var_data { smart_str str; char *ptr; char *end; uint64_t cnt; } post_var_data_t; static zend_bool add_post_var(zval *arr, post_var_data_t *var, zend_bool eof) { char *ksep, *vsep, *val; size_t klen, vlen; size_t new_vlen; if (var->ptr >= var->end) { return 0; } vsep = memchr(var->ptr, '&', var->end - var->ptr); if (!vsep) { if (!eof) { return 0; } else { vsep = var->end; } } ksep = memchr(var->ptr, '=', vsep - var->ptr); if (ksep) { *ksep = '\0'; /* "foo=bar&" or "foo=&" */ klen = ksep - var->ptr; vlen = vsep - ++ksep; } else { ksep = ""; /* "foo&" */ klen = vsep - var->ptr; vlen = 0; } php_url_decode(var->ptr, klen); val = estrndup(ksep, vlen); if (vlen) { vlen = php_url_decode(val, vlen); } if (sapi_module.input_filter(PARSE_POST, var->ptr, &val, vlen, &new_vlen)) { php_register_variable_safe(var->ptr, val, new_vlen, arr); } efree(val); var->ptr = vsep + (vsep != var->end); return 1; } static inline int add_post_vars(zval *arr, post_var_data_t *vars, zend_bool eof) { uint64_t max_vars = PG(max_input_vars); vars->ptr = ZSTR_VAL(vars->str.s); vars->end = ZSTR_VAL(vars->str.s) + ZSTR_LEN(vars->str.s); while (add_post_var(arr, vars, eof)) { if (++vars->cnt > max_vars) { php_error_docref(NULL, E_WARNING, "Input variables exceeded %" PRIu64 ". " "To increase the limit change max_input_vars in php.ini.", max_vars); return FAILURE; } } if (!eof) { memmove(ZSTR_VAL(vars->str.s), vars->ptr, ZSTR_LEN(vars->str.s) = vars->end - vars->ptr); } return SUCCESS; } #ifdef PHP_WIN32 #define SAPI_POST_HANDLER_BUFSIZ 16384 #else # define SAPI_POST_HANDLER_BUFSIZ BUFSIZ #endif SAPI_API SAPI_POST_HANDLER_FUNC(php_std_post_handler) { zval *arr = (zval *) arg; php_stream *s = SG(request_info).request_body; post_var_data_t post_data; if (s && SUCCESS == php_stream_rewind(s)) { memset(&post_data, 0, sizeof(post_data)); while (!php_stream_eof(s)) { char buf[SAPI_POST_HANDLER_BUFSIZ] = {0}; size_t len = php_stream_read(s, buf, SAPI_POST_HANDLER_BUFSIZ); if (len && len != (size_t) -1) { smart_str_appendl(&post_data.str, buf, len); if (SUCCESS != add_post_vars(arr, &post_data, 0)) { smart_str_free(&post_data.str); return; } } if (len != SAPI_POST_HANDLER_BUFSIZ){ break; } } if (post_data.str.s) { add_post_vars(arr, &post_data, 1); smart_str_free(&post_data.str); } } } #undef SAPI_POST_HANDLER_BUFSIZ SAPI_API SAPI_INPUT_FILTER_FUNC(php_default_input_filter) { /* TODO: check .ini setting here and apply user-defined input filter */ if(new_val_len) *new_val_len = val_len; return 1; } SAPI_API SAPI_TREAT_DATA_FUNC(php_default_treat_data) { char *res = NULL, *var, *val, *separator = NULL; const char *c_var; zval array; int free_buffer = 0; char *strtok_buf = NULL; zend_long count = 0; ZVAL_UNDEF(&array); switch (arg) { case PARSE_POST: case PARSE_GET: case PARSE_COOKIE: array_init(&array); switch (arg) { case PARSE_POST: zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_POST]); ZVAL_COPY_VALUE(&PG(http_globals)[TRACK_VARS_POST], &array); break; case PARSE_GET: zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_GET]); ZVAL_COPY_VALUE(&PG(http_globals)[TRACK_VARS_GET], &array); break; case PARSE_COOKIE: zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_COOKIE]); 
ZVAL_COPY_VALUE(&PG(http_globals)[TRACK_VARS_COOKIE], &array); break; } break; default: ZVAL_COPY_VALUE(&array, destArray); break; } if (arg == PARSE_POST) { sapi_handle_post(&array); return; } if (arg == PARSE_GET) { /* GET data */ c_var = SG(request_info).query_string; if (c_var && *c_var) { res = (char *) estrdup(c_var); free_buffer = 1; } else { free_buffer = 0; } } else if (arg == PARSE_COOKIE) { /* Cookie data */ c_var = SG(request_info).cookie_data; if (c_var && *c_var) { res = (char *) estrdup(c_var); free_buffer = 1; } else { free_buffer = 0; } } else if (arg == PARSE_STRING) { /* String data */ res = str; free_buffer = 1; } if (!res) { return; } switch (arg) { case PARSE_GET: case PARSE_STRING: separator = (char *) estrdup(PG(arg_separator).input); break; case PARSE_COOKIE: separator = ";\0"; break; } var = php_strtok_r(res, separator, &strtok_buf); while (var) { val = strchr(var, '='); if (arg == PARSE_COOKIE) { /* Remove leading spaces from cookie names, needed for multi-cookie header where ; can be followed by a space */ while (isspace(*var)) { var++; } if (var == val || *var == '\0') { goto next_cookie; } } if (++count > PG(max_input_vars)) { php_error_docref(NULL, E_WARNING, "Input variables exceeded " ZEND_LONG_FMT ". To increase the limit change max_input_vars in php.ini.", PG(max_input_vars)); break; } if (val) { /* have a value */ size_t val_len; size_t new_val_len; *val++ = '\0'; php_url_decode(var, strlen(var)); val_len = php_url_decode(val, strlen(val)); val = estrndup(val, val_len); if (sapi_module.input_filter(arg, var, &val, val_len, &new_val_len)) { php_register_variable_safe(var, val, new_val_len, &array); } efree(val); } else { size_t val_len; size_t new_val_len; php_url_decode(var, strlen(var)); val_len = 0; val = estrndup("", val_len); if (sapi_module.input_filter(arg, var, &val, val_len, &new_val_len)) { php_register_variable_safe(var, val, new_val_len, &array); } efree(val); } next_cookie: var = php_strtok_r(NULL, separator, &strtok_buf); } if (arg != PARSE_COOKIE) { efree(separator); } if (free_buffer) { efree(res); } } void _php_import_environment_variables(zval *array_ptr) { char buf[128]; char **env, *p, *t = buf; size_t alloc_size = sizeof(buf); unsigned long nlen; /* ptrdiff_t is not portable */ for (env = environ; env != NULL && *env != NULL; env++) { p = strchr(*env, '='); if (!p) { /* malformed entry? */ continue; } nlen = p - *env; if (nlen >= alloc_size) { alloc_size = nlen + 64; t = (t == buf ? emalloc(alloc_size): erealloc(t, alloc_size)); } memcpy(t, *env, nlen); t[nlen] = '\0'; php_register_variable(t, p + 1, array_ptr); } if (t != buf && t != NULL) { efree(t); } } zend_bool php_std_auto_global_callback(char *name, uint name_len) { zend_printf("%s\n", name); return 0; /* don't rearm */ } /* {{{ php_build_argv */ PHPAPI void php_build_argv(char *s, zval *track_vars_array) { zval arr, argc, tmp; int count = 0; char *ss, *space; if (!(SG(request_info).argc || track_vars_array)) { return; } array_init(&arr); /* Prepare argv */ if (SG(request_info).argc) { /* are we in cli sapi? 
*/ int i; for (i = 0; i < SG(request_info).argc; i++) { ZVAL_STRING(&tmp, SG(request_info).argv[i]); if (zend_hash_next_index_insert(Z_ARRVAL(arr), &tmp) == NULL) { zend_string_free(Z_STR(tmp)); } } } else if (s && *s) { ss = s; while (ss) { space = strchr(ss, '+'); if (space) { *space = '\0'; } /* auto-type */ ZVAL_STRING(&tmp, ss); count++; if (zend_hash_next_index_insert(Z_ARRVAL(arr), &tmp) == NULL) { zend_string_free(Z_STR(tmp)); } if (space) { *space = '+'; ss = space + 1; } else { ss = space; } } } /* prepare argc */ if (SG(request_info).argc) { ZVAL_LONG(&argc, SG(request_info).argc); } else { ZVAL_LONG(&argc, count); } if (SG(request_info).argc) { Z_ADDREF(arr); zend_hash_str_update(&EG(symbol_table), "argv", sizeof("argv")-1, &arr); zend_hash_str_add(&EG(symbol_table), "argc", sizeof("argc")-1, &argc); } if (track_vars_array && Z_TYPE_P(track_vars_array) == IS_ARRAY) { Z_ADDREF(arr); zend_hash_str_update(Z_ARRVAL_P(track_vars_array), "argv", sizeof("argv")-1, &arr); zend_hash_str_update(Z_ARRVAL_P(track_vars_array), "argc", sizeof("argc")-1, &argc); } zval_ptr_dtor(&arr); } /* }}} */ /* {{{ php_register_server_variables */ static inline void php_register_server_variables(void) { zval request_time_float, request_time_long; zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_SERVER]); array_init(&PG(http_globals)[TRACK_VARS_SERVER]); /* Server variables */ if (sapi_module.register_server_variables) { sapi_module.register_server_variables(&PG(http_globals)[TRACK_VARS_SERVER]); } /* PHP Authentication support */ if (SG(request_info).auth_user) { php_register_variable("PHP_AUTH_USER", SG(request_info).auth_user, &PG(http_globals)[TRACK_VARS_SERVER]); } if (SG(request_info).auth_password) { php_register_variable("PHP_AUTH_PW", SG(request_info).auth_password, &PG(http_globals)[TRACK_VARS_SERVER]); } if (SG(request_info).auth_digest) { php_register_variable("PHP_AUTH_DIGEST", SG(request_info).auth_digest, &PG(http_globals)[TRACK_VARS_SERVER]); } /* store request init time */ ZVAL_DOUBLE(&request_time_float, sapi_get_request_time()); php_register_variable_ex("REQUEST_TIME_FLOAT", &request_time_float, &PG(http_globals)[TRACK_VARS_SERVER]); ZVAL_LONG(&request_time_long, zend_dval_to_lval(Z_DVAL(request_time_float))); php_register_variable_ex("REQUEST_TIME", &request_time_long, &PG(http_globals)[TRACK_VARS_SERVER]); } /* }}} */ /* {{{ php_autoglobal_merge */ static void php_autoglobal_merge(HashTable *dest, HashTable *src) { zval *src_entry, *dest_entry; zend_string *string_key; zend_ulong num_key; int globals_check = (dest == (&EG(symbol_table))); ZEND_HASH_FOREACH_KEY_VAL(src, num_key, string_key, src_entry) { if (Z_TYPE_P(src_entry) != IS_ARRAY || (string_key && (dest_entry = zend_hash_find(dest, string_key)) == NULL) || (string_key == NULL && (dest_entry = zend_hash_index_find(dest, num_key)) == NULL) || Z_TYPE_P(dest_entry) != IS_ARRAY) { if (Z_REFCOUNTED_P(src_entry)) { Z_ADDREF_P(src_entry); } if (string_key) { if (!globals_check || ZSTR_LEN(string_key) != sizeof("GLOBALS") - 1 || memcmp(ZSTR_VAL(string_key), "GLOBALS", sizeof("GLOBALS") - 1)) { zend_hash_update(dest, string_key, src_entry); } else if (Z_REFCOUNTED_P(src_entry)) { Z_DELREF_P(src_entry); } } else { zend_hash_index_update(dest, num_key, src_entry); } } else { SEPARATE_ARRAY(dest_entry); php_autoglobal_merge(Z_ARRVAL_P(dest_entry), Z_ARRVAL_P(src_entry)); } } ZEND_HASH_FOREACH_END(); } /* }}} */ /* {{{ php_hash_environment */ PHPAPI int php_hash_environment(void) { memset(PG(http_globals), 0, sizeof(PG(http_globals))); 
zend_activate_auto_globals(); if (PG(register_argc_argv)) { php_build_argv(SG(request_info).query_string, &PG(http_globals)[TRACK_VARS_SERVER]); } return SUCCESS; } /* }}} */ static zend_bool php_auto_globals_create_get(zend_string *name) { if (PG(variables_order) && (strchr(PG(variables_order),'G') || strchr(PG(variables_order),'g'))) { sapi_module.treat_data(PARSE_GET, NULL, NULL); } else { zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_GET]); array_init(&PG(http_globals)[TRACK_VARS_GET]); } zend_hash_update(&EG(symbol_table), name, &PG(http_globals)[TRACK_VARS_GET]); Z_ADDREF(PG(http_globals)[TRACK_VARS_GET]); return 0; /* don't rearm */ } static zend_bool php_auto_globals_create_post(zend_string *name) { if (PG(variables_order) && (strchr(PG(variables_order),'P') || strchr(PG(variables_order),'p')) && !SG(headers_sent) && SG(request_info).request_method && !strcasecmp(SG(request_info).request_method, "POST")) { sapi_module.treat_data(PARSE_POST, NULL, NULL); } else { zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_POST]); array_init(&PG(http_globals)[TRACK_VARS_POST]); } zend_hash_update(&EG(symbol_table), name, &PG(http_globals)[TRACK_VARS_POST]); Z_ADDREF(PG(http_globals)[TRACK_VARS_POST]); return 0; /* don't rearm */ } static zend_bool php_auto_globals_create_cookie(zend_string *name) { if (PG(variables_order) && (strchr(PG(variables_order),'C') || strchr(PG(variables_order),'c'))) { sapi_module.treat_data(PARSE_COOKIE, NULL, NULL); } else { zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_COOKIE]); array_init(&PG(http_globals)[TRACK_VARS_COOKIE]); } zend_hash_update(&EG(symbol_table), name, &PG(http_globals)[TRACK_VARS_COOKIE]); Z_ADDREF(PG(http_globals)[TRACK_VARS_COOKIE]); return 0; /* don't rearm */ } static zend_bool php_auto_globals_create_files(zend_string *name) { if (Z_TYPE(PG(http_globals)[TRACK_VARS_FILES]) == IS_UNDEF) { array_init(&PG(http_globals)[TRACK_VARS_FILES]); } zend_hash_update(&EG(symbol_table), name, &PG(http_globals)[TRACK_VARS_FILES]); Z_ADDREF(PG(http_globals)[TRACK_VARS_FILES]); return 0; /* don't rearm */ } /* Upgly hack to fix HTTP_PROXY issue, see bug #72573 */ static void check_http_proxy(HashTable *var_table) { if (zend_hash_str_exists(var_table, "HTTP_PROXY", sizeof("HTTP_PROXY")-1)) { char *local_proxy = getenv("HTTP_PROXY"); if (!local_proxy) { zend_hash_str_del(var_table, "HTTP_PROXY", sizeof("HTTP_PROXY")-1); } else { zval local_zval; ZVAL_STRING(&local_zval, local_proxy); zend_hash_str_update(var_table, "HTTP_PROXY", sizeof("HTTP_PROXY")-1, &local_zval); } } } static zend_bool php_auto_globals_create_server(zend_string *name) { if (PG(variables_order) && (strchr(PG(variables_order),'S') || strchr(PG(variables_order),'s'))) { php_register_server_variables(); if (PG(register_argc_argv)) { if (SG(request_info).argc) { zval *argc, *argv; if ((argc = zend_hash_str_find_ind(&EG(symbol_table), "argc", sizeof("argc")-1)) != NULL && (argv = zend_hash_str_find_ind(&EG(symbol_table), "argv", sizeof("argv")-1)) != NULL) { Z_ADDREF_P(argv); zend_hash_str_update(Z_ARRVAL(PG(http_globals)[TRACK_VARS_SERVER]), "argv", sizeof("argv")-1, argv); zend_hash_str_update(Z_ARRVAL(PG(http_globals)[TRACK_VARS_SERVER]), "argc", sizeof("argc")-1, argc); } } else { php_build_argv(SG(request_info).query_string, &PG(http_globals)[TRACK_VARS_SERVER]); } } } else { zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_SERVER]); array_init(&PG(http_globals)[TRACK_VARS_SERVER]); } check_http_proxy(Z_ARRVAL(PG(http_globals)[TRACK_VARS_SERVER])); zend_hash_update(&EG(symbol_table), name, 
&PG(http_globals)[TRACK_VARS_SERVER]); Z_ADDREF(PG(http_globals)[TRACK_VARS_SERVER]); return 0; /* don't rearm */ } static zend_bool php_auto_globals_create_env(zend_string *name) { zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_ENV]); array_init(&PG(http_globals)[TRACK_VARS_ENV]); if (PG(variables_order) && (strchr(PG(variables_order),'E') || strchr(PG(variables_order),'e'))) { php_import_environment_variables(&PG(http_globals)[TRACK_VARS_ENV]); } check_http_proxy(Z_ARRVAL(PG(http_globals)[TRACK_VARS_ENV])); zend_hash_update(&EG(symbol_table), name, &PG(http_globals)[TRACK_VARS_ENV]); Z_ADDREF(PG(http_globals)[TRACK_VARS_ENV]); return 0; /* don't rearm */ } static zend_bool php_auto_globals_create_request(zend_string *name) { zval form_variables; unsigned char _gpc_flags[3] = {0, 0, 0}; char *p; array_init(&form_variables); if (PG(request_order) != NULL) { p = PG(request_order); } else { p = PG(variables_order); } for (; p && *p; p++) { switch (*p) { case 'g': case 'G': if (!_gpc_flags[0]) { php_autoglobal_merge(Z_ARRVAL(form_variables), Z_ARRVAL(PG(http_globals)[TRACK_VARS_GET])); _gpc_flags[0] = 1; } break; case 'p': case 'P': if (!_gpc_flags[1]) { php_autoglobal_merge(Z_ARRVAL(form_variables), Z_ARRVAL(PG(http_globals)[TRACK_VARS_POST])); _gpc_flags[1] = 1; } break; case 'c': case 'C': if (!_gpc_flags[2]) { php_autoglobal_merge(Z_ARRVAL(form_variables), Z_ARRVAL(PG(http_globals)[TRACK_VARS_COOKIE])); _gpc_flags[2] = 1; } break; } } zend_hash_update(&EG(symbol_table), name, &form_variables); return 0; } void php_startup_auto_globals(void) { zend_register_auto_global(zend_string_init("_GET", sizeof("_GET")-1, 1), 0, php_auto_globals_create_get); zend_register_auto_global(zend_string_init("_POST", sizeof("_POST")-1, 1), 0, php_auto_globals_create_post); zend_register_auto_global(zend_string_init("_COOKIE", sizeof("_COOKIE")-1, 1), 0, php_auto_globals_create_cookie); zend_register_auto_global(zend_string_init("_SERVER", sizeof("_SERVER")-1, 1), PG(auto_globals_jit), php_auto_globals_create_server); zend_register_auto_global(zend_string_init("_ENV", sizeof("_ENV")-1, 1), PG(auto_globals_jit), php_auto_globals_create_env); zend_register_auto_global(zend_string_init("_REQUEST", sizeof("_REQUEST")-1, 1), PG(auto_globals_jit), php_auto_globals_create_request); zend_register_auto_global(zend_string_init("_FILES", sizeof("_FILES")-1, 1), 0, php_auto_globals_create_files); } /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: sw=4 ts=4 fdm=marker * vim<600: sw=4 ts=4 */
./CrossVul/dataset_final_sorted/CWE-400/c/bad_2563_1
crossvul-cpp_data_good_5496_0
/* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * Copyright (c) 2013 Red Hat, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_bit.h" #include "xfs_mount.h" #include "xfs_da_format.h" #include "xfs_da_btree.h" #include "xfs_inode.h" #include "xfs_trans.h" #include "xfs_inode_item.h" #include "xfs_bmap.h" #include "xfs_attr.h" #include "xfs_attr_sf.h" #include "xfs_attr_remote.h" #include "xfs_attr_leaf.h" #include "xfs_error.h" #include "xfs_trace.h" #include "xfs_buf_item.h" #include "xfs_cksum.h" #include "xfs_dir2.h" STATIC int xfs_attr_shortform_compare(const void *a, const void *b) { xfs_attr_sf_sort_t *sa, *sb; sa = (xfs_attr_sf_sort_t *)a; sb = (xfs_attr_sf_sort_t *)b; if (sa->hash < sb->hash) { return -1; } else if (sa->hash > sb->hash) { return 1; } else { return sa->entno - sb->entno; } } #define XFS_ISRESET_CURSOR(cursor) \ (!((cursor)->initted) && !((cursor)->hashval) && \ !((cursor)->blkno) && !((cursor)->offset)) /* * Copy out entries of shortform attribute lists for attr_list(). * Shortform attribute lists are not stored in hashval sorted order. * If the output buffer is not large enough to hold them all, then we * we have to calculate each entries' hashvalue and sort them before * we can begin returning them to the user. */ int xfs_attr_shortform_list(xfs_attr_list_context_t *context) { attrlist_cursor_kern_t *cursor; xfs_attr_sf_sort_t *sbuf, *sbp; xfs_attr_shortform_t *sf; xfs_attr_sf_entry_t *sfe; xfs_inode_t *dp; int sbsize, nsbuf, count, i; int error; ASSERT(context != NULL); dp = context->dp; ASSERT(dp != NULL); ASSERT(dp->i_afp != NULL); sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data; ASSERT(sf != NULL); if (!sf->hdr.count) return 0; cursor = context->cursor; ASSERT(cursor != NULL); trace_xfs_attr_list_sf(context); /* * If the buffer is large enough and the cursor is at the start, * do not bother with sorting since we will return everything in * one buffer and another call using the cursor won't need to be * made. * Note the generous fudge factor of 16 overhead bytes per entry. * If bufsize is zero then put_listent must be a search function * and can just scan through what we have. */ if (context->bufsize == 0 || (XFS_ISRESET_CURSOR(cursor) && (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) { for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) { error = context->put_listent(context, sfe->flags, sfe->nameval, (int)sfe->namelen, (int)sfe->valuelen, &sfe->nameval[sfe->namelen]); /* * Either search callback finished early or * didn't fit it all in the buffer after all. 
*/ if (context->seen_enough) break; if (error) return error; sfe = XFS_ATTR_SF_NEXTENTRY(sfe); } trace_xfs_attr_list_sf_all(context); return 0; } /* do no more for a search callback */ if (context->bufsize == 0) return 0; /* * It didn't all fit, so we have to sort everything on hashval. */ sbsize = sf->hdr.count * sizeof(*sbuf); sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS); /* * Scan the attribute list for the rest of the entries, storing * the relevant info from only those that match into a buffer. */ nsbuf = 0; for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) { if (unlikely( ((char *)sfe < (char *)sf) || ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) { XFS_CORRUPTION_ERROR("xfs_attr_shortform_list", XFS_ERRLEVEL_LOW, context->dp->i_mount, sfe); kmem_free(sbuf); return -EFSCORRUPTED; } sbp->entno = i; sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen); sbp->name = sfe->nameval; sbp->namelen = sfe->namelen; /* These are bytes, and both on-disk, don't endian-flip */ sbp->valuelen = sfe->valuelen; sbp->flags = sfe->flags; sfe = XFS_ATTR_SF_NEXTENTRY(sfe); sbp++; nsbuf++; } /* * Sort the entries on hash then entno. */ xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare); /* * Re-find our place IN THE SORTED LIST. */ count = 0; cursor->initted = 1; cursor->blkno = 0; for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) { if (sbp->hash == cursor->hashval) { if (cursor->offset == count) { break; } count++; } else if (sbp->hash > cursor->hashval) { break; } } if (i == nsbuf) { kmem_free(sbuf); return 0; } /* * Loop putting entries into the user buffer. */ for ( ; i < nsbuf; i++, sbp++) { if (cursor->hashval != sbp->hash) { cursor->hashval = sbp->hash; cursor->offset = 0; } error = context->put_listent(context, sbp->flags, sbp->name, sbp->namelen, sbp->valuelen, &sbp->name[sbp->namelen]); if (error) { kmem_free(sbuf); return error; } if (context->seen_enough) break; cursor->offset++; } kmem_free(sbuf); return 0; } STATIC int xfs_attr_node_list(xfs_attr_list_context_t *context) { attrlist_cursor_kern_t *cursor; xfs_attr_leafblock_t *leaf; xfs_da_intnode_t *node; struct xfs_attr3_icleaf_hdr leafhdr; struct xfs_da3_icnode_hdr nodehdr; struct xfs_da_node_entry *btree; int error, i; struct xfs_buf *bp; struct xfs_inode *dp = context->dp; struct xfs_mount *mp = dp->i_mount; trace_xfs_attr_node_list(context); cursor = context->cursor; cursor->initted = 1; /* * Do all sorts of validation on the passed-in cursor structure. * If anything is amiss, ignore the cursor and look up the hashval * starting from the btree root. 
*/ bp = NULL; if (cursor->blkno > 0) { error = xfs_da3_node_read(NULL, dp, cursor->blkno, -1, &bp, XFS_ATTR_FORK); if ((error != 0) && (error != -EFSCORRUPTED)) return error; if (bp) { struct xfs_attr_leaf_entry *entries; node = bp->b_addr; switch (be16_to_cpu(node->hdr.info.magic)) { case XFS_DA_NODE_MAGIC: case XFS_DA3_NODE_MAGIC: trace_xfs_attr_list_wrong_blk(context); xfs_trans_brelse(NULL, bp); bp = NULL; break; case XFS_ATTR_LEAF_MAGIC: case XFS_ATTR3_LEAF_MAGIC: leaf = bp->b_addr; xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf); entries = xfs_attr3_leaf_entryp(leaf); if (cursor->hashval > be32_to_cpu( entries[leafhdr.count - 1].hashval)) { trace_xfs_attr_list_wrong_blk(context); xfs_trans_brelse(NULL, bp); bp = NULL; } else if (cursor->hashval <= be32_to_cpu( entries[0].hashval)) { trace_xfs_attr_list_wrong_blk(context); xfs_trans_brelse(NULL, bp); bp = NULL; } break; default: trace_xfs_attr_list_wrong_blk(context); xfs_trans_brelse(NULL, bp); bp = NULL; } } } /* * We did not find what we expected given the cursor's contents, * so we start from the top and work down based on the hash value. * Note that start of node block is same as start of leaf block. */ if (bp == NULL) { cursor->blkno = 0; for (;;) { __uint16_t magic; error = xfs_da3_node_read(NULL, dp, cursor->blkno, -1, &bp, XFS_ATTR_FORK); if (error) return error; node = bp->b_addr; magic = be16_to_cpu(node->hdr.info.magic); if (magic == XFS_ATTR_LEAF_MAGIC || magic == XFS_ATTR3_LEAF_MAGIC) break; if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC) { XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)", XFS_ERRLEVEL_LOW, context->dp->i_mount, node); xfs_trans_brelse(NULL, bp); return -EFSCORRUPTED; } dp->d_ops->node_hdr_from_disk(&nodehdr, node); btree = dp->d_ops->node_tree_p(node); for (i = 0; i < nodehdr.count; btree++, i++) { if (cursor->hashval <= be32_to_cpu(btree->hashval)) { cursor->blkno = be32_to_cpu(btree->before); trace_xfs_attr_list_node_descend(context, btree); break; } } if (i == nodehdr.count) { xfs_trans_brelse(NULL, bp); return 0; } xfs_trans_brelse(NULL, bp); } } ASSERT(bp != NULL); /* * Roll upward through the blocks, processing each leaf block in * order. As long as there is space in the result buffer, keep * adding the information. */ for (;;) { leaf = bp->b_addr; error = xfs_attr3_leaf_list_int(bp, context); if (error) { xfs_trans_brelse(NULL, bp); return error; } xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf); if (context->seen_enough || leafhdr.forw == 0) break; cursor->blkno = leafhdr.forw; xfs_trans_brelse(NULL, bp); error = xfs_attr3_leaf_read(NULL, dp, cursor->blkno, -1, &bp); if (error) return error; } xfs_trans_brelse(NULL, bp); return 0; } /* * Copy out attribute list entries for attr_list(), for leaf attribute lists. */ int xfs_attr3_leaf_list_int( struct xfs_buf *bp, struct xfs_attr_list_context *context) { struct attrlist_cursor_kern *cursor; struct xfs_attr_leafblock *leaf; struct xfs_attr3_icleaf_hdr ichdr; struct xfs_attr_leaf_entry *entries; struct xfs_attr_leaf_entry *entry; int retval; int i; struct xfs_mount *mp = context->dp->i_mount; trace_xfs_attr_list_leaf(context); leaf = bp->b_addr; xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf); entries = xfs_attr3_leaf_entryp(leaf); cursor = context->cursor; cursor->initted = 1; /* * Re-find our place in the leaf block if this is a new syscall. 
*/ if (context->resynch) { entry = &entries[0]; for (i = 0; i < ichdr.count; entry++, i++) { if (be32_to_cpu(entry->hashval) == cursor->hashval) { if (cursor->offset == context->dupcnt) { context->dupcnt = 0; break; } context->dupcnt++; } else if (be32_to_cpu(entry->hashval) > cursor->hashval) { context->dupcnt = 0; break; } } if (i == ichdr.count) { trace_xfs_attr_list_notfound(context); return 0; } } else { entry = &entries[0]; i = 0; } context->resynch = 0; /* * We have found our place, start copying out the new attributes. */ retval = 0; for (; i < ichdr.count; entry++, i++) { if (be32_to_cpu(entry->hashval) != cursor->hashval) { cursor->hashval = be32_to_cpu(entry->hashval); cursor->offset = 0; } if (entry->flags & XFS_ATTR_INCOMPLETE) continue; /* skip incomplete entries */ if (entry->flags & XFS_ATTR_LOCAL) { xfs_attr_leaf_name_local_t *name_loc = xfs_attr3_leaf_name_local(leaf, i); retval = context->put_listent(context, entry->flags, name_loc->nameval, (int)name_loc->namelen, be16_to_cpu(name_loc->valuelen), &name_loc->nameval[name_loc->namelen]); if (retval) return retval; } else { xfs_attr_leaf_name_remote_t *name_rmt = xfs_attr3_leaf_name_remote(leaf, i); int valuelen = be32_to_cpu(name_rmt->valuelen); if (context->put_value) { xfs_da_args_t args; memset((char *)&args, 0, sizeof(args)); args.geo = context->dp->i_mount->m_attr_geo; args.dp = context->dp; args.whichfork = XFS_ATTR_FORK; args.valuelen = valuelen; args.rmtvaluelen = valuelen; args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS); args.rmtblkno = be32_to_cpu(name_rmt->valueblk); args.rmtblkcnt = xfs_attr3_rmt_blocks( args.dp->i_mount, valuelen); retval = xfs_attr_rmtval_get(&args); if (!retval) retval = context->put_listent(context, entry->flags, name_rmt->name, (int)name_rmt->namelen, valuelen, args.value); kmem_free(args.value); } else { retval = context->put_listent(context, entry->flags, name_rmt->name, (int)name_rmt->namelen, valuelen, NULL); } if (retval) return retval; } if (context->seen_enough) break; cursor->offset++; } trace_xfs_attr_list_leaf_end(context); return retval; } /* * Copy out attribute entries for attr_list(), for leaf attribute lists. */ STATIC int xfs_attr_leaf_list(xfs_attr_list_context_t *context) { int error; struct xfs_buf *bp; trace_xfs_attr_leaf_list(context); context->cursor->blkno = 0; error = xfs_attr3_leaf_read(NULL, context->dp, 0, -1, &bp); if (error) return error; error = xfs_attr3_leaf_list_int(bp, context); xfs_trans_brelse(NULL, bp); return error; } int xfs_attr_list_int( xfs_attr_list_context_t *context) { int error; xfs_inode_t *dp = context->dp; uint lock_mode; XFS_STATS_INC(dp->i_mount, xs_attr_list); if (XFS_FORCED_SHUTDOWN(dp->i_mount)) return -EIO; /* * Decide on what work routines to call based on the inode size. */ lock_mode = xfs_ilock_attr_map_shared(dp); if (!xfs_inode_hasattr(dp)) { error = 0; } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { error = xfs_attr_shortform_list(context); } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) { error = xfs_attr_leaf_list(context); } else { error = xfs_attr_node_list(context); } xfs_iunlock(dp, lock_mode); return error; } #define ATTR_ENTBASESIZE /* minimum bytes used by an attr */ \ (((struct attrlist_ent *) 0)->a_name - (char *) 0) #define ATTR_ENTSIZE(namelen) /* actual bytes used by an attr */ \ ((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(u_int32_t)-1) \ & ~(sizeof(u_int32_t)-1)) /* * Format an attribute and copy it out to the user's buffer. 
* Take care to check values and protect against them changing later, * we may be reading them directly out of a user buffer. */ STATIC int xfs_attr_put_listent( xfs_attr_list_context_t *context, int flags, unsigned char *name, int namelen, int valuelen, unsigned char *value) { struct attrlist *alist = (struct attrlist *)context->alist; attrlist_ent_t *aep; int arraytop; ASSERT(!(context->flags & ATTR_KERNOVAL)); ASSERT(context->count >= 0); ASSERT(context->count < (ATTR_MAX_VALUELEN/8)); ASSERT(context->firstu >= sizeof(*alist)); ASSERT(context->firstu <= context->bufsize); /* * Only list entries in the right namespace. */ if (((context->flags & ATTR_SECURE) == 0) != ((flags & XFS_ATTR_SECURE) == 0)) return 0; if (((context->flags & ATTR_ROOT) == 0) != ((flags & XFS_ATTR_ROOT) == 0)) return 0; arraytop = sizeof(*alist) + context->count * sizeof(alist->al_offset[0]); context->firstu -= ATTR_ENTSIZE(namelen); if (context->firstu < arraytop) { trace_xfs_attr_list_full(context); alist->al_more = 1; context->seen_enough = 1; return 1; } aep = (attrlist_ent_t *)&context->alist[context->firstu]; aep->a_valuelen = valuelen; memcpy(aep->a_name, name, namelen); aep->a_name[namelen] = 0; alist->al_offset[context->count++] = context->firstu; alist->al_count = context->count; trace_xfs_attr_list_add(context); return 0; } /* * Generate a list of extended attribute names and optionally * also value lengths. Positive return value follows the XFS * convention of being an error, zero or negative return code * is the length of the buffer returned (negated), indicating * success. */ int xfs_attr_list( xfs_inode_t *dp, char *buffer, int bufsize, int flags, attrlist_cursor_kern_t *cursor) { xfs_attr_list_context_t context; struct attrlist *alist; int error; /* * Validate the cursor. */ if (cursor->pad1 || cursor->pad2) return -EINVAL; if ((cursor->initted == 0) && (cursor->hashval || cursor->blkno || cursor->offset)) return -EINVAL; /* * Check for a properly aligned buffer. */ if (((long)buffer) & (sizeof(int)-1)) return -EFAULT; if (flags & ATTR_KERNOVAL) bufsize = 0; /* * Initialize the output buffer. */ memset(&context, 0, sizeof(context)); context.dp = dp; context.cursor = cursor; context.resynch = 1; context.flags = flags; context.alist = buffer; context.bufsize = (bufsize & ~(sizeof(int)-1)); /* align */ context.firstu = context.bufsize; context.put_listent = xfs_attr_put_listent; alist = (struct attrlist *)context.alist; alist->al_count = 0; alist->al_more = 0; alist->al_offset[0] = context.bufsize; error = xfs_attr_list_int(&context); ASSERT(error <= 0); return error; }
./CrossVul/dataset_final_sorted/CWE-400/c/good_5496_0
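A hypothetical userspace sketch, not part of the file above: it shows how a buffer filled by xfs_attr_put_listent() is typically walked. The two struct layouts below mirror the attrlist/attrlist_ent accesses that function makes (al_count, al_more, al_offset[] holding byte offsets from the start of the buffer, and per-entry a_valuelen/a_name); the struct names, the dump helper, and the way the buffer was obtained are assumptions for illustration only.

/*
 * Minimal walker for an attribute-list buffer as laid out by
 * xfs_attr_put_listent() above.  Struct names are placeholders.
 */
#include <stdint.h>
#include <stdio.h>

struct attrlist_sketch {
	int32_t al_count;	/* entries placed in this buffer */
	int32_t al_more;	/* nonzero if the kernel had more to return */
	int32_t al_offset[1];	/* per-entry byte offsets from buffer start */
};

struct attrlist_ent_sketch {
	uint32_t a_valuelen;	/* attribute value length in bytes */
	char     a_name[1];	/* NUL-terminated attribute name */
};

static void dump_attrlist(const char *buf)
{
	const struct attrlist_sketch *al = (const struct attrlist_sketch *)buf;
	int i;

	for (i = 0; i < al->al_count; i++) {
		const struct attrlist_ent_sketch *ent =
			(const struct attrlist_ent_sketch *)(buf + al->al_offset[i]);

		printf("%s (%u bytes)\n", ent->a_name,
		       (unsigned int)ent->a_valuelen);
	}
	if (al->al_more)
		printf("(more entries remain; re-issue the call with the cursor)\n");
}

Note that the kernel code fills entries downward from the end of the buffer while the offset array grows upward from the header, which is why each entry is reached through al_offset[i] rather than by walking sequentially.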
crossvul-cpp_data_bad_5356_4
/* * IPV4 GSO/GRO offload support * Linux INET implementation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * UDPv4 GSO support */ #include <linux/skbuff.h> #include <net/udp.h> #include <net/protocol.h> static DEFINE_SPINLOCK(udp_offload_lock); static struct udp_offload_priv __rcu *udp_offload_base __read_mostly; #define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock)) struct udp_offload_priv { struct udp_offload *offload; possible_net_t net; struct rcu_head rcu; struct udp_offload_priv __rcu *next; }; static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, netdev_features_t features, struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb, netdev_features_t features), __be16 new_protocol, bool is_ipv6) { int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); bool remcsum, need_csum, offload_csum, ufo; struct sk_buff *segs = ERR_PTR(-EINVAL); struct udphdr *uh = udp_hdr(skb); u16 mac_offset = skb->mac_header; __be16 protocol = skb->protocol; u16 mac_len = skb->mac_len; int udp_offset, outer_hlen; __wsum partial; if (unlikely(!pskb_may_pull(skb, tnl_hlen))) goto out; /* Adjust partial header checksum to negate old length. * We cannot rely on the value contained in uh->len as it is * possible that the actual value exceeds the boundaries of the * 16 bit length field due to the header being added outside of an * IP or IPv6 frame that was already limited to 64K - 1. */ partial = csum_sub(csum_unfold(uh->check), (__force __wsum)htonl(skb->len)); /* setup inner skb. */ skb->encapsulation = 0; __skb_pull(skb, tnl_hlen); skb_reset_mac_header(skb); skb_set_network_header(skb, skb_inner_network_offset(skb)); skb->mac_len = skb_inner_network_offset(skb); skb->protocol = new_protocol; need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM); skb->encap_hdr_csum = need_csum; remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM); skb->remcsum_offload = remcsum; ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP); /* Try to offload checksum if possible */ offload_csum = !!(need_csum && (skb->dev->features & (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) : (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM)))); features &= skb->dev->hw_enc_features; /* The only checksum offload we care about from here on out is the * outer one so strip the existing checksum feature flags and * instead set the flag based on our outer checksum offload value. */ if (remcsum || ufo) { features &= ~NETIF_F_CSUM_MASK; if (!need_csum || offload_csum) features |= NETIF_F_HW_CSUM; } /* segment inner packet. 
*/ segs = gso_inner_segment(skb, features); if (IS_ERR_OR_NULL(segs)) { skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset, mac_len); goto out; } outer_hlen = skb_tnl_header_len(skb); udp_offset = outer_hlen - tnl_hlen; skb = segs; do { __be16 len; if (remcsum) skb->ip_summed = CHECKSUM_NONE; /* Set up inner headers if we are offloading inner checksum */ if (skb->ip_summed == CHECKSUM_PARTIAL) { skb_reset_inner_headers(skb); skb->encapsulation = 1; } skb->mac_len = mac_len; skb->protocol = protocol; __skb_push(skb, outer_hlen); skb_reset_mac_header(skb); skb_set_network_header(skb, mac_len); skb_set_transport_header(skb, udp_offset); len = htons(skb->len - udp_offset); uh = udp_hdr(skb); uh->len = len; if (!need_csum) continue; uh->check = ~csum_fold(csum_add(partial, (__force __wsum)len)); if (skb->encapsulation || !offload_csum) { uh->check = gso_make_checksum(skb, ~uh->check); if (uh->check == 0) uh->check = CSUM_MANGLED_0; } else { skb->ip_summed = CHECKSUM_PARTIAL; skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct udphdr, check); } } while ((skb = skb->next)); out: return segs; } struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, netdev_features_t features, bool is_ipv6) { __be16 protocol = skb->protocol; const struct net_offload **offloads; const struct net_offload *ops; struct sk_buff *segs = ERR_PTR(-EINVAL); struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb, netdev_features_t features); rcu_read_lock(); switch (skb->inner_protocol_type) { case ENCAP_TYPE_ETHER: protocol = skb->inner_protocol; gso_inner_segment = skb_mac_gso_segment; break; case ENCAP_TYPE_IPPROTO: offloads = is_ipv6 ? inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[skb->inner_ipproto]); if (!ops || !ops->callbacks.gso_segment) goto out_unlock; gso_inner_segment = ops->callbacks.gso_segment; break; default: goto out_unlock; } segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment, protocol, is_ipv6); out_unlock: rcu_read_unlock(); return segs; } static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); unsigned int mss; __wsum csum; struct udphdr *uh; struct iphdr *iph; if (skb->encapsulation && (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) { segs = skb_udp_tunnel_segment(skb, features, false); goto out; } if (!pskb_may_pull(skb, sizeof(struct udphdr))) goto out; mss = skb_shinfo(skb)->gso_size; if (unlikely(skb->len <= mss)) goto out; if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { /* Packet is from an untrusted source, reset gso_segs. */ int type = skb_shinfo(skb)->gso_type; if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM | SKB_GSO_TUNNEL_REMCSUM | SKB_GSO_IPIP | SKB_GSO_GRE | SKB_GSO_GRE_CSUM) || !(type & (SKB_GSO_UDP)))) goto out; skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); segs = NULL; goto out; } /* Do software UFO. Complete and fill in the UDP checksum as * HW cannot do checksum of UDP packets sent as multiple * IP fragments. */ uh = udp_hdr(skb); iph = ip_hdr(skb); uh->check = 0; csum = skb_checksum(skb, 0, skb->len, 0); uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum); if (uh->check == 0) uh->check = CSUM_MANGLED_0; skb->ip_summed = CHECKSUM_NONE; /* If there is no outer header we can fake a checksum offload * due to the fact that we have already done the checksum in * software prior to segmenting the frame. 
*/ if (!skb->encap_hdr_csum) features |= NETIF_F_HW_CSUM; /* Fragment the skb. IP headers of the fragments are updated in * inet_gso_segment() */ segs = skb_segment(skb, features); out: return segs; } int udp_add_offload(struct net *net, struct udp_offload *uo) { struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC); if (!new_offload) return -ENOMEM; write_pnet(&new_offload->net, net); new_offload->offload = uo; spin_lock(&udp_offload_lock); new_offload->next = udp_offload_base; rcu_assign_pointer(udp_offload_base, new_offload); spin_unlock(&udp_offload_lock); return 0; } EXPORT_SYMBOL(udp_add_offload); static void udp_offload_free_routine(struct rcu_head *head) { struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu); kfree(ou_priv); } void udp_del_offload(struct udp_offload *uo) { struct udp_offload_priv __rcu **head = &udp_offload_base; struct udp_offload_priv *uo_priv; spin_lock(&udp_offload_lock); uo_priv = udp_deref_protected(*head); for (; uo_priv != NULL; uo_priv = udp_deref_protected(*head)) { if (uo_priv->offload == uo) { rcu_assign_pointer(*head, udp_deref_protected(uo_priv->next)); goto unlock; } head = &uo_priv->next; } pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port)); unlock: spin_unlock(&udp_offload_lock); if (uo_priv) call_rcu(&uo_priv->rcu, udp_offload_free_routine); } EXPORT_SYMBOL(udp_del_offload); struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, struct udphdr *uh) { struct udp_offload_priv *uo_priv; struct sk_buff *p, **pp = NULL; struct udphdr *uh2; unsigned int off = skb_gro_offset(skb); int flush = 1; if (NAPI_GRO_CB(skb)->udp_mark || (skb->ip_summed != CHECKSUM_PARTIAL && NAPI_GRO_CB(skb)->csum_cnt == 0 && !NAPI_GRO_CB(skb)->csum_valid)) goto out; /* mark that this skb passed once through the udp gro layer */ NAPI_GRO_CB(skb)->udp_mark = 1; rcu_read_lock(); uo_priv = rcu_dereference(udp_offload_base); for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) { if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) && uo_priv->offload->port == uh->dest && uo_priv->offload->callbacks.gro_receive) goto unflush; } goto out_unlock; unflush: flush = 0; for (p = *head; p; p = p->next) { if (!NAPI_GRO_CB(p)->same_flow) continue; uh2 = (struct udphdr *)(p->data + off); /* Match ports and either checksums are either both zero * or nonzero. */ if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) || (!uh->check ^ !uh2->check)) { NAPI_GRO_CB(p)->same_flow = 0; continue; } } skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */ skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr)); NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto; pp = uo_priv->offload->callbacks.gro_receive(head, skb, uo_priv->offload); out_unlock: rcu_read_unlock(); out: NAPI_GRO_CB(skb)->flush |= flush; return pp; } static struct sk_buff **udp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) { struct udphdr *uh = udp_gro_udphdr(skb); if (unlikely(!uh)) goto flush; /* Don't bother verifying checksum if we're going to flush anyway. 
*/ if (NAPI_GRO_CB(skb)->flush) goto skip; if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check, inet_gro_compute_pseudo)) goto flush; else if (uh->check) skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check, inet_gro_compute_pseudo); skip: NAPI_GRO_CB(skb)->is_ipv6 = 0; return udp_gro_receive(head, skb, uh); flush: NAPI_GRO_CB(skb)->flush = 1; return NULL; } int udp_gro_complete(struct sk_buff *skb, int nhoff) { struct udp_offload_priv *uo_priv; __be16 newlen = htons(skb->len - nhoff); struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); int err = -ENOSYS; uh->len = newlen; rcu_read_lock(); uo_priv = rcu_dereference(udp_offload_base); for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) { if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) && uo_priv->offload->port == uh->dest && uo_priv->offload->callbacks.gro_complete) break; } if (uo_priv) { NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto; err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr), uo_priv->offload); } rcu_read_unlock(); if (skb->remcsum_offload) skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM; skb->encapsulation = 1; skb_set_inner_mac_header(skb, nhoff + sizeof(struct udphdr)); return err; } static int udp4_gro_complete(struct sk_buff *skb, int nhoff) { const struct iphdr *iph = ip_hdr(skb); struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); if (uh->check) { skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr, iph->daddr, 0); } else { skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; } return udp_gro_complete(skb, nhoff); } static const struct net_offload udpv4_offload = { .callbacks = { .gso_segment = udp4_ufo_fragment, .gro_receive = udp4_gro_receive, .gro_complete = udp4_gro_complete, }, }; int __init udpv4_offload_init(void) { return inet_add_offload(&udpv4_offload, IPPROTO_UDP); }
./CrossVul/dataset_final_sorted/CWE-400/c/bad_5356_4
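A hypothetical sketch, not part of the file above: how a UDP tunnel driver would hook into the GRO path this file exports. The udp_offload fields touched here (port, ipproto, callbacks.gro_receive/gro_complete) are exactly the ones udp_gro_receive()/udp_gro_complete() dereference, and registration goes through udp_add_offload()/udp_del_offload() as defined above; the driver name, callback bodies, and the idea of passing the port at registration time are illustrative only.

#include <linux/in.h>
#include <linux/skbuff.h>
#include <net/protocol.h>
#include <net/udp.h>

static struct sk_buff **my_tun_gro_receive(struct sk_buff **head,
					   struct sk_buff *skb,
					   struct udp_offload *uoff)
{
	/* A real driver would match and merge its tunnel header here;
	 * flushing unconditionally just opts out of aggregation. */
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}

static int my_tun_gro_complete(struct sk_buff *skb, int nhoff,
			       struct udp_offload *uoff)
{
	/* Fix up the inner headers of the merged packet here. */
	return 0;
}

static struct udp_offload my_tun_offload = {
	.ipproto = IPPROTO_UDP,		/* illustrative value */
	.callbacks = {
		.gro_receive	= my_tun_gro_receive,
		.gro_complete	= my_tun_gro_complete,
	},
};

static int my_tun_enable_gro(struct net *net, __be16 port)
{
	/* Entries added here are matched per destination port and per
	 * network namespace in udp_gro_receive() above. */
	my_tun_offload.port = port;
	return udp_add_offload(net, &my_tun_offload);
}

static void my_tun_disable_gro(void)
{
	udp_del_offload(&my_tun_offload);
}

The callback signatures follow directly from the call sites in the file: gro_receive is invoked as callbacks.gro_receive(head, skb, offload) and gro_complete as callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr), offload).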
crossvul-cpp_data_good_5200_2
/* * linux/fs/namespace.c * * (C) Copyright Al Viro 2000, 2001 * Released under GPL v2. * * Based on code from fs/super.c, copyright Linus Torvalds and others. * Heavily rewritten. */ #include <linux/syscalls.h> #include <linux/export.h> #include <linux/capability.h> #include <linux/mnt_namespace.h> #include <linux/user_namespace.h> #include <linux/namei.h> #include <linux/security.h> #include <linux/idr.h> #include <linux/init.h> /* init_rootfs */ #include <linux/fs_struct.h> /* get_fs_root et.al. */ #include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */ #include <linux/uaccess.h> #include <linux/proc_ns.h> #include <linux/magic.h> #include <linux/bootmem.h> #include <linux/task_work.h> #include "pnode.h" #include "internal.h" /* Maximum number of mounts in a mount namespace */ unsigned int sysctl_mount_max __read_mostly = 100000; static unsigned int m_hash_mask __read_mostly; static unsigned int m_hash_shift __read_mostly; static unsigned int mp_hash_mask __read_mostly; static unsigned int mp_hash_shift __read_mostly; static __initdata unsigned long mhash_entries; static int __init set_mhash_entries(char *str) { if (!str) return 0; mhash_entries = simple_strtoul(str, &str, 0); return 1; } __setup("mhash_entries=", set_mhash_entries); static __initdata unsigned long mphash_entries; static int __init set_mphash_entries(char *str) { if (!str) return 0; mphash_entries = simple_strtoul(str, &str, 0); return 1; } __setup("mphash_entries=", set_mphash_entries); static u64 event; static DEFINE_IDA(mnt_id_ida); static DEFINE_IDA(mnt_group_ida); static DEFINE_SPINLOCK(mnt_id_lock); static int mnt_id_start = 0; static int mnt_group_start = 1; static struct hlist_head *mount_hashtable __read_mostly; static struct hlist_head *mountpoint_hashtable __read_mostly; static struct kmem_cache *mnt_cache __read_mostly; static DECLARE_RWSEM(namespace_sem); /* /sys/fs */ struct kobject *fs_kobj; EXPORT_SYMBOL_GPL(fs_kobj); /* * vfsmount lock may be taken for read to prevent changes to the * vfsmount hash, ie. during mountpoint lookups or walking back * up the tree. * * It should be taken for write in all cases where the vfsmount * tree or hash is modified or when a vfsmount structure is modified. */ __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock); static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry) { unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); tmp += ((unsigned long)dentry / L1_CACHE_BYTES); tmp = tmp + (tmp >> m_hash_shift); return &mount_hashtable[tmp & m_hash_mask]; } static inline struct hlist_head *mp_hash(struct dentry *dentry) { unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES); tmp = tmp + (tmp >> mp_hash_shift); return &mountpoint_hashtable[tmp & mp_hash_mask]; } /* * allocation is serialized by namespace_sem, but we need the spinlock to * serialize with freeing. 
*/ static int mnt_alloc_id(struct mount *mnt) { int res; retry: ida_pre_get(&mnt_id_ida, GFP_KERNEL); spin_lock(&mnt_id_lock); res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id); if (!res) mnt_id_start = mnt->mnt_id + 1; spin_unlock(&mnt_id_lock); if (res == -EAGAIN) goto retry; return res; } static void mnt_free_id(struct mount *mnt) { int id = mnt->mnt_id; spin_lock(&mnt_id_lock); ida_remove(&mnt_id_ida, id); if (mnt_id_start > id) mnt_id_start = id; spin_unlock(&mnt_id_lock); } /* * Allocate a new peer group ID * * mnt_group_ida is protected by namespace_sem */ static int mnt_alloc_group_id(struct mount *mnt) { int res; if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL)) return -ENOMEM; res = ida_get_new_above(&mnt_group_ida, mnt_group_start, &mnt->mnt_group_id); if (!res) mnt_group_start = mnt->mnt_group_id + 1; return res; } /* * Release a peer group ID */ void mnt_release_group_id(struct mount *mnt) { int id = mnt->mnt_group_id; ida_remove(&mnt_group_ida, id); if (mnt_group_start > id) mnt_group_start = id; mnt->mnt_group_id = 0; } /* * vfsmount lock must be held for read */ static inline void mnt_add_count(struct mount *mnt, int n) { #ifdef CONFIG_SMP this_cpu_add(mnt->mnt_pcp->mnt_count, n); #else preempt_disable(); mnt->mnt_count += n; preempt_enable(); #endif } /* * vfsmount lock must be held for write */ unsigned int mnt_get_count(struct mount *mnt) { #ifdef CONFIG_SMP unsigned int count = 0; int cpu; for_each_possible_cpu(cpu) { count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count; } return count; #else return mnt->mnt_count; #endif } static void drop_mountpoint(struct fs_pin *p) { struct mount *m = container_of(p, struct mount, mnt_umount); dput(m->mnt_ex_mountpoint); pin_remove(p); mntput(&m->mnt); } static struct mount *alloc_vfsmnt(const char *name) { struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL); if (mnt) { int err; err = mnt_alloc_id(mnt); if (err) goto out_free_cache; if (name) { mnt->mnt_devname = kstrdup_const(name, GFP_KERNEL); if (!mnt->mnt_devname) goto out_free_id; } #ifdef CONFIG_SMP mnt->mnt_pcp = alloc_percpu(struct mnt_pcp); if (!mnt->mnt_pcp) goto out_free_devname; this_cpu_add(mnt->mnt_pcp->mnt_count, 1); #else mnt->mnt_count = 1; mnt->mnt_writers = 0; #endif INIT_HLIST_NODE(&mnt->mnt_hash); INIT_LIST_HEAD(&mnt->mnt_child); INIT_LIST_HEAD(&mnt->mnt_mounts); INIT_LIST_HEAD(&mnt->mnt_list); INIT_LIST_HEAD(&mnt->mnt_expire); INIT_LIST_HEAD(&mnt->mnt_share); INIT_LIST_HEAD(&mnt->mnt_slave_list); INIT_LIST_HEAD(&mnt->mnt_slave); INIT_HLIST_NODE(&mnt->mnt_mp_list); #ifdef CONFIG_FSNOTIFY INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks); #endif init_fs_pin(&mnt->mnt_umount, drop_mountpoint); } return mnt; #ifdef CONFIG_SMP out_free_devname: kfree_const(mnt->mnt_devname); #endif out_free_id: mnt_free_id(mnt); out_free_cache: kmem_cache_free(mnt_cache, mnt); return NULL; } /* * Most r/o checks on a fs are for operations that take * discrete amounts of time, like a write() or unlink(). * We must keep track of when those operations start * (for permission checks) and when they end, so that * we can determine when writes are able to occur to * a filesystem. */ /* * __mnt_is_readonly: check whether a mount is read-only * @mnt: the mount to check for its write status * * This shouldn't be used directly ouside of the VFS. * It does not guarantee that the filesystem will stay * r/w, just that it is right *now*. This can not and * should not be used in place of IS_RDONLY(inode). * mnt_want/drop_write() will _keep_ the filesystem * r/w. 
*/ int __mnt_is_readonly(struct vfsmount *mnt) { if (mnt->mnt_flags & MNT_READONLY) return 1; if (mnt->mnt_sb->s_flags & MS_RDONLY) return 1; return 0; } EXPORT_SYMBOL_GPL(__mnt_is_readonly); static inline void mnt_inc_writers(struct mount *mnt) { #ifdef CONFIG_SMP this_cpu_inc(mnt->mnt_pcp->mnt_writers); #else mnt->mnt_writers++; #endif } static inline void mnt_dec_writers(struct mount *mnt) { #ifdef CONFIG_SMP this_cpu_dec(mnt->mnt_pcp->mnt_writers); #else mnt->mnt_writers--; #endif } static unsigned int mnt_get_writers(struct mount *mnt) { #ifdef CONFIG_SMP unsigned int count = 0; int cpu; for_each_possible_cpu(cpu) { count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers; } return count; #else return mnt->mnt_writers; #endif } static int mnt_is_readonly(struct vfsmount *mnt) { if (mnt->mnt_sb->s_readonly_remount) return 1; /* Order wrt setting s_flags/s_readonly_remount in do_remount() */ smp_rmb(); return __mnt_is_readonly(mnt); } /* * Most r/o & frozen checks on a fs are for operations that take discrete * amounts of time, like a write() or unlink(). We must keep track of when * those operations start (for permission checks) and when they end, so that we * can determine when writes are able to occur to a filesystem. */ /** * __mnt_want_write - get write access to a mount without freeze protection * @m: the mount on which to take a write * * This tells the low-level filesystem that a write is about to be performed to * it, and makes sure that writes are allowed (mnt it read-write) before * returning success. This operation does not protect against filesystem being * frozen. When the write operation is finished, __mnt_drop_write() must be * called. This is effectively a refcount. */ int __mnt_want_write(struct vfsmount *m) { struct mount *mnt = real_mount(m); int ret = 0; preempt_disable(); mnt_inc_writers(mnt); /* * The store to mnt_inc_writers must be visible before we pass * MNT_WRITE_HOLD loop below, so that the slowpath can see our * incremented count after it has set MNT_WRITE_HOLD. */ smp_mb(); while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) cpu_relax(); /* * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will * be set to match its requirements. So we must not load that until * MNT_WRITE_HOLD is cleared. */ smp_rmb(); if (mnt_is_readonly(m)) { mnt_dec_writers(mnt); ret = -EROFS; } preempt_enable(); return ret; } /** * mnt_want_write - get write access to a mount * @m: the mount on which to take a write * * This tells the low-level filesystem that a write is about to be performed to * it, and makes sure that writes are allowed (mount is read-write, filesystem * is not frozen) before returning success. When the write operation is * finished, mnt_drop_write() must be called. This is effectively a refcount. */ int mnt_want_write(struct vfsmount *m) { int ret; sb_start_write(m->mnt_sb); ret = __mnt_want_write(m); if (ret) sb_end_write(m->mnt_sb); return ret; } EXPORT_SYMBOL_GPL(mnt_want_write); /** * mnt_clone_write - get write access to a mount * @mnt: the mount on which to take a write * * This is effectively like mnt_want_write, except * it must only be used to take an extra write reference * on a mountpoint that we already know has a write reference * on it. This allows some optimisation. * * After finished, mnt_drop_write must be called as usual to * drop the reference. 
*/ int mnt_clone_write(struct vfsmount *mnt) { /* superblock may be r/o */ if (__mnt_is_readonly(mnt)) return -EROFS; preempt_disable(); mnt_inc_writers(real_mount(mnt)); preempt_enable(); return 0; } EXPORT_SYMBOL_GPL(mnt_clone_write); /** * __mnt_want_write_file - get write access to a file's mount * @file: the file who's mount on which to take a write * * This is like __mnt_want_write, but it takes a file and can * do some optimisations if the file is open for write already */ int __mnt_want_write_file(struct file *file) { if (!(file->f_mode & FMODE_WRITER)) return __mnt_want_write(file->f_path.mnt); else return mnt_clone_write(file->f_path.mnt); } /** * mnt_want_write_file - get write access to a file's mount * @file: the file who's mount on which to take a write * * This is like mnt_want_write, but it takes a file and can * do some optimisations if the file is open for write already */ int mnt_want_write_file(struct file *file) { int ret; sb_start_write(file->f_path.mnt->mnt_sb); ret = __mnt_want_write_file(file); if (ret) sb_end_write(file->f_path.mnt->mnt_sb); return ret; } EXPORT_SYMBOL_GPL(mnt_want_write_file); /** * __mnt_drop_write - give up write access to a mount * @mnt: the mount on which to give up write access * * Tells the low-level filesystem that we are done * performing writes to it. Must be matched with * __mnt_want_write() call above. */ void __mnt_drop_write(struct vfsmount *mnt) { preempt_disable(); mnt_dec_writers(real_mount(mnt)); preempt_enable(); } /** * mnt_drop_write - give up write access to a mount * @mnt: the mount on which to give up write access * * Tells the low-level filesystem that we are done performing writes to it and * also allows filesystem to be frozen again. Must be matched with * mnt_want_write() call above. */ void mnt_drop_write(struct vfsmount *mnt) { __mnt_drop_write(mnt); sb_end_write(mnt->mnt_sb); } EXPORT_SYMBOL_GPL(mnt_drop_write); void __mnt_drop_write_file(struct file *file) { __mnt_drop_write(file->f_path.mnt); } void mnt_drop_write_file(struct file *file) { mnt_drop_write(file->f_path.mnt); } EXPORT_SYMBOL(mnt_drop_write_file); static int mnt_make_readonly(struct mount *mnt) { int ret = 0; lock_mount_hash(); mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; /* * After storing MNT_WRITE_HOLD, we'll read the counters. This store * should be visible before we do. */ smp_mb(); /* * With writers on hold, if this value is zero, then there are * definitely no active writers (although held writers may subsequently * increment the count, they'll have to wait, and decrement it after * seeing MNT_READONLY). * * It is OK to have counter incremented on one CPU and decremented on * another: the sum will add up correctly. The danger would be when we * sum up each counter, if we read a counter before it is incremented, * but then read another CPU's count which it has been subsequently * decremented from -- we would see more decrements than we should. * MNT_WRITE_HOLD protects against this scenario, because * mnt_want_write first increments count, then smp_mb, then spins on * MNT_WRITE_HOLD, so it can't be decremented by another CPU while * we're counting up here. */ if (mnt_get_writers(mnt) > 0) ret = -EBUSY; else mnt->mnt.mnt_flags |= MNT_READONLY; /* * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers * that become unheld will see MNT_READONLY. 
*/ smp_wmb(); mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; unlock_mount_hash(); return ret; } static void __mnt_unmake_readonly(struct mount *mnt) { lock_mount_hash(); mnt->mnt.mnt_flags &= ~MNT_READONLY; unlock_mount_hash(); } int sb_prepare_remount_readonly(struct super_block *sb) { struct mount *mnt; int err = 0; /* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */ if (atomic_long_read(&sb->s_remove_count)) return -EBUSY; lock_mount_hash(); list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { if (!(mnt->mnt.mnt_flags & MNT_READONLY)) { mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; smp_mb(); if (mnt_get_writers(mnt) > 0) { err = -EBUSY; break; } } } if (!err && atomic_long_read(&sb->s_remove_count)) err = -EBUSY; if (!err) { sb->s_readonly_remount = 1; smp_wmb(); } list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD) mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; } unlock_mount_hash(); return err; } static void free_vfsmnt(struct mount *mnt) { kfree_const(mnt->mnt_devname); #ifdef CONFIG_SMP free_percpu(mnt->mnt_pcp); #endif kmem_cache_free(mnt_cache, mnt); } static void delayed_free_vfsmnt(struct rcu_head *head) { free_vfsmnt(container_of(head, struct mount, mnt_rcu)); } /* call under rcu_read_lock */ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq) { struct mount *mnt; if (read_seqretry(&mount_lock, seq)) return 1; if (bastard == NULL) return 0; mnt = real_mount(bastard); mnt_add_count(mnt, 1); if (likely(!read_seqretry(&mount_lock, seq))) return 0; if (bastard->mnt_flags & MNT_SYNC_UMOUNT) { mnt_add_count(mnt, -1); return 1; } return -1; } /* call under rcu_read_lock */ bool legitimize_mnt(struct vfsmount *bastard, unsigned seq) { int res = __legitimize_mnt(bastard, seq); if (likely(!res)) return true; if (unlikely(res < 0)) { rcu_read_unlock(); mntput(bastard); rcu_read_lock(); } return false; } /* * find the first mount at @dentry on vfsmount @mnt. * call under rcu_read_lock() */ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) { struct hlist_head *head = m_hash(mnt, dentry); struct mount *p; hlist_for_each_entry_rcu(p, head, mnt_hash) if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) return p; return NULL; } /* * find the last mount at @dentry on vfsmount @mnt. * mount_lock must be held. */ struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry) { struct mount *p, *res = NULL; p = __lookup_mnt(mnt, dentry); if (!p) goto out; if (!(p->mnt.mnt_flags & MNT_UMOUNT)) res = p; hlist_for_each_entry_continue(p, mnt_hash) { if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry) break; if (!(p->mnt.mnt_flags & MNT_UMOUNT)) res = p; } out: return res; } /* * lookup_mnt - Return the first child mount mounted at path * * "First" means first mounted chronologically. If you create the * following mounts: * * mount /dev/sda1 /mnt * mount /dev/sda2 /mnt * mount /dev/sda3 /mnt * * Then lookup_mnt() on the base /mnt dentry in the root mount will * return successively the root dentry and vfsmount of /dev/sda1, then * /dev/sda2, then /dev/sda3, then NULL. * * lookup_mnt takes a reference to the found vfsmount. */ struct vfsmount *lookup_mnt(struct path *path) { struct mount *child_mnt; struct vfsmount *m; unsigned seq; rcu_read_lock(); do { seq = read_seqbegin(&mount_lock); child_mnt = __lookup_mnt(path->mnt, path->dentry); m = child_mnt ? 
&child_mnt->mnt : NULL; } while (!legitimize_mnt(m, seq)); rcu_read_unlock(); return m; } /* * __is_local_mountpoint - Test to see if dentry is a mountpoint in the * current mount namespace. * * The common case is dentries are not mountpoints at all and that * test is handled inline. For the slow case when we are actually * dealing with a mountpoint of some kind, walk through all of the * mounts in the current mount namespace and test to see if the dentry * is a mountpoint. * * The mount_hashtable is not usable in the context because we * need to identify all mounts that may be in the current mount * namespace not just a mount that happens to have some specified * parent mount. */ bool __is_local_mountpoint(struct dentry *dentry) { struct mnt_namespace *ns = current->nsproxy->mnt_ns; struct mount *mnt; bool is_covered = false; if (!d_mountpoint(dentry)) goto out; down_read(&namespace_sem); list_for_each_entry(mnt, &ns->list, mnt_list) { is_covered = (mnt->mnt_mountpoint == dentry); if (is_covered) break; } up_read(&namespace_sem); out: return is_covered; } static struct mountpoint *lookup_mountpoint(struct dentry *dentry) { struct hlist_head *chain = mp_hash(dentry); struct mountpoint *mp; hlist_for_each_entry(mp, chain, m_hash) { if (mp->m_dentry == dentry) { /* might be worth a WARN_ON() */ if (d_unlinked(dentry)) return ERR_PTR(-ENOENT); mp->m_count++; return mp; } } return NULL; } static struct mountpoint *new_mountpoint(struct dentry *dentry) { struct hlist_head *chain = mp_hash(dentry); struct mountpoint *mp; int ret; mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL); if (!mp) return ERR_PTR(-ENOMEM); ret = d_set_mounted(dentry); if (ret) { kfree(mp); return ERR_PTR(ret); } mp->m_dentry = dentry; mp->m_count = 1; hlist_add_head(&mp->m_hash, chain); INIT_HLIST_HEAD(&mp->m_list); return mp; } static void put_mountpoint(struct mountpoint *mp) { if (!--mp->m_count) { struct dentry *dentry = mp->m_dentry; BUG_ON(!hlist_empty(&mp->m_list)); spin_lock(&dentry->d_lock); dentry->d_flags &= ~DCACHE_MOUNTED; spin_unlock(&dentry->d_lock); hlist_del(&mp->m_hash); kfree(mp); } } static inline int check_mnt(struct mount *mnt) { return mnt->mnt_ns == current->nsproxy->mnt_ns; } /* * vfsmount lock must be held for write */ static void touch_mnt_namespace(struct mnt_namespace *ns) { if (ns) { ns->event = ++event; wake_up_interruptible(&ns->poll); } } /* * vfsmount lock must be held for write */ static void __touch_mnt_namespace(struct mnt_namespace *ns) { if (ns && ns->event != event) { ns->event = event; wake_up_interruptible(&ns->poll); } } /* * vfsmount lock must be held for write */ static void unhash_mnt(struct mount *mnt) { mnt->mnt_parent = mnt; mnt->mnt_mountpoint = mnt->mnt.mnt_root; list_del_init(&mnt->mnt_child); hlist_del_init_rcu(&mnt->mnt_hash); hlist_del_init(&mnt->mnt_mp_list); put_mountpoint(mnt->mnt_mp); mnt->mnt_mp = NULL; } /* * vfsmount lock must be held for write */ static void detach_mnt(struct mount *mnt, struct path *old_path) { old_path->dentry = mnt->mnt_mountpoint; old_path->mnt = &mnt->mnt_parent->mnt; unhash_mnt(mnt); } /* * vfsmount lock must be held for write */ static void umount_mnt(struct mount *mnt) { /* old mountpoint will be dropped when we can do that */ mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint; unhash_mnt(mnt); } /* * vfsmount lock must be held for write */ void mnt_set_mountpoint(struct mount *mnt, struct mountpoint *mp, struct mount *child_mnt) { mp->m_count++; mnt_add_count(mnt, 1); /* essentially, that's mntget */ child_mnt->mnt_mountpoint = 
dget(mp->m_dentry); child_mnt->mnt_parent = mnt; child_mnt->mnt_mp = mp; hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list); } /* * vfsmount lock must be held for write */ static void attach_mnt(struct mount *mnt, struct mount *parent, struct mountpoint *mp) { mnt_set_mountpoint(parent, mp, mnt); hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry)); list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); } static void attach_shadowed(struct mount *mnt, struct mount *parent, struct mount *shadows) { if (shadows) { hlist_add_behind_rcu(&mnt->mnt_hash, &shadows->mnt_hash); list_add(&mnt->mnt_child, &shadows->mnt_child); } else { hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mnt->mnt_mountpoint)); list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); } } /* * vfsmount lock must be held for write */ static void commit_tree(struct mount *mnt, struct mount *shadows) { struct mount *parent = mnt->mnt_parent; struct mount *m; LIST_HEAD(head); struct mnt_namespace *n = parent->mnt_ns; BUG_ON(parent == mnt); list_add_tail(&head, &mnt->mnt_list); list_for_each_entry(m, &head, mnt_list) m->mnt_ns = n; list_splice(&head, n->list.prev); n->mounts += n->pending_mounts; n->pending_mounts = 0; attach_shadowed(mnt, parent, shadows); touch_mnt_namespace(n); } static struct mount *next_mnt(struct mount *p, struct mount *root) { struct list_head *next = p->mnt_mounts.next; if (next == &p->mnt_mounts) { while (1) { if (p == root) return NULL; next = p->mnt_child.next; if (next != &p->mnt_parent->mnt_mounts) break; p = p->mnt_parent; } } return list_entry(next, struct mount, mnt_child); } static struct mount *skip_mnt_tree(struct mount *p) { struct list_head *prev = p->mnt_mounts.prev; while (prev != &p->mnt_mounts) { p = list_entry(prev, struct mount, mnt_child); prev = p->mnt_mounts.prev; } return p; } struct vfsmount * vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data) { struct mount *mnt; struct dentry *root; if (!type) return ERR_PTR(-ENODEV); mnt = alloc_vfsmnt(name); if (!mnt) return ERR_PTR(-ENOMEM); if (flags & MS_KERNMOUNT) mnt->mnt.mnt_flags = MNT_INTERNAL; root = mount_fs(type, flags, name, data); if (IS_ERR(root)) { mnt_free_id(mnt); free_vfsmnt(mnt); return ERR_CAST(root); } mnt->mnt.mnt_root = root; mnt->mnt.mnt_sb = root->d_sb; mnt->mnt_mountpoint = mnt->mnt.mnt_root; mnt->mnt_parent = mnt; lock_mount_hash(); list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts); unlock_mount_hash(); return &mnt->mnt; } EXPORT_SYMBOL_GPL(vfs_kern_mount); static struct mount *clone_mnt(struct mount *old, struct dentry *root, int flag) { struct super_block *sb = old->mnt.mnt_sb; struct mount *mnt; int err; mnt = alloc_vfsmnt(old->mnt_devname); if (!mnt) return ERR_PTR(-ENOMEM); if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE)) mnt->mnt_group_id = 0; /* not a peer of original */ else mnt->mnt_group_id = old->mnt_group_id; if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) { err = mnt_alloc_group_id(mnt); if (err) goto out_free; } mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED); /* Don't allow unprivileged users to change mount flags */ if (flag & CL_UNPRIVILEGED) { mnt->mnt.mnt_flags |= MNT_LOCK_ATIME; if (mnt->mnt.mnt_flags & MNT_READONLY) mnt->mnt.mnt_flags |= MNT_LOCK_READONLY; if (mnt->mnt.mnt_flags & MNT_NODEV) mnt->mnt.mnt_flags |= MNT_LOCK_NODEV; if (mnt->mnt.mnt_flags & MNT_NOSUID) mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID; if (mnt->mnt.mnt_flags & MNT_NOEXEC) mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC; } /* Don't allow 
unprivileged users to reveal what is under a mount */ if ((flag & CL_UNPRIVILEGED) && (!(flag & CL_EXPIRE) || list_empty(&old->mnt_expire))) mnt->mnt.mnt_flags |= MNT_LOCKED; atomic_inc(&sb->s_active); mnt->mnt.mnt_sb = sb; mnt->mnt.mnt_root = dget(root); mnt->mnt_mountpoint = mnt->mnt.mnt_root; mnt->mnt_parent = mnt; lock_mount_hash(); list_add_tail(&mnt->mnt_instance, &sb->s_mounts); unlock_mount_hash(); if ((flag & CL_SLAVE) || ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) { list_add(&mnt->mnt_slave, &old->mnt_slave_list); mnt->mnt_master = old; CLEAR_MNT_SHARED(mnt); } else if (!(flag & CL_PRIVATE)) { if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old)) list_add(&mnt->mnt_share, &old->mnt_share); if (IS_MNT_SLAVE(old)) list_add(&mnt->mnt_slave, &old->mnt_slave); mnt->mnt_master = old->mnt_master; } if (flag & CL_MAKE_SHARED) set_mnt_shared(mnt); /* stick the duplicate mount on the same expiry list * as the original if that was on one */ if (flag & CL_EXPIRE) { if (!list_empty(&old->mnt_expire)) list_add(&mnt->mnt_expire, &old->mnt_expire); } return mnt; out_free: mnt_free_id(mnt); free_vfsmnt(mnt); return ERR_PTR(err); } static void cleanup_mnt(struct mount *mnt) { /* * This probably indicates that somebody messed * up a mnt_want/drop_write() pair. If this * happens, the filesystem was probably unable * to make r/w->r/o transitions. */ /* * The locking used to deal with mnt_count decrement provides barriers, * so mnt_get_writers() below is safe. */ WARN_ON(mnt_get_writers(mnt)); if (unlikely(mnt->mnt_pins.first)) mnt_pin_kill(mnt); fsnotify_vfsmount_delete(&mnt->mnt); dput(mnt->mnt.mnt_root); deactivate_super(mnt->mnt.mnt_sb); mnt_free_id(mnt); call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt); } static void __cleanup_mnt(struct rcu_head *head) { cleanup_mnt(container_of(head, struct mount, mnt_rcu)); } static LLIST_HEAD(delayed_mntput_list); static void delayed_mntput(struct work_struct *unused) { struct llist_node *node = llist_del_all(&delayed_mntput_list); struct llist_node *next; for (; node; node = next) { next = llist_next(node); cleanup_mnt(llist_entry(node, struct mount, mnt_llist)); } } static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput); static void mntput_no_expire(struct mount *mnt) { rcu_read_lock(); mnt_add_count(mnt, -1); if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */ rcu_read_unlock(); return; } lock_mount_hash(); if (mnt_get_count(mnt)) { rcu_read_unlock(); unlock_mount_hash(); return; } if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) { rcu_read_unlock(); unlock_mount_hash(); return; } mnt->mnt.mnt_flags |= MNT_DOOMED; rcu_read_unlock(); list_del(&mnt->mnt_instance); if (unlikely(!list_empty(&mnt->mnt_mounts))) { struct mount *p, *tmp; list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { umount_mnt(p); } } unlock_mount_hash(); if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) { struct task_struct *task = current; if (likely(!(task->flags & PF_KTHREAD))) { init_task_work(&mnt->mnt_rcu, __cleanup_mnt); if (!task_work_add(task, &mnt->mnt_rcu, true)) return; } if (llist_add(&mnt->mnt_llist, &delayed_mntput_list)) schedule_delayed_work(&delayed_mntput_work, 1); return; } cleanup_mnt(mnt); } void mntput(struct vfsmount *mnt) { if (mnt) { struct mount *m = real_mount(mnt); /* avoid cacheline pingpong, hope gcc doesn't get "smart" */ if (unlikely(m->mnt_expiry_mark)) m->mnt_expiry_mark = 0; mntput_no_expire(m); } } EXPORT_SYMBOL(mntput); struct vfsmount *mntget(struct vfsmount *mnt) { if (mnt) mnt_add_count(real_mount(mnt), 1); return mnt; } 
EXPORT_SYMBOL(mntget); struct vfsmount *mnt_clone_internal(struct path *path) { struct mount *p; p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE); if (IS_ERR(p)) return ERR_CAST(p); p->mnt.mnt_flags |= MNT_INTERNAL; return &p->mnt; } static inline void mangle(struct seq_file *m, const char *s) { seq_escape(m, s, " \t\n\\"); } /* * Simple .show_options callback for filesystems which don't want to * implement more complex mount option showing. * * See also save_mount_options(). */ int generic_show_options(struct seq_file *m, struct dentry *root) { const char *options; rcu_read_lock(); options = rcu_dereference(root->d_sb->s_options); if (options != NULL && options[0]) { seq_putc(m, ','); mangle(m, options); } rcu_read_unlock(); return 0; } EXPORT_SYMBOL(generic_show_options); /* * If filesystem uses generic_show_options(), this function should be * called from the fill_super() callback. * * The .remount_fs callback usually needs to be handled in a special * way, to make sure, that previous options are not overwritten if the * remount fails. * * Also note, that if the filesystem's .remount_fs function doesn't * reset all options to their default value, but changes only newly * given options, then the displayed options will not reflect reality * any more. */ void save_mount_options(struct super_block *sb, char *options) { BUG_ON(sb->s_options); rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL)); } EXPORT_SYMBOL(save_mount_options); void replace_mount_options(struct super_block *sb, char *options) { char *old = sb->s_options; rcu_assign_pointer(sb->s_options, options); if (old) { synchronize_rcu(); kfree(old); } } EXPORT_SYMBOL(replace_mount_options); #ifdef CONFIG_PROC_FS /* iterator; we want it to have access to namespace_sem, thus here... */ static void *m_start(struct seq_file *m, loff_t *pos) { struct proc_mounts *p = m->private; down_read(&namespace_sem); if (p->cached_event == p->ns->event) { void *v = p->cached_mount; if (*pos == p->cached_index) return v; if (*pos == p->cached_index + 1) { v = seq_list_next(v, &p->ns->list, &p->cached_index); return p->cached_mount = v; } } p->cached_event = p->ns->event; p->cached_mount = seq_list_start(&p->ns->list, *pos); p->cached_index = *pos; return p->cached_mount; } static void *m_next(struct seq_file *m, void *v, loff_t *pos) { struct proc_mounts *p = m->private; p->cached_mount = seq_list_next(v, &p->ns->list, pos); p->cached_index = *pos; return p->cached_mount; } static void m_stop(struct seq_file *m, void *v) { up_read(&namespace_sem); } static int m_show(struct seq_file *m, void *v) { struct proc_mounts *p = m->private; struct mount *r = list_entry(v, struct mount, mnt_list); return p->show(m, &r->mnt); } const struct seq_operations mounts_op = { .start = m_start, .next = m_next, .stop = m_stop, .show = m_show, }; #endif /* CONFIG_PROC_FS */ /** * may_umount_tree - check if a mount tree is busy * @mnt: root of mount tree * * This is called to check if a tree of mounts has any * open files, pwds, chroots or sub mounts that are * busy. 
*/ int may_umount_tree(struct vfsmount *m) { struct mount *mnt = real_mount(m); int actual_refs = 0; int minimum_refs = 0; struct mount *p; BUG_ON(!m); /* write lock needed for mnt_get_count */ lock_mount_hash(); for (p = mnt; p; p = next_mnt(p, mnt)) { actual_refs += mnt_get_count(p); minimum_refs += 2; } unlock_mount_hash(); if (actual_refs > minimum_refs) return 0; return 1; } EXPORT_SYMBOL(may_umount_tree); /** * may_umount - check if a mount point is busy * @mnt: root of mount * * This is called to check if a mount point has any * open files, pwds, chroots or sub mounts. If the * mount has sub mounts this will return busy * regardless of whether the sub mounts are busy. * * Doesn't take quota and stuff into account. IOW, in some cases it will * give false negatives. The main reason why it's here is that we need * a non-destructive way to look for easily umountable filesystems. */ int may_umount(struct vfsmount *mnt) { int ret = 1; down_read(&namespace_sem); lock_mount_hash(); if (propagate_mount_busy(real_mount(mnt), 2)) ret = 0; unlock_mount_hash(); up_read(&namespace_sem); return ret; } EXPORT_SYMBOL(may_umount); static HLIST_HEAD(unmounted); /* protected by namespace_sem */ static void namespace_unlock(void) { struct hlist_head head; hlist_move_list(&unmounted, &head); up_write(&namespace_sem); if (likely(hlist_empty(&head))) return; synchronize_rcu(); group_pin_kill(&head); } static inline void namespace_lock(void) { down_write(&namespace_sem); } enum umount_tree_flags { UMOUNT_SYNC = 1, UMOUNT_PROPAGATE = 2, UMOUNT_CONNECTED = 4, }; static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how) { /* Leaving mounts connected is only valid for lazy umounts */ if (how & UMOUNT_SYNC) return true; /* A mount without a parent has nothing to be connected to */ if (!mnt_has_parent(mnt)) return true; /* Because the reference counting rules change when mounts are * unmounted and connected, umounted mounts may not be * connected to mounted mounts. */ if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) return true; /* Has it been requested that the mount remain connected? */ if (how & UMOUNT_CONNECTED) return false; /* Is the mount locked such that it needs to remain connected? */ if (IS_MNT_LOCKED(mnt)) return false; /* By default disconnect the mount */ return true; } /* * mount_lock must be held * namespace_sem must be held for write */ static void umount_tree(struct mount *mnt, enum umount_tree_flags how) { LIST_HEAD(tmp_list); struct mount *p; if (how & UMOUNT_PROPAGATE) propagate_mount_unlock(mnt); /* Gather the mounts to umount */ for (p = mnt; p; p = next_mnt(p, mnt)) { p->mnt.mnt_flags |= MNT_UMOUNT; list_move(&p->mnt_list, &tmp_list); } /* Hide the mounts from mnt_mounts */ list_for_each_entry(p, &tmp_list, mnt_list) { list_del_init(&p->mnt_child); } /* Add propogated mounts to the tmp_list */ if (how & UMOUNT_PROPAGATE) propagate_umount(&tmp_list); while (!list_empty(&tmp_list)) { struct mnt_namespace *ns; bool disconnect; p = list_first_entry(&tmp_list, struct mount, mnt_list); list_del_init(&p->mnt_expire); list_del_init(&p->mnt_list); ns = p->mnt_ns; if (ns) { ns->mounts--; __touch_mnt_namespace(ns); } p->mnt_ns = NULL; if (how & UMOUNT_SYNC) p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; disconnect = disconnect_mount(p, how); pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, disconnect ? 
&unmounted : NULL); if (mnt_has_parent(p)) { mnt_add_count(p->mnt_parent, -1); if (!disconnect) { /* Don't forget about p */ list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts); } else { umount_mnt(p); } } change_mnt_propagation(p, MS_PRIVATE); } } static void shrink_submounts(struct mount *mnt); static int do_umount(struct mount *mnt, int flags) { struct super_block *sb = mnt->mnt.mnt_sb; int retval; retval = security_sb_umount(&mnt->mnt, flags); if (retval) return retval; /* * Allow userspace to request a mountpoint be expired rather than * unmounting unconditionally. Unmount only happens if: * (1) the mark is already set (the mark is cleared by mntput()) * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount] */ if (flags & MNT_EXPIRE) { if (&mnt->mnt == current->fs->root.mnt || flags & (MNT_FORCE | MNT_DETACH)) return -EINVAL; /* * probably don't strictly need the lock here if we examined * all race cases, but it's a slowpath. */ lock_mount_hash(); if (mnt_get_count(mnt) != 2) { unlock_mount_hash(); return -EBUSY; } unlock_mount_hash(); if (!xchg(&mnt->mnt_expiry_mark, 1)) return -EAGAIN; } /* * If we may have to abort operations to get out of this * mount, and they will themselves hold resources we must * allow the fs to do things. In the Unix tradition of * 'Gee thats tricky lets do it in userspace' the umount_begin * might fail to complete on the first run through as other tasks * must return, and the like. Thats for the mount program to worry * about for the moment. */ if (flags & MNT_FORCE && sb->s_op->umount_begin) { sb->s_op->umount_begin(sb); } /* * No sense to grab the lock for this test, but test itself looks * somewhat bogus. Suggestions for better replacement? * Ho-hum... In principle, we might treat that as umount + switch * to rootfs. GC would eventually take care of the old vfsmount. * Actually it makes sense, especially if rootfs would contain a * /reboot - static binary that would close all descriptors and * call reboot(9). Then init(8) could umount root and exec /reboot. */ if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) { /* * Special case for "unmounting" root ... * we just try to remount it readonly. */ if (!capable(CAP_SYS_ADMIN)) return -EPERM; down_write(&sb->s_umount); if (!(sb->s_flags & MS_RDONLY)) retval = do_remount_sb(sb, MS_RDONLY, NULL, 0); up_write(&sb->s_umount); return retval; } namespace_lock(); lock_mount_hash(); event++; if (flags & MNT_DETACH) { if (!list_empty(&mnt->mnt_list)) umount_tree(mnt, UMOUNT_PROPAGATE); retval = 0; } else { shrink_submounts(mnt); retval = -EBUSY; if (!propagate_mount_busy(mnt, 2)) { if (!list_empty(&mnt->mnt_list)) umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); retval = 0; } } unlock_mount_hash(); namespace_unlock(); return retval; } /* * __detach_mounts - lazily unmount all mounts on the specified dentry * * During unlink, rmdir, and d_drop it is possible to loose the path * to an existing mountpoint, and wind up leaking the mount. * detach_mounts allows lazily unmounting those mounts instead of * leaking them. * * The caller may hold dentry->d_inode->i_mutex. 
*/ void __detach_mounts(struct dentry *dentry) { struct mountpoint *mp; struct mount *mnt; namespace_lock(); mp = lookup_mountpoint(dentry); if (IS_ERR_OR_NULL(mp)) goto out_unlock; lock_mount_hash(); event++; while (!hlist_empty(&mp->m_list)) { mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); if (mnt->mnt.mnt_flags & MNT_UMOUNT) { hlist_add_head(&mnt->mnt_umount.s_list, &unmounted); umount_mnt(mnt); } else umount_tree(mnt, UMOUNT_CONNECTED); } unlock_mount_hash(); put_mountpoint(mp); out_unlock: namespace_unlock(); } /* * Is the caller allowed to modify his namespace? */ static inline bool may_mount(void) { return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN); } static inline bool may_mandlock(void) { #ifndef CONFIG_MANDATORY_FILE_LOCKING return false; #endif return capable(CAP_SYS_ADMIN); } /* * Now umount can handle mount points as well as block devices. * This is important for filesystems which use unnamed block devices. * * We now support a flag for forced unmount like the other 'big iron' * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD */ SYSCALL_DEFINE2(umount, char __user *, name, int, flags) { struct path path; struct mount *mnt; int retval; int lookup_flags = 0; if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW)) return -EINVAL; if (!may_mount()) return -EPERM; if (!(flags & UMOUNT_NOFOLLOW)) lookup_flags |= LOOKUP_FOLLOW; retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path); if (retval) goto out; mnt = real_mount(path.mnt); retval = -EINVAL; if (path.dentry != path.mnt->mnt_root) goto dput_and_out; if (!check_mnt(mnt)) goto dput_and_out; if (mnt->mnt.mnt_flags & MNT_LOCKED) goto dput_and_out; retval = -EPERM; if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN)) goto dput_and_out; retval = do_umount(mnt, flags); dput_and_out: /* we mustn't call path_put() as that would clear mnt_expiry_mark */ dput(path.dentry); mntput_no_expire(mnt); out: return retval; } #ifdef __ARCH_WANT_SYS_OLDUMOUNT /* * The 2.0 compatible umount. No flags. */ SYSCALL_DEFINE1(oldumount, char __user *, name) { return sys_umount(name, 0); } #endif static bool is_mnt_ns_file(struct dentry *dentry) { /* Is this a proxy for a mount namespace? */ return dentry->d_op == &ns_dentry_operations && dentry->d_fsdata == &mntns_operations; } struct mnt_namespace *to_mnt_ns(struct ns_common *ns) { return container_of(ns, struct mnt_namespace, ns); } static bool mnt_ns_loop(struct dentry *dentry) { /* Could bind mounting the mount namespace inode cause a * mount namespace loop? 
*/ struct mnt_namespace *mnt_ns; if (!is_mnt_ns_file(dentry)) return false; mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode)); return current->nsproxy->mnt_ns->seq >= mnt_ns->seq; } struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, int flag) { struct mount *res, *p, *q, *r, *parent; if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt)) return ERR_PTR(-EINVAL); if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry)) return ERR_PTR(-EINVAL); res = q = clone_mnt(mnt, dentry, flag); if (IS_ERR(q)) return q; q->mnt_mountpoint = mnt->mnt_mountpoint; p = mnt; list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) { struct mount *s; if (!is_subdir(r->mnt_mountpoint, dentry)) continue; for (s = r; s; s = next_mnt(s, r)) { struct mount *t = NULL; if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(s)) { s = skip_mnt_tree(s); continue; } if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(s->mnt.mnt_root)) { s = skip_mnt_tree(s); continue; } while (p != s->mnt_parent) { p = p->mnt_parent; q = q->mnt_parent; } p = s; parent = q; q = clone_mnt(p, p->mnt.mnt_root, flag); if (IS_ERR(q)) goto out; lock_mount_hash(); list_add_tail(&q->mnt_list, &res->mnt_list); mnt_set_mountpoint(parent, p->mnt_mp, q); if (!list_empty(&parent->mnt_mounts)) { t = list_last_entry(&parent->mnt_mounts, struct mount, mnt_child); if (t->mnt_mp != p->mnt_mp) t = NULL; } attach_shadowed(q, parent, t); unlock_mount_hash(); } } return res; out: if (res) { lock_mount_hash(); umount_tree(res, UMOUNT_SYNC); unlock_mount_hash(); } return q; } /* Caller should check returned pointer for errors */ struct vfsmount *collect_mounts(struct path *path) { struct mount *tree; namespace_lock(); if (!check_mnt(real_mount(path->mnt))) tree = ERR_PTR(-EINVAL); else tree = copy_tree(real_mount(path->mnt), path->dentry, CL_COPY_ALL | CL_PRIVATE); namespace_unlock(); if (IS_ERR(tree)) return ERR_CAST(tree); return &tree->mnt; } void drop_collected_mounts(struct vfsmount *mnt) { namespace_lock(); lock_mount_hash(); umount_tree(real_mount(mnt), UMOUNT_SYNC); unlock_mount_hash(); namespace_unlock(); } /** * clone_private_mount - create a private clone of a path * * This creates a new vfsmount, which will be the clone of @path. The new will * not be attached anywhere in the namespace and will be private (i.e. changes * to the originating mount won't be propagated into this). * * Release with mntput(). */ struct vfsmount *clone_private_mount(struct path *path) { struct mount *old_mnt = real_mount(path->mnt); struct mount *new_mnt; if (IS_MNT_UNBINDABLE(old_mnt)) return ERR_PTR(-EINVAL); down_read(&namespace_sem); new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE); up_read(&namespace_sem); if (IS_ERR(new_mnt)) return ERR_CAST(new_mnt); return &new_mnt->mnt; } EXPORT_SYMBOL_GPL(clone_private_mount); int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg, struct vfsmount *root) { struct mount *mnt; int res = f(root, arg); if (res) return res; list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) { res = f(&mnt->mnt, arg); if (res) return res; } return 0; } static void cleanup_group_ids(struct mount *mnt, struct mount *end) { struct mount *p; for (p = mnt; p != end; p = next_mnt(p, mnt)) { if (p->mnt_group_id && !IS_MNT_SHARED(p)) mnt_release_group_id(p); } } static int invent_group_ids(struct mount *mnt, bool recurse) { struct mount *p; for (p = mnt; p; p = recurse ? 
next_mnt(p, mnt) : NULL) { if (!p->mnt_group_id && !IS_MNT_SHARED(p)) { int err = mnt_alloc_group_id(p); if (err) { cleanup_group_ids(mnt, p); return err; } } } return 0; } int count_mounts(struct mnt_namespace *ns, struct mount *mnt) { unsigned int max = READ_ONCE(sysctl_mount_max); unsigned int mounts = 0, old, pending, sum; struct mount *p; for (p = mnt; p; p = next_mnt(p, mnt)) mounts++; old = ns->mounts; pending = ns->pending_mounts; sum = old + pending; if ((old > sum) || (pending > sum) || (max < sum) || (mounts > (max - sum))) return -ENOSPC; ns->pending_mounts = pending + mounts; return 0; } /* * @source_mnt : mount tree to be attached * @nd : place the mount tree @source_mnt is attached * @parent_nd : if non-null, detach the source_mnt from its parent and * store the parent mount and mountpoint dentry. * (done when source_mnt is moved) * * NOTE: in the table below explains the semantics when a source mount * of a given type is attached to a destination mount of a given type. * --------------------------------------------------------------------------- * | BIND MOUNT OPERATION | * |************************************************************************** * | source-->| shared | private | slave | unbindable | * | dest | | | | | * | | | | | | | * | v | | | | | * |************************************************************************** * | shared | shared (++) | shared (+) | shared(+++)| invalid | * | | | | | | * |non-shared| shared (+) | private | slave (*) | invalid | * *************************************************************************** * A bind operation clones the source mount and mounts the clone on the * destination mount. * * (++) the cloned mount is propagated to all the mounts in the propagation * tree of the destination mount and the cloned mount is added to * the peer group of the source mount. * (+) the cloned mount is created under the destination mount and is marked * as shared. The cloned mount is added to the peer group of the source * mount. * (+++) the mount is propagated to all the mounts in the propagation tree * of the destination mount and the cloned mount is made slave * of the same master as that of the source mount. The cloned mount * is marked as 'shared and slave'. * (*) the cloned mount is made a slave of the same master as that of the * source mount. * * --------------------------------------------------------------------------- * | MOVE MOUNT OPERATION | * |************************************************************************** * | source-->| shared | private | slave | unbindable | * | dest | | | | | * | | | | | | | * | v | | | | | * |************************************************************************** * | shared | shared (+) | shared (+) | shared(+++) | invalid | * | | | | | | * |non-shared| shared (+*) | private | slave (*) | unbindable | * *************************************************************************** * * (+) the mount is moved to the destination. And is then propagated to * all the mounts in the propagation tree of the destination mount. * (+*) the mount is moved to the destination. * (+++) the mount is moved to the destination and is then propagated to * all the mounts belonging to the destination mount's propagation tree. * the mount is marked as 'shared and slave'. * (*) the mount continues to be a slave at the new location. * * if the source mount is a tree, the operations explained above is * applied to each mount in the tree. 
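 *
 * Illustrative sketch (not part of the original comment; paths are made
 * up): the "shared -> shared (++)" bind case above is what a userspace
 * sequence like
 *
 *	mount(NULL, "/mnt/dst", NULL, MS_SHARED, NULL);
 *	mount(NULL, "/mnt/src", NULL, MS_SHARED, NULL);
 *	mount("/mnt/src", "/mnt/dst", NULL, MS_BIND, NULL);
 *
 * sets up: the clone of the /mnt/src mount is attached under /mnt/dst,
 * propagated to every peer of /mnt/dst, and added to the peer group of
 * the /mnt/src mount.
 *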
* Must be called without spinlocks held, since this function can sleep * in allocations. */ static int attach_recursive_mnt(struct mount *source_mnt, struct mount *dest_mnt, struct mountpoint *dest_mp, struct path *parent_path) { HLIST_HEAD(tree_list); struct mnt_namespace *ns = dest_mnt->mnt_ns; struct mount *child, *p; struct hlist_node *n; int err; /* Is there space to add these mounts to the mount namespace? */ if (!parent_path) { err = count_mounts(ns, source_mnt); if (err) goto out; } if (IS_MNT_SHARED(dest_mnt)) { err = invent_group_ids(source_mnt, true); if (err) goto out; err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list); lock_mount_hash(); if (err) goto out_cleanup_ids; for (p = source_mnt; p; p = next_mnt(p, source_mnt)) set_mnt_shared(p); } else { lock_mount_hash(); } if (parent_path) { detach_mnt(source_mnt, parent_path); attach_mnt(source_mnt, dest_mnt, dest_mp); touch_mnt_namespace(source_mnt->mnt_ns); } else { mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt); commit_tree(source_mnt, NULL); } hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) { struct mount *q; hlist_del_init(&child->mnt_hash); q = __lookup_mnt_last(&child->mnt_parent->mnt, child->mnt_mountpoint); commit_tree(child, q); } unlock_mount_hash(); return 0; out_cleanup_ids: while (!hlist_empty(&tree_list)) { child = hlist_entry(tree_list.first, struct mount, mnt_hash); child->mnt_parent->mnt_ns->pending_mounts = 0; umount_tree(child, UMOUNT_SYNC); } unlock_mount_hash(); cleanup_group_ids(source_mnt, NULL); out: ns->pending_mounts = 0; return err; } static struct mountpoint *lock_mount(struct path *path) { struct vfsmount *mnt; struct dentry *dentry = path->dentry; retry: inode_lock(dentry->d_inode); if (unlikely(cant_mount(dentry))) { inode_unlock(dentry->d_inode); return ERR_PTR(-ENOENT); } namespace_lock(); mnt = lookup_mnt(path); if (likely(!mnt)) { struct mountpoint *mp = lookup_mountpoint(dentry); if (!mp) mp = new_mountpoint(dentry); if (IS_ERR(mp)) { namespace_unlock(); inode_unlock(dentry->d_inode); return mp; } return mp; } namespace_unlock(); inode_unlock(path->dentry->d_inode); path_put(path); path->mnt = mnt; dentry = path->dentry = dget(mnt->mnt_root); goto retry; } static void unlock_mount(struct mountpoint *where) { struct dentry *dentry = where->m_dentry; put_mountpoint(where); namespace_unlock(); inode_unlock(dentry->d_inode); } static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp) { if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER) return -EINVAL; if (d_is_dir(mp->m_dentry) != d_is_dir(mnt->mnt.mnt_root)) return -ENOTDIR; return attach_recursive_mnt(mnt, p, mp, NULL); } /* * Sanity check the flags to change_mnt_propagation. */ static int flags_to_propagation_type(int flags) { int type = flags & ~(MS_REC | MS_SILENT); /* Fail if any non-propagation flags are set */ if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) return 0; /* Only one propagation flag should be set */ if (!is_power_of_2(type)) return 0; return type; } /* * recursively change the type of the mountpoint. */ static int do_change_type(struct path *path, int flag) { struct mount *m; struct mount *mnt = real_mount(path->mnt); int recurse = flag & MS_REC; int type; int err = 0; if (path->dentry != path->mnt->mnt_root) return -EINVAL; type = flags_to_propagation_type(flag); if (!type) return -EINVAL; namespace_lock(); if (type == MS_SHARED) { err = invent_group_ids(mnt, recurse); if (err) goto out_unlock; } lock_mount_hash(); for (m = mnt; m; m = (recurse ? 
next_mnt(m, mnt) : NULL)) change_mnt_propagation(m, type); unlock_mount_hash(); out_unlock: namespace_unlock(); return err; } static bool has_locked_children(struct mount *mnt, struct dentry *dentry) { struct mount *child; list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { if (!is_subdir(child->mnt_mountpoint, dentry)) continue; if (child->mnt.mnt_flags & MNT_LOCKED) return true; } return false; } /* * do loopback mount. */ static int do_loopback(struct path *path, const char *old_name, int recurse) { struct path old_path; struct mount *mnt = NULL, *old, *parent; struct mountpoint *mp; int err; if (!old_name || !*old_name) return -EINVAL; err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path); if (err) return err; err = -EINVAL; if (mnt_ns_loop(old_path.dentry)) goto out; mp = lock_mount(path); err = PTR_ERR(mp); if (IS_ERR(mp)) goto out; old = real_mount(old_path.mnt); parent = real_mount(path->mnt); err = -EINVAL; if (IS_MNT_UNBINDABLE(old)) goto out2; if (!check_mnt(parent)) goto out2; if (!check_mnt(old) && old_path.dentry->d_op != &ns_dentry_operations) goto out2; if (!recurse && has_locked_children(old, old_path.dentry)) goto out2; if (recurse) mnt = copy_tree(old, old_path.dentry, CL_COPY_MNT_NS_FILE); else mnt = clone_mnt(old, old_path.dentry, 0); if (IS_ERR(mnt)) { err = PTR_ERR(mnt); goto out2; } mnt->mnt.mnt_flags &= ~MNT_LOCKED; err = graft_tree(mnt, parent, mp); if (err) { lock_mount_hash(); umount_tree(mnt, UMOUNT_SYNC); unlock_mount_hash(); } out2: unlock_mount(mp); out: path_put(&old_path); return err; } static int change_mount_flags(struct vfsmount *mnt, int ms_flags) { int error = 0; int readonly_request = 0; if (ms_flags & MS_RDONLY) readonly_request = 1; if (readonly_request == __mnt_is_readonly(mnt)) return 0; if (readonly_request) error = mnt_make_readonly(real_mount(mnt)); else __mnt_unmake_readonly(real_mount(mnt)); return error; } /* * change filesystem flags. dir should be a physical root of filesystem. * If you've mounted a non-root directory somewhere and want to do remount * on it - tough luck. */ static int do_remount(struct path *path, int flags, int mnt_flags, void *data) { int err; struct super_block *sb = path->mnt->mnt_sb; struct mount *mnt = real_mount(path->mnt); if (!check_mnt(mnt)) return -EINVAL; if (path->dentry != path->mnt->mnt_root) return -EINVAL; /* Don't allow changing of locked mnt flags. * * No locks need to be held here while testing the various * MNT_LOCK flags because those flags can never be cleared * once they are set. 
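 *
 * Illustrative note (not part of the original comment): a mount that was
 * propagated read-only into an unprivileged user namespace typically has
 * MNT_LOCK_READONLY set, so a remount there that omits MNT_READONLY fails
 * the first check below with -EPERM.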
*/ if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) && !(mnt_flags & MNT_READONLY)) { return -EPERM; } if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) && !(mnt_flags & MNT_NODEV)) { return -EPERM; } if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) && !(mnt_flags & MNT_NOSUID)) { return -EPERM; } if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) && !(mnt_flags & MNT_NOEXEC)) { return -EPERM; } if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) && ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) { return -EPERM; } err = security_sb_remount(sb, data); if (err) return err; down_write(&sb->s_umount); if (flags & MS_BIND) err = change_mount_flags(path->mnt, flags); else if (!capable(CAP_SYS_ADMIN)) err = -EPERM; else err = do_remount_sb(sb, flags, data, 0); if (!err) { lock_mount_hash(); mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK; mnt->mnt.mnt_flags = mnt_flags; touch_mnt_namespace(mnt->mnt_ns); unlock_mount_hash(); } up_write(&sb->s_umount); return err; } static inline int tree_contains_unbindable(struct mount *mnt) { struct mount *p; for (p = mnt; p; p = next_mnt(p, mnt)) { if (IS_MNT_UNBINDABLE(p)) return 1; } return 0; } static int do_move_mount(struct path *path, const char *old_name) { struct path old_path, parent_path; struct mount *p; struct mount *old; struct mountpoint *mp; int err; if (!old_name || !*old_name) return -EINVAL; err = kern_path(old_name, LOOKUP_FOLLOW, &old_path); if (err) return err; mp = lock_mount(path); err = PTR_ERR(mp); if (IS_ERR(mp)) goto out; old = real_mount(old_path.mnt); p = real_mount(path->mnt); err = -EINVAL; if (!check_mnt(p) || !check_mnt(old)) goto out1; if (old->mnt.mnt_flags & MNT_LOCKED) goto out1; err = -EINVAL; if (old_path.dentry != old_path.mnt->mnt_root) goto out1; if (!mnt_has_parent(old)) goto out1; if (d_is_dir(path->dentry) != d_is_dir(old_path.dentry)) goto out1; /* * Don't move a mount residing in a shared parent. */ if (IS_MNT_SHARED(old->mnt_parent)) goto out1; /* * Don't move a mount tree containing unbindable mounts to a destination * mount which is shared. */ if (IS_MNT_SHARED(p) && tree_contains_unbindable(old)) goto out1; err = -ELOOP; for (; mnt_has_parent(p); p = p->mnt_parent) if (p == old) goto out1; err = attach_recursive_mnt(old, real_mount(path->mnt), mp, &parent_path); if (err) goto out1; /* if the mount is moved, it should no longer be expire * automatically */ list_del_init(&old->mnt_expire); out1: unlock_mount(mp); out: if (!err) path_put(&parent_path); path_put(&old_path); return err; } static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype) { int err; const char *subtype = strchr(fstype, '.'); if (subtype) { subtype++; err = -EINVAL; if (!subtype[0]) goto err; } else subtype = ""; mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL); err = -ENOMEM; if (!mnt->mnt_sb->s_subtype) goto err; return mnt; err: mntput(mnt); return ERR_PTR(err); } /* * add a mount into a namespace's mount tree */ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags) { struct mountpoint *mp; struct mount *parent; int err; mnt_flags &= ~MNT_INTERNAL_FLAGS; mp = lock_mount(path); if (IS_ERR(mp)) return PTR_ERR(mp); parent = real_mount(path->mnt); err = -EINVAL; if (unlikely(!check_mnt(parent))) { /* that's acceptable only for automounts done in private ns */ if (!(mnt_flags & MNT_SHRINKABLE)) goto unlock; /* ... 
and for those we'd better have mountpoint still alive */ if (!parent->mnt_ns) goto unlock; } /* Refuse the same filesystem on the same mount point */ err = -EBUSY; if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb && path->mnt->mnt_root == path->dentry) goto unlock; err = -EINVAL; if (d_is_symlink(newmnt->mnt.mnt_root)) goto unlock; newmnt->mnt.mnt_flags = mnt_flags; err = graft_tree(newmnt, parent, mp); unlock: unlock_mount(mp); return err; } static bool mount_too_revealing(struct vfsmount *mnt, int *new_mnt_flags); /* * create a new mount for userspace and request it to be added into the * namespace's tree */ static int do_new_mount(struct path *path, const char *fstype, int flags, int mnt_flags, const char *name, void *data) { struct file_system_type *type; struct vfsmount *mnt; int err; if (!fstype) return -EINVAL; type = get_fs_type(fstype); if (!type) return -ENODEV; mnt = vfs_kern_mount(type, flags, name, data); if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) && !mnt->mnt_sb->s_subtype) mnt = fs_set_subtype(mnt, fstype); put_filesystem(type); if (IS_ERR(mnt)) return PTR_ERR(mnt); if (mount_too_revealing(mnt, &mnt_flags)) { mntput(mnt); return -EPERM; } err = do_add_mount(real_mount(mnt), path, mnt_flags); if (err) mntput(mnt); return err; } int finish_automount(struct vfsmount *m, struct path *path) { struct mount *mnt = real_mount(m); int err; /* The new mount record should have at least 2 refs to prevent it being * expired before we get a chance to add it */ BUG_ON(mnt_get_count(mnt) < 2); if (m->mnt_sb == path->mnt->mnt_sb && m->mnt_root == path->dentry) { err = -ELOOP; goto fail; } err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE); if (!err) return 0; fail: /* remove m from any expiration list it may be on */ if (!list_empty(&mnt->mnt_expire)) { namespace_lock(); list_del_init(&mnt->mnt_expire); namespace_unlock(); } mntput(m); mntput(m); return err; } /** * mnt_set_expiry - Put a mount on an expiration list * @mnt: The mount to list. * @expiry_list: The list to add the mount to. */ void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list) { namespace_lock(); list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list); namespace_unlock(); } EXPORT_SYMBOL(mnt_set_expiry); /* * process a list of expirable mountpoints with the intent of discarding any * mountpoints that aren't in use and haven't been touched since last we came * here */ void mark_mounts_for_expiry(struct list_head *mounts) { struct mount *mnt, *next; LIST_HEAD(graveyard); if (list_empty(mounts)) return; namespace_lock(); lock_mount_hash(); /* extract from the expiration list every vfsmount that matches the * following criteria: * - only referenced by its parent vfsmount * - still marked for expiry (marked on the last call here; marks are * cleared by mntput()) */ list_for_each_entry_safe(mnt, next, mounts, mnt_expire) { if (!xchg(&mnt->mnt_expiry_mark, 1) || propagate_mount_busy(mnt, 1)) continue; list_move(&mnt->mnt_expire, &graveyard); } while (!list_empty(&graveyard)) { mnt = list_first_entry(&graveyard, struct mount, mnt_expire); touch_mnt_namespace(mnt->mnt_ns); umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); } unlock_mount_hash(); namespace_unlock(); } EXPORT_SYMBOL_GPL(mark_mounts_for_expiry); /* * Ripoff of 'select_parent()' * * search the list of submounts for a given mountpoint, and move any * shrinkable submounts to the 'graveyard' list. 
*/ static int select_submounts(struct mount *parent, struct list_head *graveyard) { struct mount *this_parent = parent; struct list_head *next; int found = 0; repeat: next = this_parent->mnt_mounts.next; resume: while (next != &this_parent->mnt_mounts) { struct list_head *tmp = next; struct mount *mnt = list_entry(tmp, struct mount, mnt_child); next = tmp->next; if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE)) continue; /* * Descend a level if the d_mounts list is non-empty. */ if (!list_empty(&mnt->mnt_mounts)) { this_parent = mnt; goto repeat; } if (!propagate_mount_busy(mnt, 1)) { list_move_tail(&mnt->mnt_expire, graveyard); found++; } } /* * All done at this level ... ascend and resume the search */ if (this_parent != parent) { next = this_parent->mnt_child.next; this_parent = this_parent->mnt_parent; goto resume; } return found; } /* * process a list of expirable mountpoints with the intent of discarding any * submounts of a specific parent mountpoint * * mount_lock must be held for write */ static void shrink_submounts(struct mount *mnt) { LIST_HEAD(graveyard); struct mount *m; /* extract submounts of 'mountpoint' from the expiration list */ while (select_submounts(mnt, &graveyard)) { while (!list_empty(&graveyard)) { m = list_first_entry(&graveyard, struct mount, mnt_expire); touch_mnt_namespace(m->mnt_ns); umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC); } } } /* * Some copy_from_user() implementations do not return the exact number of * bytes remaining to copy on a fault. But copy_mount_options() requires that. * Note that this function differs from copy_from_user() in that it will oops * on bad values of `to', rather than returning a short copy. */ static long exact_copy_from_user(void *to, const void __user * from, unsigned long n) { char *t = to; const char __user *f = from; char c; if (!access_ok(VERIFY_READ, from, n)) return n; while (n) { if (__get_user(c, f)) { memset(t, 0, n); break; } *t++ = c; f++; n--; } return n; } void *copy_mount_options(const void __user * data) { int i; unsigned long size; char *copy; if (!data) return NULL; copy = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!copy) return ERR_PTR(-ENOMEM); /* We only care that *some* data at the address the user * gave us is valid. Just in case, we'll zero * the remainder of the page. */ /* copy_from_user cannot cross TASK_SIZE ! */ size = TASK_SIZE - (unsigned long)data; if (size > PAGE_SIZE) size = PAGE_SIZE; i = size - exact_copy_from_user(copy, data, size); if (!i) { kfree(copy); return ERR_PTR(-EFAULT); } if (i != PAGE_SIZE) memset(copy + i, 0, PAGE_SIZE - i); return copy; } char *copy_mount_string(const void __user *data) { return data ? strndup_user(data, PAGE_SIZE) : NULL; } /* * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to * be given to the mount() call (ie: read-only, no-dev, no-suid etc). * * data is a (void *) that can point to any structure up to * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent * information (or be NULL). * * Pre-0.97 versions of mount() didn't have a flags word. * When the flags word was introduced its top half was required * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9. * Therefore, if this magic number is present, it carries no information * and must be discarded. 
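 *
 * Illustrative (hedged) userspace sketch, not part of the original
 * comment; paths are made up. A read-only bind mount is the classic
 * two-step use of these flags:
 *
 *	mount("/srv/data", "/mnt/ro", NULL, MS_BIND, NULL);
 *	mount(NULL, "/mnt/ro", NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL);
 *
 * The first call takes the MS_BIND branch of do_mount() below, the second
 * the MS_REMOUNT branch, where change_mount_flags() applies the read-only
 * request.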
*/ long do_mount(const char *dev_name, const char __user *dir_name, const char *type_page, unsigned long flags, void *data_page) { struct path path; int retval = 0; int mnt_flags = 0; /* Discard magic */ if ((flags & MS_MGC_MSK) == MS_MGC_VAL) flags &= ~MS_MGC_MSK; /* Basic sanity checks */ if (data_page) ((char *)data_page)[PAGE_SIZE - 1] = 0; /* ... and get the mountpoint */ retval = user_path(dir_name, &path); if (retval) return retval; retval = security_sb_mount(dev_name, &path, type_page, flags, data_page); if (!retval && !may_mount()) retval = -EPERM; if (!retval && (flags & MS_MANDLOCK) && !may_mandlock()) retval = -EPERM; if (retval) goto dput_out; /* Default to relatime unless overriden */ if (!(flags & MS_NOATIME)) mnt_flags |= MNT_RELATIME; /* Separate the per-mountpoint flags */ if (flags & MS_NOSUID) mnt_flags |= MNT_NOSUID; if (flags & MS_NODEV) mnt_flags |= MNT_NODEV; if (flags & MS_NOEXEC) mnt_flags |= MNT_NOEXEC; if (flags & MS_NOATIME) mnt_flags |= MNT_NOATIME; if (flags & MS_NODIRATIME) mnt_flags |= MNT_NODIRATIME; if (flags & MS_STRICTATIME) mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME); if (flags & MS_RDONLY) mnt_flags |= MNT_READONLY; /* The default atime for remount is preservation */ if ((flags & MS_REMOUNT) && ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_STRICTATIME)) == 0)) { mnt_flags &= ~MNT_ATIME_MASK; mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK; } flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN | MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | MS_STRICTATIME); if (flags & MS_REMOUNT) retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, data_page); else if (flags & MS_BIND) retval = do_loopback(&path, dev_name, flags & MS_REC); else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) retval = do_change_type(&path, flags); else if (flags & MS_MOVE) retval = do_move_mount(&path, dev_name); else retval = do_new_mount(&path, type_page, flags, mnt_flags, dev_name, data_page); dput_out: path_put(&path); return retval; } static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns) { return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES); } static void dec_mnt_namespaces(struct ucounts *ucounts) { dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES); } static void free_mnt_ns(struct mnt_namespace *ns) { ns_free_inum(&ns->ns); dec_mnt_namespaces(ns->ucounts); put_user_ns(ns->user_ns); kfree(ns); } /* * Assign a sequence number so we can detect when we attempt to bind * mount a reference to an older mount namespace into the current * mount namespace, preventing reference counting loops. A 64bit * number incrementing at 10Ghz will take 12,427 years to wrap which * is effectively never, so we can ignore the possibility. 
*/ static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1); static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) { struct mnt_namespace *new_ns; struct ucounts *ucounts; int ret; ucounts = inc_mnt_namespaces(user_ns); if (!ucounts) return ERR_PTR(-ENOSPC); new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL); if (!new_ns) { dec_mnt_namespaces(ucounts); return ERR_PTR(-ENOMEM); } ret = ns_alloc_inum(&new_ns->ns); if (ret) { kfree(new_ns); dec_mnt_namespaces(ucounts); return ERR_PTR(ret); } new_ns->ns.ops = &mntns_operations; new_ns->seq = atomic64_add_return(1, &mnt_ns_seq); atomic_set(&new_ns->count, 1); new_ns->root = NULL; INIT_LIST_HEAD(&new_ns->list); init_waitqueue_head(&new_ns->poll); new_ns->event = 0; new_ns->user_ns = get_user_ns(user_ns); new_ns->ucounts = ucounts; new_ns->mounts = 0; new_ns->pending_mounts = 0; return new_ns; } struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns, struct user_namespace *user_ns, struct fs_struct *new_fs) { struct mnt_namespace *new_ns; struct vfsmount *rootmnt = NULL, *pwdmnt = NULL; struct mount *p, *q; struct mount *old; struct mount *new; int copy_flags; BUG_ON(!ns); if (likely(!(flags & CLONE_NEWNS))) { get_mnt_ns(ns); return ns; } old = ns->root; new_ns = alloc_mnt_ns(user_ns); if (IS_ERR(new_ns)) return new_ns; namespace_lock(); /* First pass: copy the tree topology */ copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE; if (user_ns != ns->user_ns) copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED; new = copy_tree(old, old->mnt.mnt_root, copy_flags); if (IS_ERR(new)) { namespace_unlock(); free_mnt_ns(new_ns); return ERR_CAST(new); } new_ns->root = new; list_add_tail(&new_ns->list, &new->mnt_list); /* * Second pass: switch the tsk->fs->* elements and mark new vfsmounts * as belonging to new namespace. We have already acquired a private * fs_struct, so tsk->fs->lock is not needed. */ p = old; q = new; while (p) { q->mnt_ns = new_ns; new_ns->mounts++; if (new_fs) { if (&p->mnt == new_fs->root.mnt) { new_fs->root.mnt = mntget(&q->mnt); rootmnt = &p->mnt; } if (&p->mnt == new_fs->pwd.mnt) { new_fs->pwd.mnt = mntget(&q->mnt); pwdmnt = &p->mnt; } } p = next_mnt(p, old); q = next_mnt(q, new); if (!q) break; while (p->mnt.mnt_root != q->mnt.mnt_root) p = next_mnt(p, old); } namespace_unlock(); if (rootmnt) mntput(rootmnt); if (pwdmnt) mntput(pwdmnt); return new_ns; } /** * create_mnt_ns - creates a private namespace and adds a root filesystem * @mnt: pointer to the new root filesystem mountpoint */ static struct mnt_namespace *create_mnt_ns(struct vfsmount *m) { struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns); if (!IS_ERR(new_ns)) { struct mount *mnt = real_mount(m); mnt->mnt_ns = new_ns; new_ns->root = mnt; new_ns->mounts++; list_add(&mnt->mnt_list, &new_ns->list); } else { mntput(m); } return new_ns; } struct dentry *mount_subtree(struct vfsmount *mnt, const char *name) { struct mnt_namespace *ns; struct super_block *s; struct path path; int err; ns = create_mnt_ns(mnt); if (IS_ERR(ns)) return ERR_CAST(ns); err = vfs_path_lookup(mnt->mnt_root, mnt, name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path); put_mnt_ns(ns); if (err) return ERR_PTR(err); /* trade a vfsmount reference for active sb one */ s = path.mnt->mnt_sb; atomic_inc(&s->s_active); mntput(path.mnt); /* lock the sucker */ down_write(&s->s_umount); /* ... 
and return the root of (sub)tree on it */ return path.dentry; } EXPORT_SYMBOL(mount_subtree); SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name, char __user *, type, unsigned long, flags, void __user *, data) { int ret; char *kernel_type; char *kernel_dev; void *options; kernel_type = copy_mount_string(type); ret = PTR_ERR(kernel_type); if (IS_ERR(kernel_type)) goto out_type; kernel_dev = copy_mount_string(dev_name); ret = PTR_ERR(kernel_dev); if (IS_ERR(kernel_dev)) goto out_dev; options = copy_mount_options(data); ret = PTR_ERR(options); if (IS_ERR(options)) goto out_data; ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options); kfree(options); out_data: kfree(kernel_dev); out_dev: kfree(kernel_type); out_type: return ret; } /* * Return true if path is reachable from root * * namespace_sem or mount_lock is held */ bool is_path_reachable(struct mount *mnt, struct dentry *dentry, const struct path *root) { while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) { dentry = mnt->mnt_mountpoint; mnt = mnt->mnt_parent; } return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry); } bool path_is_under(struct path *path1, struct path *path2) { bool res; read_seqlock_excl(&mount_lock); res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2); read_sequnlock_excl(&mount_lock); return res; } EXPORT_SYMBOL(path_is_under); /* * pivot_root Semantics: * Moves the root file system of the current process to the directory put_old, * makes new_root as the new root file system of the current process, and sets * root/cwd of all processes which had them on the current root to new_root. * * Restrictions: * The new_root and put_old must be directories, and must not be on the * same file system as the current process root. The put_old must be * underneath new_root, i.e. adding a non-zero number of /.. to the string * pointed to by put_old must yield the same directory as new_root. No other * file system may be mounted on put_old. After all, new_root is a mountpoint. * * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem. * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives * in this situation. * * Notes: * - we don't move root/cwd if they are not at the root (reason: if something * cared enough to change them, it's probably wrong to force them elsewhere) * - it's okay to pick a root that isn't the root of a file system, e.g. * /nfs/my_root where /nfs is the mount point. It must be a mountpoint, * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root * first. 
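 *
 * Illustrative (hedged) userspace sketch, not part of the original
 * comment; paths are made up and an oldroot/ directory is assumed to
 * exist under the new root. A typical container-style sequence that
 * satisfies the restrictions above is roughly:
 *
 *	mount("/newroot", "/newroot", NULL, MS_BIND | MS_REC, NULL);
 *	chdir("/newroot");
 *	syscall(SYS_pivot_root, ".", "oldroot");
 *	chroot(".");
 *	umount2("/oldroot", MNT_DETACH);
 *
 * The recursive bind mount makes new_root a mount point, and oldroot (a
 * directory underneath it) serves as put_old.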
*/ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, const char __user *, put_old) { struct path new, old, parent_path, root_parent, root; struct mount *new_mnt, *root_mnt, *old_mnt; struct mountpoint *old_mp, *root_mp; int error; if (!may_mount()) return -EPERM; error = user_path_dir(new_root, &new); if (error) goto out0; error = user_path_dir(put_old, &old); if (error) goto out1; error = security_sb_pivotroot(&old, &new); if (error) goto out2; get_fs_root(current->fs, &root); old_mp = lock_mount(&old); error = PTR_ERR(old_mp); if (IS_ERR(old_mp)) goto out3; error = -EINVAL; new_mnt = real_mount(new.mnt); root_mnt = real_mount(root.mnt); old_mnt = real_mount(old.mnt); if (IS_MNT_SHARED(old_mnt) || IS_MNT_SHARED(new_mnt->mnt_parent) || IS_MNT_SHARED(root_mnt->mnt_parent)) goto out4; if (!check_mnt(root_mnt) || !check_mnt(new_mnt)) goto out4; if (new_mnt->mnt.mnt_flags & MNT_LOCKED) goto out4; error = -ENOENT; if (d_unlinked(new.dentry)) goto out4; error = -EBUSY; if (new_mnt == root_mnt || old_mnt == root_mnt) goto out4; /* loop, on the same file system */ error = -EINVAL; if (root.mnt->mnt_root != root.dentry) goto out4; /* not a mountpoint */ if (!mnt_has_parent(root_mnt)) goto out4; /* not attached */ root_mp = root_mnt->mnt_mp; if (new.mnt->mnt_root != new.dentry) goto out4; /* not a mountpoint */ if (!mnt_has_parent(new_mnt)) goto out4; /* not attached */ /* make sure we can reach put_old from new_root */ if (!is_path_reachable(old_mnt, old.dentry, &new)) goto out4; /* make certain new is below the root */ if (!is_path_reachable(new_mnt, new.dentry, &root)) goto out4; root_mp->m_count++; /* pin it so it won't go away */ lock_mount_hash(); detach_mnt(new_mnt, &parent_path); detach_mnt(root_mnt, &root_parent); if (root_mnt->mnt.mnt_flags & MNT_LOCKED) { new_mnt->mnt.mnt_flags |= MNT_LOCKED; root_mnt->mnt.mnt_flags &= ~MNT_LOCKED; } /* mount old root on put_old */ attach_mnt(root_mnt, old_mnt, old_mp); /* mount new_root on / */ attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp); touch_mnt_namespace(current->nsproxy->mnt_ns); /* A moved mount should not expire automatically */ list_del_init(&new_mnt->mnt_expire); unlock_mount_hash(); chroot_fs_refs(&root, &new); put_mountpoint(root_mp); error = 0; out4: unlock_mount(old_mp); if (!error) { path_put(&root_parent); path_put(&parent_path); } out3: path_put(&root); out2: path_put(&old); out1: path_put(&new); out0: return error; } static void __init init_mount_tree(void) { struct vfsmount *mnt; struct mnt_namespace *ns; struct path root; struct file_system_type *type; type = get_fs_type("rootfs"); if (!type) panic("Can't find rootfs type"); mnt = vfs_kern_mount(type, 0, "rootfs", NULL); put_filesystem(type); if (IS_ERR(mnt)) panic("Can't create rootfs"); ns = create_mnt_ns(mnt); if (IS_ERR(ns)) panic("Can't allocate initial namespace"); init_task.nsproxy->mnt_ns = ns; get_mnt_ns(ns); root.mnt = mnt; root.dentry = mnt->mnt_root; mnt->mnt_flags |= MNT_LOCKED; set_fs_pwd(current->fs, &root); set_fs_root(current->fs, &root); } void __init mnt_init(void) { unsigned u; int err; mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount), 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); mount_hashtable = alloc_large_system_hash("Mount-cache", sizeof(struct hlist_head), mhash_entries, 19, 0, &m_hash_shift, &m_hash_mask, 0, 0); mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache", sizeof(struct hlist_head), mphash_entries, 19, 0, &mp_hash_shift, &mp_hash_mask, 0, 0); if (!mount_hashtable || !mountpoint_hashtable) panic("Failed 
to allocate mount hash table\n"); for (u = 0; u <= m_hash_mask; u++) INIT_HLIST_HEAD(&mount_hashtable[u]); for (u = 0; u <= mp_hash_mask; u++) INIT_HLIST_HEAD(&mountpoint_hashtable[u]); kernfs_init(); err = sysfs_init(); if (err) printk(KERN_WARNING "%s: sysfs_init error: %d\n", __func__, err); fs_kobj = kobject_create_and_add("fs", NULL); if (!fs_kobj) printk(KERN_WARNING "%s: kobj create error\n", __func__); init_rootfs(); init_mount_tree(); } void put_mnt_ns(struct mnt_namespace *ns) { if (!atomic_dec_and_test(&ns->count)) return; drop_collected_mounts(&ns->root->mnt); free_mnt_ns(ns); } struct vfsmount *kern_mount_data(struct file_system_type *type, void *data) { struct vfsmount *mnt; mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data); if (!IS_ERR(mnt)) { /* * it is a longterm mount, don't release mnt until * we unmount before file sys is unregistered */ real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL; } return mnt; } EXPORT_SYMBOL_GPL(kern_mount_data); void kern_unmount(struct vfsmount *mnt) { /* release long term mount so mount point can be released */ if (!IS_ERR_OR_NULL(mnt)) { real_mount(mnt)->mnt_ns = NULL; synchronize_rcu(); /* yecchhh... */ mntput(mnt); } } EXPORT_SYMBOL(kern_unmount); bool our_mnt(struct vfsmount *mnt) { return check_mnt(real_mount(mnt)); } bool current_chrooted(void) { /* Does the current process have a non-standard root */ struct path ns_root; struct path fs_root; bool chrooted; /* Find the namespace root */ ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt; ns_root.dentry = ns_root.mnt->mnt_root; path_get(&ns_root); while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root)) ; get_fs_root(current->fs, &fs_root); chrooted = !path_equal(&fs_root, &ns_root); path_put(&fs_root); path_put(&ns_root); return chrooted; } static bool mnt_already_visible(struct mnt_namespace *ns, struct vfsmount *new, int *new_mnt_flags) { int new_flags = *new_mnt_flags; struct mount *mnt; bool visible = false; down_read(&namespace_sem); list_for_each_entry(mnt, &ns->list, mnt_list) { struct mount *child; int mnt_flags; if (mnt->mnt.mnt_sb->s_type != new->mnt_sb->s_type) continue; /* This mount is not fully visible if it's root directory * is not the root directory of the filesystem. */ if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root) continue; /* A local view of the mount flags */ mnt_flags = mnt->mnt.mnt_flags; /* Don't miss readonly hidden in the superblock flags */ if (mnt->mnt.mnt_sb->s_flags & MS_RDONLY) mnt_flags |= MNT_LOCK_READONLY; /* Verify the mount flags are equal to or more permissive * than the proposed new mount. */ if ((mnt_flags & MNT_LOCK_READONLY) && !(new_flags & MNT_READONLY)) continue; if ((mnt_flags & MNT_LOCK_ATIME) && ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK))) continue; /* This mount is not fully visible if there are any * locked child mounts that cover anything except for * empty directories. */ list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { struct inode *inode = child->mnt_mountpoint->d_inode; /* Only worry about locked mounts */ if (!(child->mnt.mnt_flags & MNT_LOCKED)) continue; /* Is the directory permanetly empty? 
*/ if (!is_empty_dir_inode(inode)) goto next; } /* Preserve the locked attributes */ *new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \ MNT_LOCK_ATIME); visible = true; goto found; next: ; } found: up_read(&namespace_sem); return visible; } static bool mount_too_revealing(struct vfsmount *mnt, int *new_mnt_flags) { const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV; struct mnt_namespace *ns = current->nsproxy->mnt_ns; unsigned long s_iflags; if (ns->user_ns == &init_user_ns) return false; /* Can this filesystem be too revealing? */ s_iflags = mnt->mnt_sb->s_iflags; if (!(s_iflags & SB_I_USERNS_VISIBLE)) return false; if ((s_iflags & required_iflags) != required_iflags) { WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n", required_iflags); return true; } return !mnt_already_visible(ns, mnt, new_mnt_flags); } bool mnt_may_suid(struct vfsmount *mnt) { /* * Foreign mounts (accessed via fchdir or through /proc * symlinks) are always treated as if they are nosuid. This * prevents namespaces from trusting potentially unsafe * suid/sgid bits, file caps, or security labels that originate * in other namespaces. */ return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) && current_in_userns(mnt->mnt_sb->s_user_ns); } static struct ns_common *mntns_get(struct task_struct *task) { struct ns_common *ns = NULL; struct nsproxy *nsproxy; task_lock(task); nsproxy = task->nsproxy; if (nsproxy) { ns = &nsproxy->mnt_ns->ns; get_mnt_ns(to_mnt_ns(ns)); } task_unlock(task); return ns; } static void mntns_put(struct ns_common *ns) { put_mnt_ns(to_mnt_ns(ns)); } static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns) { struct fs_struct *fs = current->fs; struct mnt_namespace *mnt_ns = to_mnt_ns(ns); struct path root; if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) || !ns_capable(current_user_ns(), CAP_SYS_CHROOT) || !ns_capable(current_user_ns(), CAP_SYS_ADMIN)) return -EPERM; if (fs->users != 1) return -EINVAL; get_mnt_ns(mnt_ns); put_mnt_ns(nsproxy->mnt_ns); nsproxy->mnt_ns = mnt_ns; /* Find the root */ root.mnt = &mnt_ns->root->mnt; root.dentry = mnt_ns->root->mnt.mnt_root; path_get(&root); while(d_mountpoint(root.dentry) && follow_down_one(&root)) ; /* Update the pwd and root */ set_fs_pwd(fs, &root); set_fs_root(fs, &root); path_put(&root); return 0; } static struct user_namespace *mntns_owner(struct ns_common *ns) { return to_mnt_ns(ns)->user_ns; } const struct proc_ns_operations mntns_operations = { .name = "mnt", .type = CLONE_NEWNS, .get = mntns_get, .put = mntns_put, .install = mntns_install, .owner = mntns_owner, };
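/*
 * Illustrative (hedged) userspace sketch, not part of the original source;
 * the pid is made up. mntns_install() above is the kernel side of joining
 * a mount namespace with setns(2), e.g.
 *
 *	int fd = open("/proc/1234/ns/mnt", O_RDONLY);
 *	setns(fd, CLONE_NEWNS);
 *
 * which, per the checks in mntns_install(), needs CAP_SYS_ADMIN in the
 * target namespace's owning user namespace plus CAP_SYS_CHROOT and
 * CAP_SYS_ADMIN in the caller's user namespace, and fails with -EINVAL if
 * the caller shares its fs_struct with another task.
 */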
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include <linux/etherdevice.h> #include <linux/of_net.h> #include <linux/pci.h> #include <linux/bpf.h> /* Local includes */ #include "i40e.h" #include "i40e_diag.h" #include "i40e_xsk.h" #include <net/udp_tunnel.h> #include <net/xdp_sock.h> /* All i40e tracepoints are defined by the include below, which * must be included exactly once across the whole kernel with * CREATE_TRACE_POINTS defined */ #define CREATE_TRACE_POINTS #include "i40e_trace.h" const char i40e_driver_name[] = "i40e"; static const char i40e_driver_string[] = "Intel(R) Ethernet Connection XL710 Network Driver"; #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 2 #define DRV_VERSION_MINOR 8 #define DRV_VERSION_BUILD 20 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN const char i40e_driver_version_str[] = DRV_VERSION; static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation."; /* a bit of forward declarations */ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi); static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired); static int i40e_add_vsi(struct i40e_vsi *vsi); static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi); static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit); static int i40e_setup_misc_vector(struct i40e_pf *pf); static void i40e_determine_queue_usage(struct i40e_pf *pf); static int i40e_setup_pf_filter_control(struct i40e_pf *pf); static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired); static int i40e_reset(struct i40e_pf *pf); static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired); static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf); static int i40e_restore_interrupt_scheme(struct i40e_pf *pf); static bool i40e_check_recovery_mode(struct i40e_pf *pf); static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw); static void i40e_fdir_sb_setup(struct i40e_pf *pf); static int i40e_veb_get_bw_info(struct i40e_veb *veb); static int i40e_get_capabilities(struct i40e_pf *pf, enum i40e_admin_queue_opc list_type); /* i40e_pci_tbl - PCI Device ID Table * * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 
0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0}, /* required last entry */ {0, } }; MODULE_DEVICE_TABLE(pci, i40e_pci_tbl); #define I40E_MAX_VF_COUNT 128 static int debug = -1; module_param(debug, uint, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)"); MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver"); MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); static struct workqueue_struct *i40e_wq; /** * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to fill out * @size: size of memory requested * @alignment: what to align the allocation to **/ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem, u64 size, u32 alignment) { struct i40e_pf *pf = (struct i40e_pf *)hw->back; mem->size = ALIGN(size, alignment); mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa, GFP_KERNEL); if (!mem->va) return -ENOMEM; return 0; } /** * i40e_free_dma_mem_d - OS specific memory free for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) { struct i40e_pf *pf = (struct i40e_pf *)hw->back; dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa); mem->va = NULL; mem->pa = 0; mem->size = 0; return 0; } /** * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to fill out * @size: size of memory requested **/ int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem, u32 size) { mem->size = size; mem->va = kzalloc(size, GFP_KERNEL); if (!mem->va) return -ENOMEM; return 0; } /** * i40e_free_virt_mem_d - OS specific memory free for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem) { /* it's ok to kfree a NULL pointer */ kfree(mem->va); mem->va = NULL; mem->size = 0; return 0; } /** * i40e_get_lump - find a lump of free generic resource * @pf: board private structure * @pile: the pile of resource to search * @needed: the number of items needed * @id: an owner id to stick on the items assigned * * Returns the base item index of the lump, or negative for error * * The search_hint trick and lack of advanced fit-finding only work * because we're highly likely to have all the same size lump requests. * Linear search time and any fragmentation should be minimal. **/ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, u16 needed, u16 id) { int ret = -ENOMEM; int i, j; if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) { dev_info(&pf->pdev->dev, "param err: pile=%s needed=%d id=0x%04x\n", pile ? "<valid>" : "<null>", needed, id); return -EINVAL; } /* start the linear search with an imperfect hint */ i = pile->search_hint; while (i < pile->num_entries) { /* skip already allocated entries */ if (pile->list[i] & I40E_PILE_VALID_BIT) { i++; continue; } /* do we have enough in this lump? 
*/ for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) { if (pile->list[i+j] & I40E_PILE_VALID_BIT) break; } if (j == needed) { /* there was enough, so assign it to the requestor */ for (j = 0; j < needed; j++) pile->list[i+j] = id | I40E_PILE_VALID_BIT; ret = i; pile->search_hint = i + j; break; } /* not enough, so skip over it and continue looking */ i += j; } return ret; } /** * i40e_put_lump - return a lump of generic resource * @pile: the pile of resource to search * @index: the base item index * @id: the owner id of the items assigned * * Returns the count of items in the lump **/ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id) { int valid_id = (id | I40E_PILE_VALID_BIT); int count = 0; int i; if (!pile || index >= pile->num_entries) return -EINVAL; for (i = index; i < pile->num_entries && pile->list[i] == valid_id; i++) { pile->list[i] = 0; count++; } if (count && index < pile->search_hint) pile->search_hint = index; return count; } /** * i40e_find_vsi_from_id - searches for the vsi with the given id * @pf: the pf structure to search for the vsi * @id: id of the vsi it is searching for **/ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) { int i; for (i = 0; i < pf->num_alloc_vsi; i++) if (pf->vsi[i] && (pf->vsi[i]->id == id)) return pf->vsi[i]; return NULL; } /** * i40e_service_event_schedule - Schedule the service task to wake up * @pf: board private structure * * If not already scheduled, this puts the task into the work queue **/ void i40e_service_event_schedule(struct i40e_pf *pf) { if ((!test_bit(__I40E_DOWN, pf->state) && !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) || test_bit(__I40E_RECOVERY_MODE, pf->state)) queue_work(i40e_wq, &pf->service_task); } /** * i40e_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure * * If any port has noticed a Tx timeout, it is likely that the whole * device is munged, not just the one netdev port, so go for the full * reset. 
**/ static void i40e_tx_timeout(struct net_device *netdev) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_ring *tx_ring = NULL; unsigned int i, hung_queue = 0; u32 head, val; pf->tx_timeout_count++; /* find the stopped queue the same way the stack does */ for (i = 0; i < netdev->num_tx_queues; i++) { struct netdev_queue *q; unsigned long trans_start; q = netdev_get_tx_queue(netdev, i); trans_start = q->trans_start; if (netif_xmit_stopped(q) && time_after(jiffies, (trans_start + netdev->watchdog_timeo))) { hung_queue = i; break; } } if (i == netdev->num_tx_queues) { netdev_info(netdev, "tx_timeout: no netdev hung queue found\n"); } else { /* now that we have an index, find the tx_ring struct */ for (i = 0; i < vsi->num_queue_pairs; i++) { if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { if (hung_queue == vsi->tx_rings[i]->queue_index) { tx_ring = vsi->tx_rings[i]; break; } } } } if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20))) pf->tx_timeout_recovery_level = 1; /* reset after some time */ else if (time_before(jiffies, (pf->tx_timeout_last_recovery + netdev->watchdog_timeo))) return; /* don't do any new action before the next timeout */ /* don't kick off another recovery if one is already pending */ if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state)) return; if (tx_ring) { head = i40e_get_head(tx_ring); /* Read interrupt register */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) val = rd32(&pf->hw, I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx + tx_ring->vsi->base_vector - 1)); else val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n", vsi->seid, hung_queue, tx_ring->next_to_clean, head, tx_ring->next_to_use, readl(tx_ring->tail), val); } pf->tx_timeout_last_recovery = jiffies; netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", pf->tx_timeout_recovery_level, hung_queue); switch (pf->tx_timeout_recovery_level) { case 1: set_bit(__I40E_PF_RESET_REQUESTED, pf->state); break; case 2: set_bit(__I40E_CORE_RESET_REQUESTED, pf->state); break; case 3: set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); break; default: netdev_err(netdev, "tx_timeout recovery unsuccessful\n"); break; } i40e_service_event_schedule(pf); pf->tx_timeout_recovery_level++; } /** * i40e_get_vsi_stats_struct - Get System Network Statistics * @vsi: the VSI we care about * * Returns the address of the device statistics structure. * The statistics are actually updated from the service task. **/ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi) { return &vsi->net_stats; } /** * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring * @ring: Tx ring to get statistics from * @stats: statistics entry to be updated **/ static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring, struct rtnl_link_stats64 *stats) { u64 bytes, packets; unsigned int start; do { start = u64_stats_fetch_begin_irq(&ring->syncp); packets = ring->stats.packets; bytes = ring->stats.bytes; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); stats->tx_packets += packets; stats->tx_bytes += bytes; } /** * i40e_get_netdev_stats_struct - Get statistics for netdev interface * @netdev: network interface device structure * @stats: data structure to store statistics * * Returns the address of the device statistics structure. * The statistics are actually updated from the service task. 
**/ static void i40e_get_netdev_stats_struct(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); struct i40e_ring *ring; int i; if (test_bit(__I40E_VSI_DOWN, vsi->state)) return; if (!vsi->tx_rings) return; rcu_read_lock(); for (i = 0; i < vsi->num_queue_pairs; i++) { u64 bytes, packets; unsigned int start; ring = READ_ONCE(vsi->tx_rings[i]); if (!ring) continue; i40e_get_netdev_stats_struct_tx(ring, stats); if (i40e_enabled_xdp_vsi(vsi)) { ring++; i40e_get_netdev_stats_struct_tx(ring, stats); } ring++; do { start = u64_stats_fetch_begin_irq(&ring->syncp); packets = ring->stats.packets; bytes = ring->stats.bytes; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; } rcu_read_unlock(); /* following stats updated by i40e_watchdog_subtask() */ stats->multicast = vsi_stats->multicast; stats->tx_errors = vsi_stats->tx_errors; stats->tx_dropped = vsi_stats->tx_dropped; stats->rx_errors = vsi_stats->rx_errors; stats->rx_dropped = vsi_stats->rx_dropped; stats->rx_crc_errors = vsi_stats->rx_crc_errors; stats->rx_length_errors = vsi_stats->rx_length_errors; } /** * i40e_vsi_reset_stats - Resets all stats of the given vsi * @vsi: the VSI to have its stats reset **/ void i40e_vsi_reset_stats(struct i40e_vsi *vsi) { struct rtnl_link_stats64 *ns; int i; if (!vsi) return; ns = i40e_get_vsi_stats_struct(vsi); memset(ns, 0, sizeof(*ns)); memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets)); memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats)); memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets)); if (vsi->rx_rings && vsi->rx_rings[0]) { for (i = 0; i < vsi->num_queue_pairs; i++) { memset(&vsi->rx_rings[i]->stats, 0, sizeof(vsi->rx_rings[i]->stats)); memset(&vsi->rx_rings[i]->rx_stats, 0, sizeof(vsi->rx_rings[i]->rx_stats)); memset(&vsi->tx_rings[i]->stats, 0, sizeof(vsi->tx_rings[i]->stats)); memset(&vsi->tx_rings[i]->tx_stats, 0, sizeof(vsi->tx_rings[i]->tx_stats)); } } vsi->stat_offsets_loaded = false; } /** * i40e_pf_reset_stats - Reset all of the stats for the given PF * @pf: the PF to be reset **/ void i40e_pf_reset_stats(struct i40e_pf *pf) { int i; memset(&pf->stats, 0, sizeof(pf->stats)); memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets)); pf->stat_offsets_loaded = false; for (i = 0; i < I40E_MAX_VEB; i++) { if (pf->veb[i]) { memset(&pf->veb[i]->stats, 0, sizeof(pf->veb[i]->stats)); memset(&pf->veb[i]->stats_offsets, 0, sizeof(pf->veb[i]->stats_offsets)); memset(&pf->veb[i]->tc_stats, 0, sizeof(pf->veb[i]->tc_stats)); memset(&pf->veb[i]->tc_stats_offsets, 0, sizeof(pf->veb[i]->tc_stats_offsets)); pf->veb[i]->stat_offsets_loaded = false; } } pf->hw_csum_rx_error = 0; } /** * i40e_stat_update48 - read and update a 48 bit stat from the chip * @hw: ptr to the hardware info * @hireg: the high 32 bit reg to read * @loreg: the low 32 bit reg to read * @offset_loaded: has the initial offset been loaded yet * @offset: ptr to current offset value * @stat: ptr to the stat * * Since the device stats are not reset at PFReset, they likely will not * be zeroed when the driver starts. We'll save the first values read * and use them as offsets to be subtracted from the raw values in order * to report stats that count from zero. In the process, we also manage * the potential roll-over. 
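 *
 * Worked example (not part of the original comment): if *offset was
 * loaded as 0xFFFFFFFFFFF0 and the next raw read is 0x10, new_data is
 * below *offset, so the rollover branch reports
 * (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20 before masking to 48 bits.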
**/ static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, bool offset_loaded, u64 *offset, u64 *stat) { u64 new_data; if (hw->device_id == I40E_DEV_ID_QEMU) { new_data = rd32(hw, loreg); new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; } else { new_data = rd64(hw, loreg); } if (!offset_loaded) *offset = new_data; if (likely(new_data >= *offset)) *stat = new_data - *offset; else *stat = (new_data + BIT_ULL(48)) - *offset; *stat &= 0xFFFFFFFFFFFFULL; } /** * i40e_stat_update32 - read and update a 32 bit stat from the chip * @hw: ptr to the hardware info * @reg: the hw reg to read * @offset_loaded: has the initial offset been loaded yet * @offset: ptr to current offset value * @stat: ptr to the stat **/ static void i40e_stat_update32(struct i40e_hw *hw, u32 reg, bool offset_loaded, u64 *offset, u64 *stat) { u32 new_data; new_data = rd32(hw, reg); if (!offset_loaded) *offset = new_data; if (likely(new_data >= *offset)) *stat = (u32)(new_data - *offset); else *stat = (u32)((new_data + BIT_ULL(32)) - *offset); } /** * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat * @hw: ptr to the hardware info * @reg: the hw reg to read and clear * @stat: ptr to the stat **/ static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat) { u32 new_data = rd32(hw, reg); wr32(hw, reg, 1); /* must write a nonzero value to clear register */ *stat += new_data; } /** * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters. * @vsi: the VSI to be updated **/ void i40e_update_eth_stats(struct i40e_vsi *vsi) { int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx); struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */ es = &vsi->eth_stats; oes = &vsi->eth_stats_offsets; /* Gather up the stats that the hw collects */ i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx), vsi->stat_offsets_loaded, &oes->tx_errors, &es->tx_errors); i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), vsi->stat_offsets_loaded, &oes->rx_discards, &es->rx_discards); i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx), vsi->stat_offsets_loaded, &oes->rx_unknown_protocol, &es->rx_unknown_protocol); i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx), I40E_GLV_GORCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_bytes, &es->rx_bytes); i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx), I40E_GLV_UPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_unicast, &es->rx_unicast); i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx), I40E_GLV_MPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_multicast, &es->rx_multicast); i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx), I40E_GLV_BPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_broadcast, &es->rx_broadcast); i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx), I40E_GLV_GOTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_bytes, &es->tx_bytes); i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx), I40E_GLV_UPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_unicast, &es->tx_unicast); i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx), I40E_GLV_MPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_multicast, &es->tx_multicast); i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx), I40E_GLV_BPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_broadcast, &es->tx_broadcast); vsi->stat_offsets_loaded = true; } /** * i40e_update_veb_stats - Update Switch component statistics * @veb: the VEB being updated **/ void i40e_update_veb_stats(struct i40e_veb *veb) { struct i40e_pf *pf = 
veb->pf; struct i40e_hw *hw = &pf->hw; struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */ struct i40e_veb_tc_stats *veb_oes; struct i40e_veb_tc_stats *veb_es; int i, idx = 0; idx = veb->stats_idx; es = &veb->stats; oes = &veb->stats_offsets; veb_es = &veb->tc_stats; veb_oes = &veb->tc_stats_offsets; /* Gather up the stats that the hw collects */ i40e_stat_update32(hw, I40E_GLSW_TDPC(idx), veb->stat_offsets_loaded, &oes->tx_discards, &es->tx_discards); if (hw->revision_id > 0) i40e_stat_update32(hw, I40E_GLSW_RUPP(idx), veb->stat_offsets_loaded, &oes->rx_unknown_protocol, &es->rx_unknown_protocol); i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx), veb->stat_offsets_loaded, &oes->rx_bytes, &es->rx_bytes); i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx), veb->stat_offsets_loaded, &oes->rx_unicast, &es->rx_unicast); i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx), veb->stat_offsets_loaded, &oes->rx_multicast, &es->rx_multicast); i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx), veb->stat_offsets_loaded, &oes->rx_broadcast, &es->rx_broadcast); i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx), veb->stat_offsets_loaded, &oes->tx_bytes, &es->tx_bytes); i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx), veb->stat_offsets_loaded, &oes->tx_unicast, &es->tx_unicast); i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx), veb->stat_offsets_loaded, &oes->tx_multicast, &es->tx_multicast); i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx), veb->stat_offsets_loaded, &oes->tx_broadcast, &es->tx_broadcast); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx), I40E_GLVEBTC_RPCL(i, idx), veb->stat_offsets_loaded, &veb_oes->tc_rx_packets[i], &veb_es->tc_rx_packets[i]); i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx), I40E_GLVEBTC_RBCL(i, idx), veb->stat_offsets_loaded, &veb_oes->tc_rx_bytes[i], &veb_es->tc_rx_bytes[i]); i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx), I40E_GLVEBTC_TPCL(i, idx), veb->stat_offsets_loaded, &veb_oes->tc_tx_packets[i], &veb_es->tc_tx_packets[i]); i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx), I40E_GLVEBTC_TBCL(i, idx), veb->stat_offsets_loaded, &veb_oes->tc_tx_bytes[i], &veb_es->tc_tx_bytes[i]); } veb->stat_offsets_loaded = true; } /** * i40e_update_vsi_stats - Update the vsi statistics counters. * @vsi: the VSI to be updated * * There are a few instances where we store the same stat in a * couple of different structs. This is partly because we have * the netdev stats that need to be filled out, which is slightly * different from the "eth_stats" defined by the chip and used in * VF communications. We sort it out here. 
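 * For example, the device's per-VSI rx_discards counter is what the stack
 * sees as rtnl_link_stats64 rx_dropped, and the device's rx_multicast feeds
 * the netdev multicast counter; the copying below keeps the two views
 * consistent.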
**/ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; struct rtnl_link_stats64 *ons; struct rtnl_link_stats64 *ns; /* netdev stats */ struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */ u32 tx_restart, tx_busy; struct i40e_ring *p; u32 rx_page, rx_buf; u64 bytes, packets; unsigned int start; u64 tx_linearize; u64 tx_force_wb; u64 rx_p, rx_b; u64 tx_p, tx_b; u16 q; if (test_bit(__I40E_VSI_DOWN, vsi->state) || test_bit(__I40E_CONFIG_BUSY, pf->state)) return; ns = i40e_get_vsi_stats_struct(vsi); ons = &vsi->net_stats_offsets; es = &vsi->eth_stats; oes = &vsi->eth_stats_offsets; /* Gather up the netdev and vsi stats that the driver collects * on the fly during packet processing */ rx_b = rx_p = 0; tx_b = tx_p = 0; tx_restart = tx_busy = tx_linearize = tx_force_wb = 0; rx_page = 0; rx_buf = 0; rcu_read_lock(); for (q = 0; q < vsi->num_queue_pairs; q++) { /* locate Tx ring */ p = READ_ONCE(vsi->tx_rings[q]); do { start = u64_stats_fetch_begin_irq(&p->syncp); packets = p->stats.packets; bytes = p->stats.bytes; } while (u64_stats_fetch_retry_irq(&p->syncp, start)); tx_b += bytes; tx_p += packets; tx_restart += p->tx_stats.restart_queue; tx_busy += p->tx_stats.tx_busy; tx_linearize += p->tx_stats.tx_linearize; tx_force_wb += p->tx_stats.tx_force_wb; /* Rx queue is part of the same block as Tx queue */ p = &p[1]; do { start = u64_stats_fetch_begin_irq(&p->syncp); packets = p->stats.packets; bytes = p->stats.bytes; } while (u64_stats_fetch_retry_irq(&p->syncp, start)); rx_b += bytes; rx_p += packets; rx_buf += p->rx_stats.alloc_buff_failed; rx_page += p->rx_stats.alloc_page_failed; } rcu_read_unlock(); vsi->tx_restart = tx_restart; vsi->tx_busy = tx_busy; vsi->tx_linearize = tx_linearize; vsi->tx_force_wb = tx_force_wb; vsi->rx_page_failed = rx_page; vsi->rx_buf_failed = rx_buf; ns->rx_packets = rx_p; ns->rx_bytes = rx_b; ns->tx_packets = tx_p; ns->tx_bytes = tx_b; /* update netdev stats from eth stats */ i40e_update_eth_stats(vsi); ons->tx_errors = oes->tx_errors; ns->tx_errors = es->tx_errors; ons->multicast = oes->rx_multicast; ns->multicast = es->rx_multicast; ons->rx_dropped = oes->rx_discards; ns->rx_dropped = es->rx_discards; ons->tx_dropped = oes->tx_discards; ns->tx_dropped = es->tx_discards; /* pull in a couple PF stats if this is the main vsi */ if (vsi == pf->vsi[pf->lan_vsi]) { ns->rx_crc_errors = pf->stats.crc_errors; ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes; ns->rx_length_errors = pf->stats.rx_length_errors; } } /** * i40e_update_pf_stats - Update the PF statistics counters. 
* @pf: the PF to be updated **/ static void i40e_update_pf_stats(struct i40e_pf *pf) { struct i40e_hw_port_stats *osd = &pf->stats_offsets; struct i40e_hw_port_stats *nsd = &pf->stats; struct i40e_hw *hw = &pf->hw; u32 val; int i; i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), I40E_GLPRT_GORCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_bytes, &nsd->eth.rx_bytes); i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port), I40E_GLPRT_GOTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_bytes, &nsd->eth.tx_bytes); i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_discards, &nsd->eth.rx_discards); i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), I40E_GLPRT_UPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_unicast, &nsd->eth.rx_unicast); i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), I40E_GLPRT_MPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_multicast, &nsd->eth.rx_multicast); i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port), I40E_GLPRT_BPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_broadcast, &nsd->eth.rx_broadcast); i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port), I40E_GLPRT_UPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_unicast, &nsd->eth.tx_unicast); i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port), I40E_GLPRT_MPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_multicast, &nsd->eth.tx_multicast); i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port), I40E_GLPRT_BPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_broadcast, &nsd->eth.tx_broadcast); i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), pf->stat_offsets_loaded, &osd->tx_dropped_link_down, &nsd->tx_dropped_link_down); i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), pf->stat_offsets_loaded, &osd->crc_errors, &nsd->crc_errors); i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), pf->stat_offsets_loaded, &osd->illegal_bytes, &nsd->illegal_bytes); i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), pf->stat_offsets_loaded, &osd->mac_local_faults, &nsd->mac_local_faults); i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), pf->stat_offsets_loaded, &osd->mac_remote_faults, &nsd->mac_remote_faults); i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), pf->stat_offsets_loaded, &osd->rx_length_errors, &nsd->rx_length_errors); i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), pf->stat_offsets_loaded, &osd->link_xon_rx, &nsd->link_xon_rx); i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xon_tx, &nsd->link_xon_tx); i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), pf->stat_offsets_loaded, &osd->link_xoff_rx, &nsd->link_xoff_rx); i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xoff_tx, &nsd->link_xoff_tx); for (i = 0; i < 8; i++) { i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xoff_rx[i], &nsd->priority_xoff_rx[i]); i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xon_rx[i], &nsd->priority_xon_rx[i]); i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xon_tx[i], &nsd->priority_xon_tx[i]); i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xoff_tx[i], &nsd->priority_xoff_tx[i]); i40e_stat_update32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xon_2_xoff[i], &nsd->priority_xon_2_xoff[i]); } i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), 
I40E_GLPRT_PRC64L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_64, &nsd->rx_size_64); i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), I40E_GLPRT_PRC127L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_127, &nsd->rx_size_127); i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), I40E_GLPRT_PRC255L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_255, &nsd->rx_size_255); i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), I40E_GLPRT_PRC511L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_511, &nsd->rx_size_511); i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), I40E_GLPRT_PRC1023L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_1023, &nsd->rx_size_1023); i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), I40E_GLPRT_PRC1522L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_1522, &nsd->rx_size_1522); i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), I40E_GLPRT_PRC9522L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_big, &nsd->rx_size_big); i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), I40E_GLPRT_PTC64L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_64, &nsd->tx_size_64); i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), I40E_GLPRT_PTC127L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_127, &nsd->tx_size_127); i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), I40E_GLPRT_PTC255L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_255, &nsd->tx_size_255); i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), I40E_GLPRT_PTC511L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_511, &nsd->tx_size_511); i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), I40E_GLPRT_PTC1023L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_1023, &nsd->tx_size_1023); i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), I40E_GLPRT_PTC1522L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_1522, &nsd->tx_size_1522); i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), I40E_GLPRT_PTC9522L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_big, &nsd->tx_size_big); i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port), pf->stat_offsets_loaded, &osd->rx_undersize, &nsd->rx_undersize); i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port), pf->stat_offsets_loaded, &osd->rx_fragments, &nsd->rx_fragments); i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port), pf->stat_offsets_loaded, &osd->rx_oversize, &nsd->rx_oversize); i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port), pf->stat_offsets_loaded, &osd->rx_jabber, &nsd->rx_jabber); /* FDIR stats */ i40e_stat_update_and_clear32(hw, I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)), &nsd->fd_atr_match); i40e_stat_update_and_clear32(hw, I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)), &nsd->fd_sb_match); i40e_stat_update_and_clear32(hw, I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)), &nsd->fd_atr_tunnel_match); val = rd32(hw, I40E_PRTPM_EEE_STAT); nsd->tx_lpi_status = (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >> I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT; nsd->rx_lpi_status = (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >> I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT; i40e_stat_update32(hw, I40E_PRTPM_TLPIC, pf->stat_offsets_loaded, &osd->tx_lpi_count, &nsd->tx_lpi_count); i40e_stat_update32(hw, I40E_PRTPM_RLPIC, pf->stat_offsets_loaded, &osd->rx_lpi_count, &nsd->rx_lpi_count); if (pf->flags & I40E_FLAG_FD_SB_ENABLED && !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) nsd->fd_sb_status = true; else nsd->fd_sb_status = false; if (pf->flags & I40E_FLAG_FD_ATR_ENABLED && !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) nsd->fd_atr_status = 
true; else nsd->fd_atr_status = false; pf->stat_offsets_loaded = true; } /** * i40e_update_stats - Update the various statistics counters. * @vsi: the VSI to be updated * * Update the various stats for this VSI and its related entities. **/ void i40e_update_stats(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; if (vsi == pf->vsi[pf->lan_vsi]) i40e_update_pf_stats(pf); i40e_update_vsi_stats(vsi); } /** * i40e_count_filters - counts VSI mac filters * @vsi: the VSI to be searched * * Returns count of mac filters **/ int i40e_count_filters(struct i40e_vsi *vsi) { struct i40e_mac_filter *f; struct hlist_node *h; int bkt; int cnt = 0; hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) ++cnt; return cnt; } /** * i40e_find_filter - Search VSI filter list for specific mac/vlan filter * @vsi: the VSI to be searched * @macaddr: the MAC address * @vlan: the vlan * * Returns ptr to the filter object or NULL **/ static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan) { struct i40e_mac_filter *f; u64 key; if (!vsi || !macaddr) return NULL; key = i40e_addr_to_hkey(macaddr); hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) { if ((ether_addr_equal(macaddr, f->macaddr)) && (vlan == f->vlan)) return f; } return NULL; } /** * i40e_find_mac - Find a mac addr in the macvlan filters list * @vsi: the VSI to be searched * @macaddr: the MAC address we are searching for * * Returns the first filter with the provided MAC address or NULL if * MAC address was not found **/ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr) { struct i40e_mac_filter *f; u64 key; if (!vsi || !macaddr) return NULL; key = i40e_addr_to_hkey(macaddr); hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) { if ((ether_addr_equal(macaddr, f->macaddr))) return f; } return NULL; } /** * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode * @vsi: the VSI to be searched * * Returns true if VSI is in vlan mode or false otherwise **/ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi) { /* If we have a PVID, always operate in VLAN mode */ if (vsi->info.pvid) return true; /* We need to operate in VLAN mode whenever we have any filters with * a VLAN other than I40E_VLAN_ALL. We could check the table each * time, incurring search cost repeatedly. However, we can notice two * things: * * 1) the only place where we can gain a VLAN filter is in * i40e_add_filter. * * 2) the only place where filters are actually removed is in * i40e_sync_filters_subtask. * * Thus, we can simply use a boolean value, has_vlan_filters which we * will set to true when we add a VLAN filter in i40e_add_filter. Then * we have to perform the full search after deleting filters in * i40e_sync_filters_subtask, but we already have to search * filters here and can perform the check at the same time. This * results in avoiding embedding a loop for VLAN mode inside another * loop over all the filters, and should maintain correctness as noted * above. */ return vsi->has_vlan_filter; } /** * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary * @vsi: the VSI to configure * @tmp_add_list: list of filters ready to be added * @tmp_del_list: list of filters ready to be deleted * @vlan_filters: the number of active VLAN filters * * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they * behave as expected. 
If we have any active VLAN filters remaining or about * to be added then we need to update non-VLAN filters to be marked as VLAN=0 * so that they only match against untagged traffic. If we no longer have any * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1 * so that they match against both tagged and untagged traffic. In this way, * we ensure that we correctly receive the desired traffic. This ensures that * when we have an active VLAN we will receive only untagged traffic and * traffic matching active VLANs. If we have no active VLANs then we will * operate in non-VLAN mode and receive all traffic, tagged or untagged. * * Finally, in a similar fashion, this function also corrects filters when * there is an active PVID assigned to this VSI. * * In case of memory allocation failure return -ENOMEM. Otherwise, return 0. * * This function is only expected to be called from within * i40e_sync_vsi_filters. * * NOTE: This function expects to be called while under the * mac_filter_hash_lock */ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi, struct hlist_head *tmp_add_list, struct hlist_head *tmp_del_list, int vlan_filters) { s16 pvid = le16_to_cpu(vsi->info.pvid); struct i40e_mac_filter *f, *add_head; struct i40e_new_mac_filter *new; struct hlist_node *h; int bkt, new_vlan; /* To determine if a particular filter needs to be replaced we * have the three following conditions: * * a) if we have a PVID assigned, then all filters which are * not marked as VLAN=PVID must be replaced with filters that * are. * b) otherwise, if we have any active VLANS, all filters * which are marked as VLAN=-1 must be replaced with * filters marked as VLAN=0 * c) finally, if we do not have any active VLANS, all filters * which are marked as VLAN=0 must be replaced with filters * marked as VLAN=-1 */ /* Update the filters about to be added in place */ hlist_for_each_entry(new, tmp_add_list, hlist) { if (pvid && new->f->vlan != pvid) new->f->vlan = pvid; else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY) new->f->vlan = 0; else if (!vlan_filters && new->f->vlan == 0) new->f->vlan = I40E_VLAN_ANY; } /* Update the remaining active filters */ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { /* Combine the checks for whether a filter needs to be changed * and then determine the new VLAN inside the if block, in * order to avoid duplicating code for adding the new filter * then deleting the old filter. */ if ((pvid && f->vlan != pvid) || (vlan_filters && f->vlan == I40E_VLAN_ANY) || (!vlan_filters && f->vlan == 0)) { /* Determine the new vlan we will be adding */ if (pvid) new_vlan = pvid; else if (vlan_filters) new_vlan = 0; else new_vlan = I40E_VLAN_ANY; /* Create the new filter */ add_head = i40e_add_filter(vsi, f->macaddr, new_vlan); if (!add_head) return -ENOMEM; /* Create a temporary i40e_new_mac_filter */ new = kzalloc(sizeof(*new), GFP_ATOMIC); if (!new) return -ENOMEM; new->f = add_head; new->state = add_head->state; /* Add the new filter to the tmp list */ hlist_add_head(&new->hlist, tmp_add_list); /* Put the original filter into the delete list */ f->state = I40E_FILTER_REMOVE; hash_del(&f->hlist); hlist_add_head(&f->hlist, tmp_del_list); } } vsi->has_vlan_filter = !!vlan_filters; return 0; } /** * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM * @vsi: the PF Main VSI - inappropriate for any other VSI * @macaddr: the MAC address * * Remove whatever filter the firmware set up so the driver can manage * its own filtering intelligently. 
**/ static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) { struct i40e_aqc_remove_macvlan_element_data element; struct i40e_pf *pf = vsi->back; /* Only appropriate for the PF main VSI */ if (vsi->type != I40E_VSI_MAIN) return; memset(&element, 0, sizeof(element)); ether_addr_copy(element.mac_addr, macaddr); element.vlan_tag = 0; /* Ignore error returns, some firmware does it this way... */ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); memset(&element, 0, sizeof(element)); ether_addr_copy(element.mac_addr, macaddr); element.vlan_tag = 0; /* ...and some firmware does it this way. */ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); } /** * i40e_add_filter - Add a mac/vlan filter to the VSI * @vsi: the VSI to be searched * @macaddr: the MAC address * @vlan: the vlan * * Returns ptr to the filter object or NULL when no memory available. * * NOTE: This function is expected to be called with mac_filter_hash_lock * being held. **/ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan) { struct i40e_mac_filter *f; u64 key; if (!vsi || !macaddr) return NULL; f = i40e_find_filter(vsi, macaddr, vlan); if (!f) { f = kzalloc(sizeof(*f), GFP_ATOMIC); if (!f) return NULL; /* Update the boolean indicating if we need to function in * VLAN mode. */ if (vlan >= 0) vsi->has_vlan_filter = true; ether_addr_copy(f->macaddr, macaddr); f->vlan = vlan; f->state = I40E_FILTER_NEW; INIT_HLIST_NODE(&f->hlist); key = i40e_addr_to_hkey(macaddr); hash_add(vsi->mac_filter_hash, &f->hlist, key); vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); } /* If we're asked to add a filter that has been marked for removal, it * is safe to simply restore it to active state. __i40e_del_filter * will have simply deleted any filters which were previously marked * NEW or FAILED, so if it is currently marked REMOVE it must have * previously been ACTIVE. Since we haven't yet run the sync filters * task, just restore this filter to the ACTIVE state so that the * sync task leaves it in place */ if (f->state == I40E_FILTER_REMOVE) f->state = I40E_FILTER_ACTIVE; return f; } /** * __i40e_del_filter - Remove a specific filter from the VSI * @vsi: VSI to remove from * @f: the filter to remove from the list * * This function should be called instead of i40e_del_filter only if you know * the exact filter you will remove already, such as via i40e_find_filter or * i40e_find_mac. * * NOTE: This function is expected to be called with mac_filter_hash_lock * being held. * ANOTHER NOTE: This function MUST be called from within the context of * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe() * instead of list_for_each_entry(). **/ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f) { if (!f) return; /* If the filter was never added to firmware then we can just delete it * directly and we don't want to set the status to remove or else an * admin queue command will unnecessarily fire. 
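	 * A filter still in the NEW or FAILED state exists only in the
	 * driver's hash table and was never programmed into the device, so
	 * freeing it locally is sufficient.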
*/ if ((f->state == I40E_FILTER_FAILED) || (f->state == I40E_FILTER_NEW)) { hash_del(&f->hlist); kfree(f); } else { f->state = I40E_FILTER_REMOVE; } vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); } /** * i40e_del_filter - Remove a MAC/VLAN filter from the VSI * @vsi: the VSI to be searched * @macaddr: the MAC address * @vlan: the VLAN * * NOTE: This function is expected to be called with mac_filter_hash_lock * being held. * ANOTHER NOTE: This function MUST be called from within the context of * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe() * instead of list_for_each_entry(). **/ void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan) { struct i40e_mac_filter *f; if (!vsi || !macaddr) return; f = i40e_find_filter(vsi, macaddr, vlan); __i40e_del_filter(vsi, f); } /** * i40e_add_mac_filter - Add a MAC filter for all active VLANs * @vsi: the VSI to be searched * @macaddr: the mac address to be filtered * * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise, * go through all the macvlan filters and add a macvlan filter for each * unique vlan that already exists. If a PVID has been assigned, instead only * add the macaddr to that VLAN. * * Returns last filter added on success, else NULL **/ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr) { struct i40e_mac_filter *f, *add = NULL; struct hlist_node *h; int bkt; if (vsi->info.pvid) return i40e_add_filter(vsi, macaddr, le16_to_cpu(vsi->info.pvid)); if (!i40e_is_vsi_in_vlan(vsi)) return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY); hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { if (f->state == I40E_FILTER_REMOVE) continue; add = i40e_add_filter(vsi, macaddr, f->vlan); if (!add) return NULL; } return add; } /** * i40e_del_mac_filter - Remove a MAC filter from all VLANs * @vsi: the VSI to be searched * @macaddr: the mac address to be removed * * Removes a given MAC address from a VSI regardless of what VLAN it has been * associated with. * * Returns 0 for success, or error **/ int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr) { struct i40e_mac_filter *f; struct hlist_node *h; bool found = false; int bkt; lockdep_assert_held(&vsi->mac_filter_hash_lock); hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { if (ether_addr_equal(macaddr, f->macaddr)) { __i40e_del_filter(vsi, f); found = true; } } if (found) return 0; else return -ENOENT; } /** * i40e_set_mac - NDO callback to set mac address * @netdev: network interface device structure * @p: pointer to an address structure * * Returns 0 on success, negative on failure **/ static int i40e_set_mac(struct net_device *netdev, void *p) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) { netdev_info(netdev, "already using mac address %pM\n", addr->sa_data); return 0; } if (test_bit(__I40E_DOWN, pf->state) || test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) return -EADDRNOTAVAIL; if (ether_addr_equal(hw->mac.addr, addr->sa_data)) netdev_info(netdev, "returning to hw mac address %pM\n", hw->mac.addr); else netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); /* Copy the address first, so that we avoid a possible race with * .set_rx_mode(). 
* - Remove old address from MAC filter * - Copy new address * - Add new address to MAC filter */ spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_del_mac_filter(vsi, netdev->dev_addr); ether_addr_copy(netdev->dev_addr, addr->sa_data); i40e_add_mac_filter(vsi, netdev->dev_addr); spin_unlock_bh(&vsi->mac_filter_hash_lock); if (vsi->type == I40E_VSI_MAIN) { i40e_status ret; ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL, addr->sa_data, NULL); if (ret) netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n", i40e_stat_str(hw, ret), i40e_aq_str(hw, hw->aq.asq_last_status)); } /* schedule our worker thread which will take care of * applying the new filter changes */ i40e_service_event_schedule(pf); return 0; } /** * i40e_config_rss_aq - Prepare for RSS using AQ commands * @vsi: vsi structure * @seed: RSS hash seed **/ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int ret = 0; if (seed) { struct i40e_aqc_get_set_rss_key_data *seed_dw = (struct i40e_aqc_get_set_rss_key_data *)seed; ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw); if (ret) { dev_info(&pf->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", i40e_stat_str(hw, ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return ret; } } if (lut) { bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false; ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); if (ret) { dev_info(&pf->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", i40e_stat_str(hw, ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return ret; } } return ret; } /** * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used * @vsi: VSI structure **/ static int i40e_vsi_config_rss(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; u8 seed[I40E_HKEY_ARRAY_SIZE]; u8 *lut; int ret; if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)) return 0; if (!vsi->rss_size) vsi->rss_size = min_t(int, pf->alloc_rss_size, vsi->num_queue_pairs); if (!vsi->rss_size) return -EINVAL; lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) return -ENOMEM; /* Use the user configured hash keys and lookup table if there is one, * otherwise use default */ if (vsi->rss_lut_user) memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); else i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); if (vsi->rss_hkey_user) memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); else netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size); kfree(lut); return ret; } /** * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config * @vsi: the VSI being configured, * @ctxt: VSI context structure * @enabled_tc: number of traffic classes to enable * * Prepares VSI tc_config to have queue configurations based on MQPRIO options. **/ static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt, u8 enabled_tc) { u16 qcount = 0, max_qcount, qmap, sections = 0; int i, override_q, pow, num_qps, ret; u8 netdev_tc = 0, offset = 0; if (vsi->type != I40E_VSI_MAIN) return -EINVAL; sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; sections |= I40E_AQ_VSI_PROP_SCHED_VALID; vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc; vsi->tc_config.enabled_tc = enabled_tc ? 
enabled_tc : 1; num_qps = vsi->mqprio_qopt.qopt.count[0]; /* find the next higher power-of-2 of num queue pairs */ pow = ilog2(num_qps); if (!is_power_of_2(num_qps)) pow++; qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); /* Setup queue offset/count for all TCs for given VSI */ max_qcount = vsi->mqprio_qopt.qopt.count[0]; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { /* See if the given TC is enabled for the given VSI */ if (vsi->tc_config.enabled_tc & BIT(i)) { offset = vsi->mqprio_qopt.qopt.offset[i]; qcount = vsi->mqprio_qopt.qopt.count[i]; if (qcount > max_qcount) max_qcount = qcount; vsi->tc_config.tc_info[i].qoffset = offset; vsi->tc_config.tc_info[i].qcount = qcount; vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++; } else { /* TC is not enabled so set the offset to * default queue and allocate one queue * for the given TC. */ vsi->tc_config.tc_info[i].qoffset = 0; vsi->tc_config.tc_info[i].qcount = 1; vsi->tc_config.tc_info[i].netdev_tc = 0; } } /* Set actual Tx/Rx queue pairs */ vsi->num_queue_pairs = offset + qcount; /* Setup queue TC[0].qmap for given VSI context */ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); ctxt->info.valid_sections |= cpu_to_le16(sections); /* Reconfigure RSS for main VSI with max queue count */ vsi->rss_size = max_qcount; ret = i40e_vsi_config_rss(vsi); if (ret) { dev_info(&vsi->back->pdev->dev, "Failed to reconfig rss for num_queues (%u)\n", max_qcount); return ret; } vsi->reconfig_rss = true; dev_dbg(&vsi->back->pdev->dev, "Reconfigured rss with num_queues (%u)\n", max_qcount); /* Find queue count available for channel VSIs and starting offset * for channel VSIs */ override_q = vsi->mqprio_qopt.qopt.count[0]; if (override_q && override_q < vsi->num_queue_pairs) { vsi->cnt_q_avail = vsi->num_queue_pairs - override_q; vsi->next_base_queue = override_q; } return 0; } /** * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc * @vsi: the VSI being setup * @ctxt: VSI context structure * @enabled_tc: Enabled TCs bitmap * @is_add: True if called before Add VSI * * Setup VSI queue mapping for enabled traffic classes. **/ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt, u8 enabled_tc, bool is_add) { struct i40e_pf *pf = vsi->back; u16 sections = 0; u8 netdev_tc = 0; u16 numtc = 1; u16 qcount; u8 offset; u16 qmap; int i; u16 num_tc_qps = 0; sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; offset = 0; /* Number of queues per enabled TC */ num_tc_qps = vsi->alloc_queue_pairs; if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { /* Find numtc from enabled TC bitmap */ for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (enabled_tc & BIT(i)) /* TC is enabled */ numtc++; } if (!numtc) { dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n"); numtc = 1; } num_tc_qps = num_tc_qps / numtc; num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf)); } vsi->tc_config.numtc = numtc; vsi->tc_config.enabled_tc = enabled_tc ? 
enabled_tc : 1; /* Do not allow use more TC queue pairs than MSI-X vectors exist */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix); /* Setup queue offset/count for all TCs for given VSI */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { /* See if the given TC is enabled for the given VSI */ if (vsi->tc_config.enabled_tc & BIT(i)) { /* TC is enabled */ int pow, num_qps; switch (vsi->type) { case I40E_VSI_MAIN: if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) || vsi->tc_config.enabled_tc != 1) { qcount = min_t(int, pf->alloc_rss_size, num_tc_qps); break; } /* fall through */ case I40E_VSI_FDIR: case I40E_VSI_SRIOV: case I40E_VSI_VMDQ2: default: qcount = num_tc_qps; WARN_ON(i != 0); break; } vsi->tc_config.tc_info[i].qoffset = offset; vsi->tc_config.tc_info[i].qcount = qcount; /* find the next higher power-of-2 of num queue pairs */ num_qps = qcount; pow = 0; while (num_qps && (BIT_ULL(pow) < qcount)) { pow++; num_qps >>= 1; } vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++; qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); offset += qcount; } else { /* TC is not enabled so set the offset to * default queue and allocate one queue * for the given TC. */ vsi->tc_config.tc_info[i].qoffset = 0; vsi->tc_config.tc_info[i].qcount = 1; vsi->tc_config.tc_info[i].netdev_tc = 0; qmap = 0; } ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); } /* Set actual Tx/Rx queue pairs */ vsi->num_queue_pairs = offset; if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) { if (vsi->req_queue_pairs > 0) vsi->num_queue_pairs = vsi->req_queue_pairs; else if (pf->flags & I40E_FLAG_MSIX_ENABLED) vsi->num_queue_pairs = pf->num_lan_msix; } /* Scheduler section valid can only be set for ADD VSI */ if (is_add) { sections |= I40E_AQ_VSI_PROP_SCHED_VALID; ctxt->info.up_enable_bits = enabled_tc; } if (vsi->type == I40E_VSI_SRIOV) { ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG); for (i = 0; i < vsi->num_queue_pairs; i++) ctxt->info.queue_mapping[i] = cpu_to_le16(vsi->base_queue + i); } else { ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); } ctxt->info.valid_sections |= cpu_to_le16(sections); } /** * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address * @netdev: the netdevice * @addr: address to add * * Called by __dev_(mc|uc)_sync when an address needs to be added. We call * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. */ static int i40e_addr_sync(struct net_device *netdev, const u8 *addr) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; if (i40e_add_mac_filter(vsi, addr)) return 0; else return -ENOMEM; } /** * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address * @netdev: the netdevice * @addr: address to add * * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. */ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; /* Under some circumstances, we might receive a request to delete * our own device address from our uc list. Because we store the * device address in the VSI's MAC/VLAN filter list, we need to ignore * such requests and not delete our device address from this list. 
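	 * Dropping that entry here would remove the interface's own unicast
	 * MAC filter, so we simply report success and leave the filter in
	 * place.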
*/ if (ether_addr_equal(addr, netdev->dev_addr)) return 0; i40e_del_mac_filter(vsi, addr); return 0; } /** * i40e_set_rx_mode - NDO callback to set the netdev filters * @netdev: network interface device structure **/ static void i40e_set_rx_mode(struct net_device *netdev) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; spin_lock_bh(&vsi->mac_filter_hash_lock); __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync); __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync); spin_unlock_bh(&vsi->mac_filter_hash_lock); /* check for other flag changes */ if (vsi->current_netdev_flags != vsi->netdev->flags) { vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); } } /** * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries * @vsi: Pointer to VSI struct * @from: Pointer to list which contains MAC filter entries - changes to * those entries needs to be undone. * * MAC filter entries from this list were slated for deletion. **/ static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi, struct hlist_head *from) { struct i40e_mac_filter *f; struct hlist_node *h; hlist_for_each_entry_safe(f, h, from, hlist) { u64 key = i40e_addr_to_hkey(f->macaddr); /* Move the element back into MAC filter list*/ hlist_del(&f->hlist); hash_add(vsi->mac_filter_hash, &f->hlist, key); } } /** * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries * @vsi: Pointer to vsi struct * @from: Pointer to list which contains MAC filter entries - changes to * those entries needs to be undone. * * MAC filter entries from this list were slated for addition. **/ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi, struct hlist_head *from) { struct i40e_new_mac_filter *new; struct hlist_node *h; hlist_for_each_entry_safe(new, h, from, hlist) { /* We can simply free the wrapper structure */ hlist_del(&new->hlist); kfree(new); } } /** * i40e_next_entry - Get the next non-broadcast filter from a list * @next: pointer to filter in list * * Returns the next non-broadcast filter in the list. Required so that we * ignore broadcast filters within the list, since these are not handled via * the normal firmware update path. */ static struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next) { hlist_for_each_entry_continue(next, hlist) { if (!is_broadcast_ether_addr(next->f->macaddr)) return next; } return NULL; } /** * i40e_update_filter_state - Update filter state based on return data * from firmware * @count: Number of filters added * @add_list: return data from fw * @add_head: pointer to first filter in current batch * * MAC filter entries from list were slated to be added to device. Returns * number of successful filters. Note that 0 does NOT mean success! **/ static int i40e_update_filter_state(int count, struct i40e_aqc_add_macvlan_element_data *add_list, struct i40e_new_mac_filter *add_head) { int retval = 0; int i; for (i = 0; i < count; i++) { /* Always check status of each filter. We don't need to check * the firmware return status because we pre-set the filter * status to I40E_AQC_MM_ERR_NO_RES when sending the filter * request to the adminq. Thus, if it no longer matches then * we know the filter is active. 
*/ if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) { add_head->state = I40E_FILTER_FAILED; } else { add_head->state = I40E_FILTER_ACTIVE; retval++; } add_head = i40e_next_filter(add_head); if (!add_head) break; } return retval; } /** * i40e_aqc_del_filters - Request firmware to delete a set of filters * @vsi: ptr to the VSI * @vsi_name: name to display in messages * @list: the list of filters to send to firmware * @num_del: the number of filters to delete * @retval: Set to -EIO on failure to delete * * Send a request to firmware via AdminQ to delete a set of filters. Uses * *retval instead of a return value so that success does not force ret_val to * be set to 0. This ensures that a sequence of calls to this function * preserve the previous value of *retval on successful delete. */ static void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name, struct i40e_aqc_remove_macvlan_element_data *list, int num_del, int *retval) { struct i40e_hw *hw = &vsi->back->hw; i40e_status aq_ret; int aq_err; aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL); aq_err = hw->aq.asq_last_status; /* Explicitly ignore and do not report when firmware returns ENOENT */ if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) { *retval = -EIO; dev_info(&vsi->back->pdev->dev, "ignoring delete macvlan error on %s, err %s, aq_err %s\n", vsi_name, i40e_stat_str(hw, aq_ret), i40e_aq_str(hw, aq_err)); } } /** * i40e_aqc_add_filters - Request firmware to add a set of filters * @vsi: ptr to the VSI * @vsi_name: name to display in messages * @list: the list of filters to send to firmware * @add_head: Position in the add hlist * @num_add: the number of filters to add * * Send a request to firmware via AdminQ to add a chunk of filters. Will set * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of * space for more filters. */ static void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name, struct i40e_aqc_add_macvlan_element_data *list, struct i40e_new_mac_filter *add_head, int num_add) { struct i40e_hw *hw = &vsi->back->hw; int aq_err, fcnt; i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL); aq_err = hw->aq.asq_last_status; fcnt = i40e_update_filter_state(num_add, list, add_head); if (fcnt != num_add) { if (vsi->type == I40E_VSI_MAIN) { set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); dev_warn(&vsi->back->pdev->dev, "Error %s adding RX filters on %s, promiscuous mode forced on\n", i40e_aq_str(hw, aq_err), vsi_name); } else if (vsi->type == I40E_VSI_SRIOV || vsi->type == I40E_VSI_VMDQ1 || vsi->type == I40E_VSI_VMDQ2) { dev_warn(&vsi->back->pdev->dev, "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n", i40e_aq_str(hw, aq_err), vsi_name, vsi_name); } else { dev_warn(&vsi->back->pdev->dev, "Error %s adding RX filters on %s, incorrect VSI type: %i.\n", i40e_aq_str(hw, aq_err), vsi_name, vsi->type); } } } /** * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags * @vsi: pointer to the VSI * @vsi_name: the VSI name * @f: filter data * * This function sets or clears the promiscuous broadcast flags for VLAN * filters in order to properly receive broadcast frames. Assumes that only * broadcast filters are passed. 
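 * A broadcast "filter" is realized by toggling the VSI's broadcast
 * promiscuous setting (per VLAN where applicable) rather than by programming
 * a MAC/VLAN entry, which is why such entries are handled separately from
 * the normal AdminQ add/delete lists.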
* * Returns status indicating success or failure; **/ static i40e_status i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, struct i40e_mac_filter *f) { bool enable = f->state == I40E_FILTER_NEW; struct i40e_hw *hw = &vsi->back->hw; i40e_status aq_ret; if (f->vlan == I40E_VLAN_ANY) { aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, enable, NULL); } else { aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw, vsi->seid, enable, f->vlan, NULL); } if (aq_ret) { set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); dev_warn(&vsi->back->pdev->dev, "Error %s, forcing overflow promiscuous on %s\n", i40e_aq_str(hw, hw->aq.asq_last_status), vsi_name); } return aq_ret; } /** * i40e_set_promiscuous - set promiscuous mode * @pf: board private structure * @promisc: promisc on or off * * There are different ways of setting promiscuous mode on a PF depending on * what state/environment we're in. This identifies and sets it appropriately. * Returns 0 on success. **/ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_hw *hw = &pf->hw; i40e_status aq_ret; if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB && !(pf->flags & I40E_FLAG_MFP_ENABLED)) { /* set defport ON for Main VSI instead of true promisc * this way we will get all unicast/multicast and VLAN * promisc behavior but will not get VF or VMDq traffic * replicated on the Main VSI. */ if (promisc) aq_ret = i40e_aq_set_default_vsi(hw, vsi->seid, NULL); else aq_ret = i40e_aq_clear_default_vsi(hw, vsi->seid, NULL); if (aq_ret) { dev_info(&pf->pdev->dev, "Set default VSI failed, err %s, aq_err %s\n", i40e_stat_str(hw, aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); } } else { aq_ret = i40e_aq_set_vsi_unicast_promiscuous( hw, vsi->seid, promisc, NULL, true); if (aq_ret) { dev_info(&pf->pdev->dev, "set unicast promisc failed, err %s, aq_err %s\n", i40e_stat_str(hw, aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); } aq_ret = i40e_aq_set_vsi_multicast_promiscuous( hw, vsi->seid, promisc, NULL); if (aq_ret) { dev_info(&pf->pdev->dev, "set multicast promisc failed, err %s, aq_err %s\n", i40e_stat_str(hw, aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); } } if (!aq_ret) pf->cur_promisc = promisc; return aq_ret; } /** * i40e_sync_vsi_filters - Update the VSI filter list to the HW * @vsi: ptr to the VSI * * Push any outstanding VSI filter changes through the AdminQ. 
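 * Callers normally do not invoke this directly: filter changes set
 * I40E_VSI_FLAG_FILTER_CHANGED and __I40E_MACVLAN_SYNC_PENDING, and the
 * service task then pushes them through here via
 * i40e_sync_filters_subtask() (see below).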
* * Returns 0 or error value **/ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) { struct hlist_head tmp_add_list, tmp_del_list; struct i40e_mac_filter *f; struct i40e_new_mac_filter *new, *add_head = NULL; struct i40e_hw *hw = &vsi->back->hw; bool old_overflow, new_overflow; unsigned int failed_filters = 0; unsigned int vlan_filters = 0; char vsi_name[16] = "PF"; int filter_list_len = 0; i40e_status aq_ret = 0; u32 changed_flags = 0; struct hlist_node *h; struct i40e_pf *pf; int num_add = 0; int num_del = 0; int retval = 0; u16 cmd_flags; int list_size; int bkt; /* empty array typed pointers, kcalloc later */ struct i40e_aqc_add_macvlan_element_data *add_list; struct i40e_aqc_remove_macvlan_element_data *del_list; while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state)) usleep_range(1000, 2000); pf = vsi->back; old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); if (vsi->netdev) { changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; vsi->current_netdev_flags = vsi->netdev->flags; } INIT_HLIST_HEAD(&tmp_add_list); INIT_HLIST_HEAD(&tmp_del_list); if (vsi->type == I40E_VSI_SRIOV) snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id); else if (vsi->type != I40E_VSI_MAIN) snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid); if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; spin_lock_bh(&vsi->mac_filter_hash_lock); /* Create a list of filters to delete. */ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { if (f->state == I40E_FILTER_REMOVE) { /* Move the element into temporary del_list */ hash_del(&f->hlist); hlist_add_head(&f->hlist, &tmp_del_list); /* Avoid counting removed filters */ continue; } if (f->state == I40E_FILTER_NEW) { /* Create a temporary i40e_new_mac_filter */ new = kzalloc(sizeof(*new), GFP_ATOMIC); if (!new) goto err_no_memory_locked; /* Store pointer to the real filter */ new->f = f; new->state = f->state; /* Add it to the hash list */ hlist_add_head(&new->hlist, &tmp_add_list); } /* Count the number of active (current and new) VLAN * filters we have now. Does not count filters which * are marked for deletion. */ if (f->vlan > 0) vlan_filters++; } retval = i40e_correct_mac_vlan_filters(vsi, &tmp_add_list, &tmp_del_list, vlan_filters); if (retval) goto err_no_memory_locked; spin_unlock_bh(&vsi->mac_filter_hash_lock); } /* Now process 'del_list' outside the lock */ if (!hlist_empty(&tmp_del_list)) { filter_list_len = hw->aq.asq_buf_size / sizeof(struct i40e_aqc_remove_macvlan_element_data); list_size = filter_list_len * sizeof(struct i40e_aqc_remove_macvlan_element_data); del_list = kzalloc(list_size, GFP_ATOMIC); if (!del_list) goto err_no_memory; hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) { cmd_flags = 0; /* handle broadcast filters by updating the broadcast * promiscuous flag and release filter list. 
*/ if (is_broadcast_ether_addr(f->macaddr)) { i40e_aqc_broadcast_filter(vsi, vsi_name, f); hlist_del(&f->hlist); kfree(f); continue; } /* add to delete list */ ether_addr_copy(del_list[num_del].mac_addr, f->macaddr); if (f->vlan == I40E_VLAN_ANY) { del_list[num_del].vlan_tag = 0; cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; } else { del_list[num_del].vlan_tag = cpu_to_le16((u16)(f->vlan)); } cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; del_list[num_del].flags = cmd_flags; num_del++; /* flush a full buffer */ if (num_del == filter_list_len) { i40e_aqc_del_filters(vsi, vsi_name, del_list, num_del, &retval); memset(del_list, 0, list_size); num_del = 0; } /* Release memory for MAC filter entries which were * synced up with HW. */ hlist_del(&f->hlist); kfree(f); } if (num_del) { i40e_aqc_del_filters(vsi, vsi_name, del_list, num_del, &retval); } kfree(del_list); del_list = NULL; } if (!hlist_empty(&tmp_add_list)) { /* Do all the adds now. */ filter_list_len = hw->aq.asq_buf_size / sizeof(struct i40e_aqc_add_macvlan_element_data); list_size = filter_list_len * sizeof(struct i40e_aqc_add_macvlan_element_data); add_list = kzalloc(list_size, GFP_ATOMIC); if (!add_list) goto err_no_memory; num_add = 0; hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) { /* handle broadcast filters by updating the broadcast * promiscuous flag instead of adding a MAC filter. */ if (is_broadcast_ether_addr(new->f->macaddr)) { if (i40e_aqc_broadcast_filter(vsi, vsi_name, new->f)) new->state = I40E_FILTER_FAILED; else new->state = I40E_FILTER_ACTIVE; continue; } /* add to add array */ if (num_add == 0) add_head = new; cmd_flags = 0; ether_addr_copy(add_list[num_add].mac_addr, new->f->macaddr); if (new->f->vlan == I40E_VLAN_ANY) { add_list[num_add].vlan_tag = 0; cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; } else { add_list[num_add].vlan_tag = cpu_to_le16((u16)(new->f->vlan)); } add_list[num_add].queue_number = 0; /* set invalid match method for later detection */ add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES; cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; add_list[num_add].flags = cpu_to_le16(cmd_flags); num_add++; /* flush a full buffer */ if (num_add == filter_list_len) { i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head, num_add); memset(add_list, 0, list_size); num_add = 0; } } if (num_add) { i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head, num_add); } /* Now move all of the filters from the temp add list back to * the VSI's list. */ spin_lock_bh(&vsi->mac_filter_hash_lock); hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) { /* Only update the state if we're still NEW */ if (new->f->state == I40E_FILTER_NEW) new->f->state = new->state; hlist_del(&new->hlist); kfree(new); } spin_unlock_bh(&vsi->mac_filter_hash_lock); kfree(add_list); add_list = NULL; } /* Determine the number of active and failed filters. */ spin_lock_bh(&vsi->mac_filter_hash_lock); vsi->active_filters = 0; hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { if (f->state == I40E_FILTER_ACTIVE) vsi->active_filters++; else if (f->state == I40E_FILTER_FAILED) failed_filters++; } spin_unlock_bh(&vsi->mac_filter_hash_lock); /* Check if we are able to exit overflow promiscuous mode. We can * safely exit if we didn't just enter, we no longer have any failed * filters, and we have reduced filters below the threshold value. 
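	 * Illustrative numbers: entering overflow promiscuous with 60 active
	 * filters sets promisc_threshold to (60 * 3) / 4 = 45 further down,
	 * so we leave the mode once fewer than 45 filters are active and none
	 * have failed.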
*/ if (old_overflow && !failed_filters && vsi->active_filters < vsi->promisc_threshold) { dev_info(&pf->pdev->dev, "filter logjam cleared on %s, leaving overflow promiscuous mode\n", vsi_name); clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); vsi->promisc_threshold = 0; } /* if the VF is not trusted do not do promisc */ if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) { clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); goto out; } new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); /* If we are entering overflow promiscuous, we need to calculate a new * threshold for when we are safe to exit */ if (!old_overflow && new_overflow) vsi->promisc_threshold = (vsi->active_filters * 3) / 4; /* check for changes in promiscuous modes */ if (changed_flags & IFF_ALLMULTI) { bool cur_multipromisc; cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, vsi->seid, cur_multipromisc, NULL); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, hw->aq.asq_last_status); dev_info(&pf->pdev->dev, "set multi promisc failed on %s, err %s aq_err %s\n", vsi_name, i40e_stat_str(hw, aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); } else { dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n", vsi->netdev->name, cur_multipromisc ? "entering" : "leaving"); } } if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) { bool cur_promisc; cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || new_overflow); aq_ret = i40e_set_promiscuous(pf, cur_promisc); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, hw->aq.asq_last_status); dev_info(&pf->pdev->dev, "Setting promiscuous %s failed on %s, err %s aq_err %s\n", cur_promisc ? "on" : "off", vsi_name, i40e_stat_str(hw, aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); } } out: /* if something went wrong then set the changed flag so we try again */ if (retval) vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state); return retval; err_no_memory: /* Restore elements on the temporary add and delete lists */ spin_lock_bh(&vsi->mac_filter_hash_lock); err_no_memory_locked: i40e_undo_del_filter_entries(vsi, &tmp_del_list); i40e_undo_add_filter_entries(vsi, &tmp_add_list); spin_unlock_bh(&vsi->mac_filter_hash_lock); vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state); return -ENOMEM; } /** * i40e_sync_filters_subtask - Sync the VSI filter list with HW * @pf: board private structure **/ static void i40e_sync_filters_subtask(struct i40e_pf *pf) { int v; if (!pf) return; if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state)) return; if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) { set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); return; } for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) { int ret = i40e_sync_vsi_filters(pf->vsi[v]); if (ret) { /* come back and try again later */ set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); break; } } } clear_bit(__I40E_VF_DISABLE, pf->state); } /** * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP * @vsi: the vsi **/ static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi) { if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) return I40E_RXBUFFER_2048; else return I40E_RXBUFFER_3072; } /** * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for 
maximum frame size * * Returns 0 on success, negative on failure **/ static int i40e_change_mtu(struct net_device *netdev, int new_mtu) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; if (i40e_enabled_xdp_vsi(vsi)) { int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; if (frame_size > i40e_max_xdp_frame_size(vsi)) return -EINVAL; } netdev_info(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (netif_running(netdev)) i40e_vsi_reinit_locked(vsi); set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); return 0; } /** * i40e_ioctl - Access the hwtstamp interface * @netdev: network interface device structure * @ifr: interface request data * @cmd: ioctl command **/ int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; switch (cmd) { case SIOCGHWTSTAMP: return i40e_ptp_get_ts_config(pf, ifr); case SIOCSHWTSTAMP: return i40e_ptp_set_ts_config(pf, ifr); default: return -EOPNOTSUPP; } } /** * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI * @vsi: the vsi being adjusted **/ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) { struct i40e_vsi_context ctxt; i40e_status ret; /* Don't modify stripping options if a port VLAN is active */ if (vsi->info.pvid) return; if ((vsi->info.valid_sections & cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) return; /* already enabled */ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; ctxt.seid = vsi->seid; ctxt.info = vsi->info; ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, "update vlan stripping failed, err %s aq_err %s\n", i40e_stat_str(&vsi->back->hw, ret), i40e_aq_str(&vsi->back->hw, vsi->back->hw.aq.asq_last_status)); } } /** * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI * @vsi: the vsi being adjusted **/ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) { struct i40e_vsi_context ctxt; i40e_status ret; /* Don't modify stripping options if a port VLAN is active */ if (vsi->info.pvid) return; if ((vsi->info.valid_sections & cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) == I40E_AQ_VSI_PVLAN_EMOD_MASK)) return; /* already disabled */ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | I40E_AQ_VSI_PVLAN_EMOD_NOTHING; ctxt.seid = vsi->seid; ctxt.info = vsi->info; ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, "update vlan stripping failed, err %s aq_err %s\n", i40e_stat_str(&vsi->back->hw, ret), i40e_aq_str(&vsi->back->hw, vsi->back->hw.aq.asq_last_status)); } } /** * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address * @vsi: the vsi being configured * @vid: vlan id to be added (0 = untagged only , -1 = any) * * This is a helper function for adding a new MAC/VLAN filter with the * specified VLAN for each existing MAC address already in the hash table. * This function does *not* perform any accounting to update filters based on * VLAN mode. 
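 * A typical caller, such as i40e_vsi_add_vlan() below, takes
 * mac_filter_hash_lock, invokes this helper for the new VID, drops the lock,
 * and then schedules the service task so the new filters are pushed to the
 * hardware.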
* * NOTE: this function expects to be called while under the * mac_filter_hash_lock **/ int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) { struct i40e_mac_filter *f, *add_f; struct hlist_node *h; int bkt; hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { if (f->state == I40E_FILTER_REMOVE) continue; add_f = i40e_add_filter(vsi, f->macaddr, vid); if (!add_f) { dev_info(&vsi->back->pdev->dev, "Could not add vlan filter %d for %pM\n", vid, f->macaddr); return -ENOMEM; } } return 0; } /** * i40e_vsi_add_vlan - Add VSI membership for given VLAN * @vsi: the VSI being configured * @vid: VLAN id to be added **/ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid) { int err; if (vsi->info.pvid) return -EINVAL; /* The network stack will attempt to add VID=0, with the intention to * receive priority tagged packets with a VLAN of 0. Our HW receives * these packets by default when configured to receive untagged * packets, so we don't need to add a filter for this case. * Additionally, HW interprets adding a VID=0 filter as meaning to * receive *only* tagged traffic and stops receiving untagged traffic. * Thus, we do not want to actually add a filter for VID=0 */ if (!vid) return 0; /* Locked once because all functions invoked below iterates list*/ spin_lock_bh(&vsi->mac_filter_hash_lock); err = i40e_add_vlan_all_mac(vsi, vid); spin_unlock_bh(&vsi->mac_filter_hash_lock); if (err) return err; /* schedule our worker thread which will take care of * applying the new filter changes */ i40e_service_event_schedule(vsi->back); return 0; } /** * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN * @vsi: the vsi being configured * @vid: vlan id to be removed (0 = untagged only , -1 = any) * * This function should be used to remove all VLAN filters which match the * given VID. It does not schedule the service event and does not take the * mac_filter_hash_lock so it may be combined with other operations under * a single invocation of the mac_filter_hash_lock. 
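 * Every filter whose VLAN matches @vid is handed to __i40e_del_filter().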
* * NOTE: this function expects to be called while under the * mac_filter_hash_lock */ void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) { struct i40e_mac_filter *f; struct hlist_node *h; int bkt; hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { if (f->vlan == vid) __i40e_del_filter(vsi, f); } } /** * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN * @vsi: the VSI being configured * @vid: VLAN id to be removed **/ void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid) { if (!vid || vsi->info.pvid) return; spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_rm_vlan_all_mac(vsi, vid); spin_unlock_bh(&vsi->mac_filter_hash_lock); /* schedule our worker thread which will take care of * applying the new filter changes */ i40e_service_event_schedule(vsi->back); } /** * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload * @netdev: network interface to be adjusted * @proto: unused protocol value * @vid: vlan id to be added * * net_device_ops implementation for adding vlan ids **/ static int i40e_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; int ret = 0; if (vid >= VLAN_N_VID) return -EINVAL; ret = i40e_vsi_add_vlan(vsi, vid); if (!ret) set_bit(vid, vsi->active_vlans); return ret; } /** * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path * @netdev: network interface to be adjusted * @proto: unused protocol value * @vid: vlan id to be added **/ static void i40e_vlan_rx_add_vid_up(struct net_device *netdev, __always_unused __be16 proto, u16 vid) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; if (vid >= VLAN_N_VID) return; set_bit(vid, vsi->active_vlans); } /** * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload * @netdev: network interface to be adjusted * @proto: unused protocol value * @vid: vlan id to be removed * * net_device_ops implementation for removing vlan ids **/ static int i40e_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; /* return code is ignored as there is nothing a user * can do about failure to remove and a log message was * already printed from the other function */ i40e_vsi_kill_vlan(vsi, vid); clear_bit(vid, vsi->active_vlans); return 0; } /** * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up * @vsi: the vsi being brought back up **/ static void i40e_restore_vlan(struct i40e_vsi *vsi) { u16 vid; if (!vsi->netdev) return; if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) i40e_vlan_stripping_enable(vsi); else i40e_vlan_stripping_disable(vsi); for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q), vid); } /** * i40e_vsi_add_pvid - Add pvid for the VSI * @vsi: the vsi being adjusted * @vid: the vlan id to set as a PVID **/ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) { struct i40e_vsi_context ctxt; i40e_status ret; vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi->info.pvid = cpu_to_le16(vid); vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED | I40E_AQ_VSI_PVLAN_INSERT_PVID | I40E_AQ_VSI_PVLAN_EMOD_STR; ctxt.seid = vsi->seid; ctxt.info = vsi->info; ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, "add pvid failed, err %s aq_err %s\n", i40e_stat_str(&vsi->back->hw, ret), 
i40e_aq_str(&vsi->back->hw, vsi->back->hw.aq.asq_last_status)); return -ENOENT; } return 0; } /** * i40e_vsi_remove_pvid - Remove the pvid from the VSI * @vsi: the vsi being adjusted * * Just use the vlan_rx_register() service to put it back to normal **/ void i40e_vsi_remove_pvid(struct i40e_vsi *vsi) { vsi->info.pvid = 0; i40e_vlan_stripping_disable(vsi); } /** * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources * @vsi: ptr to the VSI * * If this function returns with an error, then it's possible one or * more of the rings is populated (while the rest are not). It is the * callers duty to clean those orphaned rings. * * Return 0 on success, negative on failure **/ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi) { int i, err = 0; for (i = 0; i < vsi->num_queue_pairs && !err; i++) err = i40e_setup_tx_descriptors(vsi->tx_rings[i]); if (!i40e_enabled_xdp_vsi(vsi)) return err; for (i = 0; i < vsi->num_queue_pairs && !err; i++) err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]); return err; } /** * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues * @vsi: ptr to the VSI * * Free VSI's transmit software resources **/ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi) { int i; if (vsi->tx_rings) { for (i = 0; i < vsi->num_queue_pairs; i++) if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) i40e_free_tx_resources(vsi->tx_rings[i]); } if (vsi->xdp_rings) { for (i = 0; i < vsi->num_queue_pairs; i++) if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) i40e_free_tx_resources(vsi->xdp_rings[i]); } } /** * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources * @vsi: ptr to the VSI * * If this function returns with an error, then it's possible one or * more of the rings is populated (while the rest are not). It is the * callers duty to clean those orphaned rings. * * Return 0 on success, negative on failure **/ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi) { int i, err = 0; for (i = 0; i < vsi->num_queue_pairs && !err; i++) err = i40e_setup_rx_descriptors(vsi->rx_rings[i]); return err; } /** * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues * @vsi: ptr to the VSI * * Free all receive software resources **/ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) { int i; if (!vsi->rx_rings) return; for (i = 0; i < vsi->num_queue_pairs; i++) if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) i40e_free_rx_resources(vsi->rx_rings[i]); } /** * i40e_config_xps_tx_ring - Configure XPS for a Tx ring * @ring: The Tx ring to configure * * This enables/disables XPS for a given Tx descriptor ring * based on the TCs enabled for the VSI that ring belongs to. **/ static void i40e_config_xps_tx_ring(struct i40e_ring *ring) { int cpu; if (!ring->q_vector || !ring->netdev || ring->ch) return; /* We only initialize XPS once, so as not to overwrite user settings */ if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state)) return; cpu = cpumask_local_spread(ring->q_vector->v_idx, -1); netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu), ring->queue_index); } /** * i40e_xsk_umem - Retrieve the AF_XDP ZC if XDP and ZC is enabled * @ring: The Tx or Rx ring * * Returns the UMEM or NULL. 
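 * For XDP Tx rings the queue index is first translated back to the paired
 * queue id before the af_xdp_zc_qps bitmap is consulted.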
**/ static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring) { bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); int qid = ring->queue_index; if (ring_is_xdp(ring)) qid -= ring->vsi->alloc_queue_pairs; if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps)) return NULL; return xdp_get_umem_from_qid(ring->vsi->netdev, qid); } /** * i40e_configure_tx_ring - Configure a transmit ring context and rest * @ring: The Tx ring to configure * * Configure the Tx descriptor ring in the HMC context. **/ static int i40e_configure_tx_ring(struct i40e_ring *ring) { struct i40e_vsi *vsi = ring->vsi; u16 pf_q = vsi->base_queue + ring->queue_index; struct i40e_hw *hw = &vsi->back->hw; struct i40e_hmc_obj_txq tx_ctx; i40e_status err = 0; u32 qtx_ctl = 0; if (ring_is_xdp(ring)) ring->xsk_umem = i40e_xsk_umem(ring); /* some ATR related tx ring init */ if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) { ring->atr_sample_rate = vsi->back->atr_sample_rate; ring->atr_count = 0; } else { ring->atr_sample_rate = 0; } /* configure XPS */ i40e_config_xps_tx_ring(ring); /* clear the context structure first */ memset(&tx_ctx, 0, sizeof(tx_ctx)); tx_ctx.new_context = 1; tx_ctx.base = (ring->dma / 128); tx_ctx.qlen = ring->count; tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)); tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); /* FDIR VSI tx ring can still use RS bit and writebacks */ if (vsi->type != I40E_VSI_FDIR) tx_ctx.head_wb_ena = 1; tx_ctx.head_wb_addr = ring->dma + (ring->count * sizeof(struct i40e_tx_desc)); /* As part of VSI creation/update, FW allocates certain * Tx arbitration queue sets for each TC enabled for * the VSI. The FW returns the handles to these queue * sets as part of the response buffer to Add VSI, * Update VSI, etc. AQ commands. It is expected that * these queue set handles be associated with the Tx * queues by the driver as part of the TX queue context * initialization. This has to be done regardless of * DCB as by default everything is mapped to TC0. 
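 * The handle comes from the channel's qs_handle[] when the ring belongs to
 * a channel VSI, otherwise from the owning VSI's info.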
*/ if (ring->ch) tx_ctx.rdylist = le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]); else tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]); tx_ctx.rdylist_act = 0; /* clear the context in the HMC */ err = i40e_clear_lan_tx_queue_context(hw, pf_q); if (err) { dev_info(&vsi->back->pdev->dev, "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n", ring->queue_index, pf_q, err); return -ENOMEM; } /* set the context in the HMC */ err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx); if (err) { dev_info(&vsi->back->pdev->dev, "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n", ring->queue_index, pf_q, err); return -ENOMEM; } /* Now associate this queue with this PCI function */ if (ring->ch) { if (ring->ch->type == I40E_VSI_VMDQ2) qtx_ctl = I40E_QTX_CTL_VM_QUEUE; else return -EINVAL; qtx_ctl |= (ring->ch->vsi_number << I40E_QTX_CTL_VFVM_INDX_SHIFT) & I40E_QTX_CTL_VFVM_INDX_MASK; } else { if (vsi->type == I40E_VSI_VMDQ2) { qtx_ctl = I40E_QTX_CTL_VM_QUEUE; qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) & I40E_QTX_CTL_VFVM_INDX_MASK; } else { qtx_ctl = I40E_QTX_CTL_PF_QUEUE; } } qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & I40E_QTX_CTL_PF_INDX_MASK); wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); i40e_flush(hw); /* cache tail off for easier writes later */ ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q); return 0; } /** * i40e_configure_rx_ring - Configure a receive ring context * @ring: The Rx ring to configure * * Configure the Rx descriptor ring in the HMC context. **/ static int i40e_configure_rx_ring(struct i40e_ring *ring) { struct i40e_vsi *vsi = ring->vsi; u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; u16 pf_q = vsi->base_queue + ring->queue_index; struct i40e_hw *hw = &vsi->back->hw; struct i40e_hmc_obj_rxq rx_ctx; i40e_status err = 0; bool ok; int ret; bitmap_zero(ring->state, __I40E_RING_STATE_NBITS); /* clear the context structure first */ memset(&rx_ctx, 0, sizeof(rx_ctx)); if (ring->vsi->type == I40E_VSI_MAIN) xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); ring->xsk_umem = i40e_xsk_umem(ring); if (ring->xsk_umem) { ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr - XDP_PACKET_HEADROOM; /* For AF_XDP ZC, we disallow packets to span on * multiple buffers, thus letting us skip that * handling in the fast-path. 
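 * chain_len is forced to 1 just below, which caps rxmax at a single Rx
 * buffer.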
*/ chain_len = 1; ring->zca.free = i40e_zca_free; ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_ZERO_COPY, &ring->zca); if (ret) return ret; dev_info(&vsi->back->pdev->dev, "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n", ring->queue_index); } else { ring->rx_buf_len = vsi->rx_buf_len; if (ring->vsi->type == I40E_VSI_MAIN) { ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL); if (ret) return ret; } } rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len, BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); rx_ctx.base = (ring->dma / 128); rx_ctx.qlen = ring->count; /* use 32 byte descriptors */ rx_ctx.dsize = 1; /* descriptor type is always zero * rx_ctx.dtype = 0; */ rx_ctx.hsplit_0 = 0; rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len); if (hw->revision_id == 0) rx_ctx.lrxqthresh = 0; else rx_ctx.lrxqthresh = 1; rx_ctx.crcstrip = 1; rx_ctx.l2tsel = 1; /* this controls whether VLAN is stripped from inner headers */ rx_ctx.showiv = 0; /* set the prefena field to 1 because the manual says to */ rx_ctx.prefena = 1; /* clear the context in the HMC */ err = i40e_clear_lan_rx_queue_context(hw, pf_q); if (err) { dev_info(&vsi->back->pdev->dev, "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", ring->queue_index, pf_q, err); return -ENOMEM; } /* set the context in the HMC */ err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx); if (err) { dev_info(&vsi->back->pdev->dev, "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", ring->queue_index, pf_q, err); return -ENOMEM; } /* configure Rx buffer alignment */ if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) clear_ring_build_skb_enabled(ring); else set_ring_build_skb_enabled(ring); /* cache tail for quicker writes, and clear the reg before use */ ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); writel(0, ring->tail); ok = ring->xsk_umem ? i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) : !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); if (!ok) { /* Log this in case the user has forgotten to give the kernel * any buffers, even later in the application. */ dev_info(&vsi->back->pdev->dev, "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n", ring->xsk_umem ? "UMEM enabled " : "", ring->queue_index, pf_q); } return 0; } /** * i40e_vsi_configure_tx - Configure the VSI for Tx * @vsi: VSI structure describing this set of rings and resources * * Configure the Tx VSI for operation. **/ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) { int err = 0; u16 i; for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) err = i40e_configure_tx_ring(vsi->tx_rings[i]); if (err || !i40e_enabled_xdp_vsi(vsi)) return err; for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) err = i40e_configure_tx_ring(vsi->xdp_rings[i]); return err; } /** * i40e_vsi_configure_rx - Configure the VSI for Rx * @vsi: the VSI being configured * * Configure the Rx VSI for operation. **/ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) { int err = 0; u16 i; if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) { vsi->max_frame = I40E_MAX_RXBUFFER; vsi->rx_buf_len = I40E_RXBUFFER_2048; #if (PAGE_SIZE < 8192) } else if (!I40E_2K_TOO_SMALL_WITH_PADDING && (vsi->netdev->mtu <= ETH_DATA_LEN)) { vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN; vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN; #endif } else { vsi->max_frame = I40E_MAX_RXBUFFER; vsi->rx_buf_len = (PAGE_SIZE < 8192) ? 
I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048; } /* set up individual rings */ for (i = 0; i < vsi->num_queue_pairs && !err; i++) err = i40e_configure_rx_ring(vsi->rx_rings[i]); return err; } /** * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC * @vsi: ptr to the VSI **/ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) { struct i40e_ring *tx_ring, *rx_ring; u16 qoffset, qcount; int i, n; if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { /* Reset the TC information */ for (i = 0; i < vsi->num_queue_pairs; i++) { rx_ring = vsi->rx_rings[i]; tx_ring = vsi->tx_rings[i]; rx_ring->dcb_tc = 0; tx_ring->dcb_tc = 0; } return; } for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { if (!(vsi->tc_config.enabled_tc & BIT_ULL(n))) continue; qoffset = vsi->tc_config.tc_info[n].qoffset; qcount = vsi->tc_config.tc_info[n].qcount; for (i = qoffset; i < (qoffset + qcount); i++) { rx_ring = vsi->rx_rings[i]; tx_ring = vsi->tx_rings[i]; rx_ring->dcb_tc = n; tx_ring->dcb_tc = n; } } } /** * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI * @vsi: ptr to the VSI **/ static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) { if (vsi->netdev) i40e_set_rx_mode(vsi->netdev); } /** * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters * @vsi: Pointer to the targeted VSI * * This function replays the hlist on the hw where all the SB Flow Director * filters were saved. **/ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi) { struct i40e_fdir_filter *filter; struct i40e_pf *pf = vsi->back; struct hlist_node *node; if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) return; /* Reset FDir counters as we're replaying all existing filters */ pf->fd_tcp4_filter_cnt = 0; pf->fd_udp4_filter_cnt = 0; pf->fd_sctp4_filter_cnt = 0; pf->fd_ip4_filter_cnt = 0; hlist_for_each_entry_safe(filter, node, &pf->fdir_filter_list, fdir_node) { i40e_add_del_fdir(vsi, filter, true); } } /** * i40e_vsi_configure - Set up the VSI for action * @vsi: the VSI being configured **/ static int i40e_vsi_configure(struct i40e_vsi *vsi) { int err; i40e_set_vsi_rx_mode(vsi); i40e_restore_vlan(vsi); i40e_vsi_config_dcb_rings(vsi); err = i40e_vsi_configure_tx(vsi); if (!err) err = i40e_vsi_configure_rx(vsi); return err; } /** * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW * @vsi: the VSI being configured **/ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) { bool has_xdp = i40e_enabled_xdp_vsi(vsi); struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u16 vector; int i, q; u32 qp; /* The interrupt indexing is offset by 1 in the PFINT_ITRn * and PFINT_LNKLSTn registers, e.g.: * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts) */ qp = vsi->base_queue; vector = vsi->base_vector; for (i = 0; i < vsi->num_q_vectors; i++, vector++) { struct i40e_q_vector *q_vector = vsi->q_vectors[i]; q_vector->rx.next_update = jiffies + 1; q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[i]->itr_setting); wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.target_itr >> 1); q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->tx.next_update = jiffies + 1; q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[i]->itr_setting); wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.target_itr >> 1); q_vector->tx.current_itr = q_vector->tx.target_itr; wr32(hw, I40E_PFINT_RATEN(vector - 1), i40e_intrl_usec_to_reg(vsi->int_rate_limit)); /* Linked list for the queuepairs assigned to this vector */ wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); for (q = 0; q < q_vector->num_ringpairs; 
q++) { u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp; u32 val; val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); wr32(hw, I40E_QINT_RQCTL(qp), val); if (has_xdp) { val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); wr32(hw, I40E_QINT_TQCTL(nextqp), val); } val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); /* Terminate the linked list */ if (q == (q_vector->num_ringpairs - 1)) val |= (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); wr32(hw, I40E_QINT_TQCTL(qp), val); qp++; } } i40e_flush(hw); } /** * i40e_enable_misc_int_causes - enable the non-queue interrupts * @pf: pointer to private device data structure **/ static void i40e_enable_misc_int_causes(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 val; /* clear things first */ wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ rd32(hw, I40E_PFINT_ICR0); /* read to clear */ val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | I40E_PFINT_ICR0_ENA_GRST_MASK | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | I40E_PFINT_ICR0_ENA_GPIO_MASK | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | I40E_PFINT_ICR0_ENA_VFLR_MASK | I40E_PFINT_ICR0_ENA_ADMINQ_MASK; if (pf->flags & I40E_FLAG_IWARP_ENABLED) val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; if (pf->flags & I40E_FLAG_PTP) val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, val); /* SW_ITR_IDX = 0, but don't change INTENA */ wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); /* OTHER_ITR_IDX = 0 */ wr32(hw, I40E_PFINT_STAT_CTL0, 0); } /** * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW * @vsi: the VSI being configured **/ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) { u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? 
vsi->alloc_queue_pairs : 0; struct i40e_q_vector *q_vector = vsi->q_vectors[0]; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u32 val; /* set the ITR configuration */ q_vector->rx.next_update = jiffies + 1; q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting); wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1); q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->tx.next_update = jiffies + 1; q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting); wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1); q_vector->tx.current_itr = q_vector->tx.target_itr; i40e_enable_misc_int_causes(pf); /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ wr32(hw, I40E_PFINT_LNKLST0, 0); /* Associate the queue pair to the vector and enable the queue int */ val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)| (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); wr32(hw, I40E_QINT_RQCTL(0), val); if (i40e_enabled_xdp_vsi(vsi)) { val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)| (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); wr32(hw, I40E_QINT_TQCTL(nextqp), val); } val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); wr32(hw, I40E_QINT_TQCTL(0), val); i40e_flush(hw); } /** * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0 * @pf: board private structure **/ void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; wr32(hw, I40E_PFINT_DYN_CTL0, I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); i40e_flush(hw); } /** * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 * @pf: board private structure **/ void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 val; val = I40E_PFINT_DYN_CTL0_INTENA_MASK | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTL0, val); i40e_flush(hw); } /** * i40e_msix_clean_rings - MSIX mode Interrupt Handler * @irq: interrupt number * @data: pointer to a q_vector **/ static irqreturn_t i40e_msix_clean_rings(int irq, void *data) { struct i40e_q_vector *q_vector = data; if (!q_vector->tx.ring && !q_vector->rx.ring) return IRQ_HANDLED; napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } /** * i40e_irq_affinity_notify - Callback for affinity changes * @notify: context as to what irq was changed * @mask: the new affinity mask * * This is a callback function used by the irq_set_affinity_notifier function * so that we may register to receive changes to the irq affinity masks. **/ static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify, const cpumask_t *mask) { struct i40e_q_vector *q_vector = container_of(notify, struct i40e_q_vector, affinity_notify); cpumask_copy(&q_vector->affinity_mask, mask); } /** * i40e_irq_affinity_release - Callback for affinity notifier release * @ref: internal core kernel usage * * This is a callback function used by the irq_set_affinity_notifier function * to inform the current notification subscriber that they will no longer * receive notifications. 
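 * The body below is intentionally empty; nothing needs to be torn down when
 * the notifier is released.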
**/ static void i40e_irq_affinity_release(struct kref *ref) {} /** * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts * @vsi: the VSI being configured * @basename: name for the vector * * Allocates MSI-X vectors and requests interrupts from the kernel. **/ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) { int q_vectors = vsi->num_q_vectors; struct i40e_pf *pf = vsi->back; int base = vsi->base_vector; int rx_int_idx = 0; int tx_int_idx = 0; int vector, err; int irq_num; int cpu; for (vector = 0; vector < q_vectors; vector++) { struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; irq_num = pf->msix_entries[base + vector].vector; if (q_vector->tx.ring && q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, "TxRx", rx_int_idx++); tx_int_idx++; } else if (q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, "rx", rx_int_idx++); } else if (q_vector->tx.ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, "tx", tx_int_idx++); } else { /* skip this unused q_vector */ continue; } err = request_irq(irq_num, vsi->irq_handler, 0, q_vector->name, q_vector); if (err) { dev_info(&pf->pdev->dev, "MSIX request_irq failed, error: %d\n", err); goto free_queue_irqs; } /* register for affinity change notifications */ q_vector->affinity_notify.notify = i40e_irq_affinity_notify; q_vector->affinity_notify.release = i40e_irq_affinity_release; irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); /* Spread affinity hints out across online CPUs. * * get_cpu_mask returns a static constant mask with * a permanent lifetime so it's ok to pass to * irq_set_affinity_hint without making a copy. */ cpu = cpumask_local_spread(q_vector->v_idx, -1); irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); } vsi->irqs_ready = true; return 0; free_queue_irqs: while (vector) { vector--; irq_num = pf->msix_entries[base + vector].vector; irq_set_affinity_notifier(irq_num, NULL); irq_set_affinity_hint(irq_num, NULL); free_irq(irq_num, &vsi->q_vectors[vector]); } return err; } /** * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI * @vsi: the VSI being un-configured **/ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int base = vsi->base_vector; int i; /* disable interrupt causation from each queue */ for (i = 0; i < vsi->num_queue_pairs; i++) { u32 val; val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx)); val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK; wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val); val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx)); val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK; wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val); if (!i40e_enabled_xdp_vsi(vsi)) continue; wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0); } /* disable each interrupt */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) { for (i = vsi->base_vector; i < (vsi->num_q_vectors + vsi->base_vector); i++) wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0); i40e_flush(hw); for (i = 0; i < vsi->num_q_vectors; i++) synchronize_irq(pf->msix_entries[i + base].vector); } else { /* Legacy and MSI mode - this stops all interrupt handling */ wr32(hw, I40E_PFINT_ICR0_ENA, 0); wr32(hw, I40E_PFINT_DYN_CTL0, 0); i40e_flush(hw); synchronize_irq(pf->pdev->irq); } } /** * i40e_vsi_enable_irq - Enable IRQ for the given VSI * @vsi: the VSI being configured **/ static int i40e_vsi_enable_irq(struct i40e_vsi 
*vsi) { struct i40e_pf *pf = vsi->back; int i; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { for (i = 0; i < vsi->num_q_vectors; i++) i40e_irq_dynamic_enable(vsi, i); } else { i40e_irq_dynamic_enable_icr0(pf); } i40e_flush(&pf->hw); return 0; } /** * i40e_free_misc_vector - Free the vector that handles non-queue events * @pf: board private structure **/ static void i40e_free_misc_vector(struct i40e_pf *pf) { /* Disable ICR 0 */ wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); i40e_flush(&pf->hw); if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) { synchronize_irq(pf->msix_entries[0].vector); free_irq(pf->msix_entries[0].vector, pf); clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); } } /** * i40e_intr - MSI/Legacy and non-queue interrupt handler * @irq: interrupt number * @data: pointer to a q_vector * * This is the handler used for all MSI/Legacy interrupts, and deals * with both queue and non-queue interrupts. This is also used in * MSIX mode to handle the non-queue interrupts. **/ static irqreturn_t i40e_intr(int irq, void *data) { struct i40e_pf *pf = (struct i40e_pf *)data; struct i40e_hw *hw = &pf->hw; irqreturn_t ret = IRQ_NONE; u32 icr0, icr0_remaining; u32 val, ena_mask; icr0 = rd32(hw, I40E_PFINT_ICR0); ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA); /* if sharing a legacy IRQ, we might get called w/o an intr pending */ if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0) goto enable_intr; /* if interrupt but no bits showing, must be SWINT */ if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) || (icr0 & I40E_PFINT_ICR0_SWINT_MASK)) pf->sw_int_count++; if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) { ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n"); set_bit(__I40E_CORE_RESET_REQUESTED, pf->state); } /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_q_vector *q_vector = vsi->q_vectors[0]; /* We do not have a way to disarm Queue causes while leaving * interrupt enabled for all other causes, ideally * interrupt should be disabled while we are in NAPI but * this is not a performance path and napi_schedule() * can deal with rescheduling. 
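 * NAPI is only scheduled below when the PF is not marked __I40E_DOWN.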
*/ if (!test_bit(__I40E_DOWN, pf->state)) napi_schedule_irqoff(&q_vector->napi); } if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n"); } if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; set_bit(__I40E_MDD_EVENT_PENDING, pf->state); } if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; set_bit(__I40E_VFLR_EVENT_PENDING, pf->state); } if (icr0 & I40E_PFINT_ICR0_GRST_MASK) { if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) set_bit(__I40E_RESET_INTR_RECEIVED, pf->state); ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK; val = rd32(hw, I40E_GLGEN_RSTAT); val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; if (val == I40E_RESET_CORER) { pf->corer_count++; } else if (val == I40E_RESET_GLOBR) { pf->globr_count++; } else if (val == I40E_RESET_EMPR) { pf->empr_count++; set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state); } } if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK; dev_info(&pf->pdev->dev, "HMC error interrupt\n"); dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n", rd32(hw, I40E_PFHMC_ERRORINFO), rd32(hw, I40E_PFHMC_ERRORDATA)); } if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; i40e_ptp_tx_hwtstamp(pf); } } /* If a critical error is pending we have no choice but to reset the * device. * Report and mask out any remaining unexpected interrupts. */ icr0_remaining = icr0 & ena_mask; if (icr0_remaining) { dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", icr0_remaining); if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) { dev_info(&pf->pdev->dev, "device will be reset\n"); set_bit(__I40E_PF_RESET_REQUESTED, pf->state); i40e_service_event_schedule(pf); } ena_mask &= ~icr0_remaining; } ret = IRQ_HANDLED; enable_intr: /* re-enable interrupt causes */ wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); if (!test_bit(__I40E_DOWN, pf->state) || test_bit(__I40E_RECOVERY_MODE, pf->state)) { i40e_service_event_schedule(pf); i40e_irq_dynamic_enable_icr0(pf); } return ret; } /** * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes * @tx_ring: tx ring to clean * @budget: how many cleans we're allowed * * Returns true if there's any budget left (e.g. 
the clean is finished) **/ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) { struct i40e_vsi *vsi = tx_ring->vsi; u16 i = tx_ring->next_to_clean; struct i40e_tx_buffer *tx_buf; struct i40e_tx_desc *tx_desc; tx_buf = &tx_ring->tx_bi[i]; tx_desc = I40E_TX_DESC(tx_ring, i); i -= tx_ring->count; do { struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; /* if next_to_watch is not set then there is no work pending */ if (!eop_desc) break; /* prevent any other reads prior to eop_desc */ smp_rmb(); /* if the descriptor isn't done, no work yet to do */ if (!(eop_desc->cmd_type_offset_bsz & cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) break; /* clear next_to_watch to prevent false hangs */ tx_buf->next_to_watch = NULL; tx_desc->buffer_addr = 0; tx_desc->cmd_type_offset_bsz = 0; /* move past filter desc */ tx_buf++; tx_desc++; i++; if (unlikely(!i)) { i -= tx_ring->count; tx_buf = tx_ring->tx_bi; tx_desc = I40E_TX_DESC(tx_ring, 0); } /* unmap skb header data */ dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buf, dma), dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB) kfree(tx_buf->raw_buf); tx_buf->raw_buf = NULL; tx_buf->tx_flags = 0; tx_buf->next_to_watch = NULL; dma_unmap_len_set(tx_buf, len, 0); tx_desc->buffer_addr = 0; tx_desc->cmd_type_offset_bsz = 0; /* move us past the eop_desc for start of next FD desc */ tx_buf++; tx_desc++; i++; if (unlikely(!i)) { i -= tx_ring->count; tx_buf = tx_ring->tx_bi; tx_desc = I40E_TX_DESC(tx_ring, 0); } /* update budget accounting */ budget--; } while (likely(budget)); i += tx_ring->count; tx_ring->next_to_clean = i; if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx); return budget > 0; } /** * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring * @irq: interrupt number * @data: pointer to a q_vector **/ static irqreturn_t i40e_fdir_clean_ring(int irq, void *data) { struct i40e_q_vector *q_vector = data; struct i40e_vsi *vsi; if (!q_vector->tx.ring) return IRQ_HANDLED; vsi = q_vector->tx.ring->vsi; i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); return IRQ_HANDLED; } /** * i40e_map_vector_to_qp - Assigns the queue pair to the vector * @vsi: the VSI being configured * @v_idx: vector index * @qp_idx: queue pair index **/ static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) { struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; tx_ring->q_vector = q_vector; tx_ring->next = q_vector->tx.ring; q_vector->tx.ring = tx_ring; q_vector->tx.count++; /* Place XDP Tx ring in the same q_vector ring list as regular Tx */ if (i40e_enabled_xdp_vsi(vsi)) { struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx]; xdp_ring->q_vector = q_vector; xdp_ring->next = q_vector->tx.ring; q_vector->tx.ring = xdp_ring; q_vector->tx.count++; } rx_ring->q_vector = q_vector; rx_ring->next = q_vector->rx.ring; q_vector->rx.ring = rx_ring; q_vector->rx.count++; } /** * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors * @vsi: the VSI being configured * * This function maps descriptor rings to the queue-specific vectors * we were allotted through the MSI-X enabling code. Ideally, we'd have * one vector per queue pair, but on a constrained vector budget, we * group the queue pairs as "efficiently" as possible. 
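 * Ring pairs are spread with DIV_ROUND_UP(qp_remaining, q_vectors - v_start),
 * so earlier vectors absorb any remainder.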
**/ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) { int qp_remaining = vsi->num_queue_pairs; int q_vectors = vsi->num_q_vectors; int num_ringpairs; int v_start = 0; int qp_idx = 0; /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to * group them so there are multiple queues per vector. * It is also important to go through all the vectors available to be * sure that if we don't use all the vectors, that the remaining vectors * are cleared. This is especially important when decreasing the * number of queues in use. */ for (; v_start < q_vectors; v_start++) { struct i40e_q_vector *q_vector = vsi->q_vectors[v_start]; num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); q_vector->num_ringpairs = num_ringpairs; q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1; q_vector->rx.count = 0; q_vector->tx.count = 0; q_vector->rx.ring = NULL; q_vector->tx.ring = NULL; while (num_ringpairs--) { i40e_map_vector_to_qp(vsi, v_start, qp_idx); qp_idx++; qp_remaining--; } } } /** * i40e_vsi_request_irq - Request IRQ from the OS * @vsi: the VSI being configured * @basename: name for the vector **/ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) { struct i40e_pf *pf = vsi->back; int err; if (pf->flags & I40E_FLAG_MSIX_ENABLED) err = i40e_vsi_request_irq_msix(vsi, basename); else if (pf->flags & I40E_FLAG_MSI_ENABLED) err = request_irq(pf->pdev->irq, i40e_intr, 0, pf->int_name, pf); else err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, pf->int_name, pf); if (err) dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); return err; } #ifdef CONFIG_NET_POLL_CONTROLLER /** * i40e_netpoll - A Polling 'interrupt' handler * @netdev: network interface device structure * * This is used by netconsole to send skbs without having to re-enable * interrupts. It's not called while the normal interrupt routine is executing. **/ static void i40e_netpoll(struct net_device *netdev) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; int i; /* if interface is down do nothing */ if (test_bit(__I40E_VSI_DOWN, vsi->state)) return; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { for (i = 0; i < vsi->num_q_vectors; i++) i40e_msix_clean_rings(0, vsi->q_vectors[i]); } else { i40e_intr(pf->pdev->irq, netdev); } } #endif #define I40E_QTX_ENA_WAIT_COUNT 50 /** * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled * @pf: the PF being configured * @pf_q: the PF queue * @enable: enable or disable state of the queue * * This routine will wait for the given Tx queue of the PF to reach the * enabled or disabled state. * Returns -ETIMEDOUT in case of failing to reach the requested state after * multiple retries; else will return 0 in case of success. **/ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) { int i; u32 tx_reg; for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q)); if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) break; usleep_range(10, 20); } if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) return -ETIMEDOUT; return 0; } /** * i40e_control_tx_q - Start or stop a particular Tx queue * @pf: the PF structure * @pf_q: the PF queue to configure * @enable: start or stop the queue * * This function enables or disables a single queue. Note that any delay * required after the operation is expected to be handled by the caller of * this function. 
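 * The hardware is warned via i40e_pre_tx_queue_cfg() and the QENA_REQ/STAT
 * bits are polled until they agree before the request bit is toggled.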
**/ static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable) { struct i40e_hw *hw = &pf->hw; u32 tx_reg; int i; /* warn the TX unit of coming changes */ i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); if (!enable) usleep_range(10, 20); for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) { tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1)) break; usleep_range(1000, 2000); } /* Skip if the queue is already in the requested state */ if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) return; /* turn on/off the queue */ if (enable) { wr32(hw, I40E_QTX_HEAD(pf_q), 0); tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK; } else { tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; } wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); } /** * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion * @seid: VSI SEID * @pf: the PF structure * @pf_q: the PF queue to configure * @is_xdp: true if the queue is used for XDP * @enable: start or stop the queue **/ int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, bool is_xdp, bool enable) { int ret; i40e_control_tx_q(pf, pf_q, enable); /* wait for the change to finish */ ret = i40e_pf_txq_wait(pf, pf_q, enable); if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d %sTx ring %d %sable timeout\n", seid, (is_xdp ? "XDP " : ""), pf_q, (enable ? "en" : "dis")); } return ret; } /** * i40e_vsi_control_tx - Start or stop a VSI's rings * @vsi: the VSI being configured * @enable: start or stop the rings **/ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) { struct i40e_pf *pf = vsi->back; int i, pf_q, ret = 0; pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q, false /*is xdp*/, enable); if (ret) break; if (!i40e_enabled_xdp_vsi(vsi)) continue; ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q + vsi->alloc_queue_pairs, true /*is xdp*/, enable); if (ret) break; } return ret; } /** * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled * @pf: the PF being configured * @pf_q: the PF queue * @enable: enable or disable state of the queue * * This routine will wait for the given Rx queue of the PF to reach the * enabled or disabled state. * Returns -ETIMEDOUT in case of failing to reach the requested state after * multiple retries; else will return 0 in case of success. **/ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable) { int i; u32 rx_reg; for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q)); if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) break; usleep_range(10, 20); } if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) return -ETIMEDOUT; return 0; } /** * i40e_control_rx_q - Start or stop a particular Rx queue * @pf: the PF structure * @pf_q: the PF queue to configure * @enable: start or stop the queue * * This function enables or disables a single queue. Note that * any delay required after the operation is expected to be * handled by the caller of this function. 
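 * Like the Tx variant, QENA_REQ and QENA_STAT are polled until they agree
 * before the request bit is flipped.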
**/ static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable) { struct i40e_hw *hw = &pf->hw; u32 rx_reg; int i; for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) { rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) == ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1)) break; usleep_range(1000, 2000); } /* Skip if the queue is already in the requested state */ if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) return; /* turn on/off the queue */ if (enable) rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK; else rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; wr32(hw, I40E_QRX_ENA(pf_q), rx_reg); } /** * i40e_control_wait_rx_q * @pf: the PF structure * @pf_q: queue being configured * @enable: start or stop the rings * * This function enables or disables a single queue along with waiting * for the change to finish. The caller of this function should handle * the delays needed in the case of disabling queues. **/ int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable) { int ret = 0; i40e_control_rx_q(pf, pf_q, enable); /* wait for the change to finish */ ret = i40e_pf_rxq_wait(pf, pf_q, enable); if (ret) return ret; return ret; } /** * i40e_vsi_control_rx - Start or stop a VSI's rings * @vsi: the VSI being configured * @enable: start or stop the rings **/ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) { struct i40e_pf *pf = vsi->back; int i, pf_q, ret = 0; pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { ret = i40e_control_wait_rx_q(pf, pf_q, enable); if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d Rx ring %d %sable timeout\n", vsi->seid, pf_q, (enable ? "en" : "dis")); break; } } /* Due to HW errata, on Rx disable only, the register can indicate done * before it really is. Needs 50ms to be sure */ if (!enable) mdelay(50); return ret; } /** * i40e_vsi_start_rings - Start a VSI's rings * @vsi: the VSI being configured **/ int i40e_vsi_start_rings(struct i40e_vsi *vsi) { int ret = 0; /* do rx first for enable and last for disable */ ret = i40e_vsi_control_rx(vsi, true); if (ret) return ret; ret = i40e_vsi_control_tx(vsi, true); return ret; } /** * i40e_vsi_stop_rings - Stop a VSI's rings * @vsi: the VSI being configured **/ void i40e_vsi_stop_rings(struct i40e_vsi *vsi) { /* When port TX is suspended, don't wait */ if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state)) return i40e_vsi_stop_rings_no_wait(vsi); /* do rx first for enable and last for disable * Ignore return value, we need to shutdown whatever we can */ i40e_vsi_control_tx(vsi, false); i40e_vsi_control_rx(vsi, false); } /** * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay * @vsi: the VSI being shutdown * * This function stops all the rings for a VSI but does not delay to verify * that rings have been disabled. It is expected that the caller is shutting * down multiple VSIs at once and will delay together for all the VSIs after * initiating the shutdown. This is particularly useful for shutting down lots * of VFs together. Otherwise, a large delay can be incurred while configuring * each VSI in serial. 
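 * Callers are expected to verify the queues afterwards, e.g. via
 * i40e_vsi_wait_queues_disabled().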
**/ void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int i, pf_q; pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { i40e_control_tx_q(pf, pf_q, false); i40e_control_rx_q(pf, pf_q, false); } } /** * i40e_vsi_free_irq - Free the irq association with the OS * @vsi: the VSI being configured **/ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int base = vsi->base_vector; u32 val, qp; int i; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { if (!vsi->q_vectors) return; if (!vsi->irqs_ready) return; vsi->irqs_ready = false; for (i = 0; i < vsi->num_q_vectors; i++) { int irq_num; u16 vector; vector = i + base; irq_num = pf->msix_entries[vector].vector; /* free only the irqs that were actually requested */ if (!vsi->q_vectors[i] || !vsi->q_vectors[i]->num_ringpairs) continue; /* clear the affinity notifier in the IRQ descriptor */ irq_set_affinity_notifier(irq_num, NULL); /* remove our suggested affinity mask for this IRQ */ irq_set_affinity_hint(irq_num, NULL); synchronize_irq(irq_num); free_irq(irq_num, vsi->q_vectors[i]); /* Tear down the interrupt queue link list * * We know that they come in pairs and always * the Rx first, then the Tx. To clear the * link list, stick the EOL value into the * next_q field of the registers. */ val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; val |= I40E_QUEUE_END_OF_LIST << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); while (qp != I40E_QUEUE_END_OF_LIST) { u32 next; val = rd32(hw, I40E_QINT_RQCTL(qp)); val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | I40E_QINT_RQCTL_MSIX0_INDX_MASK | I40E_QINT_RQCTL_CAUSE_ENA_MASK | I40E_QINT_RQCTL_INTEVENT_MASK); val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | I40E_QINT_RQCTL_NEXTQ_INDX_MASK); wr32(hw, I40E_QINT_RQCTL(qp), val); val = rd32(hw, I40E_QINT_TQCTL(qp)); next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK) >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT; val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | I40E_QINT_TQCTL_MSIX0_INDX_MASK | I40E_QINT_TQCTL_CAUSE_ENA_MASK | I40E_QINT_TQCTL_INTEVENT_MASK); val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | I40E_QINT_TQCTL_NEXTQ_INDX_MASK); wr32(hw, I40E_QINT_TQCTL(qp), val); qp = next; } } } else { free_irq(pf->pdev->irq, pf); val = rd32(hw, I40E_PFINT_LNKLST0); qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; val |= I40E_QUEUE_END_OF_LIST << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; wr32(hw, I40E_PFINT_LNKLST0, val); val = rd32(hw, I40E_QINT_RQCTL(qp)); val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | I40E_QINT_RQCTL_MSIX0_INDX_MASK | I40E_QINT_RQCTL_CAUSE_ENA_MASK | I40E_QINT_RQCTL_INTEVENT_MASK); val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | I40E_QINT_RQCTL_NEXTQ_INDX_MASK); wr32(hw, I40E_QINT_RQCTL(qp), val); val = rd32(hw, I40E_QINT_TQCTL(qp)); val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | I40E_QINT_TQCTL_MSIX0_INDX_MASK | I40E_QINT_TQCTL_CAUSE_ENA_MASK | I40E_QINT_TQCTL_INTEVENT_MASK); val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | I40E_QINT_TQCTL_NEXTQ_INDX_MASK); wr32(hw, I40E_QINT_TQCTL(qp), val); } } /** * i40e_free_q_vector - Free memory allocated for specific interrupt vector * @vsi: the VSI being configured * @v_idx: Index of vector to be freed * * This function frees the memory allocated to the q_vector. In addition if * NAPI is enabled it will delete any references to the NAPI struct prior * to freeing the q_vector. 
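 * Any rings still pointing at this vector are disassociated first, and the
 * structure itself is released with kfree_rcu().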
**/ static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) { struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; struct i40e_ring *ring; if (!q_vector) return; /* disassociate q_vector from rings */ i40e_for_each_ring(ring, q_vector->tx) ring->q_vector = NULL; i40e_for_each_ring(ring, q_vector->rx) ring->q_vector = NULL; /* only VSI w/ an associated netdev is set up w/ NAPI */ if (vsi->netdev) netif_napi_del(&q_vector->napi); vsi->q_vectors[v_idx] = NULL; kfree_rcu(q_vector, rcu); } /** * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors * @vsi: the VSI being un-configured * * This frees the memory allocated to the q_vectors and * deletes references to the NAPI struct. **/ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) { int v_idx; for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) i40e_free_q_vector(vsi, v_idx); } /** * i40e_reset_interrupt_capability - Disable interrupt setup in OS * @pf: board private structure **/ static void i40e_reset_interrupt_capability(struct i40e_pf *pf) { /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) { pci_disable_msix(pf->pdev); kfree(pf->msix_entries); pf->msix_entries = NULL; kfree(pf->irq_pile); pf->irq_pile = NULL; } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { pci_disable_msi(pf->pdev); } pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); } /** * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings * @pf: board private structure * * We go through and clear interrupt specific resources and reset the structure * to pre-load conditions **/ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) { int i; i40e_free_misc_vector(pf); i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector, I40E_IWARP_IRQ_PILE_ID); i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); for (i = 0; i < pf->num_alloc_vsi; i++) if (pf->vsi[i]) i40e_vsi_free_q_vectors(pf->vsi[i]); i40e_reset_interrupt_capability(pf); } /** * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI * @vsi: the VSI being configured **/ static void i40e_napi_enable_all(struct i40e_vsi *vsi) { int q_idx; if (!vsi->netdev) return; for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; if (q_vector->rx.ring || q_vector->tx.ring) napi_enable(&q_vector->napi); } } /** * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI * @vsi: the VSI being configured **/ static void i40e_napi_disable_all(struct i40e_vsi *vsi) { int q_idx; if (!vsi->netdev) return; for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; if (q_vector->rx.ring || q_vector->tx.ring) napi_disable(&q_vector->napi); } } /** * i40e_vsi_close - Shut down a VSI * @vsi: the vsi to be quelled **/ static void i40e_vsi_close(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state)) i40e_down(vsi); i40e_vsi_free_irq(vsi); i40e_vsi_free_tx_resources(vsi); i40e_vsi_free_rx_resources(vsi); vsi->current_netdev_flags = 0; set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) set_bit(__I40E_CLIENT_RESET, pf->state); } /** * i40e_quiesce_vsi - Pause a given VSI * @vsi: the VSI being paused **/ static void i40e_quiesce_vsi(struct i40e_vsi *vsi) { if (test_bit(__I40E_VSI_DOWN, vsi->state)) return; set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state); if (vsi->netdev && netif_running(vsi->netdev)) 
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); else i40e_vsi_close(vsi); } /** * i40e_unquiesce_vsi - Resume a given VSI * @vsi: the VSI being resumed **/ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) { if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state)) return; if (vsi->netdev && netif_running(vsi->netdev)) vsi->netdev->netdev_ops->ndo_open(vsi->netdev); else i40e_vsi_open(vsi); /* this clears the DOWN bit */ } /** * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF * @pf: the PF **/ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) { int v; for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v]) i40e_quiesce_vsi(pf->vsi[v]); } } /** * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF * @pf: the PF **/ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) { int v; for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v]) i40e_unquiesce_vsi(pf->vsi[v]); } } /** * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled * @vsi: the VSI being configured * * Wait until all queues on a given VSI have been disabled. **/ int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int i, pf_q, ret; pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { /* Check and wait for the Tx queue */ ret = i40e_pf_txq_wait(pf, pf_q, false); if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d Tx ring %d disable timeout\n", vsi->seid, pf_q); return ret; } if (!i40e_enabled_xdp_vsi(vsi)) goto wait_rx; /* Check and wait for the XDP Tx queue */ ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs, false); if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d XDP Tx ring %d disable timeout\n", vsi->seid, pf_q); return ret; } wait_rx: /* Check and wait for the Rx queue */ ret = i40e_pf_rxq_wait(pf, pf_q, false); if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d Rx ring %d disable timeout\n", vsi->seid, pf_q); return ret; } } return 0; } #ifdef CONFIG_I40E_DCB /** * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled * @pf: the PF * * This function waits for the queues to be in disabled state for all the * VSIs that are managed by this PF. **/ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf) { int v, ret = 0; for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { if (pf->vsi[v]) { ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); if (ret) break; } } return ret; } #endif /** * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP * @pf: pointer to PF * * Get TC map for ISCSI PF type that will include iSCSI TC * and LAN TC. 
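 * TC0 is always part of the returned map; the iSCSI TC is derived from the
 * priority carried in the iSCSI APP TLV.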
**/ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) { struct i40e_dcb_app_priority_table app; struct i40e_hw *hw = &pf->hw; u8 enabled_tc = 1; /* TC0 is always enabled */ u8 tc, i; /* Get the iSCSI APP TLV */ struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; for (i = 0; i < dcbcfg->numapps; i++) { app = dcbcfg->app[i]; if (app.selector == I40E_APP_SEL_TCPIP && app.protocolid == I40E_APP_PROTOID_ISCSI) { tc = dcbcfg->etscfg.prioritytable[app.priority]; enabled_tc |= BIT(tc); break; } } return enabled_tc; } /** * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config * @dcbcfg: the corresponding DCBx configuration structure * * Return the number of TCs from given DCBx configuration **/ static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) { int i, tc_unused = 0; u8 num_tc = 0; u8 ret = 0; /* Scan the ETS Config Priority Table to find * traffic class enabled for a given priority * and create a bitmask of enabled TCs */ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]); /* Now scan the bitmask to check for * contiguous TCs starting with TC0 */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (num_tc & BIT(i)) { if (!tc_unused) { ret++; } else { pr_err("Non-contiguous TC - Disabling DCB\n"); return 1; } } else { tc_unused = 1; } } /* There is always at least TC0 */ if (!ret) ret = 1; return ret; } /** * i40e_dcb_get_enabled_tc - Get enabled traffic classes * @dcbcfg: the corresponding DCBx configuration structure * * Query the current DCB configuration and return the number of * traffic classes enabled from the given DCBX config **/ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) { u8 num_tc = i40e_dcb_get_num_tc(dcbcfg); u8 enabled_tc = 1; u8 i; for (i = 0; i < num_tc; i++) enabled_tc |= BIT(i); return enabled_tc; } /** * i40e_mqprio_get_enabled_tc - Get enabled traffic classes * @pf: PF being queried * * Query the current MQPRIO configuration and return the number of * traffic classes enabled. **/ static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; u8 num_tc = vsi->mqprio_qopt.qopt.num_tc; u8 enabled_tc = 1, i; for (i = 1; i < num_tc; i++) enabled_tc |= BIT(i); return enabled_tc; } /** * i40e_pf_get_num_tc - Get enabled traffic classes for PF * @pf: PF being queried * * Return number of traffic classes enabled for the given PF **/ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; u8 i, enabled_tc = 1; u8 num_tc = 0; struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; if (pf->flags & I40E_FLAG_TC_MQPRIO) return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc; /* If neither MQPRIO nor DCB is enabled, then always use single TC */ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) return 1; /* SFP mode will be enabled for all TCs on port */ if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) return i40e_dcb_get_num_tc(dcbcfg); /* MFP mode return count of enabled TCs for this PF */ if (pf->hw.func_caps.iscsi) enabled_tc = i40e_get_iscsi_tc_map(pf); else return 1; /* Only TC0 */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (enabled_tc & BIT(i)) num_tc++; } return num_tc; } /** * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes * @pf: PF being queried * * Return a bitmap for enabled traffic classes for this PF. 
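* For example, a non-MFP port with two TCs configured through DCB yields a map of 0x3, while a PF with neither MQPRIO nor DCB enabled gets the default I40E_DEFAULT_TRAFFIC_CLASS map.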
**/ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) { if (pf->flags & I40E_FLAG_TC_MQPRIO) return i40e_mqprio_get_enabled_tc(pf); /* If neither MQPRIO nor DCB is enabled for this PF then just return * default TC */ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) return I40E_DEFAULT_TRAFFIC_CLASS; /* SFP mode we want PF to be enabled for all TCs */ if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); /* MFP enabled and iSCSI PF type */ if (pf->hw.func_caps.iscsi) return i40e_get_iscsi_tc_map(pf); else return I40E_DEFAULT_TRAFFIC_CLASS; } /** * i40e_vsi_get_bw_info - Query VSI BW Information * @vsi: the VSI being queried * * Returns 0 on success, negative value on failure **/ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) { struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0}; struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; i40e_status ret; u32 tc_bw_max; int i; /* Get the VSI level BW configuration */ ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); if (ret) { dev_info(&pf->pdev->dev, "couldn't get PF vsi bw config, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } /* Get the VSI level BW configuration per TC */ ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, NULL); if (ret) { dev_info(&pf->pdev->dev, "couldn't get PF vsi ets bw config, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) { dev_info(&pf->pdev->dev, "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n", bw_config.tc_valid_bits, bw_ets_config.tc_valid_bits); /* Still continuing */ } vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit); vsi->bw_max_quanta = bw_config.max_bw; tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) | (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i]; vsi->bw_ets_limit_credits[i] = le16_to_cpu(bw_ets_config.credits[i]); /* 3 bits out of 4 for each TC */ vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); } return 0; } /** * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC * @vsi: the VSI being configured * @enabled_tc: TC bitmap * @bw_share: BW shared credits per TC * * Returns 0 on success, negative value on failure **/ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, u8 *bw_share) { struct i40e_aqc_configure_vsi_tc_bw_data bw_data; struct i40e_pf *pf = vsi->back; i40e_status ret; int i; /* There is no need to reset BW when mqprio mode is on. 
*/ if (pf->flags & I40E_FLAG_TC_MQPRIO) return 0; if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) { ret = i40e_set_bw_limit(vsi, vsi->seid, 0); if (ret) dev_info(&pf->pdev->dev, "Failed to reset tx rate for vsi->seid %u\n", vsi->seid); return ret; } bw_data.tc_valid_bits = enabled_tc; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) bw_data.tc_bw_credits[i] = bw_share[i]; ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL); if (ret) { dev_info(&pf->pdev->dev, "AQ command Config VSI BW allocation per TC failed = %d\n", pf->hw.aq.asq_last_status); return -EINVAL; } for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) vsi->info.qs_handle[i] = bw_data.qs_handles[i]; return 0; } /** * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration * @vsi: the VSI being configured * @enabled_tc: TC map to be enabled * **/ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) { struct net_device *netdev = vsi->netdev; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u8 netdev_tc = 0; int i; struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; if (!netdev) return; if (!enabled_tc) { netdev_reset_tc(netdev); return; } /* Set up actual enabled TCs on the VSI */ if (netdev_set_num_tc(netdev, vsi->tc_config.numtc)) return; /* set per TC queues for the VSI */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { /* Only set TC queues for enabled tcs * * e.g. For a VSI that has TC0 and TC3 enabled the * enabled_tc bitmap would be 0x00001001; the driver * will set the numtc for netdev as 2 that will be * referenced by the netdev layer as TC 0 and 1. */ if (vsi->tc_config.enabled_tc & BIT(i)) netdev_set_tc_queue(netdev, vsi->tc_config.tc_info[i].netdev_tc, vsi->tc_config.tc_info[i].qcount, vsi->tc_config.tc_info[i].qoffset); } if (pf->flags & I40E_FLAG_TC_MQPRIO) return; /* Assign UP2TC map for the VSI */ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { /* Get the actual TC# for the UP */ u8 ets_tc = dcbcfg->etscfg.prioritytable[i]; /* Get the mapped netdev TC# for the UP */ netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc; netdev_set_prio_tc_map(netdev, i, netdev_tc); } } /** * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map * @vsi: the VSI being configured * @ctxt: the ctxt buffer returned from AQ VSI update param command **/ static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt) { /* copy just the sections touched not the entire info * since not all sections are valid as returned by * update vsi params */ vsi->info.mapping_flags = ctxt->info.mapping_flags; memcpy(&vsi->info.queue_mapping, &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping)); memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping, sizeof(vsi->info.tc_mapping)); } /** * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map * @vsi: VSI to be configured * @enabled_tc: TC bitmap * * This configures a particular VSI for TCs that are mapped to the * given TC bitmap. It uses default bandwidth share for TCs across * VSIs to configure TC for a particular VSI. * * NOTE: * It is expected that the VSI queues have been quisced before calling * this function. 
**/ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) { u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0}; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_vsi_context ctxt; int ret = 0; int i; /* Check if enabled_tc is same as existing or new TCs */ if (vsi->tc_config.enabled_tc == enabled_tc && vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL) return ret; /* Enable ETS TCs with equal BW Share for now across all VSIs */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (enabled_tc & BIT(i)) bw_share[i] = 1; } ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share); if (ret) { struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; dev_info(&pf->pdev->dev, "Failed configuring TC map %d for VSI %d\n", enabled_tc, vsi->seid); ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); if (ret) { dev_info(&pf->pdev->dev, "Failed querying vsi bw info, err %s aq_err %s\n", i40e_stat_str(hw, ret), i40e_aq_str(hw, hw->aq.asq_last_status)); goto out; } if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) { u8 valid_tc = bw_config.tc_valid_bits & enabled_tc; if (!valid_tc) valid_tc = bw_config.tc_valid_bits; /* Always enable TC0, no matter what */ valid_tc |= 1; dev_info(&pf->pdev->dev, "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n", enabled_tc, bw_config.tc_valid_bits, valid_tc); enabled_tc = valid_tc; } ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share); if (ret) { dev_err(&pf->pdev->dev, "Unable to configure TC map %d for VSI %d\n", enabled_tc, vsi->seid); goto out; } } /* Update Queue Pairs Mapping for currently enabled UPs */ ctxt.seid = vsi->seid; ctxt.pf_num = vsi->back->hw.pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; ctxt.info = vsi->info; if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) { ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc); if (ret) goto out; } else { i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); } /* On destroying the qdisc, reset vsi->rss_size, as number of enabled * queues changed. 
*/ if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) { vsi->rss_size = min_t(int, vsi->back->alloc_rss_size, vsi->num_queue_pairs); ret = i40e_vsi_config_rss(vsi); if (ret) { dev_info(&vsi->back->pdev->dev, "Failed to reconfig rss for num_queues\n"); return ret; } vsi->reconfig_rss = false; } if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA; } /* Update the VSI after updating the VSI queue-mapping * information */ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "Update vsi tc config failed, err %s aq_err %s\n", i40e_stat_str(hw, ret), i40e_aq_str(hw, hw->aq.asq_last_status)); goto out; } /* update the local VSI info with updated queue map */ i40e_vsi_update_queue_map(vsi, &ctxt); vsi->info.valid_sections = 0; /* Update current VSI BW information */ ret = i40e_vsi_get_bw_info(vsi); if (ret) { dev_info(&pf->pdev->dev, "Failed updating vsi bw info, err %s aq_err %s\n", i40e_stat_str(hw, ret), i40e_aq_str(hw, hw->aq.asq_last_status)); goto out; } /* Update the netdev TC setup */ i40e_vsi_config_netdev_tc(vsi, enabled_tc); out: return ret; } /** * i40e_get_link_speed - Returns link speed for the interface * @vsi: VSI to be configured * **/ static int i40e_get_link_speed(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; switch (pf->hw.phy.link_info.link_speed) { case I40E_LINK_SPEED_40GB: return 40000; case I40E_LINK_SPEED_25GB: return 25000; case I40E_LINK_SPEED_20GB: return 20000; case I40E_LINK_SPEED_10GB: return 10000; case I40E_LINK_SPEED_1GB: return 1000; default: return -EINVAL; } } /** * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate * @vsi: VSI to be configured * @seid: seid of the channel/VSI * @max_tx_rate: max TX rate to be configured as BW limit * * Helper function to set BW limit for a given VSI **/ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate) { struct i40e_pf *pf = vsi->back; u64 credits = 0; int speed = 0; int ret = 0; speed = i40e_get_link_speed(vsi); if (max_tx_rate > speed) { dev_err(&pf->pdev->dev, "Invalid max tx rate %llu specified for VSI seid %d.", max_tx_rate, seid); return -EINVAL; } if (max_tx_rate && max_tx_rate < 50) { dev_warn(&pf->pdev->dev, "Setting max tx rate to minimum usable value of 50Mbps.\n"); max_tx_rate = 50; } /* Tx rate credits are in values of 50Mbps, 0 is disabled */ credits = max_tx_rate; do_div(credits, I40E_BW_CREDIT_DIVISOR); ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits, I40E_MAX_BW_INACTIVE_ACCUM, NULL); if (ret) dev_err(&pf->pdev->dev, "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n", max_tx_rate, seid, i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return ret; } /** * i40e_remove_queue_channels - Remove queue channels for the TCs * @vsi: VSI to be configured * * Remove queue channels for the TCs **/ static void i40e_remove_queue_channels(struct i40e_vsi *vsi) { enum i40e_admin_queue_err last_aq_status; struct i40e_cloud_filter *cfilter; struct i40e_channel *ch, *ch_tmp; struct i40e_pf *pf = vsi->back; struct hlist_node *node; int ret, i; /* Reset rss size that was stored when reconfiguring rss for * channel VSIs with non-power-of-2 queue count. 
*/ vsi->current_rss_size = 0; /* perform cleanup for channels if they exist */ if (list_empty(&vsi->ch_list)) return; list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { struct i40e_vsi *p_vsi; list_del(&ch->list); p_vsi = ch->parent_vsi; if (!p_vsi || !ch->initialized) { kfree(ch); continue; } /* Reset queue contexts */ for (i = 0; i < ch->num_queue_pairs; i++) { struct i40e_ring *tx_ring, *rx_ring; u16 pf_q; pf_q = ch->base_queue + i; tx_ring = vsi->tx_rings[pf_q]; tx_ring->ch = NULL; rx_ring = vsi->rx_rings[pf_q]; rx_ring->ch = NULL; } /* Reset BW configured for this VSI via mqprio */ ret = i40e_set_bw_limit(vsi, ch->seid, 0); if (ret) dev_info(&vsi->back->pdev->dev, "Failed to reset tx rate for ch->seid %u\n", ch->seid); /* delete cloud filters associated with this channel */ hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list, cloud_node) { if (cfilter->seid != ch->seid) continue; hash_del(&cfilter->cloud_node); if (cfilter->dst_port) ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, false); else ret = i40e_add_del_cloud_filter(vsi, cfilter, false); last_aq_status = pf->hw.aq.asq_last_status; if (ret) dev_info(&pf->pdev->dev, "Failed to delete cloud filter, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, last_aq_status)); kfree(cfilter); } /* delete VSI from FW */ ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, NULL); if (ret) dev_err(&vsi->back->pdev->dev, "unable to remove channel (%d) for parent VSI(%d)\n", ch->seid, p_vsi->seid); kfree(ch); } INIT_LIST_HEAD(&vsi->ch_list); } /** * i40e_is_any_channel - channel exist or not * @vsi: ptr to VSI to which channels are associated with * * Returns true or false if channel(s) exist for associated VSI or not **/ static bool i40e_is_any_channel(struct i40e_vsi *vsi) { struct i40e_channel *ch, *ch_tmp; list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { if (ch->initialized) return true; } return false; } /** * i40e_get_max_queues_for_channel * @vsi: ptr to VSI to which channels are associated with * * Helper function which returns max value among the queue counts set on the * channels/TCs created. **/ static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi) { struct i40e_channel *ch, *ch_tmp; int max = 0; list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { if (!ch->initialized) continue; if (ch->num_queue_pairs > max) max = ch->num_queue_pairs; } return max; } /** * i40e_validate_num_queues - validate num_queues w.r.t channel * @pf: ptr to PF device * @num_queues: number of queues * @vsi: the parent VSI * @reconfig_rss: indicates should the RSS be reconfigured or not * * This function validates number of queues in the context of new channel * which is being established and determines if RSS should be reconfigured * or not for parent VSI. **/ static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues, struct i40e_vsi *vsi, bool *reconfig_rss) { int max_ch_queues; if (!reconfig_rss) return -EINVAL; *reconfig_rss = false; if (vsi->current_rss_size) { if (num_queues > vsi->current_rss_size) { dev_dbg(&pf->pdev->dev, "Error: num_queues (%d) > vsi's current_size(%d)\n", num_queues, vsi->current_rss_size); return -EINVAL; } else if ((num_queues < vsi->current_rss_size) && (!is_power_of_2(num_queues))) { dev_dbg(&pf->pdev->dev, "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n", num_queues, vsi->current_rss_size); return -EINVAL; } } if (!is_power_of_2(num_queues)) { /* Find the max num_queues configured for channel if channel * exist. 
* if channel exist, then enforce 'num_queues' to be more than * max ever queues configured for channel. */ max_ch_queues = i40e_get_max_queues_for_channel(vsi); if (num_queues < max_ch_queues) { dev_dbg(&pf->pdev->dev, "Error: num_queues (%d) < max queues configured for channel(%d)\n", num_queues, max_ch_queues); return -EINVAL; } *reconfig_rss = true; } return 0; } /** * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size * @vsi: the VSI being setup * @rss_size: size of RSS, accordingly LUT gets reprogrammed * * This function reconfigures RSS by reprogramming LUTs using 'rss_size' **/ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size) { struct i40e_pf *pf = vsi->back; u8 seed[I40E_HKEY_ARRAY_SIZE]; struct i40e_hw *hw = &pf->hw; int local_rss_size; u8 *lut; int ret; if (!vsi->rss_size) return -EINVAL; if (rss_size > vsi->rss_size) return -EINVAL; local_rss_size = min_t(int, vsi->rss_size, rss_size); lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) return -ENOMEM; /* Ignoring user configured lut if there is one */ i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size); /* Use user configured hash key if there is one, otherwise * use default. */ if (vsi->rss_hkey_user) memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); else netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); if (ret) { dev_info(&pf->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", i40e_stat_str(hw, ret), i40e_aq_str(hw, hw->aq.asq_last_status)); kfree(lut); return ret; } kfree(lut); /* Do the update w.r.t. storing rss_size */ if (!vsi->orig_rss_size) vsi->orig_rss_size = vsi->rss_size; vsi->current_rss_size = local_rss_size; return ret; } /** * i40e_channel_setup_queue_map - Setup a channel queue map * @pf: ptr to PF device * @vsi: the VSI being setup * @ctxt: VSI context structure * @ch: ptr to channel structure * * Setup queue map for a specific channel **/ static void i40e_channel_setup_queue_map(struct i40e_pf *pf, struct i40e_vsi_context *ctxt, struct i40e_channel *ch) { u16 qcount, qmap, sections = 0; u8 offset = 0; int pow; sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; sections |= I40E_AQ_VSI_PROP_SCHED_VALID; qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix); ch->num_queue_pairs = qcount; /* find the next higher power-of-2 of num queue pairs */ pow = ilog2(qcount); if (!is_power_of_2(qcount)) pow++; qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); /* Setup queue TC[0].qmap for given VSI context */ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */ ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue); ctxt->info.valid_sections |= cpu_to_le16(sections); } /** * i40e_add_channel - add a channel by adding VSI * @pf: ptr to PF device * @uplink_seid: underlying HW switching element (VEB) ID * @ch: ptr to channel structure * * Add a channel (VSI) using add_vsi and queue_map **/ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid, struct i40e_channel *ch) { struct i40e_hw *hw = &pf->hw; struct i40e_vsi_context ctxt; u8 enabled_tc = 0x1; /* TC0 enabled */ int ret; if (ch->type != I40E_VSI_VMDQ2) { dev_info(&pf->pdev->dev, "add new vsi failed, ch->type %d\n", ch->type); return -EINVAL; } memset(&ctxt, 0, sizeof(ctxt)); ctxt.pf_num = hw->pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = uplink_seid; ctxt.connection_type = 
I40E_AQ_VSI_CONN_TYPE_NORMAL; if (ch->type == I40E_VSI_VMDQ2) ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); } /* Set queue map for a given VSI context */ i40e_channel_setup_queue_map(pf, &ctxt, ch); /* Now time to create VSI */ ret = i40e_aq_add_vsi(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "add new vsi failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -ENOENT; } /* Success, update channel, set enabled_tc only if the channel * is not a macvlan */ ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc; ch->seid = ctxt.seid; ch->vsi_number = ctxt.vsi_number; ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx); /* copy just the sections touched not the entire info * since not all sections are valid as returned by * update vsi params */ ch->info.mapping_flags = ctxt.info.mapping_flags; memcpy(&ch->info.queue_mapping, &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping)); memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping, sizeof(ctxt.info.tc_mapping)); return 0; } static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch, u8 *bw_share) { struct i40e_aqc_configure_vsi_tc_bw_data bw_data; i40e_status ret; int i; bw_data.tc_valid_bits = ch->enabled_tc; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) bw_data.tc_bw_credits[i] = bw_share[i]; ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid, &bw_data, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n", vsi->back->hw.aq.asq_last_status, ch->seid); return -EINVAL; } for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) ch->info.qs_handle[i] = bw_data.qs_handles[i]; return 0; } /** * i40e_channel_config_tx_ring - config TX ring associated with new channel * @pf: ptr to PF device * @vsi: the VSI being setup * @ch: ptr to channel structure * * Configure TX rings associated with channel (VSI) since queues are being * from parent VSI. 
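* Each queue pair taken from the parent VSI has its Tx and Rx ring ->ch pointer set to the new channel, and the channel VSI is given an equal ETS bandwidth share across its enabled TCs.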
**/ static int i40e_channel_config_tx_ring(struct i40e_pf *pf, struct i40e_vsi *vsi, struct i40e_channel *ch) { i40e_status ret; int i; u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0}; /* Enable ETS TCs with equal BW Share for now across all VSIs */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (ch->enabled_tc & BIT(i)) bw_share[i] = 1; } /* configure BW for new VSI */ ret = i40e_channel_config_bw(vsi, ch, bw_share); if (ret) { dev_info(&vsi->back->pdev->dev, "Failed configuring TC map %d for channel (seid %u)\n", ch->enabled_tc, ch->seid); return ret; } for (i = 0; i < ch->num_queue_pairs; i++) { struct i40e_ring *tx_ring, *rx_ring; u16 pf_q; pf_q = ch->base_queue + i; /* Get to TX ring ptr of main VSI, for re-setup TX queue * context */ tx_ring = vsi->tx_rings[pf_q]; tx_ring->ch = ch; /* Get the RX ring ptr */ rx_ring = vsi->rx_rings[pf_q]; rx_ring->ch = ch; } return 0; } /** * i40e_setup_hw_channel - setup new channel * @pf: ptr to PF device * @vsi: the VSI being setup * @ch: ptr to channel structure * @uplink_seid: underlying HW switching element (VEB) ID * @type: type of channel to be created (VMDq2/VF) * * Setup new channel (VSI) based on specified type (VMDq2/VF) * and configures TX rings accordingly **/ static inline int i40e_setup_hw_channel(struct i40e_pf *pf, struct i40e_vsi *vsi, struct i40e_channel *ch, u16 uplink_seid, u8 type) { int ret; ch->initialized = false; ch->base_queue = vsi->next_base_queue; ch->type = type; /* Proceed with creation of channel (VMDq2) VSI */ ret = i40e_add_channel(pf, uplink_seid, ch); if (ret) { dev_info(&pf->pdev->dev, "failed to add_channel using uplink_seid %u\n", uplink_seid); return ret; } /* Mark the successful creation of channel */ ch->initialized = true; /* Reconfigure TX queues using QTX_CTL register */ ret = i40e_channel_config_tx_ring(pf, vsi, ch); if (ret) { dev_info(&pf->pdev->dev, "failed to configure TX rings for channel %u\n", ch->seid); return ret; } /* update 'next_base_queue' */ vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs; dev_dbg(&pf->pdev->dev, "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n", ch->seid, ch->vsi_number, ch->stat_counter_idx, ch->num_queue_pairs, vsi->next_base_queue); return ret; } /** * i40e_setup_channel - setup new channel using uplink element * @pf: ptr to PF device * @type: type of channel to be created (VMDq2/VF) * @uplink_seid: underlying HW switching element (VEB) ID * @ch: ptr to channel structure * * Setup new channel (VSI) based on specified type (VMDq2/VF) * and uplink switching element (uplink_seid) **/ static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi, struct i40e_channel *ch) { u8 vsi_type; u16 seid; int ret; if (vsi->type == I40E_VSI_MAIN) { vsi_type = I40E_VSI_VMDQ2; } else { dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n", vsi->type); return false; } /* underlying switching element */ seid = pf->vsi[pf->lan_vsi]->uplink_seid; /* create channel (VSI), configure TX rings */ ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type); if (ret) { dev_err(&pf->pdev->dev, "failed to setup hw_channel\n"); return false; } return ch->initialized ? true : false; } /** * i40e_validate_and_set_switch_mode - sets up switch mode correctly * @vsi: ptr to VSI which has PF backing * * Sets up switch mode correctly if it needs to be changed and perform * what are allowed modes. 
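* If the device already reports a cloud filter switch mode, only the non-tunneled mode (mode2) is accepted; otherwise the L4 TCP type and non-tunnel cloud filter mode bits are programmed through the set switch config command.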
**/ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi) { u8 mode; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int ret; ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities); if (ret) return -EINVAL; if (hw->dev_caps.switch_mode) { /* if switch mode is set, support mode2 (non-tunneled for * cloud filter) for now */ u32 switch_mode = hw->dev_caps.switch_mode & I40E_SWITCH_MODE_MASK; if (switch_mode >= I40E_CLOUD_FILTER_MODE1) { if (switch_mode == I40E_CLOUD_FILTER_MODE2) return 0; dev_err(&pf->pdev->dev, "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n", hw->dev_caps.switch_mode); return -EINVAL; } } /* Set Bit 7 to be valid */ mode = I40E_AQ_SET_SWITCH_BIT7_VALID; /* Set L4type for TCP support */ mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP; /* Set cloud filter mode */ mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL; /* Prep mode field for set_switch_config */ ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags, pf->last_sw_conf_valid_flags, mode, NULL); if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH) dev_err(&pf->pdev->dev, "couldn't set switch config bits, err %s aq_err %s\n", i40e_stat_str(hw, ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return ret; } /** * i40e_create_queue_channel - function to create channel * @vsi: VSI to be configured * @ch: ptr to channel (it contains channel specific params) * * This function creates channel (VSI) using num_queues specified by user, * reconfigs RSS if needed. **/ int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch) { struct i40e_pf *pf = vsi->back; bool reconfig_rss; int err; if (!ch) return -EINVAL; if (!ch->num_queue_pairs) { dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n", ch->num_queue_pairs); return -EINVAL; } /* validate user requested num_queues for channel */ err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi, &reconfig_rss); if (err) { dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n", ch->num_queue_pairs); return -EINVAL; } /* By default we are in VEPA mode, if this is the first VF/VMDq * VSI to be added switch to VEB mode. */ if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) || (!i40e_is_any_channel(vsi))) { if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) { dev_dbg(&pf->pdev->dev, "Failed to create channel. 
Override queues (%u) not power of 2\n", vsi->tc_config.tc_info[0].qcount); return -EINVAL; } if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; if (vsi->type == I40E_VSI_MAIN) { if (pf->flags & I40E_FLAG_TC_MQPRIO) i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); else i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); } } /* now onwards for main VSI, number of queues will be value * of TC0's queue count */ } /* By this time, vsi->cnt_q_avail shall be set to non-zero and * it should be more than num_queues */ if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) { dev_dbg(&pf->pdev->dev, "Error: cnt_q_avail (%u) less than num_queues %d\n", vsi->cnt_q_avail, ch->num_queue_pairs); return -EINVAL; } /* reconfig_rss only if vsi type is MAIN_VSI */ if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) { err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs); if (err) { dev_info(&pf->pdev->dev, "Error: unable to reconfig rss for num_queues (%u)\n", ch->num_queue_pairs); return -EINVAL; } } if (!i40e_setup_channel(pf, vsi, ch)) { dev_info(&pf->pdev->dev, "Failed to setup channel\n"); return -EINVAL; } dev_info(&pf->pdev->dev, "Setup channel (id:%u) utilizing num_queues %d\n", ch->seid, ch->num_queue_pairs); /* configure VSI for BW limit */ if (ch->max_tx_rate) { u64 credits = ch->max_tx_rate; if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate)) return -EINVAL; do_div(credits, I40E_BW_CREDIT_DIVISOR); dev_dbg(&pf->pdev->dev, "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", ch->max_tx_rate, credits, ch->seid); } /* in case of VF, this will be main SRIOV VSI */ ch->parent_vsi = vsi; /* and update main_vsi's count for queue_available to use */ vsi->cnt_q_avail -= ch->num_queue_pairs; return 0; } /** * i40e_configure_queue_channels - Add queue channel for the given TCs * @vsi: VSI to be configured * * Configures queue channel mapping to the given TCs **/ static int i40e_configure_queue_channels(struct i40e_vsi *vsi) { struct i40e_channel *ch; u64 max_rate = 0; int ret = 0, i; /* Create app vsi with the TCs. 
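One channel VSI is created per additional enabled TC, inheriting that TC's queue count and queue offset along with the per-TC max rate converted from bytes/s to Mbit/s.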
Main VSI with TC0 is already set up */ vsi->tc_seid_map[0] = vsi->seid; for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (vsi->tc_config.enabled_tc & BIT(i)) { ch = kzalloc(sizeof(*ch), GFP_KERNEL); if (!ch) { ret = -ENOMEM; goto err_free; } INIT_LIST_HEAD(&ch->list); ch->num_queue_pairs = vsi->tc_config.tc_info[i].qcount; ch->base_queue = vsi->tc_config.tc_info[i].qoffset; /* Bandwidth limit through tc interface is in bytes/s, * change to Mbit/s */ max_rate = vsi->mqprio_qopt.max_rate[i]; do_div(max_rate, I40E_BW_MBPS_DIVISOR); ch->max_tx_rate = max_rate; list_add_tail(&ch->list, &vsi->ch_list); ret = i40e_create_queue_channel(vsi, ch); if (ret) { dev_err(&vsi->back->pdev->dev, "Failed creating queue channel with TC%d: queues %d\n", i, ch->num_queue_pairs); goto err_free; } vsi->tc_seid_map[i] = ch->seid; } } return ret; err_free: i40e_remove_queue_channels(vsi); return ret; } /** * i40e_veb_config_tc - Configure TCs for given VEB * @veb: given VEB * @enabled_tc: TC bitmap * * Configures given TC bitmap for VEB (switching) element **/ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) { struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0}; struct i40e_pf *pf = veb->pf; int ret = 0; int i; /* No TCs or already enabled TCs just return */ if (!enabled_tc || veb->enabled_tc == enabled_tc) return ret; bw_data.tc_valid_bits = enabled_tc; /* bw_data.absolute_credits is not set (relative) */ /* Enable ETS TCs with equal BW Share for now */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (enabled_tc & BIT(i)) bw_data.tc_bw_share_credits[i] = 1; } ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid, &bw_data, NULL); if (ret) { dev_info(&pf->pdev->dev, "VEB bw config failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto out; } /* Update the BW information */ ret = i40e_veb_get_bw_info(veb); if (ret) { dev_info(&pf->pdev->dev, "Failed getting veb bw config, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } out: return ret; } #ifdef CONFIG_I40E_DCB /** * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs * @pf: PF struct * * Reconfigure VEB/VSIs on a given PF; it is assumed that * the caller would've quiesce all the VSIs before calling * this function **/ static void i40e_dcb_reconfigure(struct i40e_pf *pf) { u8 tc_map = 0; int ret; u8 v; /* Enable the TCs available on PF to all VEBs */ tc_map = i40e_pf_get_tc_map(pf); for (v = 0; v < I40E_MAX_VEB; v++) { if (!pf->veb[v]) continue; ret = i40e_veb_config_tc(pf->veb[v], tc_map); if (ret) { dev_info(&pf->pdev->dev, "Failed configuring TC for VEB seid=%d\n", pf->veb[v]->seid); /* Will try to configure as many components */ } } /* Update each VSI */ for (v = 0; v < pf->num_alloc_vsi; v++) { if (!pf->vsi[v]) continue; /* - Enable all TCs for the LAN VSI * - For all others keep them at TC0 for now */ if (v == pf->lan_vsi) tc_map = i40e_pf_get_tc_map(pf); else tc_map = I40E_DEFAULT_TRAFFIC_CLASS; ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); if (ret) { dev_info(&pf->pdev->dev, "Failed configuring TC for VSI seid=%d\n", pf->vsi[v]->seid); /* Will try to configure as many components */ } else { /* Re-configure VSI vectors based on updated TC map */ i40e_vsi_map_rings_to_vectors(pf->vsi[v]); if (pf->vsi[v]->netdev) i40e_dcbnl_set_all(pf->vsi[v]); } } } /** * i40e_resume_port_tx - Resume port Tx * @pf: PF struct * * Resume a port's Tx and issue a PF reset in case of failure to * resume. 
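* The reset is not issued synchronously; __I40E_PF_RESET_REQUESTED is set and the service task is scheduled to carry it out.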
**/ static int i40e_resume_port_tx(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; int ret; ret = i40e_aq_resume_port_tx(hw, NULL); if (ret) { dev_info(&pf->pdev->dev, "Resume Port Tx failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Schedule PF reset to recover */ set_bit(__I40E_PF_RESET_REQUESTED, pf->state); i40e_service_event_schedule(pf); } return ret; } /** * i40e_init_pf_dcb - Initialize DCB configuration * @pf: PF being configured * * Query the current DCB configuration and cache it * in the hardware structure **/ static int i40e_init_pf_dcb(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; int err = 0; /* Do not enable DCB for SW1 and SW2 images even if the FW is capable * Also do not enable DCBx if FW LLDP agent is disabled */ if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) || (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)) { dev_info(&pf->pdev->dev, "DCB is not supported or FW LLDP is disabled\n"); err = I40E_NOT_SUPPORTED; goto out; } err = i40e_init_dcb(hw, true); if (!err) { /* Device/Function is not DCBX capable */ if ((!hw->func_caps.dcb) || (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) { dev_info(&pf->pdev->dev, "DCBX offload is not supported or is disabled for this PF.\n"); } else { /* When status is not DISABLED then DCBX in FW */ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE; pf->flags |= I40E_FLAG_DCB_CAPABLE; /* Enable DCB tagging only when more than one TC * or explicitly disable if only one TC */ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) pf->flags |= I40E_FLAG_DCB_ENABLED; else pf->flags &= ~I40E_FLAG_DCB_ENABLED; dev_dbg(&pf->pdev->dev, "DCBX offload is supported for this PF.\n"); } } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) { dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n"); pf->flags |= I40E_FLAG_DISABLE_FW_LLDP; } else { dev_info(&pf->pdev->dev, "Query for DCB configuration failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } out: return err; } #endif /* CONFIG_I40E_DCB */ #define SPEED_SIZE 14 #define FC_SIZE 8 /** * i40e_print_link_message - print link up or down * @vsi: the VSI for which link needs a message * @isup: true of link is up, false otherwise */ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) { enum i40e_aq_link_speed new_speed; struct i40e_pf *pf = vsi->back; char *speed = "Unknown"; char *fc = "Unknown"; char *fec = ""; char *req_fec = ""; char *an = ""; if (isup) new_speed = pf->hw.phy.link_info.link_speed; else new_speed = I40E_LINK_SPEED_UNKNOWN; if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed)) return; vsi->current_isup = isup; vsi->current_speed = new_speed; if (!isup) { netdev_info(vsi->netdev, "NIC Link is Down\n"); return; } /* Warn user if link speed on NPAR enabled partition is not at * least 10GB */ if (pf->hw.func_caps.npar_enable && (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB || pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB)) netdev_warn(vsi->netdev, "The partition detected link speed that is less than 10Gbps\n"); switch (pf->hw.phy.link_info.link_speed) { case I40E_LINK_SPEED_40GB: speed = "40 G"; break; case I40E_LINK_SPEED_20GB: speed = "20 G"; break; case I40E_LINK_SPEED_25GB: speed = "25 G"; break; case I40E_LINK_SPEED_10GB: speed = "10 G"; break; case I40E_LINK_SPEED_5GB: speed = "5 G"; break; case I40E_LINK_SPEED_2_5GB: speed = "2.5 G"; break; case I40E_LINK_SPEED_1GB: speed = "1000 M"; break; case 
I40E_LINK_SPEED_100MB: speed = "100 M"; break; default: break; } switch (pf->hw.fc.current_mode) { case I40E_FC_FULL: fc = "RX/TX"; break; case I40E_FC_TX_PAUSE: fc = "TX"; break; case I40E_FC_RX_PAUSE: fc = "RX"; break; default: fc = "None"; break; } if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) { req_fec = "None"; fec = "None"; an = "False"; if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) an = "True"; if (pf->hw.phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) fec = "CL74 FC-FEC/BASE-R"; else if (pf->hw.phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) fec = "CL108 RS-FEC"; /* 'CL108 RS-FEC' should be displayed when RS is requested, or * both RS and FC are requested */ if (vsi->back->hw.phy.link_info.req_fec_info & (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) { if (vsi->back->hw.phy.link_info.req_fec_info & I40E_AQ_REQUEST_FEC_RS) req_fec = "CL108 RS-FEC"; else req_fec = "CL74 FC-FEC/BASE-R"; } netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", speed, req_fec, fec, an, fc); } else { netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n", speed, fc); } } /** * i40e_up_complete - Finish the last steps of bringing up a connection * @vsi: the VSI being configured **/ static int i40e_up_complete(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int err; if (pf->flags & I40E_FLAG_MSIX_ENABLED) i40e_vsi_configure_msix(vsi); else i40e_configure_msi_and_legacy(vsi); /* start rings */ err = i40e_vsi_start_rings(vsi); if (err) return err; clear_bit(__I40E_VSI_DOWN, vsi->state); i40e_napi_enable_all(vsi); i40e_vsi_enable_irq(vsi); if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && (vsi->netdev)) { i40e_print_link_message(vsi, true); netif_tx_start_all_queues(vsi->netdev); netif_carrier_on(vsi->netdev); } /* replay FDIR SB filters */ if (vsi->type == I40E_VSI_FDIR) { /* reset fd counters */ pf->fd_add_err = 0; pf->fd_atr_cnt = 0; i40e_fdir_filter_restore(vsi); } /* On the next run of the service_task, notify any clients of the new * opened netdev */ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); i40e_service_event_schedule(pf); return 0; } /** * i40e_vsi_reinit_locked - Reset the VSI * @vsi: the VSI being configured * * Rebuild the ring structs after some configuration * has changed, e.g. MTU size. **/ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; WARN_ON(in_interrupt()); while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) usleep_range(1000, 2000); i40e_down(vsi); i40e_up(vsi); clear_bit(__I40E_CONFIG_BUSY, pf->state); } /** * i40e_up - Bring the connection back up after being down * @vsi: the VSI being configured **/ int i40e_up(struct i40e_vsi *vsi) { int err; err = i40e_vsi_configure(vsi); if (!err) err = i40e_up_complete(vsi); return err; } /** * i40e_force_link_state - Force the link status * @pf: board private structure * @is_up: whether the link state should be forced up or down **/ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) { struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_aq_set_phy_config config = {0}; struct i40e_hw *hw = &pf->hw; i40e_status err; u64 mask; u8 speed; /* Card might've been put in an unstable state by other drivers * and applications, which causes incorrect speed values being * set on startup. 
In order to clear speed registers, we call * get_phy_capabilities twice, once to get initial state of * available speeds, and once to get current PHY config. */ err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL); if (err) { dev_err(&pf->pdev->dev, "failed to get phy cap., ret = %s last_status = %s\n", i40e_stat_str(hw, err), i40e_aq_str(hw, hw->aq.asq_last_status)); return err; } speed = abilities.link_speed; /* Get the current phy config */ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); if (err) { dev_err(&pf->pdev->dev, "failed to get phy cap., ret = %s last_status = %s\n", i40e_stat_str(hw, err), i40e_aq_str(hw, hw->aq.asq_last_status)); return err; } /* If link needs to go up, but was not forced to go down, * and its speed values are OK, no need for a flap */ if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0) return I40E_SUCCESS; /* To force link we need to set bits for all supported PHY types, * but there are now more than 32, so we need to split the bitmap * across two fields. */ mask = I40E_PHY_TYPES_BITMASK; config.phy_type = is_up ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0; config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0; /* Copy the old settings, except of phy_type */ config.abilities = abilities.abilities; if (abilities.link_speed != 0) config.link_speed = abilities.link_speed; else config.link_speed = speed; config.eee_capability = abilities.eee_capability; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; config.fec_config = abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_PHY_FEC_CONFIG_MASK; err = i40e_aq_set_phy_config(hw, &config, NULL); if (err) { dev_err(&pf->pdev->dev, "set phy config ret = %s last_status = %s\n", i40e_stat_str(&pf->hw, err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return err; } /* Update the link info */ err = i40e_update_link_info(hw); if (err) { /* Wait a little bit (on 40G cards it sometimes takes a really * long time for link to come back from the atomic reset) * and try once more */ msleep(1000); i40e_update_link_info(hw); } i40e_aq_set_link_restart_an(hw, true, NULL); return I40E_SUCCESS; } /** * i40e_down - Shutdown the connection processing * @vsi: the VSI being stopped **/ void i40e_down(struct i40e_vsi *vsi) { int i; /* It is assumed that the caller of this function * sets the vsi->state __I40E_VSI_DOWN bit. */ if (vsi->netdev) { netif_carrier_off(vsi->netdev); netif_tx_disable(vsi->netdev); } i40e_vsi_disable_irq(vsi); i40e_vsi_stop_rings(vsi); if (vsi->type == I40E_VSI_MAIN && vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) i40e_force_link_state(vsi->back, false); i40e_napi_disable_all(vsi); for (i = 0; i < vsi->num_queue_pairs; i++) { i40e_clean_tx_ring(vsi->tx_rings[i]); if (i40e_enabled_xdp_vsi(vsi)) { /* Make sure that in-progress ndo_xdp_xmit * calls are completed. 
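* The synchronize_rcu() below waits for those transmit contexts to finish before the XDP Tx ring is cleaned.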
*/ synchronize_rcu(); i40e_clean_tx_ring(vsi->xdp_rings[i]); } i40e_clean_rx_ring(vsi->rx_rings[i]); } } /** * i40e_validate_mqprio_qopt- validate queue mapping info * @vsi: the VSI being configured * @mqprio_qopt: queue parametrs **/ static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi, struct tc_mqprio_qopt_offload *mqprio_qopt) { u64 sum_max_rate = 0; u64 max_rate = 0; int i; if (mqprio_qopt->qopt.offset[0] != 0 || mqprio_qopt->qopt.num_tc < 1 || mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS) return -EINVAL; for (i = 0; ; i++) { if (!mqprio_qopt->qopt.count[i]) return -EINVAL; if (mqprio_qopt->min_rate[i]) { dev_err(&vsi->back->pdev->dev, "Invalid min tx rate (greater than 0) specified\n"); return -EINVAL; } max_rate = mqprio_qopt->max_rate[i]; do_div(max_rate, I40E_BW_MBPS_DIVISOR); sum_max_rate += max_rate; if (i >= mqprio_qopt->qopt.num_tc - 1) break; if (mqprio_qopt->qopt.offset[i + 1] != (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) return -EINVAL; } if (vsi->num_queue_pairs < (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) { return -EINVAL; } if (sum_max_rate > i40e_get_link_speed(vsi)) { dev_err(&vsi->back->pdev->dev, "Invalid max tx rate specified\n"); return -EINVAL; } return 0; } /** * i40e_vsi_set_default_tc_config - set default values for tc configuration * @vsi: the VSI being configured **/ static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi) { u16 qcount; int i; /* Only TC0 is enabled */ vsi->tc_config.numtc = 1; vsi->tc_config.enabled_tc = 1; qcount = min_t(int, vsi->alloc_queue_pairs, i40e_pf_get_max_q_per_tc(vsi->back)); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { /* For the TC that is not enabled set the offset to to default * queue and allocate one queue for the given TC. */ vsi->tc_config.tc_info[i].qoffset = 0; if (i == 0) vsi->tc_config.tc_info[i].qcount = qcount; else vsi->tc_config.tc_info[i].qcount = 1; vsi->tc_config.tc_info[i].netdev_tc = 0; } } /** * i40e_del_macvlan_filter * @hw: pointer to the HW structure * @seid: seid of the channel VSI * @macaddr: the mac address to apply as a filter * @aq_err: store the admin Q error * * This function deletes a mac filter on the channel VSI which serves as the * macvlan. Returns 0 on success. **/ static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid, const u8 *macaddr, int *aq_err) { struct i40e_aqc_remove_macvlan_element_data element; i40e_status status; memset(&element, 0, sizeof(element)); ether_addr_copy(element.mac_addr, macaddr); element.vlan_tag = 0; element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL); *aq_err = hw->aq.asq_last_status; return status; } /** * i40e_add_macvlan_filter * @hw: pointer to the HW structure * @seid: seid of the channel VSI * @macaddr: the mac address to apply as a filter * @aq_err: store the admin Q error * * This function adds a mac filter on the channel VSI which serves as the * macvlan. Returns 0 on success. 
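* The filter is added as a perfect-match MAC entry with no VLAN tag, and the admin queue status is reported back through @aq_err.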
**/ static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid, const u8 *macaddr, int *aq_err) { struct i40e_aqc_add_macvlan_element_data element; i40e_status status; u16 cmd_flags = 0; ether_addr_copy(element.mac_addr, macaddr); element.vlan_tag = 0; element.queue_number = 0; element.match_method = I40E_AQC_MM_ERR_NO_RES; cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; element.flags = cpu_to_le16(cmd_flags); status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL); *aq_err = hw->aq.asq_last_status; return status; } /** * i40e_reset_ch_rings - Reset the queue contexts in a channel * @vsi: the VSI we want to access * @ch: the channel we want to access */ static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch) { struct i40e_ring *tx_ring, *rx_ring; u16 pf_q; int i; for (i = 0; i < ch->num_queue_pairs; i++) { pf_q = ch->base_queue + i; tx_ring = vsi->tx_rings[pf_q]; tx_ring->ch = NULL; rx_ring = vsi->rx_rings[pf_q]; rx_ring->ch = NULL; } } /** * i40e_free_macvlan_channels * @vsi: the VSI we want to access * * This function frees the Qs of the channel VSI from * the stack and also deletes the channel VSIs which * serve as macvlans. */ static void i40e_free_macvlan_channels(struct i40e_vsi *vsi) { struct i40e_channel *ch, *ch_tmp; int ret; if (list_empty(&vsi->macvlan_list)) return; list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { struct i40e_vsi *parent_vsi; if (i40e_is_channel_macvlan(ch)) { i40e_reset_ch_rings(vsi, ch); clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev); netdev_set_sb_channel(ch->fwd->netdev, 0); kfree(ch->fwd); ch->fwd = NULL; } list_del(&ch->list); parent_vsi = ch->parent_vsi; if (!parent_vsi || !ch->initialized) { kfree(ch); continue; } /* remove the VSI */ ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, NULL); if (ret) dev_err(&vsi->back->pdev->dev, "unable to remove channel (%d) for parent VSI(%d)\n", ch->seid, parent_vsi->seid); kfree(ch); } vsi->macvlan_cnt = 0; } /** * i40e_fwd_ring_up - bring the macvlan device up * @vsi: the VSI we want to access * @vdev: macvlan netdevice * @fwd: the private fwd structure */ static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev, struct i40e_fwd_adapter *fwd) { int ret = 0, num_tc = 1, i, aq_err; struct i40e_channel *ch, *ch_tmp; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; if (list_empty(&vsi->macvlan_list)) return -EINVAL; /* Go through the list and find an available channel */ list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { if (!i40e_is_channel_macvlan(ch)) { ch->fwd = fwd; /* record configuration for macvlan interface in vdev */ for (i = 0; i < num_tc; i++) netdev_bind_sb_channel_queue(vsi->netdev, vdev, i, ch->num_queue_pairs, ch->base_queue); for (i = 0; i < ch->num_queue_pairs; i++) { struct i40e_ring *tx_ring, *rx_ring; u16 pf_q; pf_q = ch->base_queue + i; /* Get to TX ring ptr */ tx_ring = vsi->tx_rings[pf_q]; tx_ring->ch = ch; /* Get the RX ring ptr */ rx_ring = vsi->rx_rings[pf_q]; rx_ring->ch = ch; } break; } } /* Guarantee all rings are updated before we update the * MAC address filter. 
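* The write barrier below orders those ring->ch assignments against the MAC filter add that follows.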
*/ wmb(); /* Add a mac filter */ ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err); if (ret) { /* if we cannot add the MAC rule then disable the offload */ macvlan_release_l2fw_offload(vdev); for (i = 0; i < ch->num_queue_pairs; i++) { struct i40e_ring *rx_ring; u16 pf_q; pf_q = ch->base_queue + i; rx_ring = vsi->rx_rings[pf_q]; rx_ring->netdev = NULL; } dev_info(&pf->pdev->dev, "Error adding mac filter on macvlan err %s, aq_err %s\n", i40e_stat_str(hw, ret), i40e_aq_str(hw, aq_err)); netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n"); } return ret; } /** * i40e_setup_macvlans - create the channels which will be macvlans * @vsi: the VSI we want to access * @macvlan_cnt: no. of macvlans to be setup * @qcnt: no. of Qs per macvlan * @vdev: macvlan netdevice */ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt, struct net_device *vdev) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_vsi_context ctxt; u16 sections, qmap, num_qps; struct i40e_channel *ch; int i, pow, ret = 0; u8 offset = 0; if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt) return -EINVAL; num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt); /* find the next higher power-of-2 of num queue pairs */ pow = fls(roundup_pow_of_two(num_qps) - 1); qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); /* Setup context bits for the main VSI */ sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; sections |= I40E_AQ_VSI_PROP_SCHED_VALID; memset(&ctxt, 0, sizeof(ctxt)); ctxt.seid = vsi->seid; ctxt.pf_num = vsi->back->hw.pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; ctxt.info = vsi->info; ctxt.info.tc_mapping[0] = cpu_to_le16(qmap); ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); ctxt.info.valid_sections |= cpu_to_le16(sections); /* Reconfigure RSS for main VSI with new max queue count */ vsi->rss_size = max_t(u16, num_qps, qcnt); ret = i40e_vsi_config_rss(vsi); if (ret) { dev_info(&pf->pdev->dev, "Failed to reconfig RSS for num_queues (%u)\n", vsi->rss_size); return ret; } vsi->reconfig_rss = true; dev_dbg(&vsi->back->pdev->dev, "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size); vsi->next_base_queue = num_qps; vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps; /* Update the VSI after updating the VSI queue-mapping * information */ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "Update vsi tc config failed, err %s aq_err %s\n", i40e_stat_str(hw, ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return ret; } /* update the local VSI info with updated queue map */ i40e_vsi_update_queue_map(vsi, &ctxt); vsi->info.valid_sections = 0; /* Create channels for macvlans */ INIT_LIST_HEAD(&vsi->macvlan_list); for (i = 0; i < macvlan_cnt; i++) { ch = kzalloc(sizeof(*ch), GFP_KERNEL); if (!ch) { ret = -ENOMEM; goto err_free; } INIT_LIST_HEAD(&ch->list); ch->num_queue_pairs = qcnt; if (!i40e_setup_channel(pf, vsi, ch)) { ret = -EINVAL; goto err_free; } ch->parent_vsi = vsi; vsi->cnt_q_avail -= ch->num_queue_pairs; vsi->macvlan_cnt++; list_add_tail(&ch->list, &vsi->macvlan_list); } return ret; err_free: dev_info(&pf->pdev->dev, "Failed to setup macvlans\n"); i40e_free_macvlan_channels(vsi); return ret; } /** * i40e_fwd_add - configure macvlans * @netdev: net device to configure * @vdev: macvlan netdevice **/ static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev) { struct 
i40e_netdev_priv *np = netdev_priv(netdev); u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_fwd_adapter *fwd; int avail_macvlan, ret; if ((pf->flags & I40E_FLAG_DCB_ENABLED)) { netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n"); return ERR_PTR(-EINVAL); } if ((pf->flags & I40E_FLAG_TC_MQPRIO)) { netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n"); return ERR_PTR(-EINVAL); } if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) { netdev_info(netdev, "Not enough vectors available to support macvlans\n"); return ERR_PTR(-EINVAL); } /* The macvlan device has to be a single Q device so that the * tc_to_txq field can be reused to pick the tx queue. */ if (netif_is_multiqueue(vdev)) return ERR_PTR(-ERANGE); if (!vsi->macvlan_cnt) { /* reserve bit 0 for the pf device */ set_bit(0, vsi->fwd_bitmask); /* Try to reserve as many queues as possible for macvlans. First * reserve 3/4th of max vectors, then half, then quarter and * calculate Qs per macvlan as you go */ vectors = pf->num_lan_msix; if (vectors <= I40E_MAX_MACVLANS && vectors > 64) { /* allocate 4 Qs per macvlan and 32 Qs to the PF*/ q_per_macvlan = 4; macvlan_cnt = (vectors - 32) / 4; } else if (vectors <= 64 && vectors > 32) { /* allocate 2 Qs per macvlan and 16 Qs to the PF*/ q_per_macvlan = 2; macvlan_cnt = (vectors - 16) / 2; } else if (vectors <= 32 && vectors > 16) { /* allocate 1 Q per macvlan and 16 Qs to the PF*/ q_per_macvlan = 1; macvlan_cnt = vectors - 16; } else if (vectors <= 16 && vectors > 8) { /* allocate 1 Q per macvlan and 8 Qs to the PF */ q_per_macvlan = 1; macvlan_cnt = vectors - 8; } else { /* allocate 1 Q per macvlan and 1 Q to the PF */ q_per_macvlan = 1; macvlan_cnt = vectors - 1; } if (macvlan_cnt == 0) return ERR_PTR(-EBUSY); /* Quiesce VSI queues */ i40e_quiesce_vsi(vsi); /* sets up the macvlans but does not "enable" them */ ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan, vdev); if (ret) return ERR_PTR(ret); /* Unquiesce VSI */ i40e_unquiesce_vsi(vsi); } avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask, vsi->macvlan_cnt); if (avail_macvlan >= I40E_MAX_MACVLANS) return ERR_PTR(-EBUSY); /* create the fwd struct */ fwd = kzalloc(sizeof(*fwd), GFP_KERNEL); if (!fwd) return ERR_PTR(-ENOMEM); set_bit(avail_macvlan, vsi->fwd_bitmask); fwd->bit_no = avail_macvlan; netdev_set_sb_channel(vdev, avail_macvlan); fwd->netdev = vdev; if (!netif_running(netdev)) return fwd; /* Set fwd ring up */ ret = i40e_fwd_ring_up(vsi, vdev, fwd); if (ret) { /* unbind the queues and drop the subordinate channel config */ netdev_unbind_sb_channel(netdev, vdev); netdev_set_sb_channel(vdev, 0); kfree(fwd); return ERR_PTR(-EINVAL); } return fwd; } /** * i40e_del_all_macvlans - Delete all the mac filters on the channels * @vsi: the VSI we want to access */ static void i40e_del_all_macvlans(struct i40e_vsi *vsi) { struct i40e_channel *ch, *ch_tmp; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int aq_err, ret = 0; if (list_empty(&vsi->macvlan_list)) return; list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { if (i40e_is_channel_macvlan(ch)) { ret = i40e_del_macvlan_filter(hw, ch->seid, i40e_channel_mac(ch), &aq_err); if (!ret) { /* Reset queue contexts */ i40e_reset_ch_rings(vsi, ch); clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev); netdev_set_sb_channel(ch->fwd->netdev, 0); kfree(ch->fwd); ch->fwd = NULL; } } } } /** * i40e_fwd_del - 
delete macvlan interfaces * @netdev: net device to configure * @vdev: macvlan netdevice */ static void i40e_fwd_del(struct net_device *netdev, void *vdev) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_fwd_adapter *fwd = vdev; struct i40e_channel *ch, *ch_tmp; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int aq_err, ret = 0; /* Find the channel associated with the macvlan and del mac filter */ list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { if (i40e_is_channel_macvlan(ch) && ether_addr_equal(i40e_channel_mac(ch), fwd->netdev->dev_addr)) { ret = i40e_del_macvlan_filter(hw, ch->seid, i40e_channel_mac(ch), &aq_err); if (!ret) { /* Reset queue contexts */ i40e_reset_ch_rings(vsi, ch); clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); netdev_unbind_sb_channel(netdev, fwd->netdev); netdev_set_sb_channel(fwd->netdev, 0); kfree(ch->fwd); ch->fwd = NULL; } else { dev_info(&pf->pdev->dev, "Error deleting mac filter on macvlan err %s, aq_err %s\n", i40e_stat_str(hw, ret), i40e_aq_str(hw, aq_err)); } break; } } } /** * i40e_setup_tc - configure multiple traffic classes * @netdev: net device to configure * @type_data: tc offload data **/ static int i40e_setup_tc(struct net_device *netdev, void *type_data) { struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; u8 enabled_tc = 0, num_tc, hw; bool need_reset = false; int old_queue_pairs; int ret = -EINVAL; u16 mode; int i; old_queue_pairs = vsi->num_queue_pairs; num_tc = mqprio_qopt->qopt.num_tc; hw = mqprio_qopt->qopt.hw; mode = mqprio_qopt->mode; if (!hw) { pf->flags &= ~I40E_FLAG_TC_MQPRIO; memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); goto config_tc; } /* Check if MFP enabled */ if (pf->flags & I40E_FLAG_MFP_ENABLED) { netdev_info(netdev, "Configuring TC not supported in MFP mode\n"); return ret; } switch (mode) { case TC_MQPRIO_MODE_DCB: pf->flags &= ~I40E_FLAG_TC_MQPRIO; /* Check if DCB enabled to continue */ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { netdev_info(netdev, "DCB is not enabled for adapter\n"); return ret; } /* Check whether tc count is within enabled limit */ if (num_tc > i40e_pf_get_num_tc(pf)) { netdev_info(netdev, "TC count greater than enabled on link for adapter\n"); return ret; } break; case TC_MQPRIO_MODE_CHANNEL: if (pf->flags & I40E_FLAG_DCB_ENABLED) { netdev_info(netdev, "Full offload of TC Mqprio options is not supported when DCB is enabled\n"); return ret; } if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) return ret; ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt); if (ret) return ret; memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); pf->flags |= I40E_FLAG_TC_MQPRIO; pf->flags &= ~I40E_FLAG_DCB_ENABLED; break; default: return -EINVAL; } config_tc: /* Generate TC map for number of tc requested */ for (i = 0; i < num_tc; i++) enabled_tc |= BIT(i); /* Requesting same TC configuration as already enabled */ if (enabled_tc == vsi->tc_config.enabled_tc && mode != TC_MQPRIO_MODE_CHANNEL) return 0; /* Quiesce VSI queues */ i40e_quiesce_vsi(vsi); if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO)) i40e_remove_queue_channels(vsi); /* Configure VSI for enabled TCs */ ret = i40e_vsi_config_tc(vsi, enabled_tc); if (ret) { netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n", vsi->seid); need_reset = true; goto exit; } else { dev_info(&vsi->back->pdev->dev, "Setup channel (id:%u) utilizing num_queues %d\n", 
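/*
 * Illustrative note (not part of the driver): the TC bitmap built a few
 * lines above is simply the low num_tc bits set.  The helper name
 * example_tc_map() is hypothetical.
 *
 *	static u8 example_tc_map(u8 num_tc)
 *	{
 *		return (u8)(BIT(num_tc) - 1);
 *	}
 *
 * num_tc = 3 yields 0x07.  Requesting a map that is already programmed
 * (outside of channel mode) returns early above without touching the
 * hardware.
 */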
			 vsi->seid, vsi->tc_config.tc_info[0].qcount);
	}

	if (pf->flags & I40E_FLAG_TC_MQPRIO) {
		if (vsi->mqprio_qopt.max_rate[0]) {
			u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];

			do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
			ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
			if (!ret) {
				u64 credits = max_tx_rate;

				do_div(credits, I40E_BW_CREDIT_DIVISOR);
				dev_dbg(&vsi->back->pdev->dev,
					"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
					max_tx_rate,
					credits,
					vsi->seid);
			} else {
				need_reset = true;
				goto exit;
			}
		}
		ret = i40e_configure_queue_channels(vsi);
		if (ret) {
			vsi->num_queue_pairs = old_queue_pairs;
			netdev_info(netdev,
				    "Failed configuring queue channels\n");
			need_reset = true;
			goto exit;
		}
	}

exit:
	/* Reset the configuration data to defaults, only TC0 is enabled */
	if (need_reset) {
		i40e_vsi_set_default_tc_config(vsi);
		need_reset = false;
	}

	/* Unquiesce VSI */
	i40e_unquiesce_vsi(vsi);
	return ret;
}

/**
 * i40e_set_cld_element - sets cloud filter element data
 * @filter: cloud filter rule
 * @cld: ptr to cloud filter element data
 *
 * This is a helper function to copy data into the cloud filter element
 **/
static inline void
i40e_set_cld_element(struct i40e_cloud_filter *filter,
		     struct i40e_aqc_cloud_filters_element_data *cld)
{
	int i, j;
	u32 ipa;

	memset(cld, 0, sizeof(*cld));
	ether_addr_copy(cld->outer_mac, filter->dst_mac);
	ether_addr_copy(cld->inner_mac, filter->src_mac);

	if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
		return;

	if (filter->n_proto == ETH_P_IPV6) {
#define IPV6_MAX_INDEX	(ARRAY_SIZE(filter->dst_ipv6) - 1)
		for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
		     i++, j += 2) {
			ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
			ipa = cpu_to_le32(ipa);
			memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
		}
	} else {
		ipa = be32_to_cpu(filter->dst_ipv4);
		memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
	}

	cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));

	/* tenant_id is not supported by FW now, once the support is enabled
	 * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id)
	 */
	if (filter->tenant_id)
		return;
}

/**
 * i40e_add_del_cloud_filter - Add/del cloud filter
 * @vsi: pointer to VSI
 * @filter: cloud filter rule
 * @add: if true, add, if false, delete
 *
 * Add or delete a cloud filter for a specific flow spec.
 * Returns 0 if the filter was successfully added.
**/ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, struct i40e_cloud_filter *filter, bool add) { struct i40e_aqc_cloud_filters_element_data cld_filter; struct i40e_pf *pf = vsi->back; int ret; static const u16 flag_table[128] = { [I40E_CLOUD_FILTER_FLAGS_OMAC] = I40E_AQC_ADD_CLOUD_FILTER_OMAC, [I40E_CLOUD_FILTER_FLAGS_IMAC] = I40E_AQC_ADD_CLOUD_FILTER_IMAC, [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN, [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID, [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC, [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID, [I40E_CLOUD_FILTER_FLAGS_IIP] = I40E_AQC_ADD_CLOUD_FILTER_IIP, }; if (filter->flags >= ARRAY_SIZE(flag_table)) return I40E_ERR_CONFIG; /* copy element needed to add cloud filter from filter */ i40e_set_cld_element(filter, &cld_filter); if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE) cld_filter.flags = cpu_to_le16(filter->tunnel_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT); if (filter->n_proto == ETH_P_IPV6) cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] | I40E_AQC_ADD_CLOUD_FLAGS_IPV6); else cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] | I40E_AQC_ADD_CLOUD_FLAGS_IPV4); if (add) ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid, &cld_filter, 1); else ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid, &cld_filter, 1); if (ret) dev_dbg(&pf->pdev->dev, "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n", add ? "add" : "delete", filter->dst_port, ret, pf->hw.aq.asq_last_status); else dev_info(&pf->pdev->dev, "%s cloud filter for VSI: %d\n", add ? "Added" : "Deleted", filter->seid); return ret; } /** * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf * @vsi: pointer to VSI * @filter: cloud filter rule * @add: if true, add, if false, delete * * Add or delete a cloud filter for a specific flow spec using big buffer. * Returns 0 if the filter were successfully added. **/ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, struct i40e_cloud_filter *filter, bool add) { struct i40e_aqc_cloud_filters_element_bb cld_filter; struct i40e_pf *pf = vsi->back; int ret; /* Both (src/dst) valid mac_addr are not supported */ if ((is_valid_ether_addr(filter->dst_mac) && is_valid_ether_addr(filter->src_mac)) || (is_multicast_ether_addr(filter->dst_mac) && is_multicast_ether_addr(filter->src_mac))) return -EOPNOTSUPP; /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP * ports are not supported via big buffer now. 
*/ if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP) return -EOPNOTSUPP; /* adding filter using src_port/src_ip is not supported at this stage */ if (filter->src_port || filter->src_ipv4 || !ipv6_addr_any(&filter->ip.v6.src_ip6)) return -EOPNOTSUPP; /* copy element needed to add cloud filter from filter */ i40e_set_cld_element(filter, &cld_filter.element); if (is_valid_ether_addr(filter->dst_mac) || is_valid_ether_addr(filter->src_mac) || is_multicast_ether_addr(filter->dst_mac) || is_multicast_ether_addr(filter->src_mac)) { /* MAC + IP : unsupported mode */ if (filter->dst_ipv4) return -EOPNOTSUPP; /* since we validated that L4 port must be valid before * we get here, start with respective "flags" value * and update if vlan is present or not */ cld_filter.element.flags = cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT); if (filter->vlan_id) { cld_filter.element.flags = cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT); } } else if (filter->dst_ipv4 || !ipv6_addr_any(&filter->ip.v6.dst_ip6)) { cld_filter.element.flags = cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT); if (filter->n_proto == ETH_P_IPV6) cld_filter.element.flags |= cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6); else cld_filter.element.flags |= cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4); } else { dev_err(&pf->pdev->dev, "either mac or ip has to be valid for cloud filter\n"); return -EINVAL; } /* Now copy L4 port in Byte 6..7 in general fields */ cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] = be16_to_cpu(filter->dst_port); if (add) { /* Validate current device switch mode, change if necessary */ ret = i40e_validate_and_set_switch_mode(vsi); if (ret) { dev_err(&pf->pdev->dev, "failed to set switch mode, ret %d\n", ret); return ret; } ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid, &cld_filter, 1); } else { ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid, &cld_filter, 1); } if (ret) dev_dbg(&pf->pdev->dev, "Failed to %s cloud filter(big buffer) err %d aq_err %d\n", add ? "add" : "delete", ret, pf->hw.aq.asq_last_status); else dev_info(&pf->pdev->dev, "%s cloud filter for VSI: %d, L4 port: %d\n", add ? 
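/*
 * Illustrative sketch (hypothetical values, not part of the driver): a
 * minimal big-buffer cloud filter of the shape the function above
 * accepts, steering TCP destination port 5001 on 192.168.1.1 to a
 * channel VSI.  The seid is made up and vsi is assumed valid.
 *
 *	struct i40e_cloud_filter f = {
 *		.seid = 397,
 *		.dst_ipv4 = cpu_to_be32(0xc0a80101),
 *		.dst_port = cpu_to_be16(5001),
 *		.ip_proto = IPPROTO_TCP,
 *		.n_proto = ETH_P_IP,
 *	};
 *	int err = i40e_add_del_cloud_filter_big_buf(vsi, &f, true);
 *
 * Because dst_port is non-zero, the flower path below picks this
 * big-buffer variant; zero-port filters go through
 * i40e_add_del_cloud_filter() instead.
 */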
"add" : "delete", filter->seid, ntohs(filter->dst_port)); return ret; } /** * i40e_parse_cls_flower - Parse tc flower filters provided by kernel * @vsi: Pointer to VSI * @cls_flower: Pointer to struct flow_cls_offload * @filter: Pointer to cloud filter structure * **/ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, struct flow_cls_offload *f, struct i40e_cloud_filter *filter) { struct flow_rule *rule = flow_cls_offload_flow_rule(f); struct flow_dissector *dissector = rule->match.dissector; u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0; struct i40e_pf *pf = vsi->back; u8 field_flags = 0; if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_BASIC) | BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | BIT(FLOW_DISSECTOR_KEY_VLAN) | BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | BIT(FLOW_DISSECTOR_KEY_PORTS) | BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n", dissector->used_keys); return -EOPNOTSUPP; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { struct flow_match_enc_keyid match; flow_rule_match_enc_keyid(rule, &match); if (match.mask->keyid != 0) field_flags |= I40E_CLOUD_FIELD_TEN_ID; filter->tenant_id = be32_to_cpu(match.key->keyid); } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; flow_rule_match_basic(rule, &match); n_proto_key = ntohs(match.key->n_proto); n_proto_mask = ntohs(match.mask->n_proto); if (n_proto_key == ETH_P_ALL) { n_proto_key = 0; n_proto_mask = 0; } filter->n_proto = n_proto_key & n_proto_mask; filter->ip_proto = match.key->ip_proto; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { struct flow_match_eth_addrs match; flow_rule_match_eth_addrs(rule, &match); /* use is_broadcast and is_zero to check for all 0xf or 0 */ if (!is_zero_ether_addr(match.mask->dst)) { if (is_broadcast_ether_addr(match.mask->dst)) { field_flags |= I40E_CLOUD_FIELD_OMAC; } else { dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n", match.mask->dst); return I40E_ERR_CONFIG; } } if (!is_zero_ether_addr(match.mask->src)) { if (is_broadcast_ether_addr(match.mask->src)) { field_flags |= I40E_CLOUD_FIELD_IMAC; } else { dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n", match.mask->src); return I40E_ERR_CONFIG; } } ether_addr_copy(filter->dst_mac, match.key->dst); ether_addr_copy(filter->src_mac, match.key->src); } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { struct flow_match_vlan match; flow_rule_match_vlan(rule, &match); if (match.mask->vlan_id) { if (match.mask->vlan_id == VLAN_VID_MASK) { field_flags |= I40E_CLOUD_FIELD_IVLAN; } else { dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n", match.mask->vlan_id); return I40E_ERR_CONFIG; } } filter->vlan_id = cpu_to_be16(match.key->vlan_id); } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { struct flow_match_control match; flow_rule_match_control(rule, &match); addr_type = match.key->addr_type; } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { struct flow_match_ipv4_addrs match; flow_rule_match_ipv4_addrs(rule, &match); if (match.mask->dst) { if (match.mask->dst == cpu_to_be32(0xffffffff)) { field_flags |= I40E_CLOUD_FIELD_IIP; } else { dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n", &match.mask->dst); return I40E_ERR_CONFIG; } } if (match.mask->src) { if (match.mask->src == cpu_to_be32(0xffffffff)) { field_flags |= I40E_CLOUD_FIELD_IIP; } else { dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n", &match.mask->src); return I40E_ERR_CONFIG; } } if 
(field_flags & I40E_CLOUD_FIELD_TEN_ID) { dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n"); return I40E_ERR_CONFIG; } filter->dst_ipv4 = match.key->dst; filter->src_ipv4 = match.key->src; } if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { struct flow_match_ipv6_addrs match; flow_rule_match_ipv6_addrs(rule, &match); /* src and dest IPV6 address should not be LOOPBACK * (0:0:0:0:0:0:0:1), which can be represented as ::1 */ if (ipv6_addr_loopback(&match.key->dst) || ipv6_addr_loopback(&match.key->src)) { dev_err(&pf->pdev->dev, "Bad ipv6, addr is LOOPBACK\n"); return I40E_ERR_CONFIG; } if (!ipv6_addr_any(&match.mask->dst) || !ipv6_addr_any(&match.mask->src)) field_flags |= I40E_CLOUD_FIELD_IIP; memcpy(&filter->src_ipv6, &match.key->src.s6_addr32, sizeof(filter->src_ipv6)); memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32, sizeof(filter->dst_ipv6)); } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { struct flow_match_ports match; flow_rule_match_ports(rule, &match); if (match.mask->src) { if (match.mask->src == cpu_to_be16(0xffff)) { field_flags |= I40E_CLOUD_FIELD_IIP; } else { dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n", be16_to_cpu(match.mask->src)); return I40E_ERR_CONFIG; } } if (match.mask->dst) { if (match.mask->dst == cpu_to_be16(0xffff)) { field_flags |= I40E_CLOUD_FIELD_IIP; } else { dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n", be16_to_cpu(match.mask->dst)); return I40E_ERR_CONFIG; } } filter->dst_port = match.key->dst; filter->src_port = match.key->src; switch (filter->ip_proto) { case IPPROTO_TCP: case IPPROTO_UDP: break; default: dev_err(&pf->pdev->dev, "Only UDP and TCP transport are supported\n"); return -EINVAL; } } filter->flags = field_flags; return 0; } /** * i40e_handle_tclass: Forward to a traffic class on the device * @vsi: Pointer to VSI * @tc: traffic class index on the device * @filter: Pointer to cloud filter structure * **/ static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc, struct i40e_cloud_filter *filter) { struct i40e_channel *ch, *ch_tmp; /* direct to a traffic class on the same device */ if (tc == 0) { filter->seid = vsi->seid; return 0; } else if (vsi->tc_config.enabled_tc & BIT(tc)) { if (!filter->dst_port) { dev_err(&vsi->back->pdev->dev, "Specify destination port to direct to traffic class that is not default\n"); return -EINVAL; } if (list_empty(&vsi->ch_list)) return -EINVAL; list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { if (ch->seid == vsi->tc_seid_map[tc]) filter->seid = ch->seid; } return 0; } dev_err(&vsi->back->pdev->dev, "TC is not enabled\n"); return -EINVAL; } /** * i40e_configure_clsflower - Configure tc flower filters * @vsi: Pointer to VSI * @cls_flower: Pointer to struct flow_cls_offload * **/ static int i40e_configure_clsflower(struct i40e_vsi *vsi, struct flow_cls_offload *cls_flower) { int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid); struct i40e_cloud_filter *filter = NULL; struct i40e_pf *pf = vsi->back; int err = 0; if (tc < 0) { dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n"); return -EOPNOTSUPP; } if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) return -EBUSY; if (pf->fdir_pf_active_filters || (!hlist_empty(&pf->fdir_filter_list))) { dev_err(&vsi->back->pdev->dev, "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n"); return -EINVAL; } if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) { dev_err(&vsi->back->pdev->dev, "Disable Flow Director Sideband, 
configuring Cloud filters via tc-flower\n"); vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED; vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER; } filter = kzalloc(sizeof(*filter), GFP_KERNEL); if (!filter) return -ENOMEM; filter->cookie = cls_flower->cookie; err = i40e_parse_cls_flower(vsi, cls_flower, filter); if (err < 0) goto err; err = i40e_handle_tclass(vsi, tc, filter); if (err < 0) goto err; /* Add cloud filter */ if (filter->dst_port) err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true); else err = i40e_add_del_cloud_filter(vsi, filter, true); if (err) { dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %s\n", i40e_stat_str(&pf->hw, err)); goto err; } /* add filter to the ordered list */ INIT_HLIST_NODE(&filter->cloud_node); hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list); pf->num_cloud_filters++; return err; err: kfree(filter); return err; } /** * i40e_find_cloud_filter - Find the could filter in the list * @vsi: Pointer to VSI * @cookie: filter specific cookie * **/ static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi, unsigned long *cookie) { struct i40e_cloud_filter *filter = NULL; struct hlist_node *node2; hlist_for_each_entry_safe(filter, node2, &vsi->back->cloud_filter_list, cloud_node) if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) return filter; return NULL; } /** * i40e_delete_clsflower - Remove tc flower filters * @vsi: Pointer to VSI * @cls_flower: Pointer to struct flow_cls_offload * **/ static int i40e_delete_clsflower(struct i40e_vsi *vsi, struct flow_cls_offload *cls_flower) { struct i40e_cloud_filter *filter = NULL; struct i40e_pf *pf = vsi->back; int err = 0; filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie); if (!filter) return -EINVAL; hash_del(&filter->cloud_node); if (filter->dst_port) err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false); else err = i40e_add_del_cloud_filter(vsi, filter, false); kfree(filter); if (err) { dev_err(&pf->pdev->dev, "Failed to delete cloud filter, err %s\n", i40e_stat_str(&pf->hw, err)); return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status); } pf->num_cloud_filters--; if (!pf->num_cloud_filters) if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) && !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) { pf->flags |= I40E_FLAG_FD_SB_ENABLED; pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER; pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; } return 0; } /** * i40e_setup_tc_cls_flower - flower classifier offloads * @netdev: net device to configure * @type_data: offload data **/ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np, struct flow_cls_offload *cls_flower) { struct i40e_vsi *vsi = np->vsi; switch (cls_flower->command) { case FLOW_CLS_REPLACE: return i40e_configure_clsflower(vsi, cls_flower); case FLOW_CLS_DESTROY: return i40e_delete_clsflower(vsi, cls_flower); case FLOW_CLS_STATS: return -EOPNOTSUPP; default: return -EOPNOTSUPP; } } static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { struct i40e_netdev_priv *np = cb_priv; if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data)) return -EOPNOTSUPP; switch (type) { case TC_SETUP_CLSFLOWER: return i40e_setup_tc_cls_flower(np, type_data); default: return -EOPNOTSUPP; } } static LIST_HEAD(i40e_block_cb_list); static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data) { struct i40e_netdev_priv *np = netdev_priv(netdev); switch (type) { case TC_SETUP_QDISC_MQPRIO: return i40e_setup_tc(netdev, type_data); case TC_SETUP_BLOCK: 
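/*
 * Illustrative usage (values are hypothetical): once the block callback
 * below is registered, a flower rule of the kind i40e_parse_cls_flower()
 * understands can be offloaded from user space, for example:
 *
 *	tc qdisc add dev eth4 ingress
 *	tc filter add dev eth4 ingress protocol ip flower \
 *		dst_ip 192.168.1.1 ip_proto tcp dst_port 5001 \
 *		skip_sw hw_tc 1
 *
 * The resulting FLOW_CLS_REPLACE lands in i40e_configure_clsflower()
 * above and, because a destination port is given, takes the big-buffer
 * cloud filter path.
 */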
return flow_block_cb_setup_simple(type_data, &i40e_block_cb_list, i40e_setup_tc_block_cb, np, np, true); default: return -EOPNOTSUPP; } } /** * i40e_open - Called when a network interface is made active * @netdev: network interface device structure * * The open entry point is called when a network interface is made * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt * handler is registered with the OS, the netdev watchdog subtask is * enabled, and the stack is notified that the interface is ready. * * Returns 0 on success, negative value on failure **/ int i40e_open(struct net_device *netdev) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; int err; /* disallow open during test or if eeprom is broken */ if (test_bit(__I40E_TESTING, pf->state) || test_bit(__I40E_BAD_EEPROM, pf->state)) return -EBUSY; netif_carrier_off(netdev); if (i40e_force_link_state(pf, true)) return -EAGAIN; err = i40e_vsi_open(vsi); if (err) return err; /* configure global TSO hardware offload settings */ wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH | TCP_FLAG_FIN) >> 16); wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH | TCP_FLAG_FIN | TCP_FLAG_CWR) >> 16); wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); udp_tunnel_get_rx_info(netdev); return 0; } /** * i40e_vsi_open - * @vsi: the VSI to open * * Finish initialization of the VSI. * * Returns 0 on success, negative value on failure * * Note: expects to be called while under rtnl_lock() **/ int i40e_vsi_open(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; char int_name[I40E_INT_NAME_STR_LEN]; int err; /* allocate descriptors */ err = i40e_vsi_setup_tx_resources(vsi); if (err) goto err_setup_tx; err = i40e_vsi_setup_rx_resources(vsi); if (err) goto err_setup_rx; err = i40e_vsi_configure(vsi); if (err) goto err_setup_rx; if (vsi->netdev) { snprintf(int_name, sizeof(int_name) - 1, "%s-%s", dev_driver_string(&pf->pdev->dev), vsi->netdev->name); err = i40e_vsi_request_irq(vsi, int_name); if (err) goto err_setup_rx; /* Notify the stack of the actual queue counts. */ err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs); if (err) goto err_set_queues; err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs); if (err) goto err_set_queues; } else if (vsi->type == I40E_VSI_FDIR) { snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir", dev_driver_string(&pf->pdev->dev), dev_name(&pf->pdev->dev)); err = i40e_vsi_request_irq(vsi, int_name); } else { err = -EINVAL; goto err_setup_rx; } err = i40e_up_complete(vsi); if (err) goto err_up_complete; return 0; err_up_complete: i40e_down(vsi); err_set_queues: i40e_vsi_free_irq(vsi); err_setup_rx: i40e_vsi_free_rx_resources(vsi); err_setup_tx: i40e_vsi_free_tx_resources(vsi); if (vsi == pf->vsi[pf->lan_vsi]) i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); return err; } /** * i40e_fdir_filter_exit - Cleans up the Flow Director accounting * @pf: Pointer to PF * * This function destroys the hlist where all the Flow Director * filters were saved. 
**/ static void i40e_fdir_filter_exit(struct i40e_pf *pf) { struct i40e_fdir_filter *filter; struct i40e_flex_pit *pit_entry, *tmp; struct hlist_node *node2; hlist_for_each_entry_safe(filter, node2, &pf->fdir_filter_list, fdir_node) { hlist_del(&filter->fdir_node); kfree(filter); } list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) { list_del(&pit_entry->list); kfree(pit_entry); } INIT_LIST_HEAD(&pf->l3_flex_pit_list); list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) { list_del(&pit_entry->list); kfree(pit_entry); } INIT_LIST_HEAD(&pf->l4_flex_pit_list); pf->fdir_pf_active_filters = 0; pf->fd_tcp4_filter_cnt = 0; pf->fd_udp4_filter_cnt = 0; pf->fd_sctp4_filter_cnt = 0; pf->fd_ip4_filter_cnt = 0; /* Reprogram the default input set for TCP/IPv4 */ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP, I40E_L3_SRC_MASK | I40E_L3_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); /* Reprogram the default input set for UDP/IPv4 */ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP, I40E_L3_SRC_MASK | I40E_L3_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); /* Reprogram the default input set for SCTP/IPv4 */ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP, I40E_L3_SRC_MASK | I40E_L3_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); /* Reprogram the default input set for Other/IPv4 */ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER, I40E_L3_SRC_MASK | I40E_L3_DST_MASK); i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4, I40E_L3_SRC_MASK | I40E_L3_DST_MASK); } /** * i40e_cloud_filter_exit - Cleans up the cloud filters * @pf: Pointer to PF * * This function destroys the hlist where all the cloud filters * were saved. **/ static void i40e_cloud_filter_exit(struct i40e_pf *pf) { struct i40e_cloud_filter *cfilter; struct hlist_node *node; hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list, cloud_node) { hlist_del(&cfilter->cloud_node); kfree(cfilter); } pf->num_cloud_filters = 0; if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) && !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) { pf->flags |= I40E_FLAG_FD_SB_ENABLED; pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER; pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; } } /** * i40e_close - Disables a network interface * @netdev: network interface device structure * * The close entry point is called when an interface is de-activated * by the OS. The hardware is still under the driver's control, but * this netdev interface is disabled. * * Returns 0, this is not allowed to fail **/ int i40e_close(struct net_device *netdev) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; i40e_vsi_close(vsi); return 0; } /** * i40e_do_reset - Start a PF or Core Reset sequence * @pf: board private structure * @reset_flags: which reset is requested * @lock_acquired: indicates whether or not the lock has been acquired * before this function was called. * * The essential difference in resets is that the PF Reset * doesn't clear the packet buffers, doesn't reset the PE * firmware, and doesn't bother the other PFs on the chip. **/ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) { u32 val; WARN_ON(in_interrupt()); /* do the biggest reset indicated */ if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) { /* Request a Global Reset * * This will start the chip's countdown to the actual full * chip reset event, and a warning interrupt to be sent * to all PFs, including the requestor. 
Our handler * for the warning interrupt will deal with the shutdown * and recovery of the switch setup. */ dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); val = rd32(&pf->hw, I40E_GLGEN_RTRIG); val |= I40E_GLGEN_RTRIG_GLOBR_MASK; wr32(&pf->hw, I40E_GLGEN_RTRIG, val); } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) { /* Request a Core Reset * * Same as Global Reset, except does *not* include the MAC/PHY */ dev_dbg(&pf->pdev->dev, "CoreR requested\n"); val = rd32(&pf->hw, I40E_GLGEN_RTRIG); val |= I40E_GLGEN_RTRIG_CORER_MASK; wr32(&pf->hw, I40E_GLGEN_RTRIG, val); i40e_flush(&pf->hw); } else if (reset_flags & I40E_PF_RESET_FLAG) { /* Request a PF Reset * * Resets only the PF-specific registers * * This goes directly to the tear-down and rebuild of * the switch, since we need to do all the recovery as * for the Core Reset. */ dev_dbg(&pf->pdev->dev, "PFR requested\n"); i40e_handle_reset_warning(pf, lock_acquired); dev_info(&pf->pdev->dev, pf->flags & I40E_FLAG_DISABLE_FW_LLDP ? "FW LLDP is disabled\n" : "FW LLDP is enabled\n"); } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) { int v; /* Find the VSI(s) that requested a re-init */ dev_info(&pf->pdev->dev, "VSI reinit requested\n"); for (v = 0; v < pf->num_alloc_vsi; v++) { struct i40e_vsi *vsi = pf->vsi[v]; if (vsi != NULL && test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED, vsi->state)) i40e_vsi_reinit_locked(pf->vsi[v]); } } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) { int v; /* Find the VSI(s) that needs to be brought down */ dev_info(&pf->pdev->dev, "VSI down requested\n"); for (v = 0; v < pf->num_alloc_vsi; v++) { struct i40e_vsi *vsi = pf->vsi[v]; if (vsi != NULL && test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state)) { set_bit(__I40E_VSI_DOWN, vsi->state); i40e_down(vsi); } } } else { dev_info(&pf->pdev->dev, "bad reset request 0x%08x\n", reset_flags); } } #ifdef CONFIG_I40E_DCB /** * i40e_dcb_need_reconfig - Check if DCB needs reconfig * @pf: board private structure * @old_cfg: current DCB config * @new_cfg: new DCB config **/ bool i40e_dcb_need_reconfig(struct i40e_pf *pf, struct i40e_dcbx_config *old_cfg, struct i40e_dcbx_config *new_cfg) { bool need_reconfig = false; /* Check if ETS configuration has changed */ if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg, sizeof(new_cfg->etscfg))) { /* If Priority Table has changed reconfig is needed */ if (memcmp(&new_cfg->etscfg.prioritytable, &old_cfg->etscfg.prioritytable, sizeof(new_cfg->etscfg.prioritytable))) { need_reconfig = true; dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); } if (memcmp(&new_cfg->etscfg.tcbwtable, &old_cfg->etscfg.tcbwtable, sizeof(new_cfg->etscfg.tcbwtable))) dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); if (memcmp(&new_cfg->etscfg.tsatable, &old_cfg->etscfg.tsatable, sizeof(new_cfg->etscfg.tsatable))) dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); } /* Check if PFC configuration has changed */ if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) { need_reconfig = true; dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); } /* Check if APP Table has changed */ if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) { need_reconfig = true; dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); } dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig); return need_reconfig; } /** * i40e_handle_lldp_event - Handle LLDP Change MIB event * @pf: board private structure * @e: event info posted on ARQ **/ static int i40e_handle_lldp_event(struct i40e_pf *pf, struct i40e_arq_event_info 
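/*
 * Illustrative sketch (not part of the driver): most reset requests are
 * made asynchronously rather than by calling i40e_do_reset() directly;
 * the requester sets a flag and kicks the service task, which acts on
 * it in i40e_reset_subtask() further below.  Assuming a valid pf:
 *
 *	set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
 *	i40e_service_event_schedule(pf);
 *
 * Callers that already hold rtnl may call i40e_do_reset(pf, flags, true)
 * directly; user-triggered paths use i40e_do_reset_safe(), which takes
 * rtnl itself.
 */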
*e) { struct i40e_aqc_lldp_get_mib *mib = (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; struct i40e_hw *hw = &pf->hw; struct i40e_dcbx_config tmp_dcbx_cfg; bool need_reconfig = false; int ret = 0; u8 type; /* Not DCB capable or capability disabled */ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) return ret; /* Ignore if event is not for Nearest Bridge */ type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type); if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE) return ret; /* Check MIB Type and return if event for Remote MIB update */ type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK; dev_dbg(&pf->pdev->dev, "LLDP event mib type %s\n", type ? "remote" : "local"); if (type == I40E_AQ_LLDP_MIB_REMOTE) { /* Update the remote cached instance and return */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, &hw->remote_dcbx_config); goto exit; } /* Store the old configuration */ tmp_dcbx_cfg = hw->local_dcbx_config; /* Reset the old DCBx configuration data */ memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config)); /* Get updated DCBX data from firmware */ ret = i40e_get_dcb_config(&pf->hw); if (ret) { dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto exit; } /* No change detected in DCBX configs */ if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config, sizeof(tmp_dcbx_cfg))) { dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); goto exit; } need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); if (!need_reconfig) goto exit; /* Enable DCB tagging only when more than one TC */ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) pf->flags |= I40E_FLAG_DCB_ENABLED; else pf->flags &= ~I40E_FLAG_DCB_ENABLED; set_bit(__I40E_PORT_SUSPENDED, pf->state); /* Reconfiguration needed quiesce all VSIs */ i40e_pf_quiesce_all_vsi(pf); /* Changes in configuration update VEB/VSI */ i40e_dcb_reconfigure(pf); ret = i40e_resume_port_tx(pf); clear_bit(__I40E_PORT_SUSPENDED, pf->state); /* In case of error no point in resuming VSIs */ if (ret) goto exit; /* Wait for the PF's queues to be disabled */ ret = i40e_pf_wait_queues_disabled(pf); if (ret) { /* Schedule PF reset to recover */ set_bit(__I40E_PF_RESET_REQUESTED, pf->state); i40e_service_event_schedule(pf); } else { i40e_pf_unquiesce_all_vsi(pf); set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); } exit: return ret; } #endif /* CONFIG_I40E_DCB */ /** * i40e_do_reset_safe - Protected reset path for userland calls. 
* @pf: board private structure * @reset_flags: which reset is requested * **/ void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags) { rtnl_lock(); i40e_do_reset(pf, reset_flags, true); rtnl_unlock(); } /** * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event * @pf: board private structure * @e: event info posted on ARQ * * Handler for LAN Queue Overflow Event generated by the firmware for PF * and VF queues **/ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf, struct i40e_arq_event_info *e) { struct i40e_aqc_lan_overflow *data = (struct i40e_aqc_lan_overflow *)&e->desc.params.raw; u32 queue = le32_to_cpu(data->prtdcb_rupto); u32 qtx_ctl = le32_to_cpu(data->otx_ctl); struct i40e_hw *hw = &pf->hw; struct i40e_vf *vf; u16 vf_id; dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n", queue, qtx_ctl); /* Queue belongs to VF, find the VF and issue VF reset */ if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK) >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) { vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK) >> I40E_QTX_CTL_VFVM_INDX_SHIFT); vf_id -= hw->func_caps.vf_base_id; vf = &pf->vf[vf_id]; i40e_vc_notify_vf_reset(vf); /* Allow VF to process pending reset notification */ msleep(20); i40e_reset_vf(vf, false); } } /** * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters * @pf: board private structure **/ u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf) { u32 val, fcnt_prog; val = rd32(&pf->hw, I40E_PFQF_FDSTAT); fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK); return fcnt_prog; } /** * i40e_get_current_fd_count - Get total FD filters programmed for this PF * @pf: board private structure **/ u32 i40e_get_current_fd_count(struct i40e_pf *pf) { u32 val, fcnt_prog; val = rd32(&pf->hw, I40E_PFQF_FDSTAT); fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) + ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >> I40E_PFQF_FDSTAT_BEST_CNT_SHIFT); return fcnt_prog; } /** * i40e_get_global_fd_count - Get total FD filters programmed on device * @pf: board private structure **/ u32 i40e_get_global_fd_count(struct i40e_pf *pf) { u32 val, fcnt_prog; val = rd32(&pf->hw, I40E_GLQF_FDCNT_0); fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) + ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >> I40E_GLQF_FDCNT_0_BESTCNT_SHIFT); return fcnt_prog; } /** * i40e_reenable_fdir_sb - Restore FDir SB capability * @pf: board private structure **/ static void i40e_reenable_fdir_sb(struct i40e_pf *pf) { if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); } /** * i40e_reenable_fdir_atr - Restore FDir ATR capability * @pf: board private structure **/ static void i40e_reenable_fdir_atr(struct i40e_pf *pf) { if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) { /* ATR uses the same filtering logic as SB rules. It only * functions properly if the input set mask is at the default * settings. It is safe to restore the default input set * because there are no active TCPv4 filter rules. 
		 */
		i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
					I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
					I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
	}
}

/**
 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
 * @pf: board private structure
 * @filter: FDir filter to remove
 */
static void i40e_delete_invalid_filter(struct i40e_pf *pf,
				       struct i40e_fdir_filter *filter)
{
	/* Update counters */
	pf->fdir_pf_active_filters--;
	pf->fd_inv = 0;

	switch (filter->flow_type) {
	case TCP_V4_FLOW:
		pf->fd_tcp4_filter_cnt--;
		break;
	case UDP_V4_FLOW:
		pf->fd_udp4_filter_cnt--;
		break;
	case SCTP_V4_FLOW:
		pf->fd_sctp4_filter_cnt--;
		break;
	case IP_USER_FLOW:
		switch (filter->ip4_proto) {
		case IPPROTO_TCP:
			pf->fd_tcp4_filter_cnt--;
			break;
		case IPPROTO_UDP:
			pf->fd_udp4_filter_cnt--;
			break;
		case IPPROTO_SCTP:
			pf->fd_sctp4_filter_cnt--;
			break;
		case IPPROTO_IP:
			pf->fd_ip4_filter_cnt--;
			break;
		}
		break;
	}

	/* Remove the filter from the list and free memory */
	hlist_del(&filter->fdir_node);
	kfree(filter);
}

/**
 * i40e_fdir_check_and_reenable - Function to reenable FD ATR or SB if disabled
 * @pf: board private structure
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	u32 fcnt_prog, fcnt_avail;
	struct hlist_node *node;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
		return;

	/* Check if we have enough room to re-enable FDir SB capability. */
	fcnt_prog = i40e_get_global_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
	    (pf->fd_add_err == 0) ||
	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
		i40e_reenable_fdir_sb(pf);

	/* We should wait for even more space before re-enabling ATR.
	 * Additionally, we cannot enable ATR as long as we still have TCP SB
	 * rules active.
	 */
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
	    (pf->fd_tcp4_filter_cnt == 0))
		i40e_reenable_fdir_atr(pf);

	/* if hw had a problem adding a filter, delete it */
	if (pf->fd_inv > 0) {
		hlist_for_each_entry_safe(filter, node,
					  &pf->fdir_filter_list, fdir_node)
			if (filter->fd_id == pf->fd_inv)
				i40e_delete_invalid_filter(pf, filter);
	}
}

#define I40E_MIN_FD_FLUSH_INTERVAL 10
#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30

/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	unsigned long min_flush_time;
	int flush_wait_retry = 50;
	bool disable_atr = false;
	int fd_room;
	int reg;

	if (!time_after(jiffies, pf->fd_flush_timestamp +
			(I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
		return;

	/* If the flush is happening too quickly and we have mostly SB rules
	 * we should not re-enable ATR for some time.
*/ min_flush_time = pf->fd_flush_timestamp + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ); fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters; if (!(time_after(jiffies, min_flush_time)) && (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) { if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); disable_atr = true; } pf->fd_flush_timestamp = jiffies; set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); /* flush all filters */ wr32(&pf->hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK); i40e_flush(&pf->hw); pf->fd_flush_cnt++; pf->fd_add_err = 0; do { /* Check FD flush status every 5-6msec */ usleep_range(5000, 6000); reg = rd32(&pf->hw, I40E_PFQF_CTL_1); if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK)) break; } while (flush_wait_retry--); if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) { dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); } else { /* replay sideband filters */ i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); if (!disable_atr && !pf->fd_tcp4_filter_cnt) clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); } } /** * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed * @pf: board private structure **/ u32 i40e_get_current_atr_cnt(struct i40e_pf *pf) { return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters; } /* We can see up to 256 filter programming desc in transit if the filters are * being applied really fast; before we see the first * filter miss error on Rx queue 0. Accumulating enough error messages before * reacting will make sure we don't cause flush too often. */ #define I40E_MAX_FD_PROGRAM_ERROR 256 /** * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table * @pf: board private structure **/ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) { /* if interface is down do nothing */ if (test_bit(__I40E_DOWN, pf->state)) return; if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) i40e_fdir_flush_and_replay(pf); i40e_fdir_check_and_reenable(pf); } /** * i40e_vsi_link_event - notify VSI of a link event * @vsi: vsi to be notified * @link_up: link up or down **/ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) { if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state)) return; switch (vsi->type) { case I40E_VSI_MAIN: if (!vsi->netdev || !vsi->netdev_registered) break; if (link_up) { netif_carrier_on(vsi->netdev); netif_tx_wake_all_queues(vsi->netdev); } else { netif_carrier_off(vsi->netdev); netif_tx_stop_all_queues(vsi->netdev); } break; case I40E_VSI_SRIOV: case I40E_VSI_VMDQ2: case I40E_VSI_CTRL: case I40E_VSI_IWARP: case I40E_VSI_MIRROR: default: /* there is no notification for other VSIs */ break; } } /** * i40e_veb_link_event - notify elements on the veb of a link event * @veb: veb to be notified * @link_up: link up or down **/ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) { struct i40e_pf *pf; int i; if (!veb || !veb->pf) return; pf = veb->pf; /* depth first... */ for (i = 0; i < I40E_MAX_VEB; i++) if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) i40e_veb_link_event(pf->veb[i], link_up); /* ... 
now the local VSIs */ for (i = 0; i < pf->num_alloc_vsi; i++) if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) i40e_vsi_link_event(pf->vsi[i], link_up); } /** * i40e_link_event - Update netif_carrier status * @pf: board private structure **/ static void i40e_link_event(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; u8 new_link_speed, old_link_speed; i40e_status status; bool new_link, old_link; /* set this to force the get_link_status call to refresh state */ pf->hw.phy.get_link_info = true; old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); status = i40e_get_link_status(&pf->hw, &new_link); /* On success, disable temp link polling */ if (status == I40E_SUCCESS) { clear_bit(__I40E_TEMP_LINK_POLLING, pf->state); } else { /* Enable link polling temporarily until i40e_get_link_status * returns I40E_SUCCESS */ set_bit(__I40E_TEMP_LINK_POLLING, pf->state); dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", status); return; } old_link_speed = pf->hw.phy.link_info_old.link_speed; new_link_speed = pf->hw.phy.link_info.link_speed; if (new_link == old_link && new_link_speed == old_link_speed && (test_bit(__I40E_VSI_DOWN, vsi->state) || new_link == netif_carrier_ok(vsi->netdev))) return; i40e_print_link_message(vsi, new_link); /* Notify the base of the switch tree connected to * the link. Floating VEBs are not notified. */ if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); else i40e_vsi_link_event(vsi, new_link); if (pf->vf) i40e_vc_notify_link_state(pf); if (pf->flags & I40E_FLAG_PTP) i40e_ptp_set_increment(pf); } /** * i40e_watchdog_subtask - periodic checks not using event driven response * @pf: board private structure **/ static void i40e_watchdog_subtask(struct i40e_pf *pf) { int i; /* if interface is down do nothing */ if (test_bit(__I40E_DOWN, pf->state) || test_bit(__I40E_CONFIG_BUSY, pf->state)) return; /* make sure we don't do these things too often */ if (time_before(jiffies, (pf->service_timer_previous + pf->service_timer_period))) return; pf->service_timer_previous = jiffies; if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) || test_bit(__I40E_TEMP_LINK_POLLING, pf->state)) i40e_link_event(pf); /* Update the stats for active netdevs so the network stack * can look at updated numbers whenever it cares to */ for (i = 0; i < pf->num_alloc_vsi; i++) if (pf->vsi[i] && pf->vsi[i]->netdev) i40e_update_stats(pf->vsi[i]); if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) { /* Update the stats for the active switching components */ for (i = 0; i < I40E_MAX_VEB; i++) if (pf->veb[i]) i40e_update_veb_stats(pf->veb[i]); } i40e_ptp_rx_hang(pf); i40e_ptp_tx_hang(pf); } /** * i40e_reset_subtask - Set up for resetting the device and driver * @pf: board private structure **/ static void i40e_reset_subtask(struct i40e_pf *pf) { u32 reset_flags = 0; if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) { reset_flags |= BIT(__I40E_REINIT_REQUESTED); clear_bit(__I40E_REINIT_REQUESTED, pf->state); } if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) { reset_flags |= BIT(__I40E_PF_RESET_REQUESTED); clear_bit(__I40E_PF_RESET_REQUESTED, pf->state); } if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) { reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED); clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state); } if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) { reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); } if (test_bit(__I40E_DOWN_REQUESTED, 
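/*
 * Illustrative note (hypothetical example): the subtask collects every
 * pending request into one bitmap and lets i40e_do_reset() act on the
 * most severe one.  With both a PF reset and a VSI reinit pending, the
 * bitmap would be
 *
 *	reset_flags = BIT(__I40E_PF_RESET_REQUESTED) |
 *		      BIT(__I40E_REINIT_REQUESTED);
 *
 * and the global/core/PF ordering in i40e_do_reset() above decides
 * which action actually runs.
 */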
pf->state)) { reset_flags |= BIT(__I40E_DOWN_REQUESTED); clear_bit(__I40E_DOWN_REQUESTED, pf->state); } /* If there's a recovery already waiting, it takes * precedence before starting a new reset sequence. */ if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) { i40e_prep_for_reset(pf, false); i40e_reset(pf); i40e_rebuild(pf, false, false); } /* If we're already down or resetting, just bail */ if (reset_flags && !test_bit(__I40E_DOWN, pf->state) && !test_bit(__I40E_CONFIG_BUSY, pf->state)) { i40e_do_reset(pf, reset_flags, false); } } /** * i40e_handle_link_event - Handle link event * @pf: board private structure * @e: event info posted on ARQ **/ static void i40e_handle_link_event(struct i40e_pf *pf, struct i40e_arq_event_info *e) { struct i40e_aqc_get_link_status *status = (struct i40e_aqc_get_link_status *)&e->desc.params.raw; /* Do a new status request to re-enable LSE reporting * and load new status information into the hw struct * This completely ignores any state information * in the ARQ event info, instead choosing to always * issue the AQ update link status command. */ i40e_link_event(pf); /* Check if module meets thermal requirements */ if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) { dev_err(&pf->pdev->dev, "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n"); dev_err(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); } else { /* check for unqualified module, if link is down, suppress * the message if link was forced to be down. */ if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && (!(status->link_info & I40E_AQ_LINK_UP)) && (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) { dev_err(&pf->pdev->dev, "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n"); dev_err(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); } } } /** * i40e_clean_adminq_subtask - Clean the AdminQ rings * @pf: board private structure **/ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) { struct i40e_arq_event_info event; struct i40e_hw *hw = &pf->hw; u16 pending, i = 0; i40e_status ret; u16 opcode; u32 oldval; u32 val; /* Do not run clean AQ when PF reset fails */ if (test_bit(__I40E_RESET_FAILED, pf->state)) return; /* check for error indications */ val = rd32(&pf->hw, pf->hw.aq.arq.len); oldval = val; if (val & I40E_PF_ARQLEN_ARQVFE_MASK) { if (hw->debug_mask & I40E_DEBUG_AQ) dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); val &= ~I40E_PF_ARQLEN_ARQVFE_MASK; } if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) { if (hw->debug_mask & I40E_DEBUG_AQ) dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK; pf->arq_overflows++; } if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) { if (hw->debug_mask & I40E_DEBUG_AQ) dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK; } if (oldval != val) wr32(&pf->hw, pf->hw.aq.arq.len, val); val = rd32(&pf->hw, pf->hw.aq.asq.len); oldval = val; if (val & I40E_PF_ATQLEN_ATQVFE_MASK) { if (pf->hw.debug_mask & I40E_DEBUG_AQ) dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); val &= ~I40E_PF_ATQLEN_ATQVFE_MASK; } if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) { if (pf->hw.debug_mask & I40E_DEBUG_AQ) dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK; } if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) 
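/*
 * Illustrative sketch (not part of the driver): the error handling above
 * and below follows a read-modify-write pattern on the admin queue
 * length registers: read once, clear any error bits that are set, and
 * write back only if something changed.  In outline:
 *
 *	val = rd32(hw, reg);
 *	oldval = val;
 *	if (val & err_mask)
 *		val &= ~err_mask;
 *	if (oldval != val)
 *		wr32(hw, reg, val);
 *
 * Here reg stands for pf->hw.aq.arq.len or pf->hw.aq.asq.len and
 * err_mask for the VFE/OVFL/CRIT masks used in the surrounding code.
 */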
{ if (pf->hw.debug_mask & I40E_DEBUG_AQ) dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK; } if (oldval != val) wr32(&pf->hw, pf->hw.aq.asq.len, val); event.buf_len = I40E_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) return; do { ret = i40e_clean_arq_element(hw, &event, &pending); if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) break; else if (ret) { dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); break; } opcode = le16_to_cpu(event.desc.opcode); switch (opcode) { case i40e_aqc_opc_get_link_status: i40e_handle_link_event(pf, &event); break; case i40e_aqc_opc_send_msg_to_pf: ret = i40e_vc_process_vf_msg(pf, le16_to_cpu(event.desc.retval), le32_to_cpu(event.desc.cookie_high), le32_to_cpu(event.desc.cookie_low), event.msg_buf, event.msg_len); break; case i40e_aqc_opc_lldp_update_mib: dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); #ifdef CONFIG_I40E_DCB rtnl_lock(); ret = i40e_handle_lldp_event(pf, &event); rtnl_unlock(); #endif /* CONFIG_I40E_DCB */ break; case i40e_aqc_opc_event_lan_overflow: dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); i40e_handle_lan_overflow_event(pf, &event); break; case i40e_aqc_opc_send_msg_to_peer: dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); break; case i40e_aqc_opc_nvm_erase: case i40e_aqc_opc_nvm_update: case i40e_aqc_opc_oem_post_update: i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation 0x%04x completed\n", opcode); break; default: dev_info(&pf->pdev->dev, "ARQ: Unknown event 0x%04x ignored\n", opcode); break; } } while (i++ < pf->adminq_work_limit); if (i < pf->adminq_work_limit) clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); /* re-enable Admin queue interrupt cause */ val = rd32(hw, I40E_PFINT_ICR0_ENA); val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, val); i40e_flush(hw); kfree(event.msg_buf); } /** * i40e_verify_eeprom - make sure eeprom is good to use * @pf: board private structure **/ static void i40e_verify_eeprom(struct i40e_pf *pf) { int err; err = i40e_diag_eeprom_test(&pf->hw); if (err) { /* retry in case of garbage read */ err = i40e_diag_eeprom_test(&pf->hw); if (err) { dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", err); set_bit(__I40E_BAD_EEPROM, pf->state); } } if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) { dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); clear_bit(__I40E_BAD_EEPROM, pf->state); } } /** * i40e_enable_pf_switch_lb * @pf: pointer to the PF structure * * enable switch loop back or die - no point in a return value **/ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_vsi_context ctxt; int ret; ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; ctxt.vf_num = 0; ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "couldn't get PF vsi config, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return; } ctxt.flags = I40E_AQ_VSI_TYPE_PF; ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "update vsi switch failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } } /** * i40e_disable_pf_switch_lb * @pf: pointer to the 
PF structure * * disable switch loop back or die - no point in a return value **/ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_vsi_context ctxt; int ret; ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; ctxt.vf_num = 0; ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "couldn't get PF vsi config, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return; } ctxt.flags = I40E_AQ_VSI_TYPE_PF; ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "update vsi switch failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } } /** * i40e_config_bridge_mode - Configure the HW bridge mode * @veb: pointer to the bridge instance * * Configure the loop back mode for the LAN VSI that is downlink to the * specified HW bridge instance. It is expected this function is called * when a new HW bridge is instantiated. **/ static void i40e_config_bridge_mode(struct i40e_veb *veb) { struct i40e_pf *pf = veb->pf; if (pf->hw.debug_mask & I40E_DEBUG_LAN) dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); if (veb->bridge_mode & BRIDGE_MODE_VEPA) i40e_disable_pf_switch_lb(pf); else i40e_enable_pf_switch_lb(pf); } /** * i40e_reconstitute_veb - rebuild the VEB and anything connected to it * @veb: pointer to the VEB instance * * This is a recursive function that first builds the attached VSIs then * recurses in to build the next layer of VEB. We track the connections * through our own index numbers because the seid's from the HW could * change across the reset. 
**/ static int i40e_reconstitute_veb(struct i40e_veb *veb) { struct i40e_vsi *ctl_vsi = NULL; struct i40e_pf *pf = veb->pf; int v, veb_idx; int ret; /* build VSI that owns this VEB, temporarily attached to base VEB */ for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { if (pf->vsi[v] && pf->vsi[v]->veb_idx == veb->idx && pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { ctl_vsi = pf->vsi[v]; break; } } if (!ctl_vsi) { dev_info(&pf->pdev->dev, "missing owner VSI for veb_idx %d\n", veb->idx); ret = -ENOENT; goto end_reconstitute; } if (ctl_vsi != pf->vsi[pf->lan_vsi]) ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; ret = i40e_add_vsi(ctl_vsi); if (ret) { dev_info(&pf->pdev->dev, "rebuild of veb_idx %d owner VSI failed: %d\n", veb->idx, ret); goto end_reconstitute; } i40e_vsi_reset_stats(ctl_vsi); /* create the VEB in the switch and move the VSI onto the VEB */ ret = i40e_add_veb(veb, ctl_vsi); if (ret) goto end_reconstitute; if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) veb->bridge_mode = BRIDGE_MODE_VEB; else veb->bridge_mode = BRIDGE_MODE_VEPA; i40e_config_bridge_mode(veb); /* create the remaining VSIs attached to this VEB */ for (v = 0; v < pf->num_alloc_vsi; v++) { if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) continue; if (pf->vsi[v]->veb_idx == veb->idx) { struct i40e_vsi *vsi = pf->vsi[v]; vsi->uplink_seid = veb->seid; ret = i40e_add_vsi(vsi); if (ret) { dev_info(&pf->pdev->dev, "rebuild of vsi_idx %d failed: %d\n", v, ret); goto end_reconstitute; } i40e_vsi_reset_stats(vsi); } } /* create any VEBs attached to this VEB - RECURSION */ for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { pf->veb[veb_idx]->uplink_seid = veb->seid; ret = i40e_reconstitute_veb(pf->veb[veb_idx]); if (ret) break; } } end_reconstitute: return ret; } /** * i40e_get_capabilities - get info about the HW * @pf: the PF struct **/ static int i40e_get_capabilities(struct i40e_pf *pf, enum i40e_admin_queue_opc list_type) { struct i40e_aqc_list_capabilities_element_resp *cap_buf; u16 data_size; int buf_len; int err; buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); do { cap_buf = kzalloc(buf_len, GFP_KERNEL); if (!cap_buf) return -ENOMEM; /* this loads the data into the hw struct for us */ err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, &data_size, list_type, NULL); /* data loaded, buffer no longer needed */ kfree(cap_buf); if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { /* retry with a larger buffer */ buf_len = data_size; } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { dev_info(&pf->pdev->dev, "capability discovery failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -ENODEV; } } while (err); if (pf->hw.debug_mask & I40E_DEBUG_USER) { if (list_type == i40e_aqc_opc_list_func_capabilities) { dev_info(&pf->pdev->dev, "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", pf->hw.pf_id, pf->hw.func_caps.num_vfs, pf->hw.func_caps.num_msix_vectors, pf->hw.func_caps.num_msix_vectors_vf, pf->hw.func_caps.fd_filters_guaranteed, pf->hw.func_caps.fd_filters_best_effort, pf->hw.func_caps.num_tx_qp, pf->hw.func_caps.num_vsis); } else if (list_type == i40e_aqc_opc_list_dev_capabilities) { dev_info(&pf->pdev->dev, "switch_mode=0x%04x, function_valid=0x%08x\n", pf->hw.dev_caps.switch_mode, pf->hw.dev_caps.valid_functions); dev_info(&pf->pdev->dev, "SR-IOV=%d, num_vfs for all function=%u\n", pf->hw.dev_caps.sr_iov_1_1, 
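/*
 * Illustrative usage (not part of the driver): during initialization the
 * function and device capability lists are typically fetched back to
 * back, e.g.
 *
 *	err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
 *	if (!err)
 *		err = i40e_get_capabilities(pf,
 *					    i40e_aqc_opc_list_dev_capabilities);
 *
 * Each call retries with a larger buffer whenever firmware reports
 * I40E_AQ_RC_ENOMEM, reusing the size firmware returned in data_size.
 */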
pf->hw.dev_caps.num_vfs); dev_info(&pf->pdev->dev, "num_vsis=%u, num_rx:%u, num_tx=%u\n", pf->hw.dev_caps.num_vsis, pf->hw.dev_caps.num_rx_qp, pf->hw.dev_caps.num_tx_qp); } } if (list_type == i40e_aqc_opc_list_func_capabilities) { #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ + pf->hw.func_caps.num_vfs) if (pf->hw.revision_id == 0 && pf->hw.func_caps.num_vsis < DEF_NUM_VSI) { dev_info(&pf->pdev->dev, "got num_vsis %d, setting num_vsis to %d\n", pf->hw.func_caps.num_vsis, DEF_NUM_VSI); pf->hw.func_caps.num_vsis = DEF_NUM_VSI; } } return 0; } static int i40e_vsi_clear(struct i40e_vsi *vsi); /** * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband * @pf: board private structure **/ static void i40e_fdir_sb_setup(struct i40e_pf *pf) { struct i40e_vsi *vsi; /* quick workaround for an NVM issue that leaves a critical register * uninitialized */ if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) { static const u32 hkey[] = { 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36, 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb, 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21, 0x95b3a76d}; int i; for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++) wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); } if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) return; /* find existing VSI and see if it needs configuring */ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); /* create a new VSI if none exists */ if (!vsi) { vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->vsi[pf->lan_vsi]->seid, 0); if (!vsi) { dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; pf->flags |= I40E_FLAG_FD_SB_INACTIVE; return; } } i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring); } /** * i40e_fdir_teardown - release the Flow Director resources * @pf: board private structure **/ static void i40e_fdir_teardown(struct i40e_pf *pf) { struct i40e_vsi *vsi; i40e_fdir_filter_exit(pf); vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); if (vsi) i40e_vsi_release(vsi); } /** * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs * @vsi: PF main vsi * @seid: seid of main or channel VSIs * * Rebuilds cloud filters associated with main VSI and channel VSIs if they * existed before reset **/ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid) { struct i40e_cloud_filter *cfilter; struct i40e_pf *pf = vsi->back; struct hlist_node *node; i40e_status ret; /* Add cloud filters back if they exist */ hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list, cloud_node) { if (cfilter->seid != seid) continue; if (cfilter->dst_port) ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true); else ret = i40e_add_del_cloud_filter(vsi, cfilter, true); if (ret) { dev_dbg(&pf->pdev->dev, "Failed to rebuild cloud filter, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return ret; } } return 0; } /** * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset * @vsi: PF main vsi * * Rebuilds channel VSIs if they existed before reset **/ static int i40e_rebuild_channels(struct i40e_vsi *vsi) { struct i40e_channel *ch, *ch_tmp; i40e_status ret; if (list_empty(&vsi->ch_list)) return 0; list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { if (!ch->initialized) break; /* Proceed with creation of channel (VMDq2) VSI */ ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch); if (ret) { dev_info(&vsi->back->pdev->dev, "failed to rebuild channels using uplink_seid %u\n", vsi->uplink_seid); return ret; } /* Reconfigure TX queues 
using QTX_CTL register */ ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch); if (ret) { dev_info(&vsi->back->pdev->dev, "failed to configure TX rings for channel %u\n", ch->seid); return ret; } /* update 'next_base_queue' */ vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs; if (ch->max_tx_rate) { u64 credits = ch->max_tx_rate; if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate)) return -EINVAL; do_div(credits, I40E_BW_CREDIT_DIVISOR); dev_dbg(&vsi->back->pdev->dev, "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", ch->max_tx_rate, credits, ch->seid); } ret = i40e_rebuild_cloud_filters(vsi, ch->seid); if (ret) { dev_dbg(&vsi->back->pdev->dev, "Failed to rebuild cloud filters for channel VSI %u\n", ch->seid); return ret; } } return 0; } /** * i40e_prep_for_reset - prep for the core to reset * @pf: board private structure * @lock_acquired: indicates whether or not the lock has been acquired * before this function was called. * * Close up the VFs and other things in prep for PF Reset. **/ static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired) { struct i40e_hw *hw = &pf->hw; i40e_status ret = 0; u32 v; clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state); if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) return; if (i40e_check_asq_alive(&pf->hw)) i40e_vc_notify_reset(pf); dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); /* quiesce the VSIs and their queues that are not already DOWN */ /* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */ if (!lock_acquired) rtnl_lock(); i40e_pf_quiesce_all_vsi(pf); if (!lock_acquired) rtnl_unlock(); for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v]) pf->vsi[v]->seid = 0; } i40e_shutdown_adminq(&pf->hw); /* call shutdown HMC */ if (hw->hmc.hmc_obj) { ret = i40e_shutdown_lan_hmc(hw); if (ret) dev_warn(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret); } /* Save the current PTP time so that we can restore the time after the * reset completes. */ i40e_ptp_save_hw_time(pf); } /** * i40e_send_version - update firmware with driver version * @pf: PF struct */ static void i40e_send_version(struct i40e_pf *pf) { struct i40e_driver_version dv; dv.major_version = DRV_VERSION_MAJOR; dv.minor_version = DRV_VERSION_MINOR; dv.build_version = DRV_VERSION_BUILD; dv.subbuild_version = 0; strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string)); i40e_aq_send_driver_version(&pf->hw, &dv, NULL); } /** * i40e_get_oem_version - get OEM specific version information * @hw: pointer to the hardware structure **/ static void i40e_get_oem_version(struct i40e_hw *hw) { u16 block_offset = 0xffff; u16 block_length = 0; u16 capabilities = 0; u16 gen_snap = 0; u16 release = 0; #define I40E_SR_NVM_OEM_VERSION_PTR 0x1B #define I40E_NVM_OEM_LENGTH_OFFSET 0x00 #define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01 #define I40E_NVM_OEM_GEN_OFFSET 0x02 #define I40E_NVM_OEM_RELEASE_OFFSET 0x03 #define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F #define I40E_NVM_OEM_LENGTH 3 /* Check if pointer to OEM version block is valid. */ i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset); if (block_offset == 0xffff) return; /* Check if OEM version block has correct length. */ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET, &block_length); if (block_length < I40E_NVM_OEM_LENGTH) return; /* Check if OEM version format is as expected. 
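 * Relative to block_offset, word 0 holds the block length, word 1 the
 * capability bits (whose low nibble must be zero for a layout we
 * understand), word 2 the gen/snap value and word 3 the release value;
 * the last two are combined into hw->nvm.oem_ver as
 * (gen_snap << I40E_OEM_SNAP_SHIFT) | release.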
*/ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET, &capabilities); if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0) return; i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET, &gen_snap); i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET, &release); hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release; hw->nvm.eetrack = I40E_OEM_EETRACK_ID; } /** * i40e_reset - wait for core reset to finish reset, reset pf if corer not seen * @pf: board private structure **/ static int i40e_reset(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; i40e_status ret; ret = i40e_pf_reset(hw); if (ret) { dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); set_bit(__I40E_RESET_FAILED, pf->state); clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state); } else { pf->pfr_count++; } return ret; } /** * i40e_rebuild - rebuild using a saved config * @pf: board private structure * @reinit: if the Main VSI needs to re-initialized. * @lock_acquired: indicates whether or not the lock has been acquired * before this function was called. **/ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) { int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state); struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_hw *hw = &pf->hw; u8 set_fc_aq_fail = 0; i40e_status ret; u32 val; int v; if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && i40e_check_recovery_mode(pf)) { i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev); } if (test_bit(__I40E_DOWN, pf->state) && !test_bit(__I40E_RECOVERY_MODE, pf->state) && !old_recovery_mode_bit) goto clear_recovery; dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); /* rebuild the basics for the AdminQ, HMC, and initial HW switch */ ret = i40e_init_adminq(&pf->hw); if (ret) { dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto clear_recovery; } i40e_get_oem_version(&pf->hw); if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) || hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) { /* The following delay is necessary for 4.33 firmware and older * to recover after EMP reset. 200 ms should suffice but we * put here 300 ms to be sure that FW is ready to operate * after reset. */ mdelay(300); } /* re-verify the eeprom if we just had an EMP reset */ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) i40e_verify_eeprom(pf); /* if we are going out of or into recovery mode we have to act * accordingly with regard to resources initialization * and deinitialization */ if (test_bit(__I40E_RECOVERY_MODE, pf->state) || old_recovery_mode_bit) { if (i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities)) goto end_unlock; if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { /* we're staying in recovery mode so we'll reinitialize * misc vector here */ if (i40e_setup_misc_vector_for_recovery_mode(pf)) goto end_unlock; } else { if (!lock_acquired) rtnl_lock(); /* we're going out of recovery mode so we'll free * the IRQ allocated specifically for recovery mode * and restore the interrupt scheme */ free_irq(pf->pdev->irq, pf); i40e_clear_interrupt_scheme(pf); if (i40e_restore_interrupt_scheme(pf)) goto end_unlock; } /* tell the firmware that we're starting */ i40e_send_version(pf); /* bail out in case recovery mode was detected, as there is * no need for further configuration. 
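 * The full switch/VSI/VF rebuild below is only performed on the
 * normal, non-recovery reset path.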
*/ goto end_unlock; } i40e_clear_pxe_mode(hw); ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities); if (ret) goto end_core_reset; ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, 0, 0); if (ret) { dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret); goto end_core_reset; } ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); if (ret) { dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret); goto end_core_reset; } /* Enable FW to write a default DCB config on link-up */ i40e_aq_set_dcb_parameters(hw, true, NULL); #ifdef CONFIG_I40E_DCB ret = i40e_init_pf_dcb(pf); if (ret) { dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret); pf->flags &= ~I40E_FLAG_DCB_CAPABLE; /* Continue without DCB enabled */ } #endif /* CONFIG_I40E_DCB */ /* do basic switch setup */ if (!lock_acquired) rtnl_lock(); ret = i40e_setup_pf_switch(pf, reinit); if (ret) goto end_unlock; /* The driver only wants link up/down and module qualification * reports from firmware. Note the negative logic. */ ret = i40e_aq_set_phy_int_mask(&pf->hw, ~(I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MEDIA_NA | I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); if (ret) dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* make sure our flow control settings are restored */ ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); if (ret) dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Rebuild the VSIs and VEBs that existed before reset. * They are still in our local switch element arrays, so only * need to rebuild the switch model in the HW. * * If there were VEBs but the reconstitution failed, we'll try * try to recover minimal use by getting the basic PF VSI working. */ if (vsi->uplink_seid != pf->mac_seid) { dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); /* find the one VEB connected to the MAC, and find orphans */ for (v = 0; v < I40E_MAX_VEB; v++) { if (!pf->veb[v]) continue; if (pf->veb[v]->uplink_seid == pf->mac_seid || pf->veb[v]->uplink_seid == 0) { ret = i40e_reconstitute_veb(pf->veb[v]); if (!ret) continue; /* If Main VEB failed, we're in deep doodoo, * so give up rebuilding the switch and set up * for minimal rebuild of PF VSI. * If orphan failed, we'll report the error * but try to keep going. 
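 * (An orphan here is a VEB whose recorded uplink_seid is 0.)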
*/ if (pf->veb[v]->uplink_seid == pf->mac_seid) { dev_info(&pf->pdev->dev, "rebuild of switch failed: %d, will try to set up simple PF connection\n", ret); vsi->uplink_seid = pf->mac_seid; break; } else if (pf->veb[v]->uplink_seid == 0) { dev_info(&pf->pdev->dev, "rebuild of orphan VEB failed: %d\n", ret); } } } } if (vsi->uplink_seid == pf->mac_seid) { dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); /* no VEB, so rebuild only the Main VSI */ ret = i40e_add_vsi(vsi); if (ret) { dev_info(&pf->pdev->dev, "rebuild of Main VSI failed: %d\n", ret); goto end_unlock; } } if (vsi->mqprio_qopt.max_rate[0]) { u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; u64 credits = 0; do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); if (ret) goto end_unlock; credits = max_tx_rate; do_div(credits, I40E_BW_CREDIT_DIVISOR); dev_dbg(&vsi->back->pdev->dev, "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", max_tx_rate, credits, vsi->seid); } ret = i40e_rebuild_cloud_filters(vsi, vsi->seid); if (ret) goto end_unlock; /* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs * for this main VSI if they exist */ ret = i40e_rebuild_channels(vsi); if (ret) goto end_unlock; /* Reconfigure hardware for allowing smaller MSS in the case * of TSO, so that we avoid the MDD being fired and causing * a reset in the case of small MSS+TSO. */ #define I40E_REG_MSS 0x000E64DC #define I40E_REG_MSS_MIN_MASK 0x3FF0000 #define I40E_64BYTE_MSS 0x400000 val = rd32(hw, I40E_REG_MSS); if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) { val &= ~I40E_REG_MSS_MIN_MASK; val |= I40E_64BYTE_MSS; wr32(hw, I40E_REG_MSS, val); } if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { msleep(75); ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (ret) dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } /* reinit the misc interrupt */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) ret = i40e_setup_misc_vector(pf); /* Add a filter to drop all Flow control frames from any VSI from being * transmitted. By doing so we stop a malicious VF from sending out * PAUSE or PFC frames and potentially controlling traffic for other * PF/VF VSIs. * The FW can still send Flow control frames if enabled. */ i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, pf->main_vsi_seid); /* restart the VSIs that were rebuilt and running before the reset */ i40e_pf_unquiesce_all_vsi(pf); /* Release the RTNL lock before we start resetting VFs */ if (!lock_acquired) rtnl_unlock(); /* Restore promiscuous settings */ ret = i40e_set_promiscuous(pf, pf->cur_promisc); if (ret) dev_warn(&pf->pdev->dev, "Failed to restore promiscuous setting: %s, err %s aq_err %s\n", pf->cur_promisc ? "on" : "off", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); i40e_reset_all_vfs(pf, true); /* tell the firmware that we're starting */ i40e_send_version(pf); /* We've already released the lock, so don't do it again */ goto end_core_reset; end_unlock: if (!lock_acquired) rtnl_unlock(); end_core_reset: clear_bit(__I40E_RESET_FAILED, pf->state); clear_recovery: clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state); clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state); } /** * i40e_reset_and_rebuild - reset and rebuild using a saved config * @pf: board private structure * @reinit: if the Main VSI needs to re-initialized. 
* @lock_acquired: indicates whether or not the lock has been acquired * before this function was called. **/ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) { int ret; /* Now we wait for GRST to settle out. * We don't have to delete the VEBs or VSIs from the hw switch * because the reset will make them disappear. */ ret = i40e_reset(pf); if (!ret) i40e_rebuild(pf, reinit, lock_acquired); } /** * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild * @pf: board private structure * * Close up the VFs and other things in prep for a Core Reset, * then get ready to rebuild the world. * @lock_acquired: indicates whether or not the lock has been acquired * before this function was called. **/ static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired) { i40e_prep_for_reset(pf, lock_acquired); i40e_reset_and_rebuild(pf, false, lock_acquired); } /** * i40e_handle_mdd_event * @pf: pointer to the PF structure * * Called from the MDD irq handler to identify possibly malicious vfs **/ static void i40e_handle_mdd_event(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; bool mdd_detected = false; struct i40e_vf *vf; u32 reg; int i; if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state)) return; /* find what triggered the MDD event */ reg = rd32(hw, I40E_GL_MDET_TX); if (reg & I40E_GL_MDET_TX_VALID_MASK) { u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> I40E_GL_MDET_TX_PF_NUM_SHIFT; u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> I40E_GL_MDET_TX_VF_NUM_SHIFT; u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> I40E_GL_MDET_TX_EVENT_SHIFT; u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >> I40E_GL_MDET_TX_QUEUE_SHIFT) - pf->hw.func_caps.base_queue; if (netif_msg_tx_err(pf)) dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n", event, queue, pf_num, vf_num); wr32(hw, I40E_GL_MDET_TX, 0xffffffff); mdd_detected = true; } reg = rd32(hw, I40E_GL_MDET_RX); if (reg & I40E_GL_MDET_RX_VALID_MASK) { u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> I40E_GL_MDET_RX_FUNCTION_SHIFT; u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> I40E_GL_MDET_RX_EVENT_SHIFT; u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >> I40E_GL_MDET_RX_QUEUE_SHIFT) - pf->hw.func_caps.base_queue; if (netif_msg_rx_err(pf)) dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", event, queue, func); wr32(hw, I40E_GL_MDET_RX, 0xffffffff); mdd_detected = true; } if (mdd_detected) { reg = rd32(hw, I40E_PF_MDET_TX); if (reg & I40E_PF_MDET_TX_VALID_MASK) { wr32(hw, I40E_PF_MDET_TX, 0xFFFF); dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n"); } reg = rd32(hw, I40E_PF_MDET_RX); if (reg & I40E_PF_MDET_RX_VALID_MASK) { wr32(hw, I40E_PF_MDET_RX, 0xFFFF); dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n"); } } /* see if one of the VFs needs its hand slapped */ for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { vf = &(pf->vf[i]); reg = rd32(hw, I40E_VP_MDET_TX(i)); if (reg & I40E_VP_MDET_TX_VALID_MASK) { wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); vf->num_mdd_events++; dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", i); dev_info(&pf->pdev->dev, "Use PF Control I/F to re-enable the VF\n"); set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); } reg = rd32(hw, I40E_VP_MDET_RX(i)); if (reg & I40E_VP_MDET_RX_VALID_MASK) { wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); vf->num_mdd_events++; dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", i); 
dev_info(&pf->pdev->dev, "Use PF Control I/F to re-enable the VF\n"); set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); } } /* re-enable mdd interrupt cause */ clear_bit(__I40E_MDD_EVENT_PENDING, pf->state); reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); i40e_flush(hw); } static const char *i40e_tunnel_name(u8 type) { switch (type) { case UDP_TUNNEL_TYPE_VXLAN: return "vxlan"; case UDP_TUNNEL_TYPE_GENEVE: return "geneve"; default: return "unknown"; } } /** * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters * @pf: board private structure **/ static void i40e_sync_udp_filters(struct i40e_pf *pf) { int i; /* loop through and set pending bit for all active UDP filters */ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { if (pf->udp_ports[i].port) pf->pending_udp_bitmap |= BIT_ULL(i); } set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state); } /** * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW * @pf: board private structure **/ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; u8 filter_index, type; u16 port; int i; if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state)) return; /* acquire RTNL to maintain state of flags and port requests */ rtnl_lock(); for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { if (pf->pending_udp_bitmap & BIT_ULL(i)) { struct i40e_udp_port_config *udp_port; i40e_status ret = 0; udp_port = &pf->udp_ports[i]; pf->pending_udp_bitmap &= ~BIT_ULL(i); port = READ_ONCE(udp_port->port); type = READ_ONCE(udp_port->type); filter_index = READ_ONCE(udp_port->filter_index); /* release RTNL while we wait on AQ command */ rtnl_unlock(); if (port) ret = i40e_aq_add_udp_tunnel(hw, port, type, &filter_index, NULL); else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED) ret = i40e_aq_del_udp_tunnel(hw, filter_index, NULL); /* reacquire RTNL so we can update filter_index */ rtnl_lock(); if (ret) { dev_info(&pf->pdev->dev, "%s %s port %d, index %d failed, err %s aq_err %s\n", i40e_tunnel_name(type), port ? "add" : "delete", port, filter_index, i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); if (port) { /* failed to add, just reset port, * drop pending bit for any deletion */ udp_port->port = 0; pf->pending_udp_bitmap &= ~BIT_ULL(i); } } else if (port) { /* record filter index on success */ udp_port->filter_index = filter_index; } } } rtnl_unlock(); } /** * i40e_service_task - Run the driver's async subtasks * @work: pointer to work_struct containing our data **/ static void i40e_service_task(struct work_struct *work) { struct i40e_pf *pf = container_of(work, struct i40e_pf, service_task); unsigned long start_time = jiffies; /* don't bother with service tasks if a reset is in progress */ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || test_bit(__I40E_SUSPENDED, pf->state)) return; if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state)) return; if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) { i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]); i40e_sync_filters_subtask(pf); i40e_reset_subtask(pf); i40e_handle_mdd_event(pf); i40e_vc_process_vflr_event(pf); i40e_watchdog_subtask(pf); i40e_fdir_reinit_subtask(pf); if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) { /* Client subtask will reopen next time through. 
*/ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true); } else { i40e_client_subtask(pf); if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE, pf->state)) i40e_notify_client_of_l2_param_changes( pf->vsi[pf->lan_vsi]); } i40e_sync_filters_subtask(pf); i40e_sync_udp_filters_subtask(pf); } else { i40e_reset_subtask(pf); } i40e_clean_adminq_subtask(pf); /* flush memory to make sure state is correct before next watchdog */ smp_mb__before_atomic(); clear_bit(__I40E_SERVICE_SCHED, pf->state); /* If the tasks have taken longer than one timer cycle or there * is more work to be done, reschedule the service task now * rather than wait for the timer to tick again. */ if (time_after(jiffies, (start_time + pf->service_timer_period)) || test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) || test_bit(__I40E_MDD_EVENT_PENDING, pf->state) || test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) i40e_service_event_schedule(pf); } /** * i40e_service_timer - timer callback * @data: pointer to PF struct **/ static void i40e_service_timer(struct timer_list *t) { struct i40e_pf *pf = from_timer(pf, t, service_timer); mod_timer(&pf->service_timer, round_jiffies(jiffies + pf->service_timer_period)); i40e_service_event_schedule(pf); } /** * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI * @vsi: the VSI being configured **/ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; switch (vsi->type) { case I40E_VSI_MAIN: vsi->alloc_queue_pairs = pf->num_lan_qps; if (!vsi->num_tx_desc) vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, I40E_REQ_DESCRIPTOR_MULTIPLE); if (!vsi->num_rx_desc) vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, I40E_REQ_DESCRIPTOR_MULTIPLE); if (pf->flags & I40E_FLAG_MSIX_ENABLED) vsi->num_q_vectors = pf->num_lan_msix; else vsi->num_q_vectors = 1; break; case I40E_VSI_FDIR: vsi->alloc_queue_pairs = 1; vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT, I40E_REQ_DESCRIPTOR_MULTIPLE); vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT, I40E_REQ_DESCRIPTOR_MULTIPLE); vsi->num_q_vectors = pf->num_fdsb_msix; break; case I40E_VSI_VMDQ2: vsi->alloc_queue_pairs = pf->num_vmdq_qps; if (!vsi->num_tx_desc) vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, I40E_REQ_DESCRIPTOR_MULTIPLE); if (!vsi->num_rx_desc) vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, I40E_REQ_DESCRIPTOR_MULTIPLE); vsi->num_q_vectors = pf->num_vmdq_msix; break; case I40E_VSI_SRIOV: vsi->alloc_queue_pairs = pf->num_vf_qps; if (!vsi->num_tx_desc) vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, I40E_REQ_DESCRIPTOR_MULTIPLE); if (!vsi->num_rx_desc) vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, I40E_REQ_DESCRIPTOR_MULTIPLE); break; default: WARN_ON(1); return -ENODATA; } return 0; } /** * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi * @vsi: VSI pointer * @alloc_qvectors: a bool to specify if q_vectors need to be allocated. * * On error: returns error code (negative) * On success: returns 0 **/ static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors) { struct i40e_ring **next_rings; int size; int ret = 0; /* allocate memory for both Tx, XDP Tx and Rx ring pointers */ size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 
3 : 2); vsi->tx_rings = kzalloc(size, GFP_KERNEL); if (!vsi->tx_rings) return -ENOMEM; next_rings = vsi->tx_rings + vsi->alloc_queue_pairs; if (i40e_enabled_xdp_vsi(vsi)) { vsi->xdp_rings = next_rings; next_rings += vsi->alloc_queue_pairs; } vsi->rx_rings = next_rings; if (alloc_qvectors) { /* allocate memory for q_vector pointers */ size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors; vsi->q_vectors = kzalloc(size, GFP_KERNEL); if (!vsi->q_vectors) { ret = -ENOMEM; goto err_vectors; } } return ret; err_vectors: kfree(vsi->tx_rings); return ret; } /** * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF * @pf: board private structure * @type: type of VSI * * On error: returns error code (negative) * On success: returns vsi index in PF (positive) **/ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) { int ret = -ENODEV; struct i40e_vsi *vsi; int vsi_idx; int i; /* Need to protect the allocation of the VSIs at the PF level */ mutex_lock(&pf->switch_mutex); /* VSI list may be fragmented if VSI creation/destruction has * been happening. We can afford to do a quick scan to look * for any free VSIs in the list. * * find next empty vsi slot, looping back around if necessary */ i = pf->next_vsi; while (i < pf->num_alloc_vsi && pf->vsi[i]) i++; if (i >= pf->num_alloc_vsi) { i = 0; while (i < pf->next_vsi && pf->vsi[i]) i++; } if (i < pf->num_alloc_vsi && !pf->vsi[i]) { vsi_idx = i; /* Found one! */ } else { ret = -ENODEV; goto unlock_pf; /* out of VSI slots! */ } pf->next_vsi = ++i; vsi = kzalloc(sizeof(*vsi), GFP_KERNEL); if (!vsi) { ret = -ENOMEM; goto unlock_pf; } vsi->type = type; vsi->back = pf; set_bit(__I40E_VSI_DOWN, vsi->state); vsi->flags = 0; vsi->idx = vsi_idx; vsi->int_rate_limit = 0; vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? pf->rss_table_size : 64; vsi->netdev_registered = false; vsi->work_limit = I40E_DEFAULT_IRQ_WORK; hash_init(vsi->mac_filter_hash); vsi->irqs_ready = false; if (type == I40E_VSI_MAIN) { vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL); if (!vsi->af_xdp_zc_qps) goto err_rings; } ret = i40e_set_num_rings_in_vsi(vsi); if (ret) goto err_rings; ret = i40e_vsi_alloc_arrays(vsi, true); if (ret) goto err_rings; /* Setup default MSIX irq handler for VSI */ i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); /* Initialize VSI lock */ spin_lock_init(&vsi->mac_filter_hash_lock); pf->vsi[vsi_idx] = vsi; ret = vsi_idx; goto unlock_pf; err_rings: bitmap_free(vsi->af_xdp_zc_qps); pf->next_vsi = i - 1; kfree(vsi); unlock_pf: mutex_unlock(&pf->switch_mutex); return ret; } /** * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI * @vsi: VSI pointer * @free_qvectors: a bool to specify if q_vectors need to be freed. 
* * On error: returns error code (negative) * On success: returns 0 **/ static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors) { /* free the ring and vector containers */ if (free_qvectors) { kfree(vsi->q_vectors); vsi->q_vectors = NULL; } kfree(vsi->tx_rings); vsi->tx_rings = NULL; vsi->rx_rings = NULL; vsi->xdp_rings = NULL; } /** * i40e_clear_rss_config_user - clear the user configured RSS hash keys * and lookup table * @vsi: Pointer to VSI structure */ static void i40e_clear_rss_config_user(struct i40e_vsi *vsi) { if (!vsi) return; kfree(vsi->rss_hkey_user); vsi->rss_hkey_user = NULL; kfree(vsi->rss_lut_user); vsi->rss_lut_user = NULL; } /** * i40e_vsi_clear - Deallocate the VSI provided * @vsi: the VSI being un-configured **/ static int i40e_vsi_clear(struct i40e_vsi *vsi) { struct i40e_pf *pf; if (!vsi) return 0; if (!vsi->back) goto free_vsi; pf = vsi->back; mutex_lock(&pf->switch_mutex); if (!pf->vsi[vsi->idx]) { dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n", vsi->idx, vsi->idx, vsi->type); goto unlock_vsi; } if (pf->vsi[vsi->idx] != vsi) { dev_err(&pf->pdev->dev, "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n", pf->vsi[vsi->idx]->idx, pf->vsi[vsi->idx]->type, vsi->idx, vsi->type); goto unlock_vsi; } /* updates the PF for this cleared vsi */ i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); bitmap_free(vsi->af_xdp_zc_qps); i40e_vsi_free_arrays(vsi, true); i40e_clear_rss_config_user(vsi); pf->vsi[vsi->idx] = NULL; if (vsi->idx < pf->next_vsi) pf->next_vsi = vsi->idx; unlock_vsi: mutex_unlock(&pf->switch_mutex); free_vsi: kfree(vsi); return 0; } /** * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI * @vsi: the VSI being cleaned **/ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi) { int i; if (vsi->tx_rings && vsi->tx_rings[0]) { for (i = 0; i < vsi->alloc_queue_pairs; i++) { kfree_rcu(vsi->tx_rings[i], rcu); vsi->tx_rings[i] = NULL; vsi->rx_rings[i] = NULL; if (vsi->xdp_rings) vsi->xdp_rings[i] = NULL; } } } /** * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI * @vsi: the VSI being configured **/ static int i40e_alloc_rings(struct i40e_vsi *vsi) { int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 
3 : 2; struct i40e_pf *pf = vsi->back; struct i40e_ring *ring; /* Set basic values in the rings to be used later during open() */ for (i = 0; i < vsi->alloc_queue_pairs; i++) { /* allocate space for both Tx and Rx in one shot */ ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL); if (!ring) goto err_out; ring->queue_index = i; ring->reg_idx = vsi->base_queue + i; ring->ring_active = false; ring->vsi = vsi; ring->netdev = vsi->netdev; ring->dev = &pf->pdev->dev; ring->count = vsi->num_tx_desc; ring->size = 0; ring->dcb_tc = 0; if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; ring->itr_setting = pf->tx_itr_default; vsi->tx_rings[i] = ring++; if (!i40e_enabled_xdp_vsi(vsi)) goto setup_rx; ring->queue_index = vsi->alloc_queue_pairs + i; ring->reg_idx = vsi->base_queue + ring->queue_index; ring->ring_active = false; ring->vsi = vsi; ring->netdev = NULL; ring->dev = &pf->pdev->dev; ring->count = vsi->num_tx_desc; ring->size = 0; ring->dcb_tc = 0; if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; set_ring_xdp(ring); ring->itr_setting = pf->tx_itr_default; vsi->xdp_rings[i] = ring++; setup_rx: ring->queue_index = i; ring->reg_idx = vsi->base_queue + i; ring->ring_active = false; ring->vsi = vsi; ring->netdev = vsi->netdev; ring->dev = &pf->pdev->dev; ring->count = vsi->num_rx_desc; ring->size = 0; ring->dcb_tc = 0; ring->itr_setting = pf->rx_itr_default; vsi->rx_rings[i] = ring; } return 0; err_out: i40e_vsi_clear_rings(vsi); return -ENOMEM; } /** * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel * @pf: board private structure * @vectors: the number of MSI-X vectors to request * * Returns the number of vectors reserved, or error **/ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) { vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, I40E_MIN_MSIX, vectors); if (vectors < 0) { dev_info(&pf->pdev->dev, "MSI-X vector reservation failed: %d\n", vectors); vectors = 0; } return vectors; } /** * i40e_init_msix - Setup the MSIX capability * @pf: board private structure * * Work with the OS to set up the MSIX vectors needed. * * Returns the number of vectors reserved or negative on failure **/ static int i40e_init_msix(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; int cpus, extra_vectors; int vectors_left; int v_budget, i; int v_actual; int iwarp_requested = 0; if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) return -ENODEV; /* The number of vectors we'll request will be comprised of: * - Add 1 for "other" cause for Admin Queue events, etc. * - The number of LAN queue pairs * - Queues being used for RSS. * We don't need as many as max_rss_size vectors. * use rss_size instead in the calculation since that * is governed by number of cpus in the system. * - assumes symmetric Tx/Rx pairing * - The number of VMDq pairs * - The CPU count within the NUMA node if iWARP is enabled * Once we count this up, try the request. * * If we can't get what we want, we'll simplify to nearly nothing * and try again. If that still fails, we punt. */ vectors_left = hw->func_caps.num_msix_vectors; v_budget = 0; /* reserve one vector for miscellaneous handler */ if (vectors_left) { v_budget++; vectors_left--; } /* reserve some vectors for the main PF traffic queues. Initially we * only reserve at most 50% of the available vectors, in the case that * the number of online CPUs is large. This ensures that we can enable * extra features as well. 
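 * (For example, with 16 online CPUs and 25 vectors still available
 * after the misc vector is set aside, num_lan_msix starts out as
 * min(16, 25 / 2) = 12.)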
Once we've enabled the other features, we * will use any remaining vectors to reach as close as we can to the * number of online CPUs. */ cpus = num_online_cpus(); pf->num_lan_msix = min_t(int, cpus, vectors_left / 2); vectors_left -= pf->num_lan_msix; /* reserve one vector for sideband flow director */ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { if (vectors_left) { pf->num_fdsb_msix = 1; v_budget++; vectors_left--; } else { pf->num_fdsb_msix = 0; } } /* can we reserve enough for iWARP? */ if (pf->flags & I40E_FLAG_IWARP_ENABLED) { iwarp_requested = pf->num_iwarp_msix; if (!vectors_left) pf->num_iwarp_msix = 0; else if (vectors_left < pf->num_iwarp_msix) pf->num_iwarp_msix = 1; v_budget += pf->num_iwarp_msix; vectors_left -= pf->num_iwarp_msix; } /* any vectors left over go for VMDq support */ if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { if (!vectors_left) { pf->num_vmdq_msix = 0; pf->num_vmdq_qps = 0; } else { int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps; int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted); /* if we're short on vectors for what's desired, we limit * the queues per vmdq. If this is still more than are * available, the user will need to change the number of * queues/vectors used by the PF later with the ethtool * channels command */ if (vectors_left < vmdq_vecs_wanted) { pf->num_vmdq_qps = 1; vmdq_vecs_wanted = pf->num_vmdq_vsis; vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted); } pf->num_vmdq_msix = pf->num_vmdq_qps; v_budget += vmdq_vecs; vectors_left -= vmdq_vecs; } } /* On systems with a large number of SMP cores, we previously limited * the number of vectors for num_lan_msix to be at most 50% of the * available vectors, to allow for other features. Now, we add back * the remaining vectors. However, we ensure that the total * num_lan_msix will not exceed num_online_cpus(). To do this, we * calculate the number of vectors we can add without going over the * cap of CPUs. For systems with a small number of CPUs this will be * zero. */ extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left); pf->num_lan_msix += extra_vectors; vectors_left -= extra_vectors; WARN(vectors_left < 0, "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n"); v_budget += pf->num_lan_msix; pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); if (!pf->msix_entries) return -ENOMEM; for (i = 0; i < v_budget; i++) pf->msix_entries[i].entry = i; v_actual = i40e_reserve_msix_vectors(pf, v_budget); if (v_actual < I40E_MIN_MSIX) { pf->flags &= ~I40E_FLAG_MSIX_ENABLED; kfree(pf->msix_entries); pf->msix_entries = NULL; pci_disable_msix(pf->pdev); return -ENODEV; } else if (v_actual == I40E_MIN_MSIX) { /* Adjust for minimal MSIX use */ pf->num_vmdq_vsis = 0; pf->num_vmdq_qps = 0; pf->num_lan_qps = 1; pf->num_lan_msix = 1; } else if (v_actual != v_budget) { /* If we have limited resources, we will start with no vectors * for the special features and then allocate vectors to some * of these features based on the policy and at the end disable * the features that did not get any vectors. 
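 * For instance, after the misc vector is set aside (vec = v_actual - 1),
 * vec == 2 leaves the LAN VSI a single vector, while vec == 3 gives LAN
 * and iWARP one vector each if iWARP is enabled, or the LAN VSI two
 * vectors otherwise.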
*/ int vec; dev_info(&pf->pdev->dev, "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n", v_actual, v_budget); /* reserve the misc vector */ vec = v_actual - 1; /* Scale vector usage down */ pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ pf->num_vmdq_vsis = 1; pf->num_vmdq_qps = 1; /* partition out the remaining vectors */ switch (vec) { case 2: pf->num_lan_msix = 1; break; case 3: if (pf->flags & I40E_FLAG_IWARP_ENABLED) { pf->num_lan_msix = 1; pf->num_iwarp_msix = 1; } else { pf->num_lan_msix = 2; } break; default: if (pf->flags & I40E_FLAG_IWARP_ENABLED) { pf->num_iwarp_msix = min_t(int, (vec / 3), iwarp_requested); pf->num_vmdq_vsis = min_t(int, (vec / 3), I40E_DEFAULT_NUM_VMDQ_VSI); } else { pf->num_vmdq_vsis = min_t(int, (vec / 2), I40E_DEFAULT_NUM_VMDQ_VSI); } if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { pf->num_fdsb_msix = 1; vec--; } pf->num_lan_msix = min_t(int, (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)), pf->num_lan_msix); pf->num_lan_qps = pf->num_lan_msix; break; } } if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && (pf->num_fdsb_msix == 0)) { dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n"); pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; pf->flags |= I40E_FLAG_FD_SB_INACTIVE; } if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && (pf->num_vmdq_msix == 0)) { dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; } if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && (pf->num_iwarp_msix == 0)) { dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n"); pf->flags &= ~I40E_FLAG_IWARP_ENABLED; } i40e_debug(&pf->hw, I40E_DEBUG_INIT, "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n", pf->num_lan_msix, pf->num_vmdq_msix * pf->num_vmdq_vsis, pf->num_fdsb_msix, pf->num_iwarp_msix); return v_actual; } /** * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector * @vsi: the VSI being configured * @v_idx: index of the vector in the vsi struct * @cpu: cpu to be used on affinity_mask * * We allocate one q_vector. If allocation fails we return -ENOMEM. **/ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu) { struct i40e_q_vector *q_vector; /* allocate q_vector */ q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); if (!q_vector) return -ENOMEM; q_vector->vsi = vsi; q_vector->v_idx = v_idx; cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); if (vsi->netdev) netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll, NAPI_POLL_WEIGHT); /* tie q_vector and vsi together */ vsi->q_vectors[v_idx] = q_vector; return 0; } /** * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors * @vsi: the VSI being configured * * We allocate one q_vector per queue interrupt. If allocation fails we * return -ENOMEM. 
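 * The CPU hint handed to each q_vector walks cpu_online_mask, wrapping
 * back to the first online CPU when it runs off the end of the mask.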
**/ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int err, v_idx, num_q_vectors, current_cpu; /* if not MSIX, give the one vector only to the LAN VSI */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) num_q_vectors = vsi->num_q_vectors; else if (vsi == pf->vsi[pf->lan_vsi]) num_q_vectors = 1; else return -EINVAL; current_cpu = cpumask_first(cpu_online_mask); for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu); if (err) goto err_out; current_cpu = cpumask_next(current_cpu, cpu_online_mask); if (unlikely(current_cpu >= nr_cpu_ids)) current_cpu = cpumask_first(cpu_online_mask); } return 0; err_out: while (v_idx--) i40e_free_q_vector(vsi, v_idx); return err; } /** * i40e_init_interrupt_scheme - Determine proper interrupt scheme * @pf: board private structure to initialize **/ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) { int vectors = 0; ssize_t size; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { vectors = i40e_init_msix(pf); if (vectors < 0) { pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_IWARP_ENABLED | I40E_FLAG_RSS_ENABLED | I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED | I40E_FLAG_SRIOV_ENABLED | I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_VMDQ_ENABLED); pf->flags |= I40E_FLAG_FD_SB_INACTIVE; /* rework the queue expectations without MSIX */ i40e_determine_queue_usage(pf); } } if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && (pf->flags & I40E_FLAG_MSI_ENABLED)) { dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); vectors = pci_enable_msi(pf->pdev); if (vectors < 0) { dev_info(&pf->pdev->dev, "MSI init failed - %d\n", vectors); pf->flags &= ~I40E_FLAG_MSI_ENABLED; } vectors = 1; /* one MSI or Legacy vector */ } if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); /* set up vector assignment tracking */ size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors); pf->irq_pile = kzalloc(size, GFP_KERNEL); if (!pf->irq_pile) return -ENOMEM; pf->irq_pile->num_entries = vectors; pf->irq_pile->search_hint = 0; /* track first vector for misc interrupts, ignore return */ (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); return 0; } /** * i40e_restore_interrupt_scheme - Restore the interrupt scheme * @pf: private board data structure * * Restore the interrupt scheme that was cleared when we suspended the * device. This should be called during resume to re-allocate the q_vectors * and reacquire IRQs. */ static int i40e_restore_interrupt_scheme(struct i40e_pf *pf) { int err, i; /* We cleared the MSI and MSI-X flags when disabling the old interrupt * scheme. We need to re-enabled them here in order to attempt to * re-acquire the MSI or MSI-X vectors */ pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); err = i40e_init_interrupt_scheme(pf); if (err) return err; /* Now that we've re-acquired IRQs, we need to remap the vectors and * rings together again. 
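 * Each allocated VSI gets a fresh set of q_vectors and has its rings
 * mapped onto them, after which the misc vector is set up again.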
*/ for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i]) { err = i40e_vsi_alloc_q_vectors(pf->vsi[i]); if (err) goto err_unwind; i40e_vsi_map_rings_to_vectors(pf->vsi[i]); } } err = i40e_setup_misc_vector(pf); if (err) goto err_unwind; if (pf->flags & I40E_FLAG_IWARP_ENABLED) i40e_client_update_msix_info(pf); return 0; err_unwind: while (i--) { if (pf->vsi[i]) i40e_vsi_free_q_vectors(pf->vsi[i]); } return err; } /** * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle * non queue events in recovery mode * @pf: board private structure * * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage * the non-queue interrupts, e.g. AdminQ and errors in recovery mode. * This is handled differently than in recovery mode since no Tx/Rx resources * are being allocated. **/ static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf) { int err; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { err = i40e_setup_misc_vector(pf); if (err) { dev_info(&pf->pdev->dev, "MSI-X misc vector request failed, error %d\n", err); return err; } } else { u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED; err = request_irq(pf->pdev->irq, i40e_intr, flags, pf->int_name, pf); if (err) { dev_info(&pf->pdev->dev, "MSI/legacy misc vector request failed, error %d\n", err); return err; } i40e_enable_misc_int_causes(pf); i40e_irq_dynamic_enable_icr0(pf); } return 0; } /** * i40e_setup_misc_vector - Setup the misc vector to handle non queue events * @pf: board private structure * * This sets up the handler for MSIX 0, which is used to manage the * non-queue interrupts, e.g. AdminQ and errors. This is not used * when in MSI or Legacy interrupt mode. **/ static int i40e_setup_misc_vector(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; int err = 0; /* Only request the IRQ once, the first time through. */ if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) { err = request_irq(pf->msix_entries[0].vector, i40e_intr, 0, pf->int_name, pf); if (err) { clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); dev_info(&pf->pdev->dev, "request_irq for %s failed: %d\n", pf->int_name, err); return -EFAULT; } } i40e_enable_misc_int_causes(pf); /* associate no queues to the misc vector */ wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST); wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1); i40e_flush(hw); i40e_irq_dynamic_enable_icr0(pf); return err; } /** * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands * @vsi: Pointer to vsi structure * @seed: Buffter to store the hash keys * @lut: Buffer to store the lookup table entries * @lut_size: Size of buffer to store the lookup table entries * * Return 0 on success, negative on failure */ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int ret = 0; if (seed) { ret = i40e_aq_get_rss_key(hw, vsi->id, (struct i40e_aqc_get_set_rss_key_data *)seed); if (ret) { dev_info(&pf->pdev->dev, "Cannot get RSS key, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return ret; } } if (lut) { bool pf_lut = vsi->type == I40E_VSI_MAIN ? 
true : false; ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); if (ret) { dev_info(&pf->pdev->dev, "Cannot get RSS lut, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return ret; } } return ret; } /** * i40e_config_rss_reg - Configure RSS keys and lut by writing registers * @vsi: Pointer to vsi structure * @seed: RSS hash seed * @lut: Lookup table * @lut_size: Lookup table size * * Returns 0 on success, negative on failure **/ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, const u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u16 vf_id = vsi->vf_id; u8 i; /* Fill out hash function seed */ if (seed) { u32 *seed_dw = (u32 *)seed; if (vsi->type == I40E_VSI_MAIN) { for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]); } else if (vsi->type == I40E_VSI_SRIOV) { for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++) wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]); } else { dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n"); } } if (lut) { u32 *lut_dw = (u32 *)lut; if (vsi->type == I40E_VSI_MAIN) { if (lut_size != I40E_HLUT_ARRAY_SIZE) return -EINVAL; for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]); } else if (vsi->type == I40E_VSI_SRIOV) { if (lut_size != I40E_VF_HLUT_ARRAY_SIZE) return -EINVAL; for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]); } else { dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); } } i40e_flush(hw); return 0; } /** * i40e_get_rss_reg - Get the RSS keys and lut by reading registers * @vsi: Pointer to VSI structure * @seed: Buffer to store the keys * @lut: Buffer to store the lookup table entries * @lut_size: Size of buffer to store the lookup table entries * * Returns 0 on success, negative on failure */ static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u16 i; if (seed) { u32 *seed_dw = (u32 *)seed; for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); } if (lut) { u32 *lut_dw = (u32 *)lut; if (lut_size != I40E_HLUT_ARRAY_SIZE) return -EINVAL; for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i)); } return 0; } /** * i40e_config_rss - Configure RSS keys and lut * @vsi: Pointer to VSI structure * @seed: RSS hash seed * @lut: Lookup table * @lut_size: Lookup table size * * Returns 0 on success, negative on failure */ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) return i40e_config_rss_aq(vsi, seed, lut, lut_size); else return i40e_config_rss_reg(vsi, seed, lut, lut_size); } /** * i40e_get_rss - Get RSS keys and lut * @vsi: Pointer to VSI structure * @seed: Buffer to store the keys * @lut: Buffer to store the lookup table entries * @lut_size: Size of buffer to store the lookup table entries * * Returns 0 on success, negative on failure */ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) return i40e_get_rss_aq(vsi, seed, lut, lut_size); else return i40e_get_rss_reg(vsi, seed, lut, lut_size); } /** * i40e_fill_rss_lut - Fill the RSS lookup table with default values * @pf: Pointer to board private structure * @lut: Lookup 
table * @rss_table_size: Lookup table size * @rss_size: Range of queue number for hashing */ void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, u16 rss_table_size, u16 rss_size) { u16 i; for (i = 0; i < rss_table_size; i++) lut[i] = i % rss_size; } /** * i40e_pf_config_rss - Prepare for RSS if used * @pf: board private structure **/ static int i40e_pf_config_rss(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; u8 seed[I40E_HKEY_ARRAY_SIZE]; u8 *lut; struct i40e_hw *hw = &pf->hw; u32 reg_val; u64 hena; int ret; /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); hena |= i40e_pf_get_default_rss_hena(pf); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); /* Determine the RSS table size based on the hardware capabilities */ reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); reg_val = (pf->rss_table_size == 512) ? (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) : (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512); i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val); /* Determine the RSS size of the VSI */ if (!vsi->rss_size) { u16 qcount; /* If the firmware does something weird during VSI init, we * could end up with zero TCs. Check for that to avoid * divide-by-zero. It probably won't pass traffic, but it also * won't panic. */ qcount = vsi->num_queue_pairs / (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1); vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); } if (!vsi->rss_size) return -EINVAL; lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) return -ENOMEM; /* Use user configured lut if there is one, otherwise use default */ if (vsi->rss_lut_user) memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); else i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); /* Use user configured hash key if there is one, otherwise * use default. */ if (vsi->rss_hkey_user) memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); else netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); kfree(lut); return ret; } /** * i40e_reconfig_rss_queues - change number of queues for rss and rebuild * @pf: board private structure * @queue_count: the requested queue count for rss. * * returns 0 if rss is not enabled, if enabled returns the final rss queue * count which may be different from the requested queue count. * Note: expects to be called while under rtnl_lock() **/ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; int new_rss_size; if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) return 0; queue_count = min_t(int, queue_count, num_online_cpus()); new_rss_size = min_t(int, queue_count, pf->rss_size_max); if (queue_count != vsi->num_queue_pairs) { u16 qcount; vsi->req_queue_pairs = queue_count; i40e_prep_for_reset(pf, true); pf->alloc_rss_size = new_rss_size; i40e_reset_and_rebuild(pf, true, true); /* Discard the user configured hash keys and lut, if less * queues are enabled. 
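 * A user supplied LUT could reference queue indices that no longer
 * exist after the cut, so the default spread is used instead.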
*/ if (queue_count < vsi->rss_size) { i40e_clear_rss_config_user(vsi); dev_dbg(&pf->pdev->dev, "discard user configured hash keys and lut\n"); } /* Reset vsi->rss_size, as number of enabled queues changed */ qcount = vsi->num_queue_pairs / vsi->tc_config.numtc; vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); i40e_pf_config_rss(pf); } dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n", vsi->req_queue_pairs, pf->rss_size_max); return pf->alloc_rss_size; } /** * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition * @pf: board private structure **/ i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf) { i40e_status status; bool min_valid, max_valid; u32 max_bw, min_bw; status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, &min_valid, &max_valid); if (!status) { if (min_valid) pf->min_bw = min_bw; if (max_valid) pf->max_bw = max_bw; } return status; } /** * i40e_set_partition_bw_setting - Set BW settings for this PF partition * @pf: board private structure **/ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf) { struct i40e_aqc_configure_partition_bw_data bw_data; i40e_status status; /* Set the valid bit for this PF */ bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id)); bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK; bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK; /* Set the new bandwidths */ status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); return status; } /** * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition * @pf: board private structure **/ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf) { /* Commit temporary BW setting to permanent NVM image */ enum i40e_admin_queue_err last_aq_status; i40e_status ret; u16 nvm_word; if (pf->hw.partition_id != 1) { dev_info(&pf->pdev->dev, "Commit BW only works on partition 1! This is partition %d", pf->hw.partition_id); ret = I40E_NOT_SUPPORTED; goto bw_commit_out; } /* Acquire NVM for read access */ ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); last_aq_status = pf->hw.aq.asq_last_status; if (ret) { dev_info(&pf->pdev->dev, "Cannot acquire NVM for read access, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, last_aq_status)); goto bw_commit_out; } /* Read word 0x10 of NVM - SW compatibility word 1 */ ret = i40e_aq_read_nvm(&pf->hw, I40E_SR_NVM_CONTROL_WORD, 0x10, sizeof(nvm_word), &nvm_word, false, NULL); /* Save off last admin queue command status before releasing * the NVM */ last_aq_status = pf->hw.aq.asq_last_status; i40e_release_nvm(&pf->hw); if (ret) { dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, last_aq_status)); goto bw_commit_out; } /* Wait a bit for NVM release to complete */ msleep(50); /* Acquire NVM for write access */ ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); last_aq_status = pf->hw.aq.asq_last_status; if (ret) { dev_info(&pf->pdev->dev, "Cannot acquire NVM for write access, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, last_aq_status)); goto bw_commit_out; } /* Write it back out unchanged to initiate update NVM, * which will force a write of the shadow (alt) RAM to * the NVM - thus storing the bandwidth values permanently. 
*/ ret = i40e_aq_update_nvm(&pf->hw, I40E_SR_NVM_CONTROL_WORD, 0x10, sizeof(nvm_word), &nvm_word, true, 0, NULL); /* Save off last admin queue command status before releasing * the NVM */ last_aq_status = pf->hw.aq.asq_last_status; i40e_release_nvm(&pf->hw); if (ret) dev_info(&pf->pdev->dev, "BW settings NOT SAVED, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, last_aq_status)); bw_commit_out: return ret; } /** * i40e_sw_init - Initialize general software structures (struct i40e_pf) * @pf: board private structure to initialize * * i40e_sw_init initializes the Adapter private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size). **/ static int i40e_sw_init(struct i40e_pf *pf) { int err = 0; int size; /* Set default capability flags */ pf->flags = I40E_FLAG_RX_CSUM_ENABLED | I40E_FLAG_MSI_ENABLED | I40E_FLAG_MSIX_ENABLED; /* Set default ITR */ pf->rx_itr_default = I40E_ITR_RX_DEF; pf->tx_itr_default = I40E_ITR_TX_DEF; /* Depending on PF configurations, it is possible that the RSS * maximum might end up larger than the available queues */ pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); pf->alloc_rss_size = 1; pf->rss_table_size = pf->hw.func_caps.rss_table_size; pf->rss_size_max = min_t(int, pf->rss_size_max, pf->hw.func_caps.num_tx_qp); if (pf->hw.func_caps.rss) { pf->flags |= I40E_FLAG_RSS_ENABLED; pf->alloc_rss_size = min_t(int, pf->rss_size_max, num_online_cpus()); } /* MFP mode enabled */ if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { pf->flags |= I40E_FLAG_MFP_ENABLED; dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); if (i40e_get_partition_bw_setting(pf)) { dev_warn(&pf->pdev->dev, "Could not get partition bw settings\n"); } else { dev_info(&pf->pdev->dev, "Partition BW Min = %8.8x, Max = %8.8x\n", pf->min_bw, pf->max_bw); /* nudge the Tx scheduler */ i40e_set_partition_bw_setting(pf); } } if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || (pf->hw.func_caps.fd_filters_best_effort > 0)) { pf->flags |= I40E_FLAG_FD_ATR_ENABLED; pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; if (pf->flags & I40E_FLAG_MFP_ENABLED && pf->hw.num_partitions > 1) dev_info(&pf->pdev->dev, "Flow Director Sideband mode Disabled in MFP mode\n"); else pf->flags |= I40E_FLAG_FD_SB_ENABLED; pf->fdir_pf_filter_count = pf->hw.func_caps.fd_filters_guaranteed; pf->hw.fdir_shared_filter_count = pf->hw.func_caps.fd_filters_best_effort; } if (pf->hw.mac.type == I40E_MAC_X722) { pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE | I40E_HW_128_QP_RSS_CAPABLE | I40E_HW_ATR_EVICT_CAPABLE | I40E_HW_WB_ON_ITR_CAPABLE | I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE | I40E_HW_NO_PCI_LINK_CHECK | I40E_HW_USE_SET_LLDP_MIB | I40E_HW_GENEVE_OFFLOAD_CAPABLE | I40E_HW_PTP_L4_CAPABLE | I40E_HW_WOL_MC_MAGIC_PKT_WAKE | I40E_HW_OUTER_UDP_CSUM_CAPABLE); #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) != I40E_FDEVICT_PCTYPE_DEFAULT) { dev_warn(&pf->pdev->dev, "FD EVICT PCTYPES are not right, disable FD HW EVICT\n"); pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE; } } else if ((pf->hw.aq.api_maj_ver > 1) || ((pf->hw.aq.api_maj_ver == 1) && (pf->hw.aq.api_min_ver > 4))) { /* Supported in FW API version higher than 1.4 */ pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE; } /* Enable HW ATR eviction if possible */ if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE) pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED; if ((pf->hw.mac.type == I40E_MAC_XL710) && (((pf->hw.aq.fw_maj_ver == 4) && 
(pf->hw.aq.fw_min_ver < 33)) || (pf->hw.aq.fw_maj_ver < 4))) { pf->hw_features |= I40E_HW_RESTART_AUTONEG; /* No DCB support for FW < v4.33 */ pf->hw_features |= I40E_HW_NO_DCB_SUPPORT; } /* Disable FW LLDP if FW < v4.3 */ if ((pf->hw.mac.type == I40E_MAC_XL710) && (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || (pf->hw.aq.fw_maj_ver < 4))) pf->hw_features |= I40E_HW_STOP_FW_LLDP; /* Use the FW Set LLDP MIB API if FW > v4.40 */ if ((pf->hw.mac.type == I40E_MAC_XL710) && (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || (pf->hw.aq.fw_maj_ver >= 5))) pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB; /* Enable PTP L4 if FW > v6.0 */ if (pf->hw.mac.type == I40E_MAC_XL710 && pf->hw.aq.fw_maj_ver >= 6) pf->hw_features |= I40E_HW_PTP_L4_CAPABLE; if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) { pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; pf->flags |= I40E_FLAG_VMDQ_ENABLED; pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); } if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) { pf->flags |= I40E_FLAG_IWARP_ENABLED; /* IWARP needs one extra vector for CQP just like MISC.*/ pf->num_iwarp_msix = (int)num_online_cpus() + 1; } /* Stopping FW LLDP engine is supported on XL710 and X722 * starting from FW versions determined in i40e_init_adminq. * Stopping the FW LLDP engine is not supported on XL710 * if NPAR is functioning so unset this hw flag in this case. */ if (pf->hw.mac.type == I40E_MAC_XL710 && pf->hw.func_caps.npar_enable && (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE; #ifdef CONFIG_PCI_IOV if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; pf->flags |= I40E_FLAG_SRIOV_ENABLED; pf->num_req_vfs = min_t(int, pf->hw.func_caps.num_vfs, I40E_MAX_VF_COUNT); } #endif /* CONFIG_PCI_IOV */ pf->eeprom_version = 0xDEAD; pf->lan_veb = I40E_NO_VEB; pf->lan_vsi = I40E_NO_VSI; /* By default FW has this off for performance reasons */ pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; /* set up queue assignment tracking */ size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); pf->qp_pile = kzalloc(size, GFP_KERNEL); if (!pf->qp_pile) { err = -ENOMEM; goto sw_init_done; } pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; pf->qp_pile->search_hint = 0; pf->tx_timeout_recovery_level = 1; mutex_init(&pf->switch_mutex); sw_init_done: return err; } /** * i40e_set_ntuple - set the ntuple feature flag and take action * @pf: board private structure to initialize * @features: the feature set that the stack is suggesting * * returns a bool to indicate if reset needs to happen **/ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) { bool need_reset = false; /* Check if Flow Director n-tuple support was enabled or disabled. If * the state changed, we need to reset. 
*/ if (features & NETIF_F_NTUPLE) { /* Enable filters and mark for reset */ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) need_reset = true; /* enable FD_SB only if there is MSI-X vector and no cloud * filters exist */ if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) { pf->flags |= I40E_FLAG_FD_SB_ENABLED; pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; } } else { /* turn off filters, mark for reset and clear SW filter list */ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { need_reset = true; i40e_fdir_filter_exit(pf); } pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state); pf->flags |= I40E_FLAG_FD_SB_INACTIVE; /* reset fd counters */ pf->fd_add_err = 0; pf->fd_atr_cnt = 0; /* if ATR was auto disabled it can be re-enabled. */ if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); } return need_reset; } /** * i40e_clear_rss_lut - clear the rx hash lookup table * @vsi: the VSI being configured **/ static void i40e_clear_rss_lut(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u16 vf_id = vsi->vf_id; u8 i; if (vsi->type == I40E_VSI_MAIN) { for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) wr32(hw, I40E_PFQF_HLUT(i), 0); } else if (vsi->type == I40E_VSI_SRIOV) { for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0); } else { dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); } } /** * i40e_set_features - set the netdev feature flags * @netdev: ptr to the netdev being adjusted * @features: the feature set that the stack is suggesting * Note: expects to be called while under rtnl_lock() **/ static int i40e_set_features(struct net_device *netdev, netdev_features_t features) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; bool need_reset; if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) i40e_pf_config_rss(pf); else if (!(features & NETIF_F_RXHASH) && netdev->features & NETIF_F_RXHASH) i40e_clear_rss_lut(vsi); if (features & NETIF_F_HW_VLAN_CTAG_RX) i40e_vlan_stripping_enable(vsi); else i40e_vlan_stripping_disable(vsi); if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) { dev_err(&pf->pdev->dev, "Offloaded tc filters active, can't turn hw_tc_offload off"); return -EINVAL; } if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt) i40e_del_all_macvlans(vsi); need_reset = i40e_set_ntuple(pf, features); if (need_reset) i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); return 0; } /** * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port * @pf: board private structure * @port: The UDP port to look up * * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found **/ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port) { u8 i; for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { /* Do not report ports with pending deletions as * being available. 
*/ if (!port && (pf->pending_udp_bitmap & BIT_ULL(i))) continue; if (pf->udp_ports[i].port == port) return i; } return i; } /** * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up * @netdev: This physical port's netdev * @ti: Tunnel endpoint information **/ static void i40e_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; u16 port = ntohs(ti->port); u8 next_idx; u8 idx; idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { netdev_info(netdev, "port %d already offloaded\n", port); return; } /* Now check if there is space to add the new port */ next_idx = i40e_get_udp_port_idx(pf, 0); if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n", port); return; } switch (ti->type) { case UDP_TUNNEL_TYPE_VXLAN: pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; break; case UDP_TUNNEL_TYPE_GENEVE: if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE)) return; pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; break; default: return; } /* New port: add it and mark its index in the bitmap */ pf->udp_ports[next_idx].port = port; pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED; pf->pending_udp_bitmap |= BIT_ULL(next_idx); set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state); } /** * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away * @netdev: This physical port's netdev * @ti: Tunnel endpoint information **/ static void i40e_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; u16 port = ntohs(ti->port); u8 idx; idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS) goto not_found; switch (ti->type) { case UDP_TUNNEL_TYPE_VXLAN: if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN) goto not_found; break; case UDP_TUNNEL_TYPE_GENEVE: if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE) goto not_found; break; default: goto not_found; } /* if port exists, set it to 0 (mark for deletion) * and make it pending */ pf->udp_ports[idx].port = 0; /* Toggle pending bit instead of setting it. This way if we are * deleting a port that has yet to be added we just clear the pending * bit and don't have to worry about it. 
*/ pf->pending_udp_bitmap ^= BIT_ULL(idx); set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state); return; not_found: netdev_warn(netdev, "UDP port %d was not found, not deleting\n", port); } static int i40e_get_phys_port_id(struct net_device *netdev, struct netdev_phys_item_id *ppid) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; if (!(pf->hw_features & I40E_HW_PORT_ID_VALID)) return -EOPNOTSUPP; ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id)); memcpy(ppid->id, hw->mac.port_addr, ppid->id_len); return 0; } /** * i40e_ndo_fdb_add - add an entry to the hardware database * @ndm: the input from the stack * @tb: pointer to array of nladdr (unused) * @dev: the net device pointer * @addr: the MAC address entry being added * @vid: VLAN ID * @flags: instructions from stack about fdb operation */ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, u16 flags, struct netlink_ext_ack *extack) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_pf *pf = np->vsi->back; int err = 0; if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) return -EOPNOTSUPP; if (vid) { pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name); return -EINVAL; } /* Hardware does not support aging addresses so if a * ndm_state is given only allow permanent addresses */ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { netdev_info(dev, "FDB only supports static addresses\n"); return -EINVAL; } if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) err = dev_uc_add_excl(dev, addr); else if (is_multicast_ether_addr(addr)) err = dev_mc_add_excl(dev, addr); else err = -EINVAL; /* Only return duplicate errors if NLM_F_EXCL is set */ if (err == -EEXIST && !(flags & NLM_F_EXCL)) err = 0; return err; } /** * i40e_ndo_bridge_setlink - Set the hardware bridge mode * @dev: the netdev being configured * @nlh: RTNL message * @flags: bridge flags * @extack: netlink extended ack * * Inserts a new hardware bridge if not already created and * enables the bridging mode requested (VEB or VEPA). If the * hardware bridge has already been inserted and the request * is to change the mode then that requires a PF reset to * allow rebuild of the components with required hardware * bridge mode enabled. 
 *
 * Note: expects to be called while under rtnl_lock()
 **/
static int i40e_ndo_bridge_setlink(struct net_device *dev,
				   struct nlmsghdr *nlh,
				   u16 flags,
				   struct netlink_ext_ack *extack)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_veb *veb = NULL;
	struct nlattr *attr, *br_spec;
	int i, rem;

	/* Only for PF VSI for now */
	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
		return -EOPNOTSUPP;

	/* Find the HW bridge for PF VSI */
	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
			veb = pf->veb[i];
	}

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	/* nlmsg_find_attr() returns NULL when the request carries no
	 * IFLA_AF_SPEC attribute; bail out instead of handing NULL to
	 * nla_for_each_nested() below.
	 */
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		__u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if ((mode != BRIDGE_MODE_VEPA) &&
		    (mode != BRIDGE_MODE_VEB))
			return -EINVAL;

		/* Insert a new HW bridge */
		if (!veb) {
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
			if (veb) {
				veb->bridge_mode = mode;
				i40e_config_bridge_mode(veb);
			} else {
				/* No Bridge HW offload available */
				return -ENOENT;
			}
			break;
		} else if (mode != veb->bridge_mode) {
			/* Existing HW bridge but different mode needs reset */
			veb->bridge_mode = mode;
			/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
			if (mode == BRIDGE_MODE_VEB)
				pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			else
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
			break;
		}
	}

	return 0;
}

/**
 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process id
 * @seq: RTNL message seq #
 * @dev: the netdev being configured
 * @filter_mask: unused
 * @nlflags: netlink flags passed in
 *
 * Return the mode in which the hardware bridge is operating, i.e. VEB or VEPA.
 **/
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				   struct net_device *dev,
				   u32 __always_unused filter_mask,
				   int nlflags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_veb *veb = NULL;
	int i;

	/* Only for PF VSI for now */
	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
		return -EOPNOTSUPP;

	/* Find the HW bridge for the PF VSI */
	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
			veb = pf->veb[i];
	}

	if (!veb)
		return 0;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
				       0, 0, nlflags, filter_mask, NULL);
}

/**
 * i40e_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buff
 * @dev: This physical port's netdev
 * @features: Offload features that the stack believes apply
 **/
static netdev_features_t i40e_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
*/ if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) features &= ~NETIF_F_GSO_MASK; /* MACLEN can support at most 63 words */ len = skb_network_header(skb) - skb->data; if (len & ~(63 * 2)) goto out_err; /* IPLEN and EIPLEN can support at most 127 dwords */ len = skb_transport_header(skb) - skb_network_header(skb); if (len & ~(127 * 4)) goto out_err; if (skb->encapsulation) { /* L4TUNLEN can support 127 words */ len = skb_inner_network_header(skb) - skb_transport_header(skb); if (len & ~(127 * 2)) goto out_err; /* IPLEN can support at most 127 dwords */ len = skb_inner_transport_header(skb) - skb_inner_network_header(skb); if (len & ~(127 * 4)) goto out_err; } /* No need to validate L4LEN as TCP is the only protocol with a * a flexible value and we support all possible values supported * by TCP, which is at most 15 dwords */ return features; out_err: return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } /** * i40e_xdp_setup - add/remove an XDP program * @vsi: VSI to changed * @prog: XDP program **/ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog) { int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; struct i40e_pf *pf = vsi->back; struct bpf_prog *old_prog; bool need_reset; int i; /* Don't allow frames that span over multiple buffers */ if (frame_size > vsi->rx_buf_len) return -EINVAL; if (!i40e_enabled_xdp_vsi(vsi) && !prog) return 0; /* When turning XDP on->off/off->on we reset and rebuild the rings. */ need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog); if (need_reset) i40e_prep_for_reset(pf, true); old_prog = xchg(&vsi->xdp_prog, prog); if (need_reset) i40e_reset_and_rebuild(pf, true, true); for (i = 0; i < vsi->num_queue_pairs; i++) WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); if (old_prog) bpf_prog_put(old_prog); /* Kick start the NAPI context if there is an AF_XDP socket open * on that queue id. This so that receiving will start. */ if (need_reset && prog) for (i = 0; i < vsi->num_queue_pairs; i++) if (vsi->xdp_rings[i]->xsk_umem) (void)i40e_xsk_wakeup(vsi->netdev, i, XDP_WAKEUP_RX); return 0; } /** * i40e_enter_busy_conf - Enters busy config state * @vsi: vsi * * Returns 0 on success, <0 for failure. **/ static int i40e_enter_busy_conf(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int timeout = 50; while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { timeout--; if (!timeout) return -EBUSY; usleep_range(1000, 2000); } return 0; } /** * i40e_exit_busy_conf - Exits busy config state * @vsi: vsi **/ static void i40e_exit_busy_conf(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; clear_bit(__I40E_CONFIG_BUSY, pf->state); } /** * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair * @vsi: vsi * @queue_pair: queue pair **/ static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair) { memset(&vsi->rx_rings[queue_pair]->rx_stats, 0, sizeof(vsi->rx_rings[queue_pair]->rx_stats)); memset(&vsi->tx_rings[queue_pair]->stats, 0, sizeof(vsi->tx_rings[queue_pair]->stats)); if (i40e_enabled_xdp_vsi(vsi)) { memset(&vsi->xdp_rings[queue_pair]->stats, 0, sizeof(vsi->xdp_rings[queue_pair]->stats)); } } /** * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair * @vsi: vsi * @queue_pair: queue pair **/ static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair) { i40e_clean_tx_ring(vsi->tx_rings[queue_pair]); if (i40e_enabled_xdp_vsi(vsi)) { /* Make sure that in-progress ndo_xdp_xmit calls are * completed. 
*/ synchronize_rcu(); i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]); } i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); } /** * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair * @vsi: vsi * @queue_pair: queue pair * @enable: true for enable, false for disable **/ static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair, bool enable) { struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; struct i40e_q_vector *q_vector = rxr->q_vector; if (!vsi->netdev) return; /* All rings in a qp belong to the same qvector. */ if (q_vector->rx.ring || q_vector->tx.ring) { if (enable) napi_enable(&q_vector->napi); else napi_disable(&q_vector->napi); } } /** * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair * @vsi: vsi * @queue_pair: queue pair * @enable: true for enable, false for disable * * Returns 0 on success, <0 on failure. **/ static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair, bool enable) { struct i40e_pf *pf = vsi->back; int pf_q, ret = 0; pf_q = vsi->base_queue + queue_pair; ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q, false /*is xdp*/, enable); if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d Tx ring %d %sable timeout\n", vsi->seid, pf_q, (enable ? "en" : "dis")); return ret; } i40e_control_rx_q(pf, pf_q, enable); ret = i40e_pf_rxq_wait(pf, pf_q, enable); if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d Rx ring %d %sable timeout\n", vsi->seid, pf_q, (enable ? "en" : "dis")); return ret; } /* Due to HW errata, on Rx disable only, the register can * indicate done before it really is. Needs 50ms to be sure */ if (!enable) mdelay(50); if (!i40e_enabled_xdp_vsi(vsi)) return ret; ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q + vsi->alloc_queue_pairs, true /*is xdp*/, enable); if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d XDP Tx ring %d %sable timeout\n", vsi->seid, pf_q, (enable ? "en" : "dis")); } return ret; } /** * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair * @vsi: vsi * @queue_pair: queue_pair **/ static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair) { struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; /* All rings in a qp belong to the same qvector. */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx); else i40e_irq_dynamic_enable_icr0(pf); i40e_flush(hw); } /** * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair * @vsi: vsi * @queue_pair: queue_pair **/ static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair) { struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; /* For simplicity, instead of removing the qp interrupt causes * from the interrupt linked list, we simply disable the interrupt, and * leave the list intact. * * All rings in a qp belong to the same qvector. */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) { u32 intpf = vsi->base_vector + rxr->q_vector->v_idx; wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0); i40e_flush(hw); synchronize_irq(pf->msix_entries[intpf].vector); } else { /* Legacy and MSI mode - this stops all interrupt handling */ wr32(hw, I40E_PFINT_ICR0_ENA, 0); wr32(hw, I40E_PFINT_DYN_CTL0, 0); i40e_flush(hw); synchronize_irq(pf->pdev->irq); } } /** * i40e_queue_pair_disable - Disables a queue pair * @vsi: vsi * @queue_pair: queue pair * * Returns 0 on success, <0 on failure. 
**/ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair) { int err; err = i40e_enter_busy_conf(vsi); if (err) return err; i40e_queue_pair_disable_irq(vsi, queue_pair); err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */); i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */); i40e_queue_pair_clean_rings(vsi, queue_pair); i40e_queue_pair_reset_stats(vsi, queue_pair); return err; } /** * i40e_queue_pair_enable - Enables a queue pair * @vsi: vsi * @queue_pair: queue pair * * Returns 0 on success, <0 on failure. **/ int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair) { int err; err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]); if (err) return err; if (i40e_enabled_xdp_vsi(vsi)) { err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]); if (err) return err; } err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]); if (err) return err; err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */); i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */); i40e_queue_pair_enable_irq(vsi, queue_pair); i40e_exit_busy_conf(vsi); return err; } /** * i40e_xdp - implements ndo_bpf for i40e * @dev: netdevice * @xdp: XDP command **/ static int i40e_xdp(struct net_device *dev, struct netdev_bpf *xdp) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; if (vsi->type != I40E_VSI_MAIN) return -EINVAL; switch (xdp->command) { case XDP_SETUP_PROG: return i40e_xdp_setup(vsi, xdp->prog); case XDP_QUERY_PROG: xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0; return 0; case XDP_SETUP_XSK_UMEM: return i40e_xsk_umem_setup(vsi, xdp->xsk.umem, xdp->xsk.queue_id); default: return -EINVAL; } } static const struct net_device_ops i40e_netdev_ops = { .ndo_open = i40e_open, .ndo_stop = i40e_close, .ndo_start_xmit = i40e_lan_xmit_frame, .ndo_get_stats64 = i40e_get_netdev_stats_struct, .ndo_set_rx_mode = i40e_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = i40e_set_mac, .ndo_change_mtu = i40e_change_mtu, .ndo_do_ioctl = i40e_ioctl, .ndo_tx_timeout = i40e_tx_timeout, .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = i40e_netpoll, #endif .ndo_setup_tc = __i40e_setup_tc, .ndo_set_features = i40e_set_features, .ndo_set_vf_mac = i40e_ndo_set_vf_mac, .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, .ndo_set_vf_rate = i40e_ndo_set_vf_bw, .ndo_get_vf_config = i40e_ndo_get_vf_config, .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, .ndo_set_vf_trust = i40e_ndo_set_vf_trust, .ndo_udp_tunnel_add = i40e_udp_tunnel_add, .ndo_udp_tunnel_del = i40e_udp_tunnel_del, .ndo_get_phys_port_id = i40e_get_phys_port_id, .ndo_fdb_add = i40e_ndo_fdb_add, .ndo_features_check = i40e_features_check, .ndo_bridge_getlink = i40e_ndo_bridge_getlink, .ndo_bridge_setlink = i40e_ndo_bridge_setlink, .ndo_bpf = i40e_xdp, .ndo_xdp_xmit = i40e_xdp_xmit, .ndo_xsk_wakeup = i40e_xsk_wakeup, .ndo_dfwd_add_station = i40e_fwd_add, .ndo_dfwd_del_station = i40e_fwd_del, }; /** * i40e_config_netdev - Setup the netdev flags * @vsi: the VSI being configured * * Returns 0 on success, negative value on failure **/ static int i40e_config_netdev(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_netdev_priv *np; struct net_device *netdev; u8 broadcast[ETH_ALEN]; u8 mac_addr[ETH_ALEN]; int etherdev_size; netdev_features_t hw_enc_features; netdev_features_t 
hw_features; etherdev_size = sizeof(struct i40e_netdev_priv); netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); if (!netdev) return -ENOMEM; vsi->netdev = netdev; np = netdev_priv(netdev); np->vsi = vsi; hw_enc_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA | NETIF_F_SOFT_FEATURES | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_PARTIAL | NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_IPXIP6 | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | NETIF_F_RXHASH | NETIF_F_RXCSUM | 0; if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE)) netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; netdev->hw_enc_features |= hw_enc_features; /* record features VLANs can make use of */ netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; /* enable macvlan offloads */ netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; hw_features = hw_enc_features | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; netdev->hw_features |= hw_features; netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; if (vsi->type == I40E_VSI_MAIN) { SET_NETDEV_DEV(netdev, &pf->pdev->dev); ether_addr_copy(mac_addr, hw->mac.perm_addr); /* The following steps are necessary for two reasons. First, * some older NVM configurations load a default MAC-VLAN * filter that will accept any tagged packet, and we want to * replace this with a normal filter. Additionally, it is * possible our MAC address was provided by the platform using * Open Firmware or similar. * * Thus, we need to remove the default filter and install one * specific to the MAC address. */ i40e_rm_default_mac_filter(vsi, mac_addr); spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_add_mac_filter(vsi, mac_addr); spin_unlock_bh(&vsi->mac_filter_hash_lock); } else { /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to * the end, which is 4 bytes long, so force truncation of the * original name by IFNAMSIZ - 4 */ snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d", IFNAMSIZ - 4, pf->vsi[pf->lan_vsi]->netdev->name); eth_random_addr(mac_addr); spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_add_mac_filter(vsi, mac_addr); spin_unlock_bh(&vsi->mac_filter_hash_lock); } /* Add the broadcast filter so that we initially will receive * broadcast packets. Note that when a new VLAN is first added the * driver will convert all filters marked I40E_VLAN_ANY into VLAN * specific filters as part of transitioning into "vlan" operation. * When more VLANs are added, the driver will copy each existing MAC * filter and add it for the new VLAN. * * Broadcast filters are handled specially by * i40e_sync_filters_subtask, as the driver must to set the broadcast * promiscuous bit instead of adding this directly as a MAC/VLAN * filter. The subtask will update the correct broadcast promiscuous * bits as VLANs become active or inactive. 
*/ eth_broadcast_addr(broadcast); spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_add_mac_filter(vsi, broadcast); spin_unlock_bh(&vsi->mac_filter_hash_lock); ether_addr_copy(netdev->dev_addr, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); /* i40iw_net_event() reads 16 bytes from neigh->primary_key */ netdev->neigh_priv_len = sizeof(u32) * 4; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; /* Setup netdev TC information */ i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc); netdev->netdev_ops = &i40e_netdev_ops; netdev->watchdog_timeo = 5 * HZ; i40e_set_ethtool_ops(netdev); /* MTU range: 68 - 9706 */ netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD; return 0; } /** * i40e_vsi_delete - Delete a VSI from the switch * @vsi: the VSI being removed * * Returns 0 on success, negative value on failure **/ static void i40e_vsi_delete(struct i40e_vsi *vsi) { /* remove default VSI is not allowed */ if (vsi == vsi->back->vsi[vsi->back->lan_vsi]) return; i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL); } /** * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB * @vsi: the VSI being queried * * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode **/ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) { struct i40e_veb *veb; struct i40e_pf *pf = vsi->back; /* Uplink is not a bridge so default to VEB */ if (vsi->veb_idx >= I40E_MAX_VEB) return 1; veb = pf->veb[vsi->veb_idx]; if (!veb) { dev_info(&pf->pdev->dev, "There is no veb associated with the bridge\n"); return -ENOENT; } /* Uplink is a bridge in VEPA mode */ if (veb->bridge_mode & BRIDGE_MODE_VEPA) { return 0; } else { /* Uplink is a bridge in VEB mode */ return 1; } /* VEPA is now default bridge, so return 0 */ return 0; } /** * i40e_add_vsi - Add a VSI to the switch * @vsi: the VSI being configured * * This initializes a VSI context depending on the VSI type to be added and * passes it down to the add_vsi aq command. **/ static int i40e_add_vsi(struct i40e_vsi *vsi) { int ret = -ENODEV; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_vsi_context ctxt; struct i40e_mac_filter *f; struct hlist_node *h; int bkt; u8 enabled_tc = 0x1; /* TC0 enabled */ int f_count = 0; memset(&ctxt, 0, sizeof(ctxt)); switch (vsi->type) { case I40E_VSI_MAIN: /* The PF's main VSI is already setup as part of the * device initialization, so we'll not bother with * the add_vsi call, but we will retrieve the current * VSI context. */ ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; ctxt.vf_num = 0; ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); ctxt.flags = I40E_AQ_VSI_TYPE_PF; if (ret) { dev_info(&pf->pdev->dev, "couldn't get PF vsi config, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -ENOENT; } vsi->info = ctxt.info; vsi->info.valid_sections = 0; vsi->seid = ctxt.seid; vsi->id = ctxt.vsi_number; enabled_tc = i40e_pf_get_tc_map(pf); /* Source pruning is enabled by default, so the flag is * negative logic - if it's set, we need to fiddle with * the VSI to disable source pruning. 
*/ if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) { memset(&ctxt, 0, sizeof(ctxt)); ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; ctxt.vf_num = 0; ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB); ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "update vsi failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); ret = -ENOENT; goto err; } } /* MFP mode setup queue map and update VSI */ if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ memset(&ctxt, 0, sizeof(ctxt)); ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; ctxt.vf_num = 0; i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "update vsi failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); ret = -ENOENT; goto err; } /* update the local VSI info queue map */ i40e_vsi_update_queue_map(vsi, &ctxt); vsi->info.valid_sections = 0; } else { /* Default/Main VSI is only enabled for TC0 * reconfigure it to enable all TCs that are * available on the port in SFP mode. * For MFP case the iSCSI PF would use this * flow to enable LAN+iSCSI TC. */ ret = i40e_vsi_config_tc(vsi, enabled_tc); if (ret) { /* Single TC condition is not fatal, * message and continue */ dev_info(&pf->pdev->dev, "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n", enabled_tc, i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } } break; case I40E_VSI_FDIR: ctxt.pf_num = hw->pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_PF; if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && (i40e_is_vsi_uplink_mode_veb(vsi))) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); } i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); break; case I40E_VSI_VMDQ2: ctxt.pf_num = hw->pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; /* This VSI is connected to VEB so the switch_id * should be set to zero by default. */ if (i40e_is_vsi_uplink_mode_veb(vsi)) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); } /* Setup the VSI tx/rx queue map for TC0 only for now */ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); break; case I40E_VSI_SRIOV: ctxt.pf_num = hw->pf_id; ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; ctxt.uplink_seid = vsi->uplink_seid; ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_VF; /* This VSI is connected to VEB so the switch_id * should be set to zero by default. 
*/ if (i40e_is_vsi_uplink_mode_veb(vsi)) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); } if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); ctxt.info.queueing_opt_flags |= (I40E_AQ_VSI_QUE_OPT_TCP_ENA | I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI); } ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; if (pf->vf[vsi->vf_id].spoofchk) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); } /* Setup the VSI tx/rx queue map for TC0 only for now */ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); break; case I40E_VSI_IWARP: /* send down message to iWARP */ break; default: return -ENODEV; } if (vsi->type != I40E_VSI_MAIN) { ret = i40e_aq_add_vsi(hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, "add vsi failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); ret = -ENOENT; goto err; } vsi->info = ctxt.info; vsi->info.valid_sections = 0; vsi->seid = ctxt.seid; vsi->id = ctxt.vsi_number; } vsi->active_filters = 0; clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); spin_lock_bh(&vsi->mac_filter_hash_lock); /* If macvlan filters already exist, force them to get loaded */ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { f->state = I40E_FILTER_NEW; f_count++; } spin_unlock_bh(&vsi->mac_filter_hash_lock); if (f_count) { vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); } /* Update VSI BW information */ ret = i40e_vsi_get_bw_info(vsi); if (ret) { dev_info(&pf->pdev->dev, "couldn't get vsi bw info, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* VSI is already added so not tearing that up */ ret = 0; } err: return ret; } /** * i40e_vsi_release - Delete a VSI and free its resources * @vsi: the VSI being removed * * Returns 0 on success or < 0 on error **/ int i40e_vsi_release(struct i40e_vsi *vsi) { struct i40e_mac_filter *f; struct hlist_node *h; struct i40e_veb *veb = NULL; struct i40e_pf *pf; u16 uplink_seid; int i, n, bkt; pf = vsi->back; /* release of a VEB-owner or last VSI is not allowed */ if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", vsi->seid, vsi->uplink_seid); return -ENODEV; } if (vsi == pf->vsi[pf->lan_vsi] && !test_bit(__I40E_DOWN, pf->state)) { dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); return -ENODEV; } uplink_seid = vsi->uplink_seid; if (vsi->type != I40E_VSI_SRIOV) { if (vsi->netdev_registered) { vsi->netdev_registered = false; if (vsi->netdev) { /* results in a call to i40e_close() */ unregister_netdev(vsi->netdev); } } else { i40e_vsi_close(vsi); } i40e_vsi_disable_irq(vsi); } spin_lock_bh(&vsi->mac_filter_hash_lock); /* clear the sync flag on all filters */ if (vsi->netdev) { __dev_uc_unsync(vsi->netdev, NULL); __dev_mc_unsync(vsi->netdev, NULL); } /* make sure any remaining filters are marked for deletion */ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) __i40e_del_filter(vsi, f); spin_unlock_bh(&vsi->mac_filter_hash_lock); i40e_sync_vsi_filters(vsi); i40e_vsi_delete(vsi); i40e_vsi_free_q_vectors(vsi); if (vsi->netdev) { free_netdev(vsi->netdev); vsi->netdev = NULL; } 
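	/* All firmware-side and interrupt resources for this VSI are gone at
	 * this point; below we drop the ring structures and the SW VSI slot,
	 * then check whether the uplink VEB has become empty and can be
	 * released as well.
	 */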
i40e_vsi_clear_rings(vsi); i40e_vsi_clear(vsi); /* If this was the last thing on the VEB, except for the * controlling VSI, remove the VEB, which puts the controlling * VSI onto the next level down in the switch. * * Well, okay, there's one more exception here: don't remove * the orphan VEBs yet. We'll wait for an explicit remove request * from up the network stack. */ for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i] && pf->vsi[i]->uplink_seid == uplink_seid && (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { n++; /* count the VSIs */ } } for (i = 0; i < I40E_MAX_VEB; i++) { if (!pf->veb[i]) continue; if (pf->veb[i]->uplink_seid == uplink_seid) n++; /* count the VEBs */ if (pf->veb[i]->seid == uplink_seid) veb = pf->veb[i]; } if (n == 0 && veb && veb->uplink_seid != 0) i40e_veb_release(veb); return 0; } /** * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI * @vsi: ptr to the VSI * * This should only be called after i40e_vsi_mem_alloc() which allocates the * corresponding SW VSI structure and initializes num_queue_pairs for the * newly allocated VSI. * * Returns 0 on success or negative on failure **/ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) { int ret = -ENOENT; struct i40e_pf *pf = vsi->back; if (vsi->q_vectors[0]) { dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", vsi->seid); return -EEXIST; } if (vsi->base_vector) { dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", vsi->seid, vsi->base_vector); return -EEXIST; } ret = i40e_vsi_alloc_q_vectors(vsi); if (ret) { dev_info(&pf->pdev->dev, "failed to allocate %d q_vector for VSI %d, ret=%d\n", vsi->num_q_vectors, vsi->seid, ret); vsi->num_q_vectors = 0; goto vector_setup_out; } /* In Legacy mode, we do not have to get any other vector since we * piggyback on the misc/ICR0 for queue interrupts. */ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) return ret; if (vsi->num_q_vectors) vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, vsi->num_q_vectors, vsi->idx); if (vsi->base_vector < 0) { dev_info(&pf->pdev->dev, "failed to get tracking for %d vectors for VSI %d, err=%d\n", vsi->num_q_vectors, vsi->seid, vsi->base_vector); i40e_vsi_free_q_vectors(vsi); ret = -ENOENT; goto vector_setup_out; } vector_setup_out: return ret; } /** * i40e_vsi_reinit_setup - return and reallocate resources for a VSI * @vsi: pointer to the vsi. * * This re-allocates a vsi's queue resources. * * Returns pointer to the successfully allocated and configured VSI sw struct * on success, otherwise returns NULL on failure. **/ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) { u16 alloc_queue_pairs; struct i40e_pf *pf; u8 enabled_tc; int ret; if (!vsi) return NULL; pf = vsi->back; i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); i40e_vsi_clear_rings(vsi); i40e_vsi_free_arrays(vsi, false); i40e_set_num_rings_in_vsi(vsi); ret = i40e_vsi_alloc_arrays(vsi, false); if (ret) goto err_vsi; alloc_queue_pairs = vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 2 : 1); ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); if (ret < 0) { dev_info(&pf->pdev->dev, "failed to get tracking for %d queues for VSI %d err %d\n", alloc_queue_pairs, vsi->seid, ret); goto err_vsi; } vsi->base_queue = ret; /* Update the FW view of the VSI. Force a reset of TC and queue * layout configurations. 
*/ enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); if (vsi->type == I40E_VSI_MAIN) i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr); /* assign it some queues */ ret = i40e_alloc_rings(vsi); if (ret) goto err_rings; /* map all of the rings to the q_vectors */ i40e_vsi_map_rings_to_vectors(vsi); return vsi; err_rings: i40e_vsi_free_q_vectors(vsi); if (vsi->netdev_registered) { vsi->netdev_registered = false; unregister_netdev(vsi->netdev); free_netdev(vsi->netdev); vsi->netdev = NULL; } i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); err_vsi: i40e_vsi_clear(vsi); return NULL; } /** * i40e_vsi_setup - Set up a VSI by a given type * @pf: board private structure * @type: VSI type * @uplink_seid: the switch element to link to * @param1: usage depends upon VSI type. For VF types, indicates VF id * * This allocates the sw VSI structure and its queue resources, then add a VSI * to the identified VEB. * * Returns pointer to the successfully allocated and configure VSI sw struct on * success, otherwise returns NULL on failure. **/ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, u16 uplink_seid, u32 param1) { struct i40e_vsi *vsi = NULL; struct i40e_veb *veb = NULL; u16 alloc_queue_pairs; int ret, i; int v_idx; /* The requested uplink_seid must be either * - the PF's port seid * no VEB is needed because this is the PF * or this is a Flow Director special case VSI * - seid of an existing VEB * - seid of a VSI that owns an existing VEB * - seid of a VSI that doesn't own a VEB * a new VEB is created and the VSI becomes the owner * - seid of the PF VSI, which is what creates the first VEB * this is a special case of the previous * * Find which uplink_seid we were given and create a new VEB if needed */ for (i = 0; i < I40E_MAX_VEB; i++) { if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) { veb = pf->veb[i]; break; } } if (!veb && uplink_seid != pf->mac_seid) { for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { vsi = pf->vsi[i]; break; } } if (!vsi) { dev_info(&pf->pdev->dev, "no such uplink_seid %d\n", uplink_seid); return NULL; } if (vsi->uplink_seid == pf->mac_seid) veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, vsi->tc_config.enabled_tc); else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, vsi->tc_config.enabled_tc); if (veb) { if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { dev_info(&vsi->back->pdev->dev, "New VSI creation error, uplink seid of LAN VSI expected.\n"); return NULL; } /* We come up by default in VEPA mode if SRIOV is not * already enabled, in which case we can't force VEPA * mode. */ if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { veb->bridge_mode = BRIDGE_MODE_VEPA; pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; } i40e_config_bridge_mode(veb); } for (i = 0; i < I40E_MAX_VEB && !veb; i++) { if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) veb = pf->veb[i]; } if (!veb) { dev_info(&pf->pdev->dev, "couldn't add VEB\n"); return NULL; } vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; uplink_seid = veb->seid; } /* get vsi sw struct */ v_idx = i40e_vsi_mem_alloc(pf, type); if (v_idx < 0) goto err_alloc; vsi = pf->vsi[v_idx]; if (!vsi) goto err_alloc; vsi->type = type; vsi->veb_idx = (veb ? 
veb->idx : I40E_NO_VEB); if (type == I40E_VSI_MAIN) pf->lan_vsi = v_idx; else if (type == I40E_VSI_SRIOV) vsi->vf_id = param1; /* assign it some queues */ alloc_queue_pairs = vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 2 : 1); ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); if (ret < 0) { dev_info(&pf->pdev->dev, "failed to get tracking for %d queues for VSI %d err=%d\n", alloc_queue_pairs, vsi->seid, ret); goto err_vsi; } vsi->base_queue = ret; /* get a VSI from the hardware */ vsi->uplink_seid = uplink_seid; ret = i40e_add_vsi(vsi); if (ret) goto err_vsi; switch (vsi->type) { /* setup the netdev if needed */ case I40E_VSI_MAIN: case I40E_VSI_VMDQ2: ret = i40e_config_netdev(vsi); if (ret) goto err_netdev; ret = register_netdev(vsi->netdev); if (ret) goto err_netdev; vsi->netdev_registered = true; netif_carrier_off(vsi->netdev); #ifdef CONFIG_I40E_DCB /* Setup DCB netlink interface */ i40e_dcbnl_setup(vsi); #endif /* CONFIG_I40E_DCB */ /* fall through */ case I40E_VSI_FDIR: /* set up vectors and rings if needed */ ret = i40e_vsi_setup_vectors(vsi); if (ret) goto err_msix; ret = i40e_alloc_rings(vsi); if (ret) goto err_rings; /* map all of the rings to the q_vectors */ i40e_vsi_map_rings_to_vectors(vsi); i40e_vsi_reset_stats(vsi); break; default: /* no netdev or rings for the other VSI types */ break; } if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) && (vsi->type == I40E_VSI_VMDQ2)) { ret = i40e_vsi_config_rss(vsi); } return vsi; err_rings: i40e_vsi_free_q_vectors(vsi); err_msix: if (vsi->netdev_registered) { vsi->netdev_registered = false; unregister_netdev(vsi->netdev); free_netdev(vsi->netdev); vsi->netdev = NULL; } err_netdev: i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); err_vsi: i40e_vsi_clear(vsi); err_alloc: return NULL; } /** * i40e_veb_get_bw_info - Query VEB BW information * @veb: the veb to query * * Query the Tx scheduler BW configuration data for given VEB **/ static int i40e_veb_get_bw_info(struct i40e_veb *veb) { struct i40e_aqc_query_switching_comp_ets_config_resp ets_data; struct i40e_aqc_query_switching_comp_bw_config_resp bw_data; struct i40e_pf *pf = veb->pf; struct i40e_hw *hw = &pf->hw; u32 tc_bw_max; int ret = 0; int i; ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, &bw_data, NULL); if (ret) { dev_info(&pf->pdev->dev, "query veb bw config failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); goto out; } ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, &ets_data, NULL); if (ret) { dev_info(&pf->pdev->dev, "query veb bw ets config failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); goto out; } veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit); veb->bw_max_quanta = ets_data.tc_bw_max; veb->is_abs_credits = bw_data.absolute_credits_enable; veb->enabled_tc = ets_data.tc_valid_bits; tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) | (le16_to_cpu(bw_data.tc_bw_max[1]) << 16); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i]; veb->bw_tc_limit_credits[i] = le16_to_cpu(bw_data.tc_bw_limits[i]); veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7); } out: return ret; } /** * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF * @pf: board private structure * * On error: returns error code (negative) * On success: returns vsi index in PF (positive) **/ static int i40e_veb_mem_alloc(struct i40e_pf *pf) { int ret = -ENOENT; struct i40e_veb *veb; int 
i; /* Need to protect the allocation of switch elements at the PF level */ mutex_lock(&pf->switch_mutex); /* VEB list may be fragmented if VEB creation/destruction has * been happening. We can afford to do a quick scan to look * for any free slots in the list. * * find next empty veb slot, looping back around if necessary */ i = 0; while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL)) i++; if (i >= I40E_MAX_VEB) { ret = -ENOMEM; goto err_alloc_veb; /* out of VEB slots! */ } veb = kzalloc(sizeof(*veb), GFP_KERNEL); if (!veb) { ret = -ENOMEM; goto err_alloc_veb; } veb->pf = pf; veb->idx = i; veb->enabled_tc = 1; pf->veb[i] = veb; ret = i; err_alloc_veb: mutex_unlock(&pf->switch_mutex); return ret; } /** * i40e_switch_branch_release - Delete a branch of the switch tree * @branch: where to start deleting * * This uses recursion to find the tips of the branch to be * removed, deleting until we get back to and can delete this VEB. **/ static void i40e_switch_branch_release(struct i40e_veb *branch) { struct i40e_pf *pf = branch->pf; u16 branch_seid = branch->seid; u16 veb_idx = branch->idx; int i; /* release any VEBs on this VEB - RECURSION */ for (i = 0; i < I40E_MAX_VEB; i++) { if (!pf->veb[i]) continue; if (pf->veb[i]->uplink_seid == branch->seid) i40e_switch_branch_release(pf->veb[i]); } /* Release the VSIs on this VEB, but not the owner VSI. * * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing * the VEB itself, so don't use (*branch) after this loop. */ for (i = 0; i < pf->num_alloc_vsi; i++) { if (!pf->vsi[i]) continue; if (pf->vsi[i]->uplink_seid == branch_seid && (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { i40e_vsi_release(pf->vsi[i]); } } /* There's one corner case where the VEB might not have been * removed, so double check it here and remove it if needed. * This case happens if the veb was created from the debugfs * commands and no VSIs were added to it. 
*/ if (pf->veb[veb_idx]) i40e_veb_release(pf->veb[veb_idx]); } /** * i40e_veb_clear - remove veb struct * @veb: the veb to remove **/ static void i40e_veb_clear(struct i40e_veb *veb) { if (!veb) return; if (veb->pf) { struct i40e_pf *pf = veb->pf; mutex_lock(&pf->switch_mutex); if (pf->veb[veb->idx] == veb) pf->veb[veb->idx] = NULL; mutex_unlock(&pf->switch_mutex); } kfree(veb); } /** * i40e_veb_release - Delete a VEB and free its resources * @veb: the VEB being removed **/ void i40e_veb_release(struct i40e_veb *veb) { struct i40e_vsi *vsi = NULL; struct i40e_pf *pf; int i, n = 0; pf = veb->pf; /* find the remaining VSI and check for extras */ for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { n++; vsi = pf->vsi[i]; } } if (n != 1) { dev_info(&pf->pdev->dev, "can't remove VEB %d with %d VSIs left\n", veb->seid, n); return; } /* move the remaining VSI to uplink veb */ vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; if (veb->uplink_seid) { vsi->uplink_seid = veb->uplink_seid; if (veb->uplink_seid == pf->mac_seid) vsi->veb_idx = I40E_NO_VEB; else vsi->veb_idx = veb->veb_idx; } else { /* floating VEB */ vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; } i40e_aq_delete_element(&pf->hw, veb->seid, NULL); i40e_veb_clear(veb); } /** * i40e_add_veb - create the VEB in the switch * @veb: the VEB to be instantiated * @vsi: the controlling VSI **/ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) { struct i40e_pf *pf = veb->pf; bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); int ret; ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, veb->enabled_tc, false, &veb->seid, enable_stats, NULL); /* get a VEB from the hardware */ if (ret) { dev_info(&pf->pdev->dev, "couldn't add VEB, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EPERM; } /* get statistics counter */ ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL, &veb->stats_idx, NULL, NULL, NULL); if (ret) { dev_info(&pf->pdev->dev, "couldn't get VEB statistics idx, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EPERM; } ret = i40e_veb_get_bw_info(veb); if (ret) { dev_info(&pf->pdev->dev, "couldn't get VEB bw info, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); i40e_aq_delete_element(&pf->hw, veb->seid, NULL); return -ENOENT; } vsi->uplink_seid = veb->seid; vsi->veb_idx = veb->idx; vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; return 0; } /** * i40e_veb_setup - Set up a VEB * @pf: board private structure * @flags: VEB setup flags * @uplink_seid: the switch element to link to * @vsi_seid: the initial VSI seid * @enabled_tc: Enabled TC bit-map * * This allocates the sw VEB structure and links it into the switch * It is possible and legal for this to be a duplicate of an already * existing VEB. It is also possible for both uplink and vsi seids * to be zero, in order to create a floating VEB. * * Returns pointer to the successfully allocated VEB sw struct on * success, otherwise returns NULL on failure. 
**/ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid, u16 vsi_seid, u8 enabled_tc) { struct i40e_veb *veb, *uplink_veb = NULL; int vsi_idx, veb_idx; int ret; /* if one seid is 0, the other must be 0 to create a floating relay */ if ((uplink_seid == 0 || vsi_seid == 0) && (uplink_seid + vsi_seid != 0)) { dev_info(&pf->pdev->dev, "one, not both seid's are 0: uplink=%d vsi=%d\n", uplink_seid, vsi_seid); return NULL; } /* make sure there is such a vsi and uplink */ for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) break; if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) { dev_info(&pf->pdev->dev, "vsi seid %d not found\n", vsi_seid); return NULL; } if (uplink_seid && uplink_seid != pf->mac_seid) { for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { if (pf->veb[veb_idx] && pf->veb[veb_idx]->seid == uplink_seid) { uplink_veb = pf->veb[veb_idx]; break; } } if (!uplink_veb) { dev_info(&pf->pdev->dev, "uplink seid %d not found\n", uplink_seid); return NULL; } } /* get veb sw struct */ veb_idx = i40e_veb_mem_alloc(pf); if (veb_idx < 0) goto err_alloc; veb = pf->veb[veb_idx]; veb->flags = flags; veb->uplink_seid = uplink_seid; veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB); veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1); /* create the VEB in the switch */ ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); if (ret) goto err_veb; if (vsi_idx == pf->lan_vsi) pf->lan_veb = veb->idx; return veb; err_veb: i40e_veb_clear(veb); err_alloc: return NULL; } /** * i40e_setup_pf_switch_element - set PF vars based on switch type * @pf: board private structure * @ele: element we are building info from * @num_reported: total number of elements * @printconfig: should we print the contents * * helper function to assist in extracting a few useful SEID values. **/ static void i40e_setup_pf_switch_element(struct i40e_pf *pf, struct i40e_aqc_switch_config_element_resp *ele, u16 num_reported, bool printconfig) { u16 downlink_seid = le16_to_cpu(ele->downlink_seid); u16 uplink_seid = le16_to_cpu(ele->uplink_seid); u8 element_type = ele->element_type; u16 seid = le16_to_cpu(ele->seid); if (printconfig) dev_info(&pf->pdev->dev, "type=%d seid=%d uplink=%d downlink=%d\n", element_type, seid, uplink_seid, downlink_seid); switch (element_type) { case I40E_SWITCH_ELEMENT_TYPE_MAC: pf->mac_seid = seid; break; case I40E_SWITCH_ELEMENT_TYPE_VEB: /* Main VEB? 
*/ if (uplink_seid != pf->mac_seid) break; if (pf->lan_veb >= I40E_MAX_VEB) { int v; /* find existing or else empty VEB */ for (v = 0; v < I40E_MAX_VEB; v++) { if (pf->veb[v] && (pf->veb[v]->seid == seid)) { pf->lan_veb = v; break; } } if (pf->lan_veb >= I40E_MAX_VEB) { v = i40e_veb_mem_alloc(pf); if (v < 0) break; pf->lan_veb = v; } } if (pf->lan_veb >= I40E_MAX_VEB) break; pf->veb[pf->lan_veb]->seid = seid; pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; pf->veb[pf->lan_veb]->pf = pf; pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; break; case I40E_SWITCH_ELEMENT_TYPE_VSI: if (num_reported != 1) break; /* This is immediately after a reset so we can assume this is * the PF's VSI */ pf->mac_seid = uplink_seid; pf->pf_seid = downlink_seid; pf->main_vsi_seid = seid; if (printconfig) dev_info(&pf->pdev->dev, "pf_seid=%d main_vsi_seid=%d\n", pf->pf_seid, pf->main_vsi_seid); break; case I40E_SWITCH_ELEMENT_TYPE_PF: case I40E_SWITCH_ELEMENT_TYPE_VF: case I40E_SWITCH_ELEMENT_TYPE_EMP: case I40E_SWITCH_ELEMENT_TYPE_BMC: case I40E_SWITCH_ELEMENT_TYPE_PE: case I40E_SWITCH_ELEMENT_TYPE_PA: /* ignore these for now */ break; default: dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n", element_type, seid); break; } } /** * i40e_fetch_switch_configuration - Get switch config from firmware * @pf: board private structure * @printconfig: should we print the contents * * Get the current switch configuration from the device and * extract a few useful SEID values. **/ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) { struct i40e_aqc_get_switch_config_resp *sw_config; u16 next_seid = 0; int ret = 0; u8 *aq_buf; int i; aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL); if (!aq_buf) return -ENOMEM; sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; do { u16 num_reported, num_total; ret = i40e_aq_get_switch_config(&pf->hw, sw_config, I40E_AQ_LARGE_BUF, &next_seid, NULL); if (ret) { dev_info(&pf->pdev->dev, "get switch config failed err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); kfree(aq_buf); return -ENOENT; } num_reported = le16_to_cpu(sw_config->header.num_reported); num_total = le16_to_cpu(sw_config->header.num_total); if (printconfig) dev_info(&pf->pdev->dev, "header: %d reported %d total\n", num_reported, num_total); for (i = 0; i < num_reported; i++) { struct i40e_aqc_switch_config_element_resp *ele = &sw_config->element[i]; i40e_setup_pf_switch_element(pf, ele, num_reported, printconfig); } } while (next_seid != 0); kfree(aq_buf); return ret; } /** * i40e_setup_pf_switch - Setup the HW switch on startup or after reset * @pf: board private structure * @reinit: if the Main VSI needs to re-initialized. * * Returns 0 on success, negative value on failure **/ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) { u16 flags = 0; int ret; /* find out what's out there already */ ret = i40e_fetch_switch_configuration(pf, false); if (ret) { dev_info(&pf->pdev->dev, "couldn't fetch switch config, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return ret; } i40e_pf_reset_stats(pf); /* set the switch config bit for the whole device to * support limited promisc or true promisc * when user requests promisc. The default is limited * promisc. 
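* On PF 0 without I40E_FLAG_TRUE_PROMISC_SUPPORT the code below sets I40E_AQ_SET_SWITCH_CFG_PROMISC in flags; in either case that bit is passed as the valid_flags mask to i40e_aq_set_switch_config().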
*/ if ((pf->hw.pf_id == 0) && !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) { flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; pf->last_sw_conf_flags = flags; } if (pf->hw.pf_id == 0) { u16 valid_flags; valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0, NULL); if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { dev_info(&pf->pdev->dev, "couldn't set switch config bits, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* not a fatal problem, just keep going */ } pf->last_sw_conf_valid_flags = valid_flags; } /* first time setup */ if (pf->lan_vsi == I40E_NO_VSI || reinit) { struct i40e_vsi *vsi = NULL; u16 uplink_seid; /* Set up the PF VSI associated with the PF's main VSI * that is already in the HW switch */ if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) uplink_seid = pf->veb[pf->lan_veb]->seid; else uplink_seid = pf->mac_seid; if (pf->lan_vsi == I40E_NO_VSI) vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); else if (reinit) vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); if (!vsi) { dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); i40e_cloud_filter_exit(pf); i40e_fdir_teardown(pf); return -EAGAIN; } } else { /* force a reset of TC and queue layout configurations */ u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); } i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); i40e_fdir_sb_setup(pf); /* Setup static PF queue filter control settings */ ret = i40e_setup_pf_filter_control(pf); if (ret) { dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n", ret); /* Failure here should not stop continuing other steps */ } /* enable RSS in the HW, even for only one queue, as the stack can use * the hash */ if ((pf->flags & I40E_FLAG_RSS_ENABLED)) i40e_pf_config_rss(pf); /* fill in link information and enable LSE reporting */ i40e_link_event(pf); /* Initialize user-specific link properties */ pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? true : false); i40e_ptp_init(pf); /* repopulate tunnel port filters */ i40e_sync_udp_filters(pf); return ret; } /** * i40e_determine_queue_usage - Work out queue distribution * @pf: board private structure **/ static void i40e_determine_queue_usage(struct i40e_pf *pf) { int queues_left; int q_max; pf->num_lan_qps = 0; /* Find the max queues to be put into basic use. We'll always be * using TC0, whether or not DCB is running, and TC0 will get the * big RSS set. 
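* After the LAN queue count is settled below, one queue is held back for Flow Director sideband when enabled, and whatever remains is divided between the SR-IOV VF and VMDq queue pools.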
*/ queues_left = pf->hw.func_caps.num_tx_qp; if ((queues_left == 1) || !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { /* one qp for PF, no queues for anything else */ queues_left = 0; pf->alloc_rss_size = pf->num_lan_qps = 1; /* make sure all the fancies are disabled */ pf->flags &= ~(I40E_FLAG_RSS_ENABLED | I40E_FLAG_IWARP_ENABLED | I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED | I40E_FLAG_SRIOV_ENABLED | I40E_FLAG_VMDQ_ENABLED); pf->flags |= I40E_FLAG_FD_SB_INACTIVE; } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_DCB_CAPABLE))) { /* one qp for PF */ pf->alloc_rss_size = pf->num_lan_qps = 1; queues_left -= pf->num_lan_qps; pf->flags &= ~(I40E_FLAG_RSS_ENABLED | I40E_FLAG_IWARP_ENABLED | I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_DCB_ENABLED | I40E_FLAG_VMDQ_ENABLED); pf->flags |= I40E_FLAG_FD_SB_INACTIVE; } else { /* Not enough queues for all TCs */ if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && (queues_left < I40E_MAX_TRAFFIC_CLASS)) { pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED); dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); } /* limit lan qps to the smaller of qps, cpus or msix */ q_max = max_t(int, pf->rss_size_max, num_online_cpus()); q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp); q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors); pf->num_lan_qps = q_max; queues_left -= pf->num_lan_qps; } if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { if (queues_left > 1) { queues_left -= 1; /* save 1 queue for FD */ } else { pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; pf->flags |= I40E_FLAG_FD_SB_INACTIVE; dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n"); } } if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && pf->num_vf_qps && pf->num_req_vfs && queues_left) { pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left / pf->num_vf_qps)); queues_left -= (pf->num_req_vfs * pf->num_vf_qps); } if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, (queues_left / pf->num_vmdq_qps)); queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps); } pf->queues_left = queues_left; dev_dbg(&pf->pdev->dev, "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n", pf->hw.func_caps.num_tx_qp, !!(pf->flags & I40E_FLAG_FD_SB_ENABLED), pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs, pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left); } /** * i40e_setup_pf_filter_control - Setup PF static filter control * @pf: PF to be setup * * i40e_setup_pf_filter_control sets up a PF's initial filter control * settings. If PE/FCoE are enabled then it will also set the per PF * based filter sizes required for them. It also enables Flow director, * ethertype and macvlan type filter settings for the pf. 
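* For instance, with either I40E_FLAG_FD_SB_ENABLED or I40E_FLAG_FD_ATR_ENABLED set, enable_fdir is turned on below while the hash LUT size stays at 128 entries.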
* * Returns 0 on success, negative on failure **/ static int i40e_setup_pf_filter_control(struct i40e_pf *pf) { struct i40e_filter_control_settings *settings = &pf->filter_settings; settings->hash_lut_size = I40E_HASH_LUT_SIZE_128; /* Flow Director is enabled */ if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) settings->enable_fdir = true; /* Ethtype and MACVLAN filters enabled for PF */ settings->enable_ethtype = true; settings->enable_macvlan = true; if (i40e_set_filter_control(&pf->hw, settings)) return -ENOENT; return 0; } #define INFO_STRING_LEN 255 #define REMAIN(__x) (INFO_STRING_LEN - (__x)) static void i40e_print_features(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; char *buf; int i; buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL); if (!buf) return; i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id); #ifdef CONFIG_PCI_IOV i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); #endif i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d", pf->hw.func_caps.num_vsis, pf->vsi[pf->lan_vsi]->num_queue_pairs); if (pf->flags & I40E_FLAG_RSS_ENABLED) i += snprintf(&buf[i], REMAIN(i), " RSS"); if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) i += snprintf(&buf[i], REMAIN(i), " FD_ATR"); if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { i += snprintf(&buf[i], REMAIN(i), " FD_SB"); i += snprintf(&buf[i], REMAIN(i), " NTUPLE"); } if (pf->flags & I40E_FLAG_DCB_CAPABLE) i += snprintf(&buf[i], REMAIN(i), " DCB"); i += snprintf(&buf[i], REMAIN(i), " VxLAN"); i += snprintf(&buf[i], REMAIN(i), " Geneve"); if (pf->flags & I40E_FLAG_PTP) i += snprintf(&buf[i], REMAIN(i), " PTP"); if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) i += snprintf(&buf[i], REMAIN(i), " VEB"); else i += snprintf(&buf[i], REMAIN(i), " VEPA"); dev_info(&pf->pdev->dev, "%s\n", buf); kfree(buf); WARN_ON(i > INFO_STRING_LEN); } /** * i40e_get_platform_mac_addr - get platform-specific MAC address * @pdev: PCI device information struct * @pf: board private structure * * Look up the MAC address for the device. First we'll try * eth_platform_get_mac_address, which will check Open Firmware, or arch * specific fallback. Otherwise, we'll default to the stored value in * firmware. **/ static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf) { if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr)) i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr); } /** * i40e_set_fec_in_flags - helper function for setting FEC options in flags * @fec_cfg: FEC option to set in flags * @flags: ptr to flags in which we set FEC option **/ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags) { if (fec_cfg & I40E_AQ_SET_FEC_AUTO) *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC; if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) || (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) { *flags |= I40E_FLAG_RS_FEC; *flags &= ~I40E_FLAG_BASE_R_FEC; } if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) || (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) { *flags |= I40E_FLAG_BASE_R_FEC; *flags &= ~I40E_FLAG_RS_FEC; } if (fec_cfg == 0) *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC); } /** * i40e_check_recovery_mode - check if we are running transition firmware * @pf: board private structure * * Check registers indicating the firmware runs in recovery mode. Sets the * appropriate driver state. 
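* On XL710 parts any of the CORER, GLOBR, TRANSITION or NVM recovery values read from I40E_GL_FWSTS below counts as recovery mode; X722 checks only the CORER and GLOBR values.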
* * Returns true if the recovery mode was detected, false otherwise **/ static bool i40e_check_recovery_mode(struct i40e_pf *pf) { u32 val = rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK; bool is_recovery_mode = false; if (pf->hw.mac.type == I40E_MAC_XL710) is_recovery_mode = val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK || val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK || val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK || val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK; if (pf->hw.mac.type == I40E_MAC_X722) is_recovery_mode = val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK || val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK; if (is_recovery_mode) { dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n"); dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); set_bit(__I40E_RECOVERY_MODE, pf->state); return true; } if (test_and_clear_bit(__I40E_RECOVERY_MODE, pf->state)) dev_info(&pf->pdev->dev, "Reinitializing in normal mode with full functionality.\n"); return false; } /** * i40e_pf_loop_reset - perform reset in a loop. * @pf: board private structure * * This function is useful when a NIC is about to enter recovery mode. * When a NIC's internal data structures are corrupted the NIC's * firmware is going to enter recovery mode. * Right after a POR it takes about 7 minutes for firmware to enter * recovery mode. Until that time a NIC is in some kind of intermediate * state. After that time period the NIC almost surely enters * recovery mode. The only way for a driver to detect intermediate * state is to issue a series of pf-resets and check a return value. * If a PF reset returns success then the firmware could be in recovery * mode so the caller of this code needs to check for recovery mode * if this function returns success. There is a little chance that * firmware will hang in intermediate state forever. * Since waiting 7 minutes is quite a lot of time this function waits * 10 seconds and then gives up by returning an error. * * Return 0 on success, negative on failure. **/ static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf) { const unsigned short MAX_CNT = 1000; const unsigned short MSECS = 10; struct i40e_hw *hw = &pf->hw; i40e_status ret; int cnt; for (cnt = 0; cnt < MAX_CNT; ++cnt) { ret = i40e_pf_reset(hw); if (!ret) break; msleep(MSECS); } if (cnt == MAX_CNT) { dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret); return ret; } pf->pfr_count++; return ret; } /** * i40e_init_recovery_mode - initialize subsystems needed in recovery mode * @pf: board private structure * @hw: ptr to the hardware info * * This function does a minimal setup of all subsystems needed for running * recovery mode. * * Returns 0 on success, negative on failure **/ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw) { struct i40e_vsi *vsi; int err; int v_idx; pci_save_state(pf->pdev); /* set up periodic task facility */ timer_setup(&pf->service_timer, i40e_service_timer, 0); pf->service_timer_period = HZ; INIT_WORK(&pf->service_task, i40e_service_task); clear_bit(__I40E_SERVICE_SCHED, pf->state); err = i40e_init_interrupt_scheme(pf); if (err) goto err_switch_setup; /* The number of VSIs reported by the FW is the minimum guaranteed * to us; HW supports far more and we share the remaining pool with * the other PFs. We allocate space for more than the guarantee with * the understanding that we might not get them all later. 
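* Concretely, if func_caps.num_vsis is below I40E_MIN_VSI_ALLOC the code below still sizes the tracking array at I40E_MIN_VSI_ALLOC entries.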
*/ if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; else pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; /* Set up the vsi struct and our local tracking of the MAIN PF vsi. */ pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), GFP_KERNEL); if (!pf->vsi) { err = -ENOMEM; goto err_switch_setup; } /* We allocate one VSI which is needed as absolute minimum * in order to register the netdev */ v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN); if (v_idx < 0) goto err_switch_setup; pf->lan_vsi = v_idx; vsi = pf->vsi[v_idx]; if (!vsi) goto err_switch_setup; vsi->alloc_queue_pairs = 1; err = i40e_config_netdev(vsi); if (err) goto err_switch_setup; err = register_netdev(vsi->netdev); if (err) goto err_switch_setup; vsi->netdev_registered = true; i40e_dbg_pf_init(pf); err = i40e_setup_misc_vector_for_recovery_mode(pf); if (err) goto err_switch_setup; /* tell the firmware that we're starting */ i40e_send_version(pf); /* since everything's happy, start the service_task timer */ mod_timer(&pf->service_timer, round_jiffies(jiffies + pf->service_timer_period)); return 0; err_switch_setup: i40e_reset_interrupt_capability(pf); del_timer_sync(&pf->service_timer); i40e_shutdown_adminq(hw); iounmap(hw->hw_addr); pci_disable_pcie_error_reporting(pf->pdev); pci_release_mem_regions(pf->pdev); pci_disable_device(pf->pdev); kfree(pf); return err; } /** * i40e_probe - Device initialization routine * @pdev: PCI device information struct * @ent: entry in i40e_pci_tbl * * i40e_probe initializes a PF identified by a pci_dev structure. * The OS initialization, configuring of the PF private structure, * and a hardware reset occur. * * Returns 0 on success, negative on failure **/ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_pf *pf; struct i40e_hw *hw; static u16 pfs_found; u16 wol_nvm_bits; u16 link_status; int err; u32 val; u32 i; u8 set_fc_aq_fail; err = pci_enable_device_mem(pdev); if (err) return err; /* set up for high or low dma */ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); goto err_dma; } } /* set up pci connections */ err = pci_request_mem_regions(pdev, i40e_driver_name); if (err) { dev_info(&pdev->dev, "pci_request_selected_regions failed %d\n", err); goto err_pci_reg; } pci_enable_pcie_error_reporting(pdev); pci_set_master(pdev); /* Now that we have a PCI connection, we need to do the * low level device setup. This is primarily setting up * the Admin Queue structures and then querying for the * device's current profile information. */ pf = kzalloc(sizeof(*pf), GFP_KERNEL); if (!pf) { err = -ENOMEM; goto err_pf_alloc; } pf->next_vsi = 0; pf->pdev = pdev; set_bit(__I40E_DOWN, pf->state); hw = &pf->hw; hw->back = pf; pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0), I40E_MAX_CSR_SPACE); /* We believe that the highest register to read is * I40E_GLGEN_STAT_CLEAR, so we check if the BAR size * is not less than that before mapping to prevent a * kernel panic. 
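* That is, a BAR smaller than the I40E_GLGEN_STAT_CLEAR offset makes the probe below bail out with -ENOMEM instead of mapping the region.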
*/ if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) { dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n", pf->ioremap_len); err = -ENOMEM; goto err_ioremap; } hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len); if (!hw->hw_addr) { err = -EIO; dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n", (unsigned int)pci_resource_start(pdev, 0), pf->ioremap_len, err); goto err_ioremap; } hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_device_id = pdev->subsystem_device; hw->bus.device = PCI_SLOT(pdev->devfn); hw->bus.func = PCI_FUNC(pdev->devfn); hw->bus.bus_id = pdev->bus->number; pf->instance = pfs_found; /* Select something other than the 802.1ad ethertype for the * switch to use internally and drop on ingress. */ hw->switch_tag = 0xffff; hw->first_tag = ETH_P_8021AD; hw->second_tag = ETH_P_8021Q; INIT_LIST_HEAD(&pf->l3_flex_pit_list); INIT_LIST_HEAD(&pf->l4_flex_pit_list); INIT_LIST_HEAD(&pf->ddp_old_prof); /* set up the locks for the AQ, do this only once in probe * and destroy them only once in remove */ mutex_init(&hw->aq.asq_mutex); mutex_init(&hw->aq.arq_mutex); pf->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK); if (debug < -1) pf->hw.debug_mask = debug; /* do a special CORER for clearing PXE mode once at init */ if (hw->revision_id == 0 && (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) { wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); i40e_flush(hw); msleep(200); pf->corer_count++; i40e_clear_pxe_mode(hw); } /* Reset here to make sure all is clean and to define PF 'n' */ i40e_clear_hw(hw); err = i40e_set_mac_type(hw); if (err) { dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", err); goto err_pf_reset; } err = i40e_pf_loop_reset(pf); if (err) { dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); goto err_pf_reset; } i40e_check_recovery_mode(pf); hw->aq.num_arq_entries = I40E_AQ_LEN; hw->aq.num_asq_entries = I40E_AQ_LEN; hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev)); err = i40e_init_shared_code(hw); if (err) { dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", err); goto err_pf_reset; } /* set up a default setting for link flow control */ pf->hw.fc.requested_mode = I40E_FC_NONE; err = i40e_init_adminq(hw); if (err) { if (err == I40E_ERR_FIRMWARE_API_VERSION) dev_info(&pdev->dev, "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n", hw->aq.api_maj_ver, hw->aq.api_min_ver, I40E_FW_API_VERSION_MAJOR, I40E_FW_MINOR_VERSION(hw)); else dev_info(&pdev->dev, "The driver for the device stopped because the device firmware failed to init. 
Try updating your NVM image.\n"); goto err_pf_reset; } i40e_get_oem_version(hw); /* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */ dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n", hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, hw->aq.api_maj_ver, hw->aq.api_min_ver, i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id, hw->subsystem_vendor_id, hw->subsystem_device_id); if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) dev_info(&pdev->dev, "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n", hw->aq.api_maj_ver, hw->aq.api_min_ver, I40E_FW_API_VERSION_MAJOR, I40E_FW_MINOR_VERSION(hw)); else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) dev_info(&pdev->dev, "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n", hw->aq.api_maj_ver, hw->aq.api_min_ver, I40E_FW_API_VERSION_MAJOR, I40E_FW_MINOR_VERSION(hw)); i40e_verify_eeprom(pf); /* Rev 0 hardware was never productized */ if (hw->revision_id < 1) dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); i40e_clear_pxe_mode(hw); err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities); if (err) goto err_adminq_setup; err = i40e_sw_init(pf); if (err) { dev_info(&pdev->dev, "sw_init failed: %d\n", err); goto err_sw_init; } if (test_bit(__I40E_RECOVERY_MODE, pf->state)) return i40e_init_recovery_mode(pf, hw); err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, 0, 0); if (err) { dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err); goto err_init_lan_hmc; } err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); if (err) { dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err); err = -ENOENT; goto err_configure_lan_hmc; } /* Disable LLDP for NICs that have firmware versions lower than v4.3. * Ignore error return codes because if it was already disabled via * hardware settings this will fail */ if (pf->hw_features & I40E_HW_STOP_FW_LLDP) { dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n"); i40e_aq_stop_lldp(hw, true, false, NULL); } /* allow a platform config to override the HW addr */ i40e_get_platform_mac_addr(pdev, pf); if (!is_valid_ether_addr(hw->mac.addr)) { dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); err = -EIO; goto err_mac_addr; } dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr); ether_addr_copy(hw->mac.perm_addr, hw->mac.addr); i40e_get_port_mac_addr(hw, hw->mac.port_addr); if (is_valid_ether_addr(hw->mac.port_addr)) pf->hw_features |= I40E_HW_PORT_ID_VALID; pci_set_drvdata(pdev, pf); pci_save_state(pdev); dev_info(&pdev->dev, (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ? 
"FW LLDP is disabled\n" : "FW LLDP is enabled\n"); /* Enable FW to write default DCB config on link-up */ i40e_aq_set_dcb_parameters(hw, true, NULL); #ifdef CONFIG_I40E_DCB err = i40e_init_pf_dcb(pf); if (err) { dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED); /* Continue without DCB enabled */ } #endif /* CONFIG_I40E_DCB */ /* set up periodic task facility */ timer_setup(&pf->service_timer, i40e_service_timer, 0); pf->service_timer_period = HZ; INIT_WORK(&pf->service_task, i40e_service_task); clear_bit(__I40E_SERVICE_SCHED, pf->state); /* NVM bit on means WoL disabled for the port */ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1) pf->wol_en = false; else pf->wol_en = true; device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); /* set up the main switch operations */ i40e_determine_queue_usage(pf); err = i40e_init_interrupt_scheme(pf); if (err) goto err_switch_setup; /* The number of VSIs reported by the FW is the minimum guaranteed * to us; HW supports far more and we share the remaining pool with * the other PFs. We allocate space for more than the guarantee with * the understanding that we might not get them all later. */ if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; else pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */ pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), GFP_KERNEL); if (!pf->vsi) { err = -ENOMEM; goto err_switch_setup; } #ifdef CONFIG_PCI_IOV /* prep for VF support */ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && (pf->flags & I40E_FLAG_MSIX_ENABLED) && !test_bit(__I40E_BAD_EEPROM, pf->state)) { if (pci_num_vf(pdev)) pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; } #endif err = i40e_setup_pf_switch(pf, false); if (err) { dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); goto err_vsis; } INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list); /* Make sure flow control is set according to current settings */ err = i40e_set_fc(hw, &set_fc_aq_fail, true); if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET) dev_dbg(&pf->pdev->dev, "Set fc with err %s aq_err %s on get_phy_cap\n", i40e_stat_str(hw, err), i40e_aq_str(hw, hw->aq.asq_last_status)); if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET) dev_dbg(&pf->pdev->dev, "Set fc with err %s aq_err %s on set_phy_config\n", i40e_stat_str(hw, err), i40e_aq_str(hw, hw->aq.asq_last_status)); if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE) dev_dbg(&pf->pdev->dev, "Set fc with err %s aq_err %s on get_link_info\n", i40e_stat_str(hw, err), i40e_aq_str(hw, hw->aq.asq_last_status)); /* if FDIR VSI was set up, start it now */ for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { i40e_vsi_open(pf->vsi[i]); break; } } /* The driver only wants link up/down and module qualification * reports from firmware. Note the negative logic. */ err = i40e_aq_set_phy_int_mask(&pf->hw, ~(I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MEDIA_NA | I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); if (err) dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", i40e_stat_str(&pf->hw, err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Reconfigure hardware for allowing smaller MSS in the case * of TSO, so that we avoid the MDD being fired and causing * a reset in the case of small MSS+TSO. 
*/ val = rd32(hw, I40E_REG_MSS); if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) { val &= ~I40E_REG_MSS_MIN_MASK; val |= I40E_64BYTE_MSS; wr32(hw, I40E_REG_MSS, val); } if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { msleep(75); err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (err) dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } /* The main driver is (mostly) up and happy. We need to set this state * before setting up the misc vector or we get a race and the vector * ends up disabled forever. */ clear_bit(__I40E_DOWN, pf->state); /* In case of MSIX we are going to setup the misc vector right here * to handle admin queue events etc. In case of legacy and MSI * the misc functionality and queue processing is combined in * the same vector and that gets setup at open. */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) { err = i40e_setup_misc_vector(pf); if (err) { dev_info(&pdev->dev, "setup of misc vector failed: %d\n", err); goto err_vsis; } } #ifdef CONFIG_PCI_IOV /* prep for VF support */ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && (pf->flags & I40E_FLAG_MSIX_ENABLED) && !test_bit(__I40E_BAD_EEPROM, pf->state)) { /* disable link interrupts for VFs */ val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM); val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val); i40e_flush(hw); if (pci_num_vf(pdev)) { dev_info(&pdev->dev, "Active VFs found, allocating resources.\n"); err = i40e_alloc_vfs(pf, pci_num_vf(pdev)); if (err) dev_info(&pdev->dev, "Error %d allocating resources for existing VFs\n", err); } } #endif /* CONFIG_PCI_IOV */ if (pf->flags & I40E_FLAG_IWARP_ENABLED) { pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile, pf->num_iwarp_msix, I40E_IWARP_IRQ_PILE_ID); if (pf->iwarp_base_vector < 0) { dev_info(&pdev->dev, "failed to get tracking for %d vectors for IWARP err=%d\n", pf->num_iwarp_msix, pf->iwarp_base_vector); pf->flags &= ~I40E_FLAG_IWARP_ENABLED; } } i40e_dbg_pf_init(pf); /* tell the firmware that we're starting */ i40e_send_version(pf); /* since everything's happy, start the service_task timer */ mod_timer(&pf->service_timer, round_jiffies(jiffies + pf->service_timer_period)); /* add this PF to client device list and launch a client service task */ if (pf->flags & I40E_FLAG_IWARP_ENABLED) { err = i40e_lan_add_device(pf); if (err) dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n", err); } #define PCI_SPEED_SIZE 8 #define PCI_WIDTH_SIZE 8 /* Devices on the IOSF bus do not have this information * and will report PCI Gen 1 x 1 by default so don't bother * checking them. 
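* For the other devices the probe below reads PCI_EXP_LNKSTA and warns when the negotiated link is narrower than x8 or slower than 8.0 GT/s.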
*/ if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) { char speed[PCI_SPEED_SIZE] = "Unknown"; char width[PCI_WIDTH_SIZE] = "Unknown"; /* Get the negotiated link width and speed from PCI config * space */ pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status); i40e_set_pci_config_data(hw, link_status); switch (hw->bus.speed) { case i40e_bus_speed_8000: strlcpy(speed, "8.0", PCI_SPEED_SIZE); break; case i40e_bus_speed_5000: strlcpy(speed, "5.0", PCI_SPEED_SIZE); break; case i40e_bus_speed_2500: strlcpy(speed, "2.5", PCI_SPEED_SIZE); break; default: break; } switch (hw->bus.width) { case i40e_bus_width_pcie_x8: strlcpy(width, "8", PCI_WIDTH_SIZE); break; case i40e_bus_width_pcie_x4: strlcpy(width, "4", PCI_WIDTH_SIZE); break; case i40e_bus_width_pcie_x2: strlcpy(width, "2", PCI_WIDTH_SIZE); break; case i40e_bus_width_pcie_x1: strlcpy(width, "1", PCI_WIDTH_SIZE); break; default: break; } dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n", speed, width); if (hw->bus.width < i40e_bus_width_pcie_x8 || hw->bus.speed < i40e_bus_speed_8000) { dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); } } /* get the requested speeds from the fw */ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); if (err) dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n", i40e_stat_str(&pf->hw, err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); pf->hw.phy.link_info.requested_speeds = abilities.link_speed; /* set the FEC config due to the board capabilities */ i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags); /* get the supported phy types from the fw */ err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL); if (err) dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n", i40e_stat_str(&pf->hw, err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Add a filter to drop all Flow control frames from any VSI from being * transmitted. By doing so we stop a malicious VF from sending out * PAUSE or PFC frames and potentially controlling traffic for other * PF/VF VSIs. * The FW can still send Flow control frames if enabled. */ i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, pf->main_vsi_seid); if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) || (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4)) pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS; if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722) pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER; /* print a string summarizing features */ i40e_print_features(pf); return 0; /* Unwind what we've done if something failed in the setup */ err_vsis: set_bit(__I40E_DOWN, pf->state); i40e_clear_interrupt_scheme(pf); kfree(pf->vsi); err_switch_setup: i40e_reset_interrupt_capability(pf); del_timer_sync(&pf->service_timer); err_mac_addr: err_configure_lan_hmc: (void)i40e_shutdown_lan_hmc(hw); err_init_lan_hmc: kfree(pf->qp_pile); err_sw_init: err_adminq_setup: err_pf_reset: iounmap(hw->hw_addr); err_ioremap: kfree(pf); err_pf_alloc: pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); return err; } /** * i40e_remove - Device removal routine * @pdev: PCI device information struct * * i40e_remove is called by the PCI subsystem to alert the driver * that is should release a PCI device. 
This could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. **/ static void i40e_remove(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_hw *hw = &pf->hw; i40e_status ret_code; int i; i40e_dbg_pf_exit(pf); i40e_ptp_stop(pf); /* Disable RSS in hw */ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0); /* no more scheduling of any task */ set_bit(__I40E_SUSPENDED, pf->state); set_bit(__I40E_DOWN, pf->state); if (pf->service_timer.function) del_timer_sync(&pf->service_timer); if (pf->service_task.func) cancel_work_sync(&pf->service_task); if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { struct i40e_vsi *vsi = pf->vsi[0]; /* We know that we have allocated only one vsi for this PF, * it was just for registering netdevice, so the interface * could be visible in the 'ifconfig' output */ unregister_netdev(vsi->netdev); free_netdev(vsi->netdev); goto unmap; } /* Client close must be called explicitly here because the timer * has been stopped. */ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { i40e_free_vfs(pf); pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; } i40e_fdir_teardown(pf); /* If there is a switch structure or any orphans, remove them. * This will leave only the PF's VSI remaining. */ for (i = 0; i < I40E_MAX_VEB; i++) { if (!pf->veb[i]) continue; if (pf->veb[i]->uplink_seid == pf->mac_seid || pf->veb[i]->uplink_seid == 0) i40e_switch_branch_release(pf->veb[i]); } /* Now we can shutdown the PF's VSI, just before we kill * adminq and hmc. */ if (pf->vsi[pf->lan_vsi]) i40e_vsi_release(pf->vsi[pf->lan_vsi]); i40e_cloud_filter_exit(pf); /* remove attached clients */ if (pf->flags & I40E_FLAG_IWARP_ENABLED) { ret_code = i40e_lan_del_device(pf); if (ret_code) dev_warn(&pdev->dev, "Failed to delete client device: %d\n", ret_code); } /* shutdown and destroy the HMC */ if (hw->hmc.hmc_obj) { ret_code = i40e_shutdown_lan_hmc(hw); if (ret_code) dev_warn(&pdev->dev, "Failed to destroy the HMC resources: %d\n", ret_code); } unmap: /* Free MSI/legacy interrupt 0 when in recovery mode. */ if (test_bit(__I40E_RECOVERY_MODE, pf->state) && !(pf->flags & I40E_FLAG_MSIX_ENABLED)) free_irq(pf->pdev->irq, pf); /* shutdown the adminq */ i40e_shutdown_adminq(hw); /* destroy the locks only once, here */ mutex_destroy(&hw->aq.arq_mutex); mutex_destroy(&hw->aq.asq_mutex); /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ rtnl_lock(); i40e_clear_interrupt_scheme(pf); for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i]) { if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) i40e_vsi_clear_rings(pf->vsi[i]); i40e_vsi_clear(pf->vsi[i]); pf->vsi[i] = NULL; } } rtnl_unlock(); for (i = 0; i < I40E_MAX_VEB; i++) { kfree(pf->veb[i]); pf->veb[i] = NULL; } kfree(pf->qp_pile); kfree(pf->vsi); iounmap(hw->hw_addr); kfree(pf); pci_release_mem_regions(pdev); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); } /** * i40e_pci_error_detected - warning that something funky happened in PCI land * @pdev: PCI device information struct * @error: the type of PCI error * * Called to warn that something happened and the error handling steps * are in progress. Allows the driver to quiesce things, be ready for * remediation. 
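* Unless the device is already suspended (or was never fully probed), the handler below quiesces via i40e_prep_for_reset() and returns PCI_ERS_RESULT_NEED_RESET to request a slot reset.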
**/ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev, enum pci_channel_state error) { struct i40e_pf *pf = pci_get_drvdata(pdev); dev_info(&pdev->dev, "%s: error %d\n", __func__, error); if (!pf) { dev_info(&pdev->dev, "Cannot recover - error happened during device probe\n"); return PCI_ERS_RESULT_DISCONNECT; } /* shutdown all operations */ if (!test_bit(__I40E_SUSPENDED, pf->state)) i40e_prep_for_reset(pf, false); /* Request a slot reset */ return PCI_ERS_RESULT_NEED_RESET; } /** * i40e_pci_error_slot_reset - a PCI slot reset just happened * @pdev: PCI device information struct * * Called to find if the driver can work with the device now that * the pci slot has been reset. If a basic connection seems good * (registers are readable and have sane content) then return a * happy little PCI_ERS_RESULT_xxx. **/ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); pci_ers_result_t result; u32 reg; dev_dbg(&pdev->dev, "%s\n", __func__); if (pci_enable_device_mem(pdev)) { dev_info(&pdev->dev, "Cannot re-enable PCI device after reset.\n"); result = PCI_ERS_RESULT_DISCONNECT; } else { pci_set_master(pdev); pci_restore_state(pdev); pci_save_state(pdev); pci_wake_from_d3(pdev, false); reg = rd32(&pf->hw, I40E_GLGEN_RTRIG); if (reg == 0) result = PCI_ERS_RESULT_RECOVERED; else result = PCI_ERS_RESULT_DISCONNECT; } return result; } /** * i40e_pci_error_reset_prepare - prepare device driver for pci reset * @pdev: PCI device information struct */ static void i40e_pci_error_reset_prepare(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); i40e_prep_for_reset(pf, false); } /** * i40e_pci_error_reset_done - pci reset done, device driver reset can begin * @pdev: PCI device information struct */ static void i40e_pci_error_reset_done(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); i40e_reset_and_rebuild(pf, false, false); } /** * i40e_pci_error_resume - restart operations after PCI error recovery * @pdev: PCI device information struct * * Called to allow the driver to bring things back up after PCI error * and/or reset recovery has finished. **/ static void i40e_pci_error_resume(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); dev_dbg(&pdev->dev, "%s\n", __func__); if (test_bit(__I40E_SUSPENDED, pf->state)) return; i40e_handle_reset_warning(pf, false); } /** * i40e_enable_mc_magic_wake - enable multicast magic packet wake up * using the mac_address_write admin q function * @pf: pointer to i40e_pf struct **/ static void i40e_enable_mc_magic_wake(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; i40e_status ret; u8 mac_addr[6]; u16 flags = 0; /* Get current MAC address in case it's an LAA */ if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) { ether_addr_copy(mac_addr, pf->vsi[pf->lan_vsi]->netdev->dev_addr); } else { dev_err(&pf->pdev->dev, "Failed to retrieve MAC address; using default\n"); ether_addr_copy(mac_addr, hw->mac.addr); } /* The FW expects the mac address write cmd to first be called with * one of these flags before calling it again with the multicast * enable flags. 
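* That is, the first write below uses I40E_AQC_WRITE_TYPE_LAA_WOL (or LAA_ONLY on flex10 partitions other than 1), and only then is the command repeated with the MC_MAG_EN, WOL_PRESERVE_ON_PFR and UPDATE_MC_MAG flags.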
*/ flags = I40E_AQC_WRITE_TYPE_LAA_WOL; if (hw->func_caps.flex10_enable && hw->partition_id != 1) flags = I40E_AQC_WRITE_TYPE_LAA_ONLY; ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL); if (ret) { dev_err(&pf->pdev->dev, "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up"); return; } flags = I40E_AQC_MC_MAG_EN | I40E_AQC_WOL_PRESERVE_ON_PFR | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG; ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL); if (ret) dev_err(&pf->pdev->dev, "Failed to enable Multicast Magic Packet wake up\n"); } /** * i40e_shutdown - PCI callback for shutting down * @pdev: PCI device information struct **/ static void i40e_shutdown(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_hw *hw = &pf->hw; set_bit(__I40E_SUSPENDED, pf->state); set_bit(__I40E_DOWN, pf->state); del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); i40e_cloud_filter_exit(pf); i40e_fdir_teardown(pf); /* Client close must be called explicitly here because the timer * has been stopped. */ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) i40e_enable_mc_magic_wake(pf); i40e_prep_for_reset(pf, false); wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); /* Free MSI/legacy interrupt 0 when in recovery mode. */ if (test_bit(__I40E_RECOVERY_MODE, pf->state) && !(pf->flags & I40E_FLAG_MSIX_ENABLED)) free_irq(pf->pdev->irq, pf); /* Since we're going to destroy queues during the * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this * whole section */ rtnl_lock(); i40e_clear_interrupt_scheme(pf); rtnl_unlock(); if (system_state == SYSTEM_POWER_OFF) { pci_wake_from_d3(pdev, pf->wol_en); pci_set_power_state(pdev, PCI_D3hot); } } /** * i40e_suspend - PM callback for moving to D3 * @dev: generic device information structure **/ static int __maybe_unused i40e_suspend(struct device *dev) { struct i40e_pf *pf = dev_get_drvdata(dev); struct i40e_hw *hw = &pf->hw; /* If we're already suspended, then there is nothing to do */ if (test_and_set_bit(__I40E_SUSPENDED, pf->state)) return 0; set_bit(__I40E_DOWN, pf->state); /* Ensure service task will not be running */ del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); /* Client close must be called explicitly here because the timer * has been stopped. */ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) i40e_enable_mc_magic_wake(pf); /* Since we're going to destroy queues during the * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this * whole section */ rtnl_lock(); i40e_prep_for_reset(pf, true); wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); /* Clear the interrupt scheme and release our IRQs so that the system * can safely hibernate even when there are a large number of CPUs. * Otherwise hibernation might fail when mapping all the vectors back * to CPU0. 
*/ i40e_clear_interrupt_scheme(pf); rtnl_unlock(); return 0; } /** * i40e_resume - PM callback for waking up from D3 * @dev: generic device information structure **/ static int __maybe_unused i40e_resume(struct device *dev) { struct i40e_pf *pf = dev_get_drvdata(dev); int err; /* If we're not suspended, then there is nothing to do */ if (!test_bit(__I40E_SUSPENDED, pf->state)) return 0; /* We need to hold the RTNL lock prior to restoring interrupt schemes, * since we're going to be restoring queues */ rtnl_lock(); /* We cleared the interrupt scheme when we suspended, so we need to * restore it now to resume device functionality. */ err = i40e_restore_interrupt_scheme(pf); if (err) { dev_err(dev, "Cannot restore interrupt scheme: %d\n", err); } clear_bit(__I40E_DOWN, pf->state); i40e_reset_and_rebuild(pf, false, true); rtnl_unlock(); /* Clear suspended state last after everything is recovered */ clear_bit(__I40E_SUSPENDED, pf->state); /* Restart the service task */ mod_timer(&pf->service_timer, round_jiffies(jiffies + pf->service_timer_period)); return 0; } static const struct pci_error_handlers i40e_err_handler = { .error_detected = i40e_pci_error_detected, .slot_reset = i40e_pci_error_slot_reset, .reset_prepare = i40e_pci_error_reset_prepare, .reset_done = i40e_pci_error_reset_done, .resume = i40e_pci_error_resume, }; static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume); static struct pci_driver i40e_driver = { .name = i40e_driver_name, .id_table = i40e_pci_tbl, .probe = i40e_probe, .remove = i40e_remove, .driver = { .pm = &i40e_pm_ops, }, .shutdown = i40e_shutdown, .err_handler = &i40e_err_handler, .sriov_configure = i40e_pci_sriov_configure, }; /** * i40e_init_module - Driver registration routine * * i40e_init_module is the first routine called when the driver is * loaded. All it does is register with the PCI subsystem. **/ static int __init i40e_init_module(void) { pr_info("%s: %s - version %s\n", i40e_driver_name, i40e_driver_string, i40e_driver_version_str); pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); /* There is no need to throttle the number of active tasks because * each device limits its own task using a state bit for scheduling * the service task, and the device tasks do not interfere with each * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM * since we need to be able to guarantee forward progress even under * memory pressure. */ i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name); if (!i40e_wq) { pr_err("%s: Failed to create workqueue\n", i40e_driver_name); return -ENOMEM; } i40e_dbg_init(); return pci_register_driver(&i40e_driver); } module_init(i40e_init_module); /** * i40e_exit_module - Driver exit cleanup routine * * i40e_exit_module is called just before the driver is removed * from memory. **/ static void __exit i40e_exit_module(void) { pci_unregister_driver(&i40e_driver); destroy_workqueue(i40e_wq); i40e_dbg_exit(); } module_exit(i40e_exit_module);
/* * Broadcom NetXtreme-E RoCE driver. * * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term * Broadcom refers to Broadcom Limited and/or its subsidiaries. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * BSD license below: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Description: IB Verbs interpreter */ #include <linux/interrupt.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/if_ether.h> #include <rdma/ib_verbs.h> #include <rdma/ib_user_verbs.h> #include <rdma/ib_umem.h> #include <rdma/ib_addr.h> #include <rdma/ib_mad.h> #include <rdma/ib_cache.h> #include <rdma/uverbs_ioctl.h> #include "bnxt_ulp.h" #include "roce_hsi.h" #include "qplib_res.h" #include "qplib_sp.h" #include "qplib_fp.h" #include "qplib_rcfw.h" #include "bnxt_re.h" #include "ib_verbs.h" #include <rdma/bnxt_re-abi.h> static int __from_ib_access_flags(int iflags) { int qflags = 0; if (iflags & IB_ACCESS_LOCAL_WRITE) qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; if (iflags & IB_ACCESS_REMOTE_READ) qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ; if (iflags & IB_ACCESS_REMOTE_WRITE) qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE; if (iflags & IB_ACCESS_REMOTE_ATOMIC) qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC; if (iflags & IB_ACCESS_MW_BIND) qflags |= BNXT_QPLIB_ACCESS_MW_BIND; if (iflags & IB_ZERO_BASED) qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED; if (iflags & IB_ACCESS_ON_DEMAND) qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND; return qflags; }; static enum ib_access_flags __to_ib_access_flags(int qflags) { enum ib_access_flags iflags = 0; if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE) iflags |= IB_ACCESS_LOCAL_WRITE; if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE) iflags |= IB_ACCESS_REMOTE_WRITE; if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ) iflags |= IB_ACCESS_REMOTE_READ; if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC) iflags |= IB_ACCESS_REMOTE_ATOMIC; if (qflags & BNXT_QPLIB_ACCESS_MW_BIND) iflags |= IB_ACCESS_MW_BIND; if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED) iflags |= IB_ZERO_BASED; if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND) iflags |= IB_ACCESS_ON_DEMAND; return iflags; }; static int 
bnxt_re_build_sgl(struct ib_sge *ib_sg_list, struct bnxt_qplib_sge *sg_list, int num) { int i, total = 0; for (i = 0; i < num; i++) { sg_list[i].addr = ib_sg_list[i].addr; sg_list[i].lkey = ib_sg_list[i].lkey; sg_list[i].size = ib_sg_list[i].length; total += sg_list[i].size; } return total; } /* Device */ int bnxt_re_query_device(struct ib_device *ibdev, struct ib_device_attr *ib_attr, struct ib_udata *udata) { struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; memset(ib_attr, 0, sizeof(*ib_attr)); memcpy(&ib_attr->fw_ver, dev_attr->fw_ver, min(sizeof(dev_attr->fw_ver), sizeof(ib_attr->fw_ver))); bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ib_attr->sys_image_guid); ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE; ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M; ib_attr->vendor_id = rdev->en_dev->pdev->vendor; ib_attr->vendor_part_id = rdev->en_dev->pdev->device; ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device; ib_attr->max_qp = dev_attr->max_qp; ib_attr->max_qp_wr = dev_attr->max_qp_wqes; ib_attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD | IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_SHUTDOWN_PORT | IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_RESIZE_MAX_WR | IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_N_NOTIFY_CQ | IB_DEVICE_MEM_WINDOW | IB_DEVICE_MEM_WINDOW_TYPE_2B | IB_DEVICE_MEM_MGT_EXTENSIONS; ib_attr->max_send_sge = dev_attr->max_qp_sges; ib_attr->max_recv_sge = dev_attr->max_qp_sges; ib_attr->max_sge_rd = dev_attr->max_qp_sges; ib_attr->max_cq = dev_attr->max_cq; ib_attr->max_cqe = dev_attr->max_cq_wqes; ib_attr->max_mr = dev_attr->max_mr; ib_attr->max_pd = dev_attr->max_pd; ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom; ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom; ib_attr->atomic_cap = IB_ATOMIC_NONE; ib_attr->masked_atomic_cap = IB_ATOMIC_NONE; ib_attr->max_ee_rd_atom = 0; ib_attr->max_res_rd_atom = 0; ib_attr->max_ee_init_rd_atom = 0; ib_attr->max_ee = 0; ib_attr->max_rdd = 0; ib_attr->max_mw = dev_attr->max_mw; ib_attr->max_raw_ipv6_qp = 0; ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp; ib_attr->max_mcast_grp = 0; ib_attr->max_mcast_qp_attach = 0; ib_attr->max_total_mcast_qp_attach = 0; ib_attr->max_ah = dev_attr->max_ah; ib_attr->max_fmr = 0; ib_attr->max_map_per_fmr = 0; ib_attr->max_srq = dev_attr->max_srq; ib_attr->max_srq_wr = dev_attr->max_srq_wqes; ib_attr->max_srq_sge = dev_attr->max_srq_sges; ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS; ib_attr->max_pkeys = 1; ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY; return 0; } int bnxt_re_modify_device(struct ib_device *ibdev, int device_modify_mask, struct ib_device_modify *device_modify) { switch (device_modify_mask) { case IB_DEVICE_MODIFY_SYS_IMAGE_GUID: /* Modify the GUID requires the modification of the GID table */ /* GUID should be made as READ-ONLY */ break; case IB_DEVICE_MODIFY_NODE_DESC: /* Node Desc should be made as READ-ONLY */ break; default: break; } return 0; } /* Port */ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num, struct ib_port_attr *port_attr) { struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; memset(port_attr, 0, sizeof(*port_attr)); if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) { port_attr->state = IB_PORT_ACTIVE; port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP; } else { port_attr->state = IB_PORT_DOWN; port_attr->phys_state = 
IB_PORT_PHYS_STATE_DISABLED; } port_attr->max_mtu = IB_MTU_4096; port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu); port_attr->gid_tbl_len = dev_attr->max_sgid; port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP | IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP; port_attr->ip_gids = true; port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW; port_attr->bad_pkey_cntr = 0; port_attr->qkey_viol_cntr = 0; port_attr->pkey_tbl_len = dev_attr->max_pkey; port_attr->lid = 0; port_attr->sm_lid = 0; port_attr->lmc = 0; port_attr->max_vl_num = 4; port_attr->sm_sl = 0; port_attr->subnet_timeout = 0; port_attr->init_type_reply = 0; port_attr->active_speed = rdev->active_speed; port_attr->active_width = rdev->active_width; return 0; } int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num, struct ib_port_immutable *immutable) { struct ib_port_attr port_attr; if (bnxt_re_query_port(ibdev, port_num, &port_attr)) return -EINVAL; immutable->pkey_tbl_len = port_attr.pkey_tbl_len; immutable->gid_tbl_len = port_attr.gid_tbl_len; immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE; immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP; immutable->max_mad_size = IB_MGMT_MAD_SIZE; return 0; } void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str) { struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d", rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1], rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]); } int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index, u16 *pkey) { struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); /* Ignore port_num */ memset(pkey, 0, sizeof(*pkey)); return bnxt_qplib_get_pkey(&rdev->qplib_res, &rdev->qplib_res.pkey_tbl, index, pkey); } int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num, int index, union ib_gid *gid) { struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); int rc = 0; /* Ignore port_num */ memset(gid, 0, sizeof(*gid)); rc = bnxt_qplib_get_sgid(&rdev->qplib_res, &rdev->qplib_res.sgid_tbl, index, (struct bnxt_qplib_gid *)gid); return rc; } int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context) { int rc = 0; struct bnxt_re_gid_ctx *ctx, **ctx_tbl; struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev); struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; struct bnxt_qplib_gid *gid_to_del; u16 vlan_id = 0xFFFF; /* Delete the entry from the hardware */ ctx = *context; if (!ctx) return -EINVAL; if (sgid_tbl && sgid_tbl->active) { if (ctx->idx >= sgid_tbl->max) return -EINVAL; gid_to_del = &sgid_tbl->tbl[ctx->idx].gid; vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id; /* DEL_GID is called in WQ context(netdevice_event_work_handler) * or via the ib_unregister_device path. In the former case QP1 * may not be destroyed yet, in which case just return as FW * needs that entry to be present and will fail it's deletion. 
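* (The code below therefore returns -EFAULT for GID0 while QP1 is still alive and the context refcnt is 1, leaving the entry in place.)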
* We could get invoked again after QP1 is destroyed OR get an * ADD_GID call with a different GID value for the same index * where we issue MODIFY_GID cmd to update the GID entry -- TBD */ if (ctx->idx == 0 && rdma_link_local_addr((struct in6_addr *)gid_to_del) && ctx->refcnt == 1 && rdev->qp1_sqp) { dev_dbg(rdev_to_dev(rdev), "Trying to delete GID0 while QP1 is alive\n"); return -EFAULT; } ctx->refcnt--; if (!ctx->refcnt) { rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, vlan_id, true); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to remove GID: %#x", rc); } else { ctx_tbl = sgid_tbl->ctx; ctx_tbl[ctx->idx] = NULL; kfree(ctx); } } } else { return -EINVAL; } return rc; } int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context) { int rc; u32 tbl_idx = 0; u16 vlan_id = 0xFFFF; struct bnxt_re_gid_ctx *ctx, **ctx_tbl; struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev); struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL); if (rc) return rc; rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid, rdev->qplib_res.netdev->dev_addr, vlan_id, true, &tbl_idx); if (rc == -EALREADY) { ctx_tbl = sgid_tbl->ctx; ctx_tbl[tbl_idx]->refcnt++; *context = ctx_tbl[tbl_idx]; return 0; } if (rc < 0) { dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc); return rc; } ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx_tbl = sgid_tbl->ctx; ctx->idx = tbl_idx; ctx->refcnt = 1; ctx_tbl[tbl_idx] = ctx; *context = ctx; return rc; } enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev, u8 port_num) { return IB_LINK_LAYER_ETHERNET; } #define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE) static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd) { struct bnxt_re_fence_data *fence = &pd->fence; struct ib_mr *ib_mr = &fence->mr->ib_mr; struct bnxt_qplib_swqe *wqe = &fence->bind_wqe; memset(wqe, 0, sizeof(*wqe)); wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW; wqe->wr_id = BNXT_QPLIB_FENCE_WRID; wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; wqe->bind.zero_based = false; wqe->bind.parent_l_key = ib_mr->lkey; wqe->bind.va = (u64)(unsigned long)fence->va; wqe->bind.length = fence->size; wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ); wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1; /* Save the initial rkey in fence structure for now; * wqe->bind.r_key will be set at (re)bind time. 
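* Each time the fence is (re)bound, bnxt_re_bind_fence_mw() posts the saved
* rkey and then advances it with ib_inc_rkey() for the next bind.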
*/ fence->bind_rkey = ib_inc_rkey(fence->mw->rkey); } static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp) { struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp, qplib_qp); struct ib_pd *ib_pd = qp->ib_qp.pd; struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); struct bnxt_re_fence_data *fence = &pd->fence; struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe; struct bnxt_qplib_swqe wqe; int rc; memcpy(&wqe, fence_wqe, sizeof(wqe)); wqe.bind.r_key = fence->bind_rkey; fence->bind_rkey = ib_inc_rkey(fence->bind_rkey); dev_dbg(rdev_to_dev(qp->rdev), "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n", wqe.bind.r_key, qp->qplib_qp.id, pd); rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); if (rc) { dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n"); return rc; } bnxt_qplib_post_send_db(&qp->qplib_qp); return rc; } static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd) { struct bnxt_re_fence_data *fence = &pd->fence; struct bnxt_re_dev *rdev = pd->rdev; struct device *dev = &rdev->en_dev->pdev->dev; struct bnxt_re_mr *mr = fence->mr; if (fence->mw) { bnxt_re_dealloc_mw(fence->mw); fence->mw = NULL; } if (mr) { if (mr->ib_mr.rkey) bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr, true); if (mr->ib_mr.lkey) bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); kfree(mr); fence->mr = NULL; } if (fence->dma_addr) { dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES, DMA_BIDIRECTIONAL); fence->dma_addr = 0; } } static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd) { int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND; struct bnxt_re_fence_data *fence = &pd->fence; struct bnxt_re_dev *rdev = pd->rdev; struct device *dev = &rdev->en_dev->pdev->dev; struct bnxt_re_mr *mr = NULL; dma_addr_t dma_addr = 0; struct ib_mw *mw; u64 pbl_tbl; int rc; dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES, DMA_BIDIRECTIONAL); rc = dma_mapping_error(dev, dma_addr); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n"); rc = -EIO; fence->dma_addr = 0; goto fail; } fence->dma_addr = dma_addr; /* Allocate a MR */ mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) { rc = -ENOMEM; goto fail; } fence->mr = mr; mr->rdev = rdev; mr->qplib_mr.pd = &pd->qplib_pd; mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n"); goto fail; } /* Register MR */ mr->ib_mr.lkey = mr->qplib_mr.lkey; mr->qplib_mr.va = (u64)(unsigned long)fence->va; mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES; pbl_tbl = dma_addr; rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl, BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n"); goto fail; } mr->ib_mr.rkey = mr->qplib_mr.rkey; /* Create a fence MW only for kernel consumers */ mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL); if (IS_ERR(mw)) { dev_err(rdev_to_dev(rdev), "Failed to create fence-MW for PD: %p\n", pd); rc = PTR_ERR(mw); goto fail; } fence->mw = mw; bnxt_re_create_fence_wqe(pd); return 0; fail: bnxt_re_destroy_fence_mr(pd); return rc; } /* Protection Domains */ void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata) { struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); struct bnxt_re_dev *rdev = pd->rdev; bnxt_re_destroy_fence_mr(pd); if (pd->qplib_pd.id) 
bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, &pd->qplib_pd); } int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct ib_device *ibdev = ibpd->device; struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context( udata, struct bnxt_re_ucontext, ib_uctx); struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd); int rc; pd->rdev = rdev; if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) { dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD"); rc = -ENOMEM; goto fail; } if (udata) { struct bnxt_re_pd_resp resp; if (!ucntx->dpi.dbr) { /* Allocate DPI in alloc_pd to avoid failing of * ibv_devinfo and family of application when DPIs * are depleted. */ if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl, &ucntx->dpi, ucntx)) { rc = -ENOMEM; goto dbfail; } } resp.pdid = pd->qplib_pd.id; /* Still allow mapping this DBR to the new user PD. */ resp.dpi = ucntx->dpi.dpi; resp.dbr = (u64)ucntx->dpi.umdbr; rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to copy user response\n"); goto dbfail; } } if (!udata) if (bnxt_re_create_fence_mr(pd)) dev_warn(rdev_to_dev(rdev), "Failed to create Fence-MR\n"); return 0; dbfail: bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, &pd->qplib_pd); fail: return rc; } /* Address Handles */ void bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags) { struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah); struct bnxt_re_dev *rdev = ah->rdev; bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, !(flags & RDMA_DESTROY_AH_SLEEPABLE)); } static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype) { u8 nw_type; switch (ntype) { case RDMA_NETWORK_IPV4: nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4; break; case RDMA_NETWORK_IPV6: nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6; break; default: nw_type = CMDQ_CREATE_AH_TYPE_V1; break; } return nw_type; } int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr, u32 flags, struct ib_udata *udata) { struct ib_pd *ib_pd = ib_ah->pd; struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr); struct bnxt_re_dev *rdev = pd->rdev; const struct ib_gid_attr *sgid_attr; struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah); u8 nw_type; int rc; if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) { dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set"); return -EINVAL; } ah->rdev = rdev; ah->qplib_ah.pd = &pd->qplib_pd; /* Supply the configuration for the HW */ memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw, sizeof(union ib_gid)); /* * If RoCE V2 is enabled, stack will have two entries for * each GID entry. Avoiding this duplicte entry in HW. 
Dividing * the GID index by 2 for RoCE V2 */ ah->qplib_ah.sgid_index = grh->sgid_index / 2; ah->qplib_ah.host_sgid_index = grh->sgid_index; ah->qplib_ah.traffic_class = grh->traffic_class; ah->qplib_ah.flow_label = grh->flow_label; ah->qplib_ah.hop_limit = grh->hop_limit; ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr); sgid_attr = grh->sgid_attr; /* Get network header type for this GID */ nw_type = rdma_gid_attr_network_type(sgid_attr); ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type); memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN); rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, !(flags & RDMA_CREATE_AH_SLEEPABLE)); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH"); return rc; } /* Write AVID to shared page. */ if (udata) { struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context( udata, struct bnxt_re_ucontext, ib_uctx); unsigned long flag; u32 *wrptr; spin_lock_irqsave(&uctx->sh_lock, flag); wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT); *wrptr = ah->qplib_ah.id; wmb(); /* make sure cache is updated. */ spin_unlock_irqrestore(&uctx->sh_lock, flag); } return 0; } int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr) { return 0; } int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr) { struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah); ah_attr->type = ib_ah->type; rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl); memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN); rdma_ah_set_grh(ah_attr, NULL, 0, ah->qplib_ah.host_sgid_index, 0, ah->qplib_ah.traffic_class); rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data); rdma_ah_set_port_num(ah_attr, 1); rdma_ah_set_static_rate(ah_attr, 0); return 0; } unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp) __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock) { unsigned long flags; spin_lock_irqsave(&qp->scq->cq_lock, flags); if (qp->rcq != qp->scq) spin_lock(&qp->rcq->cq_lock); else __acquire(&qp->rcq->cq_lock); return flags; } void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags) __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock) { if (qp->rcq != qp->scq) spin_unlock(&qp->rcq->cq_lock); else __release(&qp->rcq->cq_lock); spin_unlock_irqrestore(&qp->scq->cq_lock, flags); } /* Queue Pairs */ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) { struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); struct bnxt_re_dev *rdev = qp->rdev; unsigned int flags; int rc; bnxt_qplib_flush_cqn_wq(&qp->qplib_qp); rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP"); return rc; } if (rdma_is_kernel_res(&qp->ib_qp.res)) { flags = bnxt_re_lock_cqs(qp); bnxt_qplib_clean_qp(&qp->qplib_qp); bnxt_re_unlock_cqs(qp, flags); } bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp); if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) { bnxt_qplib_destroy_ah(&rdev->qplib_res, &rdev->sqp_ah->qplib_ah, false); bnxt_qplib_clean_qp(&qp->qplib_qp); rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &rdev->qp1_sqp->qplib_qp); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to destroy Shadow QP"); return rc; } bnxt_qplib_free_qp_res(&rdev->qplib_res, &rdev->qp1_sqp->qplib_qp); mutex_lock(&rdev->qp_lock); list_del(&rdev->qp1_sqp->list); atomic_dec(&rdev->qp_count); mutex_unlock(&rdev->qp_lock); kfree(rdev->sqp_ah); kfree(rdev->qp1_sqp); rdev->qp1_sqp = NULL; rdev->sqp_ah = NULL; } ib_umem_release(qp->rumem); ib_umem_release(qp->sumem); 
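/* Finally drop the QP from the driver's tracking list and free the software state. */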
mutex_lock(&rdev->qp_lock); list_del(&qp->list); atomic_dec(&rdev->qp_count); mutex_unlock(&rdev->qp_lock); kfree(qp); return 0; } static u8 __from_ib_qp_type(enum ib_qp_type type) { switch (type) { case IB_QPT_GSI: return CMDQ_CREATE_QP1_TYPE_GSI; case IB_QPT_RC: return CMDQ_CREATE_QP_TYPE_RC; case IB_QPT_UD: return CMDQ_CREATE_QP_TYPE_UD; default: return IB_QPT_MAX; } } static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd, struct bnxt_re_qp *qp, struct ib_udata *udata) { struct bnxt_re_qp_req ureq; struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp; struct ib_umem *umem; int bytes = 0, psn_sz; struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context( udata, struct bnxt_re_ucontext, ib_uctx); if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) return -EFAULT; bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE); /* Consider mapping PSN search memory only for RC QPs. */ if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) { psn_sz = bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ? sizeof(struct sq_psn_search_ext) : sizeof(struct sq_psn_search); bytes += (qplib_qp->sq.max_wqe * psn_sz); } bytes = PAGE_ALIGN(bytes); umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE, 1); if (IS_ERR(umem)) return PTR_ERR(umem); qp->sumem = umem; qplib_qp->sq.sg_info.sglist = umem->sg_head.sgl; qplib_qp->sq.sg_info.npages = ib_umem_num_pages(umem); qplib_qp->sq.sg_info.nmap = umem->nmap; qplib_qp->qp_handle = ureq.qp_handle; if (!qp->qplib_qp.srq) { bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE); bytes = PAGE_ALIGN(bytes); umem = ib_umem_get(udata, ureq.qprva, bytes, IB_ACCESS_LOCAL_WRITE, 1); if (IS_ERR(umem)) goto rqfail; qp->rumem = umem; qplib_qp->rq.sg_info.sglist = umem->sg_head.sgl; qplib_qp->rq.sg_info.npages = ib_umem_num_pages(umem); qplib_qp->rq.sg_info.nmap = umem->nmap; } qplib_qp->dpi = &cntx->dpi; return 0; rqfail: ib_umem_release(qp->sumem); qp->sumem = NULL; memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info)); return PTR_ERR(umem); } static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah (struct bnxt_re_pd *pd, struct bnxt_qplib_res *qp1_res, struct bnxt_qplib_qp *qp1_qp) { struct bnxt_re_dev *rdev = pd->rdev; struct bnxt_re_ah *ah; union ib_gid sgid; int rc; ah = kzalloc(sizeof(*ah), GFP_KERNEL); if (!ah) return NULL; ah->rdev = rdev; ah->qplib_ah.pd = &pd->qplib_pd; rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid); if (rc) goto fail; /* supply the dgid data same as sgid */ memcpy(ah->qplib_ah.dgid.data, &sgid.raw, sizeof(union ib_gid)); ah->qplib_ah.sgid_index = 0; ah->qplib_ah.traffic_class = 0; ah->qplib_ah.flow_label = 0; ah->qplib_ah.hop_limit = 1; ah->qplib_ah.sl = 0; /* Have DMAC same as SMAC */ ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr); rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH for Shadow QP"); goto fail; } return ah; fail: kfree(ah); return NULL; } static struct bnxt_re_qp *bnxt_re_create_shadow_qp (struct bnxt_re_pd *pd, struct bnxt_qplib_res *qp1_res, struct bnxt_qplib_qp *qp1_qp) { struct bnxt_re_dev *rdev = pd->rdev; struct bnxt_re_qp *qp; int rc; qp = kzalloc(sizeof(*qp), GFP_KERNEL); if (!qp) return NULL; qp->rdev = rdev; /* Initialize the shadow QP structure from the QP1 values */ ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr); qp->qplib_qp.pd = &pd->qplib_pd; qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp); qp->qplib_qp.type = IB_QPT_UD; qp->qplib_qp.max_inline_data = 0; 
qp->qplib_qp.sig_type = true; /* Shadow QP SQ depth should be same as QP1 RQ depth */ qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe; qp->qplib_qp.sq.max_sge = 2; /* Q full delta can be 1 since it is internal QP */ qp->qplib_qp.sq.q_full_delta = 1; qp->qplib_qp.scq = qp1_qp->scq; qp->qplib_qp.rcq = qp1_qp->rcq; qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe; qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge; /* Q full delta can be 1 since it is internal QP */ qp->qplib_qp.rq.q_full_delta = 1; qp->qplib_qp.mtu = qp1_qp->mtu; qp->qplib_qp.sq_hdr_buf_size = 0; qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6; qp->qplib_qp.dpi = &rdev->dpi_privileged; rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp); if (rc) goto fail; rdev->sqp_id = qp->qplib_qp.id; spin_lock_init(&qp->sq_lock); INIT_LIST_HEAD(&qp->list); mutex_lock(&rdev->qp_lock); list_add_tail(&qp->list, &rdev->qp_list); atomic_inc(&rdev->qp_count); mutex_unlock(&rdev->qp_lock); return qp; fail: kfree(qp); return NULL; } struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, struct ib_qp_init_attr *qp_init_attr, struct ib_udata *udata) { struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); struct bnxt_re_dev *rdev = pd->rdev; struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; struct bnxt_re_qp *qp; struct bnxt_re_cq *cq; struct bnxt_re_srq *srq; int rc, entries; if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) || (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) || (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) || (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) || (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data)) return ERR_PTR(-EINVAL); qp = kzalloc(sizeof(*qp), GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); qp->rdev = rdev; ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr); qp->qplib_qp.pd = &pd->qplib_pd; qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp); qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type); if (qp_init_attr->qp_type == IB_QPT_GSI && bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx)) qp->qplib_qp.type = CMDQ_CREATE_QP_TYPE_GSI; if (qp->qplib_qp.type == IB_QPT_MAX) { dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported", qp->qplib_qp.type); rc = -EINVAL; goto fail; } qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data; qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 
true : false); qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge; if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges) qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges; if (qp_init_attr->send_cq) { cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq, ib_cq); if (!cq) { dev_err(rdev_to_dev(rdev), "Send CQ not found"); rc = -EINVAL; goto fail; } qp->qplib_qp.scq = &cq->qplib_cq; qp->scq = cq; } if (qp_init_attr->recv_cq) { cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq, ib_cq); if (!cq) { dev_err(rdev_to_dev(rdev), "Receive CQ not found"); rc = -EINVAL; goto fail; } qp->qplib_qp.rcq = &cq->qplib_cq; qp->rcq = cq; } if (qp_init_attr->srq) { srq = container_of(qp_init_attr->srq, struct bnxt_re_srq, ib_srq); if (!srq) { dev_err(rdev_to_dev(rdev), "SRQ not found"); rc = -EINVAL; goto fail; } qp->qplib_qp.srq = &srq->qplib_srq; qp->qplib_qp.rq.max_wqe = 0; } else { /* Allocate 1 more than what's provided so posting max doesn't * mean empty */ entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1); qp->qplib_qp.rq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - qp_init_attr->cap.max_recv_wr; qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge; if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; } qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); if (qp_init_attr->qp_type == IB_QPT_GSI && !(bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))) { /* Allocate 1 more than what's provided */ entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1); qp->qplib_qp.sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - qp_init_attr->cap.max_send_wr; qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; qp->qplib_qp.sq.max_sge++; if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges) qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges; qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2; qp->qplib_qp.sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2; qp->qplib_qp.dpi = &rdev->dpi_privileged; rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to create HW QP1"); goto fail; } /* Create a shadow QP to handle the QP1 traffic */ rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp); if (!rdev->qp1_sqp) { rc = -EINVAL; dev_err(rdev_to_dev(rdev), "Failed to create Shadow QP for QP1"); goto qp_destroy; } rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res, &qp->qplib_qp); if (!rdev->sqp_ah) { bnxt_qplib_destroy_qp(&rdev->qplib_res, &rdev->qp1_sqp->qplib_qp); rc = -EINVAL; dev_err(rdev_to_dev(rdev), "Failed to create AH entry for ShadowQP"); goto qp_destroy; } } else { /* Allocate 128 + 1 more than what's provided */ entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + BNXT_QPLIB_RESERVED_QP_WRS + 1); qp->qplib_qp.sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + BNXT_QPLIB_RESERVED_QP_WRS + 1); qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1; /* * Reserving one slot for Phantom WQE. Application can * post one extra entry in this case. 
But allowing this to avoid * unexpected Queue full condition */ qp->qplib_qp.sq.q_full_delta -= 1; qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom; qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; if (udata) { rc = bnxt_re_init_user_qp(rdev, pd, qp, udata); if (rc) goto fail; } else { qp->qplib_qp.dpi = &rdev->dpi_privileged; } rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to create HW QP"); goto free_umem; } } qp->ib_qp.qp_num = qp->qplib_qp.id; spin_lock_init(&qp->sq_lock); spin_lock_init(&qp->rq_lock); if (udata) { struct bnxt_re_qp_resp resp; resp.qpid = qp->ib_qp.qp_num; resp.rsvd = 0; rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to copy QP udata"); goto qp_destroy; } } INIT_LIST_HEAD(&qp->list); mutex_lock(&rdev->qp_lock); list_add_tail(&qp->list, &rdev->qp_list); atomic_inc(&rdev->qp_count); mutex_unlock(&rdev->qp_lock); return &qp->ib_qp; qp_destroy: bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); free_umem: ib_umem_release(qp->rumem); ib_umem_release(qp->sumem); fail: kfree(qp); return ERR_PTR(rc); } static u8 __from_ib_qp_state(enum ib_qp_state state) { switch (state) { case IB_QPS_RESET: return CMDQ_MODIFY_QP_NEW_STATE_RESET; case IB_QPS_INIT: return CMDQ_MODIFY_QP_NEW_STATE_INIT; case IB_QPS_RTR: return CMDQ_MODIFY_QP_NEW_STATE_RTR; case IB_QPS_RTS: return CMDQ_MODIFY_QP_NEW_STATE_RTS; case IB_QPS_SQD: return CMDQ_MODIFY_QP_NEW_STATE_SQD; case IB_QPS_SQE: return CMDQ_MODIFY_QP_NEW_STATE_SQE; case IB_QPS_ERR: default: return CMDQ_MODIFY_QP_NEW_STATE_ERR; } } static enum ib_qp_state __to_ib_qp_state(u8 state) { switch (state) { case CMDQ_MODIFY_QP_NEW_STATE_RESET: return IB_QPS_RESET; case CMDQ_MODIFY_QP_NEW_STATE_INIT: return IB_QPS_INIT; case CMDQ_MODIFY_QP_NEW_STATE_RTR: return IB_QPS_RTR; case CMDQ_MODIFY_QP_NEW_STATE_RTS: return IB_QPS_RTS; case CMDQ_MODIFY_QP_NEW_STATE_SQD: return IB_QPS_SQD; case CMDQ_MODIFY_QP_NEW_STATE_SQE: return IB_QPS_SQE; case CMDQ_MODIFY_QP_NEW_STATE_ERR: default: return IB_QPS_ERR; } } static u32 __from_ib_mtu(enum ib_mtu mtu) { switch (mtu) { case IB_MTU_256: return CMDQ_MODIFY_QP_PATH_MTU_MTU_256; case IB_MTU_512: return CMDQ_MODIFY_QP_PATH_MTU_MTU_512; case IB_MTU_1024: return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024; case IB_MTU_2048: return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048; case IB_MTU_4096: return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096; default: return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048; } } static enum ib_mtu __to_ib_mtu(u32 mtu) { switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) { case CMDQ_MODIFY_QP_PATH_MTU_MTU_256: return IB_MTU_256; case CMDQ_MODIFY_QP_PATH_MTU_MTU_512: return IB_MTU_512; case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024: return IB_MTU_1024; case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048: return IB_MTU_2048; case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096: return IB_MTU_4096; default: return IB_MTU_2048; } } /* Shared Receive Queues */ void bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata) { struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq); struct bnxt_re_dev *rdev = srq->rdev; struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq; struct bnxt_qplib_nq *nq = NULL; if (qplib_srq->cq) nq = qplib_srq->cq->nq; bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq); ib_umem_release(srq->umem); atomic_dec(&rdev->srq_count); if (nq) nq->budget--; } static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd, struct bnxt_re_srq *srq, struct ib_udata *udata) { struct 
bnxt_re_srq_req ureq; struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq; struct ib_umem *umem; int bytes = 0; struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context( udata, struct bnxt_re_ucontext, ib_uctx); if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) return -EFAULT; bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE); bytes = PAGE_ALIGN(bytes); umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE, 1); if (IS_ERR(umem)) return PTR_ERR(umem); srq->umem = umem; qplib_srq->sg_info.sglist = umem->sg_head.sgl; qplib_srq->sg_info.npages = ib_umem_num_pages(umem); qplib_srq->sg_info.nmap = umem->nmap; qplib_srq->srq_handle = ureq.srq_handle; qplib_srq->dpi = &cntx->dpi; return 0; }
int bnxt_re_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *srq_init_attr, struct ib_udata *udata) { struct ib_pd *ib_pd = ib_srq->pd; struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); struct bnxt_re_dev *rdev = pd->rdev; struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq); struct bnxt_qplib_nq *nq = NULL; int rc, entries; if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) { dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded"); rc = -EINVAL; goto exit; } if (srq_init_attr->srq_type != IB_SRQT_BASIC) { rc = -EOPNOTSUPP; goto exit; } srq->rdev = rdev; srq->qplib_srq.pd = &pd->qplib_pd; srq->qplib_srq.dpi = &rdev->dpi_privileged; /* Allocate 1 more than what's provided so posting max doesn't * mean empty */ entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1); if (entries > dev_attr->max_srq_wqes + 1) entries = dev_attr->max_srq_wqes + 1; srq->qplib_srq.max_wqe = entries; srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge; srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit; srq->srq_limit = srq_init_attr->attr.srq_limit; srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id; nq = &rdev->nq[0]; if (udata) { rc = bnxt_re_init_user_srq(rdev, pd, srq, udata); if (rc) goto fail; } rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq); if (rc) { dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!"); goto fail; } if (udata) { struct bnxt_re_srq_resp resp; resp.srqid = srq->qplib_srq.id; rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (rc) { dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!"); bnxt_qplib_destroy_srq(&rdev->qplib_res, &srq->qplib_srq); goto exit; } } if (nq) nq->budget++; atomic_inc(&rdev->srq_count); return 0; fail: ib_umem_release(srq->umem); exit: return rc; }
int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, enum ib_srq_attr_mask srq_attr_mask, struct ib_udata *udata) { struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq); struct bnxt_re_dev *rdev = srq->rdev; int rc; switch (srq_attr_mask) { case IB_SRQ_MAX_WR: /* SRQ resize is not supported */ break; case IB_SRQ_LIMIT: /* Change the SRQ threshold */ if (srq_attr->srq_limit > srq->qplib_srq.max_wqe) return -EINVAL; srq->qplib_srq.threshold = srq_attr->srq_limit; rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq); if (rc) { dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!"); return rc; } /* On success, update the shadow */ srq->srq_limit = srq_attr->srq_limit; /* No need to build and send a response back to udata */ break; default: dev_err(rdev_to_dev(rdev), "Unsupported srq_attr_mask 0x%x", srq_attr_mask); return -EINVAL; } return 0; }
int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr) { 
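/* Query the live SRQ state through a scratch qplib_srq so only the current
 * threshold is read back from hardware; max_wr and max_sge are reported from
 * the cached software copy.
 */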
struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq); struct bnxt_re_srq tsrq; struct bnxt_re_dev *rdev = srq->rdev; int rc; /* Get live SRQ attr */ tsrq.qplib_srq.id = srq->qplib_srq.id; rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq); if (rc) { dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!"); return rc; } srq_attr->max_wr = srq->qplib_srq.max_wqe; srq_attr->max_sge = srq->qplib_srq.max_sge; srq_attr->srq_limit = tsrq.qplib_srq.threshold; return 0; } int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq); struct bnxt_qplib_swqe wqe; unsigned long flags; int rc = 0; spin_lock_irqsave(&srq->lock, flags); while (wr) { /* Transcribe each ib_recv_wr to qplib_swqe */ wqe.num_sge = wr->num_sge; bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge); wqe.wr_id = wr->wr_id; wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV; rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe); if (rc) { *bad_wr = wr; break; } wr = wr->next; } spin_unlock_irqrestore(&srq->lock, flags); return rc; } static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp1_qp, int qp_attr_mask) { struct bnxt_re_qp *qp = rdev->qp1_sqp; int rc = 0; if (qp_attr_mask & IB_QP_STATE) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE; qp->qplib_qp.state = qp1_qp->qplib_qp.state; } if (qp_attr_mask & IB_QP_PKEY_INDEX) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY; qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index; } if (qp_attr_mask & IB_QP_QKEY) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY; /* Using a Random QKEY */ qp->qplib_qp.qkey = 0x81818181; } if (qp_attr_mask & IB_QP_SQ_PSN) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN; qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn; } rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp); if (rc) dev_err(rdev_to_dev(rdev), "Failed to modify Shadow QP for QP1"); return rc; } int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_udata *udata) { struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); struct bnxt_re_dev *rdev = qp->rdev; struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; enum ib_qp_state curr_qp_state, new_qp_state; int rc, entries; unsigned int flags; u8 nw_type; qp->qplib_qp.modify_flags = 0; if (qp_attr_mask & IB_QP_STATE) { curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state); new_qp_state = qp_attr->qp_state; if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state, ib_qp->qp_type, qp_attr_mask)) { dev_err(rdev_to_dev(rdev), "Invalid attribute mask: %#x specified ", qp_attr_mask); dev_err(rdev_to_dev(rdev), "for qpn: %#x type: %#x", ib_qp->qp_num, ib_qp->qp_type); dev_err(rdev_to_dev(rdev), "curr_qp_state=0x%x, new_qp_state=0x%x\n", curr_qp_state, new_qp_state); return -EINVAL; } qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE; qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state); if (!qp->sumem && qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { dev_dbg(rdev_to_dev(rdev), "Move QP = %p to flush list\n", qp); flags = bnxt_re_lock_cqs(qp); bnxt_qplib_add_flush_qp(&qp->qplib_qp); bnxt_re_unlock_cqs(qp, flags); } if (!qp->sumem && qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) { dev_dbg(rdev_to_dev(rdev), "Move QP = %p out of flush list\n", qp); flags = bnxt_re_lock_cqs(qp); bnxt_qplib_clean_qp(&qp->qplib_qp); 
bnxt_re_unlock_cqs(qp, flags); } } if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY; qp->qplib_qp.en_sqd_async_notify = true; } if (qp_attr_mask & IB_QP_ACCESS_FLAGS) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS; qp->qplib_qp.access = __from_ib_access_flags(qp_attr->qp_access_flags); /* LOCAL_WRITE access must be set to allow RC receive */ qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; /* Temp: Set all params on QP as of now */ qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE; qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ; } if (qp_attr_mask & IB_QP_PKEY_INDEX) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY; qp->qplib_qp.pkey_index = qp_attr->pkey_index; } if (qp_attr_mask & IB_QP_QKEY) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY; qp->qplib_qp.qkey = qp_attr->qkey; } if (qp_attr_mask & IB_QP_AV) { const struct ib_global_route *grh = rdma_ah_read_grh(&qp_attr->ah_attr); const struct ib_gid_attr *sgid_attr; qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID | CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL | CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX | CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT | CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS | CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC | CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID; memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw, sizeof(qp->qplib_qp.ah.dgid.data)); qp->qplib_qp.ah.flow_label = grh->flow_label; /* If RoCE V2 is enabled, stack will have two entries for * each GID entry. Avoiding this duplicte entry in HW. Dividing * the GID index by 2 for RoCE V2 */ qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2; qp->qplib_qp.ah.host_sgid_index = grh->sgid_index; qp->qplib_qp.ah.hop_limit = grh->hop_limit; qp->qplib_qp.ah.traffic_class = grh->traffic_class; qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr); ether_addr_copy(qp->qplib_qp.ah.dmac, qp_attr->ah_attr.roce.dmac); sgid_attr = qp_attr->ah_attr.grh.sgid_attr; rc = rdma_read_gid_l2_fields(sgid_attr, NULL, &qp->qplib_qp.smac[0]); if (rc) return rc; nw_type = rdma_gid_attr_network_type(sgid_attr); switch (nw_type) { case RDMA_NETWORK_IPV4: qp->qplib_qp.nw_type = CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4; break; case RDMA_NETWORK_IPV6: qp->qplib_qp.nw_type = CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6; break; default: qp->qplib_qp.nw_type = CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1; break; } } if (qp_attr_mask & IB_QP_PATH_MTU) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu); qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu); } else if (qp_attr->qp_state == IB_QPS_RTR) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; qp->qplib_qp.path_mtu = __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu)); qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); } if (qp_attr_mask & IB_QP_TIMEOUT) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT; qp->qplib_qp.timeout = qp_attr->timeout; } if (qp_attr_mask & IB_QP_RETRY_CNT) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT; qp->qplib_qp.retry_cnt = qp_attr->retry_cnt; } if (qp_attr_mask & IB_QP_RNR_RETRY) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY; qp->qplib_qp.rnr_retry = qp_attr->rnr_retry; } if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER; qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer; } if 
(qp_attr_mask & IB_QP_RQ_PSN) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN; qp->qplib_qp.rq.psn = qp_attr->rq_psn; } if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC; /* Cap the max_rd_atomic to device max */ qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic, dev_attr->max_qp_rd_atom); } if (qp_attr_mask & IB_QP_SQ_PSN) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN; qp->qplib_qp.sq.psn = qp_attr->sq_psn; } if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { if (qp_attr->max_dest_rd_atomic > dev_attr->max_qp_init_rd_atom) { dev_err(rdev_to_dev(rdev), "max_dest_rd_atomic requested%d is > dev_max%d", qp_attr->max_dest_rd_atomic, dev_attr->max_qp_init_rd_atom); return -EINVAL; } qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC; qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic; } if (qp_attr_mask & IB_QP_CAP) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE | CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE | CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE | CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE | CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA; if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) || (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) || (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) || (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) || (qp_attr->cap.max_inline_data >= dev_attr->max_inline_data)) { dev_err(rdev_to_dev(rdev), "Create QP failed - max exceeded"); return -EINVAL; } entries = roundup_pow_of_two(qp_attr->cap.max_send_wr); qp->qplib_qp.sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - qp_attr->cap.max_send_wr; /* * Reserving one slot for Phantom WQE. Some application can * post one extra entry in this case. Allowing this to avoid * unexpected Queue full condition */ qp->qplib_qp.sq.q_full_delta -= 1; qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge; if (qp->qplib_qp.rq.max_wqe) { entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr); qp->qplib_qp.rq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - qp_attr->cap.max_recv_wr; qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge; } else { /* SRQ was used prior, just ignore the RQ caps */ } } if (qp_attr_mask & IB_QP_DEST_QPN) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID; qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num; } rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to modify HW QP"); return rc; } if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask); return rc; } int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) { struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); struct bnxt_re_dev *rdev = qp->rdev; struct bnxt_qplib_qp *qplib_qp; int rc; qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL); if (!qplib_qp) return -ENOMEM; qplib_qp->id = qp->qplib_qp.id; qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index; rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to query HW QP"); goto out; } qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state); qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 
1 : 0; qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access); qp_attr->pkey_index = qplib_qp->pkey_index; qp_attr->qkey = qplib_qp->qkey; qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label, qplib_qp->ah.host_sgid_index, qplib_qp->ah.hop_limit, qplib_qp->ah.traffic_class); rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data); rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl); ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac); qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu); qp_attr->timeout = qplib_qp->timeout; qp_attr->retry_cnt = qplib_qp->retry_cnt; qp_attr->rnr_retry = qplib_qp->rnr_retry; qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer; qp_attr->rq_psn = qplib_qp->rq.psn; qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic; qp_attr->sq_psn = qplib_qp->sq.psn; qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic; qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; qp_attr->dest_qp_num = qplib_qp->dest_qpn; qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe; qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge; qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe; qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge; qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data; qp_init_attr->cap = qp_attr->cap; out: kfree(qplib_qp); return rc; } /* Routine for sending QP1 packets for RoCE V1 an V2 */ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp, const struct ib_send_wr *wr, struct bnxt_qplib_swqe *wqe, int payload_size) { struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah); struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah; const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr; struct bnxt_qplib_sge sge; u8 nw_type; u16 ether_type; union ib_gid dgid; bool is_eth = false; bool is_vlan = false; bool is_grh = false; bool is_udp = false; u8 ip_version = 0; u16 vlan_id = 0xFFFF; void *buf; int i, rc = 0; memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr)); rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL); if (rc) return rc; /* Get network header type for this GID */ nw_type = rdma_gid_attr_network_type(sgid_attr); switch (nw_type) { case RDMA_NETWORK_IPV4: nw_type = BNXT_RE_ROCEV2_IPV4_PACKET; break; case RDMA_NETWORK_IPV6: nw_type = BNXT_RE_ROCEV2_IPV6_PACKET; break; default: nw_type = BNXT_RE_ROCE_V1_PACKET; break; } memcpy(&dgid.raw, &qplib_ah->dgid, 16); is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP; if (is_udp) { if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) { ip_version = 4; ether_type = ETH_P_IP; } else { ip_version = 6; ether_type = ETH_P_IPV6; } is_grh = false; } else { ether_type = ETH_P_IBOE; is_grh = true; } is_eth = true; is_vlan = (vlan_id && (vlan_id < 0x1000)) ? 
true : false; ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh, ip_version, is_udp, 0, &qp->qp1_hdr); /* ETH */ ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac); ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac); /* For vlan, check the sgid for vlan existence */ if (!is_vlan) { qp->qp1_hdr.eth.type = cpu_to_be16(ether_type); } else { qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type); qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id); } if (is_grh || (ip_version == 6)) { memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw, sizeof(sgid_attr->gid)); memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data, sizeof(sgid_attr->gid)); qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit; } if (ip_version == 4) { qp->qp1_hdr.ip4.tos = 0; qp->qp1_hdr.ip4.id = 0; qp->qp1_hdr.ip4.frag_off = htons(IP_DF); qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit; memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4); memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4); qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr); } if (is_udp) { qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT); qp->qp1_hdr.udp.sport = htons(0x8CD1); qp->qp1_hdr.udp.csum = 0; } /* BTH */ if (wr->opcode == IB_WR_SEND_WITH_IMM) { qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; qp->qp1_hdr.immediate_present = 1; } else { qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY; } if (wr->send_flags & IB_SEND_SOLICITED) qp->qp1_hdr.bth.solicited_event = 1; /* pad_count */ qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3; /* P_key for QP1 is for all members */ qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF); qp->qp1_hdr.bth.destination_qpn = IB_QP1; qp->qp1_hdr.bth.ack_req = 0; qp->send_psn++; qp->send_psn &= BTH_PSN_MASK; qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn); /* DETH */ /* Use the priviledged Q_Key for QP1 */ qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY); qp->qp1_hdr.deth.source_qpn = IB_QP1; /* Pack the QP1 to the transmit buffer */ buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge); if (buf) { ib_ud_header_pack(&qp->qp1_hdr, buf); for (i = wqe->num_sge; i; i--) { wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr; wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey; wqe->sg_list[i].size = wqe->sg_list[i - 1].size; } /* * Max Header buf size for IPV6 RoCE V2 is 86, * which is same as the QP1 SQ header buffer. * Header buf size for IPV4 RoCE V2 can be 66. * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20). * Subtract 20 bytes from QP1 SQ header buf size */ if (is_udp && ip_version == 4) sge.size -= 20; /* * Max Header buf size for RoCE V1 is 78. * ETH(14) + VLAN(4) + GRH(40) + BTH(20). * Subtract 8 bytes from QP1 SQ header buf size */ if (!is_udp) sge.size -= 8; /* Subtract 4 bytes for non vlan packets */ if (!is_vlan) sge.size -= 4; wqe->sg_list[0].addr = sge.addr; wqe->sg_list[0].lkey = sge.lkey; wqe->sg_list[0].size = sge.size; wqe->num_sge++; } else { dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!"); rc = -ENOMEM; } return rc; } /* For the MAD layer, it only provides the recv SGE the size of * ib_grh + MAD datagram. No Ethernet headers, Ethertype, BTH, DETH, * nor RoCE iCRC. The Cu+ solution must provide buffer for the entire * receive packet (334 bytes) with no VLAN and then copy the GRH * and the MAD datagram out to the provided SGE. 
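* bnxt_re_build_qp1_shadow_qp_recv() below implements this: it saves the
* ULP-provided SGE in sqp_tbl[] (indexed by the RQ producer index) and posts
* the driver's own header buffer instead, so the completion path can copy the
* GRH and MAD payload back into the caller's buffer.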
*/ static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp, const struct ib_recv_wr *wr, struct bnxt_qplib_swqe *wqe, int payload_size) { struct bnxt_qplib_sge ref, sge; u32 rq_prod_index; struct bnxt_re_sqp_entries *sqp_entry; rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp); if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge)) return -ENOMEM; /* Create 1 SGE to receive the entire * ethernet packet */ /* Save the reference from ULP */ ref.addr = wqe->sg_list[0].addr; ref.lkey = wqe->sg_list[0].lkey; ref.size = wqe->sg_list[0].size; sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index]; /* SGE 1 */ wqe->sg_list[0].addr = sge.addr; wqe->sg_list[0].lkey = sge.lkey; wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2; sge.size -= wqe->sg_list[0].size; sqp_entry->sge.addr = ref.addr; sqp_entry->sge.lkey = ref.lkey; sqp_entry->sge.size = ref.size; /* Store the wrid for reporting completion */ sqp_entry->wrid = wqe->wr_id; /* change the wqe->wrid to table index */ wqe->wr_id = rq_prod_index; return 0; } static int is_ud_qp(struct bnxt_re_qp *qp) { return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD || qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI); } static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp, const struct ib_send_wr *wr, struct bnxt_qplib_swqe *wqe) { struct bnxt_re_ah *ah = NULL; if (is_ud_qp(qp)) { ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah); wqe->send.q_key = ud_wr(wr)->remote_qkey; wqe->send.dst_qp = ud_wr(wr)->remote_qpn; wqe->send.avid = ah->qplib_ah.id; } switch (wr->opcode) { case IB_WR_SEND: wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND; break; case IB_WR_SEND_WITH_IMM: wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM; wqe->send.imm_data = wr->ex.imm_data; break; case IB_WR_SEND_WITH_INV: wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV; wqe->send.inv_key = wr->ex.invalidate_rkey; break; default: return -EINVAL; } if (wr->send_flags & IB_SEND_SIGNALED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; if (wr->send_flags & IB_SEND_FENCE) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; if (wr->send_flags & IB_SEND_SOLICITED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; if (wr->send_flags & IB_SEND_INLINE) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE; return 0; } static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr, struct bnxt_qplib_swqe *wqe) { switch (wr->opcode) { case IB_WR_RDMA_WRITE: wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE; break; case IB_WR_RDMA_WRITE_WITH_IMM: wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM; wqe->rdma.imm_data = wr->ex.imm_data; break; case IB_WR_RDMA_READ: wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ; wqe->rdma.inv_key = wr->ex.invalidate_rkey; break; default: return -EINVAL; } wqe->rdma.remote_va = rdma_wr(wr)->remote_addr; wqe->rdma.r_key = rdma_wr(wr)->rkey; if (wr->send_flags & IB_SEND_SIGNALED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; if (wr->send_flags & IB_SEND_FENCE) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; if (wr->send_flags & IB_SEND_SOLICITED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; if (wr->send_flags & IB_SEND_INLINE) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE; return 0; } static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr, struct bnxt_qplib_swqe *wqe) { switch (wr->opcode) { case IB_WR_ATOMIC_CMP_AND_SWP: wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP; wqe->atomic.cmp_data = atomic_wr(wr)->compare_add; wqe->atomic.swap_data = atomic_wr(wr)->swap; break; case IB_WR_ATOMIC_FETCH_AND_ADD: wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD; 
wqe->atomic.cmp_data = atomic_wr(wr)->compare_add; break; default: return -EINVAL; } wqe->atomic.remote_va = atomic_wr(wr)->remote_addr; wqe->atomic.r_key = atomic_wr(wr)->rkey; if (wr->send_flags & IB_SEND_SIGNALED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; if (wr->send_flags & IB_SEND_FENCE) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; if (wr->send_flags & IB_SEND_SOLICITED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; return 0; } static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr, struct bnxt_qplib_swqe *wqe) { wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV; wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey; /* Need unconditional fence for local invalidate * opcode to work as expected. */ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; if (wr->send_flags & IB_SEND_SIGNALED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; if (wr->send_flags & IB_SEND_SOLICITED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; return 0; } static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr, struct bnxt_qplib_swqe *wqe) { struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr); struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl; int access = wr->access; wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0]; wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0]; wqe->frmr.page_list = mr->pages; wqe->frmr.page_list_len = mr->npages; wqe->frmr.levels = qplib_frpl->hwq.level + 1; wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR; /* Need unconditional fence for reg_mr * opcode to function as expected. */ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; if (wr->wr.send_flags & IB_SEND_SIGNALED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; if (access & IB_ACCESS_LOCAL_WRITE) wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE; if (access & IB_ACCESS_REMOTE_READ) wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ; if (access & IB_ACCESS_REMOTE_WRITE) wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE; if (access & IB_ACCESS_REMOTE_ATOMIC) wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC; if (access & IB_ACCESS_MW_BIND) wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND; wqe->frmr.l_key = wr->key; wqe->frmr.length = wr->mr->length; wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1; wqe->frmr.va = wr->mr->iova; return 0; } static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev, const struct ib_send_wr *wr, struct bnxt_qplib_swqe *wqe) { /* Copy the inline data to the data field */ u8 *in_data; u32 i, sge_len; void *sge_addr; in_data = wqe->inline_data; for (i = 0; i < wr->num_sge; i++) { sge_addr = (void *)(unsigned long) wr->sg_list[i].addr; sge_len = wr->sg_list[i].length; if ((sge_len + wqe->inline_len) > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) { dev_err(rdev_to_dev(rdev), "Inline data size requested > supported value"); return -EINVAL; } sge_len = wr->sg_list[i].length; memcpy(in_data, sge_addr, sge_len); in_data += wr->sg_list[i].length; wqe->inline_len += wr->sg_list[i].length; } return wqe->inline_len; } static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev, const struct ib_send_wr *wr, struct bnxt_qplib_swqe *wqe) { int payload_sz = 0; if (wr->send_flags & IB_SEND_INLINE) payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe); else payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list, wqe->num_sge); return payload_sz; } static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp) { if ((qp->ib_qp.qp_type == IB_QPT_UD || qp->ib_qp.qp_type == IB_QPT_GSI || qp->ib_qp.qp_type == 
IB_QPT_RAW_ETHERTYPE) && qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) { int qp_attr_mask; struct ib_qp_attr qp_attr; qp_attr_mask = IB_QP_STATE; qp_attr.qp_state = IB_QPS_RTS; bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL); qp->qplib_qp.wqe_cnt = 0; } } static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp, const struct ib_send_wr *wr) { struct bnxt_qplib_swqe wqe; int rc = 0, payload_sz = 0; unsigned long flags; spin_lock_irqsave(&qp->sq_lock, flags); memset(&wqe, 0, sizeof(wqe)); while (wr) { /* House keeping */ memset(&wqe, 0, sizeof(wqe)); /* Common */ wqe.num_sge = wr->num_sge; if (wr->num_sge > qp->qplib_qp.sq.max_sge) { dev_err(rdev_to_dev(rdev), "Limit exceeded for Send SGEs"); rc = -EINVAL; goto bad; } payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe); if (payload_sz < 0) { rc = -EINVAL; goto bad; } wqe.wr_id = wr->wr_id; wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND; rc = bnxt_re_build_send_wqe(qp, wr, &wqe); if (!rc) rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); bad: if (rc) { dev_err(rdev_to_dev(rdev), "Post send failed opcode = %#x rc = %d", wr->opcode, rc); break; } wr = wr->next; } bnxt_qplib_post_send_db(&qp->qplib_qp); bnxt_ud_qp_hw_stall_workaround(qp); spin_unlock_irqrestore(&qp->sq_lock, flags); return rc; } int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) { struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); struct bnxt_qplib_swqe wqe; int rc = 0, payload_sz = 0; unsigned long flags; spin_lock_irqsave(&qp->sq_lock, flags); while (wr) { /* House keeping */ memset(&wqe, 0, sizeof(wqe)); /* Common */ wqe.num_sge = wr->num_sge; if (wr->num_sge > qp->qplib_qp.sq.max_sge) { dev_err(rdev_to_dev(qp->rdev), "Limit exceeded for Send SGEs"); rc = -EINVAL; goto bad; } payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe); if (payload_sz < 0) { rc = -EINVAL; goto bad; } wqe.wr_id = wr->wr_id; switch (wr->opcode) { case IB_WR_SEND: case IB_WR_SEND_WITH_IMM: if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) { rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe, payload_sz); if (rc) goto bad; wqe.rawqp1.lflags |= SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC; } switch (wr->send_flags) { case IB_SEND_IP_CSUM: wqe.rawqp1.lflags |= SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM; break; default: break; } /* fall through */ case IB_WR_SEND_WITH_INV: rc = bnxt_re_build_send_wqe(qp, wr, &wqe); break; case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: case IB_WR_RDMA_READ: rc = bnxt_re_build_rdma_wqe(wr, &wqe); break; case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: rc = bnxt_re_build_atomic_wqe(wr, &wqe); break; case IB_WR_RDMA_READ_WITH_INV: dev_err(rdev_to_dev(qp->rdev), "RDMA Read with Invalidate is not supported"); rc = -EINVAL; goto bad; case IB_WR_LOCAL_INV: rc = bnxt_re_build_inv_wqe(wr, &wqe); break; case IB_WR_REG_MR: rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe); break; default: /* Unsupported WRs */ dev_err(rdev_to_dev(qp->rdev), "WR (%#x) is not supported", wr->opcode); rc = -EINVAL; goto bad; } if (!rc) rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); bad: if (rc) { dev_err(rdev_to_dev(qp->rdev), "post_send failed op:%#x qps = %#x rc = %d\n", wr->opcode, qp->qplib_qp.state, rc); *bad_wr = wr; break; } wr = wr->next; } bnxt_qplib_post_send_db(&qp->qplib_qp); bnxt_ud_qp_hw_stall_workaround(qp); spin_unlock_irqrestore(&qp->sq_lock, flags); return rc; } static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp, const struct ib_recv_wr *wr) 
{ struct bnxt_qplib_swqe wqe; int rc = 0; memset(&wqe, 0, sizeof(wqe)); while (wr) { /* House keeping */ memset(&wqe, 0, sizeof(wqe)); /* Common */ wqe.num_sge = wr->num_sge; if (wr->num_sge > qp->qplib_qp.rq.max_sge) { dev_err(rdev_to_dev(rdev), "Limit exceeded for Receive SGEs"); rc = -EINVAL; break; } bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge); wqe.wr_id = wr->wr_id; wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV; rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe); if (rc) break; wr = wr->next; } if (!rc) bnxt_qplib_post_recv_db(&qp->qplib_qp); return rc; } int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); struct bnxt_qplib_swqe wqe; int rc = 0, payload_sz = 0; unsigned long flags; u32 count = 0; spin_lock_irqsave(&qp->rq_lock, flags); while (wr) { /* House keeping */ memset(&wqe, 0, sizeof(wqe)); /* Common */ wqe.num_sge = wr->num_sge; if (wr->num_sge > qp->qplib_qp.rq.max_sge) { dev_err(rdev_to_dev(qp->rdev), "Limit exceeded for Receive SGEs"); rc = -EINVAL; *bad_wr = wr; break; } payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge); wqe.wr_id = wr->wr_id; wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV; if (ib_qp->qp_type == IB_QPT_GSI && qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI) rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe, payload_sz); if (!rc) rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe); if (rc) { *bad_wr = wr; break; } /* Ring DB if the RQEs posted reaches a threshold value */ if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) { bnxt_qplib_post_recv_db(&qp->qplib_qp); count = 0; } wr = wr->next; } if (count) bnxt_qplib_post_recv_db(&qp->qplib_qp); spin_unlock_irqrestore(&qp->rq_lock, flags); return rc; } /* Completion Queues */ void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) { struct bnxt_re_cq *cq; struct bnxt_qplib_nq *nq; struct bnxt_re_dev *rdev; cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); rdev = cq->rdev; nq = cq->qplib_cq.nq; bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq); ib_umem_release(cq->umem); atomic_dec(&rdev->cq_count); nq->budget--; kfree(cq->cql); } int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata) { struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev); struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq); int rc, entries; int cqe = attr->cqe; struct bnxt_qplib_nq *nq = NULL; unsigned int nq_alloc_cnt; /* Validate CQ fields */ if (cqe < 1 || cqe > dev_attr->max_cq_wqes) { dev_err(rdev_to_dev(rdev), "Failed to create CQ -max exceeded"); return -EINVAL; } cq->rdev = rdev; cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq); entries = roundup_pow_of_two(cqe + 1); if (entries > dev_attr->max_cq_wqes + 1) entries = dev_attr->max_cq_wqes + 1; if (udata) { struct bnxt_re_cq_req req; struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context( udata, struct bnxt_re_ucontext, ib_uctx); if (ib_copy_from_udata(&req, udata, sizeof(req))) { rc = -EFAULT; goto fail; } cq->umem = ib_umem_get(udata, req.cq_va, entries * sizeof(struct cq_base), IB_ACCESS_LOCAL_WRITE, 1); if (IS_ERR(cq->umem)) { rc = PTR_ERR(cq->umem); goto fail; } cq->qplib_cq.sg_info.sglist = cq->umem->sg_head.sgl; cq->qplib_cq.sg_info.npages = ib_umem_num_pages(cq->umem); cq->qplib_cq.sg_info.nmap = cq->umem->nmap; cq->qplib_cq.dpi = &uctx->dpi; } else { cq->max_cql = min_t(u32, entries, 
MAX_CQL_PER_POLL); cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe), GFP_KERNEL); if (!cq->cql) { rc = -ENOMEM; goto fail; } cq->qplib_cq.dpi = &rdev->dpi_privileged; } /* * Allocating the NQ in a round robin fashion. nq_alloc_cnt is a * used for getting the NQ index. */ nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt); nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)]; cq->qplib_cq.max_wqe = entries; cq->qplib_cq.cnq_hw_ring_id = nq->ring_id; cq->qplib_cq.nq = nq; rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to create HW CQ"); goto fail; } cq->ib_cq.cqe = entries; cq->cq_period = cq->qplib_cq.period; nq->budget++; atomic_inc(&rdev->cq_count); spin_lock_init(&cq->cq_lock); if (udata) { struct bnxt_re_cq_resp resp; resp.cqid = cq->qplib_cq.id; resp.tail = cq->qplib_cq.hwq.cons; resp.phase = cq->qplib_cq.period; resp.rsvd = 0; rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata"); bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq); goto c2fail; } } return 0; c2fail: ib_umem_release(cq->umem); fail: kfree(cq->cql); return rc; } static u8 __req_to_ib_wc_status(u8 qstatus) { switch (qstatus) { case CQ_REQ_STATUS_OK: return IB_WC_SUCCESS; case CQ_REQ_STATUS_BAD_RESPONSE_ERR: return IB_WC_BAD_RESP_ERR; case CQ_REQ_STATUS_LOCAL_LENGTH_ERR: return IB_WC_LOC_LEN_ERR; case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR: return IB_WC_LOC_QP_OP_ERR; case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR: return IB_WC_LOC_PROT_ERR; case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR: return IB_WC_GENERAL_ERR; case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR: return IB_WC_REM_INV_REQ_ERR; case CQ_REQ_STATUS_REMOTE_ACCESS_ERR: return IB_WC_REM_ACCESS_ERR; case CQ_REQ_STATUS_REMOTE_OPERATION_ERR: return IB_WC_REM_OP_ERR; case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR: return IB_WC_RNR_RETRY_EXC_ERR; case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR: return IB_WC_RETRY_EXC_ERR; case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR: return IB_WC_WR_FLUSH_ERR; default: return IB_WC_GENERAL_ERR; } return 0; } static u8 __rawqp1_to_ib_wc_status(u8 qstatus) { switch (qstatus) { case CQ_RES_RAWETH_QP1_STATUS_OK: return IB_WC_SUCCESS; case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR: return IB_WC_LOC_ACCESS_ERR; case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR: return IB_WC_LOC_LEN_ERR; case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR: return IB_WC_LOC_PROT_ERR; case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR: return IB_WC_LOC_QP_OP_ERR; case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR: return IB_WC_GENERAL_ERR; case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR: return IB_WC_WR_FLUSH_ERR; case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR: return IB_WC_WR_FLUSH_ERR; default: return IB_WC_GENERAL_ERR; } } static u8 __rc_to_ib_wc_status(u8 qstatus) { switch (qstatus) { case CQ_RES_RC_STATUS_OK: return IB_WC_SUCCESS; case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR: return IB_WC_LOC_ACCESS_ERR; case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR: return IB_WC_LOC_LEN_ERR; case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR: return IB_WC_LOC_PROT_ERR; case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR: return IB_WC_LOC_QP_OP_ERR; case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR: return IB_WC_GENERAL_ERR; case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR: return IB_WC_REM_INV_REQ_ERR; case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR: return IB_WC_WR_FLUSH_ERR; case CQ_RES_RC_STATUS_HW_FLUSH_ERR: return IB_WC_WR_FLUSH_ERR; default: return IB_WC_GENERAL_ERR; } } 
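/*
 * Illustrative, standalone userspace sketch (not part of this driver): the
 * CQ_*_STATUS_* -> IB_WC_* translations above are what a verbs consumer
 * eventually observes as the work-completion status. A minimal libibverbs
 * polling loop acting on that status could look like the following; the
 * "demo_" name is an assumption, the ibv_* calls are standard libibverbs.
 */
#include <stdio.h>
#include <infiniband/verbs.h>

static int demo_drain_cq(struct ibv_cq *cq)
{
	struct ibv_wc wc;
	int n;

	while ((n = ibv_poll_cq(cq, 1, &wc)) > 0) {
		if (wc.status != IBV_WC_SUCCESS)
			fprintf(stderr, "wr_id %llu failed: %s\n",
				(unsigned long long)wc.wr_id,
				ibv_wc_status_str(wc.status));
	}

	return n; /* 0 when the CQ is empty, negative on a polling error */
}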
static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe) { switch (cqe->type) { case BNXT_QPLIB_SWQE_TYPE_SEND: wc->opcode = IB_WC_SEND; break; case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM: wc->opcode = IB_WC_SEND; wc->wc_flags |= IB_WC_WITH_IMM; break; case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV: wc->opcode = IB_WC_SEND; wc->wc_flags |= IB_WC_WITH_INVALIDATE; break; case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE: wc->opcode = IB_WC_RDMA_WRITE; break; case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM: wc->opcode = IB_WC_RDMA_WRITE; wc->wc_flags |= IB_WC_WITH_IMM; break; case BNXT_QPLIB_SWQE_TYPE_RDMA_READ: wc->opcode = IB_WC_RDMA_READ; break; case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP: wc->opcode = IB_WC_COMP_SWAP; break; case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD: wc->opcode = IB_WC_FETCH_ADD; break; case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV: wc->opcode = IB_WC_LOCAL_INV; break; case BNXT_QPLIB_SWQE_TYPE_REG_MR: wc->opcode = IB_WC_REG_MR; break; default: wc->opcode = IB_WC_SEND; break; } wc->status = __req_to_ib_wc_status(cqe->status); } static int bnxt_re_check_packet_type(u16 raweth_qp1_flags, u16 raweth_qp1_flags2) { bool is_ipv6 = false, is_ipv4 = false; /* raweth_qp1_flags Bit 9-6 indicates itype */ if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE) != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE) return -1; if (raweth_qp1_flags2 & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC && raweth_qp1_flags2 & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) { /* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */ (raweth_qp1_flags2 & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ? (is_ipv6 = true) : (is_ipv4 = true); return ((is_ipv6) ? BNXT_RE_ROCEV2_IPV6_PACKET : BNXT_RE_ROCEV2_IPV4_PACKET); } else { return BNXT_RE_ROCE_V1_PACKET; } } static int bnxt_re_to_ib_nw_type(int nw_type) { u8 nw_hdr_type = 0xFF; switch (nw_type) { case BNXT_RE_ROCE_V1_PACKET: nw_hdr_type = RDMA_NETWORK_ROCE_V1; break; case BNXT_RE_ROCEV2_IPV4_PACKET: nw_hdr_type = RDMA_NETWORK_IPV4; break; case BNXT_RE_ROCEV2_IPV6_PACKET: nw_hdr_type = RDMA_NETWORK_IPV6; break; } return nw_hdr_type; } static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev, void *rq_hdr_buf) { u8 *tmp_buf = NULL; struct ethhdr *eth_hdr; u16 eth_type; bool rc = false; tmp_buf = (u8 *)rq_hdr_buf; /* * If dest mac is not same as I/F mac, this could be a * loopback address or multicast address, check whether * it is a loopback packet */ if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) { tmp_buf += 4; /* Check the ether type */ eth_hdr = (struct ethhdr *)tmp_buf; eth_type = ntohs(eth_hdr->h_proto); switch (eth_type) { case ETH_P_IBOE: rc = true; break; case ETH_P_IP: case ETH_P_IPV6: { u32 len; struct udphdr *udp_hdr; len = (eth_type == ETH_P_IP ? 
sizeof(struct iphdr) : sizeof(struct ipv6hdr)); tmp_buf += sizeof(struct ethhdr) + len; udp_hdr = (struct udphdr *)tmp_buf; if (ntohs(udp_hdr->dest) == ROCE_V2_UDP_DPORT) rc = true; break; } default: break; } } return rc; } static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp, struct bnxt_qplib_cqe *cqe) { struct bnxt_re_dev *rdev = qp1_qp->rdev; struct bnxt_re_sqp_entries *sqp_entry = NULL; struct bnxt_re_qp *qp = rdev->qp1_sqp; struct ib_send_wr *swr; struct ib_ud_wr udwr; struct ib_recv_wr rwr; int pkt_type = 0; u32 tbl_idx; void *rq_hdr_buf; dma_addr_t rq_hdr_buf_map; dma_addr_t shrq_hdr_buf_map; u32 offset = 0; u32 skip_bytes = 0; struct ib_sge s_sge[2]; struct ib_sge r_sge[2]; int rc; memset(&udwr, 0, sizeof(udwr)); memset(&rwr, 0, sizeof(rwr)); memset(&s_sge, 0, sizeof(s_sge)); memset(&r_sge, 0, sizeof(r_sge)); swr = &udwr.wr; tbl_idx = cqe->wr_id; rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf + (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size); rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp, tbl_idx); /* Shadow QP header buffer */ shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp, tbl_idx); sqp_entry = &rdev->sqp_tbl[tbl_idx]; /* Store this cqe */ memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe)); sqp_entry->qp1_qp = qp1_qp; /* Find packet type from the cqe */ pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags, cqe->raweth_qp1_flags2); if (pkt_type < 0) { dev_err(rdev_to_dev(rdev), "Invalid packet\n"); return -EINVAL; } /* Adjust the offset for the user buffer and post in the rq */ if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET) offset = 20; /* * QP1 loopback packet has 4 bytes of internal header before * ether header. Skip these four bytes. */ if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf)) skip_bytes = 4; /* First send SGE . Skip the ether header*/ s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE + skip_bytes; s_sge[0].lkey = 0xFFFFFFFF; s_sge[0].length = offset ? 
BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 : BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6; /* Second Send SGE */ s_sge[1].addr = s_sge[0].addr + s_sge[0].length + BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE; if (pkt_type != BNXT_RE_ROCE_V1_PACKET) s_sge[1].addr += 8; s_sge[1].lkey = 0xFFFFFFFF; s_sge[1].length = 256; /* First recv SGE */ r_sge[0].addr = shrq_hdr_buf_map; r_sge[0].lkey = 0xFFFFFFFF; r_sge[0].length = 40; r_sge[1].addr = sqp_entry->sge.addr + offset; r_sge[1].lkey = sqp_entry->sge.lkey; r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset; /* Create receive work request */ rwr.num_sge = 2; rwr.sg_list = r_sge; rwr.wr_id = tbl_idx; rwr.next = NULL; rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to post Rx buffers to shadow QP"); return -ENOMEM; } swr->num_sge = 2; swr->sg_list = s_sge; swr->wr_id = tbl_idx; swr->opcode = IB_WR_SEND; swr->next = NULL; udwr.ah = &rdev->sqp_ah->ib_ah; udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id; udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey; /* post data received in the send queue */ rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr); return 0; } static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe) { wc->opcode = IB_WC_RECV; wc->status = __rawqp1_to_ib_wc_status(cqe->status); wc->wc_flags |= IB_WC_GRH; } static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe, u16 *vid, u8 *sl) { bool ret = false; u32 metadata; u16 tpid; metadata = orig_cqe->raweth_qp1_metadata; if (orig_cqe->raweth_qp1_flags2 & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) { tpid = ((metadata & CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >> CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT); if (tpid == ETH_P_8021Q) { *vid = metadata & CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK; *sl = (metadata & CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >> CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT; ret = true; } } return ret; } static void bnxt_re_process_res_rc_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe) { wc->opcode = IB_WC_RECV; wc->status = __rc_to_ib_wc_status(cqe->status); if (cqe->flags & CQ_RES_RC_FLAGS_IMM) wc->wc_flags |= IB_WC_WITH_IMM; if (cqe->flags & CQ_RES_RC_FLAGS_INV) wc->wc_flags |= IB_WC_WITH_INVALIDATE; if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) == (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; } static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp, struct ib_wc *wc, struct bnxt_qplib_cqe *cqe) { struct bnxt_re_dev *rdev = qp->rdev; struct bnxt_re_qp *qp1_qp = NULL; struct bnxt_qplib_cqe *orig_cqe = NULL; struct bnxt_re_sqp_entries *sqp_entry = NULL; int nw_type; u32 tbl_idx; u16 vlan_id; u8 sl; tbl_idx = cqe->wr_id; sqp_entry = &rdev->sqp_tbl[tbl_idx]; qp1_qp = sqp_entry->qp1_qp; orig_cqe = &sqp_entry->cqe; wc->wr_id = sqp_entry->wrid; wc->byte_len = orig_cqe->length; wc->qp = &qp1_qp->ib_qp; wc->ex.imm_data = orig_cqe->immdata; wc->src_qp = orig_cqe->src_qp; memcpy(wc->smac, orig_cqe->smac, ETH_ALEN); if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) { wc->vlan_id = vlan_id; wc->sl = sl; wc->wc_flags |= IB_WC_WITH_VLAN; } wc->port_num = 1; wc->vendor_err = orig_cqe->status; wc->opcode = IB_WC_RECV; wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status); wc->wc_flags |= IB_WC_GRH; nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags, orig_cqe->raweth_qp1_flags2); if (nw_type >= 0) { wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type); wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE; } } 
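/*
 * Standalone userspace sketch (illustrative only) of the raw-QP1 packet
 * classification done above by bnxt_re_is_loopback_packet() and
 * bnxt_re_check_packet_type(): a frame is RoCE v1 if its ethertype is the
 * RoCE value (0x8915, ETH_P_IBOE) and RoCE v2 if it is a UDP packet whose
 * destination port is 4791 (ROCE_V2_UDP_DPORT). The "demo_" names are
 * assumptions; IPv6 and VLAN tags are omitted for brevity.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <net/ethernet.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/udp.h>

#define DEMO_ETH_P_ROCE_V1	0x8915
#define DEMO_ROCE_V2_UDP_DPORT	4791

static bool demo_is_roce_frame(const uint8_t *frame, size_t len)
{
	const struct ether_header *eth = (const struct ether_header *)frame;
	const struct iphdr *ip;
	const struct udphdr *udp;

	if (len < sizeof(*eth))
		return false;
	if (ntohs(eth->ether_type) == DEMO_ETH_P_ROCE_V1)
		return true;	/* RoCE v1 rides directly on Ethernet */
	if (ntohs(eth->ether_type) != ETHERTYPE_IP)
		return false;	/* IPv6 handling omitted in this sketch */

	ip = (const struct iphdr *)(frame + sizeof(*eth));
	if (len < sizeof(*eth) + sizeof(*ip) || ip->protocol != IPPROTO_UDP)
		return false;
	if (len < sizeof(*eth) + (size_t)ip->ihl * 4 + sizeof(*udp))
		return false;

	udp = (const struct udphdr *)((const uint8_t *)ip + ip->ihl * 4);
	return ntohs(udp->dest) == DEMO_ROCE_V2_UDP_DPORT;	/* RoCE v2 */
}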
static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp, struct ib_wc *wc, struct bnxt_qplib_cqe *cqe) { u8 nw_type; wc->opcode = IB_WC_RECV; wc->status = __rc_to_ib_wc_status(cqe->status); if (cqe->flags & CQ_RES_UD_FLAGS_IMM) wc->wc_flags |= IB_WC_WITH_IMM; /* report only on GSI QP for Thor */ if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) { wc->wc_flags |= IB_WC_GRH; memcpy(wc->smac, cqe->smac, ETH_ALEN); wc->wc_flags |= IB_WC_WITH_SMAC; if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) { wc->vlan_id = (cqe->cfa_meta & 0xFFF); if (wc->vlan_id < 0x1000) wc->wc_flags |= IB_WC_WITH_VLAN; } nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >> CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT; wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type); wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE; } } static int send_phantom_wqe(struct bnxt_re_qp *qp) { struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp; unsigned long flags; int rc = 0; spin_lock_irqsave(&qp->sq_lock, flags); rc = bnxt_re_bind_fence_mw(lib_qp); if (!rc) { lib_qp->sq.phantom_wqe_cnt++; dev_dbg(&lib_qp->sq.hwq.pdev->dev, "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n", lib_qp->id, lib_qp->sq.hwq.prod, HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq), lib_qp->sq.phantom_wqe_cnt); } spin_unlock_irqrestore(&qp->sq_lock, flags); return rc; } int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) { struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); struct bnxt_re_qp *qp; struct bnxt_qplib_cqe *cqe; int i, ncqe, budget; struct bnxt_qplib_q *sq; struct bnxt_qplib_qp *lib_qp; u32 tbl_idx; struct bnxt_re_sqp_entries *sqp_entry = NULL; unsigned long flags; spin_lock_irqsave(&cq->cq_lock, flags); budget = min_t(u32, num_entries, cq->max_cql); num_entries = budget; if (!cq->cql) { dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use"); goto exit; } cqe = &cq->cql[0]; while (budget) { lib_qp = NULL; ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp); if (lib_qp) { sq = &lib_qp->sq; if (sq->send_phantom) { qp = container_of(lib_qp, struct bnxt_re_qp, qplib_qp); if (send_phantom_wqe(qp) == -ENOMEM) dev_err(rdev_to_dev(cq->rdev), "Phantom failed! Scheduled to send again\n"); else sq->send_phantom = false; } } if (ncqe < budget) ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq, cqe + ncqe, budget - ncqe); if (!ncqe) break; for (i = 0; i < ncqe; i++, cqe++) { /* Transcribe each qplib_wqe back to ib_wc */ memset(wc, 0, sizeof(*wc)); wc->wr_id = cqe->wr_id; wc->byte_len = cqe->length; qp = container_of ((struct bnxt_qplib_qp *) (unsigned long)(cqe->qp_handle), struct bnxt_re_qp, qplib_qp); if (!qp) { dev_err(rdev_to_dev(cq->rdev), "POLL CQ : bad QP handle"); continue; } wc->qp = &qp->ib_qp; wc->ex.imm_data = cqe->immdata; wc->src_qp = cqe->src_qp; memcpy(wc->smac, cqe->smac, ETH_ALEN); wc->port_num = 1; wc->vendor_err = cqe->status; switch (cqe->opcode) { case CQ_BASE_CQE_TYPE_REQ: if (qp->rdev->qp1_sqp && qp->qplib_qp.id == qp->rdev->qp1_sqp->qplib_qp.id) { /* Handle this completion with * the stored completion */ memset(wc, 0, sizeof(*wc)); continue; } bnxt_re_process_req_wc(wc, cqe); break; case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1: if (!cqe->status) { int rc = 0; rc = bnxt_re_process_raw_qp_pkt_rx (qp, cqe); if (!rc) { memset(wc, 0, sizeof(*wc)); continue; } cqe->status = -1; } /* Errors need not be looped back. 
* But change the wr_id to the one * stored in the table */ tbl_idx = cqe->wr_id; sqp_entry = &cq->rdev->sqp_tbl[tbl_idx]; wc->wr_id = sqp_entry->wrid; bnxt_re_process_res_rawqp1_wc(wc, cqe); break; case CQ_BASE_CQE_TYPE_RES_RC: bnxt_re_process_res_rc_wc(wc, cqe); break; case CQ_BASE_CQE_TYPE_RES_UD: if (qp->rdev->qp1_sqp && qp->qplib_qp.id == qp->rdev->qp1_sqp->qplib_qp.id) { /* Handle this completion with * the stored completion */ if (cqe->status) { continue; } else { bnxt_re_process_res_shadow_qp_wc (qp, wc, cqe); break; } } bnxt_re_process_res_ud_wc(qp, wc, cqe); break; default: dev_err(rdev_to_dev(cq->rdev), "POLL CQ : type 0x%x not handled", cqe->opcode); continue; } wc++; budget--; } } exit: spin_unlock_irqrestore(&cq->cq_lock, flags); return num_entries - budget; } int bnxt_re_req_notify_cq(struct ib_cq *ib_cq, enum ib_cq_notify_flags ib_cqn_flags) { struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); int type = 0, rc = 0; unsigned long flags; spin_lock_irqsave(&cq->cq_lock, flags); /* Trigger on the very next completion */ if (ib_cqn_flags & IB_CQ_NEXT_COMP) type = DBC_DBC_TYPE_CQ_ARMALL; /* Trigger on the next solicited completion */ else if (ib_cqn_flags & IB_CQ_SOLICITED) type = DBC_DBC_TYPE_CQ_ARMSE; /* Poll to see if there are missed events */ if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) && !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) { rc = 1; goto exit; } bnxt_qplib_req_notify_cq(&cq->qplib_cq, type); exit: spin_unlock_irqrestore(&cq->cq_lock, flags); return rc; } /* Memory Regions */ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags) { struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); struct bnxt_re_dev *rdev = pd->rdev; struct bnxt_re_mr *mr; u64 pbl = 0; int rc; mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); mr->rdev = rdev; mr->qplib_mr.pd = &pd->qplib_pd; mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; /* Allocate and register 0 as the address */ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); if (rc) goto fail; mr->qplib_mr.hwq.level = PBL_LVL_MAX; mr->qplib_mr.total_size = -1; /* Infinte length */ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false, PAGE_SIZE); if (rc) goto fail_mr; mr->ib_mr.lkey = mr->qplib_mr.lkey; if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_ATOMIC)) mr->ib_mr.rkey = mr->ib_mr.lkey; atomic_inc(&rdev->mr_count); return &mr->ib_mr; fail_mr: bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); fail: kfree(mr); return ERR_PTR(rc); } int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) { struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr); struct bnxt_re_dev *rdev = mr->rdev; int rc; rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); if (rc) dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc); if (mr->pages) { rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, &mr->qplib_frpl); kfree(mr->pages); mr->npages = 0; mr->pages = NULL; } ib_umem_release(mr->ib_umem); kfree(mr); atomic_dec(&rdev->mr_count); return rc; } static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr) { struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr); if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs)) return -ENOMEM; mr->pages[mr->npages++] = addr; return 0; } int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset) { struct 
bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr); mr->npages = 0; return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page); } struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type, u32 max_num_sg, struct ib_udata *udata) { struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); struct bnxt_re_dev *rdev = pd->rdev; struct bnxt_re_mr *mr = NULL; int rc; if (type != IB_MR_TYPE_MEM_REG) { dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type); return ERR_PTR(-EINVAL); } if (max_num_sg > MAX_PBL_LVL_1_PGS) return ERR_PTR(-EINVAL); mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); mr->rdev = rdev; mr->qplib_mr.pd = &pd->qplib_pd; mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR; mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); if (rc) goto bail; mr->ib_mr.lkey = mr->qplib_mr.lkey; mr->ib_mr.rkey = mr->ib_mr.lkey; mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL); if (!mr->pages) { rc = -ENOMEM; goto fail; } rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res, &mr->qplib_frpl, max_num_sg); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to allocate HW FR page list"); goto fail_mr; } atomic_inc(&rdev->mr_count); return &mr->ib_mr; fail_mr: kfree(mr->pages); fail: bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); bail: kfree(mr); return ERR_PTR(rc); } struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, struct ib_udata *udata) { struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); struct bnxt_re_dev *rdev = pd->rdev; struct bnxt_re_mw *mw; int rc; mw = kzalloc(sizeof(*mw), GFP_KERNEL); if (!mw) return ERR_PTR(-ENOMEM); mw->rdev = rdev; mw->qplib_mw.pd = &pd->qplib_pd; mw->qplib_mw.type = (type == IB_MW_TYPE_1 ? 
CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 : CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B); rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw); if (rc) { dev_err(rdev_to_dev(rdev), "Allocate MW failed!"); goto fail; } mw->ib_mw.rkey = mw->qplib_mw.rkey; atomic_inc(&rdev->mw_count); return &mw->ib_mw; fail: kfree(mw); return ERR_PTR(rc); } int bnxt_re_dealloc_mw(struct ib_mw *ib_mw) { struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw); struct bnxt_re_dev *rdev = mw->rdev; int rc; rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw); if (rc) { dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc); return rc; } kfree(mw); atomic_dec(&rdev->mw_count); return rc; } static int bnxt_re_page_size_ok(int page_shift) { switch (page_shift) { case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K: case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K: case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K: case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M: case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K: case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M: case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M: case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G: return 1; default: return 0; } } static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig, int page_shift) { u64 *pbl_tbl = pbl_tbl_orig; u64 page_size = BIT_ULL(page_shift); struct ib_block_iter biter; rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, page_size) *pbl_tbl++ = rdma_block_iter_dma_address(&biter); return pbl_tbl - pbl_tbl_orig; } /* uverbs */ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length, u64 virt_addr, int mr_access_flags, struct ib_udata *udata) { struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); struct bnxt_re_dev *rdev = pd->rdev; struct bnxt_re_mr *mr; struct ib_umem *umem; u64 *pbl_tbl = NULL; int umem_pgs, page_shift, rc; if (length > BNXT_RE_MAX_MR_SIZE) { dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%lld\n", length, BNXT_RE_MAX_MR_SIZE); return ERR_PTR(-ENOMEM); } mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); mr->rdev = rdev; mr->qplib_mr.pd = &pd->qplib_pd; mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR; rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to allocate MR"); goto free_mr; } /* The fixed portion of the rkey is the same as the lkey */ mr->ib_mr.rkey = mr->qplib_mr.rkey; umem = ib_umem_get(udata, start, length, mr_access_flags, 0); if (IS_ERR(umem)) { dev_err(rdev_to_dev(rdev), "Failed to get umem"); rc = -EFAULT; goto free_mrw; } mr->ib_umem = umem; mr->qplib_mr.va = virt_addr; umem_pgs = ib_umem_page_count(umem); if (!umem_pgs) { dev_err(rdev_to_dev(rdev), "umem is invalid!"); rc = -EINVAL; goto free_umem; } mr->qplib_mr.total_size = length; pbl_tbl = kcalloc(umem_pgs, sizeof(u64 *), GFP_KERNEL); if (!pbl_tbl) { rc = -ENOMEM; goto free_umem; } page_shift = __ffs(ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M, virt_addr)); if (!bnxt_re_page_size_ok(page_shift)) { dev_err(rdev_to_dev(rdev), "umem page size unsupported!"); rc = -EFAULT; goto fail; } if (page_shift == BNXT_RE_PAGE_SHIFT_4K && length > BNXT_RE_MAX_MR_SIZE_LOW) { dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu", length, (u64)BNXT_RE_MAX_MR_SIZE_LOW); rc = -EINVAL; goto fail; } /* Map umem buf ptrs to the PBL */ umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift); rc = bnxt_qplib_reg_mr(&rdev->qplib_res, 
&mr->qplib_mr, pbl_tbl, umem_pgs, false, 1 << page_shift); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to register user MR"); goto fail; } kfree(pbl_tbl); mr->ib_mr.lkey = mr->qplib_mr.lkey; mr->ib_mr.rkey = mr->qplib_mr.lkey; atomic_inc(&rdev->mr_count); return &mr->ib_mr; fail: kfree(pbl_tbl); free_umem: ib_umem_release(umem); free_mrw: bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); free_mr: kfree(mr); return ERR_PTR(rc); } int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata) { struct ib_device *ibdev = ctx->device; struct bnxt_re_ucontext *uctx = container_of(ctx, struct bnxt_re_ucontext, ib_uctx); struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; struct bnxt_re_uctx_resp resp; u32 chip_met_rev_num = 0; int rc; dev_dbg(rdev_to_dev(rdev), "ABI version requested %u", ibdev->ops.uverbs_abi_ver); if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) { dev_dbg(rdev_to_dev(rdev), " is different from the device %d ", BNXT_RE_ABI_VERSION); return -EPERM; } uctx->rdev = rdev; uctx->shpg = (void *)__get_free_page(GFP_KERNEL); if (!uctx->shpg) { rc = -ENOMEM; goto fail; } spin_lock_init(&uctx->sh_lock); resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX; chip_met_rev_num = rdev->chip_ctx.chip_num; chip_met_rev_num |= ((u32)rdev->chip_ctx.chip_rev & 0xFF) << BNXT_RE_CHIP_ID0_CHIP_REV_SFT; chip_met_rev_num |= ((u32)rdev->chip_ctx.chip_metal & 0xFF) << BNXT_RE_CHIP_ID0_CHIP_MET_SFT; resp.chip_id0 = chip_met_rev_num; /* Future extension of chip info */ resp.chip_id1 = 0; /*Temp, Use xa_alloc instead */ resp.dev_id = rdev->en_dev->pdev->devfn; resp.max_qp = rdev->qplib_ctx.qpc_count; resp.pg_size = PAGE_SIZE; resp.cqe_sz = sizeof(struct cq_base); resp.max_cqd = dev_attr->max_cq_wqes; resp.rsvd = 0; rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp))); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to copy user context"); rc = -EFAULT; goto cfail; } return 0; cfail: free_page((unsigned long)uctx->shpg); uctx->shpg = NULL; fail: return rc; } void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx) { struct bnxt_re_ucontext *uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx); struct bnxt_re_dev *rdev = uctx->rdev; if (uctx->shpg) free_page((unsigned long)uctx->shpg); if (uctx->dpi.dbr) { /* Free DPI only if this is the first PD allocated by the * application and mark the context dpi as NULL */ bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &rdev->qplib_res.dpi_tbl, &uctx->dpi); uctx->dpi.dbr = NULL; } } /* Helper function to mmap the virtual memory from user app */ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma) { struct bnxt_re_ucontext *uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx); struct bnxt_re_dev *rdev = uctx->rdev; u64 pfn; if (vma->vm_end - vma->vm_start != PAGE_SIZE) return -EINVAL; if (vma->vm_pgoff) { vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, PAGE_SIZE, vma->vm_page_prot)) { dev_err(rdev_to_dev(rdev), "Failed to map DPI"); return -EAGAIN; } } else { pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT; if (remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, vma->vm_page_prot)) { dev_err(rdev_to_dev(rdev), "Failed to map shared page"); return -EAGAIN; } } return 0; }
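/*
 * Standalone sketch (illustrative only) of the page-list sizing that
 * bnxt_re_reg_user_mr()/fill_umem_pbl_tbl() above rely on: the number of
 * PBL entries for a user buffer is the number of page_size-aligned blocks
 * touched by [addr, addr + length). Names are assumptions, not driver API.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t demo_num_pbl_entries(uint64_t addr, uint64_t length,
				      unsigned int page_shift)
{
	uint64_t first = addr >> page_shift;
	uint64_t last = (addr + length - 1) >> page_shift;

	return length ? (last - first + 1) : 0;
}

int main(void)
{
	/* A 10 KiB buffer starting 512 bytes into a 4 KiB page spans 3 pages. */
	printf("%llu\n",
	       (unsigned long long)demo_num_pbl_entries(0x1200, 10 * 1024, 12));
	return 0;
}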
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1267_0
crossvul-cpp_data_bad_371_1
// url.c -- Object representing uniform resource locators // Copyright (C) 2008-2010 Markus Gutschke <markus@shellinabox.com> // // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License version 2 as // published by the Free Software Foundation. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License along // with this program; if not, write to the Free Software Foundation, Inc., // 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. // // In addition to these license terms, the author grants the following // additional rights: // // If you modify this program, or any covered work, by linking or // combining it with the OpenSSL project's OpenSSL library (or a // modified version of that library), containing parts covered by the // terms of the OpenSSL or SSLeay licenses, the author // grants you additional permission to convey the resulting work. // Corresponding Source for a non-source form of such a combination // shall include the source code for the parts of OpenSSL used as well // as that of the covered work. // // You may at your option choose to remove this additional permission from // the work, or from any part of it. // // It is possible to build this program in a way that it loads OpenSSL // libraries at run-time. If doing so, the following notices are required // by the OpenSSL and SSLeay licenses: // // This product includes software developed by the OpenSSL Project // for use in the OpenSSL Toolkit. (http://www.openssl.org/) // // This product includes cryptographic software written by Eric Young // (eay@cryptsoft.com) // // // The most up-to-date version of this program is always available from // http://shellinabox.com #include "config.h" #define _XOPEN_SOURCE 500 #include <stdio.h> #include <stdlib.h> #include <string.h> #ifdef HAVE_STRINGS_H #include <strings.h> // for strncasecmp() #endif #include "libhttp/url.h" #include "logging/logging.h" #ifdef HAVE_UNUSED #defined ATTR_UNUSED __attribute__((unused)) #defined UNUSED(x) do { } while (0) #else #define ATTR_UNUSED #define UNUSED(x) do { (void)(x); } while (0) #endif static char *urlUnescape(char *s) { int warned = 0; char *r = s; for (char *u = s; *u; ) { char ch = *u++; if (ch == '+') { ch = ' '; } else if (ch == '%') { char c1 = *u; if ((c1 >= '0' && c1 <= '9') || ((c1 &= ~0x20) >= 'A' && c1 <= 'F')) { ch = c1 - (c1 > '9' ? 'A' - 10 : '0'); char c2 = *++u; if ((c2 >= '0' && c2 <= '9') || ((c2 &= ~0x20) >= 'A' && c2 <= 'F')) { ch = (ch << 4) + c2 - (c2 > '9' ? 
'A' - 10 : '0'); ++u; } else if (!warned++) { warn("[http] Malformed URL encoded data \"%s\"!", r); } } else if (!warned++) { warn("[http] Malformed URL encoded data \"%s\"!", r); } } *s++ = ch; } *s = '\000'; return r; } static void urlDestroyHashMapEntry(void *arg ATTR_UNUSED, char *key, char *value) { UNUSED(arg); free(key); free(value); } static char *urlMakeString(const char *buf, int len) { if (!buf) { return NULL; } else { char *s; check(s = malloc(len + 1)); memcpy(s, buf, len); s[len] = '\000'; return s; } } static void urlParseQueryString(struct HashMap *hashmap, const char *query, int len) { const char *key = query; const char *value = NULL; for (const char *ampersand = query; len-- >= 0; ampersand++) { char ch = len >= 0 ? *ampersand : '\000'; if (ch == '=' && !value) { value = ampersand + 1; } else if (ch == '&' || len < 0) { int kl = (value ? value-1 : ampersand) - key; int vl = value ? ampersand - value : 0; if (kl) { char *k = urlMakeString(key, kl); urlUnescape(k); char *v = NULL; if (value) { v = urlMakeString(value, vl); urlUnescape(v); } addToHashMap(hashmap, k, v); } key = ampersand + 1; value = NULL; } if (!ch) { break; } } } static void urlParseHeaderLine(struct HashMap *hashmap, const char *s, int len) { while (s && len > 0) { while (len > 0 && (*s == ' ' || *s == ';')) { s++; len--; } const char *key = s; const char *value = NULL; while (len > 0 && *s != ';') { if (*s == '=' && value == NULL) { value = s + 1; } s++; len--; } int kl = (value ? value-1 : s) - key; int vl = value ? s - value : 0; if (kl) { char *k = urlMakeString(key, kl); for (char *t = k; *t; t++) { if (*t >= 'a' && *t <= 'z') { *t |= 0x20; } } char *v = NULL; if (value) { if (vl >= 2 && value[0] == '"' && value[vl-1] == '"') { value++; vl--; } v = urlMakeString(value, vl); } addToHashMap(hashmap, k, v); } } } static const char *urlMemstr(const char *buf, int len, const char *s) { int sLen = strlen(s); if (!sLen) { return buf; } while (len >= sLen) { if (len > sLen) { char *first = memchr(buf, *s, len - sLen); if (!first) { return NULL; } len -= first - buf; buf = first; } if (!memcmp(buf, s, sLen)) { return buf; } buf++; len--; } return NULL; } static int urlMemcmp(const char *buf, int len, const char *s) { int sLen = strlen(s); if (len < sLen) { return s[len]; } else { return memcmp(buf, s, sLen); } } static int urlMemcasecmp(const char *buf, int len, const char *s) { int sLen = strlen(s); if (len < sLen) { return s[len]; } else { return strncasecmp(buf, s, sLen); } } static void urlParsePart(struct URL *url, const char *buf, int len) { // Most browsers seem to forget quoting data in the header fields. This // means, it is quite possible for an HTML form to cause the submission of // unparseable "multipart/form-data". If this happens, we just give up // and ignore the malformed data. // Example: // <form method="POST" enctype="multipart/form-data"> // <input type="file" name="&quot;&#13;&#10;X: x=&quot;"> // <input type="submit"> // </form> char *name = NULL; for (const char *eol; !!(eol = urlMemstr(buf, len, "\r\n")); ) { if (buf == eol) { buf += 2; len -= 2; if (name) { char *value = len ? urlMakeString(buf, len) : NULL; addToHashMap(&url->args, name, value); name = NULL; } break; } else { if (!name && !urlMemcasecmp(buf, len, "content-disposition:")) { struct HashMap fields; initHashMap(&fields, urlDestroyHashMapEntry, NULL); urlParseHeaderLine(&fields, buf + 20, eol - buf - 20); if (getRefFromHashMap(&fields, "form-data")) { // We currently don't bother to deal with binary files (e.g. 
files // that include NUL characters). If this ever becomes necessary, // we could check for the existence of a "filename" field and use // that as an indicator to store the payload in something other // than "url->args". name = (char *)getFromHashMap(&fields, "name"); if (name && *name) { check(name = strdup(name)); } } destroyHashMap(&fields); } len -= eol - buf + 2; buf = eol + 2; } } free(name); } static void urlParsePostBody(struct URL *url, const struct HttpConnection *http, const char *buf, int len) { struct HashMap contentType; initHashMap(&contentType, urlDestroyHashMapEntry, NULL); const char *ctHeader = getFromHashMap(&http->header, "content-type"); urlParseHeaderLine(&contentType, ctHeader, ctHeader ? strlen(ctHeader) : 0); if (getRefFromHashMap(&contentType, "application/x-www-form-urlencoded")) { urlParseQueryString(&url->args, buf, len); } else if (getRefFromHashMap(&contentType, "multipart/form-data")) { const char *boundary = getFromHashMap(&contentType, "boundary"); if (boundary && *boundary) { const char *lastPart = NULL; for (const char *part = buf; len > 0; ) { const char *ptr; if ((part == buf && (ptr = urlMemstr(part, len, "--")) != NULL) || (ptr = urlMemstr(part, len, "\r\n--")) != NULL) { len -= ptr - part + (part == buf ? 2 : 4); part = ptr + (part == buf ? 2 : 4); if (!urlMemcmp(part, len, boundary)) { int i = strlen(boundary); len -= i; part += i; if (!urlMemcmp(part, len, "\r\n")) { len -= 2; part += 2; if (lastPart) { urlParsePart(url, lastPart, ptr - lastPart); } else { if (ptr != buf) { info("[http] Ignoring prologue before \"multipart/form-data\"!"); } } lastPart = part; } else if (!urlMemcmp(part, len, "--\r\n")) { len -= 4; part += 4; urlParsePart(url, lastPart, ptr - lastPart); lastPart = NULL; if (len > 0) { info("[http] Ignoring epilogue past end of \"multipart/" "form-data\"!"); } } } } } if (lastPart) { warn("[http] Missing final \"boundary\" for \"multipart/form-data\"!"); } } else { warn("[http] Missing \"boundary\" information for \"multipart/form-data\"!"); } } destroyHashMap(&contentType); } struct URL *newURL(const struct HttpConnection *http, const char *buf, int len) { struct URL *url; check(url = malloc(sizeof(struct URL))); initURL(url, http, buf, len); return url; } void initURL(struct URL *url, const struct HttpConnection *http, const char *buf, int len) { url->protocol = strdup(httpGetProtocol(http)); url->user = NULL; url->password = NULL; url->host = strdup(httpGetHost(http)); url->port = httpGetPort(http); url->path = strdup(httpGetPath(http)); url->pathinfo = strdup(httpGetPathInfo(http)); url->query = strdup(httpGetQuery(http)); url->anchor = NULL; url->url = NULL; initHashMap(&url->args, urlDestroyHashMapEntry, NULL); if (!strcmp(http->method, "GET")) { check(url->query); urlParseQueryString(&url->args, url->query, strlen(url->query)); } else if (!strcmp(http->method, "POST")) { urlParsePostBody(url, http, buf, len); } } void destroyURL(struct URL *url) { if (url) { free(url->protocol); free(url->user); free(url->password); free(url->host); free(url->path); free(url->pathinfo); free(url->query); free(url->anchor); free(url->url); destroyHashMap(&url->args); } } void deleteURL(struct URL *url) { destroyURL(url); free(url); } const char *urlGetProtocol(struct URL *url) { return url->protocol; } const char *urlGetUser(struct URL *url) { return url->user; } const char *urlGetPassword(struct URL *url) { return url->password; } const char *urlGetHost(struct URL *url) { return url->host; } int urlGetPort(struct URL *url) { return 
url->port; } const char *urlGetPath(struct URL *url) { return url->path; } const char *urlGetPathInfo(struct URL *url) { return url->pathinfo; } const char *urlGetQuery(struct URL *url) { return url->query; } const char *urlGetAnchor(struct URL *url) { return url->anchor; } const char *urlGetURL(struct URL *url) { if (!url->url) { const char *host = urlGetHost(url); int s_size = 8 + strlen(host) + 25 + strlen(url->path); check(*(char **)&url->url = malloc(s_size + 1)); *url->url = '\000'; strncat(url->url, url->protocol, s_size); strncat(url->url, "://", s_size); strncat(url->url, host, s_size); if (url->port != (strcmp(url->protocol, "http") ? 443 : 80)) { snprintf(strrchr(url->url, '\000'), 25, ":%d", url->port); } strncat(url->url, url->path, s_size); } return url->url; } const struct HashMap *urlGetArgs(struct URL *url) { return &url->args; } struct HashMap *urlParseQuery(const char *buf, int len) { struct HashMap *hashmap = newHashMap(urlDestroyHashMapEntry, NULL); urlParseQueryString(hashmap, buf, len); return hashmap; }
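/*
 * Standalone sketch (illustrative only, separate from url.c) of the
 * percent-decoding rule implemented by urlUnescape() above: '+' becomes a
 * space and "%xx" becomes the byte 0xXX. The real code first splits the
 * query on '&'/'=' and then unescapes each key and value, and it also warns
 * about malformed escapes, which this sketch skips.
 */
#include <stdio.h>

static int demo_hexval(char c) {
  if (c >= '0' && c <= '9')
    return c - '0';
  c |= 0x20;
  if (c >= 'a' && c <= 'f')
    return c - 'a' + 10;
  return -1;
}

static void demo_decode(const char *in, char *out) {
  while (*in) {
    if (*in == '+') {
      *out++ = ' ';
      in++;
    } else if (*in == '%' &&
               demo_hexval(in[1]) >= 0 && demo_hexval(in[2]) >= 0) {
      *out++ = (char)(demo_hexval(in[1]) << 4 | demo_hexval(in[2]));
      in += 3;
    } else {
      *out++ = *in++;
    }
  }
  *out = '\000';
}

int main(void) {
  char buf[64];
  demo_decode("msg=hello%20world%21", buf);
  printf("%s\n", buf);  /* prints: msg=hello world! */
  return 0;
}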
./CrossVul/dataset_final_sorted/CWE-400/c/bad_371_1
crossvul-cpp_data_bad_4422_2
// SPDX-License-Identifier: GPL-2.0-only /* * Xen event channels * * Xen models interrupts with abstract event channels. Because each * domain gets 1024 event channels, but NR_IRQ is not that large, we * must dynamically map irqs<->event channels. The event channels * interface with the rest of the kernel by defining a xen interrupt * chip. When an event is received, it is mapped to an irq and sent * through the normal interrupt processing path. * * There are four kinds of events which can be mapped to an event * channel: * * 1. Inter-domain notifications. This includes all the virtual * device events, since they're driven by front-ends in another domain * (typically dom0). * 2. VIRQs, typically used for timers. These are per-cpu events. * 3. IPIs. * 4. PIRQs - Hardware interrupts. * * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 */ #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt #include <linux/linkage.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/moduleparam.h> #include <linux/string.h> #include <linux/memblock.h> #include <linux/slab.h> #include <linux/irqnr.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/cpuhotplug.h> #ifdef CONFIG_X86 #include <asm/desc.h> #include <asm/ptrace.h> #include <asm/idtentry.h> #include <asm/irq.h> #include <asm/io_apic.h> #include <asm/i8259.h> #include <asm/xen/pci.h> #endif #include <asm/sync_bitops.h> #include <asm/xen/hypercall.h> #include <asm/xen/hypervisor.h> #include <xen/page.h> #include <xen/xen.h> #include <xen/hvm.h> #include <xen/xen-ops.h> #include <xen/events.h> #include <xen/interface/xen.h> #include <xen/interface/event_channel.h> #include <xen/interface/hvm/hvm_op.h> #include <xen/interface/hvm/params.h> #include <xen/interface/physdev.h> #include <xen/interface/sched.h> #include <xen/interface/vcpu.h> #include <asm/hw_irq.h> #include "events_internal.h" const struct evtchn_ops *evtchn_ops; /* * This lock protects updates to the following mapping and reference-count * arrays. The lock does not need to be acquired to read the mapping tables. */ static DEFINE_MUTEX(irq_mapping_update_lock); /* * Lock protecting event handling loop against removing event channels. * Adding of event channels is no issue as the associated IRQ becomes active * only after everything is setup (before request_[threaded_]irq() the handler * can't be entered for an event, as the event channel will be unmasked only * then). */ static DEFINE_RWLOCK(evtchn_rwlock); /* * Lock hierarchy: * * irq_mapping_update_lock * evtchn_rwlock * IRQ-desc lock */ static LIST_HEAD(xen_irq_list_head); /* IRQ <-> VIRQ mapping. */ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1}; /* IRQ <-> IPI mapping */ static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1}; int **evtchn_to_irq; #ifdef CONFIG_X86 static unsigned long *pirq_eoi_map; #endif static bool (*pirq_needs_eoi)(unsigned irq); #define EVTCHN_ROW(e) (e / (PAGE_SIZE/sizeof(**evtchn_to_irq))) #define EVTCHN_COL(e) (e % (PAGE_SIZE/sizeof(**evtchn_to_irq))) #define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq)) /* Xen will never allocate port zero for any purpose. 
*/ #define VALID_EVTCHN(chn) ((chn) != 0) static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY]; static struct irq_chip xen_dynamic_chip; static struct irq_chip xen_lateeoi_chip; static struct irq_chip xen_percpu_chip; static struct irq_chip xen_pirq_chip; static void enable_dynirq(struct irq_data *data); static void disable_dynirq(struct irq_data *data); static void clear_evtchn_to_irq_row(unsigned row) { unsigned col; for (col = 0; col < EVTCHN_PER_ROW; col++) WRITE_ONCE(evtchn_to_irq[row][col], -1); } static void clear_evtchn_to_irq_all(void) { unsigned row; for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) { if (evtchn_to_irq[row] == NULL) continue; clear_evtchn_to_irq_row(row); } } static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq) { unsigned row; unsigned col; if (evtchn >= xen_evtchn_max_channels()) return -EINVAL; row = EVTCHN_ROW(evtchn); col = EVTCHN_COL(evtchn); if (evtchn_to_irq[row] == NULL) { /* Unallocated irq entries return -1 anyway */ if (irq == -1) return 0; evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL); if (evtchn_to_irq[row] == NULL) return -ENOMEM; clear_evtchn_to_irq_row(row); } WRITE_ONCE(evtchn_to_irq[row][col], irq); return 0; } int get_evtchn_to_irq(evtchn_port_t evtchn) { if (evtchn >= xen_evtchn_max_channels()) return -1; if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL) return -1; return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]); } /* Get info for IRQ */ struct irq_info *info_for_irq(unsigned irq) { if (irq < nr_legacy_irqs()) return legacy_info_ptrs[irq]; else return irq_get_chip_data(irq); } static void set_info_for_irq(unsigned int irq, struct irq_info *info) { if (irq < nr_legacy_irqs()) legacy_info_ptrs[irq] = info; else irq_set_chip_data(irq, info); } /* Constructors for packed IRQ information. */ static int xen_irq_info_common_setup(struct irq_info *info, unsigned irq, enum xen_irq_type type, evtchn_port_t evtchn, unsigned short cpu) { int ret; BUG_ON(info->type != IRQT_UNBOUND && info->type != type); info->type = type; info->irq = irq; info->evtchn = evtchn; info->cpu = cpu; ret = set_evtchn_to_irq(evtchn, irq); if (ret < 0) return ret; irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN); return xen_evtchn_port_setup(info); } static int xen_irq_info_evtchn_setup(unsigned irq, evtchn_port_t evtchn) { struct irq_info *info = info_for_irq(irq); return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0); } static int xen_irq_info_ipi_setup(unsigned cpu, unsigned irq, evtchn_port_t evtchn, enum ipi_vector ipi) { struct irq_info *info = info_for_irq(irq); info->u.ipi = ipi; per_cpu(ipi_to_irq, cpu)[ipi] = irq; return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0); } static int xen_irq_info_virq_setup(unsigned cpu, unsigned irq, evtchn_port_t evtchn, unsigned virq) { struct irq_info *info = info_for_irq(irq); info->u.virq = virq; per_cpu(virq_to_irq, cpu)[virq] = irq; return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0); } static int xen_irq_info_pirq_setup(unsigned irq, evtchn_port_t evtchn, unsigned pirq, unsigned gsi, uint16_t domid, unsigned char flags) { struct irq_info *info = info_for_irq(irq); info->u.pirq.pirq = pirq; info->u.pirq.gsi = gsi; info->u.pirq.domid = domid; info->u.pirq.flags = flags; return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0); } static void xen_irq_info_cleanup(struct irq_info *info) { set_evtchn_to_irq(info->evtchn, -1); info->evtchn = 0; } /* * Accessors for packed IRQ information. 
*/ evtchn_port_t evtchn_from_irq(unsigned irq) { const struct irq_info *info = NULL; if (likely(irq < nr_irqs)) info = info_for_irq(irq); if (!info) return 0; return info->evtchn; } unsigned int irq_from_evtchn(evtchn_port_t evtchn) { return get_evtchn_to_irq(evtchn); } EXPORT_SYMBOL_GPL(irq_from_evtchn); int irq_from_virq(unsigned int cpu, unsigned int virq) { return per_cpu(virq_to_irq, cpu)[virq]; } static enum ipi_vector ipi_from_irq(unsigned irq) { struct irq_info *info = info_for_irq(irq); BUG_ON(info == NULL); BUG_ON(info->type != IRQT_IPI); return info->u.ipi; } static unsigned virq_from_irq(unsigned irq) { struct irq_info *info = info_for_irq(irq); BUG_ON(info == NULL); BUG_ON(info->type != IRQT_VIRQ); return info->u.virq; } static unsigned pirq_from_irq(unsigned irq) { struct irq_info *info = info_for_irq(irq); BUG_ON(info == NULL); BUG_ON(info->type != IRQT_PIRQ); return info->u.pirq.pirq; } static enum xen_irq_type type_from_irq(unsigned irq) { return info_for_irq(irq)->type; } unsigned cpu_from_irq(unsigned irq) { return info_for_irq(irq)->cpu; } unsigned int cpu_from_evtchn(evtchn_port_t evtchn) { int irq = get_evtchn_to_irq(evtchn); unsigned ret = 0; if (irq != -1) ret = cpu_from_irq(irq); return ret; } #ifdef CONFIG_X86 static bool pirq_check_eoi_map(unsigned irq) { return test_bit(pirq_from_irq(irq), pirq_eoi_map); } #endif static bool pirq_needs_eoi_flag(unsigned irq) { struct irq_info *info = info_for_irq(irq); BUG_ON(info->type != IRQT_PIRQ); return info->u.pirq.flags & PIRQ_NEEDS_EOI; } static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu) { int irq = get_evtchn_to_irq(evtchn); struct irq_info *info = info_for_irq(irq); BUG_ON(irq == -1); #ifdef CONFIG_SMP cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu)); #endif xen_evtchn_port_bind_to_cpu(info, cpu); info->cpu = cpu; } /** * notify_remote_via_irq - send event to remote end of event channel via irq * @irq: irq of event channel to send event to * * Unlike notify_remote_via_evtchn(), this is safe to use across * save/restore. Notifications on a broken connection are silently * dropped. */ void notify_remote_via_irq(int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); if (VALID_EVTCHN(evtchn)) notify_remote_via_evtchn(evtchn); } EXPORT_SYMBOL_GPL(notify_remote_via_irq); static void xen_irq_lateeoi_locked(struct irq_info *info) { evtchn_port_t evtchn; evtchn = info->evtchn; if (!VALID_EVTCHN(evtchn)) return; unmask_evtchn(evtchn); } void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags) { struct irq_info *info; unsigned long flags; read_lock_irqsave(&evtchn_rwlock, flags); info = info_for_irq(irq); if (info) xen_irq_lateeoi_locked(info); read_unlock_irqrestore(&evtchn_rwlock, flags); } EXPORT_SYMBOL_GPL(xen_irq_lateeoi); static void xen_irq_init(unsigned irq) { struct irq_info *info; #ifdef CONFIG_SMP /* By default all event channels notify CPU#0. 
*/ cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0)); #endif info = kzalloc(sizeof(*info), GFP_KERNEL); if (info == NULL) panic("Unable to allocate metadata for IRQ%d\n", irq); info->type = IRQT_UNBOUND; info->refcnt = -1; set_info_for_irq(irq, info); list_add_tail(&info->list, &xen_irq_list_head); } static int __must_check xen_allocate_irqs_dynamic(int nvec) { int i, irq = irq_alloc_descs(-1, 0, nvec, -1); if (irq >= 0) { for (i = 0; i < nvec; i++) xen_irq_init(irq + i); } return irq; } static inline int __must_check xen_allocate_irq_dynamic(void) { return xen_allocate_irqs_dynamic(1); } static int __must_check xen_allocate_irq_gsi(unsigned gsi) { int irq; /* * A PV guest has no concept of a GSI (since it has no ACPI * nor access to/knowledge of the physical APICs). Therefore * all IRQs are dynamically allocated from the entire IRQ * space. */ if (xen_pv_domain() && !xen_initial_domain()) return xen_allocate_irq_dynamic(); /* Legacy IRQ descriptors are already allocated by the arch. */ if (gsi < nr_legacy_irqs()) irq = gsi; else irq = irq_alloc_desc_at(gsi, -1); xen_irq_init(irq); return irq; } static void xen_free_irq(unsigned irq) { struct irq_info *info = info_for_irq(irq); unsigned long flags; if (WARN_ON(!info)) return; write_lock_irqsave(&evtchn_rwlock, flags); list_del(&info->list); set_info_for_irq(irq, NULL); WARN_ON(info->refcnt > 0); write_unlock_irqrestore(&evtchn_rwlock, flags); kfree(info); /* Legacy IRQ descriptors are managed by the arch. */ if (irq < nr_legacy_irqs()) return; irq_free_desc(irq); } static void xen_evtchn_close(evtchn_port_t port) { struct evtchn_close close; close.port = port; if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) BUG(); } static void pirq_query_unmask(int irq) { struct physdev_irq_status_query irq_status; struct irq_info *info = info_for_irq(irq); BUG_ON(info->type != IRQT_PIRQ); irq_status.irq = pirq_from_irq(irq); if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status)) irq_status.flags = 0; info->u.pirq.flags &= ~PIRQ_NEEDS_EOI; if (irq_status.flags & XENIRQSTAT_needs_eoi) info->u.pirq.flags |= PIRQ_NEEDS_EOI; } static void eoi_pirq(struct irq_data *data) { evtchn_port_t evtchn = evtchn_from_irq(data->irq); struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; int rc = 0; if (!VALID_EVTCHN(evtchn)) return; if (unlikely(irqd_is_setaffinity_pending(data)) && likely(!irqd_irq_disabled(data))) { int masked = test_and_set_mask(evtchn); clear_evtchn(evtchn); irq_move_masked_irq(data); if (!masked) unmask_evtchn(evtchn); } else clear_evtchn(evtchn); if (pirq_needs_eoi(data->irq)) { rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); WARN_ON(rc); } } static void mask_ack_pirq(struct irq_data *data) { disable_dynirq(data); eoi_pirq(data); } static unsigned int __startup_pirq(unsigned int irq) { struct evtchn_bind_pirq bind_pirq; struct irq_info *info = info_for_irq(irq); evtchn_port_t evtchn = evtchn_from_irq(irq); int rc; BUG_ON(info->type != IRQT_PIRQ); if (VALID_EVTCHN(evtchn)) goto out; bind_pirq.pirq = pirq_from_irq(irq); /* NB. We are happy to share unless we are probing. */ bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ? 
BIND_PIRQ__WILL_SHARE : 0; rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq); if (rc != 0) { pr_warn("Failed to obtain physical IRQ %d\n", irq); return 0; } evtchn = bind_pirq.port; pirq_query_unmask(irq); rc = set_evtchn_to_irq(evtchn, irq); if (rc) goto err; info->evtchn = evtchn; bind_evtchn_to_cpu(evtchn, 0); rc = xen_evtchn_port_setup(info); if (rc) goto err; out: unmask_evtchn(evtchn); eoi_pirq(irq_get_irq_data(irq)); return 0; err: pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc); xen_evtchn_close(evtchn); return 0; } static unsigned int startup_pirq(struct irq_data *data) { return __startup_pirq(data->irq); } static void shutdown_pirq(struct irq_data *data) { unsigned int irq = data->irq; struct irq_info *info = info_for_irq(irq); evtchn_port_t evtchn = evtchn_from_irq(irq); BUG_ON(info->type != IRQT_PIRQ); if (!VALID_EVTCHN(evtchn)) return; mask_evtchn(evtchn); xen_evtchn_close(evtchn); xen_irq_info_cleanup(info); } static void enable_pirq(struct irq_data *data) { enable_dynirq(data); } static void disable_pirq(struct irq_data *data) { disable_dynirq(data); } int xen_irq_from_gsi(unsigned gsi) { struct irq_info *info; list_for_each_entry(info, &xen_irq_list_head, list) { if (info->type != IRQT_PIRQ) continue; if (info->u.pirq.gsi == gsi) return info->irq; } return -1; } EXPORT_SYMBOL_GPL(xen_irq_from_gsi); static void __unbind_from_irq(unsigned int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); struct irq_info *info = info_for_irq(irq); if (info->refcnt > 0) { info->refcnt--; if (info->refcnt != 0) return; } if (VALID_EVTCHN(evtchn)) { unsigned int cpu = cpu_from_irq(irq); xen_evtchn_close(evtchn); switch (type_from_irq(irq)) { case IRQT_VIRQ: per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1; break; case IRQT_IPI: per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1; break; default: break; } xen_irq_info_cleanup(info); } xen_free_irq(irq); } /* * Do not make any assumptions regarding the relationship between the * IRQ number returned here and the Xen pirq argument. * * Note: We don't assign an event channel until the irq actually started * up. Return an existing irq if we've already got one for the gsi. * * Shareable implies level triggered, not shareable implies edge * triggered here. */ int xen_bind_pirq_gsi_to_irq(unsigned gsi, unsigned pirq, int shareable, char *name) { int irq = -1; struct physdev_irq irq_op; int ret; mutex_lock(&irq_mapping_update_lock); irq = xen_irq_from_gsi(gsi); if (irq != -1) { pr_info("%s: returning irq %d for gsi %u\n", __func__, irq, gsi); goto out; } irq = xen_allocate_irq_gsi(gsi); if (irq < 0) goto out; irq_op.irq = irq; irq_op.vector = 0; /* Only the privileged domain can do this. For non-priv, the pcifront * driver provides a PCI bus that does the call to do exactly * this in the priv domain. */ if (xen_initial_domain() && HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) { xen_free_irq(irq); irq = -ENOSPC; goto out; } ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF, shareable ? PIRQ_SHAREABLE : 0); if (ret < 0) { __unbind_from_irq(irq); irq = ret; goto out; } pirq_query_unmask(irq); /* We try to use the handler with the appropriate semantic for the * type of interrupt: if the interrupt is an edge triggered * interrupt we use handle_edge_irq. * * On the other hand if the interrupt is level triggered we use * handle_fasteoi_irq like the native code does for this kind of * interrupts. 
* * Depending on the Xen version, pirq_needs_eoi might return true * not only for level triggered interrupts but for edge triggered * interrupts too. In any case Xen always honors the eoi mechanism, * not injecting any more pirqs of the same kind if the first one * hasn't received an eoi yet. Therefore using the fasteoi handler * is the right choice either way. */ if (shareable) irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_fasteoi_irq, name); else irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq, name); out: mutex_unlock(&irq_mapping_update_lock); return irq; } #ifdef CONFIG_PCI_MSI int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc) { int rc; struct physdev_get_free_pirq op_get_free_pirq; op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI; rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq); WARN_ONCE(rc == -ENOSYS, "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n"); return rc ? -1 : op_get_free_pirq.pirq; } int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, int pirq, int nvec, const char *name, domid_t domid) { int i, irq, ret; mutex_lock(&irq_mapping_update_lock); irq = xen_allocate_irqs_dynamic(nvec); if (irq < 0) goto out; for (i = 0; i < nvec; i++) { irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name); ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid, i == 0 ? 0 : PIRQ_MSI_GROUP); if (ret < 0) goto error_irq; } ret = irq_set_msi_desc(irq, msidesc); if (ret < 0) goto error_irq; out: mutex_unlock(&irq_mapping_update_lock); return irq; error_irq: while (nvec--) __unbind_from_irq(irq + nvec); mutex_unlock(&irq_mapping_update_lock); return ret; } #endif int xen_destroy_irq(int irq) { struct physdev_unmap_pirq unmap_irq; struct irq_info *info = info_for_irq(irq); int rc = -ENOENT; mutex_lock(&irq_mapping_update_lock); /* * If trying to remove a vector in a MSI group different * than the first one skip the PIRQ unmap unless this vector * is the first one in the group. */ if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) { unmap_irq.pirq = info->u.pirq.pirq; unmap_irq.domid = info->u.pirq.domid; rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq); /* If another domain quits without making the pci_disable_msix * call, the Xen hypervisor takes care of freeing the PIRQs * (free_domain_pirqs). 
*/ if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF)) pr_info("domain %d does not have %d anymore\n", info->u.pirq.domid, info->u.pirq.pirq); else if (rc) { pr_warn("unmap irq failed %d\n", rc); goto out; } } xen_free_irq(irq); out: mutex_unlock(&irq_mapping_update_lock); return rc; } int xen_irq_from_pirq(unsigned pirq) { int irq; struct irq_info *info; mutex_lock(&irq_mapping_update_lock); list_for_each_entry(info, &xen_irq_list_head, list) { if (info->type != IRQT_PIRQ) continue; irq = info->irq; if (info->u.pirq.pirq == pirq) goto out; } irq = -1; out: mutex_unlock(&irq_mapping_update_lock); return irq; } int xen_pirq_from_irq(unsigned irq) { return pirq_from_irq(irq); } EXPORT_SYMBOL_GPL(xen_pirq_from_irq); static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip) { int irq; int ret; if (evtchn >= xen_evtchn_max_channels()) return -ENOMEM; mutex_lock(&irq_mapping_update_lock); irq = get_evtchn_to_irq(evtchn); if (irq == -1) { irq = xen_allocate_irq_dynamic(); if (irq < 0) goto out; irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "event"); ret = xen_irq_info_evtchn_setup(irq, evtchn); if (ret < 0) { __unbind_from_irq(irq); irq = ret; goto out; } /* New interdomain events are bound to VCPU 0. */ bind_evtchn_to_cpu(evtchn, 0); } else { struct irq_info *info = info_for_irq(irq); WARN_ON(info == NULL || info->type != IRQT_EVTCHN); } out: mutex_unlock(&irq_mapping_update_lock); return irq; } int bind_evtchn_to_irq(evtchn_port_t evtchn) { return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip); } EXPORT_SYMBOL_GPL(bind_evtchn_to_irq); int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn) { return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip); } EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi); static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) { struct evtchn_bind_ipi bind_ipi; evtchn_port_t evtchn; int ret, irq; mutex_lock(&irq_mapping_update_lock); irq = per_cpu(ipi_to_irq, cpu)[ipi]; if (irq == -1) { irq = xen_allocate_irq_dynamic(); if (irq < 0) goto out; irq_set_chip_and_handler_name(irq, &xen_percpu_chip, handle_percpu_irq, "ipi"); bind_ipi.vcpu = xen_vcpu_nr(cpu); if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi) != 0) BUG(); evtchn = bind_ipi.port; ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi); if (ret < 0) { __unbind_from_irq(irq); irq = ret; goto out; } bind_evtchn_to_cpu(evtchn, cpu); } else { struct irq_info *info = info_for_irq(irq); WARN_ON(info == NULL || info->type != IRQT_IPI); } out: mutex_unlock(&irq_mapping_update_lock); return irq; } static int bind_interdomain_evtchn_to_irq_chip(unsigned int remote_domain, evtchn_port_t remote_port, struct irq_chip *chip) { struct evtchn_bind_interdomain bind_interdomain; int err; bind_interdomain.remote_dom = remote_domain; bind_interdomain.remote_port = remote_port; err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &bind_interdomain); return err ? 
: bind_evtchn_to_irq_chip(bind_interdomain.local_port, chip); } int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, evtchn_port_t remote_port) { return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port, &xen_dynamic_chip); } EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq); int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain, evtchn_port_t remote_port) { return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port, &xen_lateeoi_chip); } EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi); static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn) { struct evtchn_status status; evtchn_port_t port; int rc = -ENOENT; memset(&status, 0, sizeof(status)); for (port = 0; port < xen_evtchn_max_channels(); port++) { status.dom = DOMID_SELF; status.port = port; rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status); if (rc < 0) continue; if (status.status != EVTCHNSTAT_virq) continue; if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) { *evtchn = port; break; } } return rc; } /** * xen_evtchn_nr_channels - number of usable event channel ports * * This may be less than the maximum supported by the current * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum * supported. */ unsigned xen_evtchn_nr_channels(void) { return evtchn_ops->nr_channels(); } EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels); int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu) { struct evtchn_bind_virq bind_virq; evtchn_port_t evtchn = 0; int irq, ret; mutex_lock(&irq_mapping_update_lock); irq = per_cpu(virq_to_irq, cpu)[virq]; if (irq == -1) { irq = xen_allocate_irq_dynamic(); if (irq < 0) goto out; if (percpu) irq_set_chip_and_handler_name(irq, &xen_percpu_chip, handle_percpu_irq, "virq"); else irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, handle_edge_irq, "virq"); bind_virq.virq = virq; bind_virq.vcpu = xen_vcpu_nr(cpu); ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq); if (ret == 0) evtchn = bind_virq.port; else { if (ret == -EEXIST) ret = find_virq(virq, cpu, &evtchn); BUG_ON(ret < 0); } ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq); if (ret < 0) { __unbind_from_irq(irq); irq = ret; goto out; } bind_evtchn_to_cpu(evtchn, cpu); } else { struct irq_info *info = info_for_irq(irq); WARN_ON(info == NULL || info->type != IRQT_VIRQ); } out: mutex_unlock(&irq_mapping_update_lock); return irq; } static void unbind_from_irq(unsigned int irq) { mutex_lock(&irq_mapping_update_lock); __unbind_from_irq(irq); mutex_unlock(&irq_mapping_update_lock); } static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id, struct irq_chip *chip) { int irq, retval; irq = bind_evtchn_to_irq_chip(evtchn, chip); if (irq < 0) return irq; retval = request_irq(irq, handler, irqflags, devname, dev_id); if (retval != 0) { unbind_from_irq(irq); return retval; } return irq; } int bind_evtchn_to_irqhandler(evtchn_port_t evtchn, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags, devname, dev_id, &xen_dynamic_chip); } EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler); int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags, devname, dev_id, &xen_lateeoi_chip); } 
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler_lateeoi); static int bind_interdomain_evtchn_to_irqhandler_chip( unsigned int remote_domain, evtchn_port_t remote_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id, struct irq_chip *chip) { int irq, retval; irq = bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port, chip); if (irq < 0) return irq; retval = request_irq(irq, handler, irqflags, devname, dev_id); if (retval != 0) { unbind_from_irq(irq); return retval; } return irq; } int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, evtchn_port_t remote_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain, remote_port, handler, irqflags, devname, dev_id, &xen_dynamic_chip); } EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler); int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain, evtchn_port_t remote_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain, remote_port, handler, irqflags, devname, dev_id, &xen_lateeoi_chip); } EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler_lateeoi); int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { int irq, retval; irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU); if (irq < 0) return irq; retval = request_irq(irq, handler, irqflags, devname, dev_id); if (retval != 0) { unbind_from_irq(irq); return retval; } return irq; } EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler); int bind_ipi_to_irqhandler(enum ipi_vector ipi, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { int irq, retval; irq = bind_ipi_to_irq(ipi, cpu); if (irq < 0) return irq; irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME; retval = request_irq(irq, handler, irqflags, devname, dev_id); if (retval != 0) { unbind_from_irq(irq); return retval; } return irq; } void unbind_from_irqhandler(unsigned int irq, void *dev_id) { struct irq_info *info = info_for_irq(irq); if (WARN_ON(!info)) return; free_irq(irq, dev_id); unbind_from_irq(irq); } EXPORT_SYMBOL_GPL(unbind_from_irqhandler); /** * xen_set_irq_priority() - set an event channel priority. * @irq:irq bound to an event channel. * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN. 
*/ int xen_set_irq_priority(unsigned irq, unsigned priority) { struct evtchn_set_priority set_priority; set_priority.port = evtchn_from_irq(irq); set_priority.priority = priority; return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority, &set_priority); } EXPORT_SYMBOL_GPL(xen_set_irq_priority); int evtchn_make_refcounted(evtchn_port_t evtchn) { int irq = get_evtchn_to_irq(evtchn); struct irq_info *info; if (irq == -1) return -ENOENT; info = info_for_irq(irq); if (!info) return -ENOENT; WARN_ON(info->refcnt != -1); info->refcnt = 1; return 0; } EXPORT_SYMBOL_GPL(evtchn_make_refcounted); int evtchn_get(evtchn_port_t evtchn) { int irq; struct irq_info *info; int err = -ENOENT; if (evtchn >= xen_evtchn_max_channels()) return -EINVAL; mutex_lock(&irq_mapping_update_lock); irq = get_evtchn_to_irq(evtchn); if (irq == -1) goto done; info = info_for_irq(irq); if (!info) goto done; err = -EINVAL; if (info->refcnt <= 0) goto done; info->refcnt++; err = 0; done: mutex_unlock(&irq_mapping_update_lock); return err; } EXPORT_SYMBOL_GPL(evtchn_get); void evtchn_put(evtchn_port_t evtchn) { int irq = get_evtchn_to_irq(evtchn); if (WARN_ON(irq == -1)) return; unbind_from_irq(irq); } EXPORT_SYMBOL_GPL(evtchn_put); void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector) { int irq; #ifdef CONFIG_X86 if (unlikely(vector == XEN_NMI_VECTOR)) { int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, xen_vcpu_nr(cpu), NULL); if (rc < 0) printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc); return; } #endif irq = per_cpu(ipi_to_irq, cpu)[vector]; BUG_ON(irq < 0); notify_remote_via_irq(irq); } static void __xen_evtchn_do_upcall(void) { struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); int cpu = smp_processor_id(); read_lock(&evtchn_rwlock); do { vcpu_info->evtchn_upcall_pending = 0; xen_evtchn_handle_events(cpu); BUG_ON(!irqs_disabled()); virt_rmb(); /* Hypervisor can set upcall pending. */ } while (vcpu_info->evtchn_upcall_pending); read_unlock(&evtchn_rwlock); } void xen_evtchn_do_upcall(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); irq_enter(); __xen_evtchn_do_upcall(); irq_exit(); set_irq_regs(old_regs); } void xen_hvm_evtchn_do_upcall(void) { __xen_evtchn_do_upcall(); } EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall); /* Rebind a new event channel to an existing irq. */ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq) { struct irq_info *info = info_for_irq(irq); if (WARN_ON(!info)) return; /* Make sure the irq is masked, since the new event channel will also be masked. */ disable_irq(irq); mutex_lock(&irq_mapping_update_lock); /* After resume the irq<->evtchn mappings are all cleared out */ BUG_ON(get_evtchn_to_irq(evtchn) != -1); /* Expect irq to have been bound before, so there should be a proper type */ BUG_ON(info->type == IRQT_UNBOUND); (void)xen_irq_info_evtchn_setup(irq, evtchn); mutex_unlock(&irq_mapping_update_lock); bind_evtchn_to_cpu(evtchn, info->cpu); /* This will be deferred until interrupt is processed */ irq_set_affinity(irq, cpumask_of(info->cpu)); /* Unmask the event channel. */ enable_irq(irq); } /* Rebind an evtchn so that it gets delivered to a specific cpu */ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu) { struct evtchn_bind_vcpu bind_vcpu; int masked; if (!VALID_EVTCHN(evtchn)) return -1; if (!xen_support_evtchn_rebind()) return -1; /* Send future instances of this interrupt to other vcpu. 
*/ bind_vcpu.port = evtchn; bind_vcpu.vcpu = xen_vcpu_nr(tcpu); /* * Mask the event while changing the VCPU binding to prevent * it being delivered on an unexpected VCPU. */ masked = test_and_set_mask(evtchn); /* * If this fails, it usually just indicates that we're dealing with a * virq or IPI channel, which don't actually need to be rebound. Ignore * it, but don't do the xenlinux-level rebind in that case. */ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) bind_evtchn_to_cpu(evtchn, tcpu); if (!masked) unmask_evtchn(evtchn); return 0; } static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest, bool force) { unsigned tcpu = cpumask_first_and(dest, cpu_online_mask); int ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu); if (!ret) irq_data_update_effective_affinity(data, cpumask_of(tcpu)); return ret; } /* To be called with desc->lock held. */ int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu) { struct irq_data *d = irq_desc_get_irq_data(desc); return set_affinity_irq(d, cpumask_of(tcpu), false); } EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn); static void enable_dynirq(struct irq_data *data) { evtchn_port_t evtchn = evtchn_from_irq(data->irq); if (VALID_EVTCHN(evtchn)) unmask_evtchn(evtchn); } static void disable_dynirq(struct irq_data *data) { evtchn_port_t evtchn = evtchn_from_irq(data->irq); if (VALID_EVTCHN(evtchn)) mask_evtchn(evtchn); } static void ack_dynirq(struct irq_data *data) { evtchn_port_t evtchn = evtchn_from_irq(data->irq); if (!VALID_EVTCHN(evtchn)) return; if (unlikely(irqd_is_setaffinity_pending(data)) && likely(!irqd_irq_disabled(data))) { int masked = test_and_set_mask(evtchn); clear_evtchn(evtchn); irq_move_masked_irq(data); if (!masked) unmask_evtchn(evtchn); } else clear_evtchn(evtchn); } static void mask_ack_dynirq(struct irq_data *data) { disable_dynirq(data); ack_dynirq(data); } static int retrigger_dynirq(struct irq_data *data) { evtchn_port_t evtchn = evtchn_from_irq(data->irq); int masked; if (!VALID_EVTCHN(evtchn)) return 0; masked = test_and_set_mask(evtchn); set_evtchn(evtchn); if (!masked) unmask_evtchn(evtchn); return 1; } static void restore_pirqs(void) { int pirq, rc, irq, gsi; struct physdev_map_pirq map_irq; struct irq_info *info; list_for_each_entry(info, &xen_irq_list_head, list) { if (info->type != IRQT_PIRQ) continue; pirq = info->u.pirq.pirq; gsi = info->u.pirq.gsi; irq = info->irq; /* save/restore of PT devices doesn't work, so at this point the * only devices present are GSI based emulated devices */ if (!gsi) continue; map_irq.domid = DOMID_SELF; map_irq.type = MAP_PIRQ_TYPE_GSI; map_irq.index = gsi; map_irq.pirq = pirq; rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); if (rc) { pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n", gsi, irq, pirq, rc); xen_free_irq(irq); continue; } printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); __startup_pirq(irq); } } static void restore_cpu_virqs(unsigned int cpu) { struct evtchn_bind_virq bind_virq; evtchn_port_t evtchn; int virq, irq; for (virq = 0; virq < NR_VIRQS; virq++) { if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) continue; BUG_ON(virq_from_irq(irq) != virq); /* Get a new binding from Xen. */ bind_virq.virq = virq; bind_virq.vcpu = xen_vcpu_nr(cpu); if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq) != 0) BUG(); evtchn = bind_virq.port; /* Record the new mapping. 
*/ (void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq); bind_evtchn_to_cpu(evtchn, cpu); } } static void restore_cpu_ipis(unsigned int cpu) { struct evtchn_bind_ipi bind_ipi; evtchn_port_t evtchn; int ipi, irq; for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) { if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) continue; BUG_ON(ipi_from_irq(irq) != ipi); /* Get a new binding from Xen. */ bind_ipi.vcpu = xen_vcpu_nr(cpu); if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi) != 0) BUG(); evtchn = bind_ipi.port; /* Record the new mapping. */ (void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi); bind_evtchn_to_cpu(evtchn, cpu); } } /* Clear an irq's pending state, in preparation for polling on it */ void xen_clear_irq_pending(int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); if (VALID_EVTCHN(evtchn)) clear_evtchn(evtchn); } EXPORT_SYMBOL(xen_clear_irq_pending); void xen_set_irq_pending(int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); if (VALID_EVTCHN(evtchn)) set_evtchn(evtchn); } bool xen_test_irq_pending(int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); bool ret = false; if (VALID_EVTCHN(evtchn)) ret = test_evtchn(evtchn); return ret; } /* Poll waiting for an irq to become pending with timeout. In the usual case, * the irq will be disabled so it won't deliver an interrupt. */ void xen_poll_irq_timeout(int irq, u64 timeout) { evtchn_port_t evtchn = evtchn_from_irq(irq); if (VALID_EVTCHN(evtchn)) { struct sched_poll poll; poll.nr_ports = 1; poll.timeout = timeout; set_xen_guest_handle(poll.ports, &evtchn); if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0) BUG(); } } EXPORT_SYMBOL(xen_poll_irq_timeout); /* Poll waiting for an irq to become pending. In the usual case, the * irq will be disabled so it won't deliver an interrupt. */ void xen_poll_irq(int irq) { xen_poll_irq_timeout(irq, 0 /* no timeout */); } /* Check whether the IRQ line is shared with other guests. */ int xen_test_irq_shared(int irq) { struct irq_info *info = info_for_irq(irq); struct physdev_irq_status_query irq_status; if (WARN_ON(!info)) return -ENOENT; irq_status.irq = info->u.pirq.pirq; if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status)) return 0; return !(irq_status.flags & XENIRQSTAT_shared); } EXPORT_SYMBOL_GPL(xen_test_irq_shared); void xen_irq_resume(void) { unsigned int cpu; struct irq_info *info; /* New event-channel space is not 'live' yet. */ xen_evtchn_resume(); /* No IRQ <-> event-channel mappings. */ list_for_each_entry(info, &xen_irq_list_head, list) info->evtchn = 0; /* zap event-channel binding */ clear_evtchn_to_irq_all(); for_each_possible_cpu(cpu) { restore_cpu_virqs(cpu); restore_cpu_ipis(cpu); } restore_pirqs(); } static struct irq_chip xen_dynamic_chip __read_mostly = { .name = "xen-dyn", .irq_disable = disable_dynirq, .irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, .irq_ack = ack_dynirq, .irq_mask_ack = mask_ack_dynirq, .irq_set_affinity = set_affinity_irq, .irq_retrigger = retrigger_dynirq, }; static struct irq_chip xen_lateeoi_chip __read_mostly = { /* The chip name needs to contain "xen-dyn" for irqbalance to work. 
*/ .name = "xen-dyn-lateeoi", .irq_disable = disable_dynirq, .irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, .irq_ack = mask_ack_dynirq, .irq_mask_ack = mask_ack_dynirq, .irq_set_affinity = set_affinity_irq, .irq_retrigger = retrigger_dynirq, }; static struct irq_chip xen_pirq_chip __read_mostly = { .name = "xen-pirq", .irq_startup = startup_pirq, .irq_shutdown = shutdown_pirq, .irq_enable = enable_pirq, .irq_disable = disable_pirq, .irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, .irq_ack = eoi_pirq, .irq_eoi = eoi_pirq, .irq_mask_ack = mask_ack_pirq, .irq_set_affinity = set_affinity_irq, .irq_retrigger = retrigger_dynirq, }; static struct irq_chip xen_percpu_chip __read_mostly = { .name = "xen-percpu", .irq_disable = disable_dynirq, .irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, .irq_ack = ack_dynirq, }; int xen_set_callback_via(uint64_t via) { struct xen_hvm_param a; a.domid = DOMID_SELF; a.index = HVM_PARAM_CALLBACK_IRQ; a.value = via; return HYPERVISOR_hvm_op(HVMOP_set_param, &a); } EXPORT_SYMBOL_GPL(xen_set_callback_via); #ifdef CONFIG_XEN_PVHVM /* Vector callbacks are better than PCI interrupts to receive event * channel notifications because we can receive vector callbacks on any * vcpu and we don't need PCI support or APIC interactions. */ void xen_setup_callback_vector(void) { uint64_t callback_via; if (xen_have_vector_callback) { callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR); if (xen_set_callback_via(callback_via)) { pr_err("Request for Xen HVM callback vector failed\n"); xen_have_vector_callback = 0; } } } static __init void xen_alloc_callback_vector(void) { if (!xen_have_vector_callback) return; pr_info("Xen HVM callback vector for event delivery is enabled\n"); alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_xen_hvm_callback); } #else void xen_setup_callback_vector(void) {} static inline void xen_alloc_callback_vector(void) {} #endif #undef MODULE_PARAM_PREFIX #define MODULE_PARAM_PREFIX "xen." static bool fifo_events = true; module_param(fifo_events, bool, 0); static int xen_evtchn_cpu_prepare(unsigned int cpu) { int ret = 0; if (evtchn_ops->percpu_init) ret = evtchn_ops->percpu_init(cpu); return ret; } static int xen_evtchn_cpu_dead(unsigned int cpu) { int ret = 0; if (evtchn_ops->percpu_deinit) ret = evtchn_ops->percpu_deinit(cpu); return ret; } void __init xen_init_IRQ(void) { int ret = -EINVAL; evtchn_port_t evtchn; if (fifo_events) ret = xen_evtchn_fifo_init(); if (ret < 0) xen_evtchn_2l_init(); cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE, "xen/evtchn:prepare", xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead); evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()), sizeof(*evtchn_to_irq), GFP_KERNEL); BUG_ON(!evtchn_to_irq); /* No event channels are 'live' right now. 
*/ for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++) mask_evtchn(evtchn); pirq_needs_eoi = pirq_needs_eoi_flag; #ifdef CONFIG_X86 if (xen_pv_domain()) { if (xen_initial_domain()) pci_xen_initial_domain(); } if (xen_feature(XENFEAT_hvm_callback_vector)) { xen_setup_callback_vector(); xen_alloc_callback_vector(); } if (xen_hvm_domain()) { native_init_IRQ(); /* pci_xen_hvm_init must be called after native_init_IRQ so that * __acpi_register_gsi can point at the right function */ pci_xen_hvm_init(); } else { int rc; struct physdev_pirq_eoi_gmfn eoi_gmfn; pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO); eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map); rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn); if (rc != 0) { free_page((unsigned long) pirq_eoi_map); pirq_eoi_map = NULL; } else pirq_needs_eoi = pirq_check_eoi_map; } #endif }
./CrossVul/dataset_final_sorted/CWE-400/c/bad_4422_2
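For context on how the event-channel binding helpers defined in the entry above are typically consumed by a Xen frontend/backend driver, here is a minimal, hedged usage sketch. The handler, function names and the device cookie are illustrative placeholders (not taken from the dataset entry); only the two API calls themselves come from the file above.

/* Hypothetical driver-side usage sketch of the evtchn binding API above.
 * All demo_* names are illustrative; only the API calls are real.
 */
#include <linux/interrupt.h>
#include <xen/events.h>

static irqreturn_t demo_evtchn_handler(int irq, void *dev_id)
{
	/* handle the notification delivered on this event channel */
	return IRQ_HANDLED;
}

static int demo_bind(evtchn_port_t evtchn, void *dev)
{
	int irq;

	/* returns a Linux irq number on success, negative errno on failure */
	irq = bind_evtchn_to_irqhandler(evtchn, demo_evtchn_handler,
					0, "demo-evtchn", dev);
	if (irq < 0)
		return irq;

	/* ... exchange notifications with the remote end ... */

	/* frees the irq and tears down the event-channel binding */
	unbind_from_irqhandler(irq, dev);
	return 0;
}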
crossvul-cpp_data_good_1235_0
/* * Copyright (c) 2017 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <net/addrconf.h> #include <linux/etherdevice.h> #include <linux/mlx5/vport.h> #include "mlx5_core.h" #include "lib/mlx5.h" #include "fpga/conn.h" #define MLX5_FPGA_PKEY 0xFFFF #define MLX5_FPGA_PKEY_INDEX 0 /* RoCE PKEY 0xFFFF is always at index 0 */ #define MLX5_FPGA_RECV_SIZE 2048 #define MLX5_FPGA_PORT_NUM 1 #define MLX5_FPGA_CQ_BUDGET 64 static int mlx5_fpga_conn_map_buf(struct mlx5_fpga_conn *conn, struct mlx5_fpga_dma_buf *buf) { struct device *dma_device; int err = 0; if (unlikely(!buf->sg[0].data)) goto out; dma_device = &conn->fdev->mdev->pdev->dev; buf->sg[0].dma_addr = dma_map_single(dma_device, buf->sg[0].data, buf->sg[0].size, buf->dma_dir); err = dma_mapping_error(dma_device, buf->sg[0].dma_addr); if (unlikely(err)) { mlx5_fpga_warn(conn->fdev, "DMA error on sg 0: %d\n", err); err = -ENOMEM; goto out; } if (!buf->sg[1].data) goto out; buf->sg[1].dma_addr = dma_map_single(dma_device, buf->sg[1].data, buf->sg[1].size, buf->dma_dir); err = dma_mapping_error(dma_device, buf->sg[1].dma_addr); if (unlikely(err)) { mlx5_fpga_warn(conn->fdev, "DMA error on sg 1: %d\n", err); dma_unmap_single(dma_device, buf->sg[0].dma_addr, buf->sg[0].size, buf->dma_dir); err = -ENOMEM; } out: return err; } static void mlx5_fpga_conn_unmap_buf(struct mlx5_fpga_conn *conn, struct mlx5_fpga_dma_buf *buf) { struct device *dma_device; dma_device = &conn->fdev->mdev->pdev->dev; if (buf->sg[1].data) dma_unmap_single(dma_device, buf->sg[1].dma_addr, buf->sg[1].size, buf->dma_dir); if (likely(buf->sg[0].data)) dma_unmap_single(dma_device, buf->sg[0].dma_addr, buf->sg[0].size, buf->dma_dir); } static int mlx5_fpga_conn_post_recv(struct mlx5_fpga_conn *conn, struct mlx5_fpga_dma_buf *buf) { struct mlx5_wqe_data_seg *data; unsigned int ix; int err = 0; err = mlx5_fpga_conn_map_buf(conn, buf); if (unlikely(err)) goto out; if (unlikely(conn->qp.rq.pc - conn->qp.rq.cc >= conn->qp.rq.size)) { mlx5_fpga_conn_unmap_buf(conn, buf); return -EBUSY; } ix = conn->qp.rq.pc & (conn->qp.rq.size - 1); data = mlx5_wq_cyc_get_wqe(&conn->qp.wq.rq, ix); data->byte_count = cpu_to_be32(buf->sg[0].size); data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key); data->addr = 
cpu_to_be64(buf->sg[0].dma_addr); conn->qp.rq.pc++; conn->qp.rq.bufs[ix] = buf; /* Make sure that descriptors are written before doorbell record. */ dma_wmb(); *conn->qp.wq.rq.db = cpu_to_be32(conn->qp.rq.pc & 0xffff); out: return err; } static void mlx5_fpga_conn_notify_hw(struct mlx5_fpga_conn *conn, void *wqe) { /* ensure wqe is visible to device before updating doorbell record */ dma_wmb(); *conn->qp.wq.sq.db = cpu_to_be32(conn->qp.sq.pc); /* Make sure that doorbell record is visible before ringing */ wmb(); mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET); } static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn, struct mlx5_fpga_dma_buf *buf) { struct mlx5_wqe_ctrl_seg *ctrl; struct mlx5_wqe_data_seg *data; unsigned int ix, sgi; int size = 1; ix = conn->qp.sq.pc & (conn->qp.sq.size - 1); ctrl = mlx5_wq_cyc_get_wqe(&conn->qp.wq.sq, ix); data = (void *)(ctrl + 1); for (sgi = 0; sgi < ARRAY_SIZE(buf->sg); sgi++) { if (!buf->sg[sgi].data) break; data->byte_count = cpu_to_be32(buf->sg[sgi].size); data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key); data->addr = cpu_to_be64(buf->sg[sgi].dma_addr); data++; size++; } ctrl->imm = 0; ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; ctrl->opmod_idx_opcode = cpu_to_be32(((conn->qp.sq.pc & 0xffff) << 8) | MLX5_OPCODE_SEND); ctrl->qpn_ds = cpu_to_be32(size | (conn->qp.mqp.qpn << 8)); conn->qp.sq.pc++; conn->qp.sq.bufs[ix] = buf; mlx5_fpga_conn_notify_hw(conn, ctrl); } int mlx5_fpga_conn_send(struct mlx5_fpga_conn *conn, struct mlx5_fpga_dma_buf *buf) { unsigned long flags; int err; if (!conn->qp.active) return -ENOTCONN; buf->dma_dir = DMA_TO_DEVICE; err = mlx5_fpga_conn_map_buf(conn, buf); if (err) return err; spin_lock_irqsave(&conn->qp.sq.lock, flags); if (conn->qp.sq.pc - conn->qp.sq.cc >= conn->qp.sq.size) { list_add_tail(&buf->list, &conn->qp.sq.backlog); goto out_unlock; } mlx5_fpga_conn_post_send(conn, buf); out_unlock: spin_unlock_irqrestore(&conn->qp.sq.lock, flags); return err; } static int mlx5_fpga_conn_post_recv_buf(struct mlx5_fpga_conn *conn) { struct mlx5_fpga_dma_buf *buf; int err; buf = kzalloc(sizeof(*buf) + MLX5_FPGA_RECV_SIZE, 0); if (!buf) return -ENOMEM; buf->sg[0].data = (void *)(buf + 1); buf->sg[0].size = MLX5_FPGA_RECV_SIZE; buf->dma_dir = DMA_FROM_DEVICE; err = mlx5_fpga_conn_post_recv(conn, buf); if (err) kfree(buf); return err; } static int mlx5_fpga_conn_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, struct mlx5_core_mkey *mkey) { int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); void *mkc; u32 *in; int err; in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA); MLX5_SET(mkc, mkc, lw, 1); MLX5_SET(mkc, mkc, lr, 1); MLX5_SET(mkc, mkc, pd, pdn); MLX5_SET(mkc, mkc, length64, 1); MLX5_SET(mkc, mkc, qpn, 0xffffff); err = mlx5_core_create_mkey(mdev, mkey, in, inlen); kvfree(in); return err; } static void mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn, struct mlx5_cqe64 *cqe, u8 status) { struct mlx5_fpga_dma_buf *buf; int ix, err; ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1); buf = conn->qp.rq.bufs[ix]; conn->qp.rq.bufs[ix] = NULL; conn->qp.rq.cc++; if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR))) mlx5_fpga_warn(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n", buf, conn->fpga_qpn, status); else mlx5_fpga_dbg(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n", buf, conn->fpga_qpn, status); 
mlx5_fpga_conn_unmap_buf(conn, buf); if (unlikely(status || !conn->qp.active)) { conn->qp.active = false; kfree(buf); return; } buf->sg[0].size = be32_to_cpu(cqe->byte_cnt); mlx5_fpga_dbg(conn->fdev, "Message with %u bytes received successfully\n", buf->sg[0].size); conn->recv_cb(conn->cb_arg, buf); buf->sg[0].size = MLX5_FPGA_RECV_SIZE; err = mlx5_fpga_conn_post_recv(conn, buf); if (unlikely(err)) { mlx5_fpga_warn(conn->fdev, "Failed to re-post recv buf: %d\n", err); kfree(buf); } } static void mlx5_fpga_conn_sq_cqe(struct mlx5_fpga_conn *conn, struct mlx5_cqe64 *cqe, u8 status) { struct mlx5_fpga_dma_buf *buf, *nextbuf; unsigned long flags; int ix; spin_lock_irqsave(&conn->qp.sq.lock, flags); ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.sq.size - 1); buf = conn->qp.sq.bufs[ix]; conn->qp.sq.bufs[ix] = NULL; conn->qp.sq.cc++; /* Handle backlog still under the spinlock to ensure message post order */ if (unlikely(!list_empty(&conn->qp.sq.backlog))) { if (likely(conn->qp.active)) { nextbuf = list_first_entry(&conn->qp.sq.backlog, struct mlx5_fpga_dma_buf, list); list_del(&nextbuf->list); mlx5_fpga_conn_post_send(conn, nextbuf); } } spin_unlock_irqrestore(&conn->qp.sq.lock, flags); if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR))) mlx5_fpga_warn(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n", buf, conn->fpga_qpn, status); else mlx5_fpga_dbg(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n", buf, conn->fpga_qpn, status); mlx5_fpga_conn_unmap_buf(conn, buf); if (likely(buf->complete)) buf->complete(conn, conn->fdev, buf, status); if (unlikely(status)) conn->qp.active = false; } static void mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn, struct mlx5_cqe64 *cqe) { u8 opcode, status = 0; opcode = get_cqe_opcode(cqe); switch (opcode) { case MLX5_CQE_REQ_ERR: status = ((struct mlx5_err_cqe *)cqe)->syndrome; /* Fall through */ case MLX5_CQE_REQ: mlx5_fpga_conn_sq_cqe(conn, cqe, status); break; case MLX5_CQE_RESP_ERR: status = ((struct mlx5_err_cqe *)cqe)->syndrome; /* Fall through */ case MLX5_CQE_RESP_SEND: mlx5_fpga_conn_rq_cqe(conn, cqe, status); break; default: mlx5_fpga_warn(conn->fdev, "Unexpected cqe opcode %u\n", opcode); } } static void mlx5_fpga_conn_arm_cq(struct mlx5_fpga_conn *conn) { mlx5_cq_arm(&conn->cq.mcq, MLX5_CQ_DB_REQ_NOT, conn->fdev->conn_res.uar->map, conn->cq.wq.cc); } static void mlx5_fpga_conn_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event event) { struct mlx5_fpga_conn *conn; conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq); mlx5_fpga_warn(conn->fdev, "CQ event %u on CQ #%u\n", event, mcq->cqn); } static void mlx5_fpga_conn_event(struct mlx5_core_qp *mqp, int event) { struct mlx5_fpga_conn *conn; conn = container_of(mqp, struct mlx5_fpga_conn, qp.mqp); mlx5_fpga_warn(conn->fdev, "QP event %u on QP #%u\n", event, mqp->qpn); } static inline void mlx5_fpga_conn_cqes(struct mlx5_fpga_conn *conn, unsigned int budget) { struct mlx5_cqe64 *cqe; while (budget) { cqe = mlx5_cqwq_get_cqe(&conn->cq.wq); if (!cqe) break; budget--; mlx5_cqwq_pop(&conn->cq.wq); mlx5_fpga_conn_handle_cqe(conn, cqe); mlx5_cqwq_update_db_record(&conn->cq.wq); } if (!budget) { tasklet_schedule(&conn->cq.tasklet); return; } mlx5_fpga_dbg(conn->fdev, "Re-arming CQ with cc# %u\n", conn->cq.wq.cc); /* ensure cq space is freed before enabling more cqes */ wmb(); mlx5_fpga_conn_arm_cq(conn); } static void mlx5_fpga_conn_cq_tasklet(unsigned long data) { struct mlx5_fpga_conn *conn = (void *)data; if (unlikely(!conn->qp.active)) return; 
mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET); } static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe) { struct mlx5_fpga_conn *conn; conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq); if (unlikely(!conn->qp.active)) return; mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET); } static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size) { struct mlx5_fpga_device *fdev = conn->fdev; struct mlx5_core_dev *mdev = fdev->mdev; u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {0}; u32 out[MLX5_ST_SZ_DW(create_cq_out)]; struct mlx5_wq_param wqp; struct mlx5_cqe64 *cqe; int inlen, err, eqn; unsigned int irqn; void *cqc, *in; __be64 *pas; u32 i; cq_size = roundup_pow_of_two(cq_size); MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size)); wqp.buf_numa_node = mdev->priv.numa_node; wqp.db_numa_node = mdev->priv.numa_node; err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &conn->cq.wq, &conn->cq.wq_ctrl); if (err) return err; for (i = 0; i < mlx5_cqwq_get_size(&conn->cq.wq); i++) { cqe = mlx5_cqwq_get_wqe(&conn->cq.wq, i); cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK; } inlen = MLX5_ST_SZ_BYTES(create_cq_in) + sizeof(u64) * conn->cq.wq_ctrl.buf.npages; in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto err_cqwq; } err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn); if (err) { kvfree(in); goto err_cqwq; } cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size)); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index); MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma); pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas); mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.buf, pas); err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen, out, sizeof(out)); kvfree(in); if (err) goto err_cqwq; conn->cq.mcq.cqe_sz = 64; conn->cq.mcq.set_ci_db = conn->cq.wq_ctrl.db.db; conn->cq.mcq.arm_db = conn->cq.wq_ctrl.db.db + 1; *conn->cq.mcq.set_ci_db = 0; *conn->cq.mcq.arm_db = 0; conn->cq.mcq.vector = 0; conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete; conn->cq.mcq.event = mlx5_fpga_conn_cq_event; conn->cq.mcq.irqn = irqn; conn->cq.mcq.uar = fdev->conn_res.uar; tasklet_init(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet, (unsigned long)conn); mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn); goto out; err_cqwq: mlx5_wq_destroy(&conn->cq.wq_ctrl); out: return err; } static void mlx5_fpga_conn_destroy_cq(struct mlx5_fpga_conn *conn) { tasklet_disable(&conn->cq.tasklet); tasklet_kill(&conn->cq.tasklet); mlx5_core_destroy_cq(conn->fdev->mdev, &conn->cq.mcq); mlx5_wq_destroy(&conn->cq.wq_ctrl); } static int mlx5_fpga_conn_create_wq(struct mlx5_fpga_conn *conn, void *qpc) { struct mlx5_fpga_device *fdev = conn->fdev; struct mlx5_core_dev *mdev = fdev->mdev; struct mlx5_wq_param wqp; wqp.buf_numa_node = mdev->priv.numa_node; wqp.db_numa_node = mdev->priv.numa_node; return mlx5_wq_qp_create(mdev, &wqp, qpc, &conn->qp.wq, &conn->qp.wq_ctrl); } static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn, unsigned int tx_size, unsigned int rx_size) { struct mlx5_fpga_device *fdev = conn->fdev; struct mlx5_core_dev *mdev = fdev->mdev; u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {0}; void *in = NULL, *qpc; int err, inlen; conn->qp.rq.pc = 0; conn->qp.rq.cc = 0; conn->qp.rq.size = roundup_pow_of_two(rx_size); conn->qp.sq.pc = 0; conn->qp.sq.cc = 0; conn->qp.sq.size = 
roundup_pow_of_two(tx_size); MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4); MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(conn->qp.rq.size)); MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(conn->qp.sq.size)); err = mlx5_fpga_conn_create_wq(conn, temp_qpc); if (err) goto out; conn->qp.rq.bufs = kvcalloc(conn->qp.rq.size, sizeof(conn->qp.rq.bufs[0]), GFP_KERNEL); if (!conn->qp.rq.bufs) { err = -ENOMEM; goto err_wq; } conn->qp.sq.bufs = kvcalloc(conn->qp.sq.size, sizeof(conn->qp.sq.bufs[0]), GFP_KERNEL); if (!conn->qp.sq.bufs) { err = -ENOMEM; goto err_rq_bufs; } inlen = MLX5_ST_SZ_BYTES(create_qp_in) + MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * conn->qp.wq_ctrl.buf.npages; in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto err_sq_bufs; } qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); MLX5_SET(qpc, qpc, uar_page, fdev->conn_res.uar->index); MLX5_SET(qpc, qpc, log_page_size, conn->qp.wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET(qpc, qpc, fre, 1); MLX5_SET(qpc, qpc, rlky, 1); MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC); MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); MLX5_SET(qpc, qpc, pd, fdev->conn_res.pdn); MLX5_SET(qpc, qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4); MLX5_SET(qpc, qpc, log_rq_size, ilog2(conn->qp.rq.size)); MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ); MLX5_SET(qpc, qpc, log_sq_size, ilog2(conn->qp.sq.size)); MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn); MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn); MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma); if (MLX5_CAP_GEN(mdev, cqe_version) == 1) MLX5_SET(qpc, qpc, user_index, 0xFFFFFF); mlx5_fill_page_frag_array(&conn->qp.wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas)); err = mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen); if (err) goto err_sq_bufs; conn->qp.mqp.event = mlx5_fpga_conn_event; mlx5_fpga_dbg(fdev, "Created QP #0x%x\n", conn->qp.mqp.qpn); goto out; err_sq_bufs: kvfree(conn->qp.sq.bufs); err_rq_bufs: kvfree(conn->qp.rq.bufs); err_wq: mlx5_wq_destroy(&conn->qp.wq_ctrl); out: kvfree(in); return err; } static void mlx5_fpga_conn_free_recv_bufs(struct mlx5_fpga_conn *conn) { int ix; for (ix = 0; ix < conn->qp.rq.size; ix++) { if (!conn->qp.rq.bufs[ix]) continue; mlx5_fpga_conn_unmap_buf(conn, conn->qp.rq.bufs[ix]); kfree(conn->qp.rq.bufs[ix]); conn->qp.rq.bufs[ix] = NULL; } } static void mlx5_fpga_conn_flush_send_bufs(struct mlx5_fpga_conn *conn) { struct mlx5_fpga_dma_buf *buf, *temp; int ix; for (ix = 0; ix < conn->qp.sq.size; ix++) { buf = conn->qp.sq.bufs[ix]; if (!buf) continue; conn->qp.sq.bufs[ix] = NULL; mlx5_fpga_conn_unmap_buf(conn, buf); if (!buf->complete) continue; buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR); } list_for_each_entry_safe(buf, temp, &conn->qp.sq.backlog, list) { mlx5_fpga_conn_unmap_buf(conn, buf); if (!buf->complete) continue; buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR); } } static void mlx5_fpga_conn_destroy_qp(struct mlx5_fpga_conn *conn) { mlx5_core_destroy_qp(conn->fdev->mdev, &conn->qp.mqp); mlx5_fpga_conn_free_recv_bufs(conn); mlx5_fpga_conn_flush_send_bufs(conn); kvfree(conn->qp.sq.bufs); kvfree(conn->qp.rq.bufs); mlx5_wq_destroy(&conn->qp.wq_ctrl); } static inline int mlx5_fpga_conn_reset_qp(struct mlx5_fpga_conn *conn) { struct mlx5_core_dev *mdev = conn->fdev->mdev; mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to RST\n", conn->qp.mqp.qpn); return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, NULL, &conn->qp.mqp); } static inline int mlx5_fpga_conn_init_qp(struct 
mlx5_fpga_conn *conn) { struct mlx5_fpga_device *fdev = conn->fdev; struct mlx5_core_dev *mdev = fdev->mdev; u32 *qpc = NULL; int err; mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to INIT\n", conn->qp.mqp.qpn); qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL); if (!qpc) { err = -ENOMEM; goto out; } MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC); MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX); MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM); MLX5_SET(qpc, qpc, pd, conn->fdev->conn_res.pdn); MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn); MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn); MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma); err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc, &conn->qp.mqp); if (err) { mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err); goto out; } out: kfree(qpc); return err; } static inline int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn) { struct mlx5_fpga_device *fdev = conn->fdev; struct mlx5_core_dev *mdev = fdev->mdev; u32 *qpc = NULL; int err; mlx5_fpga_dbg(conn->fdev, "QP RTR\n"); qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL); if (!qpc) { err = -ENOMEM; goto out; } MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_1K_BYTES); MLX5_SET(qpc, qpc, log_msg_max, (u8)MLX5_CAP_GEN(mdev, log_max_msg)); MLX5_SET(qpc, qpc, remote_qpn, conn->fpga_qpn); MLX5_SET(qpc, qpc, next_rcv_psn, MLX5_GET(fpga_qpc, conn->fpga_qpc, next_send_psn)); MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX); MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM); ether_addr_copy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32), MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_mac_47_32)); MLX5_SET(qpc, qpc, primary_address_path.udp_sport, MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port)); MLX5_SET(qpc, qpc, primary_address_path.src_addr_index, conn->qp.sgid_index); MLX5_SET(qpc, qpc, primary_address_path.hop_limit, 0); memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rgid_rip), MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_ip), MLX5_FLD_SZ_BYTES(qpc, primary_address_path.rgid_rip)); err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc, &conn->qp.mqp); if (err) { mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err); goto out; } out: kfree(qpc); return err; } static inline int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn) { struct mlx5_fpga_device *fdev = conn->fdev; struct mlx5_core_dev *mdev = fdev->mdev; u32 *qpc = NULL; u32 opt_mask; int err; mlx5_fpga_dbg(conn->fdev, "QP RTS\n"); qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL); if (!qpc) { err = -ENOMEM; goto out; } MLX5_SET(qpc, qpc, log_ack_req_freq, 8); MLX5_SET(qpc, qpc, min_rnr_nak, 0x12); MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x12); /* ~1.07s */ MLX5_SET(qpc, qpc, next_send_psn, MLX5_GET(fpga_qpc, conn->fpga_qpc, next_rcv_psn)); MLX5_SET(qpc, qpc, retry_count, 7); MLX5_SET(qpc, qpc, rnr_retry, 7); /* Infinite retry if RNR NACK */ opt_mask = MLX5_QP_OPTPAR_RNR_TIMEOUT; err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, opt_mask, qpc, &conn->qp.mqp); if (err) { mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err); goto out; } out: kfree(qpc); return err; } static int mlx5_fpga_conn_connect(struct mlx5_fpga_conn *conn) { struct mlx5_fpga_device *fdev = conn->fdev; int err; MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_ACTIVE); err = mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn, MLX5_FPGA_QPC_STATE, 
&conn->fpga_qpc); if (err) { mlx5_fpga_err(fdev, "Failed to activate FPGA RC QP: %d\n", err); goto out; } err = mlx5_fpga_conn_reset_qp(conn); if (err) { mlx5_fpga_err(fdev, "Failed to change QP state to reset\n"); goto err_fpga_qp; } err = mlx5_fpga_conn_init_qp(conn); if (err) { mlx5_fpga_err(fdev, "Failed to modify QP from RESET to INIT\n"); goto err_fpga_qp; } conn->qp.active = true; while (!mlx5_fpga_conn_post_recv_buf(conn)) ; err = mlx5_fpga_conn_rtr_qp(conn); if (err) { mlx5_fpga_err(fdev, "Failed to change QP state from INIT to RTR\n"); goto err_recv_bufs; } err = mlx5_fpga_conn_rts_qp(conn); if (err) { mlx5_fpga_err(fdev, "Failed to change QP state from RTR to RTS\n"); goto err_recv_bufs; } goto out; err_recv_bufs: mlx5_fpga_conn_free_recv_bufs(conn); err_fpga_qp: MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT); if (mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn, MLX5_FPGA_QPC_STATE, &conn->fpga_qpc)) mlx5_fpga_err(fdev, "Failed to revert FPGA QP to INIT\n"); out: return err; } struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev, struct mlx5_fpga_conn_attr *attr, enum mlx5_ifc_fpga_qp_type qp_type) { struct mlx5_fpga_conn *ret, *conn; u8 *remote_mac, *remote_ip; int err; if (!attr->recv_cb) return ERR_PTR(-EINVAL); conn = kzalloc(sizeof(*conn), GFP_KERNEL); if (!conn) return ERR_PTR(-ENOMEM); conn->fdev = fdev; INIT_LIST_HEAD(&conn->qp.sq.backlog); spin_lock_init(&conn->qp.sq.lock); conn->recv_cb = attr->recv_cb; conn->cb_arg = attr->cb_arg; remote_mac = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_mac_47_32); err = mlx5_query_mac_address(fdev->mdev, remote_mac); if (err) { mlx5_fpga_err(fdev, "Failed to query local MAC: %d\n", err); ret = ERR_PTR(err); goto err; } /* Build Modified EUI-64 IPv6 address from the MAC address */ remote_ip = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_ip); remote_ip[0] = 0xfe; remote_ip[1] = 0x80; addrconf_addr_eui48(&remote_ip[8], remote_mac); err = mlx5_core_reserved_gid_alloc(fdev->mdev, &conn->qp.sgid_index); if (err) { mlx5_fpga_err(fdev, "Failed to allocate SGID: %d\n", err); ret = ERR_PTR(err); goto err; } err = mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index, MLX5_ROCE_VERSION_2, MLX5_ROCE_L3_TYPE_IPV6, remote_ip, remote_mac, true, 0, MLX5_FPGA_PORT_NUM); if (err) { mlx5_fpga_err(fdev, "Failed to set SGID: %d\n", err); ret = ERR_PTR(err); goto err_rsvd_gid; } mlx5_fpga_dbg(fdev, "Reserved SGID index %u\n", conn->qp.sgid_index); /* Allow for one cqe per rx/tx wqe, plus one cqe for the next wqe, * created during processing of the cqe */ err = mlx5_fpga_conn_create_cq(conn, (attr->tx_size + attr->rx_size) * 2); if (err) { mlx5_fpga_err(fdev, "Failed to create CQ: %d\n", err); ret = ERR_PTR(err); goto err_gid; } mlx5_fpga_conn_arm_cq(conn); err = mlx5_fpga_conn_create_qp(conn, attr->tx_size, attr->rx_size); if (err) { mlx5_fpga_err(fdev, "Failed to create QP: %d\n", err); ret = ERR_PTR(err); goto err_cq; } MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT); MLX5_SET(fpga_qpc, conn->fpga_qpc, qp_type, qp_type); MLX5_SET(fpga_qpc, conn->fpga_qpc, st, MLX5_FPGA_QPC_ST_RC); MLX5_SET(fpga_qpc, conn->fpga_qpc, ether_type, ETH_P_8021Q); MLX5_SET(fpga_qpc, conn->fpga_qpc, vid, 0); MLX5_SET(fpga_qpc, conn->fpga_qpc, next_rcv_psn, 1); MLX5_SET(fpga_qpc, conn->fpga_qpc, next_send_psn, 0); MLX5_SET(fpga_qpc, conn->fpga_qpc, pkey, MLX5_FPGA_PKEY); MLX5_SET(fpga_qpc, conn->fpga_qpc, remote_qpn, conn->qp.mqp.qpn); MLX5_SET(fpga_qpc, conn->fpga_qpc, rnr_retry, 7); MLX5_SET(fpga_qpc, 
conn->fpga_qpc, retry_count, 7); err = mlx5_fpga_create_qp(fdev->mdev, &conn->fpga_qpc, &conn->fpga_qpn); if (err) { mlx5_fpga_err(fdev, "Failed to create FPGA RC QP: %d\n", err); ret = ERR_PTR(err); goto err_qp; } err = mlx5_fpga_conn_connect(conn); if (err) { ret = ERR_PTR(err); goto err_conn; } mlx5_fpga_dbg(fdev, "FPGA QPN is %u\n", conn->fpga_qpn); ret = conn; goto out; err_conn: mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn); err_qp: mlx5_fpga_conn_destroy_qp(conn); err_cq: mlx5_fpga_conn_destroy_cq(conn); err_gid: mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index, 0, 0, NULL, NULL, false, 0, MLX5_FPGA_PORT_NUM); err_rsvd_gid: mlx5_core_reserved_gid_free(fdev->mdev, conn->qp.sgid_index); err: kfree(conn); out: return ret; } void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn) { struct mlx5_fpga_device *fdev = conn->fdev; struct mlx5_core_dev *mdev = fdev->mdev; int err = 0; conn->qp.active = false; tasklet_disable(&conn->cq.tasklet); synchronize_irq(conn->cq.mcq.irqn); mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn); err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, NULL, &conn->qp.mqp); if (err) mlx5_fpga_warn(fdev, "qp_modify 2ERR failed: %d\n", err); mlx5_fpga_conn_destroy_qp(conn); mlx5_fpga_conn_destroy_cq(conn); mlx5_core_roce_gid_set(conn->fdev->mdev, conn->qp.sgid_index, 0, 0, NULL, NULL, false, 0, MLX5_FPGA_PORT_NUM); mlx5_core_reserved_gid_free(conn->fdev->mdev, conn->qp.sgid_index); kfree(conn); } int mlx5_fpga_conn_device_init(struct mlx5_fpga_device *fdev) { int err; err = mlx5_nic_vport_enable_roce(fdev->mdev); if (err) { mlx5_fpga_err(fdev, "Failed to enable RoCE: %d\n", err); goto out; } fdev->conn_res.uar = mlx5_get_uars_page(fdev->mdev); if (IS_ERR(fdev->conn_res.uar)) { err = PTR_ERR(fdev->conn_res.uar); mlx5_fpga_err(fdev, "get_uars_page failed, %d\n", err); goto err_roce; } mlx5_fpga_dbg(fdev, "Allocated UAR index %u\n", fdev->conn_res.uar->index); err = mlx5_core_alloc_pd(fdev->mdev, &fdev->conn_res.pdn); if (err) { mlx5_fpga_err(fdev, "alloc pd failed, %d\n", err); goto err_uar; } mlx5_fpga_dbg(fdev, "Allocated PD %u\n", fdev->conn_res.pdn); err = mlx5_fpga_conn_create_mkey(fdev->mdev, fdev->conn_res.pdn, &fdev->conn_res.mkey); if (err) { mlx5_fpga_err(fdev, "create mkey failed, %d\n", err); goto err_dealloc_pd; } mlx5_fpga_dbg(fdev, "Created mkey 0x%x\n", fdev->conn_res.mkey.key); return 0; err_dealloc_pd: mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn); err_uar: mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar); err_roce: mlx5_nic_vport_disable_roce(fdev->mdev); out: return err; } void mlx5_fpga_conn_device_cleanup(struct mlx5_fpga_device *fdev) { mlx5_core_destroy_mkey(fdev->mdev, &fdev->conn_res.mkey); mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn); mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar); mlx5_nic_vport_disable_roce(fdev->mdev); }
./CrossVul/dataset_final_sorted/CWE-400/c/good_1235_0
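As a rough illustration of how the FPGA connection API in the entry above is driven by its in-kernel callers, the following is a hedged caller-side sketch. The receive callback, ring sizes and qp_type argument are placeholders; the tx_size/rx_size/recv_cb/cb_arg fields are the ones the entry itself reads from struct mlx5_fpga_conn_attr in mlx5_fpga_conn_create().

/* Hypothetical caller sketch for the FPGA connection API above; names and
 * sizes are illustrative, not taken from any real mlx5 consumer.
 */
static void demo_recv_cb(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
{
	/* process the received message described by buf->sg[0] */
}

static struct mlx5_fpga_conn *demo_conn_open(struct mlx5_fpga_device *fdev,
					     enum mlx5_ifc_fpga_qp_type qp_type,
					     void *priv)
{
	struct mlx5_fpga_conn_attr attr = {
		.tx_size = 64,	/* illustrative ring sizes */
		.rx_size = 64,
		.recv_cb = demo_recv_cb,
		.cb_arg  = priv,
	};

	/* returns ERR_PTR() on failure, a connected QP pair on success */
	return mlx5_fpga_conn_create(fdev, &attr, qp_type);
}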
crossvul-cpp_data_bad_1249_0
/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * BSD LICENSE * * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *****************************************************************************/ #include "iwl-trans.h" #include "iwl-fh.h" #include "iwl-context-info-gen3.h" #include "internal.h" #include "iwl-prph.h" int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, const struct fw_img *fw) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_context_info_gen3 *ctxt_info_gen3; struct iwl_prph_scratch *prph_scratch; struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl; struct iwl_prph_info *prph_info; void *iml_img; u32 control_flags = 0; int ret; int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE, trans->cfg->min_txq_size); /* Allocate prph scratch */ prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch), &trans_pcie->prph_scratch_dma_addr, GFP_KERNEL); if (!prph_scratch) return -ENOMEM; prph_sc_ctrl = &prph_scratch->ctrl_cfg; prph_sc_ctrl->version.version = 0; prph_sc_ctrl->version.mac_id = cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV)); prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4); control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K | IWL_PRPH_SCRATCH_MTR_MODE | (IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT) | IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | IWL_PRPH_SCRATCH_EDBG_DEST_DRAM; prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags); /* initialize RX default queue */ prph_sc_ctrl->rbd_cfg.free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma); /* Configure debug, for integration */ if (!iwl_trans_dbg_ini_valid(trans)) iwl_pcie_alloc_fw_monitor(trans, 0); if (trans->dbg.num_blocks) { prph_sc_ctrl->hwm_cfg.hwm_base_addr = cpu_to_le64(trans->dbg.fw_mon[0].physical); prph_sc_ctrl->hwm_cfg.hwm_size = cpu_to_le32(trans->dbg.fw_mon[0].size); } /* allocate ucode sections in dram and set addresses */ ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram); if (ret) { dma_free_coherent(trans->dev, sizeof(*prph_scratch), prph_scratch, trans_pcie->prph_scratch_dma_addr); return ret; } /* Allocate prph information * currently we don't assign to the prph info anything, but it would get * assigned later */ prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info), &trans_pcie->prph_info_dma_addr, GFP_KERNEL); if (!prph_info) return -ENOMEM; /* Allocate context info */ ctxt_info_gen3 = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info_gen3), &trans_pcie->ctxt_info_dma_addr, GFP_KERNEL); if (!ctxt_info_gen3) return -ENOMEM; ctxt_info_gen3->prph_info_base_addr = cpu_to_le64(trans_pcie->prph_info_dma_addr); ctxt_info_gen3->prph_scratch_base_addr = cpu_to_le64(trans_pcie->prph_scratch_dma_addr); ctxt_info_gen3->prph_scratch_size = cpu_to_le32(sizeof(*prph_scratch)); ctxt_info_gen3->cr_head_idx_arr_base_addr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma); ctxt_info_gen3->tr_tail_idx_arr_base_addr = cpu_to_le64(trans_pcie->rxq->tr_tail_dma); ctxt_info_gen3->cr_tail_idx_arr_base_addr = cpu_to_le64(trans_pcie->rxq->cr_tail_dma); ctxt_info_gen3->cr_idx_arr_size = cpu_to_le16(IWL_NUM_OF_COMPLETION_RINGS); ctxt_info_gen3->tr_idx_arr_size = cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS); ctxt_info_gen3->mtr_base_addr = cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr); ctxt_info_gen3->mcr_base_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma); ctxt_info_gen3->mtr_size = cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size)); ctxt_info_gen3->mcr_size = cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE)); trans_pcie->ctxt_info_gen3 = ctxt_info_gen3; trans_pcie->prph_info = prph_info; trans_pcie->prph_scratch = prph_scratch; /* Allocate IML */ iml_img = 
dma_alloc_coherent(trans->dev, trans->iml_len, &trans_pcie->iml_dma_addr, GFP_KERNEL); if (!iml_img) return -ENOMEM; memcpy(iml_img, trans->iml, trans->iml_len); iwl_enable_fw_load_int_ctx_info(trans); /* kick FW self load */ iwl_write64(trans, CSR_CTXT_INFO_ADDR, trans_pcie->ctxt_info_dma_addr); iwl_write64(trans, CSR_IML_DATA_ADDR, trans_pcie->iml_dma_addr); iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len); iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1); else iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT); return 0; } void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (!trans_pcie->ctxt_info_gen3) return; dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3), trans_pcie->ctxt_info_gen3, trans_pcie->ctxt_info_dma_addr); trans_pcie->ctxt_info_dma_addr = 0; trans_pcie->ctxt_info_gen3 = NULL; iwl_pcie_ctxt_info_free_fw_img(trans); dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch), trans_pcie->prph_scratch, trans_pcie->prph_scratch_dma_addr); trans_pcie->prph_scratch_dma_addr = 0; trans_pcie->prph_scratch = NULL; dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_info), trans_pcie->prph_info, trans_pcie->prph_info_dma_addr); trans_pcie->prph_info_dma_addr = 0; trans_pcie->prph_info = NULL; }
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1249_0
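The context-info init above strings together several dma_alloc_coherent() calls and can return -ENOMEM partway through the sequence. A common kernel pattern for that kind of multi-step setup is goto-based unwinding, so buffers obtained earlier are released when a later allocation fails. Below is a minimal sketch of that pattern only; struct demo_ctx and its fields are invented for the example, while dma_alloc_coherent()/dma_free_coherent() are the real APIs.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Hypothetical container for three coherent DMA buffers. */
struct demo_ctx {
	struct device *dev;
	void *a, *b, *c;
	dma_addr_t a_phys, b_phys, c_phys;
};

static int demo_ctx_init(struct demo_ctx *ctx, size_t len)
{
	ctx->a = dma_alloc_coherent(ctx->dev, len, &ctx->a_phys, GFP_KERNEL);
	if (!ctx->a)
		return -ENOMEM;

	ctx->b = dma_alloc_coherent(ctx->dev, len, &ctx->b_phys, GFP_KERNEL);
	if (!ctx->b)
		goto err_free_a;	/* unwind instead of leaking a */

	ctx->c = dma_alloc_coherent(ctx->dev, len, &ctx->c_phys, GFP_KERNEL);
	if (!ctx->c)
		goto err_free_b;	/* unwind b, then fall through to a */

	return 0;

err_free_b:
	dma_free_coherent(ctx->dev, len, ctx->b, ctx->b_phys);
err_free_a:
	dma_free_coherent(ctx->dev, len, ctx->a, ctx->a_phys);
	return -ENOMEM;
}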
crossvul-cpp_data_good_1268_0
// SPDX-License-Identifier: ISC /* * Copyright (c) 2007-2011 Atheros Communications Inc. * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc. * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com> */ #include <linux/module.h> #include <linux/usb.h> #include "debug.h" #include "core.h" #include "bmi.h" #include "hif.h" #include "htc.h" #include "usb.h" static void ath10k_usb_post_recv_transfers(struct ath10k *ar, struct ath10k_usb_pipe *recv_pipe); /* inlined helper functions */ static inline enum ath10k_htc_ep_id eid_from_htc_hdr(struct ath10k_htc_hdr *htc_hdr) { return (enum ath10k_htc_ep_id)htc_hdr->eid; } static inline bool is_trailer_only_msg(struct ath10k_htc_hdr *htc_hdr) { return __le16_to_cpu(htc_hdr->len) == htc_hdr->trailer_len; } /* pipe/urb operations */ static struct ath10k_urb_context * ath10k_usb_alloc_urb_from_pipe(struct ath10k_usb_pipe *pipe) { struct ath10k_urb_context *urb_context = NULL; unsigned long flags; spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); if (!list_empty(&pipe->urb_list_head)) { urb_context = list_first_entry(&pipe->urb_list_head, struct ath10k_urb_context, link); list_del(&urb_context->link); pipe->urb_cnt--; } spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags); return urb_context; } static void ath10k_usb_free_urb_to_pipe(struct ath10k_usb_pipe *pipe, struct ath10k_urb_context *urb_context) { unsigned long flags; spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); pipe->urb_cnt++; list_add(&urb_context->link, &pipe->urb_list_head); spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags); } static void ath10k_usb_cleanup_recv_urb(struct ath10k_urb_context *urb_context) { dev_kfree_skb(urb_context->skb); urb_context->skb = NULL; ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context); } static void ath10k_usb_free_pipe_resources(struct ath10k *ar, struct ath10k_usb_pipe *pipe) { struct ath10k_urb_context *urb_context; if (!pipe->ar_usb) { /* nothing allocated for this pipe */ return; } ath10k_dbg(ar, ATH10K_DBG_USB, "usb free resources lpipe %d hpipe 0x%x urbs %d avail %d\n", pipe->logical_pipe_num, pipe->usb_pipe_handle, pipe->urb_alloc, pipe->urb_cnt); if (pipe->urb_alloc != pipe->urb_cnt) { ath10k_dbg(ar, ATH10K_DBG_USB, "usb urb leak lpipe %d hpipe 0x%x urbs %d avail %d\n", pipe->logical_pipe_num, pipe->usb_pipe_handle, pipe->urb_alloc, pipe->urb_cnt); } for (;;) { urb_context = ath10k_usb_alloc_urb_from_pipe(pipe); if (!urb_context) break; kfree(urb_context); } } static void ath10k_usb_cleanup_pipe_resources(struct ath10k *ar) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); int i; for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) ath10k_usb_free_pipe_resources(ar, &ar_usb->pipes[i]); } /* hif usb rx/tx completion functions */ static void ath10k_usb_recv_complete(struct urb *urb) { struct ath10k_urb_context *urb_context = urb->context; struct ath10k_usb_pipe *pipe = urb_context->pipe; struct ath10k *ar = pipe->ar_usb->ar; struct sk_buff *skb; int status = 0; ath10k_dbg(ar, ATH10K_DBG_USB_BULK, "usb recv pipe %d stat %d len %d urb 0x%pK\n", pipe->logical_pipe_num, urb->status, urb->actual_length, urb); if (urb->status != 0) { status = -EIO; switch (urb->status) { case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* no need to spew these errors when device * removed or urb killed due to driver shutdown */ status = -ECANCELED; break; default: ath10k_dbg(ar, ATH10K_DBG_USB_BULK, "usb recv pipe %d ep 0x%2.2x failed: %d\n", pipe->logical_pipe_num, pipe->ep_address, urb->status); break; } goto cleanup_recv_urb; } if (urb->actual_length == 0) 
goto cleanup_recv_urb; skb = urb_context->skb; /* we are going to pass it up */ urb_context->skb = NULL; skb_put(skb, urb->actual_length); /* note: queue implements a lock */ skb_queue_tail(&pipe->io_comp_queue, skb); schedule_work(&pipe->io_complete_work); cleanup_recv_urb: ath10k_usb_cleanup_recv_urb(urb_context); if (status == 0 && pipe->urb_cnt >= pipe->urb_cnt_thresh) { /* our free urbs are piling up, post more transfers */ ath10k_usb_post_recv_transfers(ar, pipe); } } static void ath10k_usb_transmit_complete(struct urb *urb) { struct ath10k_urb_context *urb_context = urb->context; struct ath10k_usb_pipe *pipe = urb_context->pipe; struct ath10k *ar = pipe->ar_usb->ar; struct sk_buff *skb; if (urb->status != 0) { ath10k_dbg(ar, ATH10K_DBG_USB_BULK, "pipe: %d, failed:%d\n", pipe->logical_pipe_num, urb->status); } skb = urb_context->skb; urb_context->skb = NULL; ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context); /* note: queue implements a lock */ skb_queue_tail(&pipe->io_comp_queue, skb); schedule_work(&pipe->io_complete_work); } /* pipe operations */ static void ath10k_usb_post_recv_transfers(struct ath10k *ar, struct ath10k_usb_pipe *recv_pipe) { struct ath10k_urb_context *urb_context; struct urb *urb; int usb_status; for (;;) { urb_context = ath10k_usb_alloc_urb_from_pipe(recv_pipe); if (!urb_context) break; urb_context->skb = dev_alloc_skb(ATH10K_USB_RX_BUFFER_SIZE); if (!urb_context->skb) goto err; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) goto err; usb_fill_bulk_urb(urb, recv_pipe->ar_usb->udev, recv_pipe->usb_pipe_handle, urb_context->skb->data, ATH10K_USB_RX_BUFFER_SIZE, ath10k_usb_recv_complete, urb_context); ath10k_dbg(ar, ATH10K_DBG_USB_BULK, "usb bulk recv submit %d 0x%x ep 0x%2.2x len %d buf 0x%pK\n", recv_pipe->logical_pipe_num, recv_pipe->usb_pipe_handle, recv_pipe->ep_address, ATH10K_USB_RX_BUFFER_SIZE, urb_context->skb); usb_anchor_urb(urb, &recv_pipe->urb_submitted); usb_status = usb_submit_urb(urb, GFP_ATOMIC); if (usb_status) { ath10k_dbg(ar, ATH10K_DBG_USB_BULK, "usb bulk recv failed: %d\n", usb_status); usb_unanchor_urb(urb); usb_free_urb(urb); goto err; } usb_free_urb(urb); } return; err: ath10k_usb_cleanup_recv_urb(urb_context); } static void ath10k_usb_flush_all(struct ath10k *ar) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); int i; for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) { if (ar_usb->pipes[i].ar_usb) { usb_kill_anchored_urbs(&ar_usb->pipes[i].urb_submitted); cancel_work_sync(&ar_usb->pipes[i].io_complete_work); } } } static void ath10k_usb_start_recv_pipes(struct ath10k *ar) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA].urb_cnt_thresh = 1; ath10k_usb_post_recv_transfers(ar, &ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA]); } static void ath10k_usb_tx_complete(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_htc_hdr *htc_hdr; struct ath10k_htc_ep *ep; htc_hdr = (struct ath10k_htc_hdr *)skb->data; ep = &ar->htc.endpoint[htc_hdr->eid]; ath10k_htc_notify_tx_completion(ep, skb); /* The TX complete handler now owns the skb... 
*/ } static void ath10k_usb_rx_complete(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_htc *htc = &ar->htc; struct ath10k_htc_hdr *htc_hdr; enum ath10k_htc_ep_id eid; struct ath10k_htc_ep *ep; u16 payload_len; u8 *trailer; int ret; htc_hdr = (struct ath10k_htc_hdr *)skb->data; eid = eid_from_htc_hdr(htc_hdr); ep = &ar->htc.endpoint[eid]; if (ep->service_id == 0) { ath10k_warn(ar, "ep %d is not connected\n", eid); goto out_free_skb; } payload_len = le16_to_cpu(htc_hdr->len); if (!payload_len) { ath10k_warn(ar, "zero length frame received, firmware crashed?\n"); goto out_free_skb; } if (payload_len < htc_hdr->trailer_len) { ath10k_warn(ar, "malformed frame received, firmware crashed?\n"); goto out_free_skb; } if (htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT) { trailer = skb->data + sizeof(*htc_hdr) + payload_len - htc_hdr->trailer_len; ret = ath10k_htc_process_trailer(htc, trailer, htc_hdr->trailer_len, eid, NULL, NULL); if (ret) goto out_free_skb; if (is_trailer_only_msg(htc_hdr)) goto out_free_skb; /* strip off the trailer from the skb since it should not * be passed on to upper layers */ skb_trim(skb, skb->len - htc_hdr->trailer_len); } skb_pull(skb, sizeof(*htc_hdr)); ep->ep_ops.ep_rx_complete(ar, skb); /* The RX complete handler now owns the skb... */ return; out_free_skb: dev_kfree_skb(skb); } static void ath10k_usb_io_comp_work(struct work_struct *work) { struct ath10k_usb_pipe *pipe = container_of(work, struct ath10k_usb_pipe, io_complete_work); struct ath10k *ar = pipe->ar_usb->ar; struct sk_buff *skb; while ((skb = skb_dequeue(&pipe->io_comp_queue))) { if (pipe->flags & ATH10K_USB_PIPE_FLAG_TX) ath10k_usb_tx_complete(ar, skb); else ath10k_usb_rx_complete(ar, skb); } } #define ATH10K_USB_MAX_DIAG_CMD (sizeof(struct ath10k_usb_ctrl_diag_cmd_write)) #define ATH10K_USB_MAX_DIAG_RESP (sizeof(struct ath10k_usb_ctrl_diag_resp_read)) static void ath10k_usb_destroy(struct ath10k *ar) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); ath10k_usb_flush_all(ar); ath10k_usb_cleanup_pipe_resources(ar); usb_set_intfdata(ar_usb->interface, NULL); kfree(ar_usb->diag_cmd_buffer); kfree(ar_usb->diag_resp_buffer); } static int ath10k_usb_hif_start(struct ath10k *ar) { int i; struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); ath10k_usb_start_recv_pipes(ar); /* set the TX resource avail threshold for each TX pipe */ for (i = ATH10K_USB_PIPE_TX_CTRL; i <= ATH10K_USB_PIPE_TX_DATA_HP; i++) { ar_usb->pipes[i].urb_cnt_thresh = ar_usb->pipes[i].urb_alloc / 2; } return 0; } static int ath10k_usb_hif_tx_sg(struct ath10k *ar, u8 pipe_id, struct ath10k_hif_sg_item *items, int n_items) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); struct ath10k_usb_pipe *pipe = &ar_usb->pipes[pipe_id]; struct ath10k_urb_context *urb_context; struct sk_buff *skb; struct urb *urb; int ret, i; for (i = 0; i < n_items; i++) { urb_context = ath10k_usb_alloc_urb_from_pipe(pipe); if (!urb_context) { ret = -ENOMEM; goto err; } skb = items[i].transfer_context; urb_context->skb = skb; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { ret = -ENOMEM; goto err_free_urb_to_pipe; } usb_fill_bulk_urb(urb, ar_usb->udev, pipe->usb_pipe_handle, skb->data, skb->len, ath10k_usb_transmit_complete, urb_context); if (!(skb->len % pipe->max_packet_size)) { /* hit a max packet boundary on this pipe */ urb->transfer_flags |= URB_ZERO_PACKET; } usb_anchor_urb(urb, &pipe->urb_submitted); ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) { ath10k_dbg(ar, ATH10K_DBG_USB_BULK, "usb bulk transmit failed: %d\n", ret); usb_unanchor_urb(urb); 
usb_free_urb(urb); ret = -EINVAL; goto err_free_urb_to_pipe; } usb_free_urb(urb); } return 0; err_free_urb_to_pipe: ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context); err: return ret; } static void ath10k_usb_hif_stop(struct ath10k *ar) { ath10k_usb_flush_all(ar); } static u16 ath10k_usb_hif_get_free_queue_number(struct ath10k *ar, u8 pipe_id) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); return ar_usb->pipes[pipe_id].urb_cnt; } static int ath10k_usb_submit_ctrl_out(struct ath10k *ar, u8 req, u16 value, u16 index, void *data, u32 size) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); u8 *buf = NULL; int ret; if (size > 0) { buf = kmemdup(data, size, GFP_KERNEL); if (!buf) return -ENOMEM; } /* note: if successful returns number of bytes transferred */ ret = usb_control_msg(ar_usb->udev, usb_sndctrlpipe(ar_usb->udev, 0), req, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, buf, size, 1000); if (ret < 0) { ath10k_warn(ar, "Failed to submit usb control message: %d\n", ret); kfree(buf); return ret; } kfree(buf); return 0; } static int ath10k_usb_submit_ctrl_in(struct ath10k *ar, u8 req, u16 value, u16 index, void *data, u32 size) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); u8 *buf = NULL; int ret; if (size > 0) { buf = kmalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; } /* note: if successful returns number of bytes transferred */ ret = usb_control_msg(ar_usb->udev, usb_rcvctrlpipe(ar_usb->udev, 0), req, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, buf, size, 2 * HZ); if (ret < 0) { ath10k_warn(ar, "Failed to read usb control message: %d\n", ret); kfree(buf); return ret; } memcpy((u8 *)data, buf, size); kfree(buf); return 0; } static int ath10k_usb_ctrl_msg_exchange(struct ath10k *ar, u8 req_val, u8 *req_buf, u32 req_len, u8 resp_val, u8 *resp_buf, u32 *resp_len) { int ret; /* send command */ ret = ath10k_usb_submit_ctrl_out(ar, req_val, 0, 0, req_buf, req_len); if (ret) goto err; /* get response */ if (resp_buf) { ret = ath10k_usb_submit_ctrl_in(ar, resp_val, 0, 0, resp_buf, *resp_len); if (ret) goto err; } return 0; err: return ret; } static int ath10k_usb_hif_diag_read(struct ath10k *ar, u32 address, void *buf, size_t buf_len) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); struct ath10k_usb_ctrl_diag_cmd_read *cmd; u32 resp_len; int ret; if (buf_len < sizeof(struct ath10k_usb_ctrl_diag_resp_read)) return -EINVAL; cmd = (struct ath10k_usb_ctrl_diag_cmd_read *)ar_usb->diag_cmd_buffer; memset(cmd, 0, sizeof(*cmd)); cmd->cmd = ATH10K_USB_CTRL_DIAG_CC_READ; cmd->address = cpu_to_le32(address); resp_len = sizeof(struct ath10k_usb_ctrl_diag_resp_read); ret = ath10k_usb_ctrl_msg_exchange(ar, ATH10K_USB_CONTROL_REQ_DIAG_CMD, (u8 *)cmd, sizeof(*cmd), ATH10K_USB_CONTROL_REQ_DIAG_RESP, ar_usb->diag_resp_buffer, &resp_len); if (ret) return ret; if (resp_len != sizeof(struct ath10k_usb_ctrl_diag_resp_read)) return -EMSGSIZE; memcpy(buf, ar_usb->diag_resp_buffer, sizeof(struct ath10k_usb_ctrl_diag_resp_read)); return 0; } static int ath10k_usb_hif_diag_write(struct ath10k *ar, u32 address, const void *data, int nbytes) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); struct ath10k_usb_ctrl_diag_cmd_write *cmd; int ret; if (nbytes != sizeof(cmd->value)) return -EINVAL; cmd = (struct ath10k_usb_ctrl_diag_cmd_write *)ar_usb->diag_cmd_buffer; memset(cmd, 0, sizeof(*cmd)); cmd->cmd = cpu_to_le32(ATH10K_USB_CTRL_DIAG_CC_WRITE); cmd->address = cpu_to_le32(address); memcpy(&cmd->value, data, nbytes); ret = ath10k_usb_ctrl_msg_exchange(ar, 
ATH10K_USB_CONTROL_REQ_DIAG_CMD, (u8 *)cmd, sizeof(*cmd), 0, NULL, NULL); if (ret) return ret; return 0; } static int ath10k_usb_bmi_exchange_msg(struct ath10k *ar, void *req, u32 req_len, void *resp, u32 *resp_len) { int ret; if (req) { ret = ath10k_usb_submit_ctrl_out(ar, ATH10K_USB_CONTROL_REQ_SEND_BMI_CMD, 0, 0, req, req_len); if (ret) { ath10k_warn(ar, "unable to send the bmi data to the device: %d\n", ret); return ret; } } if (resp) { ret = ath10k_usb_submit_ctrl_in(ar, ATH10K_USB_CONTROL_REQ_RECV_BMI_RESP, 0, 0, resp, *resp_len); if (ret) { ath10k_warn(ar, "Unable to read the bmi data from the device: %d\n", ret); return ret; } } return 0; } static void ath10k_usb_hif_get_default_pipe(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe) { *ul_pipe = ATH10K_USB_PIPE_TX_CTRL; *dl_pipe = ATH10K_USB_PIPE_RX_CTRL; } static int ath10k_usb_hif_map_service_to_pipe(struct ath10k *ar, u16 svc_id, u8 *ul_pipe, u8 *dl_pipe) { switch (svc_id) { case ATH10K_HTC_SVC_ID_RSVD_CTRL: case ATH10K_HTC_SVC_ID_WMI_CONTROL: *ul_pipe = ATH10K_USB_PIPE_TX_CTRL; /* due to large control packets, shift to data pipe */ *dl_pipe = ATH10K_USB_PIPE_RX_DATA; break; case ATH10K_HTC_SVC_ID_HTT_DATA_MSG: *ul_pipe = ATH10K_USB_PIPE_TX_DATA_LP; /* Disable rxdata2 directly, it will be enabled * if FW enable rxdata2 */ *dl_pipe = ATH10K_USB_PIPE_RX_DATA; break; default: return -EPERM; } return 0; } /* This op is currently only used by htc_wait_target if the HTC ready * message times out. It is not applicable for USB since there is nothing * we can do if the HTC ready message does not arrive in time. * TODO: Make this op non mandatory by introducing a NULL check in the * hif op wrapper. */ static void ath10k_usb_hif_send_complete_check(struct ath10k *ar, u8 pipe, int force) { } static int ath10k_usb_hif_power_up(struct ath10k *ar, enum ath10k_firmware_mode fw_mode) { return 0; } static void ath10k_usb_hif_power_down(struct ath10k *ar) { ath10k_usb_flush_all(ar); } #ifdef CONFIG_PM static int ath10k_usb_hif_suspend(struct ath10k *ar) { return -EOPNOTSUPP; } static int ath10k_usb_hif_resume(struct ath10k *ar) { return -EOPNOTSUPP; } #endif static const struct ath10k_hif_ops ath10k_usb_hif_ops = { .tx_sg = ath10k_usb_hif_tx_sg, .diag_read = ath10k_usb_hif_diag_read, .diag_write = ath10k_usb_hif_diag_write, .exchange_bmi_msg = ath10k_usb_bmi_exchange_msg, .start = ath10k_usb_hif_start, .stop = ath10k_usb_hif_stop, .map_service_to_pipe = ath10k_usb_hif_map_service_to_pipe, .get_default_pipe = ath10k_usb_hif_get_default_pipe, .send_complete_check = ath10k_usb_hif_send_complete_check, .get_free_queue_number = ath10k_usb_hif_get_free_queue_number, .power_up = ath10k_usb_hif_power_up, .power_down = ath10k_usb_hif_power_down, #ifdef CONFIG_PM .suspend = ath10k_usb_hif_suspend, .resume = ath10k_usb_hif_resume, #endif }; static u8 ath10k_usb_get_logical_pipe_num(u8 ep_address, int *urb_count) { u8 pipe_num = ATH10K_USB_PIPE_INVALID; switch (ep_address) { case ATH10K_USB_EP_ADDR_APP_CTRL_IN: pipe_num = ATH10K_USB_PIPE_RX_CTRL; *urb_count = RX_URB_COUNT; break; case ATH10K_USB_EP_ADDR_APP_DATA_IN: pipe_num = ATH10K_USB_PIPE_RX_DATA; *urb_count = RX_URB_COUNT; break; case ATH10K_USB_EP_ADDR_APP_INT_IN: pipe_num = ATH10K_USB_PIPE_RX_INT; *urb_count = RX_URB_COUNT; break; case ATH10K_USB_EP_ADDR_APP_DATA2_IN: pipe_num = ATH10K_USB_PIPE_RX_DATA2; *urb_count = RX_URB_COUNT; break; case ATH10K_USB_EP_ADDR_APP_CTRL_OUT: pipe_num = ATH10K_USB_PIPE_TX_CTRL; *urb_count = TX_URB_COUNT; break; case ATH10K_USB_EP_ADDR_APP_DATA_LP_OUT: pipe_num = 
ATH10K_USB_PIPE_TX_DATA_LP; *urb_count = TX_URB_COUNT; break; case ATH10K_USB_EP_ADDR_APP_DATA_MP_OUT: pipe_num = ATH10K_USB_PIPE_TX_DATA_MP; *urb_count = TX_URB_COUNT; break; case ATH10K_USB_EP_ADDR_APP_DATA_HP_OUT: pipe_num = ATH10K_USB_PIPE_TX_DATA_HP; *urb_count = TX_URB_COUNT; break; default: /* note: there may be endpoints not currently used */ break; } return pipe_num; } static int ath10k_usb_alloc_pipe_resources(struct ath10k *ar, struct ath10k_usb_pipe *pipe, int urb_cnt) { struct ath10k_urb_context *urb_context; int i; INIT_LIST_HEAD(&pipe->urb_list_head); init_usb_anchor(&pipe->urb_submitted); for (i = 0; i < urb_cnt; i++) { urb_context = kzalloc(sizeof(*urb_context), GFP_KERNEL); if (!urb_context) return -ENOMEM; urb_context->pipe = pipe; /* we are only allocate the urb contexts here, the actual URB * is allocated from the kernel as needed to do a transaction */ pipe->urb_alloc++; ath10k_usb_free_urb_to_pipe(pipe, urb_context); } ath10k_dbg(ar, ATH10K_DBG_USB, "usb alloc resources lpipe %d hpipe 0x%x urbs %d\n", pipe->logical_pipe_num, pipe->usb_pipe_handle, pipe->urb_alloc); return 0; } static int ath10k_usb_setup_pipe_resources(struct ath10k *ar, struct usb_interface *interface) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); struct usb_host_interface *iface_desc = interface->cur_altsetting; struct usb_endpoint_descriptor *endpoint; struct ath10k_usb_pipe *pipe; int ret, i, urbcount; u8 pipe_num; ath10k_dbg(ar, ATH10K_DBG_USB, "usb setting up pipes using interface\n"); /* walk decriptors and setup pipes */ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (ATH10K_USB_IS_BULK_EP(endpoint->bmAttributes)) { ath10k_dbg(ar, ATH10K_DBG_USB, "usb %s bulk ep 0x%2.2x maxpktsz %d\n", ATH10K_USB_IS_DIR_IN (endpoint->bEndpointAddress) ? "rx" : "tx", endpoint->bEndpointAddress, le16_to_cpu(endpoint->wMaxPacketSize)); } else if (ATH10K_USB_IS_INT_EP(endpoint->bmAttributes)) { ath10k_dbg(ar, ATH10K_DBG_USB, "usb %s int ep 0x%2.2x maxpktsz %d interval %d\n", ATH10K_USB_IS_DIR_IN (endpoint->bEndpointAddress) ? "rx" : "tx", endpoint->bEndpointAddress, le16_to_cpu(endpoint->wMaxPacketSize), endpoint->bInterval); } else if (ATH10K_USB_IS_ISOC_EP(endpoint->bmAttributes)) { /* TODO for ISO */ ath10k_dbg(ar, ATH10K_DBG_USB, "usb %s isoc ep 0x%2.2x maxpktsz %d interval %d\n", ATH10K_USB_IS_DIR_IN (endpoint->bEndpointAddress) ? 
"rx" : "tx", endpoint->bEndpointAddress, le16_to_cpu(endpoint->wMaxPacketSize), endpoint->bInterval); } urbcount = 0; pipe_num = ath10k_usb_get_logical_pipe_num(endpoint->bEndpointAddress, &urbcount); if (pipe_num == ATH10K_USB_PIPE_INVALID) continue; pipe = &ar_usb->pipes[pipe_num]; if (pipe->ar_usb) /* hmmm..pipe was already setup */ continue; pipe->ar_usb = ar_usb; pipe->logical_pipe_num = pipe_num; pipe->ep_address = endpoint->bEndpointAddress; pipe->max_packet_size = le16_to_cpu(endpoint->wMaxPacketSize); if (ATH10K_USB_IS_BULK_EP(endpoint->bmAttributes)) { if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) { pipe->usb_pipe_handle = usb_rcvbulkpipe(ar_usb->udev, pipe->ep_address); } else { pipe->usb_pipe_handle = usb_sndbulkpipe(ar_usb->udev, pipe->ep_address); } } else if (ATH10K_USB_IS_INT_EP(endpoint->bmAttributes)) { if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) { pipe->usb_pipe_handle = usb_rcvintpipe(ar_usb->udev, pipe->ep_address); } else { pipe->usb_pipe_handle = usb_sndintpipe(ar_usb->udev, pipe->ep_address); } } else if (ATH10K_USB_IS_ISOC_EP(endpoint->bmAttributes)) { /* TODO for ISO */ if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) { pipe->usb_pipe_handle = usb_rcvisocpipe(ar_usb->udev, pipe->ep_address); } else { pipe->usb_pipe_handle = usb_sndisocpipe(ar_usb->udev, pipe->ep_address); } } pipe->ep_desc = endpoint; if (!ATH10K_USB_IS_DIR_IN(pipe->ep_address)) pipe->flags |= ATH10K_USB_PIPE_FLAG_TX; ret = ath10k_usb_alloc_pipe_resources(ar, pipe, urbcount); if (ret) return ret; } return 0; } static int ath10k_usb_create(struct ath10k *ar, struct usb_interface *interface) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); struct usb_device *dev = interface_to_usbdev(interface); struct ath10k_usb_pipe *pipe; int ret, i; usb_set_intfdata(interface, ar_usb); spin_lock_init(&ar_usb->cs_lock); ar_usb->udev = dev; ar_usb->interface = interface; for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) { pipe = &ar_usb->pipes[i]; INIT_WORK(&pipe->io_complete_work, ath10k_usb_io_comp_work); skb_queue_head_init(&pipe->io_comp_queue); } ar_usb->diag_cmd_buffer = kzalloc(ATH10K_USB_MAX_DIAG_CMD, GFP_KERNEL); if (!ar_usb->diag_cmd_buffer) { ret = -ENOMEM; goto err; } ar_usb->diag_resp_buffer = kzalloc(ATH10K_USB_MAX_DIAG_RESP, GFP_KERNEL); if (!ar_usb->diag_resp_buffer) { ret = -ENOMEM; goto err; } ret = ath10k_usb_setup_pipe_resources(ar, interface); if (ret) goto err; return 0; err: ath10k_usb_destroy(ar); return ret; } /* ath10k usb driver registered functions */ static int ath10k_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct ath10k *ar; struct ath10k_usb *ar_usb; struct usb_device *dev = interface_to_usbdev(interface); int ret, vendor_id, product_id; enum ath10k_hw_rev hw_rev; struct ath10k_bus_params bus_params = {}; /* Assumption: All USB based chipsets (so far) are QCA9377 based. * If there will be newer chipsets that does not use the hw reg * setup as defined in qca6174_regs and qca6174_values, this * assumption is no longer valid and hw_rev must be setup differently * depending on chipset. 
*/ hw_rev = ATH10K_HW_QCA9377; ar = ath10k_core_create(sizeof(*ar_usb), &dev->dev, ATH10K_BUS_USB, hw_rev, &ath10k_usb_hif_ops); if (!ar) { dev_err(&dev->dev, "failed to allocate core\n"); return -ENOMEM; } usb_get_dev(dev); vendor_id = le16_to_cpu(dev->descriptor.idVendor); product_id = le16_to_cpu(dev->descriptor.idProduct); ath10k_dbg(ar, ATH10K_DBG_BOOT, "usb new func vendor 0x%04x product 0x%04x\n", vendor_id, product_id); ar_usb = ath10k_usb_priv(ar); ret = ath10k_usb_create(ar, interface); ar_usb->ar = ar; ar->dev_id = product_id; ar->id.vendor = vendor_id; ar->id.device = product_id; bus_params.dev_type = ATH10K_DEV_TYPE_HL; /* TODO: don't know yet how to get chip_id with USB */ bus_params.chip_id = 0; ret = ath10k_core_register(ar, &bus_params); if (ret) { ath10k_warn(ar, "failed to register driver core: %d\n", ret); goto err; } /* TODO: remove this once USB support is fully implemented */ ath10k_warn(ar, "Warning: ath10k USB support is incomplete, don't expect anything to work!\n"); return 0; err: ath10k_core_destroy(ar); usb_put_dev(dev); return ret; } static void ath10k_usb_remove(struct usb_interface *interface) { struct ath10k_usb *ar_usb; ar_usb = usb_get_intfdata(interface); if (!ar_usb) return; ath10k_core_unregister(ar_usb->ar); ath10k_usb_destroy(ar_usb->ar); usb_put_dev(interface_to_usbdev(interface)); ath10k_core_destroy(ar_usb->ar); } #ifdef CONFIG_PM static int ath10k_usb_pm_suspend(struct usb_interface *interface, pm_message_t message) { struct ath10k_usb *ar_usb = usb_get_intfdata(interface); ath10k_usb_flush_all(ar_usb->ar); return 0; } static int ath10k_usb_pm_resume(struct usb_interface *interface) { struct ath10k_usb *ar_usb = usb_get_intfdata(interface); struct ath10k *ar = ar_usb->ar; ath10k_usb_post_recv_transfers(ar, &ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA]); return 0; } #else #define ath10k_usb_pm_suspend NULL #define ath10k_usb_pm_resume NULL #endif /* table of devices that work with this driver */ static struct usb_device_id ath10k_usb_ids[] = { {USB_DEVICE(0x13b1, 0x0042)}, /* Linksys WUSB6100M */ { /* Terminating entry */ }, }; MODULE_DEVICE_TABLE(usb, ath10k_usb_ids); static struct usb_driver ath10k_usb_driver = { .name = "ath10k_usb", .probe = ath10k_usb_probe, .suspend = ath10k_usb_pm_suspend, .resume = ath10k_usb_pm_resume, .disconnect = ath10k_usb_remove, .id_table = ath10k_usb_ids, .supports_autosuspend = true, .disable_hub_initiated_lpm = 1, }; module_usb_driver(ath10k_usb_driver); MODULE_AUTHOR("Atheros Communications, Inc."); MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN USB devices"); MODULE_LICENSE("Dual BSD/GPL");
./CrossVul/dataset_final_sorted/CWE-400/c/good_1268_0
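The ath10k pipe code above recycles URB contexts through a per-pipe free list guarded by an irqsave spinlock, so the submit path and the completion handlers (which may run in interrupt context) can share the pool safely. A minimal sketch of that free-list pattern follows, with invented names (demo_pool, demo_item); the list and locking primitives are the standard kernel ones.

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_item {
	struct list_head link;
	void *payload;			/* e.g. an skb owned by the in-flight transfer */
};

struct demo_pool {
	spinlock_t lock;
	struct list_head free_list;
};

static void demo_pool_init(struct demo_pool *pool)
{
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);
}

/* Take one item out of the pool, or return NULL if it is exhausted. */
static struct demo_item *demo_pool_get(struct demo_pool *pool)
{
	struct demo_item *item = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		item = list_first_entry(&pool->free_list,
					struct demo_item, link);
		list_del(&item->link);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return item;
}

/* Return an item; safe from IRQ context because of the irqsave lock. */
static void demo_pool_put(struct demo_pool *pool, struct demo_item *item)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	list_add(&item->link, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}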
crossvul-cpp_data_bad_1237_0
/* * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/random.h> #include <linux/vmalloc.h> #include <linux/hardirq.h> #include <linux/mlx5/driver.h> #include <linux/mlx5/cmd.h> #include "mlx5_core.h" #include "lib/eq.h" #include "lib/mlx5.h" #include "lib/pci_vsc.h" #include "diag/fw_tracer.h" enum { MLX5_HEALTH_POLL_INTERVAL = 2 * HZ, MAX_MISSES = 3, }; enum { MLX5_HEALTH_SYNDR_FW_ERR = 0x1, MLX5_HEALTH_SYNDR_IRISC_ERR = 0x7, MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR = 0x8, MLX5_HEALTH_SYNDR_CRC_ERR = 0x9, MLX5_HEALTH_SYNDR_FETCH_PCI_ERR = 0xa, MLX5_HEALTH_SYNDR_HW_FTL_ERR = 0xb, MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR = 0xc, MLX5_HEALTH_SYNDR_EQ_ERR = 0xd, MLX5_HEALTH_SYNDR_EQ_INV = 0xe, MLX5_HEALTH_SYNDR_FFSER_ERR = 0xf, MLX5_HEALTH_SYNDR_HIGH_TEMP = 0x10 }; enum { MLX5_DROP_NEW_HEALTH_WORK, }; enum { MLX5_SENSOR_NO_ERR = 0, MLX5_SENSOR_PCI_COMM_ERR = 1, MLX5_SENSOR_PCI_ERR = 2, MLX5_SENSOR_NIC_DISABLED = 3, MLX5_SENSOR_NIC_SW_RESET = 4, MLX5_SENSOR_FW_SYND_RFR = 5, }; u8 mlx5_get_nic_state(struct mlx5_core_dev *dev) { return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 7; } void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state) { u32 cur_cmdq_addr_l_sz; cur_cmdq_addr_l_sz = ioread32be(&dev->iseg->cmdq_addr_l_sz); iowrite32be((cur_cmdq_addr_l_sz & 0xFFFFF000) | state << MLX5_NIC_IFC_OFFSET, &dev->iseg->cmdq_addr_l_sz); } static bool sensor_pci_not_working(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; /* Offline PCI reads return 0xffffffff */ return (ioread32be(&h->fw_ver) == 0xffffffff); } static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; u32 rfr = ioread32be(&h->rfr) >> MLX5_RFR_OFFSET; u8 synd = ioread8(&h->synd); if (rfr && synd) mlx5_core_dbg(dev, "FW requests reset, synd: %d\n", synd); return rfr && synd; } static u32 check_fatal_sensors(struct mlx5_core_dev *dev) { if (sensor_pci_not_working(dev)) return MLX5_SENSOR_PCI_COMM_ERR; if (pci_channel_offline(dev->pdev)) return MLX5_SENSOR_PCI_ERR; if (mlx5_get_nic_state(dev) == 
MLX5_NIC_IFC_DISABLED) return MLX5_SENSOR_NIC_DISABLED; if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_SW_RESET) return MLX5_SENSOR_NIC_SW_RESET; if (sensor_fw_synd_rfr(dev)) return MLX5_SENSOR_FW_SYND_RFR; return MLX5_SENSOR_NO_ERR; } static int lock_sem_sw_reset(struct mlx5_core_dev *dev, bool lock) { enum mlx5_vsc_state state; int ret; if (!mlx5_core_is_pf(dev)) return -EBUSY; /* Try to lock GW access, this stage doesn't return * EBUSY because locked GW does not mean that other PF * already started the reset. */ ret = mlx5_vsc_gw_lock(dev); if (ret == -EBUSY) return -EINVAL; if (ret) return ret; state = lock ? MLX5_VSC_LOCK : MLX5_VSC_UNLOCK; /* At this stage, if the return status == EBUSY, then we know * for sure that another PF started the reset, so don't allow * another reset. */ ret = mlx5_vsc_sem_set_space(dev, MLX5_SEMAPHORE_SW_RESET, state); if (ret) mlx5_core_warn(dev, "Failed to lock SW reset semaphore\n"); /* Unlock GW access */ mlx5_vsc_gw_unlock(dev); return ret; } static bool reset_fw_if_needed(struct mlx5_core_dev *dev) { bool supported = (ioread32be(&dev->iseg->initializing) >> MLX5_FW_RESET_SUPPORTED_OFFSET) & 1; u32 fatal_error; if (!supported) return false; /* The reset only needs to be issued by one PF. The health buffer is * shared between all functions, and will be cleared during a reset. * Check again to avoid a redundant 2nd reset. If the fatal erros was * PCI related a reset won't help. */ fatal_error = check_fatal_sensors(dev); if (fatal_error == MLX5_SENSOR_PCI_COMM_ERR || fatal_error == MLX5_SENSOR_NIC_DISABLED || fatal_error == MLX5_SENSOR_NIC_SW_RESET) { mlx5_core_warn(dev, "Not issuing FW reset. Either it's already done or won't help."); return false; } mlx5_core_warn(dev, "Issuing FW Reset\n"); /* Write the NIC interface field to initiate the reset, the command * interface address also resides here, don't overwrite it. 
*/ mlx5_set_nic_state(dev, MLX5_NIC_IFC_SW_RESET); return true; } void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) { mutex_lock(&dev->intf_state_mutex); if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) goto unlock; if (dev->state == MLX5_DEVICE_STATE_UNINITIALIZED) { dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; goto unlock; } if (check_fatal_sensors(dev) || force) { dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; mlx5_cmd_flush(dev); } mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1); unlock: mutex_unlock(&dev->intf_state_mutex); } #define MLX5_CRDUMP_WAIT_MS 60000 #define MLX5_FW_RESET_WAIT_MS 1000 void mlx5_error_sw_reset(struct mlx5_core_dev *dev) { unsigned long end, delay_ms = MLX5_FW_RESET_WAIT_MS; int lock = -EBUSY; mutex_lock(&dev->intf_state_mutex); if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) goto unlock; mlx5_core_err(dev, "start\n"); if (check_fatal_sensors(dev) == MLX5_SENSOR_FW_SYND_RFR) { /* Get cr-dump and reset FW semaphore */ lock = lock_sem_sw_reset(dev, true); if (lock == -EBUSY) { delay_ms = MLX5_CRDUMP_WAIT_MS; goto recover_from_sw_reset; } /* Execute SW reset */ reset_fw_if_needed(dev); } recover_from_sw_reset: /* Recover from SW reset */ end = jiffies + msecs_to_jiffies(delay_ms); do { if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED) break; cond_resched(); } while (!time_after(jiffies, end)); if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) { dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n", mlx5_get_nic_state(dev), delay_ms); } /* Release FW semaphore if you are the lock owner */ if (!lock) lock_sem_sw_reset(dev, false); mlx5_core_err(dev, "end\n"); unlock: mutex_unlock(&dev->intf_state_mutex); } static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) { u8 nic_interface = mlx5_get_nic_state(dev); switch (nic_interface) { case MLX5_NIC_IFC_FULL: mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n"); break; case MLX5_NIC_IFC_DISABLED: mlx5_core_warn(dev, "starting teardown\n"); break; case MLX5_NIC_IFC_NO_DRAM_NIC: mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n"); break; case MLX5_NIC_IFC_SW_RESET: /* The IFC mode field is 3 bits, so it will read 0x7 in 2 cases: * 1. PCI has been disabled (ie. PCI-AER, PF driver unloaded * and this is a VF), this is not recoverable by SW reset. * Logging of this is handled elsewhere. * 2. FW reset has been issued by another function, driver can * be reloaded to recover after the mode switches to * MLX5_NIC_IFC_DISABLED. 
*/ if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR) mlx5_core_warn(dev, "NIC SW reset in progress\n"); break; default: mlx5_core_warn(dev, "Expected to see disabled NIC but it is has invalid value %d\n", nic_interface); } mlx5_disable_device(dev); } /* How much time to wait until health resetting the driver (in msecs) */ #define MLX5_RECOVERY_WAIT_MSECS 60000 static int mlx5_health_try_recover(struct mlx5_core_dev *dev) { unsigned long end; mlx5_core_warn(dev, "handling bad device here\n"); mlx5_handle_bad_state(dev); end = jiffies + msecs_to_jiffies(MLX5_RECOVERY_WAIT_MSECS); while (sensor_pci_not_working(dev)) { if (time_after(jiffies, end)) { mlx5_core_err(dev, "health recovery flow aborted, PCI reads still not working\n"); return -EIO; } msleep(100); } mlx5_core_err(dev, "starting health recovery flow\n"); mlx5_recover_device(dev); if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state) || check_fatal_sensors(dev)) { mlx5_core_err(dev, "health recovery failed\n"); return -EIO; } return 0; } static const char *hsynd_str(u8 synd) { switch (synd) { case MLX5_HEALTH_SYNDR_FW_ERR: return "firmware internal error"; case MLX5_HEALTH_SYNDR_IRISC_ERR: return "irisc not responding"; case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR: return "unrecoverable hardware error"; case MLX5_HEALTH_SYNDR_CRC_ERR: return "firmware CRC error"; case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR: return "ICM fetch PCI error"; case MLX5_HEALTH_SYNDR_HW_FTL_ERR: return "HW fatal error\n"; case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR: return "async EQ buffer overrun"; case MLX5_HEALTH_SYNDR_EQ_ERR: return "EQ error"; case MLX5_HEALTH_SYNDR_EQ_INV: return "Invalid EQ referenced"; case MLX5_HEALTH_SYNDR_FFSER_ERR: return "FFSER error"; case MLX5_HEALTH_SYNDR_HIGH_TEMP: return "High temperature"; default: return "unrecognized error"; } } static void print_health_info(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; char fw_str[18]; u32 fw; int i; /* If the syndrome is 0, the device is OK and no need to print buffer */ if (!ioread8(&h->synd)) return; for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) mlx5_core_err(dev, "assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i)); mlx5_core_err(dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr)); mlx5_core_err(dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra)); sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev)); mlx5_core_err(dev, "fw_ver %s\n", fw_str); mlx5_core_err(dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id)); mlx5_core_err(dev, "irisc_index %d\n", ioread8(&h->irisc_index)); mlx5_core_err(dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd))); mlx5_core_err(dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd)); fw = ioread32be(&h->fw_ver); mlx5_core_err(dev, "raw fw_ver 0x%08x\n", fw); } static int mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg) { struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter); struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; u8 synd; int err; synd = ioread8(&h->synd); err = devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd); if (err || !synd) return err; return devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd)); } struct mlx5_fw_reporter_ctx { u8 err_synd; int miss_counter; }; static int mlx5_fw_reporter_ctx_pairs_put(struct devlink_fmsg *fmsg, struct mlx5_fw_reporter_ctx 
*fw_reporter_ctx) { int err; err = devlink_fmsg_u8_pair_put(fmsg, "syndrome", fw_reporter_ctx->err_synd); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "fw_miss_counter", fw_reporter_ctx->miss_counter); if (err) return err; return 0; } static int mlx5_fw_reporter_heath_buffer_data_put(struct mlx5_core_dev *dev, struct devlink_fmsg *fmsg) { struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; int err; int i; if (!ioread8(&h->synd)) return 0; err = devlink_fmsg_pair_nest_start(fmsg, "health buffer"); if (err) return err; err = devlink_fmsg_obj_nest_start(fmsg); if (err) return err; err = devlink_fmsg_arr_pair_nest_start(fmsg, "assert_var"); if (err) return err; for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) { err = devlink_fmsg_u32_put(fmsg, ioread32be(h->assert_var + i)); if (err) return err; } err = devlink_fmsg_arr_pair_nest_end(fmsg); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "assert_exit_ptr", ioread32be(&h->assert_exit_ptr)); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "assert_callra", ioread32be(&h->assert_callra)); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "hw_id", ioread32be(&h->hw_id)); if (err) return err; err = devlink_fmsg_u8_pair_put(fmsg, "irisc_index", ioread8(&h->irisc_index)); if (err) return err; err = devlink_fmsg_u8_pair_put(fmsg, "synd", ioread8(&h->synd)); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd", ioread16be(&h->ext_synd)); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "raw_fw_ver", ioread32be(&h->fw_ver)); if (err) return err; err = devlink_fmsg_obj_nest_end(fmsg); if (err) return err; return devlink_fmsg_pair_nest_end(fmsg); } static int mlx5_fw_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, void *priv_ctx) { struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter); int err; err = mlx5_fw_tracer_trigger_core_dump_general(dev); if (err) return err; if (priv_ctx) { struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx; err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx); if (err) return err; } err = mlx5_fw_reporter_heath_buffer_data_put(dev, fmsg); if (err) return err; return mlx5_fw_tracer_get_saved_traces_objects(dev->tracer, fmsg); } static void mlx5_fw_reporter_err_work(struct work_struct *work) { struct mlx5_fw_reporter_ctx fw_reporter_ctx; struct mlx5_core_health *health; health = container_of(work, struct mlx5_core_health, report_work); if (IS_ERR_OR_NULL(health->fw_reporter)) return; fw_reporter_ctx.err_synd = health->synd; fw_reporter_ctx.miss_counter = health->miss_counter; if (fw_reporter_ctx.err_synd) { devlink_health_report(health->fw_reporter, "FW syndrom reported", &fw_reporter_ctx); return; } if (fw_reporter_ctx.miss_counter) devlink_health_report(health->fw_reporter, "FW miss counter reported", &fw_reporter_ctx); } static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = { .name = "fw", .diagnose = mlx5_fw_reporter_diagnose, .dump = mlx5_fw_reporter_dump, }; static int mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter, void *priv_ctx) { struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter); return mlx5_health_try_recover(dev); } #define MLX5_CR_DUMP_CHUNK_SIZE 256 static int mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, void *priv_ctx) { struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter); u32 crdump_size = 
dev->priv.health.crdump_size; u32 *cr_data; u32 data_size; u32 offset; int err; if (!mlx5_core_is_pf(dev)) return -EPERM; cr_data = kvmalloc(crdump_size, GFP_KERNEL); if (!cr_data) return -ENOMEM; err = mlx5_crdump_collect(dev, cr_data); if (err) return err; if (priv_ctx) { struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx; err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx); if (err) goto free_data; } err = devlink_fmsg_arr_pair_nest_start(fmsg, "crdump_data"); if (err) goto free_data; for (offset = 0; offset < crdump_size; offset += data_size) { if (crdump_size - offset < MLX5_CR_DUMP_CHUNK_SIZE) data_size = crdump_size - offset; else data_size = MLX5_CR_DUMP_CHUNK_SIZE; err = devlink_fmsg_binary_put(fmsg, (char *)cr_data + offset, data_size); if (err) goto free_data; } err = devlink_fmsg_arr_pair_nest_end(fmsg); free_data: kvfree(cr_data); return err; } static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work) { struct mlx5_fw_reporter_ctx fw_reporter_ctx; struct mlx5_core_health *health; struct mlx5_core_dev *dev; struct mlx5_priv *priv; health = container_of(work, struct mlx5_core_health, fatal_report_work); priv = container_of(health, struct mlx5_priv, health); dev = container_of(priv, struct mlx5_core_dev, priv); mlx5_enter_error_state(dev, false); if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) { if (mlx5_health_try_recover(dev)) mlx5_core_err(dev, "health recovery failed\n"); return; } fw_reporter_ctx.err_synd = health->synd; fw_reporter_ctx.miss_counter = health->miss_counter; devlink_health_report(health->fw_fatal_reporter, "FW fatal error reported", &fw_reporter_ctx); } static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = { .name = "fw_fatal", .recover = mlx5_fw_fatal_reporter_recover, .dump = mlx5_fw_fatal_reporter_dump, }; #define MLX5_REPORTER_FW_GRACEFUL_PERIOD 1200000 static void mlx5_fw_reporters_create(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; struct devlink *devlink = priv_to_devlink(dev); health->fw_reporter = devlink_health_reporter_create(devlink, &mlx5_fw_reporter_ops, 0, false, dev); if (IS_ERR(health->fw_reporter)) mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n", PTR_ERR(health->fw_reporter)); health->fw_fatal_reporter = devlink_health_reporter_create(devlink, &mlx5_fw_fatal_reporter_ops, MLX5_REPORTER_FW_GRACEFUL_PERIOD, true, dev); if (IS_ERR(health->fw_fatal_reporter)) mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %ld\n", PTR_ERR(health->fw_fatal_reporter)); } static void mlx5_fw_reporters_destroy(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; if (!IS_ERR_OR_NULL(health->fw_reporter)) devlink_health_reporter_destroy(health->fw_reporter); if (!IS_ERR_OR_NULL(health->fw_fatal_reporter)) devlink_health_reporter_destroy(health->fw_fatal_reporter); } static unsigned long get_next_poll_jiffies(void) { unsigned long next; get_random_bytes(&next, sizeof(next)); next %= HZ; next += jiffies + MLX5_HEALTH_POLL_INTERVAL; return next; } void mlx5_trigger_health_work(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; unsigned long flags; spin_lock_irqsave(&health->wq_lock, flags); if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) queue_work(health->wq, &health->fatal_report_work); else mlx5_core_err(dev, "new health works are not permitted at this stage\n"); spin_unlock_irqrestore(&health->wq_lock, flags); } static void poll_health(struct timer_list *t) { struct mlx5_core_dev *dev = 
from_timer(dev, t, priv.health.timer); struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; u32 fatal_error; u8 prev_synd; u32 count; if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) goto out; fatal_error = check_fatal_sensors(dev); if (fatal_error && !health->fatal_error) { mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error); dev->priv.health.fatal_error = fatal_error; print_health_info(dev); mlx5_trigger_health_work(dev); goto out; } count = ioread32be(health->health_counter); if (count == health->prev) ++health->miss_counter; else health->miss_counter = 0; health->prev = count; if (health->miss_counter == MAX_MISSES) { mlx5_core_err(dev, "device's health compromised - reached miss count\n"); print_health_info(dev); queue_work(health->wq, &health->report_work); } prev_synd = health->synd; health->synd = ioread8(&h->synd); if (health->synd && health->synd != prev_synd) queue_work(health->wq, &health->report_work); out: mod_timer(&health->timer, get_next_poll_jiffies()); } void mlx5_start_health_poll(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; timer_setup(&health->timer, poll_health, 0); health->fatal_error = MLX5_SENSOR_NO_ERR; clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); health->health = &dev->iseg->health; health->health_counter = &dev->iseg->health_counter; health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL); add_timer(&health->timer); } void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health) { struct mlx5_core_health *health = &dev->priv.health; unsigned long flags; if (disable_health) { spin_lock_irqsave(&health->wq_lock, flags); set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); spin_unlock_irqrestore(&health->wq_lock, flags); } del_timer_sync(&health->timer); } void mlx5_drain_health_wq(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; unsigned long flags; spin_lock_irqsave(&health->wq_lock, flags); set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); spin_unlock_irqrestore(&health->wq_lock, flags); cancel_work_sync(&health->report_work); cancel_work_sync(&health->fatal_report_work); } void mlx5_health_flush(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; flush_workqueue(health->wq); } void mlx5_health_cleanup(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; destroy_workqueue(health->wq); mlx5_fw_reporters_destroy(dev); } int mlx5_health_init(struct mlx5_core_dev *dev) { struct mlx5_core_health *health; char *name; mlx5_fw_reporters_create(dev); health = &dev->priv.health; name = kmalloc(64, GFP_KERNEL); if (!name) goto out_err; strcpy(name, "mlx5_health"); strcat(name, dev_name(dev->device)); health->wq = create_singlethread_workqueue(name); kfree(name); if (!health->wq) goto out_err; spin_lock_init(&health->wq_lock); INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work); INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work); return 0; out_err: mlx5_fw_reporters_destroy(dev); return -ENOMEM; }
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1237_0
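poll_health() above treats the firmware as stalled once the health counter fails to advance for MAX_MISSES consecutive polls. The same decision logic in isolation, as a small user-space sketch; demo_watchdog and DEMO_MAX_MISSES are invented names, and the threshold value 3 mirrors MAX_MISSES in the driver.

#include <stdbool.h>
#include <stdint.h>

#define DEMO_MAX_MISSES 3

struct demo_watchdog {
	uint32_t prev;		/* counter value seen on the previous poll */
	int miss_counter;	/* consecutive polls without progress */
};

/* Returns true when the counter has been stuck for DEMO_MAX_MISSES polls. */
static bool demo_watchdog_poll(struct demo_watchdog *w, uint32_t count)
{
	if (count == w->prev)
		++w->miss_counter;
	else
		w->miss_counter = 0;

	w->prev = count;
	return w->miss_counter == DEMO_MAX_MISSES;
}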
crossvul-cpp_data_bad_5356_3
/* * IPV4 GSO/GRO offload support * Linux INET implementation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * GRE GSO support */ #include <linux/skbuff.h> #include <linux/init.h> #include <net/protocol.h> #include <net/gre.h> static struct sk_buff *gre_gso_segment(struct sk_buff *skb, netdev_features_t features) { int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); struct sk_buff *segs = ERR_PTR(-EINVAL); u16 mac_offset = skb->mac_header; __be16 protocol = skb->protocol; u16 mac_len = skb->mac_len; int gre_offset, outer_hlen; bool need_csum, ufo; if (unlikely(skb_shinfo(skb)->gso_type & ~(SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_TCP_ECN | SKB_GSO_GRE | SKB_GSO_GRE_CSUM | SKB_GSO_IPIP | SKB_GSO_SIT))) goto out; if (!skb->encapsulation) goto out; if (unlikely(tnl_hlen < sizeof(struct gre_base_hdr))) goto out; if (unlikely(!pskb_may_pull(skb, tnl_hlen))) goto out; /* setup inner skb. */ skb->encapsulation = 0; __skb_pull(skb, tnl_hlen); skb_reset_mac_header(skb); skb_set_network_header(skb, skb_inner_network_offset(skb)); skb->mac_len = skb_inner_network_offset(skb); skb->protocol = skb->inner_protocol; need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM); skb->encap_hdr_csum = need_csum; ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP); features &= skb->dev->hw_enc_features; /* The only checksum offload we care about from here on out is the * outer one so strip the existing checksum feature flags based * on the fact that we will be computing our checksum in software. */ if (ufo) { features &= ~NETIF_F_CSUM_MASK; if (!need_csum) features |= NETIF_F_HW_CSUM; } /* segment inner packet. */ segs = skb_mac_gso_segment(skb, features); if (IS_ERR_OR_NULL(segs)) { skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset, mac_len); goto out; } outer_hlen = skb_tnl_header_len(skb); gre_offset = outer_hlen - tnl_hlen; skb = segs; do { struct gre_base_hdr *greh; __be32 *pcsum; /* Set up inner headers if we are offloading inner checksum */ if (skb->ip_summed == CHECKSUM_PARTIAL) { skb_reset_inner_headers(skb); skb->encapsulation = 1; } skb->mac_len = mac_len; skb->protocol = protocol; __skb_push(skb, outer_hlen); skb_reset_mac_header(skb); skb_set_network_header(skb, mac_len); skb_set_transport_header(skb, gre_offset); if (!need_csum) continue; greh = (struct gre_base_hdr *)skb_transport_header(skb); pcsum = (__be32 *)(greh + 1); *pcsum = 0; *(__sum16 *)pcsum = gso_make_checksum(skb, 0); } while ((skb = skb->next)); out: return segs; } static struct sk_buff **gre_gro_receive(struct sk_buff **head, struct sk_buff *skb) { struct sk_buff **pp = NULL; struct sk_buff *p; const struct gre_base_hdr *greh; unsigned int hlen, grehlen; unsigned int off; int flush = 1; struct packet_offload *ptype; __be16 type; off = skb_gro_offset(skb); hlen = off + sizeof(*greh); greh = skb_gro_header_fast(skb, off); if (skb_gro_header_hard(skb, hlen)) { greh = skb_gro_header_slow(skb, hlen, off); if (unlikely(!greh)) goto out; } /* Only support version 0 and K (key), C (csum) flags. Note that * although the support for the S (seq#) flag can be added easily * for GRO, this is problematic for GSO hence can not be enabled * here because a GRO pkt may end up in the forwarding path, thus * requiring GSO support to break it up correctly. 
*/ if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0) goto out; type = greh->protocol; rcu_read_lock(); ptype = gro_find_receive_by_type(type); if (!ptype) goto out_unlock; grehlen = GRE_HEADER_SECTION; if (greh->flags & GRE_KEY) grehlen += GRE_HEADER_SECTION; if (greh->flags & GRE_CSUM) grehlen += GRE_HEADER_SECTION; hlen = off + grehlen; if (skb_gro_header_hard(skb, hlen)) { greh = skb_gro_header_slow(skb, hlen, off); if (unlikely(!greh)) goto out_unlock; } /* Don't bother verifying checksum if we're going to flush anyway. */ if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) { if (skb_gro_checksum_simple_validate(skb)) goto out_unlock; skb_gro_checksum_try_convert(skb, IPPROTO_GRE, 0, null_compute_pseudo); } for (p = *head; p; p = p->next) { const struct gre_base_hdr *greh2; if (!NAPI_GRO_CB(p)->same_flow) continue; /* The following checks are needed to ensure only pkts * from the same tunnel are considered for aggregation. * The criteria for "the same tunnel" includes: * 1) same version (we only support version 0 here) * 2) same protocol (we only support ETH_P_IP for now) * 3) same set of flags * 4) same key if the key field is present. */ greh2 = (struct gre_base_hdr *)(p->data + off); if (greh2->flags != greh->flags || greh2->protocol != greh->protocol) { NAPI_GRO_CB(p)->same_flow = 0; continue; } if (greh->flags & GRE_KEY) { /* compare keys */ if (*(__be32 *)(greh2+1) != *(__be32 *)(greh+1)) { NAPI_GRO_CB(p)->same_flow = 0; continue; } } } skb_gro_pull(skb, grehlen); /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/ skb_gro_postpull_rcsum(skb, greh, grehlen); pp = ptype->callbacks.gro_receive(head, skb); flush = 0; out_unlock: rcu_read_unlock(); out: NAPI_GRO_CB(skb)->flush |= flush; return pp; } static int gre_gro_complete(struct sk_buff *skb, int nhoff) { struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff); struct packet_offload *ptype; unsigned int grehlen = sizeof(*greh); int err = -ENOENT; __be16 type; skb->encapsulation = 1; skb_shinfo(skb)->gso_type = SKB_GSO_GRE; type = greh->protocol; if (greh->flags & GRE_KEY) grehlen += GRE_HEADER_SECTION; if (greh->flags & GRE_CSUM) grehlen += GRE_HEADER_SECTION; rcu_read_lock(); ptype = gro_find_complete_by_type(type); if (ptype) err = ptype->callbacks.gro_complete(skb, nhoff + grehlen); rcu_read_unlock(); skb_set_inner_mac_header(skb, nhoff + grehlen); return err; } static const struct net_offload gre_offload = { .callbacks = { .gso_segment = gre_gso_segment, .gro_receive = gre_gro_receive, .gro_complete = gre_gro_complete, }, }; static int __init gre_offload_init(void) { return inet_add_offload(&gre_offload, IPPROTO_GRE); } device_initcall(gre_offload_init);
./CrossVul/dataset_final_sorted/CWE-400/c/bad_5356_3
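Both gre_gro_receive() and gre_gro_complete() above size the GRE header as a 4-byte base section plus one extra 4-byte section for each of the C (checksum) and K (key) flags. A standalone sketch of that sizing; the DEMO_* constants are local to the example and kept in host byte order, unlike the kernel's big-endian GRE_CSUM/GRE_KEY definitions.

#include <stdint.h>
#include <stdio.h>

#define DEMO_GRE_SECTION 4	/* each header section is 4 bytes */
#define DEMO_GRE_CSUM 0x8000	/* C flag, host order in this sketch */
#define DEMO_GRE_KEY  0x2000	/* K flag, host order in this sketch */

/* Header length for a version-0 GRE packet honouring only the C/K flags. */
static unsigned int demo_gre_hlen(uint16_t flags)
{
	unsigned int hlen = DEMO_GRE_SECTION;	/* flags + protocol */

	if (flags & DEMO_GRE_CSUM)
		hlen += DEMO_GRE_SECTION;	/* checksum + reserved1 */
	if (flags & DEMO_GRE_KEY)
		hlen += DEMO_GRE_SECTION;	/* key */

	return hlen;
}

int main(void)
{
	/* prints: base=4 csum=8 csum+key=12 */
	printf("base=%u csum=%u csum+key=%u\n",
	       demo_gre_hlen(0),
	       demo_gre_hlen(DEMO_GRE_CSUM),
	       demo_gre_hlen(DEMO_GRE_CSUM | DEMO_GRE_KEY));
	return 0;
}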
crossvul-cpp_data_good_551_0
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2012 * All rights reserved * * This file is part of GPAC / ISO Media File Format sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/internal/isomedia_dev.h> #ifndef GPAC_DISABLE_ISOM void co64_del(GF_Box *s) { GF_ChunkLargeOffsetBox *ptr; ptr = (GF_ChunkLargeOffsetBox *) s; if (ptr == NULL) return; if (ptr->offsets) gf_free(ptr->offsets); gf_free(ptr); } GF_Err co64_Read(GF_Box *s,GF_BitStream *bs) { u32 entries; GF_ChunkLargeOffsetBox *ptr = (GF_ChunkLargeOffsetBox *) s; ptr->nb_entries = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4) if (ptr->nb_entries > ptr->size / 8) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in co64\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } ptr->offsets = (u64 *) gf_malloc(ptr->nb_entries * sizeof(u64) ); if (ptr->offsets == NULL) return GF_OUT_OF_MEM; ptr->alloc_size = ptr->nb_entries; for (entries = 0; entries < ptr->nb_entries; entries++) { ptr->offsets[entries] = gf_bs_read_u64(bs); } return GF_OK; } GF_Box *co64_New() { ISOM_DECL_BOX_ALLOC(GF_ChunkLargeOffsetBox, GF_ISOM_BOX_TYPE_CO64); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err co64_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_ChunkLargeOffsetBox *ptr = (GF_ChunkLargeOffsetBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nb_entries); for (i = 0; i < ptr->nb_entries; i++ ) { gf_bs_write_u64(bs, ptr->offsets[i]); } return GF_OK; } GF_Err co64_Size(GF_Box *s) { GF_ChunkLargeOffsetBox *ptr = (GF_ChunkLargeOffsetBox *) s; ptr->size += 4 + (8 * ptr->nb_entries); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void cprt_del(GF_Box *s) { GF_CopyrightBox *ptr = (GF_CopyrightBox *) s; if (ptr == NULL) return; if (ptr->notice) gf_free(ptr->notice); gf_free(ptr); } GF_Box *chpl_New() { ISOM_DECL_BOX_ALLOC(GF_ChapterListBox, GF_ISOM_BOX_TYPE_CHPL); tmp->list = gf_list_new(); tmp->version = 1; return (GF_Box *)tmp; } void chpl_del(GF_Box *s) { GF_ChapterListBox *ptr = (GF_ChapterListBox *) s; if (ptr == NULL) return; while (gf_list_count(ptr->list)) { GF_ChapterEntry *ce = (GF_ChapterEntry *)gf_list_get(ptr->list, 0); if (ce->name) gf_free(ce->name); gf_free(ce); gf_list_rem(ptr->list, 0); } gf_list_del(ptr->list); gf_free(ptr); } /*this is using chpl format according to some NeroRecode samples*/ GF_Err chpl_Read(GF_Box *s,GF_BitStream *bs) { GF_ChapterEntry *ce; u32 nb_chaps, len, i, count; GF_ChapterListBox *ptr = (GF_ChapterListBox *)s; /*reserved or ???*/ gf_bs_read_u32(bs); nb_chaps = gf_bs_read_u8(bs); count = 0; while (nb_chaps) { GF_SAFEALLOC(ce, GF_ChapterEntry); if (!ce) return GF_OUT_OF_MEM; ce->start_time = gf_bs_read_u64(bs); len = gf_bs_read_u8(bs); if (len) { ce->name = (char *)gf_malloc(sizeof(char)*(len+1)); 
gf_bs_read_data(bs, ce->name, len); ce->name[len] = 0; } else { ce->name = gf_strdup(""); } for (i=0; i<count; i++) { GF_ChapterEntry *ace = (GF_ChapterEntry *) gf_list_get(ptr->list, i); if (ace->start_time >= ce->start_time) { gf_list_insert(ptr->list, ce, i); ce = NULL; break; } } if (ce) gf_list_add(ptr->list, ce); count++; nb_chaps--; } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err chpl_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 count, i; GF_ChapterListBox *ptr = (GF_ChapterListBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; count = gf_list_count(ptr->list); gf_bs_write_u32(bs, 0); gf_bs_write_u8(bs, count); for (i=0; i<count; i++) { u32 len; GF_ChapterEntry *ce = (GF_ChapterEntry *)gf_list_get(ptr->list, i); gf_bs_write_u64(bs, ce->start_time); if (ce->name) { len = (u32) strlen(ce->name); if (len>255) len = 255; gf_bs_write_u8(bs, len); gf_bs_write_data(bs, ce->name, len); } else { gf_bs_write_u8(bs, 0); } } return GF_OK; } GF_Err chpl_Size(GF_Box *s) { u32 count, i; GF_ChapterListBox *ptr = (GF_ChapterListBox *)s; ptr->size += 5; count = gf_list_count(ptr->list); for (i=0; i<count; i++) { GF_ChapterEntry *ce = (GF_ChapterEntry *)gf_list_get(ptr->list, i); ptr->size += 9; /*64bit time stamp + 8bit str len*/ if (ce->name) ptr->size += strlen(ce->name); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Err cprt_Read(GF_Box *s,GF_BitStream *bs) { GF_CopyrightBox *ptr = (GF_CopyrightBox *)s; gf_bs_read_int(bs, 1); //the spec is unclear here, just says "the value 0 is interpreted as undetermined" ptr->packedLanguageCode[0] = gf_bs_read_int(bs, 5); ptr->packedLanguageCode[1] = gf_bs_read_int(bs, 5); ptr->packedLanguageCode[2] = gf_bs_read_int(bs, 5); ISOM_DECREASE_SIZE(ptr, 2); //but before or after compaction ?? 
We assume before if (ptr->packedLanguageCode[0] || ptr->packedLanguageCode[1] || ptr->packedLanguageCode[2]) { ptr->packedLanguageCode[0] += 0x60; ptr->packedLanguageCode[1] += 0x60; ptr->packedLanguageCode[2] += 0x60; } else { ptr->packedLanguageCode[0] = 'u'; ptr->packedLanguageCode[1] = 'n'; ptr->packedLanguageCode[2] = 'd'; } if (ptr->size) { u32 bytesToRead = (u32) ptr->size; ptr->notice = (char*)gf_malloc(bytesToRead * sizeof(char)); if (ptr->notice == NULL) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->notice, bytesToRead); } return GF_OK; } GF_Box *cprt_New() { ISOM_DECL_BOX_ALLOC(GF_CopyrightBox, GF_ISOM_BOX_TYPE_CPRT); tmp->packedLanguageCode[0] = 'u'; tmp->packedLanguageCode[1] = 'n'; tmp->packedLanguageCode[2] = 'd'; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err cprt_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_CopyrightBox *ptr = (GF_CopyrightBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, 0, 1); if (ptr->packedLanguageCode[0]) { gf_bs_write_int(bs, ptr->packedLanguageCode[0] - 0x60, 5); gf_bs_write_int(bs, ptr->packedLanguageCode[1] - 0x60, 5); gf_bs_write_int(bs, ptr->packedLanguageCode[2] - 0x60, 5); } else { gf_bs_write_int(bs, 0, 15); } if (ptr->notice) { gf_bs_write_data(bs, ptr->notice, (u32) (strlen(ptr->notice) + 1) ); } return GF_OK; } GF_Err cprt_Size(GF_Box *s) { GF_CopyrightBox *ptr = (GF_CopyrightBox *)s; ptr->size += 2; if (ptr->notice) ptr->size += strlen(ptr->notice) + 1; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void kind_del(GF_Box *s) { GF_KindBox *ptr = (GF_KindBox *) s; if (ptr == NULL) return; if (ptr->schemeURI) gf_free(ptr->schemeURI); if (ptr->value) gf_free(ptr->value); gf_free(ptr); } GF_Err kind_Read(GF_Box *s,GF_BitStream *bs) { GF_KindBox *ptr = (GF_KindBox *)s; if (ptr->size) { u32 bytesToRead = (u32) ptr->size; char *data; u32 schemeURIlen; data = (char*)gf_malloc(bytesToRead * sizeof(char)); if (data == NULL) return GF_OUT_OF_MEM; gf_bs_read_data(bs, data, bytesToRead); /*safety check in case the string is not null-terminated*/ if (data[bytesToRead-1]) { char *str = (char*)gf_malloc((u32) bytesToRead + 1); memcpy(str, data, (u32) bytesToRead); str[ptr->size] = 0; gf_free(data); data = str; bytesToRead++; } ptr->schemeURI = gf_strdup(data); schemeURIlen = (u32) strlen(data); if (bytesToRead > schemeURIlen+1) { /* read the value */ char *data_value = data + schemeURIlen +1; ptr->value = gf_strdup(data_value); } gf_free(data); } return GF_OK; } GF_Box *kind_New() { ISOM_DECL_BOX_ALLOC(GF_KindBox, GF_ISOM_BOX_TYPE_KIND); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err kind_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_KindBox *ptr = (GF_KindBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_data(bs, ptr->schemeURI, (u32) (strlen(ptr->schemeURI) + 1 )); if (ptr->value) { gf_bs_write_data(bs, ptr->value, (u32) (strlen(ptr->value) + 1) ); } return GF_OK; } GF_Err kind_Size(GF_Box *s) { GF_KindBox *ptr = (GF_KindBox *)s; ptr->size += strlen(ptr->schemeURI) + 1; if (ptr->value) { ptr->size += strlen(ptr->value) + 1; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void ctts_del(GF_Box *s) { GF_CompositionOffsetBox *ptr = (GF_CompositionOffsetBox *)s; if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err ctts_Read(GF_Box *s, GF_BitStream *bs) { u32 i; u32 sampleCount; GF_CompositionOffsetBox *ptr = (GF_CompositionOffsetBox *)s; ptr->nb_entries = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); if (ptr->nb_entries > ptr->size / 8) { 
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in ctts\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } ptr->alloc_size = ptr->nb_entries; ptr->entries = (GF_DttsEntry *)gf_malloc(sizeof(GF_DttsEntry)*ptr->alloc_size); if (!ptr->entries) return GF_OUT_OF_MEM; sampleCount = 0; for (i=0; i<ptr->nb_entries; i++) { ptr->entries[i].sampleCount = gf_bs_read_u32(bs); if (ptr->version) ptr->entries[i].decodingOffset = gf_bs_read_int(bs, 32); else ptr->entries[i].decodingOffset = (s32) gf_bs_read_u32(bs); sampleCount += ptr->entries[i].sampleCount; } #ifndef GPAC_DISABLE_ISOM_WRITE ptr->w_LastSampleNumber = sampleCount; #endif return GF_OK; } GF_Box *ctts_New() { ISOM_DECL_BOX_ALLOC(GF_CompositionOffsetBox, GF_ISOM_BOX_TYPE_CTTS); return (GF_Box *) tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err ctts_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_CompositionOffsetBox *ptr = (GF_CompositionOffsetBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nb_entries); for (i=0; i<ptr->nb_entries; i++ ) { gf_bs_write_u32(bs, ptr->entries[i].sampleCount); if (ptr->version) { gf_bs_write_int(bs, ptr->entries[i].decodingOffset, 32); } else { gf_bs_write_u32(bs, (u32) ptr->entries[i].decodingOffset); } } return GF_OK; } GF_Err ctts_Size(GF_Box *s) { GF_CompositionOffsetBox *ptr = (GF_CompositionOffsetBox *) s; ptr->size += 4 + (8 * ptr->nb_entries); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void cslg_del(GF_Box *s) { GF_CompositionToDecodeBox *ptr = (GF_CompositionToDecodeBox *)s; if (ptr == NULL) return; gf_free(ptr); return; } GF_Err cslg_Read(GF_Box *s, GF_BitStream *bs) { GF_CompositionToDecodeBox *ptr = (GF_CompositionToDecodeBox *)s; ptr->compositionToDTSShift = gf_bs_read_int(bs, 32); ptr->leastDecodeToDisplayDelta = gf_bs_read_int(bs, 32); ptr->greatestDecodeToDisplayDelta = gf_bs_read_int(bs, 32); ptr->compositionStartTime = gf_bs_read_int(bs, 32); ptr->compositionEndTime = gf_bs_read_int(bs, 32); return GF_OK; } GF_Box *cslg_New() { ISOM_DECL_BOX_ALLOC(GF_CompositionToDecodeBox, GF_ISOM_BOX_TYPE_CSLG); return (GF_Box *) tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err cslg_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_CompositionToDecodeBox *ptr = (GF_CompositionToDecodeBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, ptr->compositionToDTSShift, 32); gf_bs_write_int(bs, ptr->leastDecodeToDisplayDelta, 32); gf_bs_write_int(bs, ptr->greatestDecodeToDisplayDelta, 32); gf_bs_write_int(bs, ptr->compositionStartTime, 32); gf_bs_write_int(bs, ptr->compositionEndTime, 32); return GF_OK; } GF_Err cslg_Size(GF_Box *s) { GF_CompositionToDecodeBox *ptr = (GF_CompositionToDecodeBox *)s; ptr->size += 20; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void ccst_del(GF_Box *s) { GF_CodingConstraintsBox *ptr = (GF_CodingConstraintsBox *)s; if (ptr) gf_free(ptr); return; } GF_Err ccst_Read(GF_Box *s, GF_BitStream *bs) { GF_CodingConstraintsBox *ptr = (GF_CodingConstraintsBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->all_ref_pics_intra = gf_bs_read_int(bs, 1); ptr->intra_pred_used = gf_bs_read_int(bs, 1); ptr->max_ref_per_pic = gf_bs_read_int(bs, 4); ptr->reserved = gf_bs_read_int(bs, 26); return GF_OK; } GF_Box *ccst_New() { ISOM_DECL_BOX_ALLOC(GF_CodingConstraintsBox, GF_ISOM_BOX_TYPE_CCST); return (GF_Box *) tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err ccst_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_CodingConstraintsBox *ptr = (GF_CodingConstraintsBox *)s; e = gf_isom_full_box_write(s, bs); 
if (e) return e; gf_bs_write_int(bs, ptr->all_ref_pics_intra, 1); gf_bs_write_int(bs, ptr->intra_pred_used, 1); gf_bs_write_int(bs, ptr->max_ref_per_pic, 4); gf_bs_write_int(bs, 0, 26); return GF_OK; } GF_Err ccst_Size(GF_Box *s) { GF_CodingConstraintsBox *ptr = (GF_CodingConstraintsBox *)s; ptr->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void url_del(GF_Box *s) { GF_DataEntryURLBox *ptr = (GF_DataEntryURLBox *)s; if (ptr == NULL) return; if (ptr->location) gf_free(ptr->location); gf_free(ptr); return; } GF_Err url_Read(GF_Box *s, GF_BitStream *bs) { GF_DataEntryURLBox *ptr = (GF_DataEntryURLBox *)s; if (ptr->size) { ptr->location = (char*)gf_malloc((u32) ptr->size); if (! ptr->location) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->location, (u32)ptr->size); } return GF_OK; } GF_Box *url_New() { ISOM_DECL_BOX_ALLOC(GF_DataEntryURLBox, GF_ISOM_BOX_TYPE_URL); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err url_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_DataEntryURLBox *ptr = (GF_DataEntryURLBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; //the flag set indicates we have a string (WE HAVE TO for URLs) if ( !(ptr->flags & 1)) { if (ptr->location) { gf_bs_write_data(bs, ptr->location, (u32)strlen(ptr->location) + 1); } } return GF_OK; } GF_Err url_Size(GF_Box *s) { GF_DataEntryURLBox *ptr = (GF_DataEntryURLBox *)s; if ( !(ptr->flags & 1)) { if (ptr->location) ptr->size += 1 + strlen(ptr->location); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void urn_del(GF_Box *s) { GF_DataEntryURNBox *ptr = (GF_DataEntryURNBox *)s; if (ptr == NULL) return; if (ptr->location) gf_free(ptr->location); if (ptr->nameURN) gf_free(ptr->nameURN); gf_free(ptr); } GF_Err urn_Read(GF_Box *s, GF_BitStream *bs) { u32 i, to_read; char *tmpName; GF_DataEntryURNBox *ptr = (GF_DataEntryURNBox *)s; if (! 
ptr->size ) return GF_OK;

	//here we have to handle that in a clever way
	to_read = (u32) ptr->size;
	tmpName = (char*)gf_malloc(sizeof(char) * to_read);
	if (!tmpName) return GF_OUT_OF_MEM;
	//get the data
	gf_bs_read_data(bs, tmpName, to_read);

	//then get the break
	i = 0;
	while ( (i < to_read) && (tmpName[i] != 0) ) {
		i++;
	}
	//check the data is consistent
	if (i == to_read) {
		gf_free(tmpName);
		return GF_ISOM_INVALID_FILE;
	}
	//no NULL char, URL is not specified
	if (i == to_read - 1) {
		ptr->nameURN = tmpName;
		ptr->location = NULL;
		return GF_OK;
	}
	//OK, this has both URN and URL
	ptr->nameURN = (char*)gf_malloc(sizeof(char) * (i+1));
	if (!ptr->nameURN) {
		gf_free(tmpName);
		return GF_OUT_OF_MEM;
	}
	memcpy(ptr->nameURN, tmpName, i + 1);
	if (tmpName[to_read - 1] != 0) {
		GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] urn box contains invalid location field\n" ));
	} else {
		ptr->location = (char*)gf_malloc(sizeof(char) * (to_read - i - 1));
		if (!ptr->location) {
			gf_free(tmpName);
			gf_free(ptr->nameURN);
			ptr->nameURN = NULL;
			return GF_OUT_OF_MEM;
		}
		memcpy(ptr->location, tmpName + i + 1, (to_read - i - 1));
	}
	gf_free(tmpName);
	return GF_OK;
}

GF_Box *urn_New()
{
	ISOM_DECL_BOX_ALLOC(GF_DataEntryURNBox, GF_ISOM_BOX_TYPE_URN);
	return (GF_Box *)tmp;
}

#ifndef GPAC_DISABLE_ISOM_WRITE

GF_Err urn_Write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	GF_DataEntryURNBox *ptr = (GF_DataEntryURNBox *)s;
	e = gf_isom_full_box_write(s, bs);
	if (e) return e;
	//the flag set indicates we have a string (WE HAVE TO for URLs)
	if ( !(ptr->flags & 1)) {
		//to check, the spec says: First name, then location
		if (ptr->nameURN) {
			gf_bs_write_data(bs, ptr->nameURN, (u32)strlen(ptr->nameURN) + 1);
		}
		if (ptr->location) {
			gf_bs_write_data(bs, ptr->location, (u32)strlen(ptr->location) + 1);
		}
	}
	return GF_OK;
}

GF_Err urn_Size(GF_Box *s)
{
	GF_DataEntryURNBox *ptr = (GF_DataEntryURNBox *)s;
	if ( !(ptr->flags & 1)) {
		if (ptr->nameURN) ptr->size += 1 + strlen(ptr->nameURN);
		if (ptr->location) ptr->size += 1 + strlen(ptr->location);
	}
	return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/

void unkn_del(GF_Box *s)
{
	GF_UnknownBox *ptr = (GF_UnknownBox *) s;
	if (!s) return;
	if (ptr->data) gf_free(ptr->data);
	gf_free(ptr);
}

GF_Err unkn_Read(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	u32 bytesToRead, sub_size, sub_a;
	GF_BitStream *sub_bs;
	GF_UnknownBox *ptr = (GF_UnknownBox *)s;
	if (ptr->size > 0xFFFFFFFF) return GF_ISOM_INVALID_FILE;
	bytesToRead = (u32) (ptr->size);
	if (!bytesToRead) return GF_OK;
	if (bytesToRead>1000000) {
		GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Unknown box %s (0x%08X) with payload larger than 1 MBytes, ignoring\n", gf_4cc_to_str(ptr->type), ptr->type ));
		//ptr->dataSize is not set yet, the unread payload length is ptr->size
		gf_bs_skip_bytes(bs, ptr->size);
		return GF_OK;
	}
	ptr->data = (char*)gf_malloc(bytesToRead);
	if (ptr->data == NULL ) return GF_OUT_OF_MEM;
	ptr->dataSize = bytesToRead;
	gf_bs_read_data(bs, ptr->data, ptr->dataSize);

	//try to parse container boxes, check if next 8 bytes match a subbox
	sub_bs = gf_bs_new(ptr->data, ptr->dataSize, GF_BITSTREAM_READ);
	sub_size = gf_bs_read_u32(sub_bs);
	sub_a = gf_bs_read_u8(sub_bs);
	e = (sub_size && (sub_size <= ptr->dataSize)) ? GF_OK : GF_NOT_SUPPORTED;
	if (! isalnum(sub_a)) e = GF_NOT_SUPPORTED;
	sub_a = gf_bs_read_u8(sub_bs);
	if (! isalnum(sub_a)) e = GF_NOT_SUPPORTED;
	sub_a = gf_bs_read_u8(sub_bs);
	if (! isalnum(sub_a)) e = GF_NOT_SUPPORTED;
	sub_a = gf_bs_read_u8(sub_bs);
	if (! isalnum(sub_a)) e = GF_NOT_SUPPORTED;

	if (e == GF_OK) {
		gf_bs_seek(sub_bs, 0);
		e = gf_isom_box_array_read(s, sub_bs, gf_isom_box_add_default);
	}
	gf_bs_del(sub_bs);
	if (e==GF_OK) {
		gf_free(ptr->data);
		ptr->data = NULL;
		ptr->dataSize = 0;
	} else if (s->other_boxes) {
		gf_isom_box_array_del(s->other_boxes);
		s->other_boxes=NULL;
	}
	return GF_OK;
}

GF_Box *unkn_New()
{
	ISOM_DECL_BOX_ALLOC(GF_UnknownBox, GF_ISOM_BOX_TYPE_UNKNOWN);
	return (GF_Box *) tmp;
}

#ifndef GPAC_DISABLE_ISOM_WRITE

GF_Err unkn_Write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	u32 type;
	GF_UnknownBox *ptr = (GF_UnknownBox *)s;
	if (!s) return GF_BAD_PARAM;
	//write the original 4CC in the header, then restore the UNKNOWN type
	type = s->type;
	ptr->type = ptr->original_4cc;
	e = gf_isom_box_write_header(s, bs);
	ptr->type = type;
	if (e) return e;
	if (ptr->dataSize && ptr->data) {
		gf_bs_write_data(bs, ptr->data, ptr->dataSize);
	}
	return GF_OK;
}

GF_Err unkn_Size(GF_Box *s)
{
	GF_UnknownBox *ptr = (GF_UnknownBox *)s;
	if (ptr->dataSize && ptr->data) {
		ptr->size += ptr->dataSize;
	}
	return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/

void def_cont_box_del(GF_Box *s)
{
	if (s) gf_free(s);
}

GF_Err def_cont_box_Read(GF_Box *s, GF_BitStream *bs)
{
	return gf_isom_box_array_read(s, bs, gf_isom_box_add_default);
}

GF_Box *def_cont_box_New()
{
	ISOM_DECL_BOX_ALLOC(GF_Box, 0);
	return (GF_Box *) tmp;
}

#ifndef GPAC_DISABLE_ISOM_WRITE

GF_Err def_cont_box_Write(GF_Box *s, GF_BitStream *bs)
{
	return gf_isom_box_write_header(s, bs);
}

GF_Err def_cont_box_Size(GF_Box *s)
{
	return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/

void uuid_del(GF_Box *s)
{
	GF_UnknownUUIDBox *ptr = (GF_UnknownUUIDBox *) s;
	if (!s) return;
	if (ptr->data) gf_free(ptr->data);
	gf_free(ptr);
}

GF_Err uuid_Read(GF_Box *s, GF_BitStream *bs)
{
	u32 bytesToRead;
	GF_UnknownUUIDBox *ptr = (GF_UnknownUUIDBox *)s;
	if (ptr->size > 0xFFFFFFFF) return GF_ISOM_INVALID_FILE;
	bytesToRead = (u32) (ptr->size);
	if (bytesToRead) {
		ptr->data = (char*)gf_malloc(bytesToRead);
		if (ptr->data == NULL ) return GF_OUT_OF_MEM;
		ptr->dataSize = bytesToRead;
		gf_bs_read_data(bs, ptr->data, ptr->dataSize);
	}
	return GF_OK;
}

GF_Box *uuid_New()
{
	ISOM_DECL_BOX_ALLOC(GF_UnknownUUIDBox, GF_ISOM_BOX_TYPE_UUID);
	return (GF_Box *) tmp;
}

#ifndef GPAC_DISABLE_ISOM_WRITE

GF_Err uuid_Write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	GF_UnknownUUIDBox *ptr = (GF_UnknownUUIDBox*)s;
	if (!s) return GF_BAD_PARAM;
	e = gf_isom_box_write_header(s, bs);
	if (e) return e;
	if (ptr->data) {
		gf_bs_write_data(bs, ptr->data, ptr->dataSize);
	}
	return GF_OK;
}

GF_Err uuid_Size(GF_Box *s)
{
	GF_UnknownUUIDBox*ptr = (GF_UnknownUUIDBox*)s;
	ptr->size += ptr->dataSize;
	return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/

void dinf_del(GF_Box *s)
{
	GF_DataInformationBox *ptr = (GF_DataInformationBox *)s;
	if (ptr == NULL) return;
	gf_isom_box_del((GF_Box *)ptr->dref);
	gf_free(ptr);
}

GF_Err dinf_AddBox(GF_Box *s, GF_Box *a)
{
	GF_DataInformationBox *ptr = (GF_DataInformationBox *)s;
	switch(a->type) {
	case GF_ISOM_BOX_TYPE_DREF:
		if (ptr->dref) ERROR_ON_DUPLICATED_BOX(a, ptr)
		ptr->dref = (GF_DataReferenceBox *)a;
		return GF_OK;
	default:
		return gf_isom_box_add_default(s, a);
	}
	return GF_OK;
}

GF_Err dinf_Read(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e = gf_isom_box_array_read(s, bs, dinf_AddBox);
	if (e) {
		return e;
	}
	if (!((GF_DataInformationBox *)s)->dref) {
		GF_Box* dref;
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing dref box in dinf\n"));
		dref = gf_isom_box_new(GF_ISOM_BOX_TYPE_DREF);
		((GF_DataInformationBox *)s)->dref = (GF_DataReferenceBox *)dref;
		gf_isom_box_add_for_dump_mode(s, dref);
	}
	return GF_OK;
}

GF_Box
*dinf_New() { ISOM_DECL_BOX_ALLOC(GF_DataInformationBox, GF_ISOM_BOX_TYPE_DINF); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dinf_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_DataInformationBox *ptr = (GF_DataInformationBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; if (ptr->dref) { e = gf_isom_box_write((GF_Box *)ptr->dref, bs); if (e) return e; } return GF_OK; } GF_Err dinf_Size(GF_Box *s) { GF_Err e; GF_DataInformationBox *ptr = (GF_DataInformationBox *)s; if (ptr->dref) { e = gf_isom_box_size((GF_Box *) ptr->dref); if (e) return e; ptr->size += ptr->dref->size; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void dref_del(GF_Box *s) { GF_DataReferenceBox *ptr = (GF_DataReferenceBox *) s; if (ptr == NULL) return; gf_free(ptr); } GF_Err dref_AddDataEntry(GF_Box *ptr, GF_Box *entry) { return gf_isom_box_add_default(ptr, entry); } GF_Err dref_Read(GF_Box *s, GF_BitStream *bs) { GF_DataReferenceBox *ptr = (GF_DataReferenceBox *)s; if (ptr == NULL) return GF_BAD_PARAM; gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); return gf_isom_box_array_read(s, bs, dref_AddDataEntry); } GF_Box *dref_New() { ISOM_DECL_BOX_ALLOC(GF_DataReferenceBox, GF_ISOM_BOX_TYPE_DREF); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dref_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 count; GF_DataReferenceBox *ptr = (GF_DataReferenceBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; count = ptr->other_boxes ? gf_list_count(ptr->other_boxes) : 0; gf_bs_write_u32(bs, count); return GF_OK; } GF_Err dref_Size(GF_Box *s) { GF_DataReferenceBox *ptr = (GF_DataReferenceBox *)s; if (!s) return GF_BAD_PARAM; ptr->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void edts_del(GF_Box *s) { GF_EditBox *ptr = (GF_EditBox *) s; gf_isom_box_del((GF_Box *)ptr->editList); gf_free(ptr); } GF_Err edts_AddBox(GF_Box *s, GF_Box *a) { GF_EditBox *ptr = (GF_EditBox *)s; if (a->type == GF_ISOM_BOX_TYPE_ELST) { if (ptr->editList) return GF_BAD_PARAM; ptr->editList = (GF_EditListBox *)a; return GF_OK; } else { return gf_isom_box_add_default(s, a); } return GF_OK; } GF_Err edts_Read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, edts_AddBox); } GF_Box *edts_New() { ISOM_DECL_BOX_ALLOC(GF_EditBox, GF_ISOM_BOX_TYPE_EDTS); return (GF_Box *) tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err edts_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_EditBox *ptr = (GF_EditBox *)s; //here we have a trick: if editList is empty, skip the box if (ptr->editList && gf_list_count(ptr->editList->entryList)) { e = gf_isom_box_write_header(s, bs); if (e) return e; e = gf_isom_box_write((GF_Box *) ptr->editList, bs); if (e) return e; } return GF_OK; } GF_Err edts_Size(GF_Box *s) { GF_Err e; GF_EditBox *ptr = (GF_EditBox *)s; //here we have a trick: if editList is empty, skip the box if (!ptr->editList || ! 
gf_list_count(ptr->editList->entryList)) {
		ptr->size = 0;
	} else {
		e = gf_isom_box_size((GF_Box *)ptr->editList);
		if (e) return e;
		ptr->size += ptr->editList->size;
	}
	return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/

void elst_del(GF_Box *s)
{
	GF_EditListBox *ptr;
	GF_EdtsEntry *p;
	u32 nb_entries;
	u32 i;
	ptr = (GF_EditListBox *)s;
	if (ptr == NULL) return;
	nb_entries = gf_list_count(ptr->entryList);
	for (i = 0; i < nb_entries; i++) {
		p = (GF_EdtsEntry*)gf_list_get(ptr->entryList, i);
		if (p) gf_free(p);
	}
	gf_list_del(ptr->entryList);
	gf_free(ptr);
}

GF_Err elst_Read(GF_Box *s, GF_BitStream *bs)
{
	u32 entries;
	s32 tr;
	u32 nb_entries;
	GF_EdtsEntry *p;
	GF_EditListBox *ptr = (GF_EditListBox *)s;
	nb_entries = gf_bs_read_u32(bs);
	ISOM_DECREASE_SIZE(ptr, 4);
	if (ptr->version == 1) {
		if (nb_entries > ptr->size / 20) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in elst\n", nb_entries));
			return GF_ISOM_INVALID_FILE;
		}
	} else {
		if (nb_entries > ptr->size / 12) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in elst\n", nb_entries));
			return GF_ISOM_INVALID_FILE;
		}
	}
	for (entries = 0; entries < nb_entries; entries++) {
		p = (GF_EdtsEntry *) gf_malloc(sizeof(GF_EdtsEntry));
		if (!p) return GF_OUT_OF_MEM;
		if (ptr->version == 1) {
			p->segmentDuration = gf_bs_read_u64(bs);
			p->mediaTime = (s64) gf_bs_read_u64(bs);
		} else {
			p->segmentDuration = gf_bs_read_u32(bs);
			tr = gf_bs_read_u32(bs);
			p->mediaTime = (s64) tr;
		}
		p->mediaRate = gf_bs_read_u16(bs);
		gf_bs_read_u16(bs);
		gf_list_add(ptr->entryList, p);
	}
	return GF_OK;
}

GF_Box *elst_New()
{
	ISOM_DECL_BOX_ALLOC(GF_EditListBox, GF_ISOM_BOX_TYPE_ELST);
	tmp->entryList = gf_list_new();
	if (!tmp->entryList) {
		gf_free(tmp);
		return NULL;
	}
	return (GF_Box *)tmp;
}

#ifndef GPAC_DISABLE_ISOM_WRITE

GF_Err elst_Write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	u32 i;
	u32 nb_entries;
	GF_EdtsEntry *p;
	GF_EditListBox *ptr = (GF_EditListBox *)s;
	if (!ptr) return GF_BAD_PARAM;
	nb_entries = gf_list_count(ptr->entryList);
	e = gf_isom_full_box_write(s, bs);
	if (e) return e;
	gf_bs_write_u32(bs, nb_entries);
	for (i = 0; i < nb_entries; i++ ) {
		p = (GF_EdtsEntry*)gf_list_get(ptr->entryList, i);
		if (ptr->version == 1) {
			gf_bs_write_u64(bs, p->segmentDuration);
			gf_bs_write_u64(bs, p->mediaTime);
		} else {
			gf_bs_write_u32(bs, (u32) p->segmentDuration);
			gf_bs_write_u32(bs, (s32) p->mediaTime);
		}
		gf_bs_write_u16(bs, p->mediaRate);
		gf_bs_write_u16(bs, 0);
	}
	return GF_OK;
}

GF_Err elst_Size(GF_Box *s)
{
	u32 durtimebytes;
	u32 i, nb_entries;
	GF_EditListBox *ptr = (GF_EditListBox *)s;
	//entry count
	ptr->size += 4;
	nb_entries = gf_list_count(ptr->entryList);
	ptr->version = 0;
	for (i=0; i<nb_entries; i++) {
		GF_EdtsEntry *p = (GF_EdtsEntry*)gf_list_get(ptr->entryList, i);
		if ((p->segmentDuration>0xFFFFFFFF) || (p->mediaTime>0xFFFFFFFF)) {
			ptr->version = 1;
			break;
		}
	}
	durtimebytes = (ptr->version == 1 ?
16 : 8) + 4; ptr->size += (nb_entries * durtimebytes); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void esds_del(GF_Box *s) { GF_ESDBox *ptr = (GF_ESDBox *)s; if (ptr == NULL) return; if (ptr->desc) gf_odf_desc_del((GF_Descriptor *)ptr->desc); gf_free(ptr); } GF_Err esds_Read(GF_Box *s, GF_BitStream *bs) { GF_Err e=GF_OK; u32 descSize; char *enc_desc; u32 SLIsPredefined(GF_SLConfig *sl); GF_ESDBox *ptr = (GF_ESDBox *)s; descSize = (u32) (ptr->size); if (descSize) { enc_desc = (char*)gf_malloc(sizeof(char) * descSize); if (!enc_desc) return GF_OUT_OF_MEM; //get the payload gf_bs_read_data(bs, enc_desc, descSize); //send it to the OD Codec e = gf_odf_desc_read(enc_desc, descSize, (GF_Descriptor **) &ptr->desc); //OK, free our desc gf_free(enc_desc); if (ptr->desc && (ptr->desc->tag!=GF_ODF_ESD_TAG) ) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid descriptor tag 0x%x in esds\n", ptr->desc->tag)); gf_odf_desc_del((GF_Descriptor*)ptr->desc); ptr->desc=NULL; return GF_ISOM_INVALID_FILE; } if (e) { ptr->desc = NULL; } else { /*fix broken files*/ if (!ptr->desc->URLString) { if (!ptr->desc->slConfig) { ptr->desc->slConfig = (GF_SLConfig *) gf_odf_desc_new(GF_ODF_SLC_TAG); ptr->desc->slConfig->predefined = SLPredef_MP4; } else if (ptr->desc->slConfig->predefined != SLPredef_MP4) { ptr->desc->slConfig->predefined = SLPredef_MP4; gf_odf_slc_set_pref(ptr->desc->slConfig); } } } } return e; } GF_Box *esds_New() { ISOM_DECL_BOX_ALLOC(GF_ESDBox, GF_ISOM_BOX_TYPE_ESDS); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err esds_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; char *enc_desc; u32 descSize = 0; GF_ESDBox *ptr = (GF_ESDBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; e = gf_odf_desc_write((GF_Descriptor *)ptr->desc, &enc_desc, &descSize); if (e) return e; gf_bs_write_data(bs, enc_desc, descSize); //free our buffer gf_free(enc_desc); return GF_OK; } GF_Err esds_Size(GF_Box *s) { u32 descSize = 0; GF_ESDBox *ptr = (GF_ESDBox *)s; descSize = gf_odf_desc_size((GF_Descriptor *)ptr->desc); ptr->size += descSize; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void free_del(GF_Box *s) { GF_FreeSpaceBox *ptr = (GF_FreeSpaceBox *)s; if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Err free_Read(GF_Box *s, GF_BitStream *bs) { u32 bytesToRead; GF_FreeSpaceBox *ptr = (GF_FreeSpaceBox *)s; if (ptr->size > 0xFFFFFFFF) return GF_IO_ERR; bytesToRead = (u32) (ptr->size); if (bytesToRead) { ptr->data = (char*)gf_malloc(bytesToRead * sizeof(char)); gf_bs_read_data(bs, ptr->data, bytesToRead); ptr->dataSize = bytesToRead; } return GF_OK; } GF_Box *free_New() { ISOM_DECL_BOX_ALLOC(GF_FreeSpaceBox, GF_ISOM_BOX_TYPE_FREE); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err free_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_FreeSpaceBox *ptr = (GF_FreeSpaceBox *)s; if (ptr->original_4cc) { u32 t = s->type; s->type=ptr->original_4cc; e = gf_isom_box_write_header(s, bs); s->type=t; } else { e = gf_isom_box_write_header(s, bs); } if (e) return e; if (ptr->dataSize) { if (ptr->data) { gf_bs_write_data(bs, ptr->data, ptr->dataSize); } else { u32 i = 0; while (i<ptr->dataSize) { gf_bs_write_u8(bs, 0); i++; } } } return GF_OK; } GF_Err free_Size(GF_Box *s) { GF_FreeSpaceBox *ptr = (GF_FreeSpaceBox *)s; ptr->size += ptr->dataSize; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void ftyp_del(GF_Box *s) { GF_FileTypeBox *ptr = (GF_FileTypeBox *) s; if (ptr->altBrand) gf_free(ptr->altBrand); gf_free(ptr); } GF_Box *ftyp_New() { 
ISOM_DECL_BOX_ALLOC(GF_FileTypeBox, GF_ISOM_BOX_TYPE_FTYP); return (GF_Box *)tmp; } GF_Err ftyp_Read(GF_Box *s,GF_BitStream *bs) { u32 i; GF_FileTypeBox *ptr = (GF_FileTypeBox *)s; if (ptr->size < 8) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Found ftyp with size < 8, likely broken!\n")); return GF_BAD_PARAM; } ptr->majorBrand = gf_bs_read_u32(bs); ptr->minorVersion = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 8); ptr->altCount = ( (u32) (ptr->size)) / 4; if (!ptr->altCount) return GF_OK; if (ptr->altCount * 4 != (u32) (ptr->size)) return GF_ISOM_INVALID_FILE; ptr->altBrand = (u32*)gf_malloc(sizeof(u32)*ptr->altCount); for (i = 0; i<ptr->altCount; i++) { ptr->altBrand[i] = gf_bs_read_u32(bs); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err ftyp_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_FileTypeBox *ptr = (GF_FileTypeBox *) s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->majorBrand); gf_bs_write_u32(bs, ptr->minorVersion); for (i=0; i<ptr->altCount; i++) { gf_bs_write_u32(bs, ptr->altBrand[i]); } return GF_OK; } GF_Err ftyp_Size(GF_Box *s) { GF_FileTypeBox *ptr = (GF_FileTypeBox *)s; ptr->size += 8 + ptr->altCount * 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void gnrm_del(GF_Box *s) { GF_GenericSampleEntryBox *ptr = (GF_GenericSampleEntryBox *)s; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)ptr); if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Box *gnrm_New() { ISOM_DECL_BOX_ALLOC(GF_GenericSampleEntryBox, GF_ISOM_BOX_TYPE_GNRM); gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp); return (GF_Box *)tmp; } //dummy GF_Err gnrm_Read(GF_Box *s, GF_BitStream *bs) { return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err gnrm_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_GenericSampleEntryBox *ptr = (GF_GenericSampleEntryBox *)s; //carefull we are not writing the box type but the entry type so switch for write ptr->type = ptr->EntryType; e = gf_isom_box_write_header(s, bs); if (e) return e; ptr->type = GF_ISOM_BOX_TYPE_GNRM; gf_bs_write_data(bs, ptr->reserved, 6); gf_bs_write_u16(bs, ptr->dataReferenceIndex); gf_bs_write_data(bs, ptr->data, ptr->data_size); return GF_OK; } GF_Err gnrm_Size(GF_Box *s) { GF_GenericSampleEntryBox *ptr = (GF_GenericSampleEntryBox *)s; s->type = GF_ISOM_BOX_TYPE_GNRM; ptr->size += 8+ptr->data_size; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void gnrv_del(GF_Box *s) { GF_GenericVisualSampleEntryBox *ptr = (GF_GenericVisualSampleEntryBox *)s; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)ptr); if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Box *gnrv_New() { ISOM_DECL_BOX_ALLOC(GF_GenericVisualSampleEntryBox, GF_ISOM_BOX_TYPE_GNRV); gf_isom_video_sample_entry_init((GF_VisualSampleEntryBox*) tmp); return (GF_Box *)tmp; } //dummy GF_Err gnrv_Read(GF_Box *s, GF_BitStream *bs) { return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err gnrv_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_GenericVisualSampleEntryBox *ptr = (GF_GenericVisualSampleEntryBox *)s; //carefull we are not writing the box type but the entry type so switch for write ptr->type = ptr->EntryType; e = gf_isom_box_write_header(s, bs); if (e) return e; ptr->type = GF_ISOM_BOX_TYPE_GNRV; gf_isom_video_sample_entry_write((GF_VisualSampleEntryBox *)ptr, bs); gf_bs_write_data(bs, ptr->data, ptr->data_size); return GF_OK; } GF_Err gnrv_Size(GF_Box *s) { GF_GenericVisualSampleEntryBox *ptr = (GF_GenericVisualSampleEntryBox *)s; s->type = GF_ISOM_BOX_TYPE_GNRV; 
gf_isom_video_sample_entry_size((GF_VisualSampleEntryBox *)s); ptr->size += ptr->data_size; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void gnra_del(GF_Box *s) { GF_GenericAudioSampleEntryBox *ptr = (GF_GenericAudioSampleEntryBox *)s; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)ptr); if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Box *gnra_New() { ISOM_DECL_BOX_ALLOC(GF_GenericAudioSampleEntryBox, GF_ISOM_BOX_TYPE_GNRA); gf_isom_audio_sample_entry_init((GF_AudioSampleEntryBox*) tmp); return (GF_Box *)tmp; } //dummy GF_Err gnra_Read(GF_Box *s, GF_BitStream *bs) { return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err gnra_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_GenericAudioSampleEntryBox *ptr = (GF_GenericAudioSampleEntryBox *)s; //carefull we are not writing the box type but the entry type so switch for write ptr->type = ptr->EntryType; e = gf_isom_box_write_header(s, bs); if (e) return e; ptr->type = GF_ISOM_BOX_TYPE_GNRA; gf_isom_audio_sample_entry_write((GF_AudioSampleEntryBox *)ptr, bs); if (ptr->data) { gf_bs_write_data(bs, ptr->data, ptr->data_size); } return GF_OK; } GF_Err gnra_Size(GF_Box *s) { GF_GenericAudioSampleEntryBox *ptr = (GF_GenericAudioSampleEntryBox *)s; s->type = GF_ISOM_BOX_TYPE_GNRA; gf_isom_audio_sample_entry_size((GF_AudioSampleEntryBox *)s); ptr->size += ptr->data_size; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void hdlr_del(GF_Box *s) { GF_HandlerBox *ptr = (GF_HandlerBox *)s; if (ptr == NULL) return; if (ptr->nameUTF8) gf_free(ptr->nameUTF8); gf_free(ptr); } GF_Err hdlr_Read(GF_Box *s, GF_BitStream *bs) { GF_HandlerBox *ptr = (GF_HandlerBox *)s; ptr->reserved1 = gf_bs_read_u32(bs); ptr->handlerType = gf_bs_read_u32(bs); gf_bs_read_data(bs, (char*)ptr->reserved2, 12); ISOM_DECREASE_SIZE(ptr, 20); if (ptr->size) { size_t len; ptr->nameUTF8 = (char*)gf_malloc((u32) ptr->size); if (ptr->nameUTF8 == NULL) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->nameUTF8, (u32) ptr->size); /*safety check in case the string is not null-terminated*/ if (ptr->nameUTF8[ptr->size-1]) { char *str = (char*)gf_malloc((u32) ptr->size + 1); memcpy(str, ptr->nameUTF8, (u32) ptr->size); str[ptr->size] = 0; gf_free(ptr->nameUTF8); ptr->nameUTF8 = str; } //patch for old QT files if (ptr->size > 1 && ptr->nameUTF8[0] == ptr->size-1) { len = strlen(ptr->nameUTF8 + 1); memmove(ptr->nameUTF8, ptr->nameUTF8+1, len ); ptr->nameUTF8[len] = 0; ptr->store_counted_string = GF_TRUE; } } return GF_OK; } GF_Box *hdlr_New() { ISOM_DECL_BOX_ALLOC(GF_HandlerBox, GF_ISOM_BOX_TYPE_HDLR); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err hdlr_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_HandlerBox *ptr = (GF_HandlerBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->reserved1); gf_bs_write_u32(bs, ptr->handlerType); gf_bs_write_data(bs, (char*)ptr->reserved2, 12); if (ptr->nameUTF8) { u32 len = (u32)strlen(ptr->nameUTF8); if (ptr->store_counted_string) { gf_bs_write_u8(bs, len); gf_bs_write_data(bs, ptr->nameUTF8, len); } else { gf_bs_write_data(bs, ptr->nameUTF8, len); gf_bs_write_u8(bs, 0); } } else { gf_bs_write_u8(bs, 0); } return GF_OK; } GF_Err hdlr_Size(GF_Box *s) { GF_HandlerBox *ptr = (GF_HandlerBox *)s; ptr->size += 20 + 1; //null term or counted string if (ptr->nameUTF8) { ptr->size += strlen(ptr->nameUTF8); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void hinf_del(GF_Box *s) { GF_HintInfoBox *hinf = (GF_HintInfoBox *)s; gf_free(hinf); } GF_Box *hinf_New() { ISOM_DECL_BOX_ALLOC(GF_HintInfoBox, 
GF_ISOM_BOX_TYPE_HINF); tmp->other_boxes = gf_list_new(); return (GF_Box *)tmp; } GF_Err hinf_AddBox(GF_Box *s, GF_Box *a) { GF_MAXRBox *maxR; GF_HintInfoBox *hinf = (GF_HintInfoBox *)s; u32 i; switch (a->type) { case GF_ISOM_BOX_TYPE_MAXR: i=0; while ((maxR = (GF_MAXRBox *)gf_list_enum(hinf->other_boxes, &i))) { if ((maxR->type==GF_ISOM_BOX_TYPE_MAXR) && (maxR->granularity == ((GF_MAXRBox *)a)->granularity)) return GF_ISOM_INVALID_FILE; } break; } return gf_isom_box_add_default(s, a); } GF_Err hinf_Read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, hinf_AddBox); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err hinf_Write(GF_Box *s, GF_BitStream *bs) { // GF_HintInfoBox *ptr = (GF_HintInfoBox *)s; if (!s) return GF_BAD_PARAM; return gf_isom_box_write_header(s, bs); } GF_Err hinf_Size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void hmhd_del(GF_Box *s) { GF_HintMediaHeaderBox *ptr = (GF_HintMediaHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err hmhd_Read(GF_Box *s,GF_BitStream *bs) { GF_HintMediaHeaderBox *ptr = (GF_HintMediaHeaderBox *)s; ptr->maxPDUSize = gf_bs_read_u16(bs); ptr->avgPDUSize = gf_bs_read_u16(bs); ptr->maxBitrate = gf_bs_read_u32(bs); ptr->avgBitrate = gf_bs_read_u32(bs); ptr->slidingAverageBitrate = gf_bs_read_u32(bs); return GF_OK; } GF_Box *hmhd_New() { ISOM_DECL_BOX_ALLOC(GF_HintMediaHeaderBox, GF_ISOM_BOX_TYPE_HMHD); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err hmhd_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_HintMediaHeaderBox *ptr = (GF_HintMediaHeaderBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u16(bs, ptr->maxPDUSize); gf_bs_write_u16(bs, ptr->avgPDUSize); gf_bs_write_u32(bs, ptr->maxBitrate); gf_bs_write_u32(bs, ptr->avgBitrate); gf_bs_write_u32(bs, ptr->slidingAverageBitrate); return GF_OK; } GF_Err hmhd_Size(GF_Box *s) { GF_HintMediaHeaderBox *ptr = (GF_HintMediaHeaderBox *)s; ptr->size += 16; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *hnti_New() { ISOM_DECL_BOX_ALLOC(GF_HintTrackInfoBox, GF_ISOM_BOX_TYPE_HNTI); return (GF_Box *)tmp; } void hnti_del(GF_Box *a) { gf_free(a); } GF_Err hnti_AddBox(GF_Box *s, GF_Box *a) { GF_HintTrackInfoBox *hnti = (GF_HintTrackInfoBox *)s; if (!hnti || !a) return GF_BAD_PARAM; switch (a->type) { //this is the value for GF_RTPBox - same as HintSampleEntry for RTP !!! case GF_ISOM_BOX_TYPE_RTP: case GF_ISOM_BOX_TYPE_SDP: if (hnti->SDP) return GF_BAD_PARAM; hnti->SDP = a; break; default: break; } return gf_isom_box_add_default(s, a); } GF_Err hnti_Read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read_ex(s, bs, hnti_AddBox, s->type); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err hnti_Write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err hnti_Size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** GF_SDPBox **********************************************************/ void sdp_del(GF_Box *s) { GF_SDPBox *ptr = (GF_SDPBox *)s; if (ptr->sdpText) gf_free(ptr->sdpText); gf_free(ptr); } GF_Err sdp_Read(GF_Box *s, GF_BitStream *bs) { u32 length; GF_SDPBox *ptr = (GF_SDPBox *)s; if (ptr == NULL) return GF_BAD_PARAM; length = (u32) (ptr->size); //sdp text has no delimiter !!! 
ptr->sdpText = (char*)gf_malloc(sizeof(char) * (length+1)); if (!ptr->sdpText) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->sdpText, length); ptr->sdpText[length] = 0; return GF_OK; } GF_Box *sdp_New() { ISOM_DECL_BOX_ALLOC(GF_SDPBox, GF_ISOM_BOX_TYPE_SDP); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err sdp_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SDPBox *ptr = (GF_SDPBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; //don't write the NULL char!!! gf_bs_write_data(bs, ptr->sdpText, (u32) strlen(ptr->sdpText)); return GF_OK; } GF_Err sdp_Size(GF_Box *s) { GF_SDPBox *ptr = (GF_SDPBox *)s; //don't count the NULL char!!! ptr->size += strlen(ptr->sdpText); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void rtp_hnti_del(GF_Box *s) { GF_RTPBox *ptr = (GF_RTPBox *)s; if (ptr->sdpText) gf_free(ptr->sdpText); gf_free(ptr); } GF_Err rtp_hnti_Read(GF_Box *s, GF_BitStream *bs) { u32 length; GF_RTPBox *ptr = (GF_RTPBox *)s; if (ptr == NULL) return GF_BAD_PARAM; ISOM_DECREASE_SIZE(ptr, 4) ptr->subType = gf_bs_read_u32(bs); length = (u32) (ptr->size); //sdp text has no delimiter !!! ptr->sdpText = (char*)gf_malloc(sizeof(char) * (length+1)); if (!ptr->sdpText) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->sdpText, length); ptr->sdpText[length] = 0; return GF_OK; } GF_Box *rtp_hnti_New() { ISOM_DECL_BOX_ALLOC(GF_RTPBox, GF_ISOM_BOX_TYPE_RTP); tmp->subType = GF_ISOM_BOX_TYPE_SDP; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err rtp_hnti_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_RTPBox *ptr = (GF_RTPBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->subType); //don't write the NULL char!!! 
gf_bs_write_data(bs, ptr->sdpText, (u32) strlen(ptr->sdpText)); return GF_OK; } GF_Err rtp_hnti_Size(GF_Box *s) { GF_RTPBox *ptr = (GF_RTPBox *)s; ptr->size += 4 + strlen(ptr->sdpText); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** TRPY GF_Box **********************************************************/ void trpy_del(GF_Box *s) { gf_free((GF_TRPYBox *)s); } GF_Err trpy_Read(GF_Box *s, GF_BitStream *bs) { GF_TRPYBox *ptr = (GF_TRPYBox *)s; ptr->nbBytes = gf_bs_read_u64(bs); return GF_OK; } GF_Box *trpy_New() { ISOM_DECL_BOX_ALLOC(GF_TRPYBox, GF_ISOM_BOX_TYPE_TRPY); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err trpy_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TRPYBox *ptr = (GF_TRPYBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->nbBytes); return GF_OK; } GF_Err trpy_Size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** TOTL GF_Box **********************************************************/ void totl_del(GF_Box *s) { gf_free((GF_TRPYBox *)s); } GF_Err totl_Read(GF_Box *s, GF_BitStream *bs) { GF_TOTLBox *ptr = (GF_TOTLBox *)s; ptr->nbBytes = gf_bs_read_u32(bs); return GF_OK; } GF_Box *totl_New() { ISOM_DECL_BOX_ALLOC(GF_TOTLBox, GF_ISOM_BOX_TYPE_TOTL); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err totl_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TOTLBox *ptr = (GF_TOTLBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nbBytes); return GF_OK; } GF_Err totl_Size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** NUMP GF_Box **********************************************************/ void nump_del(GF_Box *s) { gf_free((GF_NUMPBox *)s); } GF_Err nump_Read(GF_Box *s, GF_BitStream *bs) { GF_NUMPBox *ptr = (GF_NUMPBox *)s; ptr->nbPackets = gf_bs_read_u64(bs); return GF_OK; } GF_Box *nump_New() { ISOM_DECL_BOX_ALLOC(GF_NUMPBox, GF_ISOM_BOX_TYPE_NUMP); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err nump_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_NUMPBox *ptr = (GF_NUMPBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->nbPackets); return GF_OK; } GF_Err nump_Size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** NPCK GF_Box **********************************************************/ void npck_del(GF_Box *s) { gf_free((GF_NPCKBox *)s); } GF_Err npck_Read(GF_Box *s, GF_BitStream *bs) { GF_NPCKBox *ptr = (GF_NPCKBox *)s; ptr->nbPackets = gf_bs_read_u32(bs); return GF_OK; } GF_Box *npck_New() { ISOM_DECL_BOX_ALLOC(GF_NPCKBox, GF_ISOM_BOX_TYPE_NPCK); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err npck_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_NPCKBox *ptr = (GF_NPCKBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nbPackets); return GF_OK; } GF_Err npck_Size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** TPYL GF_Box **********************************************************/ void tpyl_del(GF_Box *s) { 
gf_free((GF_NTYLBox *)s); } GF_Err tpyl_Read(GF_Box *s, GF_BitStream *bs) { GF_NTYLBox *ptr = (GF_NTYLBox *)s; if (ptr == NULL) return GF_BAD_PARAM; ptr->nbBytes = gf_bs_read_u64(bs); return GF_OK; } GF_Box *tpyl_New() { ISOM_DECL_BOX_ALLOC(GF_NTYLBox, GF_ISOM_BOX_TYPE_TPYL); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tpyl_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_NTYLBox *ptr = (GF_NTYLBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->nbBytes); return GF_OK; } GF_Err tpyl_Size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** TPAY GF_Box **********************************************************/ void tpay_del(GF_Box *s) { gf_free((GF_TPAYBox *)s); } GF_Err tpay_Read(GF_Box *s, GF_BitStream *bs) { GF_TPAYBox *ptr = (GF_TPAYBox *)s; ptr->nbBytes = gf_bs_read_u32(bs); return GF_OK; } GF_Box *tpay_New() { ISOM_DECL_BOX_ALLOC(GF_TPAYBox, GF_ISOM_BOX_TYPE_TPAY); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tpay_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TPAYBox *ptr = (GF_TPAYBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nbBytes); return GF_OK; } GF_Err tpay_Size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** MAXR GF_Box **********************************************************/ void maxr_del(GF_Box *s) { gf_free((GF_MAXRBox *)s); } GF_Err maxr_Read(GF_Box *s, GF_BitStream *bs) { GF_MAXRBox *ptr = (GF_MAXRBox *)s; if (ptr == NULL) return GF_BAD_PARAM; ptr->granularity = gf_bs_read_u32(bs); ptr->maxDataRate = gf_bs_read_u32(bs); return GF_OK; } GF_Box *maxr_New() { ISOM_DECL_BOX_ALLOC(GF_MAXRBox, GF_ISOM_BOX_TYPE_MAXR); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err maxr_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MAXRBox *ptr = (GF_MAXRBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->granularity); gf_bs_write_u32(bs, ptr->maxDataRate); return GF_OK; } GF_Err maxr_Size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** DMED GF_Box **********************************************************/ void dmed_del(GF_Box *s) { gf_free((GF_DMEDBox *)s); } GF_Err dmed_Read(GF_Box *s, GF_BitStream *bs) { GF_DMEDBox *ptr = (GF_DMEDBox *)s; ptr->nbBytes = gf_bs_read_u64(bs); return GF_OK; } GF_Box *dmed_New() { ISOM_DECL_BOX_ALLOC(GF_DMEDBox, GF_ISOM_BOX_TYPE_DMED); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dmed_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_DMEDBox *ptr = (GF_DMEDBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->nbBytes); return GF_OK; } GF_Err dmed_Size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** DIMM GF_Box **********************************************************/ void dimm_del(GF_Box *s) { gf_free((GF_DIMMBox *)s); } GF_Err dimm_Read(GF_Box *s, GF_BitStream *bs) { GF_DIMMBox *ptr = (GF_DIMMBox *)s; ptr->nbBytes = gf_bs_read_u64(bs); return GF_OK; } GF_Box *dimm_New() { ISOM_DECL_BOX_ALLOC(GF_DIMMBox, GF_ISOM_BOX_TYPE_DIMM); return 
(GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dimm_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_DIMMBox *ptr = (GF_DIMMBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->nbBytes); return GF_OK; } GF_Err dimm_Size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** DREP GF_Box **********************************************************/ void drep_del(GF_Box *s) { gf_free((GF_DREPBox *)s); } GF_Err drep_Read(GF_Box *s, GF_BitStream *bs) { GF_DREPBox *ptr = (GF_DREPBox *)s; ptr->nbBytes = gf_bs_read_u64(bs); return GF_OK; } GF_Box *drep_New() { ISOM_DECL_BOX_ALLOC(GF_DREPBox, GF_ISOM_BOX_TYPE_DREP); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err drep_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_DREPBox *ptr = (GF_DREPBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->nbBytes); return GF_OK; } GF_Err drep_Size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** TMIN GF_Box **********************************************************/ void tmin_del(GF_Box *s) { gf_free((GF_TMINBox *)s); } GF_Err tmin_Read(GF_Box *s, GF_BitStream *bs) { GF_TMINBox *ptr = (GF_TMINBox *)s; ptr->minTime = gf_bs_read_u32(bs); return GF_OK; } GF_Box *tmin_New() { ISOM_DECL_BOX_ALLOC(GF_TMINBox, GF_ISOM_BOX_TYPE_TMIN); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tmin_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TMINBox *ptr = (GF_TMINBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->minTime); return GF_OK; } GF_Err tmin_Size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** TMAX GF_Box **********************************************************/ void tmax_del(GF_Box *s) { gf_free((GF_TMAXBox *)s); } GF_Err tmax_Read(GF_Box *s, GF_BitStream *bs) { GF_TMAXBox *ptr = (GF_TMAXBox *)s; ptr->maxTime = gf_bs_read_u32(bs); return GF_OK; } GF_Box *tmax_New() { ISOM_DECL_BOX_ALLOC(GF_TMAXBox, GF_ISOM_BOX_TYPE_TMAX); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tmax_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TMAXBox *ptr = (GF_TMAXBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->maxTime); return GF_OK; } GF_Err tmax_Size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** PMAX GF_Box **********************************************************/ void pmax_del(GF_Box *s) { gf_free((GF_PMAXBox *)s); } GF_Err pmax_Read(GF_Box *s, GF_BitStream *bs) { GF_PMAXBox *ptr = (GF_PMAXBox *)s; ptr->maxSize = gf_bs_read_u32(bs); return GF_OK; } GF_Box *pmax_New() { ISOM_DECL_BOX_ALLOC(GF_PMAXBox, GF_ISOM_BOX_TYPE_PMAX); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err pmax_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_PMAXBox *ptr = (GF_PMAXBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->maxSize); return GF_OK; } GF_Err pmax_Size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ 
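/* The hint-statistics boxes in this part of the file (trpy, totl, nump, npck, tpyl, tpay,
   maxr, dmed, dimm, drep, tmin, tmax, pmax and dmax below) all follow the same minimal
   pattern: _Read pulls a single 32- or 64-bit counter (maxr carries two 32-bit fields),
   _Write emits it back unchanged, and _Size adds the same 4 or 8 bytes to the box size. */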
/********************************************************** DMAX GF_Box **********************************************************/ void dmax_del(GF_Box *s) { gf_free((GF_DMAXBox *)s); } GF_Err dmax_Read(GF_Box *s, GF_BitStream *bs) { GF_DMAXBox *ptr = (GF_DMAXBox *)s; ptr->maxDur = gf_bs_read_u32(bs); return GF_OK; } GF_Box *dmax_New() { ISOM_DECL_BOX_ALLOC(GF_DMAXBox, GF_ISOM_BOX_TYPE_DMAX); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dmax_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_DMAXBox *ptr = (GF_DMAXBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->maxDur); return GF_OK; } GF_Err dmax_Size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** PAYT GF_Box **********************************************************/ void payt_del(GF_Box *s) { GF_PAYTBox *payt = (GF_PAYTBox *)s; if (payt->payloadString) gf_free(payt->payloadString); gf_free(payt); } GF_Err payt_Read(GF_Box *s, GF_BitStream *bs) { u32 length; GF_PAYTBox *ptr = (GF_PAYTBox *)s; ptr->payloadCode = gf_bs_read_u32(bs); length = gf_bs_read_u8(bs); ptr->payloadString = (char*)gf_malloc(sizeof(char) * (length+1) ); if (! ptr->payloadString) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->payloadString, length); ptr->payloadString[length] = 0; ISOM_DECREASE_SIZE(ptr, (4+length+1) ); return GF_OK; } GF_Box *payt_New() { ISOM_DECL_BOX_ALLOC(GF_PAYTBox, GF_ISOM_BOX_TYPE_PAYT); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err payt_Write(GF_Box *s, GF_BitStream *bs) { u32 len; GF_Err e; GF_PAYTBox *ptr = (GF_PAYTBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->payloadCode); len = (u32) strlen(ptr->payloadString); gf_bs_write_u8(bs, len); if (len) gf_bs_write_data(bs, ptr->payloadString, len); return GF_OK; } GF_Err payt_Size(GF_Box *s) { GF_PAYTBox *ptr = (GF_PAYTBox *)s; s->size += 4; if (ptr->payloadString) ptr->size += strlen(ptr->payloadString) + 1; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** PAYT GF_Box **********************************************************/ void name_del(GF_Box *s) { GF_NameBox *name = (GF_NameBox *)s; if (name->string) gf_free(name->string); gf_free(name); } GF_Err name_Read(GF_Box *s, GF_BitStream *bs) { u32 length; GF_NameBox *ptr = (GF_NameBox *)s; length = (u32) (ptr->size); ptr->string = (char*)gf_malloc(sizeof(char) * (length+1)); if (! 
ptr->string) return GF_OUT_OF_MEM;
	gf_bs_read_data(bs, ptr->string, length);
	ptr->string[length] = 0;
	return GF_OK;
}

GF_Box *name_New()
{
	ISOM_DECL_BOX_ALLOC(GF_NameBox, GF_ISOM_BOX_TYPE_NAME);
	return (GF_Box *)tmp;
}

#ifndef GPAC_DISABLE_ISOM_WRITE

GF_Err name_Write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	GF_NameBox *ptr = (GF_NameBox *)s;
	if (ptr == NULL) return GF_BAD_PARAM;
	e = gf_isom_box_write_header(s, bs);
	if (e) return e;
	if (ptr->string) {
		gf_bs_write_data(bs, ptr->string, (u32) strlen(ptr->string) + 1);
	}
	return GF_OK;
}

GF_Err name_Size(GF_Box *s)
{
	GF_NameBox *ptr = (GF_NameBox *)s;
	if (ptr->string) ptr->size += strlen(ptr->string) + 1;
	return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/

void tssy_del(GF_Box *s)
{
	gf_free(s);
}

GF_Err tssy_Read(GF_Box *s, GF_BitStream *bs)
{
	GF_TimeStampSynchronyBox *ptr = (GF_TimeStampSynchronyBox *)s;
	gf_bs_read_int(bs, 6);
	ptr->timestamp_sync = gf_bs_read_int(bs, 2);
	return GF_OK;
}

GF_Box *tssy_New()
{
	ISOM_DECL_BOX_ALLOC(GF_TimeStampSynchronyBox, GF_ISOM_BOX_TYPE_TSSY);
	return (GF_Box *)tmp;
}

#ifndef GPAC_DISABLE_ISOM_WRITE

GF_Err tssy_Write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	GF_TimeStampSynchronyBox *ptr = (GF_TimeStampSynchronyBox *)s;
	if (ptr == NULL) return GF_BAD_PARAM;
	e = gf_isom_box_write_header(s, bs);
	if (e) return e;
	gf_bs_write_int(bs, 0, 6);
	gf_bs_write_int(bs, ptr->timestamp_sync, 2);
	return GF_OK;
}

GF_Err tssy_Size(GF_Box *s)
{
	s->size += 1;
	return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/

void srpp_del(GF_Box *s)
{
	GF_SRTPProcessBox *ptr = (GF_SRTPProcessBox *)s;
	if (ptr->info) gf_isom_box_del((GF_Box*)ptr->info);
	if (ptr->scheme_type) gf_isom_box_del((GF_Box*)ptr->scheme_type);
	gf_free(s);
}

GF_Err srpp_AddBox(GF_Box *s, GF_Box *a)
{
	GF_SRTPProcessBox *ptr = (GF_SRTPProcessBox *)s;
	switch(a->type) {
	case GF_ISOM_BOX_TYPE_SCHI:
		if (ptr->info) ERROR_ON_DUPLICATED_BOX(a, ptr)
		ptr->info = (GF_SchemeInformationBox *)a;
		return GF_OK;
	case GF_ISOM_BOX_TYPE_SCHM:
		if (ptr->scheme_type) ERROR_ON_DUPLICATED_BOX(a, ptr)
		ptr->scheme_type = (GF_SchemeTypeBox *)a;
		return GF_OK;
	}
	return gf_isom_box_add_default(s, a);
}

GF_Err srpp_Read(GF_Box *s, GF_BitStream *bs)
{
	GF_SRTPProcessBox *ptr = (GF_SRTPProcessBox *)s;
	ISOM_DECREASE_SIZE(s, 16)
	//field order matches srpp_Write below
	ptr->encryption_algorithm_rtp = gf_bs_read_u32(bs);
	ptr->encryption_algorithm_rtcp = gf_bs_read_u32(bs);
	ptr->integrity_algorithm_rtp = gf_bs_read_u32(bs);
	ptr->integrity_algorithm_rtcp = gf_bs_read_u32(bs);
	return gf_isom_box_array_read(s, bs, gf_isom_box_add_default);
}

GF_Box *srpp_New()
{
	ISOM_DECL_BOX_ALLOC(GF_SRTPProcessBox, GF_ISOM_BOX_TYPE_SRPP);
	return (GF_Box *)tmp;
}

#ifndef GPAC_DISABLE_ISOM_WRITE

GF_Err srpp_Write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	GF_SRTPProcessBox *ptr = (GF_SRTPProcessBox *)s;
	if (ptr == NULL) return GF_BAD_PARAM;
	e = gf_isom_full_box_write(s, bs);
	if (e) return e;
	gf_bs_write_u32(bs, ptr->encryption_algorithm_rtp);
	gf_bs_write_u32(bs, ptr->encryption_algorithm_rtcp);
	gf_bs_write_u32(bs, ptr->integrity_algorithm_rtp);
	gf_bs_write_u32(bs, ptr->integrity_algorithm_rtcp);
	if (ptr->info) {
		e = gf_isom_box_write((GF_Box*)ptr->info, bs);
		if (e) return e;
	}
	if (ptr->scheme_type) {
		e = gf_isom_box_write((GF_Box*)ptr->scheme_type, bs);
		if (e) return e;
	}
	return GF_OK;
}

GF_Err srpp_Size(GF_Box *s)
{
	GF_Err e;
	GF_SRTPProcessBox *ptr = (GF_SRTPProcessBox *)s;
	s->size += 16;
	if (ptr->info) {
		e = gf_isom_box_size((GF_Box*)ptr->info);
		if (e) return e;
		ptr->size += ptr->info->size;
	}
	if (ptr->scheme_type) {
		e = gf_isom_box_size((GF_Box*)ptr->scheme_type);
		if (e)
return e; ptr->size += ptr->scheme_type->size; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void rssr_del(GF_Box *s) { gf_free(s); } GF_Err rssr_Read(GF_Box *s, GF_BitStream *bs) { GF_ReceivedSsrcBox *ptr = (GF_ReceivedSsrcBox *)s; ptr->ssrc = gf_bs_read_u32(bs); return GF_OK; } GF_Box *rssr_New() { ISOM_DECL_BOX_ALLOC(GF_ReceivedSsrcBox, GF_ISOM_BOX_TYPE_RSSR); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err rssr_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ReceivedSsrcBox *ptr = (GF_ReceivedSsrcBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->ssrc); return GF_OK; } GF_Err rssr_Size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void iods_del(GF_Box *s) { GF_ObjectDescriptorBox *ptr = (GF_ObjectDescriptorBox *)s; if (ptr == NULL) return; if (ptr->descriptor) gf_odf_desc_del(ptr->descriptor); gf_free(ptr); } GF_Err iods_Read(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 descSize; char *desc; GF_ObjectDescriptorBox *ptr = (GF_ObjectDescriptorBox *)s; //use the OD codec... descSize = (u32) (ptr->size); desc = (char*)gf_malloc(sizeof(char) * descSize); gf_bs_read_data(bs, desc, descSize); e = gf_odf_desc_read(desc, descSize, &ptr->descriptor); //OK, free our desc gf_free(desc); return e; } GF_Box *iods_New() { ISOM_DECL_BOX_ALLOC(GF_ObjectDescriptorBox, GF_ISOM_BOX_TYPE_IODS); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err iods_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 descSize; char *desc; GF_ObjectDescriptorBox *ptr = (GF_ObjectDescriptorBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; //call our OD codec e = gf_odf_desc_write(ptr->descriptor, &desc, &descSize); if (e) return e; gf_bs_write_data(bs, desc, descSize); //and free our stuff maybe!! gf_free(desc); return GF_OK; } GF_Err iods_Size(GF_Box *s) { GF_ObjectDescriptorBox *ptr = (GF_ObjectDescriptorBox *)s; ptr->size += gf_odf_desc_size(ptr->descriptor); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mdat_del(GF_Box *s) { GF_MediaDataBox *ptr = (GF_MediaDataBox *)s; if (!s) return; if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Err mdat_Read(GF_Box *s, GF_BitStream *bs) { GF_MediaDataBox *ptr = (GF_MediaDataBox *)s; if (ptr == NULL) return GF_BAD_PARAM; ptr->dataSize = s->size; ptr->bsOffset = gf_bs_get_position(bs); //then skip these bytes gf_bs_skip_bytes(bs, ptr->dataSize); return GF_OK; } GF_Box *mdat_New() { ISOM_DECL_BOX_ALLOC(GF_MediaDataBox, GF_ISOM_BOX_TYPE_MDAT); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mdat_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MediaDataBox *ptr = (GF_MediaDataBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; //make sure we have some data ... 
//if not, we handle that independantly (edit files) if (ptr->data) { gf_bs_write_data(bs, ptr->data, (u32) ptr->dataSize); } return GF_OK; } GF_Err mdat_Size(GF_Box *s) { GF_MediaDataBox *ptr = (GF_MediaDataBox *)s; ptr->size += ptr->dataSize; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mdhd_del(GF_Box *s) { GF_MediaHeaderBox *ptr = (GF_MediaHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err mdhd_Read(GF_Box *s, GF_BitStream *bs) { GF_MediaHeaderBox *ptr = (GF_MediaHeaderBox *)s; if (ptr->version == 1) { ptr->creationTime = gf_bs_read_u64(bs); ptr->modificationTime = gf_bs_read_u64(bs); ptr->timeScale = gf_bs_read_u32(bs); ptr->duration = gf_bs_read_u64(bs); } else { ptr->creationTime = gf_bs_read_u32(bs); ptr->modificationTime = gf_bs_read_u32(bs); ptr->timeScale = gf_bs_read_u32(bs); ptr->duration = gf_bs_read_u32(bs); } if (!ptr->timeScale) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Media header timescale is 0 - defaulting to 90000\n" )); ptr->timeScale = 90000; } ptr->original_duration = ptr->duration; //our padding bit gf_bs_read_int(bs, 1); //the spec is unclear here, just says "the value 0 is interpreted as undetermined" ptr->packedLanguage[0] = gf_bs_read_int(bs, 5); ptr->packedLanguage[1] = gf_bs_read_int(bs, 5); ptr->packedLanguage[2] = gf_bs_read_int(bs, 5); //but before or after compaction ?? We assume before if (ptr->packedLanguage[0] || ptr->packedLanguage[1] || ptr->packedLanguage[2]) { ptr->packedLanguage[0] += 0x60; ptr->packedLanguage[1] += 0x60; ptr->packedLanguage[2] += 0x60; } else { ptr->packedLanguage[0] = 'u'; ptr->packedLanguage[1] = 'n'; ptr->packedLanguage[2] = 'd'; } ptr->reserved = gf_bs_read_u16(bs); return GF_OK; } GF_Box *mdhd_New() { ISOM_DECL_BOX_ALLOC(GF_MediaHeaderBox, GF_ISOM_BOX_TYPE_MDHD); tmp->packedLanguage[0] = 'u'; tmp->packedLanguage[1] = 'n'; tmp->packedLanguage[2] = 'd'; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mdhd_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MediaHeaderBox *ptr = (GF_MediaHeaderBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->version == 1) { gf_bs_write_u64(bs, ptr->creationTime); gf_bs_write_u64(bs, ptr->modificationTime); gf_bs_write_u32(bs, ptr->timeScale); gf_bs_write_u64(bs, ptr->duration); } else { gf_bs_write_u32(bs, (u32) ptr->creationTime); gf_bs_write_u32(bs, (u32) ptr->modificationTime); gf_bs_write_u32(bs, ptr->timeScale); gf_bs_write_u32(bs, (u32) ptr->duration); } //SPECS: BIT(1) of padding gf_bs_write_int(bs, 0, 1); gf_bs_write_int(bs, ptr->packedLanguage[0] - 0x60, 5); gf_bs_write_int(bs, ptr->packedLanguage[1] - 0x60, 5); gf_bs_write_int(bs, ptr->packedLanguage[2] - 0x60, 5); gf_bs_write_u16(bs, ptr->reserved); return GF_OK; } GF_Err mdhd_Size(GF_Box *s) { GF_MediaHeaderBox *ptr = (GF_MediaHeaderBox *)s; ptr->version = (ptr->duration>0xFFFFFFFF) ? 1 : 0; ptr->size += 4; ptr->size += (ptr->version == 1) ? 
28 : 16; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mdia_del(GF_Box *s) { GF_MediaBox *ptr = (GF_MediaBox *)s; if (ptr == NULL) return; if (ptr->mediaHeader) gf_isom_box_del((GF_Box *)ptr->mediaHeader); if (ptr->information) gf_isom_box_del((GF_Box *)ptr->information); if (ptr->handler) gf_isom_box_del((GF_Box *)ptr->handler); gf_free(ptr); } GF_Err mdia_AddBox(GF_Box *s, GF_Box *a) { GF_MediaBox *ptr = (GF_MediaBox *)s; switch(a->type) { case GF_ISOM_BOX_TYPE_MDHD: if (ptr->mediaHeader) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->mediaHeader = (GF_MediaHeaderBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_HDLR: if (ptr->handler) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->handler = (GF_HandlerBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_MINF: if (ptr->information) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->information = (GF_MediaInformationBox *)a; return GF_OK; default: return gf_isom_box_add_default(s, a); } return GF_OK; } GF_Err mdia_Read(GF_Box *s, GF_BitStream *bs) { GF_Err e = gf_isom_box_array_read(s, bs, mdia_AddBox); if (e) return e; if (!((GF_MediaBox *)s)->information) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MediaInformationBox\n")); return GF_ISOM_INVALID_FILE; } if (!((GF_MediaBox *)s)->handler) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing HandlerBox\n")); return GF_ISOM_INVALID_FILE; } if (!((GF_MediaBox *)s)->mediaHeader) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MediaHeaderBox\n")); return GF_ISOM_INVALID_FILE; } return GF_OK; } GF_Box *mdia_New() { ISOM_DECL_BOX_ALLOC(GF_MediaBox, GF_ISOM_BOX_TYPE_MDIA); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mdia_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MediaBox *ptr = (GF_MediaBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; //Header first if (ptr->mediaHeader) { e = gf_isom_box_write((GF_Box *) ptr->mediaHeader, bs); if (e) return e; } //then handler if (ptr->handler) { e = gf_isom_box_write((GF_Box *) ptr->handler, bs); if (e) return e; } if (ptr->information) { e = gf_isom_box_write((GF_Box *) ptr->information, bs); if (e) return e; } return GF_OK; } GF_Err mdia_Size(GF_Box *s) { GF_Err e; GF_MediaBox *ptr = (GF_MediaBox *)s; if (ptr->mediaHeader) { e = gf_isom_box_size((GF_Box *) ptr->mediaHeader); if (e) return e; ptr->size += ptr->mediaHeader->size; } if (ptr->handler) { e = gf_isom_box_size((GF_Box *) ptr->handler); if (e) return e; ptr->size += ptr->handler->size; } if (ptr->information) { e = gf_isom_box_size((GF_Box *) ptr->information); if (e) return e; ptr->size += ptr->information->size; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mfra_del(GF_Box *s) { GF_MovieFragmentRandomAccessBox *ptr = (GF_MovieFragmentRandomAccessBox *)s; if (ptr == NULL) return; if (ptr->mfro) gf_isom_box_del((GF_Box*)ptr->mfro); gf_isom_box_array_del(ptr->tfra_list); gf_free(ptr); } GF_Box *mfra_New() { ISOM_DECL_BOX_ALLOC(GF_MovieFragmentRandomAccessBox, GF_ISOM_BOX_TYPE_MFRA); tmp->tfra_list = gf_list_new(); return (GF_Box *)tmp; } GF_Err mfra_AddBox(GF_Box *s, GF_Box *a) { GF_MovieFragmentRandomAccessBox *ptr = (GF_MovieFragmentRandomAccessBox *)s; switch(a->type) { case GF_ISOM_BOX_TYPE_TFRA: return gf_list_add(ptr->tfra_list, a); case GF_ISOM_BOX_TYPE_MFRO: if (ptr->mfro) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->mfro = (GF_MovieFragmentRandomAccessOffsetBox *)a; return GF_OK; default: return gf_isom_box_add_default(s, a); } return GF_OK; } GF_Err mfra_Read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, mfra_AddBox); } 
#ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mfra_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MovieFragmentRandomAccessBox *ptr = (GF_MovieFragmentRandomAccessBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; e = gf_isom_box_array_write(s, ptr->tfra_list, bs); if (e) return e; if (ptr->mfro) { e = gf_isom_box_write((GF_Box *) ptr->mfro, bs); if (e) return e; } return GF_OK; } GF_Err mfra_Size(GF_Box *s) { GF_Err e; GF_MovieFragmentRandomAccessBox *ptr = (GF_MovieFragmentRandomAccessBox *)s; if (ptr->mfro) { e = gf_isom_box_size((GF_Box *)ptr->mfro); if (e) return e; ptr->size += ptr->mfro->size; } return gf_isom_box_array_size(s, ptr->tfra_list); } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void tfra_del(GF_Box *s) { GF_TrackFragmentRandomAccessBox *ptr = (GF_TrackFragmentRandomAccessBox *)s; if (ptr == NULL) return; if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Box *tfra_New() { ISOM_DECL_BOX_ALLOC(GF_TrackFragmentRandomAccessBox, GF_ISOM_BOX_TYPE_TFRA); return (GF_Box *)tmp; } GF_Err tfra_Read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_RandomAccessEntry *p = 0; GF_TrackFragmentRandomAccessBox *ptr = (GF_TrackFragmentRandomAccessBox *)s; if (ptr->size < 12) return GF_ISOM_INVALID_FILE; ptr->track_id = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); if (gf_bs_read_int(bs, 26) != 0) return GF_ISOM_INVALID_FILE; ptr->traf_bits = (gf_bs_read_int(bs, 2) + 1) * 8; ptr->trun_bits = (gf_bs_read_int(bs, 2) + 1) * 8; ptr->sample_bits = (gf_bs_read_int(bs, 2) + 1) * 8; ISOM_DECREASE_SIZE(ptr, 4); ptr->nb_entries = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); if (ptr->version == 1) { if (ptr->nb_entries > ptr->size / (16+(ptr->traf_bits+ptr->trun_bits+ptr->sample_bits)/8)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in traf\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } } else { if (ptr->nb_entries > ptr->size / (8+(ptr->traf_bits+ptr->trun_bits+ptr->sample_bits)/8)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in traf\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } } if (ptr->nb_entries) { p = (GF_RandomAccessEntry *) gf_malloc(sizeof(GF_RandomAccessEntry) * ptr->nb_entries); if (!p) return GF_OUT_OF_MEM; } ptr->entries = p; for (i=0; i<ptr->nb_entries; i++) { memset(p, 0, sizeof(GF_RandomAccessEntry)); if (ptr->version == 1) { p->time = gf_bs_read_u64(bs); p->moof_offset = gf_bs_read_u64(bs); } else { p->time = gf_bs_read_u32(bs); p->moof_offset = gf_bs_read_u32(bs); } p->traf_number = gf_bs_read_int(bs, ptr->traf_bits); p->trun_number = gf_bs_read_int(bs, ptr->trun_bits); p->sample_number = gf_bs_read_int(bs, ptr->sample_bits); ++p; } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tfra_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_TrackFragmentRandomAccessBox *ptr = (GF_TrackFragmentRandomAccessBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->track_id); gf_bs_write_int(bs, 0, 26); gf_bs_write_int(bs, ptr->traf_bits/8 - 1, 2); gf_bs_write_int(bs, ptr->trun_bits/8 - 1, 2); gf_bs_write_int(bs, ptr->sample_bits/8 - 1, 2); gf_bs_write_u32(bs, ptr->nb_entries); for (i=0; i<ptr->nb_entries; i++) { GF_RandomAccessEntry *p = &ptr->entries[i]; if (ptr->version==1) { gf_bs_write_u64(bs, p->time); gf_bs_write_u64(bs, p->moof_offset); } else { gf_bs_write_u32(bs, (u32) p->time); gf_bs_write_u32(bs, (u32) p->moof_offset); } gf_bs_write_int(bs, p->traf_number, ptr->traf_bits); gf_bs_write_int(bs, p->trun_number, ptr->trun_bits); gf_bs_write_int(bs, 
p->sample_number, ptr->sample_bits); } return GF_OK; } GF_Err tfra_Size(GF_Box *s) { GF_TrackFragmentRandomAccessBox *ptr = (GF_TrackFragmentRandomAccessBox *)s; ptr->size += 12; ptr->size += ptr->nb_entries * ( ((ptr->version==1) ? 16 : 8 ) + ptr->traf_bits/8 + ptr->trun_bits/8 + ptr->sample_bits/8); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mfro_del(GF_Box *s) { GF_MovieFragmentRandomAccessOffsetBox *ptr = (GF_MovieFragmentRandomAccessOffsetBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Box *mfro_New() { ISOM_DECL_BOX_ALLOC(GF_MovieFragmentRandomAccessOffsetBox, GF_ISOM_BOX_TYPE_MFRO); return (GF_Box *)tmp; } GF_Err mfro_Read(GF_Box *s, GF_BitStream *bs) { GF_MovieFragmentRandomAccessOffsetBox *ptr = (GF_MovieFragmentRandomAccessOffsetBox *)s; ptr->container_size = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mfro_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MovieFragmentRandomAccessOffsetBox *ptr = (GF_MovieFragmentRandomAccessOffsetBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->container_size); return GF_OK; } GF_Err mfro_Size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void elng_del(GF_Box *s) { GF_ExtendedLanguageBox *ptr = (GF_ExtendedLanguageBox *)s; if (ptr == NULL) return; if (ptr->extended_language) gf_free(ptr->extended_language); gf_free(ptr); } GF_Err elng_Read(GF_Box *s, GF_BitStream *bs) { GF_ExtendedLanguageBox *ptr = (GF_ExtendedLanguageBox *)s; if (ptr->size) { ptr->extended_language = (char*)gf_malloc((u32) ptr->size); if (ptr->extended_language == NULL) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->extended_language, (u32) ptr->size); /*safety check in case the string is not null-terminated*/ if (ptr->extended_language[ptr->size-1]) { char *str = (char*)gf_malloc((u32) ptr->size + 1); memcpy(str, ptr->extended_language, (u32) ptr->size); str[ptr->size] = 0; gf_free(ptr->extended_language); ptr->extended_language = str; } } return GF_OK; } GF_Box *elng_New() { ISOM_DECL_BOX_ALLOC(GF_MediaBox, GF_ISOM_BOX_TYPE_ELNG); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err elng_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ExtendedLanguageBox *ptr = (GF_ExtendedLanguageBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->extended_language) { gf_bs_write_data(bs, ptr->extended_language, (u32)(strlen(ptr->extended_language)+1)); } return GF_OK; } GF_Err elng_Size(GF_Box *s) { GF_ExtendedLanguageBox *ptr = (GF_ExtendedLanguageBox *)s; if (ptr->extended_language) { ptr->size += strlen(ptr->extended_language)+1; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void mfhd_del(GF_Box *s) { GF_MovieFragmentHeaderBox *ptr = (GF_MovieFragmentHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err mfhd_Read(GF_Box *s, GF_BitStream *bs) { GF_MovieFragmentHeaderBox *ptr = (GF_MovieFragmentHeaderBox *)s; ptr->sequence_number = gf_bs_read_u32(bs); return GF_OK; } GF_Box *mfhd_New() { ISOM_DECL_BOX_ALLOC(GF_MovieFragmentHeaderBox, GF_ISOM_BOX_TYPE_MFHD); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mfhd_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MovieFragmentHeaderBox *ptr = (GF_MovieFragmentHeaderBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->sequence_number); return GF_OK; } GF_Err mfhd_Size(GF_Box *s) { s->size += 4; return GF_OK; } #endif 
/*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ void minf_del(GF_Box *s) { GF_MediaInformationBox *ptr = (GF_MediaInformationBox *)s; if (ptr == NULL) return; //if we have a Handler not self-contained, delete it (the self-contained belongs to the movie) if (ptr->dataHandler) { gf_isom_datamap_close(ptr); } if (ptr->InfoHeader) gf_isom_box_del((GF_Box *)ptr->InfoHeader); if (ptr->dataInformation) gf_isom_box_del((GF_Box *)ptr->dataInformation); if (ptr->sampleTable) gf_isom_box_del((GF_Box *)ptr->sampleTable); gf_free(ptr); } GF_Err minf_AddBox(GF_Box *s, GF_Box *a) { GF_MediaInformationBox *ptr = (GF_MediaInformationBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_NMHD: case GF_ISOM_BOX_TYPE_STHD: case GF_ISOM_BOX_TYPE_VMHD: case GF_ISOM_BOX_TYPE_SMHD: case GF_ISOM_BOX_TYPE_HMHD: case GF_ISOM_BOX_TYPE_GMHD: if (ptr->InfoHeader) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->InfoHeader = a; return GF_OK; case GF_ISOM_BOX_TYPE_DINF: if (ptr->dataInformation) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->dataInformation = (GF_DataInformationBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_STBL: if (ptr->sampleTable ) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->sampleTable = (GF_SampleTableBox *)a; return GF_OK; default: return gf_isom_box_add_default(s, a); } return GF_OK; } GF_Err minf_Read(GF_Box *s, GF_BitStream *bs) { GF_MediaInformationBox *ptr = (GF_MediaInformationBox *)s; GF_Err e; e = gf_isom_box_array_read(s, bs, minf_AddBox); if (! ptr->dataInformation) { GF_Box *dinf, *dref, *url; Bool dump_mode = GF_FALSE; GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing DataInformationBox\n")); //commented on purpose, we are still able to handle the file, we only throw an error but keep processing // e = GF_ISOM_INVALID_FILE; //add a dinf box to avoid any access to a null dinf dinf = gf_isom_box_new(GF_ISOM_BOX_TYPE_DINF); if (!dinf) return GF_OUT_OF_MEM; if (ptr->InfoHeader && gf_list_find(ptr->other_boxes, ptr->InfoHeader)>=0) dump_mode = GF_TRUE; if (ptr->sampleTable && gf_list_find(ptr->other_boxes, ptr->sampleTable)>=0) dump_mode = GF_TRUE; ptr->dataInformation = (GF_DataInformationBox *)dinf; dref = gf_isom_box_new(GF_ISOM_BOX_TYPE_DREF); if (!dref) return GF_OUT_OF_MEM; e = dinf_AddBox(dinf, dref); url = gf_isom_box_new(GF_ISOM_BOX_TYPE_URL); if (!url) return GF_OUT_OF_MEM; ((GF_FullBox*)url)->flags = 1; e = gf_isom_box_add_default(dref, url); if (dump_mode) { gf_list_add(ptr->other_boxes, ptr->dataInformation); if (!dinf->other_boxes) dinf->other_boxes = gf_list_new(); gf_list_add(dinf->other_boxes, dref); } } return e; } GF_Box *minf_New() { ISOM_DECL_BOX_ALLOC(GF_MediaInformationBox, GF_ISOM_BOX_TYPE_MINF); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err minf_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MediaInformationBox *ptr = (GF_MediaInformationBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; //Header first if (ptr->InfoHeader) { e = gf_isom_box_write((GF_Box *) ptr->InfoHeader, bs); if (e) return e; } //then dataInfo if (ptr->dataInformation) { e = gf_isom_box_write((GF_Box *) ptr->dataInformation, bs); if (e) return e; } //then sampleTable if (ptr->sampleTable) { e = gf_isom_box_write((GF_Box *) ptr->sampleTable, bs); if (e) return e; } return GF_OK; } GF_Err minf_Size(GF_Box *s) { GF_Err e; GF_MediaInformationBox *ptr = (GF_MediaInformationBox *)s; if (ptr->InfoHeader) { e = gf_isom_box_size((GF_Box *) ptr->InfoHeader); if (e) return e; ptr->size += ptr->InfoHeader->size; } if (ptr->dataInformation) { e = 
gf_isom_box_size((GF_Box *) ptr->dataInformation); if (e) return e; ptr->size += ptr->dataInformation->size; } if (ptr->sampleTable) { e = gf_isom_box_size((GF_Box *) ptr->sampleTable); if (e) return e; ptr->size += ptr->sampleTable->size; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void moof_del(GF_Box *s) { GF_MovieFragmentBox *ptr = (GF_MovieFragmentBox *)s; if (ptr == NULL) return; if (ptr->mfhd) gf_isom_box_del((GF_Box *) ptr->mfhd); gf_isom_box_array_del(ptr->TrackList); if (ptr->mdat) gf_free(ptr->mdat); gf_free(ptr); } GF_Err moof_AddBox(GF_Box *s, GF_Box *a) { GF_MovieFragmentBox *ptr = (GF_MovieFragmentBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_MFHD: if (ptr->mfhd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->mfhd = (GF_MovieFragmentHeaderBox *) a; return GF_OK; case GF_ISOM_BOX_TYPE_TRAF: return gf_list_add(ptr->TrackList, a); case GF_ISOM_BOX_TYPE_PSSH: default: return gf_isom_box_add_default(s, a); } } GF_Err moof_Read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, moof_AddBox); } GF_Box *moof_New() { ISOM_DECL_BOX_ALLOC(GF_MovieFragmentBox, GF_ISOM_BOX_TYPE_MOOF); tmp->TrackList = gf_list_new(); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err moof_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MovieFragmentBox *ptr = (GF_MovieFragmentBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; //Header First if (ptr->mfhd) { e = gf_isom_box_write((GF_Box *) ptr->mfhd, bs); if (e) return e; } //then the track list return gf_isom_box_array_write(s, ptr->TrackList, bs); } GF_Err moof_Size(GF_Box *s) { GF_Err e; GF_MovieFragmentBox *ptr = (GF_MovieFragmentBox *)s; if (ptr->mfhd) { e = gf_isom_box_size((GF_Box *)ptr->mfhd); if (e) return e; ptr->size += ptr->mfhd->size; } return gf_isom_box_array_size(s, ptr->TrackList); } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ void moov_del(GF_Box *s) { GF_MovieBox *ptr = (GF_MovieBox *)s; if (ptr == NULL) return; if (ptr->mvhd) gf_isom_box_del((GF_Box *)ptr->mvhd); if (ptr->meta) gf_isom_box_del((GF_Box *)ptr->meta); if (ptr->iods) gf_isom_box_del((GF_Box *)ptr->iods); if (ptr->udta) gf_isom_box_del((GF_Box *)ptr->udta); #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (ptr->mvex) gf_isom_box_del((GF_Box *)ptr->mvex); #endif gf_isom_box_array_del(ptr->trackList); gf_free(ptr); } GF_Err moov_AddBox(GF_Box *s, GF_Box *a) { GF_MovieBox *ptr = (GF_MovieBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_IODS: if (ptr->iods) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->iods = (GF_ObjectDescriptorBox *)a; //if no IOD, delete the box if (!ptr->iods->descriptor) { ptr->iods = NULL; gf_isom_box_del(a); } return GF_OK; case GF_ISOM_BOX_TYPE_MVHD: if (ptr->mvhd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->mvhd = (GF_MovieHeaderBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_UDTA: if (ptr->udta) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->udta = (GF_UserDataBox *)a; return GF_OK; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS case GF_ISOM_BOX_TYPE_MVEX: if (ptr->mvex) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->mvex = (GF_MovieExtendsBox *)a; ptr->mvex->mov = ptr->mov; return GF_OK; #endif case GF_ISOM_BOX_TYPE_META: if (ptr->meta) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->meta = (GF_MetaBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_TRAK: //set our pointer to this obj ((GF_TrackBox *)a)->moov = ptr; return gf_list_add(ptr->trackList, a); case GF_ISOM_BOX_TYPE_PSSH: default: return gf_isom_box_add_default(s, a); } } GF_Err moov_Read(GF_Box *s, GF_BitStream *bs) { GF_Err 
e; e = gf_isom_box_array_read(s, bs, moov_AddBox); if (e) { return e; } else { if (!((GF_MovieBox *)s)->mvhd) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MovieHeaderBox\n")); return GF_ISOM_INVALID_FILE; } } return e; } GF_Box *moov_New() { ISOM_DECL_BOX_ALLOC(GF_MovieBox, GF_ISOM_BOX_TYPE_MOOV); tmp->trackList = gf_list_new(); if (!tmp->trackList) { gf_free(tmp); return NULL; } return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err moov_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MovieBox *ptr = (GF_MovieBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; if (ptr->mvhd) { e = gf_isom_box_write((GF_Box *) ptr->mvhd, bs); if (e) return e; } if (ptr->iods) { e = gf_isom_box_write((GF_Box *) ptr->iods, bs); if (e) return e; } if (ptr->meta) { e = gf_isom_box_write((GF_Box *) ptr->meta, bs); if (e) return e; } #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (ptr->mvex && !ptr->mvex_after_traks) { e = gf_isom_box_write((GF_Box *) ptr->mvex, bs); if (e) return e; } #endif e = gf_isom_box_array_write(s, ptr->trackList, bs); if (e) return e; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (ptr->mvex && ptr->mvex_after_traks) { e = gf_isom_box_write((GF_Box *) ptr->mvex, bs); if (e) return e; } #endif if (ptr->udta) { e = gf_isom_box_write((GF_Box *) ptr->udta, bs); if (e) return e; } return GF_OK; } GF_Err moov_Size(GF_Box *s) { GF_Err e; GF_MovieBox *ptr = (GF_MovieBox *)s; if (ptr->mvhd) { e = gf_isom_box_size((GF_Box *) ptr->mvhd); if (e) return e; ptr->size += ptr->mvhd->size; } if (ptr->iods) { e = gf_isom_box_size((GF_Box *) ptr->iods); if (e) return e; ptr->size += ptr->iods->size; } if (ptr->udta) { e = gf_isom_box_size((GF_Box *) ptr->udta); if (e) return e; ptr->size += ptr->udta->size; } if (ptr->meta) { e = gf_isom_box_size((GF_Box *) ptr->meta); if (e) return e; ptr->size += ptr->meta->size; } #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (ptr->mvex) { e = gf_isom_box_size((GF_Box *) ptr->mvex); if (e) return e; ptr->size += ptr->mvex->size; } #endif return gf_isom_box_array_size(s, ptr->trackList); } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void audio_sample_entry_del(GF_Box *s) { GF_MPEGAudioSampleEntryBox *ptr = (GF_MPEGAudioSampleEntryBox *)s; if (ptr == NULL) return; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s); if (ptr->esd) gf_isom_box_del((GF_Box *)ptr->esd); if (ptr->slc) gf_odf_desc_del((GF_Descriptor *)ptr->slc); if (ptr->cfg_ac3) gf_isom_box_del((GF_Box *)ptr->cfg_ac3); if (ptr->cfg_3gpp) gf_isom_box_del((GF_Box *)ptr->cfg_3gpp); gf_free(ptr); } GF_Err audio_sample_entry_AddBox(GF_Box *s, GF_Box *a) { GF_MPEGAudioSampleEntryBox *ptr = (GF_MPEGAudioSampleEntryBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_ESDS: if (ptr->esd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->esd = (GF_ESDBox *)a; break; case GF_ISOM_BOX_TYPE_SINF: gf_list_add(ptr->protections, a); break; case GF_ISOM_BOX_TYPE_DAMR: case GF_ISOM_BOX_TYPE_DEVC: case GF_ISOM_BOX_TYPE_DQCP: case GF_ISOM_BOX_TYPE_DSMV: ptr->cfg_3gpp = (GF_3GPPConfigBox *) a; /*for 3GP config, remember sample entry type in config*/ ptr->cfg_3gpp->cfg.type = ptr->type; break; case GF_ISOM_BOX_TYPE_DAC3: ptr->cfg_ac3 = (GF_AC3ConfigBox *) a; break; case GF_ISOM_BOX_TYPE_DEC3: ptr->cfg_ac3 = (GF_AC3ConfigBox *) a; break; case GF_ISOM_BOX_TYPE_MHA1: case GF_ISOM_BOX_TYPE_MHA2: case GF_ISOM_BOX_TYPE_MHM1: case GF_ISOM_BOX_TYPE_MHM2: ptr->cfg_mha = (GF_MHAConfigBox *) a; break; case GF_ISOM_BOX_TYPE_UNKNOWN: /*HACK for QT files: get the esds box from the track*/ if (s->type == 
GF_ISOM_BOX_TYPE_MP4A) { GF_UnknownBox *wave = (GF_UnknownBox *)a; if (ptr->esd) ERROR_ON_DUPLICATED_BOX(a, ptr) //wave subboxes may have been properly parsed if ((wave->original_4cc == GF_QT_BOX_TYPE_WAVE) && gf_list_count(wave->other_boxes)) { u32 i; for (i =0; i<gf_list_count(wave->other_boxes); i++) { GF_Box *inner_box = (GF_Box *)gf_list_get(wave->other_boxes, i); if (inner_box->type == GF_ISOM_BOX_TYPE_ESDS) { ptr->esd = (GF_ESDBox *)inner_box; } } return gf_isom_box_add_default(s, a); } //unknown fomat, look for 'es' (esds) and try to parse box if (wave->data != NULL) { u32 offset = 0; while (offset + 5 < wave->dataSize && (wave->data[offset + 4] != 'e') && (wave->data[offset + 5] != 's')) { offset++; } if (offset + 5 < wave->dataSize) { GF_Box *a; GF_Err e; GF_BitStream *bs = gf_bs_new(wave->data + offset, wave->dataSize - offset, GF_BITSTREAM_READ); e = gf_isom_box_parse(&a, bs); gf_bs_del(bs); if (e) return e; ptr->esd = (GF_ESDBox *)a; gf_isom_box_add_for_dump_mode((GF_Box *)ptr, a); gf_isom_box_del(a); return GF_OK; } GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Cannot process box %s!\n", gf_4cc_to_str(wave->original_4cc))); } } return gf_isom_box_add_default(s, a); default: return gf_isom_box_add_default(s, a); } return GF_OK; } GF_Err audio_sample_entry_Read(GF_Box *s, GF_BitStream *bs) { GF_MPEGAudioSampleEntryBox *ptr; char *data; u8 a, b, c, d; u32 i, size, v, nb_alnum; GF_Err e; u64 pos, start; ptr = (GF_MPEGAudioSampleEntryBox *)s; start = gf_bs_get_position(bs); gf_bs_seek(bs, start + 8); v = gf_bs_read_u16(bs); if (v) ptr->is_qtff = 1; //try to disambiguate QTFF v1 and MP4 v1 audio sample entries ... if (v==1) { //go to end of ISOM audio sample entry, skip 4 byte (box size field), read 4 bytes (box type) and check if this looks like a box gf_bs_seek(bs, start + 8 + 20 + 4); a = gf_bs_read_u8(bs); b = gf_bs_read_u8(bs); c = gf_bs_read_u8(bs); d = gf_bs_read_u8(bs); nb_alnum = 0; if (isalnum(a)) nb_alnum++; if (isalnum(b)) nb_alnum++; if (isalnum(c)) nb_alnum++; if (isalnum(d)) nb_alnum++; if (nb_alnum>2) ptr->is_qtff = 0; } gf_bs_seek(bs, start); e = gf_isom_audio_sample_entry_read((GF_AudioSampleEntryBox*)s, bs); if (e) return e; pos = gf_bs_get_position(bs); size = (u32) s->size; e = gf_isom_box_array_read(s, bs, audio_sample_entry_AddBox); if (!e) return GF_OK; if (size<8) return GF_ISOM_INVALID_FILE; /*hack for some weird files (possibly recorded with live.com tools, needs further investigations)*/ gf_bs_seek(bs, pos); data = (char*)gf_malloc(sizeof(char) * size); gf_bs_read_data(bs, data, size); for (i=0; i<size-8; i++) { if (GF_4CC(data[i+4], data[i+5], data[i+6], data[i+7]) == GF_ISOM_BOX_TYPE_ESDS) { GF_BitStream *mybs = gf_bs_new(data + i, size - i, GF_BITSTREAM_READ); e = gf_isom_box_parse((GF_Box **)&ptr->esd, mybs); gf_bs_del(mybs); break; } } gf_free(data); return e; } GF_Box *audio_sample_entry_New() { ISOM_DECL_BOX_ALLOC(GF_MPEGAudioSampleEntryBox, GF_ISOM_BOX_TYPE_MP4A); gf_isom_audio_sample_entry_init((GF_AudioSampleEntryBox*)tmp); return (GF_Box *)tmp; } GF_Box *enca_New() { ISOM_DECL_BOX_ALLOC(GF_MPEGAudioSampleEntryBox, GF_ISOM_BOX_TYPE_ENCA); gf_isom_audio_sample_entry_init((GF_AudioSampleEntryBox*)tmp); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err audio_sample_entry_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MPEGAudioSampleEntryBox *ptr = (GF_MPEGAudioSampleEntryBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_isom_audio_sample_entry_write((GF_AudioSampleEntryBox*)s, bs); if (ptr->esd) { e = 
gf_isom_box_write((GF_Box *)ptr->esd, bs); if (e) return e; } if (ptr->cfg_3gpp) { e = gf_isom_box_write((GF_Box *)ptr->cfg_3gpp, bs); if (e) return e; } if (ptr->cfg_ac3) { e = gf_isom_box_write((GF_Box *)ptr->cfg_ac3, bs); if (e) return e; } return gf_isom_box_array_write(s, ptr->protections, bs); } GF_Err audio_sample_entry_Size(GF_Box *s) { GF_Err e; GF_MPEGAudioSampleEntryBox *ptr = (GF_MPEGAudioSampleEntryBox *)s; gf_isom_audio_sample_entry_size((GF_AudioSampleEntryBox*)s); if (ptr->esd) { e = gf_isom_box_size((GF_Box *)ptr->esd); if (e) return e; ptr->size += ptr->esd->size; } if (ptr->cfg_3gpp) { e = gf_isom_box_size((GF_Box *)ptr->cfg_3gpp); if (e) return e; ptr->size += ptr->cfg_3gpp->size; } if (ptr->cfg_ac3) { e = gf_isom_box_size((GF_Box *)ptr->cfg_ac3); if (e) return e; ptr->size += ptr->cfg_ac3->size; } return gf_isom_box_array_size(s, ptr->protections); } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void gen_sample_entry_del(GF_Box *s) { GF_SampleEntryBox *ptr = (GF_SampleEntryBox *)s; if (ptr == NULL) return; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s); gf_free(ptr); } GF_Err gen_sample_entry_Read(GF_Box *s, GF_BitStream *bs) { return gf_isom_base_sample_entry_read((GF_SampleEntryBox *)s, bs); } GF_Box *gen_sample_entry_New() { ISOM_DECL_BOX_ALLOC(GF_SampleEntryBox, GF_QT_BOX_TYPE_C608);//type will be overriten gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err gen_sample_entry_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SampleEntryBox *ptr = (GF_SampleEntryBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_data(bs, ptr->reserved, 6); gf_bs_write_u16(bs, ptr->dataReferenceIndex); return gf_isom_box_array_write(s, ptr->protections, bs); } GF_Err gen_sample_entry_Size(GF_Box *s) { GF_SampleEntryBox *ptr = (GF_SampleEntryBox *)s; ptr->size += 8; return gf_isom_box_array_size(s, ptr->protections); } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mp4s_del(GF_Box *s) { GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s; if (ptr == NULL) return; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s); if (ptr->esd) gf_isom_box_del((GF_Box *)ptr->esd); if (ptr->slc) gf_odf_desc_del((GF_Descriptor *)ptr->slc); gf_free(ptr); } GF_Err mp4s_AddBox(GF_Box *s, GF_Box *a) { GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_ESDS: if (ptr->esd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->esd = (GF_ESDBox *)a; break; case GF_ISOM_BOX_TYPE_SINF: gf_list_add(ptr->protections, a); break; default: return gf_isom_box_add_default(s, a); } return GF_OK; } GF_Err mp4s_Read(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s; e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)ptr, bs); if (e) return e; ISOM_DECREASE_SIZE(ptr, 8); return gf_isom_box_array_read(s, bs, mp4s_AddBox); } GF_Box *mp4s_New() { ISOM_DECL_BOX_ALLOC(GF_MPEGSampleEntryBox, GF_ISOM_BOX_TYPE_MP4S); gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp); return (GF_Box *)tmp; } GF_Box *encs_New() { ISOM_DECL_BOX_ALLOC(GF_MPEGSampleEntryBox, GF_ISOM_BOX_TYPE_ENCS); gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mp4s_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_data(bs, ptr->reserved, 6); gf_bs_write_u16(bs, ptr->dataReferenceIndex); e = gf_isom_box_write((GF_Box 
*)ptr->esd, bs); if (e) return e; return gf_isom_box_array_write(s, ptr->protections, bs); } GF_Err mp4s_Size(GF_Box *s) { GF_Err e; GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s; ptr->size += 8; e = gf_isom_box_size((GF_Box *)ptr->esd); if (e) return e; ptr->size += ptr->esd->size; return gf_isom_box_array_size(s, ptr->protections); } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void video_sample_entry_del(GF_Box *s) { GF_MPEGVisualSampleEntryBox *ptr = (GF_MPEGVisualSampleEntryBox *)s; if (ptr == NULL) return; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s); if (ptr->esd) gf_isom_box_del((GF_Box *)ptr->esd); if (ptr->slc) gf_odf_desc_del((GF_Descriptor *)ptr->slc); /*for publishing*/ if (ptr->emul_esd) gf_odf_desc_del((GF_Descriptor *)ptr->emul_esd); if (ptr->avc_config) gf_isom_box_del((GF_Box *) ptr->avc_config); if (ptr->svc_config) gf_isom_box_del((GF_Box *) ptr->svc_config); if (ptr->mvc_config) gf_isom_box_del((GF_Box *) ptr->mvc_config); if (ptr->hevc_config) gf_isom_box_del((GF_Box *) ptr->hevc_config); if (ptr->lhvc_config) gf_isom_box_del((GF_Box *) ptr->lhvc_config); if (ptr->av1_config) gf_isom_box_del((GF_Box *)ptr->av1_config); if (ptr->vp_config) gf_isom_box_del((GF_Box *)ptr->vp_config); if (ptr->cfg_3gpp) gf_isom_box_del((GF_Box *)ptr->cfg_3gpp); if (ptr->descr) gf_isom_box_del((GF_Box *) ptr->descr); if (ptr->ipod_ext) gf_isom_box_del((GF_Box *)ptr->ipod_ext); if (ptr->pasp) gf_isom_box_del((GF_Box *)ptr->pasp); if (ptr->clap) gf_isom_box_del((GF_Box *)ptr->clap); if (ptr->rinf) gf_isom_box_del((GF_Box *)ptr->rinf); if (ptr->ccst) gf_isom_box_del((GF_Box *)ptr->ccst); if (ptr->rvcc) gf_isom_box_del((GF_Box *)ptr->rvcc); if (ptr->auxi) gf_isom_box_del((GF_Box *)ptr->auxi); gf_free(ptr); } GF_Err video_sample_entry_AddBox(GF_Box *s, GF_Box *a) { GF_MPEGVisualSampleEntryBox *ptr = (GF_MPEGVisualSampleEntryBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_ESDS: if (ptr->esd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->esd = (GF_ESDBox *)a; break; case GF_ISOM_BOX_TYPE_SINF: gf_list_add(ptr->protections, a); break; case GF_ISOM_BOX_TYPE_RINF: if (ptr->rinf) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->rinf = (GF_RestrictedSchemeInfoBox *) a; break; case GF_ISOM_BOX_TYPE_AVCC: if (ptr->avc_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->avc_config = (GF_AVCConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_HVCC: if (ptr->hevc_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->hevc_config = (GF_HEVCConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_SVCC: if (ptr->svc_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->svc_config = (GF_AVCConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_MVCC: if (ptr->mvc_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->mvc_config = (GF_AVCConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_LHVC: if (ptr->lhvc_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->lhvc_config = (GF_HEVCConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_AV1C: if (ptr->av1_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->av1_config = (GF_AV1ConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_VPCC: if (ptr->vp_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->vp_config = (GF_VPConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_M4DS: if (ptr->descr) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->descr = (GF_MPEG4ExtensionDescriptorsBox *)a; break; case GF_ISOM_BOX_TYPE_UUID: if (! 
memcmp(((GF_UnknownUUIDBox*)a)->uuid, GF_ISOM_IPOD_EXT, 16)) { if (ptr->ipod_ext) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->ipod_ext = (GF_UnknownUUIDBox *)a; } else { return gf_isom_box_add_default(s, a); } break; case GF_ISOM_BOX_TYPE_D263: ptr->cfg_3gpp = (GF_3GPPConfigBox *)a; /*for 3GP config, remember sample entry type in config*/ ptr->cfg_3gpp->cfg.type = ptr->type; break; break; case GF_ISOM_BOX_TYPE_PASP: if (ptr->pasp) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->pasp = (GF_PixelAspectRatioBox *)a; break; case GF_ISOM_BOX_TYPE_CLAP: if (ptr->clap) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->clap = (GF_CleanApertureBox *)a; break; case GF_ISOM_BOX_TYPE_CCST: if (ptr->ccst) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->ccst = (GF_CodingConstraintsBox *)a; break; case GF_ISOM_BOX_TYPE_AUXI: if (ptr->auxi) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->auxi = (GF_AuxiliaryTypeInfoBox *)a; break; case GF_ISOM_BOX_TYPE_RVCC: if (ptr->rvcc) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->rvcc = (GF_RVCConfigurationBox *)a; break; default: return gf_isom_box_add_default(s, a); } return GF_OK; } GF_Err video_sample_entry_Read(GF_Box *s, GF_BitStream *bs) { GF_MPEGVisualSampleEntryBox *mp4v = (GF_MPEGVisualSampleEntryBox*)s; GF_Err e; e = gf_isom_video_sample_entry_read((GF_VisualSampleEntryBox *)s, bs); if (e) return e; e = gf_isom_box_array_read(s, bs, video_sample_entry_AddBox); if (e) return e; /*this is an AVC sample desc*/ if (mp4v->avc_config || mp4v->svc_config || mp4v->mvc_config) AVC_RewriteESDescriptor(mp4v); /*this is an HEVC sample desc*/ if (mp4v->hevc_config || mp4v->lhvc_config || (mp4v->type==GF_ISOM_BOX_TYPE_HVT1)) HEVC_RewriteESDescriptor(mp4v); /*this is an AV1 sample desc*/ if (mp4v->av1_config) AV1_RewriteESDescriptor(mp4v); return GF_OK; } GF_Box *video_sample_entry_New() { GF_MPEGVisualSampleEntryBox *tmp; GF_SAFEALLOC(tmp, GF_MPEGVisualSampleEntryBox); if (tmp == NULL) return NULL; gf_isom_video_sample_entry_init((GF_VisualSampleEntryBox *)tmp); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err video_sample_entry_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MPEGVisualSampleEntryBox *ptr = (GF_MPEGVisualSampleEntryBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_isom_video_sample_entry_write((GF_VisualSampleEntryBox *)s, bs); /*mp4v*/ if (ptr->esd) { e = gf_isom_box_write((GF_Box *)ptr->esd, bs); if (e) return e; } /*mp4v*/ else if (ptr->cfg_3gpp) { e = gf_isom_box_write((GF_Box *)ptr->cfg_3gpp, bs); if (e) return e; } /*avc or hevc or av1*/ else { if (ptr->avc_config && ptr->avc_config->config) { e = gf_isom_box_write((GF_Box *) ptr->avc_config, bs); if (e) return e; } if (ptr->hevc_config && ptr->hevc_config->config) { e = gf_isom_box_write((GF_Box *) ptr->hevc_config, bs); if (e) return e; } if (ptr->ipod_ext) { e = gf_isom_box_write((GF_Box *) ptr->ipod_ext, bs); if (e) return e; } if (ptr->descr) { e = gf_isom_box_write((GF_Box *) ptr->descr, bs); if (e) return e; } if (ptr->svc_config && ptr->svc_config->config) { e = gf_isom_box_write((GF_Box *) ptr->svc_config, bs); if (e) return e; } if (ptr->mvc_config && ptr->mvc_config->config) { e = gf_isom_box_write((GF_Box *) ptr->mvc_config, bs); if (e) return e; } if (ptr->lhvc_config && ptr->lhvc_config->config) { e = gf_isom_box_write((GF_Box *) ptr->lhvc_config, bs); if (e) return e; } if (ptr->av1_config && ptr->av1_config->config) { e = gf_isom_box_write((GF_Box *)ptr->av1_config, bs); if (e) return e; } if (ptr->vp_config && ptr->vp_config->config) { e = gf_isom_box_write((GF_Box *)ptr->vp_config, bs); if (e) return e; } } 
if (ptr->pasp) { e = gf_isom_box_write((GF_Box *)ptr->pasp, bs); if (e) return e; } if (ptr->clap) { e = gf_isom_box_write((GF_Box *)ptr->clap, bs); if (e) return e; } if (ptr->ccst) { e = gf_isom_box_write((GF_Box *)ptr->ccst, bs); if (e) return e; } if (ptr->auxi) { e = gf_isom_box_write((GF_Box *)ptr->auxi, bs); if (e) return e; } if (ptr->rvcc) { e = gf_isom_box_write((GF_Box *)ptr->rvcc, bs); if (e) return e; } if (ptr->rinf) { e = gf_isom_box_write((GF_Box *)ptr->rinf, bs); if (e) return e; } return gf_isom_box_array_write(s, ptr->protections, bs); } GF_Err video_sample_entry_Size(GF_Box *s) { GF_Err e; GF_MPEGVisualSampleEntryBox *ptr = (GF_MPEGVisualSampleEntryBox *)s; gf_isom_video_sample_entry_size((GF_VisualSampleEntryBox *)s); if (ptr->esd) { e = gf_isom_box_size((GF_Box *)ptr->esd); if (e) return e; ptr->size += ptr->esd->size; } else if (ptr->cfg_3gpp) { e = gf_isom_box_size((GF_Box *)ptr->cfg_3gpp); if (e) return e; ptr->size += ptr->cfg_3gpp->size; } else { switch (ptr->type) { case GF_ISOM_BOX_TYPE_AVC1: case GF_ISOM_BOX_TYPE_AVC2: case GF_ISOM_BOX_TYPE_AVC3: case GF_ISOM_BOX_TYPE_AVC4: case GF_ISOM_BOX_TYPE_SVC1: case GF_ISOM_BOX_TYPE_SVC2: case GF_ISOM_BOX_TYPE_MVC1: case GF_ISOM_BOX_TYPE_MVC2: if (!ptr->avc_config && !ptr->svc_config && !ptr->mvc_config) return GF_ISOM_INVALID_FILE; break; case GF_ISOM_BOX_TYPE_VP08: case GF_ISOM_BOX_TYPE_VP09: if (!ptr->vp_config) { return GF_ISOM_INVALID_FILE; } break; case GF_ISOM_BOX_TYPE_AV01: if (!ptr->av1_config) { return GF_ISOM_INVALID_FILE; } break; case GF_ISOM_BOX_TYPE_HVC1: case GF_ISOM_BOX_TYPE_HEV1: case GF_ISOM_BOX_TYPE_HVC2: case GF_ISOM_BOX_TYPE_HEV2: case GF_ISOM_BOX_TYPE_LHV1: case GF_ISOM_BOX_TYPE_LHE1: //commented on purpose, HVT1 tracks have no config associated // case GF_ISOM_BOX_TYPE_HVT1: // case GF_ISOM_BOX_TYPE_HVT2: if (!ptr->hevc_config && !ptr->lhvc_config) { return GF_ISOM_INVALID_FILE; } break; default: break; } if (ptr->hevc_config && ptr->hevc_config->config) { e = gf_isom_box_size((GF_Box *)ptr->hevc_config); if (e) return e; ptr->size += ptr->hevc_config->size; } if (ptr->avc_config && ptr->avc_config->config) { e = gf_isom_box_size((GF_Box *) ptr->avc_config); if (e) return e; ptr->size += ptr->avc_config->size; } if (ptr->svc_config && ptr->svc_config->config) { e = gf_isom_box_size((GF_Box *) ptr->svc_config); if (e) return e; ptr->size += ptr->svc_config->size; } if (ptr->mvc_config && ptr->mvc_config->config) { e = gf_isom_box_size((GF_Box *) ptr->mvc_config); if (e) return e; ptr->size += ptr->mvc_config->size; } if (ptr->lhvc_config && ptr->lhvc_config->config) { e = gf_isom_box_size((GF_Box *) ptr->lhvc_config); if (e) return e; ptr->size += ptr->lhvc_config->size; } if (ptr->av1_config && ptr->av1_config->config) { e = gf_isom_box_size((GF_Box *)ptr->av1_config); if (e) return e; ptr->size += ptr->av1_config->size; } if (ptr->vp_config && ptr->vp_config->config) { e = gf_isom_box_size((GF_Box *)ptr->vp_config); if (e) return e; ptr->size += ptr->vp_config->size; } if (ptr->ipod_ext) { e = gf_isom_box_size((GF_Box *) ptr->ipod_ext); if (e) return e; ptr->size += ptr->ipod_ext->size; } if (ptr->descr) { e = gf_isom_box_size((GF_Box *) ptr->descr); if (e) return e; ptr->size += ptr->descr->size; } } if (ptr->pasp) { e = gf_isom_box_size((GF_Box *)ptr->pasp); if (e) return e; ptr->size += ptr->pasp->size; } if (ptr->clap) { e = gf_isom_box_size((GF_Box *)ptr->clap); if (e) return e; ptr->size += ptr->clap->size; } if (ptr->ccst) { e = gf_isom_box_size((GF_Box *)ptr->ccst); if (e) return e; 
ptr->size += ptr->ccst->size; } if (ptr->auxi) { e = gf_isom_box_size((GF_Box *)ptr->auxi); if (e) return e; ptr->size += ptr->auxi->size; } if (ptr->rvcc) { e = gf_isom_box_size((GF_Box *)ptr->rvcc); if (e) return e; ptr->size += ptr->rvcc->size; } if (ptr->rinf) { e = gf_isom_box_size((GF_Box *)ptr->rinf); if (e) return e; ptr->size += ptr->rinf->size; } return gf_isom_box_array_size(s, ptr->protections); } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void mvex_del(GF_Box *s) { GF_MovieExtendsBox *ptr = (GF_MovieExtendsBox *)s; if (ptr == NULL) return; if (ptr->mehd) gf_isom_box_del((GF_Box*)ptr->mehd); gf_isom_box_array_del(ptr->TrackExList); gf_isom_box_array_del(ptr->TrackExPropList); ptr->mehd = NULL; ptr->TrackExList = NULL; ptr->TrackExPropList = NULL; gf_free(ptr); } GF_Err mvex_AddBox(GF_Box *s, GF_Box *a) { GF_MovieExtendsBox *ptr = (GF_MovieExtendsBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_TREX: return gf_list_add(ptr->TrackExList, a); case GF_ISOM_BOX_TYPE_TREP: return gf_list_add(ptr->TrackExPropList, a); case GF_ISOM_BOX_TYPE_MEHD: if (ptr->mehd) break; ptr->mehd = (GF_MovieExtendsHeaderBox*)a; return GF_OK; default: return gf_isom_box_add_default(s, a); } return GF_OK; } GF_Err mvex_Read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, mvex_AddBox); } GF_Box *mvex_New() { ISOM_DECL_BOX_ALLOC(GF_MovieExtendsBox, GF_ISOM_BOX_TYPE_MVEX); tmp->TrackExList = gf_list_new(); if (!tmp->TrackExList) { gf_free(tmp); return NULL; } tmp->TrackExPropList = gf_list_new(); if (!tmp->TrackExPropList) { gf_list_del(tmp->TrackExList); gf_free(tmp); return NULL; } return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mvex_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MovieExtendsBox *ptr = (GF_MovieExtendsBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; if (ptr->mehd) { e = gf_isom_box_write((GF_Box *)ptr->mehd, bs); if (e) return e; } e = gf_isom_box_array_write(s, ptr->TrackExList, bs); if (e) return e; return gf_isom_box_array_write(s, ptr->TrackExPropList, bs); } GF_Err mvex_Size(GF_Box *s) { GF_Err e; GF_MovieExtendsBox *ptr = (GF_MovieExtendsBox *)s; if (ptr->mehd) { e = gf_isom_box_size((GF_Box *)ptr->mehd); if (e) return e; ptr->size += ptr->mehd->size; } e = gf_isom_box_array_size(s, ptr->TrackExList); if (e) return e; return gf_isom_box_array_size(s, ptr->TrackExPropList); } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *mehd_New() { ISOM_DECL_BOX_ALLOC(GF_MovieExtendsHeaderBox, GF_ISOM_BOX_TYPE_MEHD); return (GF_Box *)tmp; } void mehd_del(GF_Box *s) { gf_free(s); } GF_Err mehd_Read(GF_Box *s, GF_BitStream *bs) { GF_MovieExtendsHeaderBox *ptr = (GF_MovieExtendsHeaderBox *)s; if (ptr->version==1) { ptr->fragment_duration = gf_bs_read_u64(bs); } else { ptr->fragment_duration = (u64) gf_bs_read_u32(bs); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mehd_Write(GF_Box *s, GF_BitStream *bs) { GF_MovieExtendsHeaderBox *ptr = (GF_MovieExtendsHeaderBox *)s; GF_Err e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->version == 1) { gf_bs_write_u64(bs, ptr->fragment_duration); } else { gf_bs_write_u32(bs, (u32) ptr->fragment_duration); } return GF_OK; } GF_Err mehd_Size(GF_Box *s) { GF_MovieExtendsHeaderBox *ptr = (GF_MovieExtendsHeaderBox *)s; ptr->version = (ptr->fragment_duration>0xFFFFFFFF) ? 1 : 0; s->size += (ptr->version == 1) ? 
8 : 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ void mvhd_del(GF_Box *s) { GF_MovieHeaderBox *ptr = (GF_MovieHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err mvhd_Read(GF_Box *s, GF_BitStream *bs) { GF_MovieHeaderBox *ptr = (GF_MovieHeaderBox *)s; if (ptr == NULL) return GF_BAD_PARAM; if (ptr->version == 1) { ptr->creationTime = gf_bs_read_u64(bs); ptr->modificationTime = gf_bs_read_u64(bs); ptr->timeScale = gf_bs_read_u32(bs); ptr->duration = gf_bs_read_u64(bs); } else { ptr->creationTime = gf_bs_read_u32(bs); ptr->modificationTime = gf_bs_read_u32(bs); ptr->timeScale = gf_bs_read_u32(bs); ptr->duration = gf_bs_read_u32(bs); } if (!ptr->timeScale) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Movie header timescale is invalid (0) - defaulting to 600\n" )); ptr->timeScale = 600; } ptr->preferredRate = gf_bs_read_u32(bs); ptr->preferredVolume = gf_bs_read_u16(bs); gf_bs_read_data(bs, ptr->reserved, 10); ptr->matrixA = gf_bs_read_u32(bs); ptr->matrixB = gf_bs_read_u32(bs); ptr->matrixU = gf_bs_read_u32(bs); ptr->matrixC = gf_bs_read_u32(bs); ptr->matrixD = gf_bs_read_u32(bs); ptr->matrixV = gf_bs_read_u32(bs); ptr->matrixX = gf_bs_read_u32(bs); ptr->matrixY = gf_bs_read_u32(bs); ptr->matrixW = gf_bs_read_u32(bs); ptr->previewTime = gf_bs_read_u32(bs); ptr->previewDuration = gf_bs_read_u32(bs); ptr->posterTime = gf_bs_read_u32(bs); ptr->selectionTime = gf_bs_read_u32(bs); ptr->selectionDuration = gf_bs_read_u32(bs); ptr->currentTime = gf_bs_read_u32(bs); ptr->nextTrackID = gf_bs_read_u32(bs); ptr->original_duration = ptr->duration; return GF_OK; } GF_Box *mvhd_New() { ISOM_DECL_BOX_ALLOC(GF_MovieHeaderBox, GF_ISOM_BOX_TYPE_MVHD); tmp->preferredRate = (1<<16); tmp->preferredVolume = (1<<8); tmp->matrixA = (1<<16); tmp->matrixD = (1<<16); tmp->matrixW = (1<<30); tmp->nextTrackID = 1; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mvhd_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MovieHeaderBox *ptr = (GF_MovieHeaderBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->version == 1) { gf_bs_write_u64(bs, ptr->creationTime); gf_bs_write_u64(bs, ptr->modificationTime); gf_bs_write_u32(bs, ptr->timeScale); gf_bs_write_u64(bs, ptr->duration); } else { gf_bs_write_u32(bs, (u32) ptr->creationTime); gf_bs_write_u32(bs, (u32) ptr->modificationTime); gf_bs_write_u32(bs, ptr->timeScale); gf_bs_write_u32(bs, (u32) ptr->duration); } gf_bs_write_u32(bs, ptr->preferredRate); gf_bs_write_u16(bs, ptr->preferredVolume); gf_bs_write_data(bs, ptr->reserved, 10); gf_bs_write_u32(bs, ptr->matrixA); gf_bs_write_u32(bs, ptr->matrixB); gf_bs_write_u32(bs, ptr->matrixU); gf_bs_write_u32(bs, ptr->matrixC); gf_bs_write_u32(bs, ptr->matrixD); gf_bs_write_u32(bs, ptr->matrixV); gf_bs_write_u32(bs, ptr->matrixX); gf_bs_write_u32(bs, ptr->matrixY); gf_bs_write_u32(bs, ptr->matrixW); gf_bs_write_u32(bs, ptr->previewTime); gf_bs_write_u32(bs, ptr->previewDuration); gf_bs_write_u32(bs, ptr->posterTime); gf_bs_write_u32(bs, ptr->selectionTime); gf_bs_write_u32(bs, ptr->selectionDuration); gf_bs_write_u32(bs, ptr->currentTime); gf_bs_write_u32(bs, ptr->nextTrackID); return GF_OK; } GF_Err mvhd_Size(GF_Box *s) { GF_MovieHeaderBox *ptr = (GF_MovieHeaderBox *)s; if (ptr->duration==(u64) -1) ptr->version = 0; else ptr->version = (ptr->duration>0xFFFFFFFF) ? 1 : 0; ptr->size += (ptr->version == 1) ? 
	28 : 16;
	ptr->size += 80;
	return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/

void nmhd_del(GF_Box *s)
{
	GF_MPEGMediaHeaderBox *ptr = (GF_MPEGMediaHeaderBox *)s;
	if (ptr == NULL) return;
	gf_free(ptr);
}

GF_Err nmhd_Read(GF_Box *s, GF_BitStream *bs)
{
	return GF_OK;
}

GF_Box *nmhd_New()
{
	ISOM_DECL_BOX_ALLOC(GF_MPEGMediaHeaderBox, GF_ISOM_BOX_TYPE_NMHD);
	return (GF_Box *)tmp;
}

#ifndef GPAC_DISABLE_ISOM_WRITE

GF_Err nmhd_Write(GF_Box *s, GF_BitStream *bs)
{
	return gf_isom_full_box_write(s, bs);
}

GF_Err nmhd_Size(GF_Box *s)
{
	return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/

void padb_del(GF_Box *s)
{
	GF_PaddingBitsBox *ptr = (GF_PaddingBitsBox *) s;
	if (ptr == NULL) return;
	if (ptr->padbits) gf_free(ptr->padbits);
	gf_free(ptr);
}

GF_Err padb_Read(GF_Box *s,GF_BitStream *bs)
{
	u32 i;
	GF_PaddingBitsBox *ptr = (GF_PaddingBitsBox *)s;
	ptr->SampleCount = gf_bs_read_u32(bs);
	ISOM_DECREASE_SIZE(ptr, 4);
	//each payload byte carries the padding info of two samples
	if (ptr->SampleCount > ptr->size * 2) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in padb\n", ptr->SampleCount));
		return GF_ISOM_INVALID_FILE;
	}
	ptr->padbits = (u8 *)gf_malloc(sizeof(u8)*ptr->SampleCount);
	if (!ptr->padbits) return GF_OUT_OF_MEM;
	for (i=0; i<ptr->SampleCount; i += 2) {
		gf_bs_read_int(bs, 1);
		if (i+1 < ptr->SampleCount) {
			ptr->padbits[i+1] = gf_bs_read_int(bs, 3);
		} else {
			gf_bs_read_int(bs, 3);
		}
		gf_bs_read_int(bs, 1);
		ptr->padbits[i] = gf_bs_read_int(bs, 3);
	}
	return GF_OK;
}

GF_Box *padb_New()
{
	ISOM_DECL_BOX_ALLOC(GF_PaddingBitsBox, GF_ISOM_BOX_TYPE_PADB);
	return (GF_Box *)tmp;
}

#ifndef GPAC_DISABLE_ISOM_WRITE

GF_Err padb_Write(GF_Box *s, GF_BitStream *bs)
{
	u32 i;
	GF_Err e;
	GF_PaddingBitsBox *ptr = (GF_PaddingBitsBox *) s;
	e = gf_isom_full_box_write(s, bs);
	if (e) return e;
	gf_bs_write_int(bs, ptr->SampleCount, 32);
	for (i=0 ; i<ptr->SampleCount; i += 2) {
		gf_bs_write_int(bs, 0, 1);
		if (i+1 < ptr->SampleCount) {
			gf_bs_write_int(bs, ptr->padbits[i+1], 3);
		} else {
			gf_bs_write_int(bs, 0, 3);
		}
		gf_bs_write_int(bs, 0, 1);
		gf_bs_write_int(bs, ptr->padbits[i], 3);
	}
	return GF_OK;
}

GF_Err padb_Size(GF_Box *s)
{
	GF_PaddingBitsBox *ptr = (GF_PaddingBitsBox *)s;
	ptr->size += 4;
	if (ptr->SampleCount) ptr->size += (ptr->SampleCount + 1) / 2;
	return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/

void rely_del(GF_Box *s)
{
	GF_RelyHintBox *rely = (GF_RelyHintBox *)s;
	gf_free(rely);
}

GF_Err rely_Read(GF_Box *s, GF_BitStream *bs)
{
	GF_RelyHintBox *ptr = (GF_RelyHintBox *)s;
	ptr->reserved = gf_bs_read_int(bs, 6);
	ptr->prefered = gf_bs_read_int(bs, 1);
	ptr->required = gf_bs_read_int(bs, 1);
	return GF_OK;
}

GF_Box *rely_New()
{
	ISOM_DECL_BOX_ALLOC(GF_RelyHintBox, GF_ISOM_BOX_TYPE_RELY);
	return (GF_Box *)tmp;
}

#ifndef GPAC_DISABLE_ISOM_WRITE

GF_Err rely_Write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	GF_RelyHintBox *ptr = (GF_RelyHintBox *)s;
	if (ptr == NULL) return GF_BAD_PARAM;
	e = gf_isom_box_write_header(s, bs);
	if (e) return e;
	gf_bs_write_int(bs, ptr->reserved, 6);
	gf_bs_write_int(bs, ptr->prefered, 1);
	gf_bs_write_int(bs, ptr->required, 1);
	return GF_OK;
}

GF_Err rely_Size(GF_Box *s)
{
	s->size += 1;
	return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/

void rtpo_del(GF_Box *s)
{
	GF_RTPOBox *rtpo = (GF_RTPOBox *)s;
	gf_free(rtpo);
}

GF_Err rtpo_Read(GF_Box *s, GF_BitStream *bs)
{
	GF_RTPOBox *ptr = (GF_RTPOBox *)s;
	ptr->timeOffset = gf_bs_read_u32(bs);
	return GF_OK;
}

GF_Box *rtpo_New()
{
	ISOM_DECL_BOX_ALLOC(GF_RTPOBox, GF_ISOM_BOX_TYPE_RTPO);
	return (GF_Box *)tmp;
}

#ifndef GPAC_DISABLE_ISOM_WRITE

GF_Err rtpo_Write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	GF_RTPOBox *ptr = (GF_RTPOBox *)s;
	if (ptr == NULL) return GF_BAD_PARAM;
	//here we have no pb, just remember that some entries will have to
	//be 4-bytes aligned ...
e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->timeOffset); return GF_OK; } GF_Err rtpo_Size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void smhd_del(GF_Box *s) { GF_SoundMediaHeaderBox *ptr = (GF_SoundMediaHeaderBox *)s; if (ptr == NULL ) return; gf_free(ptr); } GF_Err smhd_Read(GF_Box *s, GF_BitStream *bs) { GF_SoundMediaHeaderBox *ptr = (GF_SoundMediaHeaderBox *)s; ptr->balance = gf_bs_read_u16(bs); ptr->reserved = gf_bs_read_u16(bs); return GF_OK; } GF_Box *smhd_New() { ISOM_DECL_BOX_ALLOC(GF_SoundMediaHeaderBox, GF_ISOM_BOX_TYPE_SMHD); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err smhd_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SoundMediaHeaderBox *ptr = (GF_SoundMediaHeaderBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u16(bs, ptr->balance); gf_bs_write_u16(bs, ptr->reserved); return GF_OK; } GF_Err smhd_Size(GF_Box *s) { GF_SoundMediaHeaderBox *ptr = (GF_SoundMediaHeaderBox *)s; ptr->reserved = 0; ptr->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void snro_del(GF_Box *s) { GF_SeqOffHintEntryBox *snro = (GF_SeqOffHintEntryBox *)s; gf_free(snro); } GF_Err snro_Read(GF_Box *s, GF_BitStream *bs) { GF_SeqOffHintEntryBox *ptr = (GF_SeqOffHintEntryBox *)s; ptr->SeqOffset = gf_bs_read_u32(bs); return GF_OK; } GF_Box *snro_New() { ISOM_DECL_BOX_ALLOC(GF_SeqOffHintEntryBox, GF_ISOM_BOX_TYPE_SNRO); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err snro_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SeqOffHintEntryBox *ptr = (GF_SeqOffHintEntryBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->SeqOffset); return GF_OK; } GF_Err snro_Size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stbl_del(GF_Box *s) { GF_SampleTableBox *ptr = (GF_SampleTableBox *)s; if (ptr == NULL) return; if (ptr->ChunkOffset) gf_isom_box_del(ptr->ChunkOffset); if (ptr->CompositionOffset) gf_isom_box_del((GF_Box *) ptr->CompositionOffset); if (ptr->CompositionToDecode) gf_isom_box_del((GF_Box *) ptr->CompositionToDecode); if (ptr->DegradationPriority) gf_isom_box_del((GF_Box *) ptr->DegradationPriority); if (ptr->SampleDescription) gf_isom_box_del((GF_Box *) ptr->SampleDescription); if (ptr->SampleSize) gf_isom_box_del((GF_Box *) ptr->SampleSize); if (ptr->SampleToChunk) gf_isom_box_del((GF_Box *) ptr->SampleToChunk); if (ptr->ShadowSync) gf_isom_box_del((GF_Box *) ptr->ShadowSync); if (ptr->SyncSample) gf_isom_box_del((GF_Box *) ptr->SyncSample); if (ptr->TimeToSample) gf_isom_box_del((GF_Box *) ptr->TimeToSample); if (ptr->SampleDep) gf_isom_box_del((GF_Box *) ptr->SampleDep); if (ptr->PaddingBits) gf_isom_box_del((GF_Box *) ptr->PaddingBits); if (ptr->sub_samples) gf_isom_box_array_del(ptr->sub_samples); if (ptr->sampleGroups) gf_isom_box_array_del(ptr->sampleGroups); if (ptr->sampleGroupsDescription) gf_isom_box_array_del(ptr->sampleGroupsDescription); if (ptr->sai_sizes) gf_isom_box_array_del(ptr->sai_sizes); if (ptr->sai_offsets) gf_isom_box_array_del(ptr->sai_offsets); if (ptr->traf_map) { if (ptr->traf_map->sample_num) gf_free(ptr->traf_map->sample_num); gf_free(ptr->traf_map); } gf_free(ptr); } GF_Err stbl_AddBox(GF_Box *s, GF_Box *a) { GF_SampleTableBox *ptr = (GF_SampleTableBox *)s; if (!a) return GF_OK; switch (a->type) { case GF_ISOM_BOX_TYPE_STTS: if (ptr->TimeToSample) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->TimeToSample = (GF_TimeToSampleBox *)a; break; 
case GF_ISOM_BOX_TYPE_CTTS: if (ptr->CompositionOffset) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->CompositionOffset = (GF_CompositionOffsetBox *)a; break; case GF_ISOM_BOX_TYPE_CSLG: if (ptr->CompositionToDecode) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->CompositionToDecode = (GF_CompositionToDecodeBox *)a; break; case GF_ISOM_BOX_TYPE_STSS: if (ptr->SyncSample) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->SyncSample = (GF_SyncSampleBox *)a; break; case GF_ISOM_BOX_TYPE_STSD: if (ptr->SampleDescription) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->SampleDescription =(GF_SampleDescriptionBox *)a; break; case GF_ISOM_BOX_TYPE_STZ2: case GF_ISOM_BOX_TYPE_STSZ: if (ptr->SampleSize) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->SampleSize = (GF_SampleSizeBox *)a; break; case GF_ISOM_BOX_TYPE_STSC: if (ptr->SampleToChunk) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->SampleToChunk = (GF_SampleToChunkBox *)a; break; case GF_ISOM_BOX_TYPE_PADB: if (ptr->PaddingBits) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->PaddingBits = (GF_PaddingBitsBox *) a; break; //WARNING: AS THIS MAY CHANGE DYNAMICALLY DURING EDIT, case GF_ISOM_BOX_TYPE_CO64: case GF_ISOM_BOX_TYPE_STCO: if (ptr->ChunkOffset) { gf_isom_box_del(ptr->ChunkOffset); } ptr->ChunkOffset = a; return GF_OK; case GF_ISOM_BOX_TYPE_STSH: if (ptr->ShadowSync) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->ShadowSync = (GF_ShadowSyncBox *)a; break; case GF_ISOM_BOX_TYPE_STDP: if (ptr->DegradationPriority) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->DegradationPriority = (GF_DegradationPriorityBox *)a; break; case GF_ISOM_BOX_TYPE_SDTP: if (ptr->SampleDep) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->SampleDep= (GF_SampleDependencyTypeBox *)a; break; case GF_ISOM_BOX_TYPE_SUBS: if (!ptr->sub_samples) ptr->sub_samples = gf_list_new(); gf_list_add(ptr->sub_samples, a); //check subsample box { GF_SubSampleInformationBox *subs = (GF_SubSampleInformationBox *)a; GF_SubSampleInfoEntry *ent = gf_list_get(subs->Samples, 0); if (!ent) { gf_list_rem(subs->Samples, 0); GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] first entry in SubSample in track SampleTable is invalid\n")); } else if (ent->sample_delta==0) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] first entry in SubSample in track SampleTable has sample_delta of 0, should be one. 
Fixing\n")); ent->sample_delta = 1; } } break; case GF_ISOM_BOX_TYPE_SBGP: if (!ptr->sampleGroups) ptr->sampleGroups = gf_list_new(); gf_list_add(ptr->sampleGroups, a); break; case GF_ISOM_BOX_TYPE_SGPD: if (!ptr->sampleGroupsDescription) ptr->sampleGroupsDescription = gf_list_new(); gf_list_add(ptr->sampleGroupsDescription, a); break; case GF_ISOM_BOX_TYPE_SAIZ: if (!ptr->sai_sizes) ptr->sai_sizes = gf_list_new(); gf_list_add(ptr->sai_sizes, a); break; case GF_ISOM_BOX_TYPE_SAIO: if (!ptr->sai_offsets) ptr->sai_offsets = gf_list_new(); gf_list_add(ptr->sai_offsets, a); break; default: return gf_isom_box_add_default((GF_Box *)ptr, a); } return GF_OK; } GF_Err stbl_Read(GF_Box *s, GF_BitStream *bs) { GF_Err e; //we need to parse DegPrior in a special way GF_SampleTableBox *ptr = (GF_SampleTableBox *)s; e = gf_isom_box_array_read(s, bs, stbl_AddBox); if (e) return e; if (!ptr->SyncSample) ptr->no_sync_found = 1; ptr->nb_sgpd_in_stbl = gf_list_count(ptr->sampleGroupsDescription); ptr->nb_other_boxes_in_stbl = gf_list_count(ptr->other_boxes); return GF_OK; } GF_Box *stbl_New() { ISOM_DECL_BOX_ALLOC(GF_SampleTableBox, GF_ISOM_BOX_TYPE_STBL); //maxSamplePer chunk is 10 by default tmp->MaxSamplePerChunk = 10; tmp->groupID = 1; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stbl_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SampleTableBox *ptr = (GF_SampleTableBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; if (ptr->SampleDescription) { e = gf_isom_box_write((GF_Box *) ptr->SampleDescription, bs); if (e) return e; } if (ptr->TimeToSample) { e = gf_isom_box_write((GF_Box *) ptr->TimeToSample, bs); if (e) return e; } if (ptr->CompositionOffset) { e = gf_isom_box_write((GF_Box *) ptr->CompositionOffset, bs); if (e) return e; } if (ptr->CompositionToDecode) { e = gf_isom_box_write((GF_Box *) ptr->CompositionToDecode, bs); if (e) return e; } if (ptr->SyncSample) { e = gf_isom_box_write((GF_Box *) ptr->SyncSample, bs); if (e) return e; } if (ptr->ShadowSync) { e = gf_isom_box_write((GF_Box *) ptr->ShadowSync, bs); if (e) return e; } if (ptr->SampleToChunk) { e = gf_isom_box_write((GF_Box *) ptr->SampleToChunk, bs); if (e) return e; } if (ptr->SampleSize) { e = gf_isom_box_write((GF_Box *) ptr->SampleSize, bs); if (e) return e; } if (ptr->ChunkOffset) { e = gf_isom_box_write(ptr->ChunkOffset, bs); if (e) return e; } if (ptr->DegradationPriority) { e = gf_isom_box_write((GF_Box *) ptr->DegradationPriority, bs); if (e) return e; } if (ptr->SampleDep && ptr->SampleDep->sampleCount) { e = gf_isom_box_write((GF_Box *) ptr->SampleDep, bs); if (e) return e; } if (ptr->PaddingBits) { e = gf_isom_box_write((GF_Box *) ptr->PaddingBits, bs); if (e) return e; } if (ptr->sub_samples) { e = gf_isom_box_array_write(s, ptr->sub_samples, bs); if (e) return e; } if (ptr->sampleGroupsDescription) { e = gf_isom_box_array_write(s, ptr->sampleGroupsDescription, bs); if (e) return e; } if (ptr->sampleGroups) { e = gf_isom_box_array_write(s, ptr->sampleGroups, bs); if (e) return e; } if (ptr->sai_sizes) { e = gf_isom_box_array_write(s, ptr->sai_sizes, bs); if (e) return e; } if (ptr->sai_offsets) { e = gf_isom_box_array_write(s, ptr->sai_offsets, bs); if (e) return e; } return GF_OK; } GF_Err stbl_Size(GF_Box *s) { GF_Err e; GF_SampleTableBox *ptr = (GF_SampleTableBox *)s; //Mandatory boxs (but not internally :) if (ptr->SampleDescription) { e = gf_isom_box_size((GF_Box *) ptr->SampleDescription); if (e) return e; ptr->size += ptr->SampleDescription->size; } if 
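	/* stbl_Size pattern: gf_isom_box_size() is run on each present child first, then the child's
	   computed ->size is added to the parent, as done for SampleDescription just above and for
	   SampleSize right below. */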
(ptr->SampleSize) { e = gf_isom_box_size((GF_Box *) ptr->SampleSize); if (e) return e; ptr->size += ptr->SampleSize->size; } if (ptr->SampleToChunk) { e = gf_isom_box_size((GF_Box *) ptr->SampleToChunk); if (e) return e; ptr->size += ptr->SampleToChunk->size; } if (ptr->TimeToSample) { e = gf_isom_box_size((GF_Box *) ptr->TimeToSample); if (e) return e; ptr->size += ptr->TimeToSample->size; } if (ptr->ChunkOffset) { e = gf_isom_box_size(ptr->ChunkOffset); if (e) return e; ptr->size += ptr->ChunkOffset->size; } //optional boxs if (ptr->CompositionOffset) { e = gf_isom_box_size((GF_Box *) ptr->CompositionOffset); if (e) return e; ptr->size += ptr->CompositionOffset->size; } if (ptr->CompositionToDecode) { e = gf_isom_box_size((GF_Box *) ptr->CompositionToDecode); if (e) return e; ptr->size += ptr->CompositionToDecode->size; } if (ptr->DegradationPriority) { e = gf_isom_box_size((GF_Box *) ptr->DegradationPriority); if (e) return e; ptr->size += ptr->DegradationPriority->size; } if (ptr->ShadowSync) { e = gf_isom_box_size((GF_Box *) ptr->ShadowSync); if (e) return e; ptr->size += ptr->ShadowSync->size; } if (ptr->SyncSample) { e = gf_isom_box_size((GF_Box *) ptr->SyncSample); if (e) return e; ptr->size += ptr->SyncSample->size; } if (ptr->SampleDep && ptr->SampleDep->sampleCount) { e = gf_isom_box_size((GF_Box *) ptr->SampleDep); if (e) return e; ptr->size += ptr->SampleDep->size; } //padb if (ptr->PaddingBits) { e = gf_isom_box_size((GF_Box *) ptr->PaddingBits); if (e) return e; ptr->size += ptr->PaddingBits->size; } if (ptr->sub_samples) { e = gf_isom_box_array_size(s, ptr->sub_samples); if (e) return e; } if (ptr->sampleGroups) { e = gf_isom_box_array_size(s, ptr->sampleGroups); if (e) return e; } if (ptr->sampleGroupsDescription) { e = gf_isom_box_array_size(s, ptr->sampleGroupsDescription); if (e) return e; } if (ptr->sai_sizes) { e = gf_isom_box_array_size(s, ptr->sai_sizes); if (e) return e; } if (ptr->sai_offsets) { e = gf_isom_box_array_size(s, ptr->sai_offsets); if (e) return e; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stco_del(GF_Box *s) { GF_ChunkOffsetBox *ptr = (GF_ChunkOffsetBox *)s; if (ptr == NULL) return; if (ptr->offsets) gf_free(ptr->offsets); gf_free(ptr); } GF_Err stco_Read(GF_Box *s, GF_BitStream *bs) { u32 entries; GF_ChunkOffsetBox *ptr = (GF_ChunkOffsetBox *)s; ptr->nb_entries = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); if (ptr->nb_entries > ptr->size / 4) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stco\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } if (ptr->nb_entries) { ptr->offsets = (u32 *) gf_malloc(ptr->nb_entries * sizeof(u32) ); if (ptr->offsets == NULL) return GF_OUT_OF_MEM; ptr->alloc_size = ptr->nb_entries; for (entries = 0; entries < ptr->nb_entries; entries++) { ptr->offsets[entries] = gf_bs_read_u32(bs); } } return GF_OK; } GF_Box *stco_New() { ISOM_DECL_BOX_ALLOC(GF_ChunkOffsetBox, GF_ISOM_BOX_TYPE_STCO); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stco_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_ChunkOffsetBox *ptr = (GF_ChunkOffsetBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nb_entries); for (i = 0; i < ptr->nb_entries; i++) { gf_bs_write_u32(bs, ptr->offsets[i]); } return GF_OK; } GF_Err stco_Size(GF_Box *s) { GF_ChunkOffsetBox *ptr = (GF_ChunkOffsetBox *)s; ptr->size += 4 + (4 * ptr->nb_entries); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stdp_del(GF_Box *s) { GF_DegradationPriorityBox *ptr = 
(GF_DegradationPriorityBox *)s; if (ptr == NULL ) return; if (ptr->priorities) gf_free(ptr->priorities); gf_free(ptr); } //this is called through stbl_read... GF_Err stdp_Read(GF_Box *s, GF_BitStream *bs) { u32 entry; GF_DegradationPriorityBox *ptr = (GF_DegradationPriorityBox *)s; /*out-of-order stdp, assume no padding at the end and take the entire remaining data for entries*/ if (!ptr->nb_entries) ptr->nb_entries = (u32) ptr->size / 2; else if (ptr->nb_entries > ptr->size / 2) return GF_ISOM_INVALID_FILE; ptr->priorities = (u16 *) gf_malloc(ptr->nb_entries * sizeof(u16)); if (ptr->priorities == NULL) return GF_OUT_OF_MEM; for (entry = 0; entry < ptr->nb_entries; entry++) { ptr->priorities[entry] = gf_bs_read_u16(bs); } ISOM_DECREASE_SIZE(ptr, (2*ptr->nb_entries) ); return GF_OK; } GF_Box *stdp_New() { ISOM_DECL_BOX_ALLOC(GF_DegradationPriorityBox, GF_ISOM_BOX_TYPE_STDP); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stdp_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_DegradationPriorityBox *ptr = (GF_DegradationPriorityBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; for (i = 0; i < ptr->nb_entries; i++) { gf_bs_write_u16(bs, ptr->priorities[i]); } return GF_OK; } GF_Err stdp_Size(GF_Box *s) { GF_DegradationPriorityBox *ptr = (GF_DegradationPriorityBox *)s; ptr->size += (2 * ptr->nb_entries); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stsc_del(GF_Box *s) { GF_SampleToChunkBox *ptr = (GF_SampleToChunkBox *)s; if (ptr == NULL) return; if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err stsc_Read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_SampleToChunkBox *ptr = (GF_SampleToChunkBox *)s; ptr->nb_entries = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); if (ptr->nb_entries > ptr->size / 12) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stsc\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } ptr->alloc_size = ptr->nb_entries; ptr->entries = gf_malloc(sizeof(GF_StscEntry)*ptr->alloc_size); if (!ptr->entries) return GF_OUT_OF_MEM; for (i = 0; i < ptr->nb_entries; i++) { ptr->entries[i].firstChunk = gf_bs_read_u32(bs); ptr->entries[i].samplesPerChunk = gf_bs_read_u32(bs); ptr->entries[i].sampleDescriptionIndex = gf_bs_read_u32(bs); ptr->entries[i].isEdited = 0; ptr->entries[i].nextChunk = 0; if (!ptr->entries[i].firstChunk) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] invalid first chunk 0 in stsc entry\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } //update the next chunk in the previous entry if (i) ptr->entries[i-1].nextChunk = ptr->entries[i].firstChunk; } ptr->currentIndex = 0; ptr->firstSampleInCurrentChunk = 0; ptr->currentChunk = 0; ptr->ghostNumber = 0; return GF_OK; } GF_Box *stsc_New() { ISOM_DECL_BOX_ALLOC(GF_SampleToChunkBox, GF_ISOM_BOX_TYPE_STSC); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stsc_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_SampleToChunkBox *ptr = (GF_SampleToChunkBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nb_entries); for (i=0; i<ptr->nb_entries; i++) { gf_bs_write_u32(bs, ptr->entries[i].firstChunk); gf_bs_write_u32(bs, ptr->entries[i].samplesPerChunk); gf_bs_write_u32(bs, ptr->entries[i].sampleDescriptionIndex); } return GF_OK; } GF_Err stsc_Size(GF_Box *s) { GF_SampleToChunkBox *ptr = (GF_SampleToChunkBox *)s; ptr->size += 4 + (12 * ptr->nb_entries); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stsd_del(GF_Box *s) { GF_SampleDescriptionBox *ptr = 
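/* Reminder on the 'stsc' entries read above: each entry describes a run starting at firstChunk and
   implicitly ending where the next entry begins; nextChunk is the derived cache filled in that loop.
   Hypothetical example: entry0 {firstChunk=1, samplesPerChunk=3} and entry1 {firstChunk=5,
   samplesPerChunk=1} mean chunks 1..4 carry 3 samples each and chunks 5..last carry one sample. */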
(GF_SampleDescriptionBox *)s;
	if (ptr == NULL) return;
	gf_free(ptr);
}

GF_Err stsd_AddBox(GF_Box *s, GF_Box *a)
{
	GF_UnknownBox *def;
	GF_SampleDescriptionBox *ptr = (GF_SampleDescriptionBox *)s;
	if (!a) return GF_OK;
	if (gf_box_valid_in_parent(a, "stsd")) {
		return gf_isom_box_add_default((GF_Box*)ptr, a);
	}
	switch (a->type) {
	//unknown sample description: we need a specific box to handle the data ref index
	//rather than a default box ...
	case GF_ISOM_BOX_TYPE_UNKNOWN:
		def = (GF_UnknownBox *)a;
		/*we need at least 8 bytes for unknown sample entries*/
		if (def->dataSize < 8) {
			gf_isom_box_del(a);
			return GF_OK;
		}
		return gf_isom_box_add_default((GF_Box*)ptr, a);
	default:
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Cannot process box of type %s\n", gf_4cc_to_str(a->type)));
		return GF_ISOM_INVALID_FILE;
	}
}

GF_Err stsd_Read(GF_Box *s, GF_BitStream *bs)
{
	gf_bs_read_u32(bs);
	ISOM_DECREASE_SIZE(s, 4)
	return gf_isom_box_array_read_ex(s, bs, stsd_AddBox, GF_ISOM_BOX_TYPE_STSD);
}

GF_Box *stsd_New()
{
	ISOM_DECL_BOX_ALLOC(GF_SampleDescriptionBox, GF_ISOM_BOX_TYPE_STSD);
	tmp->other_boxes = gf_list_new();
	return (GF_Box *)tmp;
}

#ifndef GPAC_DISABLE_ISOM_WRITE

GF_Err stsd_Write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	u32 nb_entries;
	GF_SampleDescriptionBox *ptr = (GF_SampleDescriptionBox *)s;
	e = gf_isom_full_box_write(s, bs);
	if (e) return e;
	nb_entries = gf_list_count(ptr->other_boxes);
	gf_bs_write_u32(bs, nb_entries);
	return GF_OK;
}

GF_Err stsd_Size(GF_Box *s)
{
	GF_SampleDescriptionBox *ptr = (GF_SampleDescriptionBox *)s;
	ptr->size += 4;
	return GF_OK;
}

#endif /*GPAC_DISABLE_ISOM_WRITE*/

void stsh_del(GF_Box *s)
{
	u32 i = 0;
	GF_StshEntry *ent;
	GF_ShadowSyncBox *ptr = (GF_ShadowSyncBox *)s;
	if (ptr == NULL) return;
	while ( (ent = (GF_StshEntry *)gf_list_enum(ptr->entries, &i)) ) {
		gf_free(ent);
	}
	gf_list_del(ptr->entries);
	gf_free(ptr);
}

GF_Err stsh_Read(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	u32 count, i;
	GF_StshEntry *ent;
	GF_ShadowSyncBox *ptr = (GF_ShadowSyncBox *)s;
	count = gf_bs_read_u32(bs);
	ISOM_DECREASE_SIZE(ptr, 4);
	//each entry is 8 bytes: reject counts that cannot fit in the remaining box payload
	if (count > ptr->size / 8) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stsh\n", count));
		return GF_ISOM_INVALID_FILE;
	}
	for (i = 0; i < count; i++) {
		ent = (GF_StshEntry *) gf_malloc(sizeof(GF_StshEntry));
		if (!ent) return GF_OUT_OF_MEM;
		ent->shadowedSampleNumber = gf_bs_read_u32(bs);
		ent->syncSampleNumber = gf_bs_read_u32(bs);
		e = gf_list_add(ptr->entries, ent);
		if (e) return e;
	}
	return GF_OK;
}

GF_Box *stsh_New()
{
	ISOM_DECL_BOX_ALLOC(GF_ShadowSyncBox, GF_ISOM_BOX_TYPE_STSH);
	tmp->entries = gf_list_new();
	if (!tmp->entries) {
		gf_free(tmp);
		return NULL;
	}
	return (GF_Box *)tmp;
}

#ifndef GPAC_DISABLE_ISOM_WRITE

GF_Err stsh_Write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	u32 i;
	GF_StshEntry *ent;
	GF_ShadowSyncBox *ptr = (GF_ShadowSyncBox *)s;
	e = gf_isom_full_box_write(s, bs);
	if (e) return e;
	gf_bs_write_u32(bs, gf_list_count(ptr->entries));
	i=0;
	while ((ent = (GF_StshEntry *)gf_list_enum(ptr->entries, &i))) {
		gf_bs_write_u32(bs, ent->shadowedSampleNumber);
		gf_bs_write_u32(bs, ent->syncSampleNumber);
	}
	return GF_OK;
}

GF_Err stsh_Size(GF_Box *s)
{
	GF_ShadowSyncBox *ptr = (GF_ShadowSyncBox *)s;
	ptr->size += 4 + (8 * gf_list_count(ptr->entries));
	return GF_OK;
}

#endif /*GPAC_DISABLE_ISOM_WRITE*/

void stss_del(GF_Box *s)
{
	GF_SyncSampleBox *ptr = (GF_SyncSampleBox *)s;
	if (ptr == NULL) return;
	if (ptr->sampleNumbers) gf_free(ptr->sampleNumbers);
	gf_free(ptr);
}

GF_Err stss_Read(GF_Box *s, GF_BitStream *bs)
{
	u32 i;
	GF_SyncSampleBox *ptr = (GF_SyncSampleBox *)s;
	ptr->nb_entries = gf_bs_read_u32(bs);
	ISOM_DECREASE_SIZE(ptr, 4);
	if (ptr->nb_entries > ptr->size / 4) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] 
Invalid number of entries %d in stss\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } ptr->alloc_size = ptr->nb_entries; ptr->sampleNumbers = (u32 *) gf_malloc( ptr->alloc_size * sizeof(u32)); if (ptr->sampleNumbers == NULL) return GF_OUT_OF_MEM; for (i = 0; i < ptr->nb_entries; i++) { ptr->sampleNumbers[i] = gf_bs_read_u32(bs); } return GF_OK; } GF_Box *stss_New() { ISOM_DECL_BOX_ALLOC(GF_SyncSampleBox, GF_ISOM_BOX_TYPE_STSS); return (GF_Box*)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stss_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_SyncSampleBox *ptr = (GF_SyncSampleBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nb_entries); for (i = 0; i < ptr->nb_entries; i++) { gf_bs_write_u32(bs, ptr->sampleNumbers[i]); } return GF_OK; } GF_Err stss_Size(GF_Box *s) { GF_SyncSampleBox *ptr = (GF_SyncSampleBox *)s; ptr->size += 4 + (4 * ptr->nb_entries); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stsz_del(GF_Box *s) { GF_SampleSizeBox *ptr = (GF_SampleSizeBox *)s; if (ptr == NULL) return; if (ptr->sizes) gf_free(ptr->sizes); gf_free(ptr); } GF_Err stsz_Read(GF_Box *s, GF_BitStream *bs) { u32 i, estSize; GF_SampleSizeBox *ptr = (GF_SampleSizeBox *)s; if (ptr == NULL) return GF_BAD_PARAM; //support for CompactSizes if (s->type == GF_ISOM_BOX_TYPE_STSZ) { ptr->sampleSize = gf_bs_read_u32(bs); ptr->sampleCount = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 8); } else { //24-reserved gf_bs_read_int(bs, 24); i = gf_bs_read_u8(bs); ptr->sampleCount = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 8); switch (i) { case 4: case 8: case 16: ptr->sampleSize = i; break; default: //try to fix the file //no samples, no parsing pb if (!ptr->sampleCount) { ptr->sampleSize = 16; return GF_OK; } estSize = (u32) (ptr->size) / ptr->sampleCount; if (!estSize && ((ptr->sampleCount+1)/2 == (ptr->size)) ) { ptr->sampleSize = 4; break; } else if (estSize == 1 || estSize == 2) { ptr->sampleSize = 8 * estSize; } else { return GF_ISOM_INVALID_FILE; } } } if (s->type == GF_ISOM_BOX_TYPE_STSZ) { if (! ptr->sampleSize && ptr->sampleCount) { if (ptr->sampleCount > ptr->size / 4) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stsz\n", ptr->sampleCount)); return GF_ISOM_INVALID_FILE; } ptr->sizes = (u32 *) gf_malloc(ptr->sampleCount * sizeof(u32)); ptr->alloc_size = ptr->sampleCount; if (! ptr->sizes) return GF_OUT_OF_MEM; for (i = 0; i < ptr->sampleCount; i++) { ptr->sizes[i] = gf_bs_read_u32(bs); } } } else { if (ptr->sampleSize==4) { if (ptr->sampleCount / 2 > ptr->size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stsz\n", ptr->sampleCount)); return GF_ISOM_INVALID_FILE; } } else { if (ptr->sampleCount > ptr->size / (ptr->sampleSize/8)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stsz\n", ptr->sampleCount)); return GF_ISOM_INVALID_FILE; } } //note we could optimize the mem usage by keeping the table compact //in memory. But that would complicate both caching and editing //we therefore keep all sizes as u32 and uncompress the table ptr->sizes = (u32 *) gf_malloc(ptr->sampleCount * sizeof(u32)); if (! 
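		/* compact 'stz2' table: a 4-bit field size packs two sample sizes per byte (first sample in
		   the high nibble) with 4 zero padding bits when sampleCount is odd; e.g. sizes {3,7,2}
		   would be stored as bytes 0x37 0x20 (illustrative values). */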
ptr->sizes) return GF_OUT_OF_MEM; ptr->alloc_size = ptr->sampleCount; for (i = 0; i < ptr->sampleCount; ) { switch (ptr->sampleSize) { case 4: ptr->sizes[i] = gf_bs_read_int(bs, 4); if (i+1 < ptr->sampleCount) { ptr->sizes[i+1] = gf_bs_read_int(bs, 4); } else { //0 padding in odd sample count gf_bs_read_int(bs, 4); } i += 2; break; default: ptr->sizes[i] = gf_bs_read_int(bs, ptr->sampleSize); i += 1; break; } } } return GF_OK; } GF_Box *stsz_New() { ISOM_DECL_BOX_ALLOC(GF_SampleSizeBox, 0); //type is unknown here, can be regular or compact table return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stsz_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_SampleSizeBox *ptr = (GF_SampleSizeBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; //in both versions this is still valid if (ptr->type == GF_ISOM_BOX_TYPE_STSZ) { gf_bs_write_u32(bs, ptr->sampleSize); } else { gf_bs_write_u24(bs, 0); gf_bs_write_u8(bs, ptr->sampleSize); } gf_bs_write_u32(bs, ptr->sampleCount); if (ptr->type == GF_ISOM_BOX_TYPE_STSZ) { if (! ptr->sampleSize) { for (i = 0; i < ptr->sampleCount; i++) { gf_bs_write_u32(bs, ptr->sizes ? ptr->sizes[i] : 0); } } } else { for (i = 0; i < ptr->sampleCount; ) { switch (ptr->sampleSize) { case 4: gf_bs_write_int(bs, ptr->sizes[i], 4); if (i+1 < ptr->sampleCount) { gf_bs_write_int(bs, ptr->sizes[i+1], 4); } else { //0 padding in odd sample count gf_bs_write_int(bs, 0, 4); } i += 2; break; default: gf_bs_write_int(bs, ptr->sizes[i], ptr->sampleSize); i += 1; break; } } } return GF_OK; } GF_Err stsz_Size(GF_Box *s) { u32 i, fieldSize, size; GF_SampleSizeBox *ptr = (GF_SampleSizeBox *)s; ptr->size += 8; if (!ptr->sampleCount) return GF_OK; //regular table if (ptr->type == GF_ISOM_BOX_TYPE_STSZ) { if (ptr->sampleSize) return GF_OK; ptr->size += (4 * ptr->sampleCount); return GF_OK; } fieldSize = 4; size = ptr->sizes[0]; for (i=0; i < ptr->sampleCount; i++) { if (ptr->sizes[i] <= 0xF) continue; //switch to 8-bit table else if (ptr->sizes[i] <= 0xFF) { fieldSize = 8; } //switch to 16-bit table else if (ptr->sizes[i] <= 0xFFFF) { fieldSize = 16; } //switch to 32-bit table else { fieldSize = 32; } //check the size if (size != ptr->sizes[i]) size = 0; } //if all samples are of the same size, switch to regular (more compact) if (size) { ptr->type = GF_ISOM_BOX_TYPE_STSZ; ptr->sampleSize = size; gf_free(ptr->sizes); ptr->sizes = NULL; } if (fieldSize == 32) { //oops, doesn't fit in a compact table ptr->type = GF_ISOM_BOX_TYPE_STSZ; ptr->size += (4 * ptr->sampleCount); return GF_OK; } //make sure we are a compact table (no need to change the mem representation) ptr->type = GF_ISOM_BOX_TYPE_STZ2; ptr->sampleSize = fieldSize; if (fieldSize == 4) { //do not forget the 0 padding field for odd count ptr->size += (ptr->sampleCount + 1) / 2; } else { ptr->size += (ptr->sampleCount) * (fieldSize/8); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stts_del(GF_Box *s) { GF_TimeToSampleBox *ptr = (GF_TimeToSampleBox *)s; if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err stts_Read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_TimeToSampleBox *ptr = (GF_TimeToSampleBox *)s; #ifndef GPAC_DISABLE_ISOM_WRITE ptr->w_LastDTS = 0; #endif ptr->nb_entries = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); if (ptr->nb_entries > ptr->size / 8) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stts\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } ptr->alloc_size = ptr->nb_entries; ptr->entries = 
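	/* each 'stts' entry is (sampleCount, sampleDelta); decode times are the running sum of the
	   deltas, and the w_currentSampleNum / w_LastDTS fields updated in the loop below keep that
	   running total for the write/edit path. */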
gf_malloc(sizeof(GF_SttsEntry)*ptr->alloc_size); if (!ptr->entries) return GF_OUT_OF_MEM; for (i=0; i<ptr->nb_entries; i++) { ptr->entries[i].sampleCount = gf_bs_read_u32(bs); ptr->entries[i].sampleDelta = gf_bs_read_u32(bs); #ifndef GPAC_DISABLE_ISOM_WRITE ptr->w_currentSampleNum += ptr->entries[i].sampleCount; ptr->w_LastDTS += (u64)ptr->entries[i].sampleCount * ptr->entries[i].sampleDelta; #endif if (!ptr->entries[i].sampleDelta) { if ((i+1<ptr->nb_entries) ) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Found stts entry with sample_delta=0 - forbidden ! Fixing to 1\n" )); ptr->entries[i].sampleDelta = 1; } else if (ptr->entries[i].sampleCount>1) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] more than one stts entry at the end of the track with sample_delta=0 - forbidden ! Fixing to 1\n" )); ptr->entries[i].sampleDelta = 1; } } else if ((s32) ptr->entries[i].sampleDelta < 0) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] stts entry %d has negative duration %d - forbidden ! Fixing to 1, sync may get lost (consider reimport raw media)\n", i, (s32) ptr->entries[i].sampleDelta )); ptr->entries[i].sampleDelta = 1; } } if (ptr->size<(ptr->nb_entries*8)) return GF_ISOM_INVALID_FILE; ISOM_DECREASE_SIZE(ptr, ptr->nb_entries*8); //remove the last sample delta. #ifndef GPAC_DISABLE_ISOM_WRITE if (ptr->nb_entries) ptr->w_LastDTS -= ptr->entries[ptr->nb_entries-1].sampleDelta; #endif return GF_OK; } GF_Box *stts_New() { ISOM_DECL_BOX_ALLOC(GF_TimeToSampleBox, GF_ISOM_BOX_TYPE_STTS); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stts_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_TimeToSampleBox *ptr = (GF_TimeToSampleBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nb_entries); for (i=0; i<ptr->nb_entries; i++) { gf_bs_write_u32(bs, ptr->entries[i].sampleCount); gf_bs_write_u32(bs, ptr->entries[i].sampleDelta); } return GF_OK; } GF_Err stts_Size(GF_Box *s) { GF_TimeToSampleBox *ptr = (GF_TimeToSampleBox *)s; ptr->size += 4 + (8 * ptr->nb_entries); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void tfhd_del(GF_Box *s) { GF_TrackFragmentHeaderBox *ptr = (GF_TrackFragmentHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err tfhd_Read(GF_Box *s, GF_BitStream *bs) { GF_TrackFragmentHeaderBox *ptr = (GF_TrackFragmentHeaderBox *)s; ptr->trackID = gf_bs_read_u32(bs); //The rest depends on the flags if (ptr->flags & GF_ISOM_TRAF_BASE_OFFSET) { ptr->base_data_offset = gf_bs_read_u64(bs); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DESC) { ptr->sample_desc_index = gf_bs_read_u32(bs); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DUR) { ptr->def_sample_duration = gf_bs_read_u32(bs); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_SIZE) { ptr->def_sample_size = gf_bs_read_u32(bs); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_FLAGS) { ptr->def_sample_flags = gf_bs_read_u32(bs); } return GF_OK; } GF_Box *tfhd_New() { ISOM_DECL_BOX_ALLOC(GF_TrackFragmentHeaderBox, GF_ISOM_BOX_TYPE_TFHD); //NO FLAGS SET BY DEFAULT return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tfhd_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TrackFragmentHeaderBox *ptr = (GF_TrackFragmentHeaderBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->trackID); //The rest depends on the flags if (ptr->flags & GF_ISOM_TRAF_BASE_OFFSET) { gf_bs_write_u64(bs, ptr->base_data_offset); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DESC) { gf_bs_write_u32(bs, 
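		/* sample_description_index: only serialized when GF_ISOM_TRAF_SAMPLE_DESC is set in the
		   tfhd flags, mirroring the optional-field layout used in tfhd_Read and tfhd_Size */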
ptr->sample_desc_index); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DUR) { gf_bs_write_u32(bs, ptr->def_sample_duration); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_SIZE) { gf_bs_write_u32(bs, ptr->def_sample_size); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_FLAGS) { gf_bs_write_u32(bs, ptr->def_sample_flags); } return GF_OK; } GF_Err tfhd_Size(GF_Box *s) { GF_TrackFragmentHeaderBox *ptr = (GF_TrackFragmentHeaderBox *)s; ptr->size += 4; //The rest depends on the flags if (ptr->flags & GF_ISOM_TRAF_BASE_OFFSET) ptr->size += 8; if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DESC) ptr->size += 4; if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DUR) ptr->size += 4; if (ptr->flags & GF_ISOM_TRAF_SAMPLE_SIZE) ptr->size += 4; if (ptr->flags & GF_ISOM_TRAF_SAMPLE_FLAGS) ptr->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ void tims_del(GF_Box *s) { GF_TSHintEntryBox *tims = (GF_TSHintEntryBox *)s; gf_free(tims); } GF_Err tims_Read(GF_Box *s, GF_BitStream *bs) { GF_TSHintEntryBox *ptr = (GF_TSHintEntryBox *)s; ptr->timeScale = gf_bs_read_u32(bs); return GF_OK; } GF_Box *tims_New() { ISOM_DECL_BOX_ALLOC(GF_TSHintEntryBox, GF_ISOM_BOX_TYPE_TIMS); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tims_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TSHintEntryBox *ptr = (GF_TSHintEntryBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->timeScale); return GF_OK; } GF_Err tims_Size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void tkhd_del(GF_Box *s) { GF_TrackHeaderBox *ptr = (GF_TrackHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); return; } GF_Err tkhd_Read(GF_Box *s, GF_BitStream *bs) { GF_TrackHeaderBox *ptr = (GF_TrackHeaderBox *)s; if (ptr->version == 1) { ptr->creationTime = gf_bs_read_u64(bs); ptr->modificationTime = gf_bs_read_u64(bs); ptr->trackID = gf_bs_read_u32(bs); ptr->reserved1 = gf_bs_read_u32(bs); ptr->duration = gf_bs_read_u64(bs); } else { ptr->creationTime = gf_bs_read_u32(bs); ptr->modificationTime = gf_bs_read_u32(bs); ptr->trackID = gf_bs_read_u32(bs); ptr->reserved1 = gf_bs_read_u32(bs); ptr->duration = gf_bs_read_u32(bs); } ptr->reserved2[0] = gf_bs_read_u32(bs); ptr->reserved2[1] = gf_bs_read_u32(bs); ptr->layer = gf_bs_read_u16(bs); ptr->alternate_group = gf_bs_read_u16(bs); ptr->volume = gf_bs_read_u16(bs); ptr->reserved3 = gf_bs_read_u16(bs); ptr->matrix[0] = gf_bs_read_u32(bs); ptr->matrix[1] = gf_bs_read_u32(bs); ptr->matrix[2] = gf_bs_read_u32(bs); ptr->matrix[3] = gf_bs_read_u32(bs); ptr->matrix[4] = gf_bs_read_u32(bs); ptr->matrix[5] = gf_bs_read_u32(bs); ptr->matrix[6] = gf_bs_read_u32(bs); ptr->matrix[7] = gf_bs_read_u32(bs); ptr->matrix[8] = gf_bs_read_u32(bs); ptr->width = gf_bs_read_u32(bs); ptr->height = gf_bs_read_u32(bs); return GF_OK; } GF_Box *tkhd_New() { ISOM_DECL_BOX_ALLOC(GF_TrackHeaderBox, GF_ISOM_BOX_TYPE_TKHD); tmp->matrix[0] = 0x00010000; tmp->matrix[4] = 0x00010000; tmp->matrix[8] = 0x40000000; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tkhd_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TrackHeaderBox *ptr = (GF_TrackHeaderBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->version == 1) { gf_bs_write_u64(bs, ptr->creationTime); gf_bs_write_u64(bs, ptr->modificationTime); gf_bs_write_u32(bs, ptr->trackID); gf_bs_write_u32(bs, ptr->reserved1); gf_bs_write_u64(bs, ptr->duration); } else { gf_bs_write_u32(bs, (u32) ptr->creationTime); gf_bs_write_u32(bs, (u32) 
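		/* version 0 'tkhd': times and duration are truncated to 32 bits on write; the 64-bit
		   variants are only used when version==1, which tkhd_Size selects from the duration */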
ptr->modificationTime); gf_bs_write_u32(bs, ptr->trackID); gf_bs_write_u32(bs, ptr->reserved1); gf_bs_write_u32(bs, (u32) ptr->duration); } gf_bs_write_u32(bs, ptr->reserved2[0]); gf_bs_write_u32(bs, ptr->reserved2[1]); gf_bs_write_u16(bs, ptr->layer); gf_bs_write_u16(bs, ptr->alternate_group); gf_bs_write_u16(bs, ptr->volume); gf_bs_write_u16(bs, ptr->reserved3); gf_bs_write_u32(bs, ptr->matrix[0]); gf_bs_write_u32(bs, ptr->matrix[1]); gf_bs_write_u32(bs, ptr->matrix[2]); gf_bs_write_u32(bs, ptr->matrix[3]); gf_bs_write_u32(bs, ptr->matrix[4]); gf_bs_write_u32(bs, ptr->matrix[5]); gf_bs_write_u32(bs, ptr->matrix[6]); gf_bs_write_u32(bs, ptr->matrix[7]); gf_bs_write_u32(bs, ptr->matrix[8]); gf_bs_write_u32(bs, ptr->width); gf_bs_write_u32(bs, ptr->height); return GF_OK; } GF_Err tkhd_Size(GF_Box *s) { GF_TrackHeaderBox *ptr = (GF_TrackHeaderBox *)s; if (ptr->duration==(u64) -1) ptr->version = 0; else ptr->version = (ptr->duration>0xFFFFFFFF) ? 1 : 0; ptr->size += (ptr->version == 1) ? 32 : 20; ptr->size += 60; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void traf_del(GF_Box *s) { GF_TrackFragmentBox *ptr = (GF_TrackFragmentBox *)s; if (ptr == NULL) return; if (ptr->tfhd) gf_isom_box_del((GF_Box *) ptr->tfhd); if (ptr->sdtp) gf_isom_box_del((GF_Box *) ptr->sdtp); if (ptr->sub_samples) gf_isom_box_array_del(ptr->sub_samples); if (ptr->tfdt) gf_isom_box_del((GF_Box *) ptr->tfdt); if (ptr->sample_encryption) gf_isom_box_del((GF_Box *) ptr->sample_encryption); gf_isom_box_array_del(ptr->TrackRuns); if (ptr->sampleGroups) gf_isom_box_array_del(ptr->sampleGroups); if (ptr->sampleGroupsDescription) gf_isom_box_array_del(ptr->sampleGroupsDescription); if (ptr->sai_sizes) gf_isom_box_array_del(ptr->sai_sizes); if (ptr->sai_offsets) gf_isom_box_array_del(ptr->sai_offsets); gf_free(ptr); } GF_Err traf_AddBox(GF_Box *s, GF_Box *a) { GF_TrackFragmentBox *ptr = (GF_TrackFragmentBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_TFHD: if (ptr->tfhd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->tfhd = (GF_TrackFragmentHeaderBox *) a; return GF_OK; case GF_ISOM_BOX_TYPE_TRUN: return gf_list_add(ptr->TrackRuns, a); case GF_ISOM_BOX_TYPE_SDTP: if (ptr->sdtp) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->sdtp = (GF_SampleDependencyTypeBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_TFDT: if (ptr->tfdt) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->tfdt = (GF_TFBaseMediaDecodeTimeBox*) a; return GF_OK; case GF_ISOM_BOX_TYPE_SUBS: if (!ptr->sub_samples) ptr->sub_samples = gf_list_new(); return gf_list_add(ptr->sub_samples, a); case GF_ISOM_BOX_TYPE_SBGP: if (!ptr->sampleGroups) ptr->sampleGroups = gf_list_new(); gf_list_add(ptr->sampleGroups, a); return GF_OK; case GF_ISOM_BOX_TYPE_SGPD: if (!ptr->sampleGroupsDescription) ptr->sampleGroupsDescription = gf_list_new(); gf_list_add(ptr->sampleGroupsDescription, a); return GF_OK; case GF_ISOM_BOX_TYPE_SAIZ: if (!ptr->sai_sizes) ptr->sai_sizes = gf_list_new(); gf_list_add(ptr->sai_sizes, a); return GF_OK; case GF_ISOM_BOX_TYPE_SAIO: if (!ptr->sai_offsets) ptr->sai_offsets = gf_list_new(); gf_list_add(ptr->sai_offsets, a); return GF_OK; //we will throw an error if both PIFF_PSEC and SENC are found. 
Not such files seen yet case GF_ISOM_BOX_TYPE_UUID: if ( ((GF_UUIDBox *)a)->internal_4cc==GF_ISOM_BOX_UUID_PSEC) { if (ptr->sample_encryption) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->sample_encryption = (GF_SampleEncryptionBox *)a; ptr->sample_encryption->traf = ptr; return GF_OK; } else { return gf_isom_box_add_default(s, a); } case GF_ISOM_BOX_TYPE_SENC: if (ptr->sample_encryption) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->sample_encryption = (GF_SampleEncryptionBox *)a; ptr->sample_encryption->traf = ptr; return GF_OK; default: return gf_isom_box_add_default(s, a); } return GF_OK; } GF_Err traf_Read(GF_Box *s, GF_BitStream *bs) { GF_TrackFragmentBox *ptr = (GF_TrackFragmentBox *)s; GF_Err e = gf_isom_box_array_read(s, bs, traf_AddBox); if (e) return e; if (!ptr->tfhd) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing TrackFragmentHeaderBox \n")); return GF_ISOM_INVALID_FILE; } return GF_OK; } GF_Box *traf_New() { ISOM_DECL_BOX_ALLOC(GF_TrackFragmentBox, GF_ISOM_BOX_TYPE_TRAF); tmp->TrackRuns = gf_list_new(); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Box *tfxd_New() { ISOM_DECL_BOX_ALLOC(GF_MSSTimeExtBox, GF_ISOM_BOX_TYPE_UUID); tmp->internal_4cc = GF_ISOM_BOX_UUID_TFXD; return (GF_Box *)tmp; } void tfxd_del(GF_Box *s) { gf_free(s); } GF_Err tfxd_Read(GF_Box *s, GF_BitStream *bs) { GF_MSSTimeExtBox *ptr = (GF_MSSTimeExtBox *)s; if (ptr->size<4) return GF_ISOM_INVALID_FILE; ptr->version = gf_bs_read_u8(bs); ptr->flags = gf_bs_read_u24(bs); ISOM_DECREASE_SIZE(ptr, 4); if (ptr->version == 0x01) { ptr->absolute_time_in_track_timescale = gf_bs_read_u64(bs); ptr->fragment_duration_in_track_timescale = gf_bs_read_u64(bs); } else { ptr->absolute_time_in_track_timescale = gf_bs_read_u32(bs); ptr->fragment_duration_in_track_timescale = gf_bs_read_u32(bs); } return GF_OK; } GF_Err tfxd_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e = GF_OK; GF_MSSTimeExtBox *uuid = (GF_MSSTimeExtBox*)s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u8(bs, 1); gf_bs_write_u24(bs, 0); gf_bs_write_u64(bs, uuid->absolute_time_in_track_timescale); gf_bs_write_u64(bs, uuid->fragment_duration_in_track_timescale); return GF_OK; } GF_Err tfxd_Size(GF_Box *s) { s->size += 20; return GF_OK; } GF_Err traf_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TrackFragmentBox *ptr = (GF_TrackFragmentBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; //Header first if (ptr->tfhd) { e = gf_isom_box_write((GF_Box *) ptr->tfhd, bs); if (e) return e; } if (ptr->sub_samples) { e = gf_isom_box_array_write(s, ptr->sub_samples, bs); if (e) return e; } if (ptr->tfdt) { e = gf_isom_box_write((GF_Box *) ptr->tfdt, bs); if (e) return e; } if (ptr->sdtp) { e = gf_isom_box_write((GF_Box *) ptr->sdtp, bs); if (e) return e; } if (ptr->sampleGroupsDescription) { e = gf_isom_box_array_write(s, ptr->sampleGroupsDescription, bs); if (e) return e; } if (ptr->sampleGroups) { e = gf_isom_box_array_write(s, ptr->sampleGroups, bs); if (e) return e; } if (ptr->sai_sizes) { e = gf_isom_box_array_write(s, ptr->sai_sizes, bs); if (e) return e; } if (ptr->sai_offsets) { e = gf_isom_box_array_write(s, ptr->sai_offsets, bs); if (e) return e; } e = gf_isom_box_array_write(s, ptr->TrackRuns, bs); if (e) return e; if (ptr->sample_encryption) { e = gf_isom_box_write((GF_Box *) ptr->sample_encryption, bs); if (e) return e; } //tfxd should be last ... 
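	/* serialization order used above for traf children: tfhd, subs, tfdt, sdtp, sgpd, sbgp, saiz,
	   saio, all trun boxes, then senc/PIFF PSEC, with the MSS tfxd UUID written last */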
if (ptr->tfxd) { e = gf_isom_box_write((GF_Box *) ptr->tfxd, bs); if (e) return e; } return GF_OK; } GF_Err traf_Size(GF_Box *s) { GF_Err e; GF_TrackFragmentBox *ptr = (GF_TrackFragmentBox *)s; if (ptr->tfhd) { e = gf_isom_box_size((GF_Box *) ptr->tfhd); if (e) return e; ptr->size += ptr->tfhd->size; } if (ptr->sub_samples) { e = gf_isom_box_array_size(s, ptr->sub_samples); if (e) return e; } if (ptr->sdtp) { e = gf_isom_box_size((GF_Box *) ptr->sdtp); if (e) return e; ptr->size += ptr->sdtp->size; } if (ptr->tfdt) { e = gf_isom_box_size((GF_Box *) ptr->tfdt); if (e) return e; ptr->size += ptr->tfdt->size; } if (ptr->sampleGroups) { e = gf_isom_box_array_size(s, ptr->sampleGroups); if (e) return e; } if (ptr->sampleGroupsDescription) { e = gf_isom_box_array_size(s, ptr->sampleGroupsDescription); if (e) return e; } if (ptr->sai_sizes) { e = gf_isom_box_array_size(s, ptr->sai_sizes); if (e) return e; } if (ptr->sai_offsets) { e = gf_isom_box_array_size(s, ptr->sai_offsets); if (e) return e; } if (ptr->sample_encryption) { e = gf_isom_box_size((GF_Box *) ptr->sample_encryption); if (e) return e; ptr->size += ptr->sample_encryption->size; } if (ptr->tfxd) { e = gf_isom_box_size((GF_Box *)ptr->tfxd); if (e) return e; s->size += ptr->tfxd->size; } return gf_isom_box_array_size(s, ptr->TrackRuns); } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ void trak_del(GF_Box *s) { GF_TrackBox *ptr = (GF_TrackBox *) s; if (ptr == NULL) return; if (ptr->Header) gf_isom_box_del((GF_Box *)ptr->Header); if (ptr->udta) gf_isom_box_del((GF_Box *)ptr->udta); if (ptr->Media) gf_isom_box_del((GF_Box *)ptr->Media); if (ptr->References) gf_isom_box_del((GF_Box *)ptr->References); if (ptr->editBox) gf_isom_box_del((GF_Box *)ptr->editBox); if (ptr->meta) gf_isom_box_del((GF_Box *)ptr->meta); if (ptr->name) gf_free(ptr->name); if (ptr->groups) gf_isom_box_del((GF_Box *)ptr->groups); gf_free(ptr); } static void gf_isom_check_sample_desc(GF_TrackBox *trak) { GF_BitStream *bs; GF_UnknownBox *a; u32 i; if (!trak->Media || !trak->Media->information) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Track with no media box !\n" )); return; } if (!trak->Media->information->sampleTable) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Track with no sample table !\n" )); trak->Media->information->sampleTable = (GF_SampleTableBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_STBL); } if (!trak->Media->information->sampleTable->SampleDescription) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Track with no sample description box !\n" )); trak->Media->information->sampleTable->SampleDescription = (GF_SampleDescriptionBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_STSD); return; } i=0; while ((a = (GF_UnknownBox*)gf_list_enum(trak->Media->information->sampleTable->SampleDescription->other_boxes, &i))) { switch (a->type) { case GF_ISOM_BOX_TYPE_MP4S: case GF_ISOM_BOX_TYPE_ENCS: case GF_ISOM_BOX_TYPE_MP4A: case GF_ISOM_BOX_TYPE_ENCA: case GF_ISOM_BOX_TYPE_MP4V: case GF_ISOM_BOX_TYPE_ENCV: case GF_ISOM_BOX_TYPE_RESV: case GF_ISOM_SUBTYPE_3GP_AMR: case GF_ISOM_SUBTYPE_3GP_AMR_WB: case GF_ISOM_SUBTYPE_3GP_EVRC: case GF_ISOM_SUBTYPE_3GP_QCELP: case GF_ISOM_SUBTYPE_3GP_SMV: case GF_ISOM_SUBTYPE_3GP_H263: case GF_ISOM_BOX_TYPE_GHNT: case GF_ISOM_BOX_TYPE_RTP_STSD: case GF_ISOM_BOX_TYPE_SRTP_STSD: case GF_ISOM_BOX_TYPE_FDP_STSD: case GF_ISOM_BOX_TYPE_RRTP_STSD: case GF_ISOM_BOX_TYPE_RTCP_STSD: case GF_ISOM_BOX_TYPE_METX: case GF_ISOM_BOX_TYPE_METT: case GF_ISOM_BOX_TYPE_STXT: case GF_ISOM_BOX_TYPE_AVC1: case 
GF_ISOM_BOX_TYPE_AVC2: case GF_ISOM_BOX_TYPE_AVC3: case GF_ISOM_BOX_TYPE_AVC4: case GF_ISOM_BOX_TYPE_SVC1: case GF_ISOM_BOX_TYPE_MVC1: case GF_ISOM_BOX_TYPE_HVC1: case GF_ISOM_BOX_TYPE_HEV1: case GF_ISOM_BOX_TYPE_HVC2: case GF_ISOM_BOX_TYPE_HEV2: case GF_ISOM_BOX_TYPE_HVT1: case GF_ISOM_BOX_TYPE_LHV1: case GF_ISOM_BOX_TYPE_LHE1: case GF_ISOM_BOX_TYPE_AV01: case GF_ISOM_BOX_TYPE_VP08: case GF_ISOM_BOX_TYPE_VP09: case GF_ISOM_BOX_TYPE_AV1C: case GF_ISOM_BOX_TYPE_TX3G: case GF_ISOM_BOX_TYPE_TEXT: case GF_ISOM_BOX_TYPE_ENCT: case GF_ISOM_BOX_TYPE_DIMS: case GF_ISOM_BOX_TYPE_AC3: case GF_ISOM_BOX_TYPE_EC3: case GF_ISOM_BOX_TYPE_LSR1: case GF_ISOM_BOX_TYPE_WVTT: case GF_ISOM_BOX_TYPE_STPP: case GF_ISOM_BOX_TYPE_SBTT: case GF_ISOM_BOX_TYPE_MP3: case GF_ISOM_BOX_TYPE_JPEG: case GF_ISOM_BOX_TYPE_PNG: case GF_ISOM_BOX_TYPE_JP2K: case GF_ISOM_BOX_TYPE_MHA1: case GF_ISOM_BOX_TYPE_MHA2: case GF_ISOM_BOX_TYPE_MHM1: case GF_ISOM_BOX_TYPE_MHM2: case GF_QT_BOX_TYPE_AUDIO_RAW: case GF_QT_BOX_TYPE_AUDIO_TWOS: case GF_QT_BOX_TYPE_AUDIO_SOWT: case GF_QT_BOX_TYPE_AUDIO_FL32: case GF_QT_BOX_TYPE_AUDIO_FL64: case GF_QT_BOX_TYPE_AUDIO_IN24: case GF_QT_BOX_TYPE_AUDIO_IN32: case GF_QT_BOX_TYPE_AUDIO_ULAW: case GF_QT_BOX_TYPE_AUDIO_ALAW: case GF_QT_BOX_TYPE_AUDIO_ADPCM: case GF_QT_BOX_TYPE_AUDIO_IMA_ADPCM: case GF_QT_BOX_TYPE_AUDIO_DVCA: case GF_QT_BOX_TYPE_AUDIO_QDMC: case GF_QT_BOX_TYPE_AUDIO_QDMC2: case GF_QT_BOX_TYPE_AUDIO_QCELP: case GF_QT_BOX_TYPE_AUDIO_kMP3: continue; case GF_ISOM_BOX_TYPE_UNKNOWN: break; default: if (gf_box_valid_in_parent((GF_Box *) a, "stsd")) { continue; } GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Unexpected box %s in stsd!\n", gf_4cc_to_str(a->type))); continue; } //we are sure to have an unknown box here assert(a->type==GF_ISOM_BOX_TYPE_UNKNOWN); if (!a->data || (a->dataSize<8) ) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Sample description %s does not have at least 8 bytes!\n", gf_4cc_to_str(a->original_4cc) )); continue; } else if (a->dataSize > a->size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Sample description %s has wrong data size %d!\n", gf_4cc_to_str(a->original_4cc), a->dataSize)); continue; } #define STSD_SWITCH_BOX(_box) \ if (gf_bs_available(bs)) { \ u64 pos = gf_bs_get_position(bs); \ u32 count_subb = 0; \ GF_Err e;\ gf_bs_set_cookie(bs, 1);\ e = gf_isom_box_array_read((GF_Box *) _box, bs, gf_isom_box_add_default); \ count_subb = _box->other_boxes ? 
gf_list_count(_box->other_boxes) : 0; \ if (!count_subb || e) { \ gf_bs_seek(bs, pos); \ _box->data_size = (u32) gf_bs_available(bs); \ if (_box->data_size) { \ _box->data = a->data; \ a->data = NULL; \ memmove(_box->data, _box->data + pos, _box->data_size); \ } \ } else { \ _box->data_size = 0; \ } \ } \ gf_bs_del(bs); \ if (!_box->data_size && _box->data) { \ gf_free(_box->data); \ _box->data = NULL; \ } \ _box->size = 0; \ _box->EntryType = a->original_4cc; \ gf_list_rem(trak->Media->information->sampleTable->SampleDescription->other_boxes, i-1); \ gf_isom_box_del((GF_Box *)a); \ gf_list_insert(trak->Media->information->sampleTable->SampleDescription->other_boxes, _box, i-1); \ /*only process visual or audio*/ switch (trak->Media->handler->handlerType) { case GF_ISOM_MEDIA_VISUAL: case GF_ISOM_MEDIA_AUXV: case GF_ISOM_MEDIA_PICT: { GF_GenericVisualSampleEntryBox *genv = (GF_GenericVisualSampleEntryBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_GNRV); bs = gf_bs_new(a->data, a->dataSize, GF_BITSTREAM_READ); genv->size = a->size-8; gf_isom_video_sample_entry_read((GF_VisualSampleEntryBox *) genv, bs); STSD_SWITCH_BOX(genv) } break; case GF_ISOM_MEDIA_AUDIO: { GF_GenericAudioSampleEntryBox *gena = (GF_GenericAudioSampleEntryBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_GNRA); gena->size = a->size-8; bs = gf_bs_new(a->data, a->dataSize, GF_BITSTREAM_READ); gf_isom_audio_sample_entry_read((GF_AudioSampleEntryBox *) gena, bs); STSD_SWITCH_BOX(gena) } break; default: { GF_Err e; GF_GenericSampleEntryBox *genm = (GF_GenericSampleEntryBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_GNRM); genm->size = a->size-8; bs = gf_bs_new(a->data, a->dataSize, GF_BITSTREAM_READ); e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)genm, bs); if (e) return; STSD_SWITCH_BOX(genm) } break; } } } GF_Err trak_AddBox(GF_Box *s, GF_Box *a) { GF_TrackBox *ptr = (GF_TrackBox *)s; if (!a) return GF_OK; switch(a->type) { case GF_ISOM_BOX_TYPE_TKHD: if (ptr->Header) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->Header = (GF_TrackHeaderBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_EDTS: if (ptr->editBox) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->editBox = (GF_EditBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_UDTA: if (ptr->udta) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->udta = (GF_UserDataBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_META: if (ptr->meta) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->meta = (GF_MetaBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_TREF: if (ptr->References) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->References = (GF_TrackReferenceBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_MDIA: if (ptr->Media) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->Media = (GF_MediaBox *)a; ((GF_MediaBox *)a)->mediaTrack = ptr; return GF_OK; case GF_ISOM_BOX_TYPE_TRGR: if (ptr->groups) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->groups = (GF_TrackGroupBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_SENC: ptr->sample_encryption = (GF_SampleEncryptionBox*)a; return gf_isom_box_add_default((GF_Box *)ptr, a); case GF_ISOM_BOX_TYPE_UUID: if (((GF_UnknownUUIDBox *)a)->internal_4cc == GF_ISOM_BOX_UUID_PSEC) { ptr->sample_encryption = (GF_SampleEncryptionBox*) a; return gf_isom_box_add_default((GF_Box *)ptr, a); } default: return gf_isom_box_add_default(s, a); } return GF_OK; } GF_Err trak_Read(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_TrackBox *ptr = (GF_TrackBox *)s; e = gf_isom_box_array_read(s, bs, trak_AddBox); if (e) return e; gf_isom_check_sample_desc(ptr); if (!ptr->Header) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing TrackHeaderBox\n")); return 
GF_ISOM_INVALID_FILE; } if (!ptr->Media) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MediaBox\n")); return GF_ISOM_INVALID_FILE; } if (!ptr->Media->information || !ptr->Media->information->sampleTable) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid MediaBox\n")); return GF_ISOM_INVALID_FILE; } for (i=0; i<gf_list_count(ptr->Media->information->sampleTable->other_boxes); i++) { GF_Box *a = gf_list_get(ptr->Media->information->sampleTable->other_boxes, i); if ((a->type ==GF_ISOM_BOX_TYPE_UUID) && (((GF_UUIDBox *)a)->internal_4cc == GF_ISOM_BOX_UUID_PSEC)) { ptr->sample_encryption = (struct __sample_encryption_box *) a; break; } else if (a->type == GF_ISOM_BOX_TYPE_SENC) { ptr->sample_encryption = (struct __sample_encryption_box *)a; break; } } return e; } GF_Box *trak_New() { ISOM_DECL_BOX_ALLOC(GF_TrackBox, GF_ISOM_BOX_TYPE_TRAK); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err trak_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TrackBox *ptr = (GF_TrackBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; if (ptr->Header) { e = gf_isom_box_write((GF_Box *) ptr->Header, bs); if (e) return e; } if (ptr->References) { e = gf_isom_box_write((GF_Box *) ptr->References, bs); if (e) return e; } if (ptr->editBox) { e = gf_isom_box_write((GF_Box *) ptr->editBox, bs); if (e) return e; } if (ptr->Media) { e = gf_isom_box_write((GF_Box *) ptr->Media, bs); if (e) return e; } if (ptr->meta) { e = gf_isom_box_write((GF_Box *) ptr->meta, bs); if (e) return e; } if (ptr->groups) { e = gf_isom_box_write((GF_Box *) ptr->groups, bs); if (e) return e; } if (ptr->udta) { e = gf_isom_box_write((GF_Box *) ptr->udta, bs); if (e) return e; } return GF_OK; } GF_Err trak_Size(GF_Box *s) { GF_Err e; GF_TrackBox *ptr = (GF_TrackBox *)s; if (ptr->Header) { e = gf_isom_box_size((GF_Box *) ptr->Header); if (e) return e; ptr->size += ptr->Header->size; } if (ptr->udta) { e = gf_isom_box_size((GF_Box *) ptr->udta); if (e) return e; ptr->size += ptr->udta->size; } if (ptr->References) { e = gf_isom_box_size((GF_Box *) ptr->References); if (e) return e; ptr->size += ptr->References->size; } if (ptr->editBox) { e = gf_isom_box_size((GF_Box *) ptr->editBox); if (e) return e; ptr->size += ptr->editBox->size; } if (ptr->Media) { e = gf_isom_box_size((GF_Box *) ptr->Media); if (e) return e; ptr->size += ptr->Media->size; } if (ptr->meta) { e = gf_isom_box_size((GF_Box *) ptr->meta); if (e) return e; ptr->size += ptr->meta->size; } if (ptr->groups) { e = gf_isom_box_size((GF_Box *) ptr->groups); if (e) return e; ptr->size += ptr->groups->size; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stri_del(GF_Box *s) { GF_SubTrackInformationBox *ptr = (GF_SubTrackInformationBox *)s; if (ptr == NULL) return; if (ptr->attribute_list) gf_free(ptr->attribute_list); gf_free(ptr); } GF_Err stri_Read(GF_Box *s, GF_BitStream *bs) { size_t i; GF_SubTrackInformationBox *ptr = (GF_SubTrackInformationBox *)s; ptr->switch_group = gf_bs_read_u16(bs); ptr->alternate_group = gf_bs_read_u16(bs); ptr->sub_track_id = gf_bs_read_u32(bs); ptr->size -= 8; ptr->attribute_count = ptr->size / 4; GF_SAFE_ALLOC_N(ptr->attribute_list, (size_t)ptr->attribute_count, u32); if (!ptr->attribute_list) return GF_OUT_OF_MEM; for (i = 0; i < ptr->attribute_count; i++) { ptr->attribute_list[i] = gf_bs_read_u32(bs); } return GF_OK; } GF_Box *stri_New() { ISOM_DECL_BOX_ALLOC(GF_SubTrackInformationBox, GF_ISOM_BOX_TYPE_STRI); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stri_Write(GF_Box *s, 
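/* 'stri' payload handled by stri_Read above: switch_group(16) alternate_group(16) sub_track_id(32),
   then attribute_count = remaining_size/4 four-byte attributes */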
GF_BitStream *bs) { GF_Err e; u32 i; GF_SubTrackInformationBox *ptr = (GF_SubTrackInformationBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u16(bs, ptr->switch_group); gf_bs_write_u16(bs, ptr->alternate_group); gf_bs_write_u32(bs, ptr->sub_track_id); for (i = 0; i < ptr->attribute_count; i++) { gf_bs_write_u32(bs, ptr->attribute_list[i]); } return GF_OK; } GF_Err stri_Size(GF_Box *s) { GF_SubTrackInformationBox *ptr = (GF_SubTrackInformationBox *)s; ptr->size += 8 + 4 * ptr->attribute_count; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stsg_del(GF_Box *s) { GF_SubTrackSampleGroupBox *ptr = (GF_SubTrackSampleGroupBox *)s; if (ptr == NULL) return; if (ptr->group_description_index) gf_free(ptr->group_description_index); gf_free(ptr); } GF_Err stsg_Read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_SubTrackSampleGroupBox *ptr = (GF_SubTrackSampleGroupBox *)s; ISOM_DECREASE_SIZE(s, 6); ptr->grouping_type = gf_bs_read_u32(bs); ptr->nb_groups = gf_bs_read_u16(bs); ISOM_DECREASE_SIZE(s, ptr->nb_groups*4); GF_SAFE_ALLOC_N(ptr->group_description_index, ptr->nb_groups, u32); if (!ptr->group_description_index) return GF_OUT_OF_MEM; for (i = 0; i < ptr->nb_groups; i++) { ptr->group_description_index[i] = gf_bs_read_u32(bs); } return GF_OK; } GF_Box *stsg_New() { ISOM_DECL_BOX_ALLOC(GF_SubTrackSampleGroupBox, GF_ISOM_BOX_TYPE_STSG); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stsg_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_SubTrackSampleGroupBox *ptr = (GF_SubTrackSampleGroupBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->grouping_type); gf_bs_write_u16(bs, ptr->nb_groups); for (i = 0; i < ptr->nb_groups; i++) { gf_bs_write_u32(bs, ptr->group_description_index[i]); } return GF_OK; } GF_Err stsg_Size(GF_Box *s) { GF_SubTrackSampleGroupBox *ptr = (GF_SubTrackSampleGroupBox *)s; ptr->size += 6 + 4 * ptr->nb_groups; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void strk_del(GF_Box *s) { GF_SubTrackBox *ptr = (GF_SubTrackBox *)s; if (ptr == NULL) return; if (ptr->info) gf_isom_box_del((GF_Box *)ptr->info); gf_free(ptr); } GF_Err strk_AddBox(GF_Box *s, GF_Box *a) { GF_SubTrackBox *ptr = (GF_SubTrackBox *)s; if (!a) return GF_OK; switch (a->type) { case GF_ISOM_BOX_TYPE_STRI: if (ptr->info) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->info = (GF_SubTrackInformationBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_STRD: if (ptr->strd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->strd = a; return GF_OK; default: return gf_isom_box_add_default(s, a); } return GF_OK; } GF_Err strk_Read(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SubTrackBox *ptr = (GF_SubTrackBox *)s; e = gf_isom_box_array_read(s, bs, strk_AddBox); if (e) return e; if (!ptr->info) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing SubTrackInformationBox\n")); return GF_ISOM_INVALID_FILE; } return e; } GF_Box *strk_New() { ISOM_DECL_BOX_ALLOC(GF_SubTrackBox, GF_ISOM_BOX_TYPE_STRK); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err strk_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SubTrackBox *ptr = (GF_SubTrackBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; if (ptr->info) { e = gf_isom_box_write((GF_Box *)ptr->info, bs); if (e) return e; } return GF_OK; } GF_Err strk_Size(GF_Box *s) { GF_Err e; GF_SubTrackBox *ptr = (GF_SubTrackBox *)s; if (ptr->info) { e = gf_isom_box_size((GF_Box *)ptr->info); if (e) return e; ptr->size += ptr->info->size; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Err tref_AddBox(GF_Box 
*ptr, GF_Box *a) { return gf_isom_box_add_default(ptr, a); } void tref_del(GF_Box *s) { GF_TrackReferenceBox *ptr = (GF_TrackReferenceBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err tref_Read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read_ex(s, bs, gf_isom_box_add_default, s->type); } GF_Box *tref_New() { ISOM_DECL_BOX_ALLOC(GF_TrackReferenceBox, GF_ISOM_BOX_TYPE_TREF); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tref_Write(GF_Box *s, GF_BitStream *bs) { // GF_TrackReferenceBox *ptr = (GF_TrackReferenceBox *)s; return gf_isom_box_write_header(s, bs); } GF_Err tref_Size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void reftype_del(GF_Box *s) { GF_TrackReferenceTypeBox *ptr = (GF_TrackReferenceTypeBox *)s; if (!ptr) return; if (ptr->trackIDs) gf_free(ptr->trackIDs); gf_free(ptr); } GF_Err reftype_Read(GF_Box *s, GF_BitStream *bs) { u32 bytesToRead; u32 i; GF_TrackReferenceTypeBox *ptr = (GF_TrackReferenceTypeBox *)s; bytesToRead = (u32) (ptr->size); if (!bytesToRead) return GF_OK; ptr->trackIDCount = (u32) (bytesToRead) / sizeof(u32); ptr->trackIDs = (u32 *) gf_malloc(ptr->trackIDCount * sizeof(u32)); if (!ptr->trackIDs) return GF_OUT_OF_MEM; for (i = 0; i < ptr->trackIDCount; i++) { ptr->trackIDs[i] = gf_bs_read_u32(bs); } return GF_OK; } GF_Box *reftype_New() { ISOM_DECL_BOX_ALLOC(GF_TrackReferenceTypeBox, GF_ISOM_BOX_TYPE_REFT); return (GF_Box *)tmp; } GF_Err reftype_AddRefTrack(GF_TrackReferenceTypeBox *ref, u32 trackID, u16 *outRefIndex) { u32 i; if (!ref || !trackID) return GF_BAD_PARAM; if (outRefIndex) *outRefIndex = 0; //don't add a dep if already here !! for (i = 0; i < ref->trackIDCount; i++) { if (ref->trackIDs[i] == trackID) { if (outRefIndex) *outRefIndex = i+1; return GF_OK; } } ref->trackIDs = (u32 *) gf_realloc(ref->trackIDs, (ref->trackIDCount + 1) * sizeof(u32) ); if (!ref->trackIDs) return GF_OUT_OF_MEM; ref->trackIDs[ref->trackIDCount] = trackID; ref->trackIDCount++; if (outRefIndex) *outRefIndex = ref->trackIDCount; return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err reftype_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_TrackReferenceTypeBox *ptr = (GF_TrackReferenceTypeBox *)s; ptr->type = ptr->reference_type; if (!ptr->trackIDCount) return GF_OK; e = gf_isom_box_write_header(s, bs); ptr->type = GF_ISOM_BOX_TYPE_REFT; if (e) return e; for (i = 0; i < ptr->trackIDCount; i++) { gf_bs_write_u32(bs, ptr->trackIDs[i]); } return GF_OK; } GF_Err reftype_Size(GF_Box *s) { GF_TrackReferenceTypeBox *ptr = (GF_TrackReferenceTypeBox *)s; if (!ptr->trackIDCount) ptr->size=0; else ptr->size += (ptr->trackIDCount * sizeof(u32)); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void trex_del(GF_Box *s) { GF_TrackExtendsBox *ptr = (GF_TrackExtendsBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err trex_Read(GF_Box *s, GF_BitStream *bs) { GF_TrackExtendsBox *ptr = (GF_TrackExtendsBox *)s; ptr->trackID = gf_bs_read_u32(bs); ptr->def_sample_desc_index = gf_bs_read_u32(bs); ptr->def_sample_duration = gf_bs_read_u32(bs); ptr->def_sample_size = gf_bs_read_u32(bs); ptr->def_sample_flags = gf_bs_read_u32(bs); return GF_OK; } GF_Box *trex_New() { ISOM_DECL_BOX_ALLOC(GF_TrackExtendsBox, GF_ISOM_BOX_TYPE_TREX); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err trex_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TrackExtendsBox *ptr = (GF_TrackExtendsBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, 
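	/* track_ID this trex provides the fragment defaults for */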
ptr->trackID); //we always write 1 in trex default sample desc as using 0 breaks chrome/opera/... gf_bs_write_u32(bs, ptr->def_sample_desc_index ? ptr->def_sample_desc_index : 1); gf_bs_write_u32(bs, ptr->def_sample_duration); gf_bs_write_u32(bs, ptr->def_sample_size); gf_bs_write_u32(bs, ptr->def_sample_flags); return GF_OK; } GF_Err trex_Size(GF_Box *s) { GF_TrackExtendsBox *ptr = (GF_TrackExtendsBox *)s; ptr->size += 20; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void trep_del(GF_Box *s) { GF_TrackExtensionPropertiesBox *ptr = (GF_TrackExtensionPropertiesBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err trep_Read(GF_Box *s, GF_BitStream *bs) { GF_TrackExtensionPropertiesBox *ptr = (GF_TrackExtensionPropertiesBox *)s; ptr->trackID = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); return gf_isom_box_array_read(s, bs, gf_isom_box_add_default); } GF_Box *trep_New() { ISOM_DECL_BOX_ALLOC(GF_TrackExtensionPropertiesBox, GF_ISOM_BOX_TYPE_TREP); tmp->other_boxes = gf_list_new(); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err trep_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TrackExtensionPropertiesBox *ptr = (GF_TrackExtensionPropertiesBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->trackID); return GF_OK; } GF_Err trep_Size(GF_Box *s) { GF_TrackExtensionPropertiesBox *ptr = (GF_TrackExtensionPropertiesBox *)s; ptr->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void trun_del(GF_Box *s) { GF_TrunEntry *p; GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *)s; if (ptr == NULL) return; while (gf_list_count(ptr->entries)) { p = (GF_TrunEntry*)gf_list_get(ptr->entries, 0); gf_list_rem(ptr->entries, 0); gf_free(p); } gf_list_del(ptr->entries); if (ptr->cache) gf_bs_del(ptr->cache); gf_free(ptr); } GF_Err trun_Read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_TrunEntry *p; GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *)s; //check this is a good file if ((ptr->flags & GF_ISOM_TRUN_FIRST_FLAG) && (ptr->flags & GF_ISOM_TRUN_FLAGS)) return GF_ISOM_INVALID_FILE; ptr->sample_count = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); //The rest depends on the flags if (ptr->flags & GF_ISOM_TRUN_DATA_OFFSET) { ptr->data_offset = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); } if (ptr->flags & GF_ISOM_TRUN_FIRST_FLAG) { ptr->first_sample_flags = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); } if (! 
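	/* none of the per-sample fields is signaled: store a single packed GF_TrunEntry with
	   nb_pack = sample_count instead of one entry per sample */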
(ptr->flags & (GF_ISOM_TRUN_DURATION | GF_ISOM_TRUN_SIZE | GF_ISOM_TRUN_FLAGS | GF_ISOM_TRUN_CTS_OFFSET) ) ) {
		GF_SAFEALLOC(p, GF_TrunEntry);
		if (!p) return GF_OUT_OF_MEM;
		p->nb_pack = ptr->sample_count;
		gf_list_add(ptr->entries, p);
		return GF_OK;
	}

	//read each entry (even though nothing may be written)
	for (i=0; i<ptr->sample_count; i++) {
		u32 trun_size = 0;
		p = (GF_TrunEntry *) gf_malloc(sizeof(GF_TrunEntry));
		if (!p) return GF_OUT_OF_MEM;
		memset(p, 0, sizeof(GF_TrunEntry));

		if (ptr->flags & GF_ISOM_TRUN_DURATION) {
			p->Duration = gf_bs_read_u32(bs);
			trun_size += 4;
		}
		if (ptr->flags & GF_ISOM_TRUN_SIZE) {
			p->size = gf_bs_read_u32(bs);
			trun_size += 4;
		}
		//SHOULDN'T BE USED IF GF_ISOM_TRUN_FIRST_FLAG IS DEFINED
		if (ptr->flags & GF_ISOM_TRUN_FLAGS) {
			p->flags = gf_bs_read_u32(bs);
			trun_size += 4;
		}
		if (ptr->flags & GF_ISOM_TRUN_CTS_OFFSET) {
			if (ptr->version==0) {
				p->CTS_Offset = (u32) gf_bs_read_u32(bs);
			} else {
				p->CTS_Offset = (s32) gf_bs_read_u32(bs);
			}
			//the composition offset also consumes 4 bytes of the box payload
			trun_size += 4;
		}
		gf_list_add(ptr->entries, p);
		ISOM_DECREASE_SIZE(ptr, trun_size);
	}
	return GF_OK;
}

GF_Box *trun_New()
{
	ISOM_DECL_BOX_ALLOC(GF_TrackFragmentRunBox, GF_ISOM_BOX_TYPE_TRUN);
	tmp->entries = gf_list_new();
	//NO FLAGS SET BY DEFAULT
	return (GF_Box *)tmp;
}

#ifndef GPAC_DISABLE_ISOM_WRITE

GF_Err trun_Write(GF_Box *s, GF_BitStream *bs)
{
	GF_TrunEntry *p;
	GF_Err e;
	u32 i, count;
	GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *) s;
	if (!s) return GF_BAD_PARAM;

	e = gf_isom_full_box_write(s, bs);
	if (e) return e;
	gf_bs_write_u32(bs, ptr->sample_count);

	//The rest depends on the flags
	if (ptr->flags & GF_ISOM_TRUN_DATA_OFFSET) {
		gf_bs_write_u32(bs, ptr->data_offset);
	}
	if (ptr->flags & GF_ISOM_TRUN_FIRST_FLAG) {
		gf_bs_write_u32(bs, ptr->first_sample_flags);
	}
	if (! (ptr->flags & (GF_ISOM_TRUN_DURATION | GF_ISOM_TRUN_SIZE | GF_ISOM_TRUN_FLAGS | GF_ISOM_TRUN_CTS_OFFSET) ) ) {
		return GF_OK;
	}

	count = gf_list_count(ptr->entries);
	for (i=0; i<count; i++) {
		p = (GF_TrunEntry*)gf_list_get(ptr->entries, i);

		if (ptr->flags & GF_ISOM_TRUN_DURATION) {
			gf_bs_write_u32(bs, p->Duration);
		}
		if (ptr->flags & GF_ISOM_TRUN_SIZE) {
			gf_bs_write_u32(bs, p->size);
		}
		//SHOULDN'T BE USED IF GF_ISOM_TRUN_FIRST_FLAG IS DEFINED
		if (ptr->flags & GF_ISOM_TRUN_FLAGS) {
			gf_bs_write_u32(bs, p->flags);
		}
		if (ptr->flags & GF_ISOM_TRUN_CTS_OFFSET) {
			if (ptr->version==0) {
				gf_bs_write_u32(bs, p->CTS_Offset);
			} else {
				gf_bs_write_u32(bs, (u32) p->CTS_Offset);
			}
		}
	}
	return GF_OK;
}

GF_Err trun_Size(GF_Box *s)
{
	u32 i, count;
	GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *)s;

	ptr->size += 4;
	//The rest depends on the flags
	if (ptr->flags & GF_ISOM_TRUN_DATA_OFFSET) ptr->size += 4;
	if (ptr->flags & GF_ISOM_TRUN_FIRST_FLAG) ptr->size += 4;
	if (!
(ptr->flags & (GF_ISOM_TRUN_DURATION | GF_ISOM_TRUN_SIZE | GF_ISOM_TRUN_FLAGS | GF_ISOM_TRUN_CTS_OFFSET) ) ) { return GF_OK; } //if nothing to do, this will be skipped automatically count = gf_list_count(ptr->entries); for (i=0; i<count; i++) { if (ptr->flags & GF_ISOM_TRUN_DURATION) ptr->size += 4; if (ptr->flags & GF_ISOM_TRUN_SIZE) ptr->size += 4; //SHOULDN'T BE USED IF GF_ISOM_TRUN_FIRST_FLAG IS DEFINED if (ptr->flags & GF_ISOM_TRUN_FLAGS) ptr->size += 4; if (ptr->flags & GF_ISOM_TRUN_CTS_OFFSET) ptr->size += 4; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ void tsro_del(GF_Box *s) { GF_TimeOffHintEntryBox *tsro = (GF_TimeOffHintEntryBox *)s; gf_free(tsro); } GF_Err tsro_Read(GF_Box *s, GF_BitStream *bs) { GF_TimeOffHintEntryBox *ptr = (GF_TimeOffHintEntryBox *)s; ptr->TimeOffset = gf_bs_read_u32(bs); return GF_OK; } GF_Box *tsro_New() { ISOM_DECL_BOX_ALLOC(GF_TimeOffHintEntryBox, GF_ISOM_BOX_TYPE_TSRO); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tsro_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TimeOffHintEntryBox *ptr = (GF_TimeOffHintEntryBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->TimeOffset); return GF_OK; } GF_Err tsro_Size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void udta_del(GF_Box *s) { u32 i; GF_UserDataMap *map; GF_UserDataBox *ptr = (GF_UserDataBox *)s; if (ptr == NULL) return; i=0; while ((map = (GF_UserDataMap *)gf_list_enum(ptr->recordList, &i))) { gf_isom_box_array_del(map->other_boxes); gf_free(map); } gf_list_del(ptr->recordList); gf_free(ptr); } GF_UserDataMap *udta_getEntry(GF_UserDataBox *ptr, u32 box_type, bin128 *uuid) { u32 i; GF_UserDataMap *map; if (ptr == NULL) return NULL; i=0; while ((map = (GF_UserDataMap *)gf_list_enum(ptr->recordList, &i))) { if (map->boxType == box_type) { if ((box_type != GF_ISOM_BOX_TYPE_UUID) || !uuid) return map; if (!memcmp(map->uuid, *uuid, 16)) return map; } } return NULL; } GF_Err udta_AddBox(GF_Box *s, GF_Box *a) { GF_Err e; u32 box_type; GF_UserDataMap *map; GF_UserDataBox *ptr = (GF_UserDataBox *)s; if (!ptr) return GF_BAD_PARAM; if (!a) return GF_OK; /* for unknown udta boxes, we reference them by their original box type */ box_type = a->type; if (box_type == GF_ISOM_BOX_TYPE_UNKNOWN) { GF_UnknownBox* unkn = (GF_UnknownBox *)a; if (unkn) box_type = unkn->original_4cc; } map = udta_getEntry(ptr, box_type, (a->type==GF_ISOM_BOX_TYPE_UUID) ? 
& ((GF_UUIDBox *)a)->uuid : NULL); if (map == NULL) { map = (GF_UserDataMap *) gf_malloc(sizeof(GF_UserDataMap)); if (map == NULL) return GF_OUT_OF_MEM; memset(map, 0, sizeof(GF_UserDataMap)); map->boxType = box_type; if (a->type == GF_ISOM_BOX_TYPE_UUID) memcpy(map->uuid, ((GF_UUIDBox *)a)->uuid, 16); map->other_boxes = gf_list_new(); if (!map->other_boxes) { gf_free(map); return GF_OUT_OF_MEM; } e = gf_list_add(ptr->recordList, map); if (e) return e; } return gf_list_add(map->other_boxes, a); } GF_Err udta_Read(GF_Box *s, GF_BitStream *bs) { GF_Err e = gf_isom_box_array_read(s, bs, udta_AddBox); if (e) return e; if (s->size==4) { u32 val = gf_bs_read_u32(bs); s->size = 0; if (val) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] udta has 4 remaining bytes set to %08X but they should be 0\n", val)); } } return GF_OK; } GF_Box *udta_New() { ISOM_DECL_BOX_ALLOC(GF_UserDataBox, GF_ISOM_BOX_TYPE_UDTA); tmp->recordList = gf_list_new(); if (!tmp->recordList) { gf_free(tmp); return NULL; } return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err udta_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_UserDataMap *map; GF_UserDataBox *ptr = (GF_UserDataBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; i=0; while ((map = (GF_UserDataMap *)gf_list_enum(ptr->recordList, &i))) { //warning: here we are not passing the actual "parent" of the list //but the UDTA box. The parent itself is not an box, we don't care about it e = gf_isom_box_array_write(s, map->other_boxes, bs); if (e) return e; } return GF_OK; } GF_Err udta_Size(GF_Box *s) { GF_Err e; u32 i; GF_UserDataMap *map; GF_UserDataBox *ptr = (GF_UserDataBox *)s; i=0; while ((map = (GF_UserDataMap *)gf_list_enum(ptr->recordList, &i))) { //warning: here we are not passing the actual "parent" of the list //but the UDTA box. 
The parent itself is not an box, we don't care about it e = gf_isom_box_array_size(s, map->other_boxes); if (e) return e; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void vmhd_del(GF_Box *s) { GF_VideoMediaHeaderBox *ptr = (GF_VideoMediaHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err vmhd_Read(GF_Box *s, GF_BitStream *bs) { GF_VideoMediaHeaderBox *ptr = (GF_VideoMediaHeaderBox *)s; ptr->reserved = gf_bs_read_u64(bs); return GF_OK; } GF_Box *vmhd_New() { ISOM_DECL_BOX_ALLOC(GF_VideoMediaHeaderBox, GF_ISOM_BOX_TYPE_VMHD); tmp->flags = 1; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err vmhd_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_VideoMediaHeaderBox *ptr = (GF_VideoMediaHeaderBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->reserved); return GF_OK; } GF_Err vmhd_Size(GF_Box *s) { GF_VideoMediaHeaderBox *ptr = (GF_VideoMediaHeaderBox *)s; ptr->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void void_del(GF_Box *s) { gf_free(s); } GF_Err void_Read(GF_Box *s, GF_BitStream *bs) { if (s->size) return GF_ISOM_INVALID_FILE; return GF_OK; } GF_Box *void_New() { ISOM_DECL_BOX_ALLOC(GF_Box, GF_ISOM_BOX_TYPE_VOID); return tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err void_Write(GF_Box *s, GF_BitStream *bs) { gf_bs_write_u32(bs, 0); return GF_OK; } GF_Err void_Size(GF_Box *s) { s->size = 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *pdin_New() { ISOM_DECL_BOX_ALLOC(GF_ProgressiveDownloadBox, GF_ISOM_BOX_TYPE_PDIN); tmp->flags = 1; return (GF_Box *)tmp; } void pdin_del(GF_Box *s) { GF_ProgressiveDownloadBox *ptr = (GF_ProgressiveDownloadBox*)s; if (ptr == NULL) return; if (ptr->rates) gf_free(ptr->rates); if (ptr->times) gf_free(ptr->times); gf_free(ptr); } GF_Err pdin_Read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_ProgressiveDownloadBox *ptr = (GF_ProgressiveDownloadBox*)s; ptr->count = (u32) (ptr->size) / 8; ptr->rates = (u32*)gf_malloc(sizeof(u32)*ptr->count); ptr->times = (u32*)gf_malloc(sizeof(u32)*ptr->count); for (i=0; i<ptr->count; i++) { ptr->rates[i] = gf_bs_read_u32(bs); ptr->times[i] = gf_bs_read_u32(bs); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err pdin_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_ProgressiveDownloadBox *ptr = (GF_ProgressiveDownloadBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; for (i=0; i<ptr->count; i++) { gf_bs_write_u32(bs, ptr->rates[i]); gf_bs_write_u32(bs, ptr->times[i]); } return GF_OK; } GF_Err pdin_Size(GF_Box *s) { GF_ProgressiveDownloadBox *ptr = (GF_ProgressiveDownloadBox *)s; ptr->size += 8*ptr->count; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *sdtp_New() { ISOM_DECL_BOX_ALLOC(GF_SampleDependencyTypeBox, GF_ISOM_BOX_TYPE_SDTP); tmp->flags = 1; return (GF_Box *)tmp; } void sdtp_del(GF_Box *s) { GF_SampleDependencyTypeBox *ptr = (GF_SampleDependencyTypeBox*)s; if (ptr == NULL) return; if (ptr->sample_info) gf_free(ptr->sample_info); gf_free(ptr); } GF_Err sdtp_Read(GF_Box *s, GF_BitStream *bs) { GF_SampleDependencyTypeBox *ptr = (GF_SampleDependencyTypeBox*)s; /*out-of-order sdtp, assume no padding at the end*/ if (!ptr->sampleCount) ptr->sampleCount = (u32) ptr->size; else if (ptr->sampleCount > (u32) ptr->size) return GF_ISOM_INVALID_FILE; ptr->sample_info = (u8 *) gf_malloc(sizeof(u8)*ptr->sampleCount); gf_bs_read_data(bs, (char*)ptr->sample_info, ptr->sampleCount); ISOM_DECREASE_SIZE(ptr, ptr->sampleCount); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err sdtp_Write(GF_Box *s, 
GF_BitStream *bs) { GF_Err e; GF_SampleDependencyTypeBox *ptr = (GF_SampleDependencyTypeBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_data(bs, (char*)ptr->sample_info, ptr->sampleCount); return GF_OK; } GF_Err sdtp_Size(GF_Box *s) { GF_SampleDependencyTypeBox *ptr = (GF_SampleDependencyTypeBox *)s; ptr->size += ptr->sampleCount; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *pasp_New() { ISOM_DECL_BOX_ALLOC(GF_PixelAspectRatioBox, GF_ISOM_BOX_TYPE_PASP); return (GF_Box *)tmp; } void pasp_del(GF_Box *s) { GF_PixelAspectRatioBox *ptr = (GF_PixelAspectRatioBox*)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err pasp_Read(GF_Box *s, GF_BitStream *bs) { GF_PixelAspectRatioBox *ptr = (GF_PixelAspectRatioBox*)s; ptr->hSpacing = gf_bs_read_u32(bs); ptr->vSpacing = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 8); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err pasp_Write(GF_Box *s, GF_BitStream *bs) { GF_PixelAspectRatioBox *ptr = (GF_PixelAspectRatioBox *)s; GF_Err e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->hSpacing); gf_bs_write_u32(bs, ptr->vSpacing); return GF_OK; } GF_Err pasp_Size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *clap_New() { ISOM_DECL_BOX_ALLOC(GF_CleanApertureBox, GF_ISOM_BOX_TYPE_CLAP); return (GF_Box *)tmp; } void clap_del(GF_Box *s) { GF_CleanApertureBox *ptr = (GF_CleanApertureBox*)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err clap_Read(GF_Box *s, GF_BitStream *bs) { GF_CleanApertureBox *ptr = (GF_CleanApertureBox*)s; ISOM_DECREASE_SIZE(ptr, 32); ptr->cleanApertureWidthN = gf_bs_read_u32(bs); ptr->cleanApertureWidthD = gf_bs_read_u32(bs); ptr->cleanApertureHeightN = gf_bs_read_u32(bs); ptr->cleanApertureHeightD = gf_bs_read_u32(bs); ptr->horizOffN = gf_bs_read_u32(bs); ptr->horizOffD = gf_bs_read_u32(bs); ptr->vertOffN = gf_bs_read_u32(bs); ptr->vertOffD = gf_bs_read_u32(bs); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err clap_Write(GF_Box *s, GF_BitStream *bs) { GF_CleanApertureBox *ptr = (GF_CleanApertureBox *)s; GF_Err e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->cleanApertureWidthN); gf_bs_write_u32(bs, ptr->cleanApertureWidthD); gf_bs_write_u32(bs, ptr->cleanApertureHeightN); gf_bs_write_u32(bs, ptr->cleanApertureHeightD); gf_bs_write_u32(bs, ptr->horizOffN); gf_bs_write_u32(bs, ptr->horizOffD); gf_bs_write_u32(bs, ptr->vertOffN); gf_bs_write_u32(bs, ptr->vertOffD); return GF_OK; } GF_Err clap_Size(GF_Box *s) { s->size += 32; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *metx_New() { //type is overridden by the box constructor ISOM_DECL_BOX_ALLOC(GF_MetaDataSampleEntryBox, GF_ISOM_BOX_TYPE_METX); gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp); return (GF_Box *)tmp; } void metx_del(GF_Box *s) { GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox*)s; if (ptr == NULL) return; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s); if (ptr->content_encoding) gf_free(ptr->content_encoding); if (ptr->xml_namespace) gf_free(ptr->xml_namespace); if (ptr->xml_schema_loc) gf_free(ptr->xml_schema_loc); if (ptr->mime_type) gf_free(ptr->mime_type); if (ptr->config) gf_isom_box_del((GF_Box *)ptr->config); gf_free(ptr); } GF_Err metx_AddBox(GF_Box *s, GF_Box *a) { GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_SINF: gf_list_add(ptr->protections, a); break; case GF_ISOM_BOX_TYPE_TXTC: //we allow the config box on metx if (ptr->config) 
ERROR_ON_DUPLICATED_BOX(a, ptr)
		ptr->config = (GF_TextConfigBox *)a;
		break;
	default:
		return gf_isom_box_add_default(s, a);
	}
	return GF_OK;
}

GF_Err metx_Read(GF_Box *s, GF_BitStream *bs)
{
	u32 size, i;
	GF_Err e;
	char *str;
	GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox*)s;
	e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)ptr, bs);
	if (e) return e;
	//reject boxes too small to hold the base sample entry fields, otherwise the subtraction below underflows
	if (ptr->size < 8) return GF_ISOM_INVALID_FILE;
	size = (u32) ptr->size - 8;
	str = gf_malloc(sizeof(char)*size);
	if (!str) return GF_OUT_OF_MEM;

	i=0;
	while (size) {
		str[i] = gf_bs_read_u8(bs);
		size--;
		if (!str[i]) {
			i++;
			break;
		}
		i++;
	}
	if (!size && i>1 && str[i-1]) {
		GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] metx read invalid string\n"));
		gf_free(str);
		return GF_ISOM_INVALID_FILE;
	}
	if (i>1) {
		if (ptr->type==GF_ISOM_BOX_TYPE_STPP) {
			ptr->xml_namespace = gf_strdup(str);
		} else {
			ptr->content_encoding = gf_strdup(str);
		}
	}

	i=0;
	while (size) {
		str[i] = gf_bs_read_u8(bs);
		size--;
		if (!str[i]) {
			i++;
			break;
		}
		i++;
	}
	if (!size && i>1 && str[i-1]) {
		GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] metx read invalid string\n"));
		gf_free(str);
		return GF_ISOM_INVALID_FILE;
	}
	if ((ptr->type==GF_ISOM_BOX_TYPE_METX) || (ptr->type==GF_ISOM_BOX_TYPE_STPP)) {
		if (i>1) {
			if (ptr->type==GF_ISOM_BOX_TYPE_STPP) {
				ptr->xml_schema_loc = gf_strdup(str);
			} else {
				ptr->xml_namespace = gf_strdup(str);
			}
		}

		i=0;
		while (size) {
			str[i] = gf_bs_read_u8(bs);
			size--;
			if (!str[i]) {
				i++;
				break;
			}
			i++;
		}
		if (!size && i>1 && str[i-1]) {
			GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] metx read invalid string\n"));
			gf_free(str);
			return GF_ISOM_INVALID_FILE;
		}
		if (i>1) {
			if (ptr->type==GF_ISOM_BOX_TYPE_STPP) {
				ptr->mime_type = gf_strdup(str);
			} else {
				ptr->xml_schema_loc = gf_strdup(str);
			}
		}
	}
	//mett, sbtt, stxt, stpp
	else {
		if (i>1) ptr->mime_type = gf_strdup(str);
	}
	ptr->size = size;
	gf_free(str);
	return gf_isom_box_array_read(s, bs, metx_AddBox);
}

#ifndef GPAC_DISABLE_ISOM_WRITE

GF_Err metx_Write(GF_Box *s, GF_BitStream *bs)
{
	GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox *)s;
	GF_Err e = gf_isom_box_write_header(s, bs);
	if (e) return e;
	gf_bs_write_data(bs, ptr->reserved, 6);
	gf_bs_write_u16(bs, ptr->dataReferenceIndex);

	if (ptr->type!=GF_ISOM_BOX_TYPE_STPP) {
		if (ptr->content_encoding) gf_bs_write_data(bs, ptr->content_encoding, (u32) strlen(ptr->content_encoding));
		gf_bs_write_u8(bs, 0);
	}
	if ((ptr->type==GF_ISOM_BOX_TYPE_METX) || (ptr->type==GF_ISOM_BOX_TYPE_STPP)) {
		if (ptr->xml_namespace) gf_bs_write_data(bs, ptr->xml_namespace, (u32) strlen(ptr->xml_namespace));
		gf_bs_write_u8(bs, 0);
		if (ptr->xml_schema_loc) gf_bs_write_data(bs, ptr->xml_schema_loc, (u32) strlen(ptr->xml_schema_loc));
		gf_bs_write_u8(bs, 0);
		if (ptr->type==GF_ISOM_BOX_TYPE_STPP) {
			if (ptr->mime_type) gf_bs_write_data(bs, ptr->mime_type, (u32) strlen(ptr->mime_type));
			gf_bs_write_u8(bs, 0);
		}
	}
	//mett, sbtt, stxt
	else {
		if (ptr->mime_type) gf_bs_write_data(bs, ptr->mime_type, (u32) strlen(ptr->mime_type));
		gf_bs_write_u8(bs, 0);
		if (ptr->config) {
			gf_isom_box_write((GF_Box *)ptr->config, bs);
		}
	}
	return gf_isom_box_array_write(s, ptr->protections, bs);
}

GF_Err metx_Size(GF_Box *s)
{
	GF_Err e;
	GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox *)s;
	ptr->size += 8;

	if (ptr->type!=GF_ISOM_BOX_TYPE_STPP) {
		if (ptr->content_encoding) ptr->size += strlen(ptr->content_encoding);
		ptr->size++;
	}
	if ((ptr->type==GF_ISOM_BOX_TYPE_METX) || (ptr->type==GF_ISOM_BOX_TYPE_STPP)) {
		if (ptr->xml_namespace) ptr->size += strlen(ptr->xml_namespace);
		ptr->size++;
		if (ptr->xml_schema_loc) ptr->size += strlen(ptr->xml_schema_loc);
		ptr->size++;
if (ptr->type==GF_ISOM_BOX_TYPE_STPP) { if (ptr->mime_type) ptr->size += strlen(ptr->mime_type); ptr->size++; } } //mett, sbtt, stxt else { if (ptr->mime_type) ptr->size += strlen(ptr->mime_type); ptr->size++; if (ptr->config) { e = gf_isom_box_size((GF_Box *)ptr->config); if (e) return e; ptr->size += ptr->config->size; } } return gf_isom_box_array_size(s, ptr->protections); } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* SimpleTextSampleEntry */ GF_Box *txtc_New() { ISOM_DECL_BOX_ALLOC(GF_TextConfigBox, GF_ISOM_BOX_TYPE_TXTC); return (GF_Box *)tmp; } void txtc_del(GF_Box *s) { GF_TextConfigBox *ptr = (GF_TextConfigBox*)s; if (ptr == NULL) return; if (ptr->config) gf_free(ptr->config); gf_free(ptr); } GF_Err txtc_Read(GF_Box *s, GF_BitStream *bs) { u32 size, i; char *str; GF_TextConfigBox *ptr = (GF_TextConfigBox*)s; size = (u32) ptr->size; str = (char *)gf_malloc(sizeof(char)*size); i=0; while (size) { str[i] = gf_bs_read_u8(bs); size--; if (!str[i]) break; i++; } if (i) ptr->config = gf_strdup(str); gf_free(str); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err txtc_Write(GF_Box *s, GF_BitStream *bs) { GF_TextConfigBox *ptr = (GF_TextConfigBox *)s; GF_Err e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->config) gf_bs_write_data(bs, ptr->config, (u32) strlen(ptr->config)); gf_bs_write_u8(bs, 0); return GF_OK; } GF_Err txtc_Size(GF_Box *s) { GF_TextConfigBox *ptr = (GF_TextConfigBox *)s; if (ptr->config) ptr->size += strlen(ptr->config); ptr->size++; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *dac3_New() { ISOM_DECL_BOX_ALLOC(GF_AC3ConfigBox, GF_ISOM_BOX_TYPE_DAC3); return (GF_Box *)tmp; } GF_Box *dec3_New() { ISOM_DECL_BOX_ALLOC(GF_AC3ConfigBox, GF_ISOM_BOX_TYPE_DAC3); tmp->cfg.is_ec3 = 1; return (GF_Box *)tmp; } void dac3_del(GF_Box *s) { GF_AC3ConfigBox *ptr = (GF_AC3ConfigBox *)s; gf_free(ptr); } GF_Err dac3_Read(GF_Box *s, GF_BitStream *bs) { GF_AC3ConfigBox *ptr = (GF_AC3ConfigBox *)s; if (ptr == NULL) return GF_BAD_PARAM; if (ptr->cfg.is_ec3) { u32 i; ptr->cfg.brcode = gf_bs_read_int(bs, 13); ptr->cfg.nb_streams = gf_bs_read_int(bs, 3) + 1; for (i=0; i<ptr->cfg.nb_streams; i++) { ptr->cfg.streams[i].fscod = gf_bs_read_int(bs, 2); ptr->cfg.streams[i].bsid = gf_bs_read_int(bs, 5); ptr->cfg.streams[i].bsmod = gf_bs_read_int(bs, 5); ptr->cfg.streams[i].acmod = gf_bs_read_int(bs, 3); ptr->cfg.streams[i].lfon = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 3); ptr->cfg.streams[i].nb_dep_sub = gf_bs_read_int(bs, 4); if (ptr->cfg.streams[i].nb_dep_sub) { ptr->cfg.streams[i].chan_loc = gf_bs_read_int(bs, 9); } else { gf_bs_read_int(bs, 1); } } } else { ptr->cfg.nb_streams = 1; ptr->cfg.streams[0].fscod = gf_bs_read_int(bs, 2); ptr->cfg.streams[0].bsid = gf_bs_read_int(bs, 5); ptr->cfg.streams[0].bsmod = gf_bs_read_int(bs, 3); ptr->cfg.streams[0].acmod = gf_bs_read_int(bs, 3); ptr->cfg.streams[0].lfon = gf_bs_read_int(bs, 1); ptr->cfg.brcode = gf_bs_read_int(bs, 5); gf_bs_read_int(bs, 5); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dac3_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_AC3ConfigBox *ptr = (GF_AC3ConfigBox *)s; if (ptr->cfg.is_ec3) s->type = GF_ISOM_BOX_TYPE_DEC3; e = gf_isom_box_write_header(s, bs); if (ptr->cfg.is_ec3) s->type = GF_ISOM_BOX_TYPE_DAC3; if (e) return e; if (ptr->cfg.is_ec3) { u32 i; gf_bs_write_int(bs, ptr->cfg.brcode, 13); gf_bs_write_int(bs, ptr->cfg.nb_streams - 1, 3); for (i=0; i<ptr->cfg.nb_streams; i++) { gf_bs_write_int(bs, ptr->cfg.streams[i].fscod, 2); gf_bs_write_int(bs, ptr->cfg.streams[i].bsid, 5); 
gf_bs_write_int(bs, ptr->cfg.streams[i].bsmod, 5); gf_bs_write_int(bs, ptr->cfg.streams[i].acmod, 3); gf_bs_write_int(bs, ptr->cfg.streams[i].lfon, 1); gf_bs_write_int(bs, 0, 3); gf_bs_write_int(bs, ptr->cfg.streams[i].nb_dep_sub, 4); if (ptr->cfg.streams[i].nb_dep_sub) { gf_bs_write_int(bs, ptr->cfg.streams[i].chan_loc, 9); } else { gf_bs_write_int(bs, 0, 1); } } } else { gf_bs_write_int(bs, ptr->cfg.streams[0].fscod, 2); gf_bs_write_int(bs, ptr->cfg.streams[0].bsid, 5); gf_bs_write_int(bs, ptr->cfg.streams[0].bsmod, 3); gf_bs_write_int(bs, ptr->cfg.streams[0].acmod, 3); gf_bs_write_int(bs, ptr->cfg.streams[0].lfon, 1); gf_bs_write_int(bs, ptr->cfg.brcode, 5); gf_bs_write_int(bs, 0, 5); } return GF_OK; } GF_Err dac3_Size(GF_Box *s) { GF_AC3ConfigBox *ptr = (GF_AC3ConfigBox *)s; if (ptr->cfg.is_ec3) { u32 i; s->size += 2; for (i=0; i<ptr->cfg.nb_streams; i++) { s->size += 3; if (ptr->cfg.streams[i].nb_dep_sub) s->size += 1; } } else { s->size += 3; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void lsrc_del(GF_Box *s) { GF_LASERConfigurationBox *ptr = (GF_LASERConfigurationBox *)s; if (ptr == NULL) return; if (ptr->hdr) gf_free(ptr->hdr); gf_free(ptr); } GF_Err lsrc_Read(GF_Box *s, GF_BitStream *bs) { GF_LASERConfigurationBox *ptr = (GF_LASERConfigurationBox *)s; ptr->hdr_size = (u32) ptr->size; ptr->hdr = gf_malloc(sizeof(char)*ptr->hdr_size); gf_bs_read_data(bs, ptr->hdr, ptr->hdr_size); return GF_OK; } GF_Box *lsrc_New() { ISOM_DECL_BOX_ALLOC(GF_LASERConfigurationBox, GF_ISOM_BOX_TYPE_LSRC); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err lsrc_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_LASERConfigurationBox *ptr = (GF_LASERConfigurationBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_data(bs, ptr->hdr, ptr->hdr_size); return GF_OK; } GF_Err lsrc_Size(GF_Box *s) { GF_LASERConfigurationBox *ptr = (GF_LASERConfigurationBox *)s; ptr->size += ptr->hdr_size; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void lsr1_del(GF_Box *s) { GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox *)s; if (ptr == NULL) return; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s); if (ptr->slc) gf_odf_desc_del((GF_Descriptor *)ptr->slc); if (ptr->lsr_config) gf_isom_box_del((GF_Box *) ptr->lsr_config); if (ptr->descr) gf_isom_box_del((GF_Box *) ptr->descr); gf_free(ptr); } GF_Err lsr1_AddBox(GF_Box *s, GF_Box *a) { GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_LSRC: if (ptr->lsr_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->lsr_config = (GF_LASERConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_M4DS: if (ptr->descr) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->descr = (GF_MPEG4ExtensionDescriptorsBox *)a; break; default: return gf_isom_box_add_default(s, a); } return GF_OK; } GF_Err lsr1_Read(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox*)s; e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)ptr, bs); if (e) return e; ISOM_DECREASE_SIZE(ptr, 8); return gf_isom_box_array_read(s, bs, lsr1_AddBox); } GF_Box *lsr1_New() { ISOM_DECL_BOX_ALLOC(GF_LASeRSampleEntryBox, GF_ISOM_BOX_TYPE_LSR1); gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err lsr1_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_data(bs, ptr->reserved, 6); gf_bs_write_u16(bs, ptr->dataReferenceIndex); if 
(ptr->lsr_config) { e = gf_isom_box_write((GF_Box *)ptr->lsr_config, bs); if (e) return e; } if (ptr->descr) { e = gf_isom_box_write((GF_Box *)ptr->descr, bs); if (e) return e; } return e; } GF_Err lsr1_Size(GF_Box *s) { GF_Err e; GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox *)s; s->size += 8; if (ptr->lsr_config) { e = gf_isom_box_size((GF_Box *)ptr->lsr_config); if (e) return e; ptr->size += ptr->lsr_config->size; } if (ptr->descr) { e = gf_isom_box_size((GF_Box *)ptr->descr); if (e) return e; ptr->size += ptr->descr->size; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void sidx_del(GF_Box *s) { GF_SegmentIndexBox *ptr = (GF_SegmentIndexBox *) s; if (ptr == NULL) return; if (ptr->refs) gf_free(ptr->refs); gf_free(ptr); } GF_Err sidx_Read(GF_Box *s,GF_BitStream *bs) { u32 i; GF_SegmentIndexBox *ptr = (GF_SegmentIndexBox*) s; ptr->reference_ID = gf_bs_read_u32(bs); ptr->timescale = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 8); if (ptr->version==0) { ptr->earliest_presentation_time = gf_bs_read_u32(bs); ptr->first_offset = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 8); } else { ptr->earliest_presentation_time = gf_bs_read_u64(bs); ptr->first_offset = gf_bs_read_u64(bs); ISOM_DECREASE_SIZE(ptr, 16); } gf_bs_read_u16(bs); /* reserved */ ptr->nb_refs = gf_bs_read_u16(bs); ISOM_DECREASE_SIZE(ptr, 4); ptr->refs = gf_malloc(sizeof(GF_SIDXReference)*ptr->nb_refs); for (i=0; i<ptr->nb_refs; i++) { ptr->refs[i].reference_type = gf_bs_read_int(bs, 1); ptr->refs[i].reference_size = gf_bs_read_int(bs, 31); ptr->refs[i].subsegment_duration = gf_bs_read_u32(bs); ptr->refs[i].starts_with_SAP = gf_bs_read_int(bs, 1); ptr->refs[i].SAP_type = gf_bs_read_int(bs, 3); ptr->refs[i].SAP_delta_time = gf_bs_read_int(bs, 28); ISOM_DECREASE_SIZE(ptr, 12); } return GF_OK; } GF_Box *sidx_New() { ISOM_DECL_BOX_ALLOC(GF_SegmentIndexBox, GF_ISOM_BOX_TYPE_SIDX); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err sidx_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_SegmentIndexBox *ptr = (GF_SegmentIndexBox*) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->reference_ID); gf_bs_write_u32(bs, ptr->timescale); if (ptr->version==0) { gf_bs_write_u32(bs, (u32) ptr->earliest_presentation_time); gf_bs_write_u32(bs, (u32) ptr->first_offset); } else { gf_bs_write_u64(bs, ptr->earliest_presentation_time); gf_bs_write_u64(bs, ptr->first_offset); } gf_bs_write_u16(bs, 0); gf_bs_write_u16(bs, ptr->nb_refs); for (i=0; i<ptr->nb_refs; i++ ) { gf_bs_write_int(bs, ptr->refs[i].reference_type, 1); gf_bs_write_int(bs, ptr->refs[i].reference_size, 31); gf_bs_write_u32(bs, ptr->refs[i].subsegment_duration); gf_bs_write_int(bs, ptr->refs[i].starts_with_SAP, 1); gf_bs_write_int(bs, ptr->refs[i].SAP_type, 3); gf_bs_write_int(bs, ptr->refs[i].SAP_delta_time, 28); } return GF_OK; } GF_Err sidx_Size(GF_Box *s) { GF_SegmentIndexBox *ptr = (GF_SegmentIndexBox*) s; ptr->size += 12; if (ptr->version==0) { ptr->size += 8; } else { ptr->size += 16; } ptr->size += ptr->nb_refs * 12; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void ssix_del(GF_Box *s) { u32 i; GF_SubsegmentIndexBox *ptr = (GF_SubsegmentIndexBox *)s; if (ptr == NULL) return; if (ptr->subsegments) { for (i = 0; i < ptr->subsegment_count; i++) { GF_SubsegmentInfo *subsegment = &ptr->subsegments[i]; if (subsegment->ranges) gf_free(subsegment->ranges); } gf_free(ptr->subsegments); } gf_free(ptr); } GF_Err ssix_Read(GF_Box *s, GF_BitStream *bs) { u32 i,j; GF_SubsegmentIndexBox *ptr = (GF_SubsegmentIndexBox*)s; if 
(ptr->size < 4) return GF_BAD_PARAM; ptr->subsegment_count = gf_bs_read_u32(bs); ptr->size -= 4; ptr->subsegments = gf_malloc(ptr->subsegment_count*sizeof(GF_SubsegmentInfo)); for (i = 0; i < ptr->subsegment_count; i++) { GF_SubsegmentInfo *subseg = &ptr->subsegments[i]; if (ptr->size < 4) return GF_BAD_PARAM; subseg->range_count = gf_bs_read_u32(bs); ptr->size -= 4; if (ptr->size < subseg->range_count*4) return GF_BAD_PARAM; subseg->ranges = (GF_SubsegmentRangeInfo*) gf_malloc(sizeof(GF_SubsegmentRangeInfo) * subseg->range_count); for (j = 0; j < subseg->range_count; j++) { subseg->ranges[j].level = gf_bs_read_u8(bs); subseg->ranges[j].range_size = gf_bs_read_u24(bs); ptr->size -= 4; } } return GF_OK; } GF_Box *ssix_New() { ISOM_DECL_BOX_ALLOC(GF_SubsegmentIndexBox, GF_ISOM_BOX_TYPE_SSIX); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err ssix_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i, j; GF_SubsegmentIndexBox *ptr = (GF_SubsegmentIndexBox*)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->subsegment_count); for (i = 0; i<ptr->subsegment_count; i++) { gf_bs_write_u32(bs, ptr->subsegments[i].range_count); for (j = 0; j < ptr->subsegments[i].range_count; j++) { gf_bs_write_u8(bs, ptr->subsegments[i].ranges[j].level); gf_bs_write_u24(bs, ptr->subsegments[i].ranges[j].range_size); } } return GF_OK; } GF_Err ssix_Size(GF_Box *s) { u32 i; GF_SubsegmentIndexBox *ptr = (GF_SubsegmentIndexBox*)s; ptr->size += 4; for (i = 0; i < ptr->subsegment_count; i++) { ptr->size += 4 + 4 * ptr->subsegments[i].range_count; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void leva_del(GF_Box *s) { GF_LevelAssignmentBox *ptr = (GF_LevelAssignmentBox *)s; if (ptr == NULL) return; if (ptr->levels) gf_free(ptr->levels); gf_free(ptr); } GF_Err leva_Read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_LevelAssignmentBox *ptr = (GF_LevelAssignmentBox*)s; if (ptr->size < 4) return GF_BAD_PARAM; ptr->level_count = gf_bs_read_u8(bs); ptr->size -= 4; GF_SAFE_ALLOC_N(ptr->levels, ptr->level_count, GF_LevelAssignment); for (i = 0; i < ptr->level_count; i++) { GF_LevelAssignment *level = &ptr->levels[i]; u8 tmp; if (ptr->size < 5) return GF_BAD_PARAM; level->track_id = gf_bs_read_u32(bs); tmp = gf_bs_read_u8(bs); level->padding_flag = tmp >> 7; level->type = tmp & 0x7F; if (level->type == 0) { level->grouping_type = gf_bs_read_u32(bs); } else if (level->type == 1) { level->grouping_type = gf_bs_read_u32(bs); level->grouping_type_parameter = gf_bs_read_u32(bs); } else if (level->type == 4) { level->sub_track_id = gf_bs_read_u32(bs); } } return GF_OK; } GF_Box *leva_New() { ISOM_DECL_BOX_ALLOC(GF_LevelAssignmentBox, GF_ISOM_BOX_TYPE_LEVA); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err leva_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_LevelAssignmentBox *ptr = (GF_LevelAssignmentBox*)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->level_count); for (i = 0; i<ptr->level_count; i++) { gf_bs_write_u32(bs, ptr->levels[i].track_id); gf_bs_write_u8(bs, ptr->levels[i].padding_flag << 7 | (ptr->levels[i].type & 0x7F)); if (ptr->levels[i].type == 0) { gf_bs_write_u32(bs, ptr->levels[i].grouping_type); } else if (ptr->levels[i].type == 1) { gf_bs_write_u32(bs, ptr->levels[i].grouping_type); gf_bs_write_u32(bs, ptr->levels[i].grouping_type_parameter); } else if (ptr->levels[i].type == 4) { gf_bs_write_u32(bs, ptr->levels[i].sub_track_id); } } return GF_OK; } GF_Err leva_Size(GF_Box *s) { u32 i; GF_LevelAssignmentBox *ptr = 
(GF_LevelAssignmentBox*)s; ptr->size += 1; for (i = 0; i < ptr->level_count; i++) { ptr->size += 5; if (ptr->levels[i].type == 0 || ptr->levels[i].type == 4) { ptr->size += 4; } else if (ptr->levels[i].type == 1) { ptr->size += 8; } } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *pcrb_New() { ISOM_DECL_BOX_ALLOC(GF_PcrInfoBox, GF_ISOM_BOX_TYPE_PCRB); return (GF_Box *)tmp; } void pcrb_del(GF_Box *s) { GF_PcrInfoBox *ptr = (GF_PcrInfoBox *) s; if (ptr == NULL) return; if (ptr->pcr_values) gf_free(ptr->pcr_values); gf_free(ptr); } GF_Err pcrb_Read(GF_Box *s,GF_BitStream *bs) { u32 i; GF_PcrInfoBox *ptr = (GF_PcrInfoBox*) s; ptr->subsegment_count = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); ptr->pcr_values = gf_malloc(sizeof(u64)*ptr->subsegment_count); for (i=0; i<ptr->subsegment_count; i++) { u64 data1 = gf_bs_read_u32(bs); u64 data2 = gf_bs_read_u16(bs); ISOM_DECREASE_SIZE(ptr, 6); ptr->pcr_values[i] = (data1 << 10) | (data2 >> 6); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err pcrb_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_PcrInfoBox *ptr = (GF_PcrInfoBox*) s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->subsegment_count); for (i=0; i<ptr->subsegment_count; i++ ) { u32 data1 = (u32) (ptr->pcr_values[i] >> 10); u16 data2 = (u16) (ptr->pcr_values[i] << 6); gf_bs_write_u32(bs, data1); gf_bs_write_u16(bs, data2); } return GF_OK; } GF_Err pcrb_Size(GF_Box *s) { GF_PcrInfoBox *ptr = (GF_PcrInfoBox*) s; ptr->size += 4; ptr->size += ptr->subsegment_count * 6; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *subs_New() { ISOM_DECL_BOX_ALLOC(GF_SubSampleInformationBox, GF_ISOM_BOX_TYPE_SUBS); tmp->Samples = gf_list_new(); return (GF_Box *)tmp; } void subs_del(GF_Box *s) { GF_SubSampleInformationBox *ptr = (GF_SubSampleInformationBox *)s; if (ptr == NULL) return; while (gf_list_count(ptr->Samples)) { GF_SubSampleInfoEntry *pSamp; pSamp = (GF_SubSampleInfoEntry*)gf_list_get(ptr->Samples, 0); while (gf_list_count(pSamp->SubSamples)) { GF_SubSampleEntry *pSubSamp; pSubSamp = (GF_SubSampleEntry*) gf_list_get(pSamp->SubSamples, 0); gf_free(pSubSamp); gf_list_rem(pSamp->SubSamples, 0); } gf_list_del(pSamp->SubSamples); gf_free(pSamp); gf_list_rem(ptr->Samples, 0); } gf_list_del(ptr->Samples); gf_free(ptr); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err subs_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i, j, entry_count; u16 subsample_count; GF_SubSampleInfoEntry *pSamp; GF_SubSampleEntry *pSubSamp; GF_SubSampleInformationBox *ptr = (GF_SubSampleInformationBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; entry_count = gf_list_count(ptr->Samples); gf_bs_write_u32(bs, entry_count); for (i=0; i<entry_count; i++) { pSamp = (GF_SubSampleInfoEntry*) gf_list_get(ptr->Samples, i); subsample_count = gf_list_count(pSamp->SubSamples); gf_bs_write_u32(bs, pSamp->sample_delta); gf_bs_write_u16(bs, subsample_count); for (j=0; j<subsample_count; j++) { pSubSamp = (GF_SubSampleEntry*) gf_list_get(pSamp->SubSamples, j); if (ptr->version == 1) { gf_bs_write_u32(bs, pSubSamp->subsample_size); } else { gf_bs_write_u16(bs, pSubSamp->subsample_size); } gf_bs_write_u8(bs, pSubSamp->subsample_priority); gf_bs_write_u8(bs, pSubSamp->discardable); gf_bs_write_u32(bs, pSubSamp->reserved); } } return e; } GF_Err subs_Size(GF_Box *s) { GF_SubSampleInformationBox *ptr = (GF_SubSampleInformationBox *) s; GF_SubSampleInfoEntry *pSamp; u32 entry_count, i; u16 subsample_count; // add 4 byte for entry_count 
ptr->size += 4; entry_count = gf_list_count(ptr->Samples); for (i=0; i<entry_count; i++) { pSamp = (GF_SubSampleInfoEntry*) gf_list_get(ptr->Samples, i); subsample_count = gf_list_count(pSamp->SubSamples); // 4 byte for sample_delta, 2 byte for subsample_count // and 6 + (4 or 2) bytes for each subsample ptr->size += 4 + 2 + subsample_count * (6 + (ptr->version==1 ? 4 : 2)); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Err subs_Read(GF_Box *s, GF_BitStream *bs) { GF_SubSampleInformationBox *ptr = (GF_SubSampleInformationBox *)s; u32 entry_count, i, j; u16 subsample_count; entry_count = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); for (i=0; i<entry_count; i++) { u32 subs_size=0; GF_SubSampleInfoEntry *pSamp = (GF_SubSampleInfoEntry*) gf_malloc(sizeof(GF_SubSampleInfoEntry)); if (!pSamp) return GF_OUT_OF_MEM; memset(pSamp, 0, sizeof(GF_SubSampleInfoEntry)); pSamp->SubSamples = gf_list_new(); pSamp->sample_delta = gf_bs_read_u32(bs); subsample_count = gf_bs_read_u16(bs); subs_size=6; for (j=0; j<subsample_count; j++) { GF_SubSampleEntry *pSubSamp = (GF_SubSampleEntry*) gf_malloc(sizeof(GF_SubSampleEntry)); if (!pSubSamp) return GF_OUT_OF_MEM; memset(pSubSamp, 0, sizeof(GF_SubSampleEntry)); if (ptr->version==1) { pSubSamp->subsample_size = gf_bs_read_u32(bs); subs_size+=4; } else { pSubSamp->subsample_size = gf_bs_read_u16(bs); subs_size+=2; } pSubSamp->subsample_priority = gf_bs_read_u8(bs); pSubSamp->discardable = gf_bs_read_u8(bs); pSubSamp->reserved = gf_bs_read_u32(bs); subs_size+=6; gf_list_add(pSamp->SubSamples, pSubSamp); } gf_list_add(ptr->Samples, pSamp); ISOM_DECREASE_SIZE(ptr, subs_size); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_FRAGMENTS GF_Box *tfdt_New() { ISOM_DECL_BOX_ALLOC(GF_TFBaseMediaDecodeTimeBox, GF_ISOM_BOX_TYPE_TFDT); return (GF_Box *)tmp; } void tfdt_del(GF_Box *s) { gf_free(s); } /*this is using chpl format according to some NeroRecode samples*/ GF_Err tfdt_Read(GF_Box *s,GF_BitStream *bs) { GF_TFBaseMediaDecodeTimeBox *ptr = (GF_TFBaseMediaDecodeTimeBox *)s; if (ptr->version==1) { ptr->baseMediaDecodeTime = gf_bs_read_u64(bs); ISOM_DECREASE_SIZE(ptr, 8); } else { ptr->baseMediaDecodeTime = (u32) gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tfdt_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TFBaseMediaDecodeTimeBox *ptr = (GF_TFBaseMediaDecodeTimeBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->version==1) { gf_bs_write_u64(bs, ptr->baseMediaDecodeTime); } else { gf_bs_write_u32(bs, (u32) ptr->baseMediaDecodeTime); } return GF_OK; } GF_Err tfdt_Size(GF_Box *s) { GF_TFBaseMediaDecodeTimeBox *ptr = (GF_TFBaseMediaDecodeTimeBox *)s; if (ptr->baseMediaDecodeTime<=0xFFFFFFFF) { ptr->version = 0; ptr->size += 4; } else { ptr->version = 1; ptr->size += 8; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ GF_Box *rvcc_New() { ISOM_DECL_BOX_ALLOC(GF_RVCConfigurationBox, GF_ISOM_BOX_TYPE_RVCC); return (GF_Box *)tmp; } void rvcc_del(GF_Box *s) { gf_free(s); } GF_Err rvcc_Read(GF_Box *s,GF_BitStream *bs) { GF_RVCConfigurationBox *ptr = (GF_RVCConfigurationBox*)s; ptr->predefined_rvc_config = gf_bs_read_u16(bs); ISOM_DECREASE_SIZE(ptr, 2); if (!ptr->predefined_rvc_config) { ptr->rvc_meta_idx = gf_bs_read_u16(bs); ISOM_DECREASE_SIZE(ptr, 2); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err rvcc_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_RVCConfigurationBox *ptr = (GF_RVCConfigurationBox*) s; e = gf_isom_box_write_header(s, 
bs); if (e) return e; gf_bs_write_u16(bs, ptr->predefined_rvc_config); if (!ptr->predefined_rvc_config) { gf_bs_write_u16(bs, ptr->rvc_meta_idx); } return GF_OK; } GF_Err rvcc_Size(GF_Box *s) { GF_RVCConfigurationBox *ptr = (GF_RVCConfigurationBox *)s; ptr->size += 2; if (! ptr->predefined_rvc_config) ptr->size += 2; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *sbgp_New() { ISOM_DECL_BOX_ALLOC(GF_SampleGroupBox, GF_ISOM_BOX_TYPE_SBGP); return (GF_Box *)tmp; } void sbgp_del(GF_Box *a) { GF_SampleGroupBox *p = (GF_SampleGroupBox *)a; if (p->sample_entries) gf_free(p->sample_entries); gf_free(p); } GF_Err sbgp_Read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_SampleGroupBox *ptr = (GF_SampleGroupBox *)s; ptr->grouping_type = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); if (ptr->version==1) { ptr->grouping_type_parameter = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); } ptr->entry_count = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); ptr->sample_entries = gf_malloc(sizeof(GF_SampleGroupEntry)*ptr->entry_count); if (!ptr->sample_entries) return GF_IO_ERR; for (i=0; i<ptr->entry_count; i++) { ptr->sample_entries[i].sample_count = gf_bs_read_u32(bs); ptr->sample_entries[i].group_description_index = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 8); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err sbgp_Write(GF_Box *s, GF_BitStream *bs) { u32 i; GF_Err e; GF_SampleGroupBox *p = (GF_SampleGroupBox*)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, p->grouping_type); if (p->version==1) gf_bs_write_u32(bs, p->grouping_type_parameter); gf_bs_write_u32(bs, p->entry_count); for (i = 0; i<p->entry_count; i++ ) { gf_bs_write_u32(bs, p->sample_entries[i].sample_count); gf_bs_write_u32(bs, p->sample_entries[i].group_description_index); } return GF_OK; } GF_Err sbgp_Size(GF_Box *s) { GF_SampleGroupBox *p = (GF_SampleGroupBox*)s; p->size += 8; if (p->grouping_type_parameter) p->version=1; if (p->version==1) p->size += 4; p->size += 8*p->entry_count; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ static void *sgpd_parse_entry(u32 grouping_type, GF_BitStream *bs, u32 entry_size, u32 *total_bytes) { Bool null_size_ok = GF_FALSE; GF_DefaultSampleGroupDescriptionEntry *ptr; switch (grouping_type) { case GF_ISOM_SAMPLE_GROUP_ROLL: case GF_ISOM_SAMPLE_GROUP_PROL: { GF_RollRecoveryEntry *ptr; GF_SAFEALLOC(ptr, GF_RollRecoveryEntry); if (!ptr) return NULL; ptr->roll_distance = gf_bs_read_int(bs, 16); *total_bytes = 2; return ptr; } case GF_ISOM_SAMPLE_GROUP_RAP: { GF_VisualRandomAccessEntry *ptr; GF_SAFEALLOC(ptr, GF_VisualRandomAccessEntry); if (!ptr) return NULL; ptr->num_leading_samples_known = gf_bs_read_int(bs, 1); ptr->num_leading_samples = gf_bs_read_int(bs, 7); *total_bytes = 1; return ptr; } case GF_ISOM_SAMPLE_GROUP_SAP: { GF_SAPEntry *ptr; GF_SAFEALLOC(ptr, GF_SAPEntry); if (!ptr) return NULL; ptr->dependent_flag = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 3); ptr->SAP_type = gf_bs_read_int(bs, 4); *total_bytes = 1; return ptr; } case GF_ISOM_SAMPLE_GROUP_SYNC: { GF_SYNCEntry *ptr; GF_SAFEALLOC(ptr, GF_SYNCEntry); if (!ptr) return NULL; gf_bs_read_int(bs, 2); ptr->NALU_type = gf_bs_read_int(bs, 6); *total_bytes = 1; return ptr; } case GF_ISOM_SAMPLE_GROUP_TELE: { GF_TemporalLevelEntry *ptr; GF_SAFEALLOC(ptr, GF_TemporalLevelEntry); if (!ptr) return NULL; ptr->level_independently_decodable = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 7); *total_bytes = 1; return ptr; } case GF_ISOM_SAMPLE_GROUP_SEIG: { GF_CENCSampleEncryptionGroupEntry *ptr; GF_SAFEALLOC(ptr, 
GF_CENCSampleEncryptionGroupEntry); if (!ptr) return NULL; gf_bs_read_u8(bs); //reserved ptr->crypt_byte_block = gf_bs_read_int(bs, 4); ptr->skip_byte_block = gf_bs_read_int(bs, 4); ptr->IsProtected = gf_bs_read_u8(bs); ptr->Per_Sample_IV_size = gf_bs_read_u8(bs); gf_bs_read_data(bs, (char *)ptr->KID, 16); *total_bytes = 20; if ((ptr->IsProtected == 1) && !ptr->Per_Sample_IV_size) { ptr->constant_IV_size = gf_bs_read_u8(bs); assert((ptr->constant_IV_size == 8) || (ptr->constant_IV_size == 16)); gf_bs_read_data(bs, (char *)ptr->constant_IV, ptr->constant_IV_size); *total_bytes += 1 + ptr->constant_IV_size; } if (!entry_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] seig sample group does not indicate entry size, deprecated in spec\n")); } return ptr; } case GF_ISOM_SAMPLE_GROUP_OINF: { GF_OperatingPointsInformation *ptr = gf_isom_oinf_new_entry(); u32 s = (u32) gf_bs_get_position(bs); gf_isom_oinf_read_entry(ptr, bs); *total_bytes = (u32) gf_bs_get_position(bs) - s; if (!entry_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] oinf sample group does not indicate entry size, deprecated in spec\n")); } return ptr; } case GF_ISOM_SAMPLE_GROUP_LINF: { GF_LHVCLayerInformation *ptr = gf_isom_linf_new_entry(); u32 s = (u32) gf_bs_get_position(bs); gf_isom_linf_read_entry(ptr, bs); *total_bytes = (u32) gf_bs_get_position(bs) - s; if (!entry_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] linf sample group does not indicate entry size, deprecated in spec\n")); } return ptr; } case GF_ISOM_SAMPLE_GROUP_TRIF: if (! entry_size) { u32 flags = gf_bs_peek_bits(bs, 24, 0); if (flags & 0x10000) entry_size=3; else { if (flags & 0x80000) entry_size=7; else entry_size=11; //have dependency list if (flags & 0x200000) { u32 nb_entries = gf_bs_peek_bits(bs, 16, entry_size); entry_size += 2 + 2*nb_entries; } } GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] trif sample group does not indicate entry size, deprecated in spec\n")); } break; case GF_ISOM_SAMPLE_GROUP_NALM: if (! entry_size) { u64 start = gf_bs_get_position(bs); Bool rle, large_size; u32 entry_count; gf_bs_read_int(bs, 6); large_size = gf_bs_read_int(bs, 1); rle = gf_bs_read_int(bs, 1); entry_count = gf_bs_read_int(bs, large_size ? 16 : 8); gf_bs_seek(bs, start); entry_size = 1 + large_size ? 2 : 1; entry_size += entry_count * 2; if (rle) entry_size += entry_count * (large_size ? 2 : 1); GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] nalm sample group does not indicate entry size, deprecated in spec\n")); } break; case GF_ISOM_SAMPLE_GROUP_TSAS: case GF_ISOM_SAMPLE_GROUP_STSA: null_size_ok = GF_TRUE; break; //TODO, add support for these ones ? 
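//tscl and lbli entries have a fixed layout (20 and 2 bytes respectively), so they are kept as opaque default entries of known size rather than dedicated structures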
case GF_ISOM_SAMPLE_GROUP_TSCL: entry_size = 20; break; case GF_ISOM_SAMPLE_GROUP_LBLI: entry_size = 2; break; default: break; } if (!entry_size && !null_size_ok) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] %s sample group does not indicate entry size and is not implemented, cannot parse!\n", gf_4cc_to_str( grouping_type) )); return NULL; } GF_SAFEALLOC(ptr, GF_DefaultSampleGroupDescriptionEntry); if (!ptr) return NULL; if (entry_size) { ptr->length = entry_size; ptr->data = (u8 *) gf_malloc(sizeof(u8)*ptr->length); gf_bs_read_data(bs, (char *) ptr->data, ptr->length); *total_bytes = entry_size; } return ptr; } static void sgpd_del_entry(u32 grouping_type, void *entry) { switch (grouping_type) { case GF_ISOM_SAMPLE_GROUP_SYNC: case GF_ISOM_SAMPLE_GROUP_ROLL: case GF_ISOM_SAMPLE_GROUP_PROL: case GF_ISOM_SAMPLE_GROUP_RAP: case GF_ISOM_SAMPLE_GROUP_SEIG: case GF_ISOM_SAMPLE_GROUP_TELE: case GF_ISOM_SAMPLE_GROUP_SAP: gf_free(entry); return; case GF_ISOM_SAMPLE_GROUP_OINF: gf_isom_oinf_del_entry(entry); return; case GF_ISOM_SAMPLE_GROUP_LINF: gf_isom_linf_del_entry(entry); return; default: { GF_DefaultSampleGroupDescriptionEntry *ptr = (GF_DefaultSampleGroupDescriptionEntry *)entry; if (ptr->data) gf_free(ptr->data); gf_free(ptr); } } } void sgpd_write_entry(u32 grouping_type, void *entry, GF_BitStream *bs) { switch (grouping_type) { case GF_ISOM_SAMPLE_GROUP_ROLL: case GF_ISOM_SAMPLE_GROUP_PROL: gf_bs_write_int(bs, ((GF_RollRecoveryEntry*)entry)->roll_distance, 16); return; case GF_ISOM_SAMPLE_GROUP_RAP: gf_bs_write_int(bs, ((GF_VisualRandomAccessEntry*)entry)->num_leading_samples_known, 1); gf_bs_write_int(bs, ((GF_VisualRandomAccessEntry*)entry)->num_leading_samples, 7); return; case GF_ISOM_SAMPLE_GROUP_SAP: gf_bs_write_int(bs, ((GF_SAPEntry*)entry)->dependent_flag, 1); gf_bs_write_int(bs, 0, 3); gf_bs_write_int(bs, ((GF_SAPEntry*)entry)->SAP_type, 4); return; case GF_ISOM_SAMPLE_GROUP_SYNC: gf_bs_write_int(bs, 0, 2); gf_bs_write_int(bs, ((GF_SYNCEntry*)entry)->NALU_type, 6); return; case GF_ISOM_SAMPLE_GROUP_TELE: gf_bs_write_int(bs, ((GF_TemporalLevelEntry*)entry)->level_independently_decodable, 1); gf_bs_write_int(bs, 0, 7); return; case GF_ISOM_SAMPLE_GROUP_SEIG: gf_bs_write_u8(bs, 0x0); gf_bs_write_int(bs, ((GF_CENCSampleEncryptionGroupEntry*)entry)->crypt_byte_block, 4); gf_bs_write_int(bs, ((GF_CENCSampleEncryptionGroupEntry*)entry)->skip_byte_block, 4); gf_bs_write_u8(bs, ((GF_CENCSampleEncryptionGroupEntry *)entry)->IsProtected); gf_bs_write_u8(bs, ((GF_CENCSampleEncryptionGroupEntry *)entry)->Per_Sample_IV_size); gf_bs_write_data(bs, (char *)((GF_CENCSampleEncryptionGroupEntry *)entry)->KID, 16); if ((((GF_CENCSampleEncryptionGroupEntry *)entry)->IsProtected == 1) && !((GF_CENCSampleEncryptionGroupEntry *)entry)->Per_Sample_IV_size) { gf_bs_write_u8(bs, ((GF_CENCSampleEncryptionGroupEntry *)entry)->constant_IV_size); gf_bs_write_data(bs, (char *)((GF_CENCSampleEncryptionGroupEntry *)entry)->constant_IV, ((GF_CENCSampleEncryptionGroupEntry *)entry)->constant_IV_size); } return; case GF_ISOM_SAMPLE_GROUP_OINF: gf_isom_oinf_write_entry(entry, bs); return; case GF_ISOM_SAMPLE_GROUP_LINF: gf_isom_linf_write_entry(entry, bs); return; default: { GF_DefaultSampleGroupDescriptionEntry *ptr = (GF_DefaultSampleGroupDescriptionEntry *)entry; if (ptr->length) gf_bs_write_data(bs, (char *) ptr->data, ptr->length); } } } #ifndef GPAC_DISABLE_ISOM_WRITE static u32 sgpd_size_entry(u32 grouping_type, void *entry) { switch (grouping_type) { case GF_ISOM_SAMPLE_GROUP_ROLL: case 
GF_ISOM_SAMPLE_GROUP_PROL: return 2; case GF_ISOM_SAMPLE_GROUP_TELE: case GF_ISOM_SAMPLE_GROUP_RAP: case GF_ISOM_SAMPLE_GROUP_SAP: case GF_ISOM_SAMPLE_GROUP_SYNC: return 1; case GF_ISOM_SAMPLE_GROUP_TSCL: return 20; case GF_ISOM_SAMPLE_GROUP_LBLI: return 2; case GF_ISOM_SAMPLE_GROUP_TSAS: case GF_ISOM_SAMPLE_GROUP_STSA: return 0; case GF_ISOM_SAMPLE_GROUP_SEIG: return ((((GF_CENCSampleEncryptionGroupEntry *)entry)->IsProtected == 1) && !((GF_CENCSampleEncryptionGroupEntry *)entry)->Per_Sample_IV_size) ? 21 + ((GF_CENCSampleEncryptionGroupEntry *)entry)->constant_IV_size : 20; case GF_ISOM_SAMPLE_GROUP_OINF: return gf_isom_oinf_size_entry(entry); case GF_ISOM_SAMPLE_GROUP_LINF: return gf_isom_linf_size_entry(entry); default: return ((GF_DefaultSampleGroupDescriptionEntry *)entry)->length; } } #endif GF_Box *sgpd_New() { ISOM_DECL_BOX_ALLOC(GF_SampleGroupDescriptionBox, GF_ISOM_BOX_TYPE_SGPD); /*version 0 is deprecated, use v1 by default*/ tmp->version = 1; tmp->group_descriptions = gf_list_new(); return (GF_Box *)tmp; } void sgpd_del(GF_Box *a) { GF_SampleGroupDescriptionBox *p = (GF_SampleGroupDescriptionBox *)a; while (gf_list_count(p->group_descriptions)) { void *ptr = gf_list_last(p->group_descriptions); sgpd_del_entry(p->grouping_type, ptr); gf_list_rem_last(p->group_descriptions); } gf_list_del(p->group_descriptions); gf_free(p); } GF_Err sgpd_Read(GF_Box *s, GF_BitStream *bs) { u32 entry_count; GF_SampleGroupDescriptionBox *p = (GF_SampleGroupDescriptionBox *)s; p->grouping_type = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(p, 4); if (p->version>=1) { p->default_length = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(p, 4); } if (p->version>=2) { p->default_description_index = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(p, 4); } entry_count = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(p, 4); if (entry_count>p->size) return GF_ISOM_INVALID_FILE; while (entry_count) { void *ptr; u32 parsed_bytes=0; u32 size = p->default_length; if ((p->version>=1) && !size) { size = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(p, 4); } ptr = sgpd_parse_entry(p->grouping_type, bs, size, &parsed_bytes); //don't return an error, just stop parsing so that we skip over the sgpd box if (!ptr) return GF_OK; ISOM_DECREASE_SIZE(p, parsed_bytes); gf_list_add(p->group_descriptions, ptr); entry_count--; } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err sgpd_Write(GF_Box *s, GF_BitStream *bs) { u32 i; GF_SampleGroupDescriptionBox *p = (GF_SampleGroupDescriptionBox *)s; GF_Err e; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, p->grouping_type); if (p->version>=1) gf_bs_write_u32(bs, p->default_length); if (p->version>=2) gf_bs_write_u32(bs, p->default_description_index); gf_bs_write_u32(bs, gf_list_count(p->group_descriptions) ); for (i=0; i<gf_list_count(p->group_descriptions); i++) { void *ptr = gf_list_get(p->group_descriptions, i); if ((p->version >= 1) && !p->default_length) { u32 size = sgpd_size_entry(p->grouping_type, ptr); gf_bs_write_u32(bs, size); } sgpd_write_entry(p->grouping_type, ptr, bs); } return GF_OK; } GF_Err sgpd_Size(GF_Box *s) { u32 i; GF_SampleGroupDescriptionBox *p = (GF_SampleGroupDescriptionBox *)s; p->size += 8; //we force all sample groups to version 1, v0 being deprecated p->version=1; p->size += 4; if (p->version>=2) p->size += 4; p->default_length = 0; for (i=0; i<gf_list_count(p->group_descriptions); i++) { void *ptr = gf_list_get(p->group_descriptions, i); u32 size = sgpd_size_entry(p->grouping_type, ptr); p->size += size; if (!p->default_length) { p->default_length = size; } 
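//when an entry size differs, default_length falls back to 0 so that per-entry size fields are written for version>=1 boxes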
else if (p->default_length != size) { p->default_length = 0; } } if (p->version>=1) { if (!p->default_length) p->size += gf_list_count(p->group_descriptions)*4; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void saiz_del(GF_Box *s) { GF_SampleAuxiliaryInfoSizeBox*ptr = (GF_SampleAuxiliaryInfoSizeBox*)s; if (ptr == NULL) return; if (ptr->sample_info_size) gf_free(ptr->sample_info_size); gf_free(ptr); } GF_Err saiz_Read(GF_Box *s, GF_BitStream *bs) { GF_SampleAuxiliaryInfoSizeBox*ptr = (GF_SampleAuxiliaryInfoSizeBox*)s; if (ptr->flags & 1) { ptr->aux_info_type = gf_bs_read_u32(bs); ptr->aux_info_type_parameter = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 8); } ptr->default_sample_info_size = gf_bs_read_u8(bs); ptr->sample_count = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 5); if (ptr->default_sample_info_size == 0) { ptr->sample_info_size = gf_malloc(sizeof(u8)*ptr->sample_count); gf_bs_read_data(bs, (char *) ptr->sample_info_size, ptr->sample_count); ISOM_DECREASE_SIZE(ptr, ptr->sample_count); } return GF_OK; } GF_Box *saiz_New() { ISOM_DECL_BOX_ALLOC(GF_SampleAuxiliaryInfoSizeBox, GF_ISOM_BOX_TYPE_SAIZ); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err saiz_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SampleAuxiliaryInfoSizeBox*ptr = (GF_SampleAuxiliaryInfoSizeBox*) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->flags & 1) { gf_bs_write_u32(bs, ptr->aux_info_type); gf_bs_write_u32(bs, ptr->aux_info_type_parameter); } gf_bs_write_u8(bs, ptr->default_sample_info_size); gf_bs_write_u32(bs, ptr->sample_count); if (!ptr->default_sample_info_size) { gf_bs_write_data(bs, (char *) ptr->sample_info_size, ptr->sample_count); } return GF_OK; } GF_Err saiz_Size(GF_Box *s) { GF_SampleAuxiliaryInfoSizeBox *ptr = (GF_SampleAuxiliaryInfoSizeBox*)s; if (ptr->aux_info_type || ptr->aux_info_type_parameter) { ptr->flags |= 1; } if (ptr->flags & 1) ptr->size += 8; ptr->size += 5; if (ptr->default_sample_info_size==0) ptr->size += ptr->sample_count; return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE void saio_del(GF_Box *s) { GF_SampleAuxiliaryInfoOffsetBox *ptr = (GF_SampleAuxiliaryInfoOffsetBox*)s; if (ptr == NULL) return; if (ptr->offsets) gf_free(ptr->offsets); if (ptr->offsets_large) gf_free(ptr->offsets_large); gf_free(ptr); } GF_Err saio_Read(GF_Box *s, GF_BitStream *bs) { GF_SampleAuxiliaryInfoOffsetBox *ptr = (GF_SampleAuxiliaryInfoOffsetBox *)s; if (ptr->flags & 1) { ptr->aux_info_type = gf_bs_read_u32(bs); ptr->aux_info_type_parameter = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 8); } ptr->entry_count = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); if (ptr->entry_count) { u32 i; if (ptr->version==0) { ptr->offsets = gf_malloc(sizeof(u32)*ptr->entry_count); for (i=0; i<ptr->entry_count; i++) ptr->offsets[i] = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4*ptr->entry_count); } else { ptr->offsets_large = gf_malloc(sizeof(u64)*ptr->entry_count); for (i=0; i<ptr->entry_count; i++) ptr->offsets_large[i] = gf_bs_read_u64(bs); ISOM_DECREASE_SIZE(ptr, 8*ptr->entry_count); } } return GF_OK; } GF_Box *saio_New() { ISOM_DECL_BOX_ALLOC(GF_SampleAuxiliaryInfoOffsetBox, GF_ISOM_BOX_TYPE_SAIO); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err saio_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SampleAuxiliaryInfoOffsetBox *ptr = (GF_SampleAuxiliaryInfoOffsetBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->flags & 1) { gf_bs_write_u32(bs, ptr->aux_info_type); 
gf_bs_write_u32(bs, ptr->aux_info_type_parameter); } gf_bs_write_u32(bs, ptr->entry_count); if (ptr->entry_count) { u32 i; //store position in bitstream before writing data - offsets can be NULL if a single offset is rewritten later on (cf senc_write) ptr->offset_first_offset_field = gf_bs_get_position(bs); if (ptr->version==0) { if (!ptr->offsets) { gf_bs_write_u32(bs, 0); } else { for (i=0; i<ptr->entry_count; i++) gf_bs_write_u32(bs, ptr->offsets[i]); } } else { if (!ptr->offsets_large) { gf_bs_write_u64(bs, 0); } else { for (i=0; i<ptr->entry_count; i++) gf_bs_write_u64(bs, ptr->offsets_large[i]); } } } return GF_OK; } GF_Err saio_Size(GF_Box *s) { GF_SampleAuxiliaryInfoOffsetBox *ptr = (GF_SampleAuxiliaryInfoOffsetBox*)s; if (ptr->aux_info_type || ptr->aux_info_type_parameter) { ptr->flags |= 1; } if (ptr->offsets_large) { ptr->version = 1; } if (ptr->flags & 1) ptr->size += 8; ptr->size += 4; //a little optim here: in cenc, the saio always points to a single data block, only one entry is needed switch (ptr->aux_info_type) { case GF_ISOM_CENC_SCHEME: case GF_ISOM_CBC_SCHEME: case GF_ISOM_CENS_SCHEME: case GF_ISOM_CBCS_SCHEME: if (ptr->offsets_large) gf_free(ptr->offsets_large); if (ptr->offsets) gf_free(ptr->offsets); ptr->offsets_large = NULL; ptr->offsets = NULL; ptr->entry_count = 1; break; } ptr->size += ((ptr->version==1) ? 8 : 4) * ptr->entry_count; return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE void prft_del(GF_Box *s) { gf_free(s); } GF_Err prft_Read(GF_Box *s,GF_BitStream *bs) { GF_ProducerReferenceTimeBox *ptr = (GF_ProducerReferenceTimeBox *) s; ptr->refTrackID = gf_bs_read_u32(bs); ptr->ntp = gf_bs_read_u64(bs); if (ptr->version==0) { ptr->timestamp = gf_bs_read_u32(bs); } else { ptr->timestamp = gf_bs_read_u64(bs); } return GF_OK; } GF_Box *prft_New() { ISOM_DECL_BOX_ALLOC(GF_ProducerReferenceTimeBox, GF_ISOM_BOX_TYPE_PRFT); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err prft_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ProducerReferenceTimeBox *ptr = (GF_ProducerReferenceTimeBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->refTrackID); gf_bs_write_u64(bs, ptr->ntp); if (ptr->version==0) { gf_bs_write_u32(bs, (u32) ptr->timestamp); } else { gf_bs_write_u64(bs, ptr->timestamp); } return GF_OK; } GF_Err prft_Size(GF_Box *s) { GF_ProducerReferenceTimeBox *ptr = (GF_ProducerReferenceTimeBox*)s; ptr->size += 4+8+ (ptr->version ? 
8 : 4); return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *trgr_New() { ISOM_DECL_BOX_ALLOC(GF_TrackGroupBox, GF_ISOM_BOX_TYPE_TRGR); tmp->groups = gf_list_new(); if (!tmp->groups) { gf_free(tmp); return NULL; } return (GF_Box *)tmp; } void trgr_del(GF_Box *s) { GF_TrackGroupBox *ptr = (GF_TrackGroupBox *)s; if (ptr == NULL) return; gf_isom_box_array_del(ptr->groups); gf_free(ptr); } GF_Err trgr_AddBox(GF_Box *s, GF_Box *a) { GF_TrackGroupBox *ptr = (GF_TrackGroupBox *)s; return gf_list_add(ptr->groups, a); } GF_Err trgr_Read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read_ex(s, bs, trgr_AddBox, s->type); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err trgr_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TrackGroupBox *ptr = (GF_TrackGroupBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; return gf_isom_box_array_write(s, ptr->groups, bs); } GF_Err trgr_Size(GF_Box *s) { GF_TrackGroupBox *ptr = (GF_TrackGroupBox *)s; return gf_isom_box_array_size(s, ptr->groups); } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *trgt_New() { ISOM_DECL_BOX_ALLOC(GF_TrackGroupTypeBox, GF_ISOM_BOX_TYPE_TRGT); return (GF_Box *)tmp; } void trgt_del(GF_Box *s) { GF_TrackGroupTypeBox *ptr = (GF_TrackGroupTypeBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err trgt_Read(GF_Box *s, GF_BitStream *bs) { GF_TrackGroupTypeBox *ptr = (GF_TrackGroupTypeBox *)s; ptr->track_group_id = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err trgt_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TrackGroupTypeBox *ptr = (GF_TrackGroupTypeBox *) s; if (!s) return GF_BAD_PARAM; s->type = ptr->group_type; e = gf_isom_full_box_write(s, bs); s->type = GF_ISOM_BOX_TYPE_TRGT; if (e) return e; gf_bs_write_u32(bs, ptr->track_group_id); return GF_OK; } GF_Err trgt_Size(GF_Box *s) { GF_TrackGroupBox *ptr = (GF_TrackGroupBox *)s; ptr->size+= 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *stvi_New() { ISOM_DECL_BOX_ALLOC(GF_StereoVideoBox, GF_ISOM_BOX_TYPE_STVI); return (GF_Box *)tmp; } void stvi_del(GF_Box *s) { GF_StereoVideoBox *ptr = (GF_StereoVideoBox *)s; if (ptr == NULL) return; if (ptr->stereo_indication_type) gf_free(ptr->stereo_indication_type); gf_free(ptr); } GF_Err stvi_Read(GF_Box *s, GF_BitStream *bs) { GF_StereoVideoBox *ptr = (GF_StereoVideoBox *)s; ISOM_DECREASE_SIZE(ptr, 12); gf_bs_read_int(bs, 30); ptr->single_view_allowed = gf_bs_read_int(bs, 2); ptr->stereo_scheme = gf_bs_read_u32(bs); ptr->sit_len = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, ptr->sit_len); ptr->stereo_indication_type = gf_malloc(sizeof(char)*ptr->sit_len); gf_bs_read_data(bs, ptr->stereo_indication_type, ptr->sit_len); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stvi_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_StereoVideoBox *ptr = (GF_StereoVideoBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, 0, 30); gf_bs_write_int(bs, ptr->single_view_allowed, 2); gf_bs_write_u32(bs, ptr->stereo_scheme); gf_bs_write_u32(bs, ptr->sit_len); gf_bs_write_data(bs, ptr->stereo_indication_type, ptr->sit_len); return GF_OK; } GF_Err stvi_Size(GF_Box *s) { GF_StereoVideoBox *ptr = (GF_StereoVideoBox *)s; ptr->size+= 12 + ptr->sit_len; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *fiin_New() { ISOM_DECL_BOX_ALLOC(FDItemInformationBox, GF_ISOM_BOX_TYPE_FIIN); return (GF_Box *)tmp; } void fiin_del(GF_Box *s) { FDItemInformationBox *ptr = (FDItemInformationBox *)s; 
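	/* destructor: releases the partition entry list and the optional
	 * session-group (segr) and group-id-to-name (gitn) children owned
	 * by the fiin box */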
if (ptr == NULL) return; if (ptr->partition_entries) gf_isom_box_array_del(ptr->partition_entries); if (ptr->session_info) gf_isom_box_del((GF_Box*)ptr->session_info); if (ptr->group_id_to_name) gf_isom_box_del((GF_Box*)ptr->group_id_to_name); gf_free(ptr); } GF_Err fiin_AddBox(GF_Box *s, GF_Box *a) { FDItemInformationBox *ptr = (FDItemInformationBox *)s; switch(a->type) { case GF_ISOM_BOX_TYPE_PAEN: if (!ptr->partition_entries) ptr->partition_entries = gf_list_new(); return gf_list_add(ptr->partition_entries, a); case GF_ISOM_BOX_TYPE_SEGR: if (ptr->session_info) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->session_info = (FDSessionGroupBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_GITN: if (ptr->group_id_to_name) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->group_id_to_name = (GroupIdToNameBox *)a; return GF_OK; default: return gf_isom_box_add_default(s, a); } return GF_OK; } GF_Err fiin_Read(GF_Box *s, GF_BitStream *bs) { FDItemInformationBox *ptr = (FDItemInformationBox *)s; ISOM_DECREASE_SIZE(ptr, 2); gf_bs_read_u16(bs); return gf_isom_box_array_read(s, bs, fiin_AddBox); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err fiin_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; FDItemInformationBox *ptr = (FDItemInformationBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u16(bs, gf_list_count(ptr->partition_entries) ); e = gf_isom_box_array_write(s, ptr->partition_entries, bs); if (e) return e; if (ptr->session_info) gf_isom_box_write((GF_Box*)ptr->session_info, bs); if (ptr->group_id_to_name) gf_isom_box_write((GF_Box*)ptr->group_id_to_name, bs); return GF_OK; } GF_Err fiin_Size(GF_Box *s) { GF_Err e; FDItemInformationBox *ptr = (FDItemInformationBox *)s; ptr->size+= 2; if (ptr->partition_entries) { e = gf_isom_box_array_size(s, ptr->partition_entries); if (e) return e; } if (ptr->session_info) { e = gf_isom_box_size((GF_Box *)ptr->session_info); if (e) return e; ptr->size += ptr->session_info->size; } if (ptr->group_id_to_name) { e = gf_isom_box_size((GF_Box *) ptr->group_id_to_name); if (e) return e; ptr->size += ptr->group_id_to_name->size; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *paen_New() { ISOM_DECL_BOX_ALLOC(FDPartitionEntryBox, GF_ISOM_BOX_TYPE_PAEN); return (GF_Box *)tmp; } void paen_del(GF_Box *s) { FDPartitionEntryBox *ptr = (FDPartitionEntryBox *)s; if (ptr == NULL) return; if (ptr->blocks_and_symbols) gf_isom_box_del((GF_Box*)ptr->blocks_and_symbols); if (ptr->FEC_symbol_locations) gf_isom_box_del((GF_Box*)ptr->FEC_symbol_locations); if (ptr->File_symbol_locations) gf_isom_box_del((GF_Box*)ptr->File_symbol_locations); gf_free(ptr); } GF_Err paen_AddBox(GF_Box *s, GF_Box *a) { FDPartitionEntryBox *ptr = (FDPartitionEntryBox *)s; switch(a->type) { case GF_ISOM_BOX_TYPE_FPAR: if (ptr->blocks_and_symbols) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->blocks_and_symbols = (FilePartitionBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_FECR: if (ptr->FEC_symbol_locations) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->FEC_symbol_locations = (FECReservoirBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_FIRE: if (ptr->File_symbol_locations) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->File_symbol_locations = (FileReservoirBox *)a; return GF_OK; default: return gf_isom_box_add_default(s, a); } return GF_OK; } GF_Err paen_Read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, fiin_AddBox); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err paen_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; FDPartitionEntryBox *ptr = (FDPartitionEntryBox *) s; if (!s) return 
GF_BAD_PARAM; if (ptr->blocks_and_symbols) { e = gf_isom_box_write((GF_Box *)ptr->blocks_and_symbols, bs); if (e) return e; } if (ptr->FEC_symbol_locations) { e = gf_isom_box_write((GF_Box *)ptr->FEC_symbol_locations, bs); if (e) return e; } if (ptr->File_symbol_locations) { e = gf_isom_box_write((GF_Box *)ptr->File_symbol_locations, bs); if (e) return e; } return GF_OK; } GF_Err paen_Size(GF_Box *s) { GF_Err e; FDPartitionEntryBox *ptr = (FDPartitionEntryBox *)s; if (ptr->blocks_and_symbols) { e = gf_isom_box_size((GF_Box *)ptr->blocks_and_symbols); if (e) return e; ptr->size += ptr->blocks_and_symbols->size; } if (ptr->FEC_symbol_locations) { e = gf_isom_box_size((GF_Box *) ptr->FEC_symbol_locations); if (e) return e; ptr->size += ptr->FEC_symbol_locations->size; } if (ptr->File_symbol_locations) { e = gf_isom_box_size((GF_Box *) ptr->File_symbol_locations); if (e) return e; ptr->size += ptr->File_symbol_locations->size; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *fpar_New() { ISOM_DECL_BOX_ALLOC(FilePartitionBox, GF_ISOM_BOX_TYPE_FPAR); return (GF_Box *)tmp; } void fpar_del(GF_Box *s) { FilePartitionBox *ptr = (FilePartitionBox *)s; if (ptr == NULL) return; if (ptr->scheme_specific_info) gf_free(ptr->scheme_specific_info); if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err gf_isom_read_null_terminated_string(GF_Box *s, GF_BitStream *bs, u64 size, char **out_str) { u32 len=10; u32 i=0; *out_str = gf_malloc(sizeof(char)*len); while (1) { ISOM_DECREASE_SIZE(s, 1 ); (*out_str)[i] = gf_bs_read_u8(bs); if (!(*out_str)[i]) break; i++; if (i==len) { len += 10; *out_str = gf_realloc(*out_str, sizeof(char)*len); } if (gf_bs_available(bs) == 0) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] missing null character in null terminated string\n")); (*out_str)[i] = 0; return GF_OK; } if (i >= size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] string bigger than container, probably missing null character\n")); (*out_str)[i] = 0; return GF_OK; } } return GF_OK; } GF_Err fpar_Read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_Err e; FilePartitionBox *ptr = (FilePartitionBox *)s; ISOM_DECREASE_SIZE(ptr, ((ptr->version ? 4 : 2) + 12) ); ptr->itemID = gf_bs_read_int(bs, ptr->version ? 32 : 16); ptr->packet_payload_size = gf_bs_read_u16(bs); gf_bs_read_u8(bs); ptr->FEC_encoding_ID = gf_bs_read_u8(bs); ptr->FEC_instance_ID = gf_bs_read_u16(bs); ptr->max_source_block_length = gf_bs_read_u16(bs); ptr->encoding_symbol_length = gf_bs_read_u16(bs); ptr->max_number_of_encoding_symbols = gf_bs_read_u16(bs); e = gf_isom_read_null_terminated_string(s, bs, ptr->size, &ptr->scheme_specific_info); if (e) return e; ISOM_DECREASE_SIZE(ptr, (ptr->version ? 4 : 2) ); ptr->nb_entries = gf_bs_read_int(bs, ptr->version ? 32 : 16); ISOM_DECREASE_SIZE(ptr, ptr->nb_entries * 6 ); GF_SAFE_ALLOC_N(ptr->entries, ptr->nb_entries, FilePartitionEntry); for (i=0;i < ptr->nb_entries; i++) { ptr->entries[i].block_count = gf_bs_read_u16(bs); ptr->entries[i].block_size = gf_bs_read_u32(bs); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err fpar_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; FilePartitionBox *ptr = (FilePartitionBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, ptr->itemID, ptr->version ? 
32 : 16); gf_bs_write_u16(bs, ptr->packet_payload_size); gf_bs_write_u8(bs, 0); gf_bs_write_u8(bs, ptr->FEC_encoding_ID); gf_bs_write_u16(bs, ptr->FEC_instance_ID); gf_bs_write_u16(bs, ptr->max_source_block_length); gf_bs_write_u16(bs, ptr->encoding_symbol_length); gf_bs_write_u16(bs, ptr->max_number_of_encoding_symbols); if (ptr->scheme_specific_info) { gf_bs_write_data(bs, ptr->scheme_specific_info, (u32)strlen(ptr->scheme_specific_info) ); } //null terminated string gf_bs_write_u8(bs, 0); gf_bs_write_int(bs, ptr->nb_entries, ptr->version ? 32 : 16); for (i=0;i < ptr->nb_entries; i++) { gf_bs_write_u16(bs, ptr->entries[i].block_count); gf_bs_write_u32(bs, ptr->entries[i].block_size); } return GF_OK; } GF_Err fpar_Size(GF_Box *s) { FilePartitionBox *ptr = (FilePartitionBox *)s; ptr->size+= 13 + ptr->version ? 8 : 4; if (ptr->scheme_specific_info) ptr->size += strlen(ptr->scheme_specific_info); ptr->size+= ptr->nb_entries * 6; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *fecr_New() { ISOM_DECL_BOX_ALLOC(FECReservoirBox, GF_ISOM_BOX_TYPE_FECR); return (GF_Box *)tmp; } void fecr_del(GF_Box *s) { FECReservoirBox *ptr = (FECReservoirBox *)s; if (ptr == NULL) return; if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err fecr_Read(GF_Box *s, GF_BitStream *bs) { u32 i; FECReservoirBox *ptr = (FECReservoirBox *)s; ISOM_DECREASE_SIZE(ptr, (ptr->version ? 4 : 2) ); ptr->nb_entries = gf_bs_read_int(bs, ptr->version ? 32 : 16); ISOM_DECREASE_SIZE(ptr, ptr->nb_entries * (ptr->version ? 8 : 6) ); GF_SAFE_ALLOC_N(ptr->entries, ptr->nb_entries, FECReservoirEntry); for (i=0; i<ptr->nb_entries; i++) { ptr->entries[i].item_id = gf_bs_read_int(bs, ptr->version ? 32 : 16); ptr->entries[i].symbol_count = gf_bs_read_u32(bs); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err fecr_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; FECReservoirBox *ptr = (FECReservoirBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, ptr->nb_entries, ptr->version ? 32 : 16); for (i=0; i<ptr->nb_entries; i++) { gf_bs_write_int(bs, ptr->entries[i].item_id, ptr->version ? 32 : 16); gf_bs_write_u32(bs, ptr->entries[i].symbol_count); } return GF_OK; } GF_Err fecr_Size(GF_Box *s) { FECReservoirBox *ptr = (FECReservoirBox *)s; ptr->size += (ptr->version ? 4 : 2) + ptr->nb_entries * (ptr->version ? 
8 : 6); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *segr_New() { ISOM_DECL_BOX_ALLOC(FDSessionGroupBox, GF_ISOM_BOX_TYPE_SEGR); return (GF_Box *)tmp; } void segr_del(GF_Box *s) { u32 i; FDSessionGroupBox *ptr = (FDSessionGroupBox *)s; if (ptr == NULL) return; for (i=0; i<ptr->num_session_groups; i++) { if (ptr->session_groups[i].group_ids) gf_free(ptr->session_groups[i].group_ids); if (ptr->session_groups[i].channels) gf_free(ptr->session_groups[i].channels); } if (ptr->session_groups) gf_free(ptr->session_groups); gf_free(ptr); } GF_Err segr_Read(GF_Box *s, GF_BitStream *bs) { u32 i, k; FDSessionGroupBox *ptr = (FDSessionGroupBox *)s; ISOM_DECREASE_SIZE(ptr, 2); ptr->num_session_groups = gf_bs_read_u16(bs); if (ptr->num_session_groups*3>ptr->size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in segr\n", ptr->num_session_groups)); ptr->num_session_groups = 0; return GF_ISOM_INVALID_FILE; } GF_SAFE_ALLOC_N(ptr->session_groups, ptr->num_session_groups, SessionGroupEntry); for (i=0; i<ptr->num_session_groups; i++) { ptr->session_groups[i].nb_groups = gf_bs_read_u8(bs); ISOM_DECREASE_SIZE(ptr, 1); GF_SAFE_ALLOC_N(ptr->session_groups[i].group_ids, ptr->session_groups[i].nb_groups, u32); for (k=0; k<ptr->session_groups[i].nb_groups; k++) { ISOM_DECREASE_SIZE(ptr, 4); ptr->session_groups[i].group_ids[k] = gf_bs_read_u32(bs); } ptr->session_groups[i].nb_channels = gf_bs_read_u16(bs); GF_SAFE_ALLOC_N(ptr->session_groups[i].channels, ptr->session_groups[i].nb_channels, u32); for (k=0; k<ptr->session_groups[i].nb_channels; k++) { ISOM_DECREASE_SIZE(ptr, 4); ptr->session_groups[i].channels[k] = gf_bs_read_u32(bs); } } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err segr_Write(GF_Box *s, GF_BitStream *bs) { u32 i, k; FDSessionGroupBox *ptr = (FDSessionGroupBox *) s; if (!s) return GF_BAD_PARAM; gf_bs_write_u16(bs, ptr->num_session_groups); for (i=0; i<ptr->num_session_groups; i++) { gf_bs_write_u8(bs, ptr->session_groups[i].nb_groups); for (k=0; k<ptr->session_groups[i].nb_groups; k++) { gf_bs_write_u32(bs, ptr->session_groups[i].group_ids[k]); } gf_bs_write_u16(bs, ptr->session_groups[i].nb_channels); for (k=0; k<ptr->session_groups[i].nb_channels; k++) { gf_bs_write_u32(bs, ptr->session_groups[i].channels[k]); } } return GF_OK; } GF_Err segr_Size(GF_Box *s) { u32 i; FDSessionGroupBox *ptr = (FDSessionGroupBox *)s; ptr->size += 2; for (i=0; i<ptr->num_session_groups; i++) { ptr->size += 1 + 4*ptr->session_groups[i].nb_groups; ptr->size += 2 + 4*ptr->session_groups[i].nb_channels; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *gitn_New() { ISOM_DECL_BOX_ALLOC(GroupIdToNameBox, GF_ISOM_BOX_TYPE_GITN); return (GF_Box *)tmp; } void gitn_del(GF_Box *s) { u32 i; GroupIdToNameBox *ptr = (GroupIdToNameBox *)s; if (ptr == NULL) return; for (i=0; i<ptr->nb_entries; i++) { if (ptr->entries[i].name) gf_free(ptr->entries[i].name); } if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err gitn_Read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_Err e; GroupIdToNameBox *ptr = (GroupIdToNameBox *)s; ISOM_DECREASE_SIZE(ptr, 2); ptr->nb_entries = gf_bs_read_u16(bs); GF_SAFE_ALLOC_N(ptr->entries, ptr->nb_entries, GroupIdNameEntry); for (i=0; i<ptr->nb_entries; i++) { ISOM_DECREASE_SIZE(ptr, 4); ptr->entries[i].group_id = gf_bs_read_u32(bs); e = gf_isom_read_null_terminated_string(s, bs, ptr->size, &ptr->entries[i].name); if (e) return e; } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err gitn_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; 
u32 i; GroupIdToNameBox *ptr = (GroupIdToNameBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u16(bs, ptr->nb_entries); for (i=0; i<ptr->nb_entries; i++) { gf_bs_write_u32(bs, ptr->entries[i].group_id); if (ptr->entries[i].name) gf_bs_write_data(bs, ptr->entries[i].name, (u32)strlen(ptr->entries[i].name) ); gf_bs_write_u8(bs, 0); } return GF_OK; } GF_Err gitn_Size(GF_Box *s) { u32 i; GroupIdToNameBox *ptr = (GroupIdToNameBox *)s; ptr->size += 2; for (i=0; i<ptr->nb_entries; i++) { ptr->size += 5; if (ptr->entries[i].name) ptr->size += strlen(ptr->entries[i].name); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #ifndef GPAC_DISABLE_ISOM_HINTING GF_Box *fdpa_New() { ISOM_DECL_BOX_ALLOC(GF_FDpacketBox, GF_ISOM_BOX_TYPE_FDPA); return (GF_Box *)tmp; } void fdpa_del(GF_Box *s) { u32 i; GF_FDpacketBox *ptr = (GF_FDpacketBox *)s; if (ptr == NULL) return; if (ptr->headers) { for (i=0; i<ptr->header_ext_count; i++) { if (ptr->headers[i].data) gf_free(ptr->headers[i].data); } gf_free(ptr->headers); } gf_free(ptr); } GF_Err fdpa_Read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_FDpacketBox *ptr = (GF_FDpacketBox *)s; ISOM_DECREASE_SIZE(ptr, 3); ptr->info.sender_current_time_present = gf_bs_read_int(bs, 1); ptr->info.expected_residual_time_present = gf_bs_read_int(bs, 1); ptr->info.session_close_bit = gf_bs_read_int(bs, 1); ptr->info.object_close_bit = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 4); ptr->info.transport_object_identifier = gf_bs_read_u16(bs); ISOM_DECREASE_SIZE(ptr, 2); ptr->header_ext_count = gf_bs_read_u16(bs); if (ptr->header_ext_count*2>ptr->size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in fdpa\n", ptr->header_ext_count)); return GF_ISOM_INVALID_FILE; } GF_SAFE_ALLOC_N(ptr->headers, ptr->header_ext_count, GF_LCTheaderExtension); for (i=0; i<ptr->header_ext_count; i++) { ptr->headers[i].header_extension_type = gf_bs_read_u8(bs); ISOM_DECREASE_SIZE(ptr, 1); if (ptr->headers[i].header_extension_type > 127) { gf_bs_read_data(bs, (char *) ptr->headers[i].content, 3); } else { ISOM_DECREASE_SIZE(ptr, 1); ptr->headers[i].data_length = gf_bs_read_u8(bs); if (ptr->headers[i].data_length) { ptr->headers[i].data_length = 4*ptr->headers[i].data_length - 2; ptr->headers[i].data = gf_malloc(sizeof(char) * ptr->headers[i].data_length); gf_bs_read_data(bs, ptr->headers[i].data, ptr->headers[i].data_length); } } } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err fdpa_Write(GF_Box *s, GF_BitStream *bs) { u32 i; GF_FDpacketBox *ptr = (GF_FDpacketBox *) s; if (!s) return GF_BAD_PARAM; gf_bs_write_int(bs, ptr->info.sender_current_time_present, 1); gf_bs_write_int(bs, ptr->info.expected_residual_time_present, 1); gf_bs_write_int(bs, ptr->info.session_close_bit, 1); gf_bs_write_int(bs, ptr->info.object_close_bit, 1); gf_bs_write_int(bs, 0, 4); ptr->info.transport_object_identifier = gf_bs_read_u16(bs); gf_bs_write_u16(bs, ptr->header_ext_count); for (i=0; i<ptr->header_ext_count; i++) { gf_bs_write_u8(bs, ptr->headers[i].header_extension_type); if (ptr->headers[i].header_extension_type > 127) { gf_bs_write_data(bs, (const char *) ptr->headers[i].content, 3); } else { gf_bs_write_u8(bs, ptr->headers[i].data_length ? 
(ptr->headers[i].data_length+2)/4 : 0); if (ptr->headers[i].data_length) { gf_bs_write_data(bs, ptr->headers[i].data, ptr->headers[i].data_length); } } } return GF_OK; } GF_Err fdpa_Size(GF_Box *s) { u32 i; GF_FDpacketBox *ptr = (GF_FDpacketBox *)s; ptr->size += 5; for (i=0; i<ptr->header_ext_count; i++) { ptr->size += 1; if (ptr->headers[i].header_extension_type > 127) { ptr->size += 3; } else { ptr->size += 1 + ptr->headers[i].data_length; } } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *extr_New() { ISOM_DECL_BOX_ALLOC(GF_ExtraDataBox, GF_ISOM_BOX_TYPE_EXTR); return (GF_Box *)tmp; } void extr_del(GF_Box *s) { GF_ExtraDataBox *ptr = (GF_ExtraDataBox *)s; if (ptr == NULL) return; if (ptr->feci) gf_isom_box_del((GF_Box*)ptr->feci); if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Err extr_Read(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ExtraDataBox *ptr = (GF_ExtraDataBox *)s; e = gf_isom_box_parse((GF_Box**) &ptr->feci, bs); if (e) return e; if (!ptr->feci || ptr->feci->size > ptr->size) return GF_ISOM_INVALID_MEDIA; ptr->data_length = (u32) (ptr->size - ptr->feci->size); ptr->data = gf_malloc(sizeof(char)*ptr->data_length); gf_bs_read_data(bs, ptr->data, ptr->data_length); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err extr_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ExtraDataBox *ptr = (GF_ExtraDataBox *) s; if (!s) return GF_BAD_PARAM; if (ptr->feci) { e = gf_isom_box_write((GF_Box *)ptr->feci, bs); if (e) return e; } gf_bs_write_data(bs, ptr->data, ptr->data_length); return GF_OK; } GF_Err extr_Size(GF_Box *s) { GF_Err e; GF_ExtraDataBox *ptr = (GF_ExtraDataBox *) s; if (ptr->feci) { e = gf_isom_box_size((GF_Box *)ptr->feci); if (e) return e; ptr->size += ptr->feci->size; } ptr->size += ptr->data_length; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *fdsa_New() { ISOM_DECL_BOX_ALLOC(GF_HintSample, GF_ISOM_BOX_TYPE_FDSA); if (!tmp) return NULL; tmp->packetTable = gf_list_new(); tmp->hint_subtype = GF_ISOM_BOX_TYPE_FDP_STSD; return (GF_Box*)tmp; } void fdsa_del(GF_Box *s) { GF_HintSample *ptr = (GF_HintSample *)s; gf_isom_box_array_del(ptr->packetTable); if (ptr->extra_data) gf_isom_box_del((GF_Box*)ptr->extra_data); gf_free(ptr); } GF_Err fdsa_AddBox(GF_Box *s, GF_Box *a) { GF_HintSample *ptr = (GF_HintSample *)s; switch(a->type) { case GF_ISOM_BOX_TYPE_FDPA: gf_list_add(ptr->packetTable, a); break; case GF_ISOM_BOX_TYPE_EXTR: if (ptr->extra_data) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->extra_data = (GF_ExtraDataBox*)a; break; default: return gf_isom_box_add_default(s, a); } return GF_OK; } GF_Err fdsa_Read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, fdsa_AddBox); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err fdsa_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_HintSample *ptr = (GF_HintSample *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_array_write(s, ptr->packetTable, bs); if (e) return e; if (ptr->extra_data) { e = gf_isom_box_write((GF_Box *)ptr->extra_data, bs); if (e) return e; } return GF_OK; } GF_Err fdsa_Size(GF_Box *s) { GF_HintSample *ptr = (GF_HintSample*)s; GF_Err e; if (ptr->extra_data) { e = gf_isom_box_size((GF_Box *)ptr->extra_data); if (e) return e; ptr->size += ptr->extra_data->size; } return gf_isom_box_array_size(s, ptr->packetTable); } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_HINTING*/ void trik_del(GF_Box *s) { GF_TrickPlayBox *ptr = (GF_TrickPlayBox *) s; if (ptr == NULL) return; if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err trik_Read(GF_Box 
*s,GF_BitStream *bs) { u32 i; GF_TrickPlayBox *ptr = (GF_TrickPlayBox *) s; ptr->entry_count = (u32) ptr->size; ptr->entries = (GF_TrickPlayBoxEntry *) gf_malloc(ptr->entry_count * sizeof(GF_TrickPlayBoxEntry) ); if (ptr->entries == NULL) return GF_OUT_OF_MEM; for (i=0; i< ptr->entry_count; i++) { ptr->entries[i].pic_type = gf_bs_read_int(bs, 2); ptr->entries[i].dependency_level = gf_bs_read_int(bs, 6); } return GF_OK; } GF_Box *trik_New() { ISOM_DECL_BOX_ALLOC(GF_TrickPlayBox, GF_ISOM_BOX_TYPE_TRIK); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err trik_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_TrickPlayBox *ptr = (GF_TrickPlayBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; for (i=0; i < ptr->entry_count; i++ ) { gf_bs_write_int(bs, ptr->entries[i].pic_type, 2); gf_bs_write_int(bs, ptr->entries[i].dependency_level, 6); } return GF_OK; } GF_Err trik_Size(GF_Box *s) { GF_TrickPlayBox *ptr = (GF_TrickPlayBox *) s; ptr->size += 8 * ptr->entry_count; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void bloc_del(GF_Box *s) { gf_free(s); } GF_Err bloc_Read(GF_Box *s,GF_BitStream *bs) { GF_BaseLocationBox *ptr = (GF_BaseLocationBox *) s; ISOM_DECREASE_SIZE(s, 256) gf_bs_read_data(bs, (char *) ptr->baseLocation, 256); ISOM_DECREASE_SIZE(s, 256) gf_bs_read_data(bs, (char *) ptr->basePurlLocation, 256); ISOM_DECREASE_SIZE(s, 512) gf_bs_skip_bytes(bs, 512); return GF_OK; } GF_Box *bloc_New() { ISOM_DECL_BOX_ALLOC(GF_BaseLocationBox, GF_ISOM_BOX_TYPE_TRIK); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err bloc_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_BaseLocationBox *ptr = (GF_BaseLocationBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_data(bs, (const char *) ptr->baseLocation, 256); gf_bs_write_data(bs, (const char *) ptr->basePurlLocation, 256); for (i=0; i < 64; i++ ) { gf_bs_write_u64(bs, 0); } return GF_OK; } GF_Err bloc_Size(GF_Box *s) { s->size += 1024; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void ainf_del(GF_Box *s) { GF_AssetInformationBox *ptr = (GF_AssetInformationBox *) s; if (ptr->APID) gf_free(ptr->APID); gf_free(s); } GF_Err ainf_Read(GF_Box *s,GF_BitStream *bs) { GF_AssetInformationBox *ptr = (GF_AssetInformationBox *) s; ISOM_DECREASE_SIZE(s, 4) ptr->profile_version = gf_bs_read_u32(bs); return gf_isom_read_null_terminated_string(s, bs, s->size, &ptr->APID); } GF_Box *ainf_New() { ISOM_DECL_BOX_ALLOC(GF_AssetInformationBox, GF_ISOM_BOX_TYPE_AINF); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err ainf_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_AssetInformationBox *ptr = (GF_AssetInformationBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->profile_version); gf_bs_write_data(bs, ptr->APID, (u32) strlen(ptr->APID) + 1); return GF_OK; } GF_Err ainf_Size(GF_Box *s) { GF_AssetInformationBox *ptr = (GF_AssetInformationBox *) s; s->size += 4 + strlen(ptr->APID) + 1; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mhac_del(GF_Box *s) { GF_MHAConfigBox *ptr = (GF_MHAConfigBox *) s; if (ptr->mha_config) gf_free(ptr->mha_config); gf_free(s); } GF_Err mhac_Read(GF_Box *s,GF_BitStream *bs) { GF_MHAConfigBox *ptr = (GF_MHAConfigBox *) s; ISOM_DECREASE_SIZE(s, 5) ptr->configuration_version = gf_bs_read_u8(bs); ptr->mha_pl_indication = gf_bs_read_u8(bs); ptr->reference_channel_layout = gf_bs_read_u8(bs); ptr->mha_config_size = gf_bs_read_u16(bs); if (ptr->mha_config_size) { ISOM_DECREASE_SIZE(s, ptr->mha_config_size) 
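		/* ISOM_DECREASE_SIZE() above already verified that mha_config_size
		 * bytes remain in the box, so the allocation and read below stay
		 * within the box payload */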
ptr->mha_config = gf_malloc(sizeof(char)*ptr->mha_config_size); gf_bs_read_data(bs, ptr->mha_config, ptr->mha_config_size); } return GF_OK; } GF_Box *mhac_New() { ISOM_DECL_BOX_ALLOC(GF_MHAConfigBox, GF_ISOM_BOX_TYPE_MHAC); tmp->configuration_version = 1; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mhac_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MHAConfigBox *ptr = (GF_MHAConfigBox *) s; e = gf_isom_box_write(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->configuration_version); gf_bs_write_u8(bs, ptr->mha_pl_indication); gf_bs_write_u8(bs, ptr->reference_channel_layout); gf_bs_write_u16(bs, ptr->mha_config ? ptr->mha_config_size : 0); if (ptr->mha_config && ptr->mha_config_size) gf_bs_write_data(bs, ptr->mha_config, ptr->mha_config_size); return GF_OK; } GF_Err mhac_Size(GF_Box *s) { GF_MHAConfigBox *ptr = (GF_MHAConfigBox *) s; s->size += 5; if (ptr->mha_config_size && ptr->mha_config) s->size += ptr->mha_config_size; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* Dolby Vision */ GF_Box *dvcC_New() { GF_DOVIConfigurationBox *tmp = (GF_DOVIConfigurationBox *)gf_malloc(sizeof(GF_DOVIConfigurationBox)); if (tmp == NULL) return NULL; memset(tmp, 0, sizeof(GF_DOVIConfigurationBox)); tmp->type = GF_ISOM_BOX_TYPE_DVCC; return (GF_Box *)tmp; } void dvcC_del(GF_Box *s) { GF_DOVIConfigurationBox *ptr = (GF_DOVIConfigurationBox*)s; gf_free(ptr); } GF_Err dvcC_Read(GF_Box *s, GF_BitStream *bs) { GF_DOVIConfigurationBox *ptr = (GF_DOVIConfigurationBox *)s; //GF_DOVIDecoderConfigurationRecord ptr->DOVIConfig.dv_version_major = gf_bs_read_u8(bs); ptr->DOVIConfig.dv_version_minor = gf_bs_read_u8(bs); ptr->DOVIConfig.dv_profile = gf_bs_read_int(bs, 7); ptr->DOVIConfig.dv_level = gf_bs_read_int(bs, 6); ptr->DOVIConfig.rpu_present_flag = gf_bs_read_int(bs, 1); ptr->DOVIConfig.el_present_flag = gf_bs_read_int(bs, 1); ptr->DOVIConfig.bl_present_flag = gf_bs_read_int(bs, 1); { int i = 0; u32 data[5]; memset(data, 0, sizeof(data)); gf_bs_read_data(bs, (char*)data, 20); for (i = 0; i < 5; ++i) { if (data[i] != 0) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] dvcC reserved bytes are not zero\n")); //return GF_ISOM_INVALID_FILE; } } } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dvcC_Write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_DOVIConfigurationBox *ptr = (GF_DOVIConfigurationBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; //GF_DOVIDecoderConfigurationRecord gf_bs_write_u8(bs, ptr->DOVIConfig.dv_version_major); gf_bs_write_u8(bs, ptr->DOVIConfig.dv_version_minor); gf_bs_write_int(bs, ptr->DOVIConfig.dv_profile, 7); gf_bs_write_int(bs, ptr->DOVIConfig.dv_level, 6); gf_bs_write_int(bs, ptr->DOVIConfig.rpu_present_flag, 1); gf_bs_write_int(bs, ptr->DOVIConfig.el_present_flag, 1); gf_bs_write_int(bs, ptr->DOVIConfig.bl_present_flag, 1); gf_bs_write_int(bs, 0, 5*32); return GF_OK; } GF_Err dvcC_Size(GF_Box *s) { GF_DOVIConfigurationBox *ptr = (GF_DOVIConfigurationBox *)s; ptr->size += sizeof(GF_DOVIDecoderConfigurationRecord); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM*/
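/*
 * Illustrative sketch (assumes the generic GPAC box dispatchers already used in
 * this file, e.g. gf_isom_box_size/gf_isom_box_write/gf_isom_box_del): each box
 * type above follows the same triad - xxx_New()/xxx_del() for allocation and
 * release, xxx_Read() to parse the payload from a GF_BitStream, and, when
 * writing is enabled, xxx_Size() to recompute box->size before xxx_Write()
 * serializes it. A caller holding a generic GF_Box would therefore do roughly:
 *
 *	GF_Err e = gf_isom_box_size(box);        // refresh box->size first
 *	if (!e) e = gf_isom_box_write(box, bs);  // then serialize header + payload
 *	gf_isom_box_del(box);                    // release the box and its children
 */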
./CrossVul/dataset_final_sorted/CWE-400/c/good_551_0
crossvul-cpp_data_bad_485_2
// SPDX-License-Identifier: GPL-2.0 /* * Host Wire Adapter: * Driver glue, HWA-specific functions, bridges to WAHC and WUSBHC * * Copyright (C) 2005-2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * The HWA driver is a simple layer that forwards requests to the WAHC * (Wire Adater Host Controller) or WUSBHC (Wireless USB Host * Controller) layers. * * Host Wire Adapter is the 'WUSB 1.0 standard' name for Wireless-USB * Host Controller that is connected to your system via USB (a USB * dongle that implements a USB host...). There is also a Device Wired * Adaptor, DWA (Wireless USB hub) that uses the same mechanism for * transferring data (it is after all a USB host connected via * Wireless USB), we have a common layer called Wire Adapter Host * Controller that does all the hard work. The WUSBHC (Wireless USB * Host Controller) is the part common to WUSB Host Controllers, the * HWA and the PCI-based one, that is implemented following the WHCI * spec. All these layers are implemented in ../wusbcore. * * The main functions are hwahc_op_urb_{en,de}queue(), that pass the * job of converting a URB to a Wire Adapter * * Entry points: * * hwahc_driver_*() Driver initialization, registration and * teardown. * * hwahc_probe() New device came up, create an instance for * it [from device enumeration]. * * hwahc_disconnect() Remove device instance [from device * enumeration]. * * [__]hwahc_op_*() Host-Wire-Adaptor specific functions for * starting/stopping/etc (some might be made also * DWA). */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/workqueue.h> #include <linux/wait.h> #include <linux/completion.h> #include "../wusbcore/wa-hc.h" #include "../wusbcore/wusbhc.h" struct hwahc { struct wusbhc wusbhc; /* has to be 1st */ struct wahc wa; }; /* * FIXME should be wusbhc * * NOTE: we need to cache the Cluster ID because later...there is no * way to get it :) */ static int __hwahc_set_cluster_id(struct hwahc *hwahc, u8 cluster_id) { int result; struct wusbhc *wusbhc = &hwahc->wusbhc; struct wahc *wa = &hwahc->wa; struct device *dev = &wa->usb_iface->dev; result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), WUSB_REQ_SET_CLUSTER_ID, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, cluster_id, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, NULL, 0, USB_CTRL_SET_TIMEOUT); if (result < 0) dev_err(dev, "Cannot set WUSB Cluster ID to 0x%02x: %d\n", cluster_id, result); else wusbhc->cluster_id = cluster_id; dev_info(dev, "Wireless USB Cluster ID set to 0x%02x\n", cluster_id); return result; } static int __hwahc_op_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots) { struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct wahc *wa = &hwahc->wa; return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), WUSB_REQ_SET_NUM_DNTS, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, interval << 8 | slots, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, NULL, 0, USB_CTRL_SET_TIMEOUT); } /* * Reset a WUSB host controller and wait for it to complete doing it. * * @usb_hcd: Pointer to WUSB Host Controller instance. 
* */ static int hwahc_op_reset(struct usb_hcd *usb_hcd) { int result; struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct device *dev = &hwahc->wa.usb_iface->dev; mutex_lock(&wusbhc->mutex); wa_nep_disarm(&hwahc->wa); result = __wa_set_feature(&hwahc->wa, WA_RESET); if (result < 0) { dev_err(dev, "error commanding HC to reset: %d\n", result); goto error_unlock; } result = __wa_wait_status(&hwahc->wa, WA_STATUS_RESETTING, 0); if (result < 0) { dev_err(dev, "error waiting for HC to reset: %d\n", result); goto error_unlock; } error_unlock: mutex_unlock(&wusbhc->mutex); return result; } /* * FIXME: break this function up */ static int hwahc_op_start(struct usb_hcd *usb_hcd) { u8 addr; int result; struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); result = -ENOSPC; mutex_lock(&wusbhc->mutex); addr = wusb_cluster_id_get(); if (addr == 0) goto error_cluster_id_get; result = __hwahc_set_cluster_id(hwahc, addr); if (result < 0) goto error_set_cluster_id; usb_hcd->uses_new_polling = 1; set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags); usb_hcd->state = HC_STATE_RUNNING; /* * prevent USB core from suspending the root hub since * bus_suspend and bus_resume are not yet supported. */ pm_runtime_get_noresume(&usb_hcd->self.root_hub->dev); result = 0; out: mutex_unlock(&wusbhc->mutex); return result; error_set_cluster_id: wusb_cluster_id_put(wusbhc->cluster_id); error_cluster_id_get: goto out; } /* * No need to abort pipes, as when this is called, all the children * has been disconnected and that has done it [through * usb_disable_interface() -> usb_disable_endpoint() -> * hwahc_op_ep_disable() - >rpipe_ep_disable()]. */ static void hwahc_op_stop(struct usb_hcd *usb_hcd) { struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); mutex_lock(&wusbhc->mutex); wusb_cluster_id_put(wusbhc->cluster_id); mutex_unlock(&wusbhc->mutex); } static int hwahc_op_get_frame_number(struct usb_hcd *usb_hcd) { struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct wahc *wa = &hwahc->wa; /* * We cannot query the HWA for the WUSB time since that requires sending * a synchronous URB and this function can be called in_interrupt. * Instead, query the USB frame number for our parent and use that. */ return usb_get_current_frame_number(wa->usb_dev); } static int hwahc_op_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb, gfp_t gfp) { struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); return wa_urb_enqueue(&hwahc->wa, urb->ep, urb, gfp); } static int hwahc_op_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status) { struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); return wa_urb_dequeue(&hwahc->wa, urb, status); } /* * Release resources allocated for an endpoint * * If there is an associated rpipe to this endpoint, go ahead and put it. 
*/ static void hwahc_op_endpoint_disable(struct usb_hcd *usb_hcd, struct usb_host_endpoint *ep) { struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); rpipe_ep_disable(&hwahc->wa, ep); } static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc) { int result; struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct device *dev = &hwahc->wa.usb_iface->dev; result = __wa_set_feature(&hwahc->wa, WA_ENABLE); if (result < 0) { dev_err(dev, "error commanding HC to start: %d\n", result); goto error_stop; } result = __wa_wait_status(&hwahc->wa, WA_ENABLE, WA_ENABLE); if (result < 0) { dev_err(dev, "error waiting for HC to start: %d\n", result); goto error_stop; } result = wa_nep_arm(&hwahc->wa, GFP_KERNEL); if (result < 0) { dev_err(dev, "cannot listen to notifications: %d\n", result); goto error_stop; } /* * If WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS is set, * disable transfer notifications. */ if (hwahc->wa.quirks & WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS) { struct usb_host_interface *cur_altsetting = hwahc->wa.usb_iface->cur_altsetting; result = usb_control_msg(hwahc->wa.usb_dev, usb_sndctrlpipe(hwahc->wa.usb_dev, 0), WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, WA_REQ_ALEREON_FEATURE_SET, cur_altsetting->desc.bInterfaceNumber, NULL, 0, USB_CTRL_SET_TIMEOUT); /* * If we successfully sent the control message, start DTI here * because no transfer notifications will be received which is * where DTI is normally started. */ if (result == 0) result = wa_dti_start(&hwahc->wa); else result = 0; /* OK. Continue normally. */ if (result < 0) { dev_err(dev, "cannot start DTI: %d\n", result); goto error_dti_start; } } return result; error_dti_start: wa_nep_disarm(&hwahc->wa); error_stop: __wa_clear_feature(&hwahc->wa, WA_ENABLE); return result; } static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc, int delay) { struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct wahc *wa = &hwahc->wa; u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; int ret; ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), WUSB_REQ_CHAN_STOP, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, delay * 1000, iface_no, NULL, 0, USB_CTRL_SET_TIMEOUT); if (ret == 0) msleep(delay); wa_nep_disarm(&hwahc->wa); __wa_stop(&hwahc->wa); } /* * Set the UWB MAS allocation for the WUSB cluster * * @stream_index: stream to use (-1 for cancelling the allocation) * @mas: mas bitmap to use */ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas) { int result; struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct wahc *wa = &hwahc->wa; struct device *dev = &wa->usb_iface->dev; u8 mas_le[UWB_NUM_MAS/8]; /* Set the stream index */ result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), WUSB_REQ_SET_STREAM_IDX, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, stream_index, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, NULL, 0, USB_CTRL_SET_TIMEOUT); if (result < 0) { dev_err(dev, "Cannot set WUSB stream index: %d\n", result); goto out; } uwb_mas_bm_copy_le(mas_le, mas); /* Set the MAS allocation */ result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), WUSB_REQ_SET_WUSB_MAS, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, mas_le, 32, USB_CTRL_SET_TIMEOUT); if (result < 0) dev_err(dev, 
"Cannot set WUSB MAS allocation: %d\n", result); out: return result; } /* * Add an IE to the host's MMC * * @interval: See WUSB1.0[8.5.3.1] * @repeat_cnt: See WUSB1.0[8.5.3.1] * @handle: See WUSB1.0[8.5.3.1] * @wuie: Pointer to the header of the WUSB IE data to add. * MUST BE allocated in a kmalloc buffer (no stack or * vmalloc). * * NOTE: the format of the WUSB IEs for MMCs are different to the * normal MBOA MAC IEs (IE Id + Length in MBOA MAC vs. Length + * Id in WUSB IEs). Standards...you gotta love'em. */ static int __hwahc_op_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, u8 handle, struct wuie_hdr *wuie) { struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct wahc *wa = &hwahc->wa; u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), WUSB_REQ_ADD_MMC_IE, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, interval << 8 | repeat_cnt, handle << 8 | iface_no, wuie, wuie->bLength, USB_CTRL_SET_TIMEOUT); } /* * Remove an IE to the host's MMC * * @handle: See WUSB1.0[8.5.3.1] */ static int __hwahc_op_mmcie_rm(struct wusbhc *wusbhc, u8 handle) { struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct wahc *wa = &hwahc->wa; u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), WUSB_REQ_REMOVE_MMC_IE, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, handle << 8 | iface_no, NULL, 0, USB_CTRL_SET_TIMEOUT); } /* * Update device information for a given fake port * * @port_idx: Fake port to which device is connected (wusbhc index, not * USB port number). */ static int __hwahc_op_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) { struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct wahc *wa = &hwahc->wa; u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; struct hwa_dev_info *dev_info; int ret; /* fill out the Device Info buffer and send it */ dev_info = kzalloc(sizeof(struct hwa_dev_info), GFP_KERNEL); if (!dev_info) return -ENOMEM; uwb_mas_bm_copy_le(dev_info->bmDeviceAvailability, &wusb_dev->availability); dev_info->bDeviceAddress = wusb_dev->addr; /* * If the descriptors haven't been read yet, use a default PHY * rate of 53.3 Mbit/s only. The correct value will be used * when this will be called again as part of the * authentication process (which occurs after the descriptors * have been read). */ if (wusb_dev->wusb_cap_descr) dev_info->wPHYRates = wusb_dev->wusb_cap_descr->wPHYRates; else dev_info->wPHYRates = cpu_to_le16(USB_WIRELESS_PHY_53); ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), WUSB_REQ_SET_DEV_INFO, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, wusb_dev->port_idx << 8 | iface_no, dev_info, sizeof(struct hwa_dev_info), USB_CTRL_SET_TIMEOUT); kfree(dev_info); return ret; } /* * Set host's idea of which encryption (and key) method to use when * talking to ad evice on a given port. * * If key is NULL, it means disable encryption for that "virtual port" * (used when we disconnect). 
*/ static int __hwahc_dev_set_key(struct wusbhc *wusbhc, u8 port_idx, u32 tkid, const void *key, size_t key_size, u8 key_idx) { int result = -ENOMEM; struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct wahc *wa = &hwahc->wa; u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; struct usb_key_descriptor *keyd; size_t keyd_len; keyd_len = sizeof(*keyd) + key_size; keyd = kzalloc(keyd_len, GFP_KERNEL); if (keyd == NULL) return -ENOMEM; keyd->bLength = keyd_len; keyd->bDescriptorType = USB_DT_KEY; keyd->tTKID[0] = (tkid >> 0) & 0xff; keyd->tTKID[1] = (tkid >> 8) & 0xff; keyd->tTKID[2] = (tkid >> 16) & 0xff; memcpy(keyd->bKeyData, key, key_size); result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), USB_REQ_SET_DESCRIPTOR, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, USB_DT_KEY << 8 | key_idx, port_idx << 8 | iface_no, keyd, keyd_len, USB_CTRL_SET_TIMEOUT); kzfree(keyd); /* clear keys etc. */ return result; } /* * Set host's idea of which encryption (and key) method to use when * talking to ad evice on a given port. * * If key is NULL, it means disable encryption for that "virtual port" * (used when we disconnect). */ static int __hwahc_op_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid, const void *key, size_t key_size) { int result = -ENOMEM; struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); struct wahc *wa = &hwahc->wa; u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; u8 encryption_value; /* Tell the host which key to use to talk to the device */ if (key) { u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_PTK, WUSB_KEY_INDEX_ORIGINATOR_HOST); result = __hwahc_dev_set_key(wusbhc, port_idx, tkid, key, key_size, key_idx); if (result < 0) goto error_set_key; encryption_value = wusbhc->ccm1_etd->bEncryptionValue; } else { /* FIXME: this should come from wusbhc->etd[UNSECURE].value */ encryption_value = 0; } /* Set the encryption type for communicating with the device */ result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), USB_REQ_SET_ENCRYPTION, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, encryption_value, port_idx << 8 | iface_no, NULL, 0, USB_CTRL_SET_TIMEOUT); if (result < 0) dev_err(wusbhc->dev, "Can't set host's WUSB encryption for " "port index %u to %s (value %d): %d\n", port_idx, wusb_et_name(wusbhc->ccm1_etd->bEncryptionType), wusbhc->ccm1_etd->bEncryptionValue, result); error_set_key: return result; } /* * Set host's GTK key */ static int __hwahc_op_set_gtk(struct wusbhc *wusbhc, u32 tkid, const void *key, size_t key_size) { u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK, WUSB_KEY_INDEX_ORIGINATOR_HOST); return __hwahc_dev_set_key(wusbhc, 0, tkid, key, key_size, key_idx); } /* * Get the Wire Adapter class-specific descriptor * * NOTE: this descriptor comes with the big bundled configuration * descriptor that includes the interfaces' and endpoints', so * we just look for it in the cached copy kept by the USB stack. * * NOTE2: We convert LE fields to CPU order. 
*/ static int wa_fill_descr(struct wahc *wa) { int result; struct device *dev = &wa->usb_iface->dev; char *itr; struct usb_device *usb_dev = wa->usb_dev; struct usb_descriptor_header *hdr; struct usb_wa_descriptor *wa_descr; size_t itr_size, actconfig_idx; actconfig_idx = (usb_dev->actconfig - usb_dev->config) / sizeof(usb_dev->config[0]); itr = usb_dev->rawdescriptors[actconfig_idx]; itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); while (itr_size >= sizeof(*hdr)) { hdr = (struct usb_descriptor_header *) itr; dev_dbg(dev, "Extra device descriptor: " "type %02x/%u bytes @ %zu (%zu left)\n", hdr->bDescriptorType, hdr->bLength, (itr - usb_dev->rawdescriptors[actconfig_idx]), itr_size); if (hdr->bDescriptorType == USB_DT_WIRE_ADAPTER) goto found; itr += hdr->bLength; itr_size -= hdr->bLength; } dev_err(dev, "cannot find Wire Adapter Class descriptor\n"); return -ENODEV; found: result = -EINVAL; if (hdr->bLength > itr_size) { /* is it available? */ dev_err(dev, "incomplete Wire Adapter Class descriptor " "(%zu bytes left, %u needed)\n", itr_size, hdr->bLength); goto error; } if (hdr->bLength < sizeof(*wa->wa_descr)) { dev_err(dev, "short Wire Adapter Class descriptor\n"); goto error; } wa->wa_descr = wa_descr = (struct usb_wa_descriptor *) hdr; if (le16_to_cpu(wa_descr->bcdWAVersion) > 0x0100) dev_warn(dev, "Wire Adapter v%d.%d newer than groked v1.0\n", (le16_to_cpu(wa_descr->bcdWAVersion) & 0xff00) >> 8, le16_to_cpu(wa_descr->bcdWAVersion) & 0x00ff); result = 0; error: return result; } static const struct hc_driver hwahc_hc_driver = { .description = "hwa-hcd", .product_desc = "Wireless USB HWA host controller", .hcd_priv_size = sizeof(struct hwahc) - sizeof(struct usb_hcd), .irq = NULL, /* FIXME */ .flags = HCD_USB25, .reset = hwahc_op_reset, .start = hwahc_op_start, .stop = hwahc_op_stop, .get_frame_number = hwahc_op_get_frame_number, .urb_enqueue = hwahc_op_urb_enqueue, .urb_dequeue = hwahc_op_urb_dequeue, .endpoint_disable = hwahc_op_endpoint_disable, .hub_status_data = wusbhc_rh_status_data, .hub_control = wusbhc_rh_control, .start_port_reset = wusbhc_rh_start_port_reset, }; static int hwahc_security_create(struct hwahc *hwahc) { int result; struct wusbhc *wusbhc = &hwahc->wusbhc; struct usb_device *usb_dev = hwahc->wa.usb_dev; struct device *dev = &usb_dev->dev; struct usb_security_descriptor *secd; struct usb_encryption_descriptor *etd; void *itr, *top; size_t itr_size, needed, bytes; u8 index; char buf[64]; /* Find the host's security descriptors in the config descr bundle */ index = (usb_dev->actconfig - usb_dev->config) / sizeof(usb_dev->config[0]); itr = usb_dev->rawdescriptors[index]; itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); top = itr + itr_size; result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index], le16_to_cpu(usb_dev->actconfig->desc.wTotalLength), USB_DT_SECURITY, (void **) &secd); if (result == -1) { dev_warn(dev, "BUG? WUSB host has no security descriptors\n"); return 0; } needed = sizeof(*secd); if (top - (void *)secd < needed) { dev_err(dev, "BUG? Not enough data to process security " "descriptor header (%zu bytes left vs %zu needed)\n", top - (void *) secd, needed); return 0; } needed = le16_to_cpu(secd->wTotalLength); if (top - (void *)secd < needed) { dev_err(dev, "BUG? 
Not enough data to process security " "descriptors (%zu bytes left vs %zu needed)\n", top - (void *) secd, needed); return 0; } /* Walk over the sec descriptors and store CCM1's on wusbhc */ itr = (void *) secd + sizeof(*secd); top = (void *) secd + le16_to_cpu(secd->wTotalLength); index = 0; bytes = 0; while (itr < top) { etd = itr; if (top - itr < sizeof(*etd)) { dev_err(dev, "BUG: bad host security descriptor; " "not enough data (%zu vs %zu left)\n", top - itr, sizeof(*etd)); break; } if (etd->bLength < sizeof(*etd)) { dev_err(dev, "BUG: bad host encryption descriptor; " "descriptor is too short " "(%zu vs %zu needed)\n", (size_t)etd->bLength, sizeof(*etd)); break; } itr += etd->bLength; bytes += snprintf(buf + bytes, sizeof(buf) - bytes, "%s (0x%02x) ", wusb_et_name(etd->bEncryptionType), etd->bEncryptionValue); wusbhc->ccm1_etd = etd; } dev_info(dev, "supported encryption types: %s\n", buf); if (wusbhc->ccm1_etd == NULL) { dev_err(dev, "E: host doesn't support CCM-1 crypto\n"); return 0; } /* Pretty print what we support */ return 0; } static void hwahc_security_release(struct hwahc *hwahc) { /* nothing to do here so far... */ } static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface, kernel_ulong_t quirks) { int result; struct device *dev = &iface->dev; struct wusbhc *wusbhc = &hwahc->wusbhc; struct wahc *wa = &hwahc->wa; struct usb_device *usb_dev = interface_to_usbdev(iface); wa->usb_dev = usb_get_dev(usb_dev); /* bind the USB device */ wa->usb_iface = usb_get_intf(iface); wusbhc->dev = dev; /* defer getting the uwb_rc handle until it is needed since it * may not have been registered by the hwa_rc driver yet. */ wusbhc->uwb_rc = NULL; result = wa_fill_descr(wa); /* Get the device descriptor */ if (result < 0) goto error_fill_descriptor; if (wa->wa_descr->bNumPorts > USB_MAXCHILDREN) { dev_err(dev, "FIXME: USB_MAXCHILDREN too low for WUSB " "adapter (%u ports)\n", wa->wa_descr->bNumPorts); wusbhc->ports_max = USB_MAXCHILDREN; } else { wusbhc->ports_max = wa->wa_descr->bNumPorts; } wusbhc->mmcies_max = wa->wa_descr->bNumMMCIEs; wusbhc->start = __hwahc_op_wusbhc_start; wusbhc->stop = __hwahc_op_wusbhc_stop; wusbhc->mmcie_add = __hwahc_op_mmcie_add; wusbhc->mmcie_rm = __hwahc_op_mmcie_rm; wusbhc->dev_info_set = __hwahc_op_dev_info_set; wusbhc->bwa_set = __hwahc_op_bwa_set; wusbhc->set_num_dnts = __hwahc_op_set_num_dnts; wusbhc->set_ptk = __hwahc_op_set_ptk; wusbhc->set_gtk = __hwahc_op_set_gtk; result = hwahc_security_create(hwahc); if (result < 0) { dev_err(dev, "Can't initialize security: %d\n", result); goto error_security_create; } wa->wusb = wusbhc; /* FIXME: ugly, need to fix */ result = wusbhc_create(&hwahc->wusbhc); if (result < 0) { dev_err(dev, "Can't create WUSB HC structures: %d\n", result); goto error_wusbhc_create; } result = wa_create(&hwahc->wa, iface, quirks); if (result < 0) goto error_wa_create; return 0; error_wa_create: wusbhc_destroy(&hwahc->wusbhc); error_wusbhc_create: /* WA Descr fill allocs no resources */ error_security_create: error_fill_descriptor: usb_put_intf(iface); usb_put_dev(usb_dev); return result; } static void hwahc_destroy(struct hwahc *hwahc) { struct wusbhc *wusbhc = &hwahc->wusbhc; mutex_lock(&wusbhc->mutex); __wa_destroy(&hwahc->wa); wusbhc_destroy(&hwahc->wusbhc); hwahc_security_release(hwahc); hwahc->wusbhc.dev = NULL; uwb_rc_put(wusbhc->uwb_rc); usb_put_intf(hwahc->wa.usb_iface); usb_put_dev(hwahc->wa.usb_dev); mutex_unlock(&wusbhc->mutex); } static void hwahc_init(struct hwahc *hwahc) { wa_init(&hwahc->wa); } static int 
hwahc_probe(struct usb_interface *usb_iface, const struct usb_device_id *id) { int result; struct usb_hcd *usb_hcd; struct wusbhc *wusbhc; struct hwahc *hwahc; struct device *dev = &usb_iface->dev; result = -ENOMEM; usb_hcd = usb_create_hcd(&hwahc_hc_driver, &usb_iface->dev, "wusb-hwa"); if (usb_hcd == NULL) { dev_err(dev, "unable to allocate instance\n"); goto error_alloc; } usb_hcd->wireless = 1; usb_hcd->self.sg_tablesize = ~0; wusbhc = usb_hcd_to_wusbhc(usb_hcd); hwahc = container_of(wusbhc, struct hwahc, wusbhc); hwahc_init(hwahc); result = hwahc_create(hwahc, usb_iface, id->driver_info); if (result < 0) { dev_err(dev, "Cannot initialize internals: %d\n", result); goto error_hwahc_create; } result = usb_add_hcd(usb_hcd, 0, 0); if (result < 0) { dev_err(dev, "Cannot add HCD: %d\n", result); goto error_add_hcd; } device_wakeup_enable(usb_hcd->self.controller); result = wusbhc_b_create(&hwahc->wusbhc); if (result < 0) { dev_err(dev, "Cannot setup phase B of WUSBHC: %d\n", result); goto error_wusbhc_b_create; } return 0; error_wusbhc_b_create: usb_remove_hcd(usb_hcd); error_add_hcd: hwahc_destroy(hwahc); error_hwahc_create: usb_put_hcd(usb_hcd); error_alloc: return result; } static void hwahc_disconnect(struct usb_interface *usb_iface) { struct usb_hcd *usb_hcd; struct wusbhc *wusbhc; struct hwahc *hwahc; usb_hcd = usb_get_intfdata(usb_iface); wusbhc = usb_hcd_to_wusbhc(usb_hcd); hwahc = container_of(wusbhc, struct hwahc, wusbhc); wusbhc_b_destroy(&hwahc->wusbhc); usb_remove_hcd(usb_hcd); hwahc_destroy(hwahc); usb_put_hcd(usb_hcd); } static const struct usb_device_id hwahc_id_table[] = { /* Alereon 5310 */ { USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5310, 0xe0, 0x02, 0x01), .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC | WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS }, /* Alereon 5611 */ { USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5611, 0xe0, 0x02, 0x01), .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC | WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS }, /* FIXME: use class labels for this */ { USB_INTERFACE_INFO(0xe0, 0x02, 0x01), }, {}, }; MODULE_DEVICE_TABLE(usb, hwahc_id_table); static struct usb_driver hwahc_driver = { .name = "hwa-hc", .probe = hwahc_probe, .disconnect = hwahc_disconnect, .id_table = hwahc_id_table, }; module_usb_driver(hwahc_driver); MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); MODULE_DESCRIPTION("Host Wired Adapter USB Host Control Driver"); MODULE_LICENSE("GPL");
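/*
 * Illustrative sketch (hwahc_driver_init/_exit are hypothetical names, shown
 * only to spell out the helper): module_usb_driver(hwahc_driver) above expands
 * to the standard module init/exit boilerplate, roughly equivalent to:
 *
 *	static int __init hwahc_driver_init(void)
 *	{
 *		return usb_register(&hwahc_driver);
 *	}
 *	module_init(hwahc_driver_init);
 *
 *	static void __exit hwahc_driver_exit(void)
 *	{
 *		usb_deregister(&hwahc_driver);
 *	}
 *	module_exit(hwahc_driver_exit);
 */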
./CrossVul/dataset_final_sorted/CWE-400/c/bad_485_2
crossvul-cpp_data_bad_1261_0
/** * Copyright (c) 2014 Redpine Signals Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/etherdevice.h> #include <linux/timer.h> #include "rsi_mgmt.h" #include "rsi_common.h" #include "rsi_ps.h" #include "rsi_hal.h" static struct bootup_params boot_params_20 = { .magic_number = cpu_to_le16(0x5aa5), .crystal_good_time = 0x0, .valid = cpu_to_le32(VALID_20), .reserved_for_valids = 0x0, .bootup_mode_info = 0x0, .digital_loop_back_params = 0x0, .rtls_timestamp_en = 0x0, .host_spi_intr_cfg = 0x0, .device_clk_info = {{ .pll_config_g = { .tapll_info_g = { .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_20 << 8)| (TA_PLL_M_VAL_20)), .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_20), }, .pll960_info_g = { .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_20 << 8)| (PLL960_N_VAL_20)), .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_20), .pll_reg_3 = 0x0, }, .afepll_info_g = { .pll_reg = cpu_to_le16(0x9f0), } }, .switch_clk_g = { .switch_clk_info = cpu_to_le16(0xb), .bbp_lmac_clk_reg_val = cpu_to_le16(0x111), .umac_clock_reg_config = cpu_to_le16(0x48), .qspi_uart_clock_reg_config = cpu_to_le16(0x1211) } }, { .pll_config_g = { .tapll_info_g = { .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_20 << 8)| (TA_PLL_M_VAL_20)), .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_20), }, .pll960_info_g = { .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_20 << 8)| (PLL960_N_VAL_20)), .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_20), .pll_reg_3 = 0x0, }, .afepll_info_g = { .pll_reg = cpu_to_le16(0x9f0), } }, .switch_clk_g = { .switch_clk_info = 0x0, .bbp_lmac_clk_reg_val = 0x0, .umac_clock_reg_config = 0x0, .qspi_uart_clock_reg_config = 0x0 } }, { .pll_config_g = { .tapll_info_g = { .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_20 << 8)| (TA_PLL_M_VAL_20)), .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_20), }, .pll960_info_g = { .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_20 << 8)| (PLL960_N_VAL_20)), .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_20), .pll_reg_3 = 0x0, }, .afepll_info_g = { .pll_reg = cpu_to_le16(0x9f0), } }, .switch_clk_g = { .switch_clk_info = 0x0, .bbp_lmac_clk_reg_val = 0x0, .umac_clock_reg_config = 0x0, .qspi_uart_clock_reg_config = 0x0 } } }, .buckboost_wakeup_cnt = 0x0, .pmu_wakeup_wait = 0x0, .shutdown_wait_time = 0x0, .pmu_slp_clkout_sel = 0x0, .wdt_prog_value = 0x0, .wdt_soc_rst_delay = 0x0, .dcdc_operation_mode = 0x0, .soc_reset_wait_cnt = 0x0, .waiting_time_at_fresh_sleep = 0x0, .max_threshold_to_avoid_sleep = 0x0, .beacon_resedue_alg_en = 0, }; static struct bootup_params boot_params_40 = { .magic_number = cpu_to_le16(0x5aa5), .crystal_good_time = 0x0, .valid = cpu_to_le32(VALID_40), .reserved_for_valids = 0x0, .bootup_mode_info = 0x0, .digital_loop_back_params = 0x0, .rtls_timestamp_en = 0x0, .host_spi_intr_cfg = 0x0, .device_clk_info = {{ .pll_config_g = { .tapll_info_g = { .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_40 << 8)| (TA_PLL_M_VAL_40)), .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_40), }, .pll960_info_g 
= { .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_40 << 8)| (PLL960_N_VAL_40)), .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_40), .pll_reg_3 = 0x0, }, .afepll_info_g = { .pll_reg = cpu_to_le16(0x9f0), } }, .switch_clk_g = { .switch_clk_info = cpu_to_le16(0x09), .bbp_lmac_clk_reg_val = cpu_to_le16(0x1121), .umac_clock_reg_config = cpu_to_le16(0x48), .qspi_uart_clock_reg_config = cpu_to_le16(0x1211) } }, { .pll_config_g = { .tapll_info_g = { .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_40 << 8)| (TA_PLL_M_VAL_40)), .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_40), }, .pll960_info_g = { .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_40 << 8)| (PLL960_N_VAL_40)), .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_40), .pll_reg_3 = 0x0, }, .afepll_info_g = { .pll_reg = cpu_to_le16(0x9f0), } }, .switch_clk_g = { .switch_clk_info = 0x0, .bbp_lmac_clk_reg_val = 0x0, .umac_clock_reg_config = 0x0, .qspi_uart_clock_reg_config = 0x0 } }, { .pll_config_g = { .tapll_info_g = { .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_40 << 8)| (TA_PLL_M_VAL_40)), .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_40), }, .pll960_info_g = { .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_40 << 8)| (PLL960_N_VAL_40)), .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_40), .pll_reg_3 = 0x0, }, .afepll_info_g = { .pll_reg = cpu_to_le16(0x9f0), } }, .switch_clk_g = { .switch_clk_info = 0x0, .bbp_lmac_clk_reg_val = 0x0, .umac_clock_reg_config = 0x0, .qspi_uart_clock_reg_config = 0x0 } } }, .buckboost_wakeup_cnt = 0x0, .pmu_wakeup_wait = 0x0, .shutdown_wait_time = 0x0, .pmu_slp_clkout_sel = 0x0, .wdt_prog_value = 0x0, .wdt_soc_rst_delay = 0x0, .dcdc_operation_mode = 0x0, .soc_reset_wait_cnt = 0x0, .waiting_time_at_fresh_sleep = 0x0, .max_threshold_to_avoid_sleep = 0x0, .beacon_resedue_alg_en = 0, }; static struct bootup_params_9116 boot_params_9116_20 = { .magic_number = cpu_to_le16(LOADED_TOKEN), .valid = cpu_to_le32(VALID_20), .device_clk_info_9116 = {{ .pll_config_9116_g = { .pll_ctrl_set_reg = cpu_to_le16(0xd518), .pll_ctrl_clr_reg = cpu_to_le16(0x2ae7), .pll_modem_conig_reg = cpu_to_le16(0x2000), .soc_clk_config_reg = cpu_to_le16(0x0c18), .adc_dac_strm1_config_reg = cpu_to_le16(0x1100), .adc_dac_strm2_config_reg = cpu_to_le16(0x6600), }, .switch_clk_9116_g = { .switch_clk_info = cpu_to_le32((RSI_SWITCH_TASS_CLK | RSI_SWITCH_WLAN_BBP_LMAC_CLK_REG | RSI_SWITCH_BBP_LMAC_CLK_REG)), .tass_clock_reg = cpu_to_le32(0x083C0503), .wlan_bbp_lmac_clk_reg_val = cpu_to_le32(0x01042001), .zbbt_bbp_lmac_clk_reg_val = cpu_to_le32(0x02010001), .bbp_lmac_clk_en_val = cpu_to_le32(0x0000003b), } }, }, }; static struct bootup_params_9116 boot_params_9116_40 = { .magic_number = cpu_to_le16(LOADED_TOKEN), .valid = cpu_to_le32(VALID_40), .device_clk_info_9116 = {{ .pll_config_9116_g = { .pll_ctrl_set_reg = cpu_to_le16(0xd518), .pll_ctrl_clr_reg = cpu_to_le16(0x2ae7), .pll_modem_conig_reg = cpu_to_le16(0x3000), .soc_clk_config_reg = cpu_to_le16(0x0c18), .adc_dac_strm1_config_reg = cpu_to_le16(0x0000), .adc_dac_strm2_config_reg = cpu_to_le16(0x6600), }, .switch_clk_9116_g = { .switch_clk_info = cpu_to_le32((RSI_SWITCH_TASS_CLK | RSI_SWITCH_WLAN_BBP_LMAC_CLK_REG | RSI_SWITCH_BBP_LMAC_CLK_REG | RSI_MODEM_CLK_160MHZ)), .tass_clock_reg = cpu_to_le32(0x083C0503), .wlan_bbp_lmac_clk_reg_val = cpu_to_le32(0x01042002), .zbbt_bbp_lmac_clk_reg_val = cpu_to_le32(0x04010002), .bbp_lmac_clk_en_val = cpu_to_le32(0x0000003b), } }, }, }; static u16 mcs[] = {13, 26, 39, 52, 78, 104, 117, 130}; /** * rsi_set_default_parameters() - This function sets default parameters. * @common: Pointer to the driver private structure. 
* * Return: none */ static void rsi_set_default_parameters(struct rsi_common *common) { common->band = NL80211_BAND_2GHZ; common->channel_width = BW_20MHZ; common->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; common->channel = 1; common->min_rate = 0xffff; common->fsm_state = FSM_CARD_NOT_READY; common->iface_down = true; common->endpoint = EP_2GHZ_20MHZ; common->driver_mode = 1; /* End to end mode */ common->lp_ps_handshake_mode = 0; /* Default no handShake mode*/ common->ulp_ps_handshake_mode = 2; /* Default PKT handShake mode*/ common->rf_power_val = 0; /* Default 1.9V */ common->wlan_rf_power_mode = 0; common->obm_ant_sel_val = 2; common->beacon_interval = RSI_BEACON_INTERVAL; common->dtim_cnt = RSI_DTIM_COUNT; common->w9116_features.pll_mode = 0x0; common->w9116_features.rf_type = 1; common->w9116_features.wireless_mode = 0; common->w9116_features.enable_ppe = 0; common->w9116_features.afe_type = 1; common->w9116_features.dpd = 0; common->w9116_features.sifs_tx_enable = 0; common->w9116_features.ps_options = 0; } void init_bgscan_params(struct rsi_common *common) { memset((u8 *)&common->bgscan, 0, sizeof(struct rsi_bgscan_params)); common->bgscan.bgscan_threshold = RSI_DEF_BGSCAN_THRLD; common->bgscan.roam_threshold = RSI_DEF_ROAM_THRLD; common->bgscan.bgscan_periodicity = RSI_BGSCAN_PERIODICITY; common->bgscan.num_bgscan_channels = 0; common->bgscan.two_probe = 1; common->bgscan.active_scan_duration = RSI_ACTIVE_SCAN_TIME; common->bgscan.passive_scan_duration = RSI_PASSIVE_SCAN_TIME; } /** * rsi_set_contention_vals() - This function sets the contention values for the * backoff procedure. * @common: Pointer to the driver private structure. * * Return: None. */ static void rsi_set_contention_vals(struct rsi_common *common) { u8 ii = 0; for (; ii < NUM_EDCA_QUEUES; ii++) { common->tx_qinfo[ii].wme_params = (((common->edca_params[ii].cw_min / 2) + (common->edca_params[ii].aifs)) * WMM_SHORT_SLOT_TIME + SIFS_DURATION); common->tx_qinfo[ii].weight = common->tx_qinfo[ii].wme_params; common->tx_qinfo[ii].pkt_contended = 0; } } /** * rsi_send_internal_mgmt_frame() - This function sends management frames to * firmware.Also schedules packet to queue * for transmission. * @common: Pointer to the driver private structure. * @skb: Pointer to the socket buffer structure. * * Return: 0 on success, -1 on failure. */ static int rsi_send_internal_mgmt_frame(struct rsi_common *common, struct sk_buff *skb) { struct skb_info *tx_params; struct rsi_cmd_desc *desc; if (skb == NULL) { rsi_dbg(ERR_ZONE, "%s: Unable to allocate skb\n", __func__); return -ENOMEM; } desc = (struct rsi_cmd_desc *)skb->data; desc->desc_dword0.len_qno |= cpu_to_le16(DESC_IMMEDIATE_WAKEUP); skb->priority = MGMT_SOFT_Q; tx_params = (struct skb_info *)&IEEE80211_SKB_CB(skb)->driver_data; tx_params->flags |= INTERNAL_MGMT_PKT; skb_queue_tail(&common->tx_queue[MGMT_SOFT_Q], skb); rsi_set_event(&common->tx_thread.event); return 0; } /** * rsi_load_radio_caps() - This function is used to send radio capabilities * values to firmware. * @common: Pointer to the driver private structure. * * Return: 0 on success, corresponding negative error code on failure. 
*/ static int rsi_load_radio_caps(struct rsi_common *common) { struct rsi_radio_caps *radio_caps; struct rsi_hw *adapter = common->priv; u16 inx = 0; u8 ii; u8 radio_id = 0; u16 gc[20] = {0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0}; struct sk_buff *skb; u16 frame_len = sizeof(struct rsi_radio_caps); rsi_dbg(INFO_ZONE, "%s: Sending rate symbol req frame\n", __func__); skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, frame_len); radio_caps = (struct rsi_radio_caps *)skb->data; radio_caps->desc_dword0.frame_type = RADIO_CAPABILITIES; radio_caps->channel_num = common->channel; radio_caps->rf_model = RSI_RF_TYPE; radio_caps->radio_cfg_info = RSI_LMAC_CLOCK_80MHZ; if (common->channel_width == BW_40MHZ) { radio_caps->radio_cfg_info |= RSI_ENABLE_40MHZ; if (common->fsm_state == FSM_MAC_INIT_DONE) { struct ieee80211_hw *hw = adapter->hw; struct ieee80211_conf *conf = &hw->conf; if (conf_is_ht40_plus(conf)) { radio_caps->ppe_ack_rate = cpu_to_le16(LOWER_20_ENABLE | (LOWER_20_ENABLE >> 12)); } else if (conf_is_ht40_minus(conf)) { radio_caps->ppe_ack_rate = cpu_to_le16(UPPER_20_ENABLE | (UPPER_20_ENABLE >> 12)); } else { radio_caps->ppe_ack_rate = cpu_to_le16((BW_40MHZ << 12) | FULL40M_ENABLE); } } } radio_caps->radio_info |= radio_id; if (adapter->device_model == RSI_DEV_9116 && common->channel_width == BW_20MHZ) radio_caps->radio_cfg_info &= ~0x3; radio_caps->sifs_tx_11n = cpu_to_le16(SIFS_TX_11N_VALUE); radio_caps->sifs_tx_11b = cpu_to_le16(SIFS_TX_11B_VALUE); radio_caps->slot_rx_11n = cpu_to_le16(SHORT_SLOT_VALUE); radio_caps->ofdm_ack_tout = cpu_to_le16(OFDM_ACK_TOUT_VALUE); radio_caps->cck_ack_tout = cpu_to_le16(CCK_ACK_TOUT_VALUE); radio_caps->preamble_type = cpu_to_le16(LONG_PREAMBLE); for (ii = 0; ii < MAX_HW_QUEUES; ii++) { radio_caps->qos_params[ii].cont_win_min_q = cpu_to_le16(3); radio_caps->qos_params[ii].cont_win_max_q = cpu_to_le16(0x3f); radio_caps->qos_params[ii].aifsn_val_q = cpu_to_le16(2); radio_caps->qos_params[ii].txop_q = 0; } for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) { if (common->edca_params[ii].cw_max > 0) { radio_caps->qos_params[ii].cont_win_min_q = cpu_to_le16(common->edca_params[ii].cw_min); radio_caps->qos_params[ii].cont_win_max_q = cpu_to_le16(common->edca_params[ii].cw_max); radio_caps->qos_params[ii].aifsn_val_q = cpu_to_le16(common->edca_params[ii].aifs << 8); radio_caps->qos_params[ii].txop_q = cpu_to_le16(common->edca_params[ii].txop); } } radio_caps->qos_params[BROADCAST_HW_Q].txop_q = cpu_to_le16(0xffff); radio_caps->qos_params[MGMT_HW_Q].txop_q = 0; radio_caps->qos_params[BEACON_HW_Q].txop_q = cpu_to_le16(0xffff); memcpy(&common->rate_pwr[0], &gc[0], 40); for (ii = 0; ii < 20; ii++) radio_caps->gcpd_per_rate[inx++] = cpu_to_le16(common->rate_pwr[ii] & 0x00FF); rsi_set_len_qno(&radio_caps->desc_dword0.len_qno, (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_mgmt_pkt_to_core() - This function is the entry point for Mgmt module. * @common: Pointer to the driver private structure. * @msg: Pointer to received packet. * @msg_len: Length of the received packet. * @type: Type of received packet. * * Return: 0 on success, -1 on failure. 
*/ static int rsi_mgmt_pkt_to_core(struct rsi_common *common, u8 *msg, s32 msg_len) { struct rsi_hw *adapter = common->priv; struct ieee80211_tx_info *info; struct skb_info *rx_params; u8 pad_bytes = msg[4]; struct sk_buff *skb; if (!adapter->sc_nvifs) return -ENOLINK; msg_len -= pad_bytes; if (msg_len <= 0) { rsi_dbg(MGMT_RX_ZONE, "%s: Invalid rx msg of len = %d\n", __func__, msg_len); return -EINVAL; } skb = dev_alloc_skb(msg_len); if (!skb) return -ENOMEM; skb_put_data(skb, (u8 *)(msg + FRAME_DESC_SZ + pad_bytes), msg_len); info = IEEE80211_SKB_CB(skb); rx_params = (struct skb_info *)info->driver_data; rx_params->rssi = rsi_get_rssi(msg); rx_params->channel = rsi_get_channel(msg); rsi_indicate_pkt_to_os(common, skb); return 0; } /** * rsi_hal_send_sta_notify_frame() - This function sends the station notify * frame to firmware. * @common: Pointer to the driver private structure. * @opmode: Operating mode of device. * @notify_event: Notification about station connection. * @bssid: bssid. * @qos_enable: Qos is enabled. * @aid: Aid (unique for all STA). * * Return: status: 0 on success, corresponding negative error code on failure. */ int rsi_hal_send_sta_notify_frame(struct rsi_common *common, enum opmode opmode, u8 notify_event, const unsigned char *bssid, u8 qos_enable, u16 aid, u16 sta_id, struct ieee80211_vif *vif) { struct sk_buff *skb = NULL; struct rsi_peer_notify *peer_notify; u16 vap_id = ((struct vif_priv *)vif->drv_priv)->vap_id; int status; u16 frame_len = sizeof(struct rsi_peer_notify); rsi_dbg(MGMT_TX_ZONE, "%s: Sending sta notify frame\n", __func__); skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, frame_len); peer_notify = (struct rsi_peer_notify *)skb->data; if (opmode == RSI_OPMODE_STA) peer_notify->command = cpu_to_le16(PEER_TYPE_AP << 1); else if (opmode == RSI_OPMODE_AP) peer_notify->command = cpu_to_le16(PEER_TYPE_STA << 1); switch (notify_event) { case STA_CONNECTED: peer_notify->command |= cpu_to_le16(RSI_ADD_PEER); break; case STA_DISCONNECTED: peer_notify->command |= cpu_to_le16(RSI_DELETE_PEER); break; default: break; } peer_notify->command |= cpu_to_le16((aid & 0xfff) << 4); ether_addr_copy(peer_notify->mac_addr, bssid); peer_notify->mpdu_density = cpu_to_le16(RSI_MPDU_DENSITY); peer_notify->sta_flags = cpu_to_le32((qos_enable) ? 1 : 0); rsi_set_len_qno(&peer_notify->desc.desc_dword0.len_qno, (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); peer_notify->desc.desc_dword0.frame_type = PEER_NOTIFY; peer_notify->desc.desc_dword3.qid_tid = sta_id; peer_notify->desc.desc_dword3.sta_id = vap_id; skb_put(skb, frame_len); status = rsi_send_internal_mgmt_frame(common, skb); if ((vif->type == NL80211_IFTYPE_STATION) && (!status && qos_enable)) { rsi_set_contention_vals(common); status = rsi_load_radio_caps(common); } return status; } /** * rsi_send_aggregation_params_frame() - This function sends the ampdu * indication frame to firmware. * @common: Pointer to the driver private structure. * @tid: traffic identifier. * @ssn: ssn. * @buf_size: buffer size. * @event: notification about station connection. * * Return: 0 on success, corresponding negative error code on failure. 
*/ int rsi_send_aggregation_params_frame(struct rsi_common *common, u16 tid, u16 ssn, u8 buf_size, u8 event, u8 sta_id) { struct sk_buff *skb = NULL; struct rsi_aggr_params *aggr_params; u16 frame_len = sizeof(struct rsi_aggr_params); skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, frame_len); aggr_params = (struct rsi_aggr_params *)skb->data; rsi_dbg(MGMT_TX_ZONE, "%s: Sending AMPDU indication frame\n", __func__); rsi_set_len_qno(&aggr_params->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q); aggr_params->desc_dword0.frame_type = AMPDU_IND; aggr_params->aggr_params = tid & RSI_AGGR_PARAMS_TID_MASK; aggr_params->peer_id = sta_id; if (event == STA_TX_ADDBA_DONE) { aggr_params->seq_start = cpu_to_le16(ssn); aggr_params->baw_size = cpu_to_le16(buf_size); aggr_params->aggr_params |= RSI_AGGR_PARAMS_START; } else if (event == STA_RX_ADDBA_DONE) { aggr_params->seq_start = cpu_to_le16(ssn); aggr_params->aggr_params |= (RSI_AGGR_PARAMS_START | RSI_AGGR_PARAMS_RX_AGGR); } else if (event == STA_RX_DELBA) { aggr_params->aggr_params |= RSI_AGGR_PARAMS_RX_AGGR; } skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_program_bb_rf() - This function starts base band and RF programming. * This is called after initial configurations are done. * @common: Pointer to the driver private structure. * * Return: 0 on success, corresponding negative error code on failure. */ static int rsi_program_bb_rf(struct rsi_common *common) { struct sk_buff *skb; struct rsi_bb_rf_prog *bb_rf_prog; u16 frame_len = sizeof(struct rsi_bb_rf_prog); rsi_dbg(MGMT_TX_ZONE, "%s: Sending program BB/RF frame\n", __func__); skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, frame_len); bb_rf_prog = (struct rsi_bb_rf_prog *)skb->data; rsi_set_len_qno(&bb_rf_prog->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q); bb_rf_prog->desc_dword0.frame_type = BBP_PROG_IN_TA; bb_rf_prog->endpoint = common->endpoint; bb_rf_prog->rf_power_mode = common->wlan_rf_power_mode; if (common->rf_reset) { bb_rf_prog->flags = cpu_to_le16(RF_RESET_ENABLE); rsi_dbg(MGMT_TX_ZONE, "%s: ===> RF RESET REQUEST SENT <===\n", __func__); common->rf_reset = 0; } common->bb_rf_prog_count = 1; bb_rf_prog->flags |= cpu_to_le16(PUT_BBP_RESET | BBP_REG_WRITE | (RSI_RF_TYPE << 4)); skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_set_vap_capabilities() - This function send vap capability to firmware. * @common: Pointer to the driver private structure. * @opmode: Operating mode of device. * * Return: 0 on success, corresponding negative error code on failure. 
*/ int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode, u8 *mac_addr, u8 vap_id, u8 vap_status) { struct sk_buff *skb = NULL; struct rsi_vap_caps *vap_caps; struct rsi_hw *adapter = common->priv; struct ieee80211_hw *hw = adapter->hw; struct ieee80211_conf *conf = &hw->conf; u16 frame_len = sizeof(struct rsi_vap_caps); rsi_dbg(MGMT_TX_ZONE, "%s: Sending VAP capabilities frame\n", __func__); skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, frame_len); vap_caps = (struct rsi_vap_caps *)skb->data; rsi_set_len_qno(&vap_caps->desc_dword0.len_qno, (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); vap_caps->desc_dword0.frame_type = VAP_CAPABILITIES; vap_caps->status = vap_status; vap_caps->vif_type = mode; vap_caps->channel_bw = common->channel_width; vap_caps->vap_id = vap_id; vap_caps->radioid_macid = ((common->mac_id & 0xf) << 4) | (common->radio_id & 0xf); memcpy(vap_caps->mac_addr, mac_addr, IEEE80211_ADDR_LEN); vap_caps->keep_alive_period = cpu_to_le16(90); vap_caps->frag_threshold = cpu_to_le16(IEEE80211_MAX_FRAG_THRESHOLD); vap_caps->rts_threshold = cpu_to_le16(common->rts_threshold); if (common->band == NL80211_BAND_5GHZ) { vap_caps->default_ctrl_rate = cpu_to_le16(RSI_RATE_6); vap_caps->default_mgmt_rate = cpu_to_le32(RSI_RATE_6); } else { vap_caps->default_ctrl_rate = cpu_to_le16(RSI_RATE_1); vap_caps->default_mgmt_rate = cpu_to_le32(RSI_RATE_1); } if (conf_is_ht40(conf)) { if (conf_is_ht40_minus(conf)) vap_caps->ctrl_rate_flags = cpu_to_le16(UPPER_20_ENABLE); else if (conf_is_ht40_plus(conf)) vap_caps->ctrl_rate_flags = cpu_to_le16(LOWER_20_ENABLE); else vap_caps->ctrl_rate_flags = cpu_to_le16(FULL40M_ENABLE); } vap_caps->default_data_rate = 0; vap_caps->beacon_interval = cpu_to_le16(common->beacon_interval); vap_caps->dtim_period = cpu_to_le16(common->dtim_cnt); skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_hal_load_key() - This function is used to load keys within the firmware. * @common: Pointer to the driver private structure. * @data: Pointer to the key data. * @key_len: Key length to be loaded. * @key_type: Type of key: GROUP/PAIRWISE. * @key_id: Key index. * @cipher: Type of cipher used. * * Return: 0 on success, -1 on failure. 
*/ int rsi_hal_load_key(struct rsi_common *common, u8 *data, u16 key_len, u8 key_type, u8 key_id, u32 cipher, s16 sta_id, struct ieee80211_vif *vif) { struct sk_buff *skb = NULL; struct rsi_set_key *set_key; u16 key_descriptor = 0; u16 frame_len = sizeof(struct rsi_set_key); rsi_dbg(MGMT_TX_ZONE, "%s: Sending load key frame\n", __func__); skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, frame_len); set_key = (struct rsi_set_key *)skb->data; if (key_type == RSI_GROUP_KEY) { key_descriptor = RSI_KEY_TYPE_BROADCAST; if (vif->type == NL80211_IFTYPE_AP) key_descriptor |= RSI_KEY_MODE_AP; } if ((cipher == WLAN_CIPHER_SUITE_WEP40) || (cipher == WLAN_CIPHER_SUITE_WEP104)) { key_id = 0; key_descriptor |= RSI_WEP_KEY; if (key_len >= 13) key_descriptor |= RSI_WEP_KEY_104; } else if (cipher != KEY_TYPE_CLEAR) { key_descriptor |= RSI_CIPHER_WPA; if (cipher == WLAN_CIPHER_SUITE_TKIP) key_descriptor |= RSI_CIPHER_TKIP; } key_descriptor |= RSI_PROTECT_DATA_FRAMES; key_descriptor |= (key_id << RSI_KEY_ID_OFFSET); rsi_set_len_qno(&set_key->desc_dword0.len_qno, (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); set_key->desc_dword0.frame_type = SET_KEY_REQ; set_key->key_desc = cpu_to_le16(key_descriptor); set_key->sta_id = sta_id; if (data) { if ((cipher == WLAN_CIPHER_SUITE_WEP40) || (cipher == WLAN_CIPHER_SUITE_WEP104)) { memcpy(&set_key->key[key_id][1], data, key_len * 2); } else { memcpy(&set_key->key[0][0], data, key_len); } memcpy(set_key->tx_mic_key, &data[16], 8); memcpy(set_key->rx_mic_key, &data[24], 8); } else { memset(&set_key[FRAME_DESC_SZ], 0, frame_len - FRAME_DESC_SZ); } skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } /* * This function sends the common device configuration parameters to device. * This frame includes the useful information to make device works on * specific operating mode. */ static int rsi_send_common_dev_params(struct rsi_common *common) { struct sk_buff *skb; u16 frame_len; struct rsi_config_vals *dev_cfgs; frame_len = sizeof(struct rsi_config_vals); rsi_dbg(MGMT_TX_ZONE, "Sending common device config params\n"); skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Unable to allocate skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, frame_len); dev_cfgs = (struct rsi_config_vals *)skb->data; memset(dev_cfgs, 0, (sizeof(struct rsi_config_vals))); rsi_set_len_qno(&dev_cfgs->len_qno, (frame_len - FRAME_DESC_SZ), RSI_COEX_Q); dev_cfgs->pkt_type = COMMON_DEV_CONFIG; dev_cfgs->lp_ps_handshake = common->lp_ps_handshake_mode; dev_cfgs->ulp_ps_handshake = common->ulp_ps_handshake_mode; dev_cfgs->unused_ulp_gpio = RSI_UNUSED_ULP_GPIO_BITMAP; dev_cfgs->unused_soc_gpio_bitmap = cpu_to_le32(RSI_UNUSED_SOC_GPIO_BITMAP); dev_cfgs->opermode = common->oper_mode; dev_cfgs->wlan_rf_pwr_mode = common->wlan_rf_power_mode; dev_cfgs->driver_mode = common->driver_mode; dev_cfgs->region_code = NL80211_DFS_FCC; dev_cfgs->antenna_sel_val = common->obm_ant_sel_val; skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } /* * rsi_load_bootup_params() - This function send bootup params to the firmware. * @common: Pointer to the driver private structure. * * Return: 0 on success, corresponding error code on failure. 
*/ static int rsi_load_bootup_params(struct rsi_common *common) { struct sk_buff *skb; struct rsi_boot_params *boot_params; rsi_dbg(MGMT_TX_ZONE, "%s: Sending boot params frame\n", __func__); skb = dev_alloc_skb(sizeof(struct rsi_boot_params)); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, sizeof(struct rsi_boot_params)); boot_params = (struct rsi_boot_params *)skb->data; rsi_dbg(MGMT_TX_ZONE, "%s:\n", __func__); if (common->channel_width == BW_40MHZ) { memcpy(&boot_params->bootup_params, &boot_params_40, sizeof(struct bootup_params)); rsi_dbg(MGMT_TX_ZONE, "%s: Packet 40MHZ <=== %d\n", __func__, UMAC_CLK_40BW); boot_params->desc_word[7] = cpu_to_le16(UMAC_CLK_40BW); } else { memcpy(&boot_params->bootup_params, &boot_params_20, sizeof(struct bootup_params)); if (boot_params_20.valid != cpu_to_le32(VALID_20)) { boot_params->desc_word[7] = cpu_to_le16(UMAC_CLK_20BW); rsi_dbg(MGMT_TX_ZONE, "%s: Packet 20MHZ <=== %d\n", __func__, UMAC_CLK_20BW); } else { boot_params->desc_word[7] = cpu_to_le16(UMAC_CLK_40MHZ); rsi_dbg(MGMT_TX_ZONE, "%s: Packet 20MHZ <=== %d\n", __func__, UMAC_CLK_40MHZ); } } /** * Bit{0:11} indicates length of the Packet * Bit{12:15} indicates host queue number */ boot_params->desc_word[0] = cpu_to_le16(sizeof(struct bootup_params) | (RSI_WIFI_MGMT_Q << 12)); boot_params->desc_word[1] = cpu_to_le16(BOOTUP_PARAMS_REQUEST); skb_put(skb, sizeof(struct rsi_boot_params)); return rsi_send_internal_mgmt_frame(common, skb); } static int rsi_load_9116_bootup_params(struct rsi_common *common) { struct sk_buff *skb; struct rsi_boot_params_9116 *boot_params; rsi_dbg(MGMT_TX_ZONE, "%s: Sending boot params frame\n", __func__); skb = dev_alloc_skb(sizeof(struct rsi_boot_params_9116)); if (!skb) return -ENOMEM; memset(skb->data, 0, sizeof(struct rsi_boot_params)); boot_params = (struct rsi_boot_params_9116 *)skb->data; if (common->channel_width == BW_40MHZ) { memcpy(&boot_params->bootup_params, &boot_params_9116_40, sizeof(struct bootup_params_9116)); rsi_dbg(MGMT_TX_ZONE, "%s: Packet 40MHZ <=== %d\n", __func__, UMAC_CLK_40BW); boot_params->umac_clk = cpu_to_le16(UMAC_CLK_40BW); } else { memcpy(&boot_params->bootup_params, &boot_params_9116_20, sizeof(struct bootup_params_9116)); if (boot_params_20.valid != cpu_to_le32(VALID_20)) { boot_params->umac_clk = cpu_to_le16(UMAC_CLK_20BW); rsi_dbg(MGMT_TX_ZONE, "%s: Packet 20MHZ <=== %d\n", __func__, UMAC_CLK_20BW); } else { boot_params->umac_clk = cpu_to_le16(UMAC_CLK_40MHZ); rsi_dbg(MGMT_TX_ZONE, "%s: Packet 20MHZ <=== %d\n", __func__, UMAC_CLK_40MHZ); } } rsi_set_len_qno(&boot_params->desc_dword0.len_qno, sizeof(struct bootup_params_9116), RSI_WIFI_MGMT_Q); boot_params->desc_dword0.frame_type = BOOTUP_PARAMS_REQUEST; skb_put(skb, sizeof(struct rsi_boot_params_9116)); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_send_reset_mac() - This function prepares reset MAC request and sends an * internal management frame to indicate it to firmware. * @common: Pointer to the driver private structure. * * Return: 0 on success, corresponding error code on failure. 
*/ static int rsi_send_reset_mac(struct rsi_common *common) { struct sk_buff *skb; struct rsi_mac_frame *mgmt_frame; rsi_dbg(MGMT_TX_ZONE, "%s: Sending reset MAC frame\n", __func__); skb = dev_alloc_skb(FRAME_DESC_SZ); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, FRAME_DESC_SZ); mgmt_frame = (struct rsi_mac_frame *)skb->data; mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); mgmt_frame->desc_word[1] = cpu_to_le16(RESET_MAC_REQ); mgmt_frame->desc_word[4] = cpu_to_le16(RETRY_COUNT << 8); #define RSI_9116_DEF_TA_AGGR 3 if (common->priv->device_model == RSI_DEV_9116) mgmt_frame->desc_word[3] |= cpu_to_le16(RSI_9116_DEF_TA_AGGR << 8); skb_put(skb, FRAME_DESC_SZ); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_band_check() - This function programs the band * @common: Pointer to the driver private structure. * * Return: 0 on success, corresponding error code on failure. */ int rsi_band_check(struct rsi_common *common, struct ieee80211_channel *curchan) { struct rsi_hw *adapter = common->priv; struct ieee80211_hw *hw = adapter->hw; u8 prev_bw = common->channel_width; u8 prev_ep = common->endpoint; int status = 0; if (common->band != curchan->band) { common->rf_reset = 1; common->band = curchan->band; } if ((hw->conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) || (hw->conf.chandef.width == NL80211_CHAN_WIDTH_20)) common->channel_width = BW_20MHZ; else common->channel_width = BW_40MHZ; if (common->band == NL80211_BAND_2GHZ) { if (common->channel_width) common->endpoint = EP_2GHZ_40MHZ; else common->endpoint = EP_2GHZ_20MHZ; } else { if (common->channel_width) common->endpoint = EP_5GHZ_40MHZ; else common->endpoint = EP_5GHZ_20MHZ; } if (common->endpoint != prev_ep) { status = rsi_program_bb_rf(common); if (status) return status; } if (common->channel_width != prev_bw) { if (adapter->device_model == RSI_DEV_9116) status = rsi_load_9116_bootup_params(common); else status = rsi_load_bootup_params(common); if (status) return status; status = rsi_load_radio_caps(common); if (status) return status; } return status; } /** * rsi_set_channel() - This function programs the channel. * @common: Pointer to the driver private structure. * @channel: Channel value to be set. * * Return: 0 on success, corresponding error code on failure. 
*/ int rsi_set_channel(struct rsi_common *common, struct ieee80211_channel *channel) { struct sk_buff *skb = NULL; struct rsi_chan_config *chan_cfg; u16 frame_len = sizeof(struct rsi_chan_config); rsi_dbg(MGMT_TX_ZONE, "%s: Sending scan req frame\n", __func__); skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } if (!channel) { dev_kfree_skb(skb); return 0; } memset(skb->data, 0, frame_len); chan_cfg = (struct rsi_chan_config *)skb->data; rsi_set_len_qno(&chan_cfg->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q); chan_cfg->desc_dword0.frame_type = SCAN_REQUEST; chan_cfg->channel_number = channel->hw_value; chan_cfg->antenna_gain_offset_2g = channel->max_antenna_gain; chan_cfg->antenna_gain_offset_5g = channel->max_antenna_gain; chan_cfg->region_rftype = (RSI_RF_TYPE & 0xf) << 4; if ((channel->flags & IEEE80211_CHAN_NO_IR) || (channel->flags & IEEE80211_CHAN_RADAR)) { chan_cfg->antenna_gain_offset_2g |= RSI_CHAN_RADAR; } else { if (common->tx_power < channel->max_power) chan_cfg->tx_power = cpu_to_le16(common->tx_power); else chan_cfg->tx_power = cpu_to_le16(channel->max_power); } chan_cfg->region_rftype |= (common->priv->dfs_region & 0xf); if (common->channel_width == BW_40MHZ) chan_cfg->channel_width = 0x1; common->channel = channel->hw_value; skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_send_radio_params_update() - This function sends the radio * parameters update to device * @common: Pointer to the driver private structure. * @channel: Channel value to be set. * * Return: 0 on success, corresponding error code on failure. */ int rsi_send_radio_params_update(struct rsi_common *common) { struct rsi_mac_frame *cmd_frame; struct sk_buff *skb = NULL; rsi_dbg(MGMT_TX_ZONE, "%s: Sending Radio Params update frame\n", __func__); skb = dev_alloc_skb(FRAME_DESC_SZ); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, FRAME_DESC_SZ); cmd_frame = (struct rsi_mac_frame *)skb->data; cmd_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); cmd_frame->desc_word[1] = cpu_to_le16(RADIO_PARAMS_UPDATE); cmd_frame->desc_word[3] = cpu_to_le16(BIT(0)); cmd_frame->desc_word[3] |= cpu_to_le16(common->tx_power << 8); skb_put(skb, FRAME_DESC_SZ); return rsi_send_internal_mgmt_frame(common, skb); } /* This function programs the threshold. 
*/ int rsi_send_vap_dynamic_update(struct rsi_common *common) { struct sk_buff *skb; struct rsi_dynamic_s *dynamic_frame; rsi_dbg(MGMT_TX_ZONE, "%s: Sending vap update indication frame\n", __func__); skb = dev_alloc_skb(sizeof(struct rsi_dynamic_s)); if (!skb) return -ENOMEM; memset(skb->data, 0, sizeof(struct rsi_dynamic_s)); dynamic_frame = (struct rsi_dynamic_s *)skb->data; rsi_set_len_qno(&dynamic_frame->desc_dword0.len_qno, sizeof(dynamic_frame->frame_body), RSI_WIFI_MGMT_Q); dynamic_frame->desc_dword0.frame_type = VAP_DYNAMIC_UPDATE; dynamic_frame->desc_dword2.pkt_info = cpu_to_le32(common->rts_threshold); if (common->wow_flags & RSI_WOW_ENABLED) { /* Beacon miss threshold */ dynamic_frame->desc_dword3.token = cpu_to_le16(RSI_BCN_MISS_THRESHOLD); dynamic_frame->frame_body.keep_alive_period = cpu_to_le16(RSI_WOW_KEEPALIVE); } else { dynamic_frame->frame_body.keep_alive_period = cpu_to_le16(RSI_DEF_KEEPALIVE); } dynamic_frame->desc_dword3.sta_id = 0; /* vap id */ skb_put(skb, sizeof(struct rsi_dynamic_s)); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_compare() - This function is used to compare two integers * @a: pointer to the first integer * @b: pointer to the second integer * * Return: 0 if both are equal, -1 if the first is smaller, else 1 */ static int rsi_compare(const void *a, const void *b) { u16 _a = *(const u16 *)(a); u16 _b = *(const u16 *)(b); if (_a > _b) return -1; if (_a < _b) return 1; return 0; } /** * rsi_map_rates() - This function is used to map selected rates to hw rates. * @rate: The standard rate to be mapped. * @offset: Offset that will be returned. * * Return: 0 if it is a mcs rate, else 1 */ static bool rsi_map_rates(u16 rate, int *offset) { int kk; for (kk = 0; kk < ARRAY_SIZE(rsi_mcsrates); kk++) { if (rate == mcs[kk]) { *offset = kk; return false; } } for (kk = 0; kk < ARRAY_SIZE(rsi_rates); kk++) { if (rate == rsi_rates[kk].bitrate / 5) { *offset = kk; break; } } return true; } /** * rsi_send_auto_rate_request() - This function is to set rates for connection * and send autorate request to firmware. * @common: Pointer to the driver private structure. * * Return: 0 on success, corresponding error code on failure. 
*/ static int rsi_send_auto_rate_request(struct rsi_common *common, struct ieee80211_sta *sta, u16 sta_id, struct ieee80211_vif *vif) { struct sk_buff *skb; struct rsi_auto_rate *auto_rate; int ii = 0, jj = 0, kk = 0; struct ieee80211_hw *hw = common->priv->hw; u8 band = hw->conf.chandef.chan->band; u8 num_supported_rates = 0; u8 rate_table_offset, rate_offset = 0; u32 rate_bitmap; u16 *selected_rates, min_rate; bool is_ht = false, is_sgi = false; u16 frame_len = sizeof(struct rsi_auto_rate); rsi_dbg(MGMT_TX_ZONE, "%s: Sending auto rate request frame\n", __func__); skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, frame_len); selected_rates = kzalloc(2 * RSI_TBL_SZ, GFP_KERNEL); if (!selected_rates) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of mem\n", __func__); dev_kfree_skb(skb); return -ENOMEM; } auto_rate = (struct rsi_auto_rate *)skb->data; auto_rate->aarf_rssi = cpu_to_le16(((u16)3 << 6) | (u16)(18 & 0x3f)); auto_rate->collision_tolerance = cpu_to_le16(3); auto_rate->failure_limit = cpu_to_le16(3); auto_rate->initial_boundary = cpu_to_le16(3); auto_rate->max_threshold_limt = cpu_to_le16(27); auto_rate->desc.desc_dword0.frame_type = AUTO_RATE_IND; if (common->channel_width == BW_40MHZ) auto_rate->desc.desc_dword3.qid_tid = BW_40MHZ; auto_rate->desc.desc_dword3.sta_id = sta_id; if (vif->type == NL80211_IFTYPE_STATION) { rate_bitmap = common->bitrate_mask[band]; is_ht = common->vif_info[0].is_ht; is_sgi = common->vif_info[0].sgi; } else { rate_bitmap = sta->supp_rates[band]; is_ht = sta->ht_cap.ht_supported; if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) || (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) is_sgi = true; } if (band == NL80211_BAND_2GHZ) { if ((rate_bitmap == 0) && (is_ht)) min_rate = RSI_RATE_MCS0; else min_rate = RSI_RATE_1; rate_table_offset = 0; } else { if ((rate_bitmap == 0) && (is_ht)) min_rate = RSI_RATE_MCS0; else min_rate = RSI_RATE_6; rate_table_offset = 4; } for (ii = 0, jj = 0; ii < (ARRAY_SIZE(rsi_rates) - rate_table_offset); ii++) { if (rate_bitmap & BIT(ii)) { selected_rates[jj++] = (rsi_rates[ii + rate_table_offset].bitrate / 5); rate_offset++; } } num_supported_rates = jj; if (is_ht) { for (ii = 0; ii < ARRAY_SIZE(mcs); ii++) selected_rates[jj++] = mcs[ii]; num_supported_rates += ARRAY_SIZE(mcs); rate_offset += ARRAY_SIZE(mcs); } sort(selected_rates, jj, sizeof(u16), &rsi_compare, NULL); /* mapping the rates to RSI rates */ for (ii = 0; ii < jj; ii++) { if (rsi_map_rates(selected_rates[ii], &kk)) { auto_rate->supported_rates[ii] = cpu_to_le16(rsi_rates[kk].hw_value); } else { auto_rate->supported_rates[ii] = cpu_to_le16(rsi_mcsrates[kk]); } } /* loading HT rates in the bottom half of the auto rate table */ if (is_ht) { for (ii = rate_offset, kk = ARRAY_SIZE(rsi_mcsrates) - 1; ii < rate_offset + 2 * ARRAY_SIZE(rsi_mcsrates); ii++) { if (is_sgi || conf_is_ht40(&common->priv->hw->conf)) auto_rate->supported_rates[ii++] = cpu_to_le16(rsi_mcsrates[kk] | BIT(9)); else auto_rate->supported_rates[ii++] = cpu_to_le16(rsi_mcsrates[kk]); auto_rate->supported_rates[ii] = cpu_to_le16(rsi_mcsrates[kk--]); } for (; ii < (RSI_TBL_SZ - 1); ii++) { auto_rate->supported_rates[ii] = cpu_to_le16(rsi_mcsrates[0]); } } for (; ii < RSI_TBL_SZ; ii++) auto_rate->supported_rates[ii] = cpu_to_le16(min_rate); auto_rate->num_supported_rates = cpu_to_le16(num_supported_rates * 2); auto_rate->moderate_rate_inx = cpu_to_le16(num_supported_rates / 2); num_supported_rates *= 2; 
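	/*
	 * The rate count and moderate-rate index above are scaled by two,
	 * presumably because each supported_rates entry is a __le16. The
	 * descriptor filled in below carries the payload length (excluding
	 * the frame descriptor) and the WLAN management queue number.
	 */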
rsi_set_len_qno(&auto_rate->desc.desc_dword0.len_qno, (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); skb_put(skb, frame_len); kfree(selected_rates); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_inform_bss_status() - This function informs about bss status with the * help of sta notify params by sending an internal * management frame to firmware. * @common: Pointer to the driver private structure. * @status: Bss status type. * @bssid: Bssid. * @qos_enable: Qos is enabled. * @aid: Aid (unique for all STAs). * * Return: None. */ void rsi_inform_bss_status(struct rsi_common *common, enum opmode opmode, u8 status, const u8 *addr, u8 qos_enable, u16 aid, struct ieee80211_sta *sta, u16 sta_id, u16 assoc_cap, struct ieee80211_vif *vif) { if (status) { if (opmode == RSI_OPMODE_STA) common->hw_data_qs_blocked = true; rsi_hal_send_sta_notify_frame(common, opmode, STA_CONNECTED, addr, qos_enable, aid, sta_id, vif); if (common->min_rate == 0xffff) rsi_send_auto_rate_request(common, sta, sta_id, vif); if (opmode == RSI_OPMODE_STA && !(assoc_cap & WLAN_CAPABILITY_PRIVACY) && !rsi_send_block_unblock_frame(common, false)) common->hw_data_qs_blocked = false; } else { if (opmode == RSI_OPMODE_STA) common->hw_data_qs_blocked = true; if (!(common->wow_flags & RSI_WOW_ENABLED)) rsi_hal_send_sta_notify_frame(common, opmode, STA_DISCONNECTED, addr, qos_enable, aid, sta_id, vif); if (opmode == RSI_OPMODE_STA) rsi_send_block_unblock_frame(common, true); } } /** * rsi_eeprom_read() - This function sends a frame to read the mac address * from the eeprom. * @common: Pointer to the driver private structure. * * Return: 0 on success, -1 on failure. */ static int rsi_eeprom_read(struct rsi_common *common) { struct rsi_eeprom_read_frame *mgmt_frame; struct rsi_hw *adapter = common->priv; struct sk_buff *skb; rsi_dbg(MGMT_TX_ZONE, "%s: Sending EEPROM read req frame\n", __func__); skb = dev_alloc_skb(FRAME_DESC_SZ); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, FRAME_DESC_SZ); mgmt_frame = (struct rsi_eeprom_read_frame *)skb->data; /* FrameType */ rsi_set_len_qno(&mgmt_frame->len_qno, 0, RSI_WIFI_MGMT_Q); mgmt_frame->pkt_type = EEPROM_READ; /* Number of bytes to read */ mgmt_frame->pkt_info = cpu_to_le32((adapter->eeprom.length << RSI_EEPROM_LEN_OFFSET) & RSI_EEPROM_LEN_MASK); mgmt_frame->pkt_info |= cpu_to_le32((3 << RSI_EEPROM_HDR_SIZE_OFFSET) & RSI_EEPROM_HDR_SIZE_MASK); /* Address to read */ mgmt_frame->eeprom_offset = cpu_to_le32(adapter->eeprom.offset); skb_put(skb, FRAME_DESC_SZ); return rsi_send_internal_mgmt_frame(common, skb); } /** * This function sends a frame to block/unblock * data queues in the firmware * * @param common Pointer to the driver private structure. * @param block event - block if true, unblock if false * @return 0 on success, -1 on failure. 
*/ int rsi_send_block_unblock_frame(struct rsi_common *common, bool block_event) { struct rsi_block_unblock_data *mgmt_frame; struct sk_buff *skb; rsi_dbg(MGMT_TX_ZONE, "%s: Sending block/unblock frame\n", __func__); skb = dev_alloc_skb(FRAME_DESC_SZ); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, FRAME_DESC_SZ); mgmt_frame = (struct rsi_block_unblock_data *)skb->data; rsi_set_len_qno(&mgmt_frame->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q); mgmt_frame->desc_dword0.frame_type = BLOCK_HW_QUEUE; mgmt_frame->host_quiet_info = QUIET_INFO_VALID; if (block_event) { rsi_dbg(INFO_ZONE, "blocking the data qs\n"); mgmt_frame->block_q_bitmap = cpu_to_le16(0xf); mgmt_frame->block_q_bitmap |= cpu_to_le16(0xf << 4); } else { rsi_dbg(INFO_ZONE, "unblocking the data qs\n"); mgmt_frame->unblock_q_bitmap = cpu_to_le16(0xf); mgmt_frame->unblock_q_bitmap |= cpu_to_le16(0xf << 4); } skb_put(skb, FRAME_DESC_SZ); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_send_rx_filter_frame() - Sends a frame to filter the RX packets * * @common: Pointer to the driver private structure. * @rx_filter_word: Flags of filter packets * * @Return: 0 on success, -1 on failure. */ int rsi_send_rx_filter_frame(struct rsi_common *common, u16 rx_filter_word) { struct rsi_mac_frame *cmd_frame; struct sk_buff *skb; rsi_dbg(MGMT_TX_ZONE, "Sending RX filter frame\n"); skb = dev_alloc_skb(FRAME_DESC_SZ); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } memset(skb->data, 0, FRAME_DESC_SZ); cmd_frame = (struct rsi_mac_frame *)skb->data; cmd_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); cmd_frame->desc_word[1] = cpu_to_le16(SET_RX_FILTER); cmd_frame->desc_word[4] = cpu_to_le16(rx_filter_word); skb_put(skb, FRAME_DESC_SZ); return rsi_send_internal_mgmt_frame(common, skb); } int rsi_send_ps_request(struct rsi_hw *adapter, bool enable, struct ieee80211_vif *vif) { struct rsi_common *common = adapter->priv; struct ieee80211_bss_conf *bss = &vif->bss_conf; struct rsi_request_ps *ps; struct rsi_ps_info *ps_info; struct sk_buff *skb; int frame_len = sizeof(*ps); skb = dev_alloc_skb(frame_len); if (!skb) return -ENOMEM; memset(skb->data, 0, frame_len); ps = (struct rsi_request_ps *)skb->data; ps_info = &adapter->ps_info; rsi_set_len_qno(&ps->desc.desc_dword0.len_qno, (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); ps->desc.desc_dword0.frame_type = WAKEUP_SLEEP_REQUEST; if (enable) { ps->ps_sleep.enable = RSI_PS_ENABLE; ps->desc.desc_dword3.token = cpu_to_le16(RSI_SLEEP_REQUEST); } else { ps->ps_sleep.enable = RSI_PS_DISABLE; ps->desc.desc_dword0.len_qno |= cpu_to_le16(RSI_PS_DISABLE_IND); ps->desc.desc_dword3.token = cpu_to_le16(RSI_WAKEUP_REQUEST); } ps->ps_uapsd_acs = common->uapsd_bitmap; ps->ps_sleep.sleep_type = ps_info->sleep_type; ps->ps_sleep.num_bcns_per_lis_int = cpu_to_le16(ps_info->num_bcns_per_lis_int); ps->ps_sleep.sleep_duration = cpu_to_le32(ps_info->deep_sleep_wakeup_period); if (bss->assoc) ps->ps_sleep.connected_sleep = RSI_CONNECTED_SLEEP; else ps->ps_sleep.connected_sleep = RSI_DEEP_SLEEP; ps->ps_listen_interval = cpu_to_le32(ps_info->listen_interval); ps->ps_dtim_interval_duration = cpu_to_le32(ps_info->dtim_interval_duration); if (ps_info->listen_interval > ps_info->dtim_interval_duration) ps->ps_listen_interval = cpu_to_le32(RSI_PS_DISABLE); ps->ps_num_dtim_intervals = cpu_to_le16(ps_info->num_dtims_per_sleep); skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } 
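/**
 * rsi_send_w9116_features() - This function sends the RS9116 feature enable
 *			       frame (PLL mode, RF type, PPE, DPD and power
 *			       save options) to the firmware.
 * @common: Pointer to the driver private structure.
 *
 * Return: 0 on success, corresponding negative error code on failure.
 */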
static int rsi_send_w9116_features(struct rsi_common *common)
{
	struct rsi_wlan_9116_features *w9116_features;
	u16 frame_len = sizeof(struct rsi_wlan_9116_features);
	struct sk_buff *skb;

	rsi_dbg(MGMT_TX_ZONE, "%s: Sending wlan 9116 features\n", __func__);

	skb = dev_alloc_skb(frame_len);
	if (!skb)
		return -ENOMEM;
	memset(skb->data, 0, frame_len);

	w9116_features = (struct rsi_wlan_9116_features *)skb->data;

	w9116_features->pll_mode = common->w9116_features.pll_mode;
	w9116_features->rf_type = common->w9116_features.rf_type;
	w9116_features->wireless_mode = common->w9116_features.wireless_mode;
	w9116_features->enable_ppe = common->w9116_features.enable_ppe;
	w9116_features->afe_type = common->w9116_features.afe_type;
	if (common->w9116_features.dpd)
		w9116_features->feature_enable |= cpu_to_le32(RSI_DPD);
	if (common->w9116_features.sifs_tx_enable)
		w9116_features->feature_enable |=
			cpu_to_le32(RSI_SIFS_TX_ENABLE);
	if (common->w9116_features.ps_options & RSI_DUTY_CYCLING)
		w9116_features->feature_enable |= cpu_to_le32(RSI_DUTY_CYCLING);
	if (common->w9116_features.ps_options & RSI_END_OF_FRAME)
		w9116_features->feature_enable |= cpu_to_le32(RSI_END_OF_FRAME);
	w9116_features->feature_enable |=
		cpu_to_le32((common->w9116_features.ps_options & ~0x3) << 2);

	rsi_set_len_qno(&w9116_features->desc.desc_dword0.len_qno,
			frame_len - FRAME_DESC_SZ, RSI_WIFI_MGMT_Q);
	w9116_features->desc.desc_dword0.frame_type = FEATURES_ENABLE;
	skb_put(skb, frame_len);

	return rsi_send_internal_mgmt_frame(common, skb);
}

/**
 * rsi_set_antenna() - This function sends the antenna configuration request
 *		       to the device.
 * @common: Pointer to the driver private structure.
 * @antenna: bitmap for tx antenna selection
 *
 * Return: 0 on success, negative error code on failure
 */
int rsi_set_antenna(struct rsi_common *common, u8 antenna)
{
	struct rsi_ant_sel_frame *ant_sel_frame;
	struct sk_buff *skb;

	skb = dev_alloc_skb(FRAME_DESC_SZ);
	if (!skb) {
		rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
			__func__);
		return -ENOMEM;
	}

	memset(skb->data, 0, FRAME_DESC_SZ);

	ant_sel_frame = (struct rsi_ant_sel_frame *)skb->data;
	ant_sel_frame->desc_dword0.frame_type = ANT_SEL_FRAME;
	ant_sel_frame->sub_frame_type = ANTENNA_SEL_TYPE;
	ant_sel_frame->ant_value = cpu_to_le16(antenna & ANTENNA_MASK_VALUE);
	rsi_set_len_qno(&ant_sel_frame->desc_dword0.len_qno,
			0, RSI_WIFI_MGMT_Q);
	skb_put(skb, FRAME_DESC_SZ);

	return rsi_send_internal_mgmt_frame(common, skb);
}

static int rsi_send_beacon(struct rsi_common *common)
{
	struct sk_buff *skb = NULL;
	u8 dword_align_bytes = 0;

	skb = dev_alloc_skb(MAX_MGMT_PKT_SIZE);
	if (!skb)
		return -ENOMEM;

	memset(skb->data, 0, MAX_MGMT_PKT_SIZE);

	dword_align_bytes = ((unsigned long)skb->data & 0x3f);
	if (dword_align_bytes)
		skb_pull(skb, (64 - dword_align_bytes));
	if (rsi_prepare_beacon(common, skb)) {
		rsi_dbg(ERR_ZONE, "Failed to prepare beacon\n");
		dev_kfree_skb(skb);
		return -EINVAL;
	}
	skb_queue_tail(&common->tx_queue[MGMT_BEACON_Q], skb);
	rsi_set_event(&common->tx_thread.event);
	rsi_dbg(DATA_TX_ZONE, "%s: Added to beacon queue\n", __func__);

	return 0;
}

#ifdef CONFIG_PM
int rsi_send_wowlan_request(struct rsi_common *common, u16 flags,
			    u16 sleep_status)
{
	struct rsi_wowlan_req *cmd_frame;
	struct sk_buff *skb;
	u8 length;

	rsi_dbg(ERR_ZONE, "%s: Sending wowlan request frame\n", __func__);

	length = sizeof(*cmd_frame);
	skb = dev_alloc_skb(length);
	if (!skb)
		return -ENOMEM;
	memset(skb->data, 0, length);
	cmd_frame = (struct rsi_wowlan_req *)skb->data;

	rsi_set_len_qno(&cmd_frame->desc.desc_dword0.len_qno,
			(length - FRAME_DESC_SZ),
			RSI_WIFI_MGMT_Q);
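	/*
	 * WOWLAN_CONFIG_PARAMS tells the firmware whether the host is
	 * entering or leaving sleep and, when entering, which wake-up
	 * triggers to arm.
	 */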
cmd_frame->desc.desc_dword0.frame_type = WOWLAN_CONFIG_PARAMS; cmd_frame->host_sleep_status = sleep_status; if (common->secinfo.security_enable && common->secinfo.gtk_cipher) flags |= RSI_WOW_GTK_REKEY; if (sleep_status) cmd_frame->wow_flags = flags; rsi_dbg(INFO_ZONE, "Host_Sleep_Status : %d Flags : %d\n", cmd_frame->host_sleep_status, cmd_frame->wow_flags); skb_put(skb, length); return rsi_send_internal_mgmt_frame(common, skb); } #endif int rsi_send_bgscan_params(struct rsi_common *common, int enable) { struct rsi_bgscan_params *params = &common->bgscan; struct cfg80211_scan_request *scan_req = common->hwscan; struct rsi_bgscan_config *bgscan; struct sk_buff *skb; u16 frame_len = sizeof(*bgscan); u8 i; rsi_dbg(MGMT_TX_ZONE, "%s: Sending bgscan params frame\n", __func__); skb = dev_alloc_skb(frame_len); if (!skb) return -ENOMEM; memset(skb->data, 0, frame_len); bgscan = (struct rsi_bgscan_config *)skb->data; rsi_set_len_qno(&bgscan->desc_dword0.len_qno, (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); bgscan->desc_dword0.frame_type = BG_SCAN_PARAMS; bgscan->bgscan_threshold = cpu_to_le16(params->bgscan_threshold); bgscan->roam_threshold = cpu_to_le16(params->roam_threshold); if (enable) bgscan->bgscan_periodicity = cpu_to_le16(params->bgscan_periodicity); bgscan->active_scan_duration = cpu_to_le16(params->active_scan_duration); bgscan->passive_scan_duration = cpu_to_le16(params->passive_scan_duration); bgscan->two_probe = params->two_probe; bgscan->num_bgscan_channels = scan_req->n_channels; for (i = 0; i < bgscan->num_bgscan_channels; i++) bgscan->channels2scan[i] = cpu_to_le16(scan_req->channels[i]->hw_value); skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } /* This function sends the probe request to be used by firmware in * background scan */ int rsi_send_bgscan_probe_req(struct rsi_common *common, struct ieee80211_vif *vif) { struct cfg80211_scan_request *scan_req = common->hwscan; struct rsi_bgscan_probe *bgscan; struct sk_buff *skb; struct sk_buff *probereq_skb; u16 frame_len = sizeof(*bgscan); size_t ssid_len = 0; u8 *ssid = NULL; rsi_dbg(MGMT_TX_ZONE, "%s: Sending bgscan probe req frame\n", __func__); if (common->priv->sc_nvifs <= 0) return -ENODEV; if (scan_req->n_ssids) { ssid = scan_req->ssids[0].ssid; ssid_len = scan_req->ssids[0].ssid_len; } skb = dev_alloc_skb(frame_len + MAX_BGSCAN_PROBE_REQ_LEN); if (!skb) return -ENOMEM; memset(skb->data, 0, frame_len + MAX_BGSCAN_PROBE_REQ_LEN); bgscan = (struct rsi_bgscan_probe *)skb->data; bgscan->desc_dword0.frame_type = BG_SCAN_PROBE_REQ; bgscan->flags = cpu_to_le16(HOST_BG_SCAN_TRIG); if (common->band == NL80211_BAND_5GHZ) { bgscan->mgmt_rate = cpu_to_le16(RSI_RATE_6); bgscan->def_chan = cpu_to_le16(40); } else { bgscan->mgmt_rate = cpu_to_le16(RSI_RATE_1); bgscan->def_chan = cpu_to_le16(11); } bgscan->channel_scan_time = cpu_to_le16(RSI_CHANNEL_SCAN_TIME); probereq_skb = ieee80211_probereq_get(common->priv->hw, vif->addr, ssid, ssid_len, scan_req->ie_len); if (!probereq_skb) { dev_kfree_skb(skb); return -ENOMEM; } memcpy(&skb->data[frame_len], probereq_skb->data, probereq_skb->len); bgscan->probe_req_length = cpu_to_le16(probereq_skb->len); rsi_set_len_qno(&bgscan->desc_dword0.len_qno, (frame_len - FRAME_DESC_SZ + probereq_skb->len), RSI_WIFI_MGMT_Q); skb_put(skb, frame_len + probereq_skb->len); dev_kfree_skb(probereq_skb); return rsi_send_internal_mgmt_frame(common, skb); } /** * rsi_handle_ta_confirm_type() - This function handles the confirm frames. * @common: Pointer to the driver private structure. 
* @msg: Pointer to received packet. * * Return: 0 on success, -1 on failure. */ static int rsi_handle_ta_confirm_type(struct rsi_common *common, u8 *msg) { struct rsi_hw *adapter = common->priv; u8 sub_type = (msg[15] & 0xff); u16 msg_len = ((u16 *)msg)[0] & 0xfff; u8 offset; switch (sub_type) { case BOOTUP_PARAMS_REQUEST: rsi_dbg(FSM_ZONE, "%s: Boot up params confirm received\n", __func__); if (common->fsm_state == FSM_BOOT_PARAMS_SENT) { if (adapter->device_model == RSI_DEV_9116) { common->band = NL80211_BAND_5GHZ; common->num_supp_bands = 2; if (rsi_send_reset_mac(common)) goto out; else common->fsm_state = FSM_RESET_MAC_SENT; } else { adapter->eeprom.length = (IEEE80211_ADDR_LEN + WLAN_MAC_MAGIC_WORD_LEN + WLAN_HOST_MODE_LEN); adapter->eeprom.offset = WLAN_MAC_EEPROM_ADDR; if (rsi_eeprom_read(common)) { common->fsm_state = FSM_CARD_NOT_READY; goto out; } common->fsm_state = FSM_EEPROM_READ_MAC_ADDR; } } else { rsi_dbg(INFO_ZONE, "%s: Received bootup params cfm in %d state\n", __func__, common->fsm_state); return 0; } break; case EEPROM_READ: rsi_dbg(FSM_ZONE, "EEPROM READ confirm received\n"); if (msg_len <= 0) { rsi_dbg(FSM_ZONE, "%s: [EEPROM_READ] Invalid len %d\n", __func__, msg_len); goto out; } if (msg[16] != MAGIC_WORD) { rsi_dbg(FSM_ZONE, "%s: [EEPROM_READ] Invalid token\n", __func__); common->fsm_state = FSM_CARD_NOT_READY; goto out; } if (common->fsm_state == FSM_EEPROM_READ_MAC_ADDR) { offset = (FRAME_DESC_SZ + WLAN_HOST_MODE_LEN + WLAN_MAC_MAGIC_WORD_LEN); memcpy(common->mac_addr, &msg[offset], ETH_ALEN); adapter->eeprom.length = ((WLAN_MAC_MAGIC_WORD_LEN + 3) & (~3)); adapter->eeprom.offset = WLAN_EEPROM_RFTYPE_ADDR; if (rsi_eeprom_read(common)) { rsi_dbg(ERR_ZONE, "%s: Failed reading RF band\n", __func__); common->fsm_state = FSM_CARD_NOT_READY; goto out; } common->fsm_state = FSM_EEPROM_READ_RF_TYPE; } else if (common->fsm_state == FSM_EEPROM_READ_RF_TYPE) { if ((msg[17] & 0x3) == 0x3) { rsi_dbg(INIT_ZONE, "Dual band supported\n"); common->band = NL80211_BAND_5GHZ; common->num_supp_bands = 2; } else if ((msg[17] & 0x3) == 0x1) { rsi_dbg(INIT_ZONE, "Only 2.4Ghz band supported\n"); common->band = NL80211_BAND_2GHZ; common->num_supp_bands = 1; } if (rsi_send_reset_mac(common)) goto out; common->fsm_state = FSM_RESET_MAC_SENT; } else { rsi_dbg(ERR_ZONE, "%s: Invalid EEPROM read type\n", __func__); return 0; } break; case RESET_MAC_REQ: if (common->fsm_state == FSM_RESET_MAC_SENT) { rsi_dbg(FSM_ZONE, "%s: Reset MAC cfm received\n", __func__); if (rsi_load_radio_caps(common)) goto out; else common->fsm_state = FSM_RADIO_CAPS_SENT; } else { rsi_dbg(ERR_ZONE, "%s: Received reset mac cfm in %d state\n", __func__, common->fsm_state); return 0; } break; case RADIO_CAPABILITIES: if (common->fsm_state == FSM_RADIO_CAPS_SENT) { common->rf_reset = 1; if (adapter->device_model == RSI_DEV_9116 && rsi_send_w9116_features(common)) { rsi_dbg(ERR_ZONE, "Failed to send 9116 features\n"); goto out; } if (rsi_program_bb_rf(common)) { goto out; } else { common->fsm_state = FSM_BB_RF_PROG_SENT; rsi_dbg(FSM_ZONE, "%s: Radio cap cfm received\n", __func__); } } else { rsi_dbg(INFO_ZONE, "%s: Received radio caps cfm in %d state\n", __func__, common->fsm_state); return 0; } break; case BB_PROG_VALUES_REQUEST: case RF_PROG_VALUES_REQUEST: case BBP_PROG_IN_TA: rsi_dbg(FSM_ZONE, "%s: BB/RF cfm received\n", __func__); if (common->fsm_state == FSM_BB_RF_PROG_SENT) { common->bb_rf_prog_count--; if (!common->bb_rf_prog_count) { common->fsm_state = FSM_MAC_INIT_DONE; if (common->reinit_hw) { 
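				/*
				 * reinit_hw is set when the hardware is being
				 * re-initialised (likely a reset/resume path)
				 * and a caller is blocked on
				 * wlan_init_completion; wake it instead of
				 * attaching to mac80211 again.
				 */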
complete(&common->wlan_init_completion); } else { return rsi_mac80211_attach(common); } } } else { rsi_dbg(INFO_ZONE, "%s: Received bbb_rf cfm in %d state\n", __func__, common->fsm_state); return 0; } break; case SCAN_REQUEST: rsi_dbg(INFO_ZONE, "Set channel confirm\n"); break; case WAKEUP_SLEEP_REQUEST: rsi_dbg(INFO_ZONE, "Wakeup/Sleep confirmation.\n"); return rsi_handle_ps_confirm(adapter, msg); case BG_SCAN_PROBE_REQ: rsi_dbg(INFO_ZONE, "BG scan complete event\n"); if (common->bgscan_en) { struct cfg80211_scan_info info; if (!rsi_send_bgscan_params(common, RSI_STOP_BGSCAN)) common->bgscan_en = 0; info.aborted = false; ieee80211_scan_completed(adapter->hw, &info); } rsi_dbg(INFO_ZONE, "Background scan completed\n"); break; default: rsi_dbg(INFO_ZONE, "%s: Invalid TA confirm pkt received\n", __func__); break; } return 0; out: rsi_dbg(ERR_ZONE, "%s: Unable to send pkt/Invalid frame received\n", __func__); return -EINVAL; } int rsi_handle_card_ready(struct rsi_common *common, u8 *msg) { int status; switch (common->fsm_state) { case FSM_CARD_NOT_READY: rsi_dbg(INIT_ZONE, "Card ready indication from Common HAL\n"); rsi_set_default_parameters(common); if (rsi_send_common_dev_params(common) < 0) return -EINVAL; common->fsm_state = FSM_COMMON_DEV_PARAMS_SENT; break; case FSM_COMMON_DEV_PARAMS_SENT: rsi_dbg(INIT_ZONE, "Card ready indication from WLAN HAL\n"); if (common->priv->device_model == RSI_DEV_9116) { if (msg[16] != MAGIC_WORD) { rsi_dbg(FSM_ZONE, "%s: [EEPROM_READ] Invalid token\n", __func__); common->fsm_state = FSM_CARD_NOT_READY; return -EINVAL; } memcpy(common->mac_addr, &msg[20], ETH_ALEN); rsi_dbg(INIT_ZONE, "MAC Addr %pM", common->mac_addr); } /* Get usb buffer status register address */ common->priv->usb_buffer_status_reg = *(u32 *)&msg[8]; rsi_dbg(INFO_ZONE, "USB buffer status register = %x\n", common->priv->usb_buffer_status_reg); if (common->priv->device_model == RSI_DEV_9116) status = rsi_load_9116_bootup_params(common); else status = rsi_load_bootup_params(common); if (status < 0) { common->fsm_state = FSM_CARD_NOT_READY; return status; } common->fsm_state = FSM_BOOT_PARAMS_SENT; break; default: rsi_dbg(ERR_ZONE, "%s: card ready indication in invalid state %d.\n", __func__, common->fsm_state); return -EINVAL; } return 0; } /** * rsi_mgmt_pkt_recv() - This function processes the management packets * received from the hardware. * @common: Pointer to the driver private structure. * @msg: Pointer to the received packet. * * Return: 0 on success, -1 on failure. 
*/ int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg) { s32 msg_len = (le16_to_cpu(*(__le16 *)&msg[0]) & 0x0fff); u16 msg_type = (msg[2]); rsi_dbg(FSM_ZONE, "%s: Msg Len: %d, Msg Type: %4x\n", __func__, msg_len, msg_type); switch (msg_type) { case TA_CONFIRM_TYPE: return rsi_handle_ta_confirm_type(common, msg); case CARD_READY_IND: common->hibernate_resume = false; rsi_dbg(FSM_ZONE, "%s: Card ready indication received\n", __func__); return rsi_handle_card_ready(common, msg); case TX_STATUS_IND: switch (msg[RSI_TX_STATUS_TYPE]) { case PROBEREQ_CONFIRM: common->mgmt_q_block = false; rsi_dbg(FSM_ZONE, "%s: Probe confirm received\n", __func__); break; case EAPOL4_CONFIRM: if (msg[RSI_TX_STATUS]) { common->eapol4_confirm = true; if (!rsi_send_block_unblock_frame(common, false)) common->hw_data_qs_blocked = false; } } break; case BEACON_EVENT_IND: rsi_dbg(INFO_ZONE, "Beacon event\n"); if (common->fsm_state != FSM_MAC_INIT_DONE) return -1; if (common->iface_down) return -1; if (!common->beacon_enabled) return -1; rsi_send_beacon(common); break; case WOWLAN_WAKEUP_REASON: rsi_dbg(ERR_ZONE, "\n\nWakeup Type: %x\n", msg[15]); switch (msg[15]) { case RSI_UNICAST_MAGIC_PKT: rsi_dbg(ERR_ZONE, "*** Wakeup for Unicast magic packet ***\n"); break; case RSI_BROADCAST_MAGICPKT: rsi_dbg(ERR_ZONE, "*** Wakeup for Broadcast magic packet ***\n"); break; case RSI_EAPOL_PKT: rsi_dbg(ERR_ZONE, "*** Wakeup for GTK renewal ***\n"); break; case RSI_DISCONNECT_PKT: rsi_dbg(ERR_ZONE, "*** Wakeup for Disconnect ***\n"); break; case RSI_HW_BMISS_PKT: rsi_dbg(ERR_ZONE, "*** Wakeup for HW Beacon miss ***\n"); break; default: rsi_dbg(ERR_ZONE, "##### Un-intentional Wakeup #####\n"); break; } break; case RX_DOT11_MGMT: return rsi_mgmt_pkt_to_core(common, msg, msg_len); default: rsi_dbg(INFO_ZONE, "Received packet type: 0x%x\n", msg_type); } return 0; }
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1261_0
crossvul-cpp_data_good_1273_0
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/slab.h> #include "dm_services.h" #include "link_encoder.h" #include "stream_encoder.h" #include "resource.h" #include "include/irq_service_interface.h" #include "../virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dce110/dce110_timing_generator.h" #include "irq/dce110/irq_service_dce110.h" #include "dce/dce_link_encoder.h" #include "dce/dce_stream_encoder.h" #include "dce/dce_mem_input.h" #include "dce/dce_ipp.h" #include "dce/dce_transform.h" #include "dce/dce_opp.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "dce100/dce100_hw_sequencer.h" #include "reg_helper.h" #include "dce/dce_10_0_d.h" #include "dce/dce_10_0_sh_mask.h" #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" #include "dce/dce_abm.h" #include "dce/dce_i2c.h" #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT #include "gmc/gmc_8_2_d.h" #include "gmc/gmc_8_2_sh_mask.h" #endif #ifndef mmDP_DPHY_INTERNAL_CTRL #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7 #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7 #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7 #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7 #endif #ifndef mmBIOS_SCRATCH_2 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF #endif #ifndef mmDP_DPHY_BS_SR_SWAP_CNTL #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC #endif #ifndef mmDP_DPHY_FAST_TRAINING #define mmDP_DPHY_FAST_TRAINING 0x4ABC #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC #endif static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = { { .crtc = (mmCRTC0_CRTC_CONTROL - 
mmCRTC_CONTROL), .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL), } }; /* set register offset */ #define SR(reg_name)\ .reg_name = mm ## reg_name /* set register offset with instance */ #define SRI(reg_name, block, id)\ .reg_name = mm ## block ## id ## _ ## reg_name #define ipp_regs(id)\ [id] = {\ IPP_DCE100_REG_LIST_DCE_BASE(id)\ } static const struct dce_ipp_registers ipp_regs[] = { ipp_regs(0), ipp_regs(1), ipp_regs(2), ipp_regs(3), ipp_regs(4), ipp_regs(5) }; static const struct dce_ipp_shift ipp_shift = { IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce_ipp_mask ipp_mask = { IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; #define transform_regs(id)\ [id] = {\ XFM_COMMON_REG_LIST_DCE100(id)\ } static const struct dce_transform_registers xfm_regs[] = { transform_regs(0), transform_regs(1), transform_regs(2), transform_regs(3), transform_regs(4), transform_regs(5) }; static const struct dce_transform_shift xfm_shift = { XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_transform_mask xfm_mask = { XFM_COMMON_MASK_SH_LIST_DCE110(_MASK) }; #define aux_regs(id)\ [id] = {\ AUX_REG_LIST(id)\ } static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3), aux_regs(4), aux_regs(5) }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3), hpd_regs(4), hpd_regs(5) }; #define link_regs(id)\ [id] = {\ LE_DCE100_REG_LIST(id)\ } static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(0), link_regs(1), link_regs(2), link_regs(3), link_regs(4), link_regs(5), link_regs(6), }; #define stream_enc_regs(id)\ [id] = {\ SE_COMMON_REG_LIST_DCE_BASE(id),\ .AFMT_CNTL = 0,\ } static const struct dce110_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), stream_enc_regs(3), stream_enc_regs(4), stream_enc_regs(5), stream_enc_regs(6) }; static const struct dce_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCE80_100(__SHIFT) }; static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK) }; #define opp_regs(id)\ [id] = {\ OPP_DCE_100_REG_LIST(id),\ } static const struct dce_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), opp_regs(3), opp_regs(4), opp_regs(5) }; static const struct dce_opp_shift opp_shift = { OPP_COMMON_MASK_SH_LIST_DCE_100(__SHIFT) }; static const struct dce_opp_mask opp_mask = { OPP_COMMON_MASK_SH_LIST_DCE_100(_MASK) }; #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ .AUX_RESET_MASK = 0 \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4), aux_engine_regs(5) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), 
audio_regs(2), audio_regs(3), audio_regs(4), audio_regs(5), audio_regs(6), }; static const struct dce_audio_shift audio_shift = { AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { AUD_COMMON_MASK_SH_LIST(_MASK) }; #define clk_src_regs(id)\ [id] = {\ CS_COMMON_REG_LIST_DCE_100_110(id),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0), clk_src_regs(1), clk_src_regs(2) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCE110_COMMON_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { DMCU_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_dmcu_mask dmcu_mask = { DMCU_MASK_SH_LIST_DCE110(_MASK) }; static const struct dce_abm_registers abm_regs = { ABM_DCE110_COMMON_REG_LIST() }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCE110(_MASK) }; #define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03 static const struct bios_registers bios_regs = { .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 6, .num_stream_encoder = 6, .num_pll = 3, .num_ddc = 6, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCE_RGB, .pixel_format_support = { .argb8888 = true, .nv12 = false, .fp16 = false }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 1, .fp16 = 1 }, .max_downscale_factor = { .argb8888 = 250, .nv12 = 1, .fp16 = 1 } }; #define CTX ctx #define REG(reg) mm ## reg #ifndef mmCC_DC_HDMI_STRAPS #define mmCC_DC_HDMI_STRAPS 0x1918 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { REG_GET_2(CC_DC_HDMI_STRAPS, HDMI_DISABLE, &straps->hdmi_disable, AUDIO_STREAM_NUMBER, &straps->audio_stream_number); REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio); } static struct audio *create_audio( struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } static struct timing_generator *dce100_timing_generator_create( struct dc_context *ctx, uint32_t instance, const struct dce110_timing_generator_offsets *offsets) { struct dce110_timing_generator *tg110 = kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); if (!tg110) return NULL; dce110_timing_generator_construct(tg110, ctx, instance, offsets); return &tg110->base; } static struct stream_encoder *dce100_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dce110_stream_encoder *enc110 = kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); if (!enc110) return NULL; dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc110->base; } #define SRII(reg_name, block, id)\ .reg_name[id] = mm ## block ## id ## _ ## reg_name static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCE10_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCE10_MASK_SH_LIST(__SHIFT) }; static const struct 
dce_hwseq_mask hwseq_mask = { HWSEQ_DCE10_MASK_SH_LIST(_MASK) }; static struct dce_hwseq *dce100_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, .create_stream_encoder = dce100_stream_encoder_create, .create_hwseq = dce100_hwseq_create, }; #define mi_inst_regs(id) { \ MI_DCE8_REG_LIST(id), \ .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \ } static const struct dce_mem_input_registers mi_regs[] = { mi_inst_regs(0), mi_inst_regs(1), mi_inst_regs(2), mi_inst_regs(3), mi_inst_regs(4), mi_inst_regs(5), }; static const struct dce_mem_input_shift mi_shifts = { MI_DCE8_MASK_SH_LIST(__SHIFT), .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT }; static const struct dce_mem_input_mask mi_masks = { MI_DCE8_MASK_SH_LIST(_MASK), .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK }; static struct mem_input *dce100_mem_input_create( struct dc_context *ctx, uint32_t inst) { struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), GFP_KERNEL); if (!dce_mi) { BREAK_TO_DEBUGGER(); return NULL; } dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); dce_mi->wa.single_head_rdreq_dmif_limit = 2; return &dce_mi->base; } static void dce100_transform_destroy(struct transform **xfm) { kfree(TO_DCE_TRANSFORM(*xfm)); *xfm = NULL; } static struct transform *dce100_transform_create( struct dc_context *ctx, uint32_t inst) { struct dce_transform *transform = kzalloc(sizeof(struct dce_transform), GFP_KERNEL); if (!transform) return NULL; dce_transform_construct(transform, ctx, inst, &xfm_regs[inst], &xfm_shift, &xfm_mask); return &transform->base; } static struct input_pixel_processor *dce100_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); return NULL; } dce_ipp_construct(ipp, ctx, inst, &ipp_regs[inst], &ipp_shift, &ipp_mask); return &ipp->base; } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 300000, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true }; struct link_encoder *dce100_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); if (!enc110) return NULL; dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; } struct output_pixel_processor *dce100_opp_create( struct dc_context *ctx, uint32_t inst) { struct dce110_opp *opp = kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); if (!opp) return NULL; dce110_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } struct dce_aux *dce100_aux_engine_create( struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst]); return &aux_engine->base; } #define i2c_inst_regs(id) { 
I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5), i2c_inst_regs(6), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; struct dce_i2c_hw *dce100_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dce100_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } struct clock_source *dce100_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dce110_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } void dce100_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); *clk_src = NULL; } static void destruct(struct dce110_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.opps[i] != NULL) dce110_opp_destroy(&pool->base.opps[i]); if (pool->base.transforms[i] != NULL) dce100_transform_destroy(&pool->base.transforms[i]); if (pool->base.ipps[i] != NULL) dce_ipp_destroy(&pool->base.ipps[i]); if (pool->base.mis[i] != NULL) { kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); pool->base.mis[i] = NULL; } if (pool->base.timing_generators[i] != NULL) { kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) dce100_clock_source_destroy(&pool->base.clock_sources[i]); } if (pool->base.dp_clock_source != NULL) dce100_clock_source_destroy(&pool->base.dp_clock_source); for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i] != NULL) dce_aud_destroy(&pool->base.audios[i]); } if (pool->base.abm != NULL) dce_abm_destroy(&pool->base.abm); if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); if (pool->base.irqs != NULL) dal_irq_service_destroy(&pool->base.irqs); } static enum dc_status build_mapped_resource( const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) { struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream); if (!pipe_ctx) return DC_ERROR_UNEXPECTED; dce110_resource_build_pipe_hw_param(pipe_ctx); resource_build_info_frame(pipe_ctx); return DC_OK; } bool dce100_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate) { int i; bool at_least_one_pipe = false; for (i = 0; i < dc->res_pool->pipe_count; i++) { if 
(context->res_ctx.pipe_ctx[i].stream) at_least_one_pipe = true; } if (at_least_one_pipe) { /* TODO implement when needed but for now hardcode max value*/ context->bw_ctx.bw.dce.dispclk_khz = 681000; context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; } else { context->bw_ctx.bw.dce.dispclk_khz = 0; context->bw_ctx.bw.dce.yclk_khz = 0; } return true; } static bool dce100_validate_surface_sets( struct dc_state *context) { int i; for (i = 0; i < context->stream_count; i++) { if (context->stream_status[i].plane_count == 0) continue; if (context->stream_status[i].plane_count > 1) return false; if (context->stream_status[i].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) return false; } return true; } enum dc_status dce100_validate_global( struct dc *dc, struct dc_state *context) { if (!dce100_validate_surface_sets(context)) return DC_FAIL_SURFACE_VALIDATE; return DC_OK; } enum dc_status dce100_add_stream_to_ctx( struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) { enum dc_status result = DC_ERROR_UNEXPECTED; result = resource_map_pool_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = resource_map_clock_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = build_mapped_resource(dc, new_ctx, dc_stream); return result; } static void dce100_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps) { if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) return DC_OK; return DC_FAIL_SURFACE_VALIDATE; } struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link( struct resource_context *res_ctx, const struct resource_pool *pool, struct dc_stream_state *stream) { int i; int j = -1; struct dc_link *link = stream->link; for (i = 0; i < pool->stream_enc_count; i++) { if (!res_ctx->is_stream_enc_acquired[i] && pool->stream_enc[i]) { /* Store first available for MST second display * in daisy chain use case */ j = i; if (pool->stream_enc[i]->id == link->link_enc->preferred_engine) return pool->stream_enc[i]; } } /* * below can happen in cases when stream encoder is acquired: * 1) for second MST display in chain, so preferred engine already * acquired; * 2) for another link, which preferred engine already acquired by any * MST configuration. * * If signal is of DP type and preferred engine not found, return last available * * TODO - This is just a patch up and a generic solution is * required for non DP connectors. 
*/ if (j >= 0 && link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) return pool->stream_enc[j]; return NULL; } static const struct resource_funcs dce100_res_pool_funcs = { .destroy = dce100_destroy_resource_pool, .link_enc_create = dce100_link_encoder_create, .validate_bandwidth = dce100_validate_bandwidth, .validate_plane = dce100_validate_plane, .add_stream_to_ctx = dce100_add_stream_to_ctx, .validate_global = dce100_validate_global, .find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link }; static bool construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) { unsigned int i; struct dc_context *ctx = dc->ctx; struct dc_bios *bp; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap; pool->base.funcs = &dce100_res_pool_funcs; pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; bp = ctx->dc_bios; if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); pool->base.clock_sources[0] = dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[1] = dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[2] = dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 3; } else { pool->base.dp_clock_source = dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); pool->base.clock_sources[0] = dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[1] = dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 2; } if (pool->base.dp_clock_source == NULL) { dm_error("DC: failed to create dp clock source!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } } pool->base.dmcu = dce_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dce110_create(&init_data); if (!pool->base.irqs) goto res_create_fail; } /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; pool->base.pipe_count = res_cap.num_timing_generator; pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 40; dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; dc->caps.disable_dp_clk_share = true; for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = dce100_timing_generator_create( ctx, i, &dce100_tg_offsets[i]); if (pool->base.timing_generators[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto res_create_fail; } pool->base.mis[i] = dce100_mem_input_create(ctx, i); if (pool->base.mis[i] 
== NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create memory input!\n"); goto res_create_fail; } pool->base.ipps[i] = dce100_ipp_create(ctx, i); if (pool->base.ipps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create input pixel processor!\n"); goto res_create_fail; } pool->base.transforms[i] = dce100_transform_create(ctx, i); if (pool->base.transforms[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create transform!\n"); goto res_create_fail; } pool->base.opps[i] = dce100_opp_create(ctx, i); if (pool->base.opps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create output pixel processor!\n"); goto res_create_fail; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce100_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto res_create_fail; } pool->base.hw_i2cs[i] = dce100_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create i2c engine!!\n"); goto res_create_fail; } pool->base.sw_i2cs[i] = NULL; } dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; if (!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) goto res_create_fail; /* Create hardware sequencer */ dce100_hw_sequencer_construct(dc); return true; res_create_fail: destruct(pool); return false; } struct resource_pool *dce100_create_resource_pool( uint8_t num_virtual_links, struct dc *dc) { struct dce110_resource_pool *pool = kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (construct(num_virtual_links, dc, pool)) return &pool->base; kfree(pool); BREAK_TO_DEBUGGER(); return NULL; }
./CrossVul/dataset_final_sorted/CWE-400/c/good_1273_0
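Editorial note on the record above: construct() and destruct() in the DCE100 resource code follow the common "allocate every sub-object, jump to one cleanup label on the first failure" idiom, relying on the caller having zeroed the pool so the teardown path can free whatever was created so far. The sketch below is a reduced, hypothetical version of that shape; struct pool, pool_construct, and pool_destruct are invented names, not driver symbols.

#include <stdlib.h>
#include <stdbool.h>
#include <string.h>

struct pool { void *a; void *b; void *c; };

static void pool_destruct(struct pool *p)
{
	/* free(NULL) is a no-op, so a partially constructed pool is
	 * handled by the same path as a fully constructed one. */
	free(p->c);
	free(p->b);
	free(p->a);
}

static bool pool_construct(struct pool *p)
{
	memset(p, 0, sizeof(*p)); /* the driver gets this from kzalloc() */

	p->a = calloc(1, 32);
	if (!p->a)
		goto fail;
	p->b = calloc(1, 64);
	if (!p->b)
		goto fail;
	p->c = calloc(1, 128);
	if (!p->c)
		goto fail;
	return true;

fail:
	pool_destruct(p);
	return false;
}

The single failure label keeps the error handling identical no matter which allocation fails, which is why the driver routes every "failed to create" branch through res_create_fail.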
crossvul-cpp_data_bad_812_1
#include "jsi.h" #include "jsvalue.h" #include "jsbuiltin.h" #include "utf.h" #include "regexp.h" static const char *checkstring(js_State *J, int idx) { if (!js_iscoercible(J, idx)) js_typeerror(J, "string function called on null or undefined"); return js_tostring(J, idx); } int js_runeat(js_State *J, const char *s, int i) { Rune rune = 0; while (i-- >= 0) { rune = *(unsigned char*)s; if (rune < Runeself) { if (rune == 0) return 0; ++s; } else s += chartorune(&rune, s); } return rune; } const char *js_utfidxtoptr(const char *s, int i) { Rune rune; while (i-- > 0) { rune = *(unsigned char*)s; if (rune < Runeself) { if (rune == 0) return NULL; ++s; } else s += chartorune(&rune, s); } return s; } int js_utfptrtoidx(const char *s, const char *p) { Rune rune; int i = 0; while (s < p) { if (*(unsigned char *)s < Runeself) ++s; else s += chartorune(&rune, s); ++i; } return i; } static void jsB_new_String(js_State *J) { js_newstring(J, js_gettop(J) > 1 ? js_tostring(J, 1) : ""); } static void jsB_String(js_State *J) { js_pushstring(J, js_gettop(J) > 1 ? js_tostring(J, 1) : ""); } static void Sp_toString(js_State *J) { js_Object *self = js_toobject(J, 0); if (self->type != JS_CSTRING) js_typeerror(J, "not a string"); js_pushliteral(J, self->u.s.string); } static void Sp_valueOf(js_State *J) { js_Object *self = js_toobject(J, 0); if (self->type != JS_CSTRING) js_typeerror(J, "not a string"); js_pushliteral(J, self->u.s.string); } static void Sp_charAt(js_State *J) { char buf[UTFmax + 1]; const char *s = checkstring(J, 0); int pos = js_tointeger(J, 1); Rune rune = js_runeat(J, s, pos); if (rune > 0) { buf[runetochar(buf, &rune)] = 0; js_pushstring(J, buf); } else { js_pushliteral(J, ""); } } static void Sp_charCodeAt(js_State *J) { const char *s = checkstring(J, 0); int pos = js_tointeger(J, 1); Rune rune = js_runeat(J, s, pos); if (rune > 0) js_pushnumber(J, rune); else js_pushnumber(J, NAN); } static void Sp_concat(js_State *J) { int i, top = js_gettop(J); int n; char * volatile out; const char *s; if (top == 1) return; s = checkstring(J, 0); n = strlen(s); out = js_malloc(J, n + 1); strcpy(out, s); if (js_try(J)) { js_free(J, out); js_throw(J); } for (i = 1; i < top; ++i) { s = js_tostring(J, i); n += strlen(s); out = js_realloc(J, out, n + 1); strcat(out, s); } js_pushstring(J, out); js_endtry(J); js_free(J, out); } static void Sp_indexOf(js_State *J) { const char *haystack = checkstring(J, 0); const char *needle = js_tostring(J, 1); int pos = js_tointeger(J, 2); int len = strlen(needle); int k = 0; Rune rune; while (*haystack) { if (k >= pos && !strncmp(haystack, needle, len)) { js_pushnumber(J, k); return; } haystack += chartorune(&rune, haystack); ++k; } js_pushnumber(J, -1); } static void Sp_lastIndexOf(js_State *J) { const char *haystack = checkstring(J, 0); const char *needle = js_tostring(J, 1); int pos = js_isdefined(J, 2) ? js_tointeger(J, 2) : (int)strlen(haystack); int len = strlen(needle); int k = 0, last = -1; Rune rune; while (*haystack && k <= pos) { if (!strncmp(haystack, needle, len)) last = k; haystack += chartorune(&rune, haystack); ++k; } js_pushnumber(J, last); } static void Sp_localeCompare(js_State *J) { const char *a = checkstring(J, 0); const char *b = js_tostring(J, 1); js_pushnumber(J, strcmp(a, b)); } static void Sp_slice(js_State *J) { const char *str = checkstring(J, 0); const char *ss, *ee; int len = utflen(str); int s = js_tointeger(J, 1); int e = js_isdefined(J, 2) ? js_tointeger(J, 2) : len; s = s < 0 ? s + len : s; e = e < 0 ? e + len : e; s = s < 0 ? 
0 : s > len ? len : s; e = e < 0 ? 0 : e > len ? len : e; if (s < e) { ss = js_utfidxtoptr(str, s); ee = js_utfidxtoptr(ss, e - s); } else { ss = js_utfidxtoptr(str, e); ee = js_utfidxtoptr(ss, s - e); } js_pushlstring(J, ss, ee - ss); } static void Sp_substring(js_State *J) { const char *str = checkstring(J, 0); const char *ss, *ee; int len = utflen(str); int s = js_tointeger(J, 1); int e = js_isdefined(J, 2) ? js_tointeger(J, 2) : len; s = s < 0 ? 0 : s > len ? len : s; e = e < 0 ? 0 : e > len ? len : e; if (s < e) { ss = js_utfidxtoptr(str, s); ee = js_utfidxtoptr(ss, e - s); } else { ss = js_utfidxtoptr(str, e); ee = js_utfidxtoptr(ss, s - e); } js_pushlstring(J, ss, ee - ss); } static void Sp_toLowerCase(js_State *J) { const char *src = checkstring(J, 0); char *dst = js_malloc(J, UTFmax * strlen(src) + 1); const char *s = src; char *d = dst; Rune rune; while (*s) { s += chartorune(&rune, s); rune = tolowerrune(rune); d += runetochar(d, &rune); } *d = 0; if (js_try(J)) { js_free(J, dst); js_throw(J); } js_pushstring(J, dst); js_endtry(J); js_free(J, dst); } static void Sp_toUpperCase(js_State *J) { const char *src = checkstring(J, 0); char *dst = js_malloc(J, UTFmax * strlen(src) + 1); const char *s = src; char *d = dst; Rune rune; while (*s) { s += chartorune(&rune, s); rune = toupperrune(rune); d += runetochar(d, &rune); } *d = 0; if (js_try(J)) { js_free(J, dst); js_throw(J); } js_pushstring(J, dst); js_endtry(J); js_free(J, dst); } static int istrim(int c) { return c == 0x9 || c == 0xB || c == 0xC || c == 0x20 || c == 0xA0 || c == 0xFEFF || c == 0xA || c == 0xD || c == 0x2028 || c == 0x2029; } static void Sp_trim(js_State *J) { const char *s, *e; s = checkstring(J, 0); while (istrim(*s)) ++s; e = s + strlen(s); while (e > s && istrim(e[-1])) --e; js_pushlstring(J, s, e - s); } static void S_fromCharCode(js_State *J) { int i, top = js_gettop(J); Rune c; char *s, *p; s = p = js_malloc(J, (top-1) * UTFmax + 1); if (js_try(J)) { js_free(J, s); js_throw(J); } for (i = 1; i < top; ++i) { c = js_touint16(J, i); p += runetochar(p, &c); } *p = 0; js_pushstring(J, s); js_endtry(J); js_free(J, s); } static void Sp_match(js_State *J) { js_Regexp *re; const char *text; int len; const char *a, *b, *c, *e; Resub m; text = checkstring(J, 0); if (js_isregexp(J, 1)) js_copy(J, 1); else if (js_isundefined(J, 1)) js_newregexp(J, "", 0); else js_newregexp(J, js_tostring(J, 1), 0); re = js_toregexp(J, -1); if (!(re->flags & JS_REGEXP_G)) { js_RegExp_prototype_exec(J, re, text); return; } re->last = 0; js_newarray(J); len = 0; a = text; e = text + strlen(text); while (a <= e) { if (js_regexec(re->prog, a, &m, a > text ? 
REG_NOTBOL : 0)) break; b = m.sub[0].sp; c = m.sub[0].ep; js_pushlstring(J, b, c - b); js_setindex(J, -2, len++); a = c; if (c - b == 0) ++a; } if (len == 0) { js_pop(J, 1); js_pushnull(J); } } static void Sp_search(js_State *J) { js_Regexp *re; const char *text; Resub m; text = checkstring(J, 0); if (js_isregexp(J, 1)) js_copy(J, 1); else if (js_isundefined(J, 1)) js_newregexp(J, "", 0); else js_newregexp(J, js_tostring(J, 1), 0); re = js_toregexp(J, -1); if (!js_regexec(re->prog, text, &m, 0)) js_pushnumber(J, js_utfptrtoidx(text, m.sub[0].sp)); else js_pushnumber(J, -1); } static void Sp_replace_regexp(js_State *J) { js_Regexp *re; const char *source, *s, *r; js_Buffer *sb = NULL; int n, x; Resub m; source = checkstring(J, 0); re = js_toregexp(J, 1); if (js_regexec(re->prog, source, &m, 0)) { js_copy(J, 0); return; } re->last = 0; loop: s = m.sub[0].sp; n = m.sub[0].ep - m.sub[0].sp; if (js_iscallable(J, 2)) { js_copy(J, 2); js_pushundefined(J); for (x = 0; m.sub[x].sp; ++x) /* arg 0..x: substring and subexps that matched */ js_pushlstring(J, m.sub[x].sp, m.sub[x].ep - m.sub[x].sp); js_pushnumber(J, s - source); /* arg x+2: offset within search string */ js_copy(J, 0); /* arg x+3: search string */ js_call(J, 2 + x); r = js_tostring(J, -1); js_putm(J, &sb, source, s); js_puts(J, &sb, r); js_pop(J, 1); } else { r = js_tostring(J, 2); js_putm(J, &sb, source, s); while (*r) { if (*r == '$') { switch (*(++r)) { case 0: --r; /* end of string; back up */ /* fallthrough */ case '$': js_putc(J, &sb, '$'); break; case '`': js_putm(J, &sb, source, s); break; case '\'': js_puts(J, &sb, s + n); break; case '&': js_putm(J, &sb, s, s + n); break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': x = *r - '0'; if (r[1] >= '0' && r[1] <= '9') x = x * 10 + *(++r) - '0'; if (x > 0 && x < m.nsub) { js_putm(J, &sb, m.sub[x].sp, m.sub[x].ep); } else { js_putc(J, &sb, '$'); if (x > 10) { js_putc(J, &sb, '0' + x / 10); js_putc(J, &sb, '0' + x % 10); } else { js_putc(J, &sb, '0' + x); } } break; default: js_putc(J, &sb, '$'); js_putc(J, &sb, *r); break; } ++r; } else { js_putc(J, &sb, *r++); } } } if (re->flags & JS_REGEXP_G) { source = m.sub[0].ep; if (n == 0) { if (*source) js_putc(J, &sb, *source++); else goto end; } if (!js_regexec(re->prog, source, &m, REG_NOTBOL)) goto loop; } end: js_puts(J, &sb, s + n); js_putc(J, &sb, 0); if (js_try(J)) { js_free(J, sb); js_throw(J); } js_pushstring(J, sb ? 
sb->s : ""); js_endtry(J); js_free(J, sb); } static void Sp_replace_string(js_State *J) { const char *source, *needle, *s, *r; js_Buffer *sb = NULL; int n; source = checkstring(J, 0); needle = js_tostring(J, 1); s = strstr(source, needle); if (!s) { js_copy(J, 0); return; } n = strlen(needle); if (js_iscallable(J, 2)) { js_copy(J, 2); js_pushundefined(J); js_pushlstring(J, s, n); /* arg 1: substring that matched */ js_pushnumber(J, s - source); /* arg 2: offset within search string */ js_copy(J, 0); /* arg 3: search string */ js_call(J, 3); r = js_tostring(J, -1); js_putm(J, &sb, source, s); js_puts(J, &sb, r); js_puts(J, &sb, s + n); js_putc(J, &sb, 0); js_pop(J, 1); } else { r = js_tostring(J, 2); js_putm(J, &sb, source, s); while (*r) { if (*r == '$') { switch (*(++r)) { case 0: --r; /* end of string; back up */ /* fallthrough */ case '$': js_putc(J, &sb, '$'); break; case '&': js_putm(J, &sb, s, s + n); break; case '`': js_putm(J, &sb, source, s); break; case '\'': js_puts(J, &sb, s + n); break; default: js_putc(J, &sb, '$'); js_putc(J, &sb, *r); break; } ++r; } else { js_putc(J, &sb, *r++); } } js_puts(J, &sb, s + n); js_putc(J, &sb, 0); } if (js_try(J)) { js_free(J, sb); js_throw(J); } js_pushstring(J, sb ? sb->s : ""); js_endtry(J); js_free(J, sb); } static void Sp_replace(js_State *J) { if (js_isregexp(J, 1)) Sp_replace_regexp(J); else Sp_replace_string(J); } static void Sp_split_regexp(js_State *J) { js_Regexp *re; const char *text; int limit, len, k; const char *p, *a, *b, *c, *e; Resub m; text = checkstring(J, 0); re = js_toregexp(J, 1); limit = js_isdefined(J, 2) ? js_tointeger(J, 2) : 1 << 30; js_newarray(J); len = 0; e = text + strlen(text); /* splitting the empty string */ if (e == text) { if (js_regexec(re->prog, text, &m, 0)) { if (len == limit) return; js_pushliteral(J, ""); js_setindex(J, -2, 0); } return; } p = a = text; while (a < e) { if (js_regexec(re->prog, a, &m, a > text ? REG_NOTBOL : 0)) break; /* no match */ b = m.sub[0].sp; c = m.sub[0].ep; /* empty string at end of last match */ if (b == p) { ++a; continue; } if (len == limit) return; js_pushlstring(J, p, b - p); js_setindex(J, -2, len++); for (k = 1; k < m.nsub; ++k) { if (len == limit) return; js_pushlstring(J, m.sub[k].sp, m.sub[k].ep - m.sub[k].sp); js_setindex(J, -2, len++); } a = p = c; } if (len == limit) return; js_pushstring(J, p); js_setindex(J, -2, len); } static void Sp_split_string(js_State *J) { const char *str = checkstring(J, 0); const char *sep = js_tostring(J, 1); int limit = js_isdefined(J, 2) ? 
js_tointeger(J, 2) : 1 << 30; int i, n; js_newarray(J); n = strlen(sep); /* empty string */ if (n == 0) { Rune rune; for (i = 0; *str && i < limit; ++i) { n = chartorune(&rune, str); js_pushlstring(J, str, n); js_setindex(J, -2, i); str += n; } return; } for (i = 0; str && i < limit; ++i) { const char *s = strstr(str, sep); if (s) { js_pushlstring(J, str, s-str); js_setindex(J, -2, i); str = s + n; } else { js_pushstring(J, str); js_setindex(J, -2, i); str = NULL; } } } static void Sp_split(js_State *J) { if (js_isundefined(J, 1)) { js_newarray(J); js_copy(J, 0); js_setindex(J, -2, 0); } else if (js_isregexp(J, 1)) { Sp_split_regexp(J); } else { Sp_split_string(J); } } void jsB_initstring(js_State *J) { J->String_prototype->u.s.string = ""; J->String_prototype->u.s.length = 0; js_pushobject(J, J->String_prototype); { jsB_propf(J, "String.prototype.toString", Sp_toString, 0); jsB_propf(J, "String.prototype.valueOf", Sp_valueOf, 0); jsB_propf(J, "String.prototype.charAt", Sp_charAt, 1); jsB_propf(J, "String.prototype.charCodeAt", Sp_charCodeAt, 1); jsB_propf(J, "String.prototype.concat", Sp_concat, 0); /* 1 */ jsB_propf(J, "String.prototype.indexOf", Sp_indexOf, 1); jsB_propf(J, "String.prototype.lastIndexOf", Sp_lastIndexOf, 1); jsB_propf(J, "String.prototype.localeCompare", Sp_localeCompare, 1); jsB_propf(J, "String.prototype.match", Sp_match, 1); jsB_propf(J, "String.prototype.replace", Sp_replace, 2); jsB_propf(J, "String.prototype.search", Sp_search, 1); jsB_propf(J, "String.prototype.slice", Sp_slice, 2); jsB_propf(J, "String.prototype.split", Sp_split, 2); jsB_propf(J, "String.prototype.substring", Sp_substring, 2); jsB_propf(J, "String.prototype.toLowerCase", Sp_toLowerCase, 0); jsB_propf(J, "String.prototype.toLocaleLowerCase", Sp_toLowerCase, 0); jsB_propf(J, "String.prototype.toUpperCase", Sp_toUpperCase, 0); jsB_propf(J, "String.prototype.toLocaleUpperCase", Sp_toUpperCase, 0); /* ES5 */ jsB_propf(J, "String.prototype.trim", Sp_trim, 0); } js_newcconstructor(J, jsB_String, jsB_new_String, "String", 0); /* 1 */ { jsB_propf(J, "String.fromCharCode", S_fromCharCode, 0); /* 1 */ } js_defglobal(J, "String", JS_DONTENUM); }
./CrossVul/dataset_final_sorted/CWE-400/c/bad_812_1
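Editorial note on the record above: Sp_concat, Sp_toLowerCase, S_fromCharCode, and the replace helpers all share one cleanup pattern: build a temporary C buffer, wrap the push of the result in js_try so the buffer is freed even if the VM throws, then free it again on the normal path. The sketch below distills that shape using only calls that appear in the file (js_try, js_pushstring, js_endtry, js_free, js_throw); the function name push_built_string is invented here and is not part of MuJS.

#include "jsi.h"  /* same internal header the file above uses */

/* Push a heap-built, NUL-terminated string while guaranteeing the
 * temporary buffer is released whether js_pushstring succeeds or
 * throws (e.g. on out-of-memory). */
static void push_built_string(js_State *J, char *buf)
{
	if (js_try(J)) {          /* exception path: free, then re-raise */
		js_free(J, buf);
		js_throw(J);
	}
	js_pushstring(J, buf);    /* may longjmp back into js_try above */
	js_endtry(J);
	js_free(J, buf);          /* normal path */
}

The double free-looking structure is intentional: exactly one of the two js_free calls runs, depending on whether js_pushstring completed or unwound through the try handler.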
crossvul-cpp_data_good_1254_0
// SPDX-License-Identifier: GPL-2.0+ // // Freescale i.MX7ULP LPSPI driver // // Copyright 2016 Freescale Semiconductor, Inc. // Copyright 2018 NXP Semiconductors #include <linux/clk.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/platform_data/dma-imx.h> #include <linux/platform_data/spi-imx.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> #include <linux/types.h> #define DRIVER_NAME "fsl_lpspi" #define FSL_LPSPI_RPM_TIMEOUT 50 /* 50ms */ /* The maximum bytes that edma can transfer once.*/ #define FSL_LPSPI_MAX_EDMA_BYTES ((1 << 15) - 1) /* i.MX7ULP LPSPI registers */ #define IMX7ULP_VERID 0x0 #define IMX7ULP_PARAM 0x4 #define IMX7ULP_CR 0x10 #define IMX7ULP_SR 0x14 #define IMX7ULP_IER 0x18 #define IMX7ULP_DER 0x1c #define IMX7ULP_CFGR0 0x20 #define IMX7ULP_CFGR1 0x24 #define IMX7ULP_DMR0 0x30 #define IMX7ULP_DMR1 0x34 #define IMX7ULP_CCR 0x40 #define IMX7ULP_FCR 0x58 #define IMX7ULP_FSR 0x5c #define IMX7ULP_TCR 0x60 #define IMX7ULP_TDR 0x64 #define IMX7ULP_RSR 0x70 #define IMX7ULP_RDR 0x74 /* General control register field define */ #define CR_RRF BIT(9) #define CR_RTF BIT(8) #define CR_RST BIT(1) #define CR_MEN BIT(0) #define SR_MBF BIT(24) #define SR_TCF BIT(10) #define SR_FCF BIT(9) #define SR_RDF BIT(1) #define SR_TDF BIT(0) #define IER_TCIE BIT(10) #define IER_FCIE BIT(9) #define IER_RDIE BIT(1) #define IER_TDIE BIT(0) #define DER_RDDE BIT(1) #define DER_TDDE BIT(0) #define CFGR1_PCSCFG BIT(27) #define CFGR1_PINCFG (BIT(24)|BIT(25)) #define CFGR1_PCSPOL BIT(8) #define CFGR1_NOSTALL BIT(3) #define CFGR1_MASTER BIT(0) #define FSR_TXCOUNT (0xFF) #define RSR_RXEMPTY BIT(1) #define TCR_CPOL BIT(31) #define TCR_CPHA BIT(30) #define TCR_CONT BIT(21) #define TCR_CONTC BIT(20) #define TCR_RXMSK BIT(19) #define TCR_TXMSK BIT(18) static int clkdivs[] = {1, 2, 4, 8, 16, 32, 64, 128}; struct lpspi_config { u8 bpw; u8 chip_select; u8 prescale; u16 mode; u32 speed_hz; }; struct fsl_lpspi_data { struct device *dev; void __iomem *base; unsigned long base_phys; struct clk *clk_ipg; struct clk *clk_per; bool is_slave; bool is_first_byte; void *rx_buf; const void *tx_buf; void (*tx)(struct fsl_lpspi_data *); void (*rx)(struct fsl_lpspi_data *); u32 remain; u8 watermark; u8 txfifosize; u8 rxfifosize; struct lpspi_config config; struct completion xfer_done; bool slave_aborted; /* DMA */ bool usedma; struct completion dma_rx_completion; struct completion dma_tx_completion; int chipselect[0]; }; static const struct of_device_id fsl_lpspi_dt_ids[] = { { .compatible = "fsl,imx7ulp-spi", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids); #define LPSPI_BUF_RX(type) \ static void fsl_lpspi_buf_rx_##type(struct fsl_lpspi_data *fsl_lpspi) \ { \ unsigned int val = readl(fsl_lpspi->base + IMX7ULP_RDR); \ \ if (fsl_lpspi->rx_buf) { \ *(type *)fsl_lpspi->rx_buf = val; \ fsl_lpspi->rx_buf += sizeof(type); \ } \ } #define LPSPI_BUF_TX(type) \ static void fsl_lpspi_buf_tx_##type(struct fsl_lpspi_data *fsl_lpspi) \ { \ type val = 0; \ \ if (fsl_lpspi->tx_buf) { \ val = *(type *)fsl_lpspi->tx_buf; \ fsl_lpspi->tx_buf += sizeof(type); \ 
} \ \ fsl_lpspi->remain -= sizeof(type); \ writel(val, fsl_lpspi->base + IMX7ULP_TDR); \ } LPSPI_BUF_RX(u8) LPSPI_BUF_TX(u8) LPSPI_BUF_RX(u16) LPSPI_BUF_TX(u16) LPSPI_BUF_RX(u32) LPSPI_BUF_TX(u32) static void fsl_lpspi_intctrl(struct fsl_lpspi_data *fsl_lpspi, unsigned int enable) { writel(enable, fsl_lpspi->base + IMX7ULP_IER); } static int fsl_lpspi_bytes_per_word(const int bpw) { return DIV_ROUND_UP(bpw, BITS_PER_BYTE); } static bool fsl_lpspi_can_dma(struct spi_controller *controller, struct spi_device *spi, struct spi_transfer *transfer) { unsigned int bytes_per_word; if (!controller->dma_rx) return false; bytes_per_word = fsl_lpspi_bytes_per_word(transfer->bits_per_word); switch (bytes_per_word) { case 1: case 2: case 4: break; default: return false; } return true; } static int lpspi_prepare_xfer_hardware(struct spi_controller *controller) { struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(controller); int ret; ret = pm_runtime_get_sync(fsl_lpspi->dev); if (ret < 0) { dev_err(fsl_lpspi->dev, "failed to enable clock\n"); return ret; } return 0; } static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller) { struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(controller); pm_runtime_mark_last_busy(fsl_lpspi->dev); pm_runtime_put_autosuspend(fsl_lpspi->dev); return 0; } static int fsl_lpspi_prepare_message(struct spi_controller *controller, struct spi_message *msg) { struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(controller); struct spi_device *spi = msg->spi; int gpio = fsl_lpspi->chipselect[spi->chip_select]; if (gpio_is_valid(gpio)) gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1); return 0; } static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi) { u8 txfifo_cnt; u32 temp; txfifo_cnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff; while (txfifo_cnt < fsl_lpspi->txfifosize) { if (!fsl_lpspi->remain) break; fsl_lpspi->tx(fsl_lpspi); txfifo_cnt++; } if (txfifo_cnt < fsl_lpspi->txfifosize) { if (!fsl_lpspi->is_slave) { temp = readl(fsl_lpspi->base + IMX7ULP_TCR); temp &= ~TCR_CONTC; writel(temp, fsl_lpspi->base + IMX7ULP_TCR); } fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE); } else fsl_lpspi_intctrl(fsl_lpspi, IER_TDIE); } static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi) { while (!(readl(fsl_lpspi->base + IMX7ULP_RSR) & RSR_RXEMPTY)) fsl_lpspi->rx(fsl_lpspi); } static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi) { u32 temp = 0; temp |= fsl_lpspi->config.bpw - 1; temp |= (fsl_lpspi->config.mode & 0x3) << 30; if (!fsl_lpspi->is_slave) { temp |= fsl_lpspi->config.prescale << 27; temp |= (fsl_lpspi->config.chip_select & 0x3) << 24; /* * Set TCR_CONT will keep SS asserted after current transfer. * For the first transfer, clear TCR_CONTC to assert SS. * For subsequent transfer, set TCR_CONTC to keep SS asserted. 
*/ if (!fsl_lpspi->usedma) { temp |= TCR_CONT; if (fsl_lpspi->is_first_byte) temp &= ~TCR_CONTC; else temp |= TCR_CONTC; } } writel(temp, fsl_lpspi->base + IMX7ULP_TCR); dev_dbg(fsl_lpspi->dev, "TCR=0x%x\n", temp); } static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi) { u32 temp; if (!fsl_lpspi->usedma) temp = fsl_lpspi->watermark >> 1 | (fsl_lpspi->watermark >> 1) << 16; else temp = fsl_lpspi->watermark >> 1; writel(temp, fsl_lpspi->base + IMX7ULP_FCR); dev_dbg(fsl_lpspi->dev, "FCR=0x%x\n", temp); } static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi) { struct lpspi_config config = fsl_lpspi->config; unsigned int perclk_rate, scldiv; u8 prescale; perclk_rate = clk_get_rate(fsl_lpspi->clk_per); if (config.speed_hz > perclk_rate / 2) { dev_err(fsl_lpspi->dev, "per-clk should be at least two times of transfer speed"); return -EINVAL; } for (prescale = 0; prescale < 8; prescale++) { scldiv = perclk_rate / (clkdivs[prescale] * config.speed_hz) - 2; if (scldiv < 256) { fsl_lpspi->config.prescale = prescale; break; } } if (prescale == 8 && scldiv >= 256) return -EINVAL; writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16), fsl_lpspi->base + IMX7ULP_CCR); dev_dbg(fsl_lpspi->dev, "perclk=%d, speed=%d, prescale=%d, scldiv=%d\n", perclk_rate, config.speed_hz, prescale, scldiv); return 0; } static int fsl_lpspi_dma_configure(struct spi_controller *controller) { int ret; enum dma_slave_buswidth buswidth; struct dma_slave_config rx = {}, tx = {}; struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(controller); switch (fsl_lpspi_bytes_per_word(fsl_lpspi->config.bpw)) { case 4: buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; break; case 2: buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; break; case 1: buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; break; default: return -EINVAL; } tx.direction = DMA_MEM_TO_DEV; tx.dst_addr = fsl_lpspi->base_phys + IMX7ULP_TDR; tx.dst_addr_width = buswidth; tx.dst_maxburst = 1; ret = dmaengine_slave_config(controller->dma_tx, &tx); if (ret) { dev_err(fsl_lpspi->dev, "TX dma configuration failed with %d\n", ret); return ret; } rx.direction = DMA_DEV_TO_MEM; rx.src_addr = fsl_lpspi->base_phys + IMX7ULP_RDR; rx.src_addr_width = buswidth; rx.src_maxburst = 1; ret = dmaengine_slave_config(controller->dma_rx, &rx); if (ret) { dev_err(fsl_lpspi->dev, "RX dma configuration failed with %d\n", ret); return ret; } return 0; } static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi) { u32 temp; int ret; if (!fsl_lpspi->is_slave) { ret = fsl_lpspi_set_bitrate(fsl_lpspi); if (ret) return ret; } fsl_lpspi_set_watermark(fsl_lpspi); if (!fsl_lpspi->is_slave) temp = CFGR1_MASTER; else temp = CFGR1_PINCFG; if (fsl_lpspi->config.mode & SPI_CS_HIGH) temp |= CFGR1_PCSPOL; writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1); temp = readl(fsl_lpspi->base + IMX7ULP_CR); temp |= CR_RRF | CR_RTF | CR_MEN; writel(temp, fsl_lpspi->base + IMX7ULP_CR); temp = 0; if (fsl_lpspi->usedma) temp = DER_TDDE | DER_RDDE; writel(temp, fsl_lpspi->base + IMX7ULP_DER); return 0; } static int fsl_lpspi_setup_transfer(struct spi_controller *controller, struct spi_device *spi, struct spi_transfer *t) { struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(spi->controller); if (t == NULL) return -EINVAL; fsl_lpspi->config.mode = spi->mode; fsl_lpspi->config.bpw = t->bits_per_word; fsl_lpspi->config.speed_hz = t->speed_hz; fsl_lpspi->config.chip_select = spi->chip_select; if (!fsl_lpspi->config.speed_hz) fsl_lpspi->config.speed_hz = spi->max_speed_hz; if (!fsl_lpspi->config.bpw) 
fsl_lpspi->config.bpw = spi->bits_per_word; /* Initialize the functions for transfer */ if (fsl_lpspi->config.bpw <= 8) { fsl_lpspi->rx = fsl_lpspi_buf_rx_u8; fsl_lpspi->tx = fsl_lpspi_buf_tx_u8; } else if (fsl_lpspi->config.bpw <= 16) { fsl_lpspi->rx = fsl_lpspi_buf_rx_u16; fsl_lpspi->tx = fsl_lpspi_buf_tx_u16; } else { fsl_lpspi->rx = fsl_lpspi_buf_rx_u32; fsl_lpspi->tx = fsl_lpspi_buf_tx_u32; } if (t->len <= fsl_lpspi->txfifosize) fsl_lpspi->watermark = t->len; else fsl_lpspi->watermark = fsl_lpspi->txfifosize; if (fsl_lpspi_can_dma(controller, spi, t)) fsl_lpspi->usedma = 1; else fsl_lpspi->usedma = 0; return fsl_lpspi_config(fsl_lpspi); } static int fsl_lpspi_slave_abort(struct spi_controller *controller) { struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(controller); fsl_lpspi->slave_aborted = true; if (!fsl_lpspi->usedma) complete(&fsl_lpspi->xfer_done); else { complete(&fsl_lpspi->dma_tx_completion); complete(&fsl_lpspi->dma_rx_completion); } return 0; } static int fsl_lpspi_wait_for_completion(struct spi_controller *controller) { struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(controller); if (fsl_lpspi->is_slave) { if (wait_for_completion_interruptible(&fsl_lpspi->xfer_done) || fsl_lpspi->slave_aborted) { dev_dbg(fsl_lpspi->dev, "interrupted\n"); return -EINTR; } } else { if (!wait_for_completion_timeout(&fsl_lpspi->xfer_done, HZ)) { dev_dbg(fsl_lpspi->dev, "wait for completion timeout\n"); return -ETIMEDOUT; } } return 0; } static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi) { u32 temp; if (!fsl_lpspi->usedma) { /* Disable all interrupt */ fsl_lpspi_intctrl(fsl_lpspi, 0); } /* W1C for all flags in SR */ temp = 0x3F << 8; writel(temp, fsl_lpspi->base + IMX7ULP_SR); /* Clear FIFO and disable module */ temp = CR_RRF | CR_RTF; writel(temp, fsl_lpspi->base + IMX7ULP_CR); return 0; } static void fsl_lpspi_dma_rx_callback(void *cookie) { struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie; complete(&fsl_lpspi->dma_rx_completion); } static void fsl_lpspi_dma_tx_callback(void *cookie) { struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie; complete(&fsl_lpspi->dma_tx_completion); } static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi, int size) { unsigned long timeout = 0; /* Time with actual data transfer and CS change delay related to HW */ timeout = (8 + 4) * size / fsl_lpspi->config.speed_hz; /* Add extra second for scheduler related activities */ timeout += 1; /* Double calculated timeout */ return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC); } static int fsl_lpspi_dma_transfer(struct spi_controller *controller, struct fsl_lpspi_data *fsl_lpspi, struct spi_transfer *transfer) { struct dma_async_tx_descriptor *desc_tx, *desc_rx; unsigned long transfer_timeout; unsigned long timeout; struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg; int ret; ret = fsl_lpspi_dma_configure(controller); if (ret) return ret; desc_rx = dmaengine_prep_slave_sg(controller->dma_rx, rx->sgl, rx->nents, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_rx) return -EINVAL; desc_rx->callback = fsl_lpspi_dma_rx_callback; desc_rx->callback_param = (void *)fsl_lpspi; dmaengine_submit(desc_rx); reinit_completion(&fsl_lpspi->dma_rx_completion); dma_async_issue_pending(controller->dma_rx); desc_tx = dmaengine_prep_slave_sg(controller->dma_tx, tx->sgl, tx->nents, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_tx) { dmaengine_terminate_all(controller->dma_tx); return -EINVAL; } desc_tx->callback = 
fsl_lpspi_dma_tx_callback; desc_tx->callback_param = (void *)fsl_lpspi; dmaengine_submit(desc_tx); reinit_completion(&fsl_lpspi->dma_tx_completion); dma_async_issue_pending(controller->dma_tx); fsl_lpspi->slave_aborted = false; if (!fsl_lpspi->is_slave) { transfer_timeout = fsl_lpspi_calculate_timeout(fsl_lpspi, transfer->len); /* Wait eDMA to finish the data transfer.*/ timeout = wait_for_completion_timeout(&fsl_lpspi->dma_tx_completion, transfer_timeout); if (!timeout) { dev_err(fsl_lpspi->dev, "I/O Error in DMA TX\n"); dmaengine_terminate_all(controller->dma_tx); dmaengine_terminate_all(controller->dma_rx); fsl_lpspi_reset(fsl_lpspi); return -ETIMEDOUT; } timeout = wait_for_completion_timeout(&fsl_lpspi->dma_rx_completion, transfer_timeout); if (!timeout) { dev_err(fsl_lpspi->dev, "I/O Error in DMA RX\n"); dmaengine_terminate_all(controller->dma_tx); dmaengine_terminate_all(controller->dma_rx); fsl_lpspi_reset(fsl_lpspi); return -ETIMEDOUT; } } else { if (wait_for_completion_interruptible(&fsl_lpspi->dma_tx_completion) || fsl_lpspi->slave_aborted) { dev_dbg(fsl_lpspi->dev, "I/O Error in DMA TX interrupted\n"); dmaengine_terminate_all(controller->dma_tx); dmaengine_terminate_all(controller->dma_rx); fsl_lpspi_reset(fsl_lpspi); return -EINTR; } if (wait_for_completion_interruptible(&fsl_lpspi->dma_rx_completion) || fsl_lpspi->slave_aborted) { dev_dbg(fsl_lpspi->dev, "I/O Error in DMA RX interrupted\n"); dmaengine_terminate_all(controller->dma_tx); dmaengine_terminate_all(controller->dma_rx); fsl_lpspi_reset(fsl_lpspi); return -EINTR; } } fsl_lpspi_reset(fsl_lpspi); return 0; } static void fsl_lpspi_dma_exit(struct spi_controller *controller) { if (controller->dma_rx) { dma_release_channel(controller->dma_rx); controller->dma_rx = NULL; } if (controller->dma_tx) { dma_release_channel(controller->dma_tx); controller->dma_tx = NULL; } } static int fsl_lpspi_dma_init(struct device *dev, struct fsl_lpspi_data *fsl_lpspi, struct spi_controller *controller) { int ret; /* Prepare for TX DMA: */ controller->dma_tx = dma_request_slave_channel_reason(dev, "tx"); if (IS_ERR(controller->dma_tx)) { ret = PTR_ERR(controller->dma_tx); dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret); controller->dma_tx = NULL; goto err; } /* Prepare for RX DMA: */ controller->dma_rx = dma_request_slave_channel_reason(dev, "rx"); if (IS_ERR(controller->dma_rx)) { ret = PTR_ERR(controller->dma_rx); dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret); controller->dma_rx = NULL; goto err; } init_completion(&fsl_lpspi->dma_rx_completion); init_completion(&fsl_lpspi->dma_tx_completion); controller->can_dma = fsl_lpspi_can_dma; controller->max_dma_len = FSL_LPSPI_MAX_EDMA_BYTES; return 0; err: fsl_lpspi_dma_exit(controller); return ret; } static int fsl_lpspi_pio_transfer(struct spi_controller *controller, struct spi_transfer *t) { struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(controller); int ret; fsl_lpspi->tx_buf = t->tx_buf; fsl_lpspi->rx_buf = t->rx_buf; fsl_lpspi->remain = t->len; reinit_completion(&fsl_lpspi->xfer_done); fsl_lpspi->slave_aborted = false; fsl_lpspi_write_tx_fifo(fsl_lpspi); ret = fsl_lpspi_wait_for_completion(controller); if (ret) return ret; fsl_lpspi_reset(fsl_lpspi); return 0; } static int fsl_lpspi_transfer_one(struct spi_controller *controller, struct spi_device *spi, struct spi_transfer *t) { struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(controller); int ret; fsl_lpspi->is_first_byte = true; ret = fsl_lpspi_setup_transfer(controller, spi, t); 
if (ret < 0) return ret; fsl_lpspi_set_cmd(fsl_lpspi); fsl_lpspi->is_first_byte = false; if (fsl_lpspi->usedma) ret = fsl_lpspi_dma_transfer(controller, fsl_lpspi, t); else ret = fsl_lpspi_pio_transfer(controller, t); if (ret < 0) return ret; return 0; } static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id) { u32 temp_SR, temp_IER; struct fsl_lpspi_data *fsl_lpspi = dev_id; temp_IER = readl(fsl_lpspi->base + IMX7ULP_IER); fsl_lpspi_intctrl(fsl_lpspi, 0); temp_SR = readl(fsl_lpspi->base + IMX7ULP_SR); fsl_lpspi_read_rx_fifo(fsl_lpspi); if ((temp_SR & SR_TDF) && (temp_IER & IER_TDIE)) { fsl_lpspi_write_tx_fifo(fsl_lpspi); return IRQ_HANDLED; } if (temp_SR & SR_MBF || readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_TXCOUNT) { writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR); fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE); return IRQ_HANDLED; } if (temp_SR & SR_FCF && (temp_IER & IER_FCIE)) { writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR); complete(&fsl_lpspi->xfer_done); return IRQ_HANDLED; } return IRQ_NONE; } #ifdef CONFIG_PM static int fsl_lpspi_runtime_resume(struct device *dev) { struct spi_controller *controller = dev_get_drvdata(dev); struct fsl_lpspi_data *fsl_lpspi; int ret; fsl_lpspi = spi_controller_get_devdata(controller); ret = clk_prepare_enable(fsl_lpspi->clk_per); if (ret) return ret; ret = clk_prepare_enable(fsl_lpspi->clk_ipg); if (ret) { clk_disable_unprepare(fsl_lpspi->clk_per); return ret; } return 0; } static int fsl_lpspi_runtime_suspend(struct device *dev) { struct spi_controller *controller = dev_get_drvdata(dev); struct fsl_lpspi_data *fsl_lpspi; fsl_lpspi = spi_controller_get_devdata(controller); clk_disable_unprepare(fsl_lpspi->clk_per); clk_disable_unprepare(fsl_lpspi->clk_ipg); return 0; } #endif static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi) { struct device *dev = fsl_lpspi->dev; pm_runtime_enable(dev); pm_runtime_set_autosuspend_delay(dev, FSL_LPSPI_RPM_TIMEOUT); pm_runtime_use_autosuspend(dev); return 0; } static int fsl_lpspi_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct fsl_lpspi_data *fsl_lpspi; struct spi_controller *controller; struct spi_imx_master *lpspi_platform_info = dev_get_platdata(&pdev->dev); struct resource *res; int i, ret, irq; u32 temp; bool is_slave; is_slave = of_property_read_bool((&pdev->dev)->of_node, "spi-slave"); if (is_slave) controller = spi_alloc_slave(&pdev->dev, sizeof(struct fsl_lpspi_data)); else controller = spi_alloc_master(&pdev->dev, sizeof(struct fsl_lpspi_data)); if (!controller) return -ENOMEM; platform_set_drvdata(pdev, controller); fsl_lpspi = spi_controller_get_devdata(controller); fsl_lpspi->dev = &pdev->dev; fsl_lpspi->is_slave = is_slave; if (!fsl_lpspi->is_slave) { for (i = 0; i < controller->num_chipselect; i++) { int cs_gpio = of_get_named_gpio(np, "cs-gpios", i); if (!gpio_is_valid(cs_gpio) && lpspi_platform_info) cs_gpio = lpspi_platform_info->chipselect[i]; fsl_lpspi->chipselect[i] = cs_gpio; if (!gpio_is_valid(cs_gpio)) continue; ret = devm_gpio_request(&pdev->dev, fsl_lpspi->chipselect[i], DRIVER_NAME); if (ret) { dev_err(&pdev->dev, "can't get cs gpios\n"); goto out_controller_put; } } controller->cs_gpios = fsl_lpspi->chipselect; controller->prepare_message = fsl_lpspi_prepare_message; } controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32); controller->transfer_one = fsl_lpspi_transfer_one; controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware; controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware; controller->mode_bits = 
SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX; controller->dev.of_node = pdev->dev.of_node; controller->bus_num = pdev->id; controller->slave_abort = fsl_lpspi_slave_abort; init_completion(&fsl_lpspi->xfer_done); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); fsl_lpspi->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(fsl_lpspi->base)) { ret = PTR_ERR(fsl_lpspi->base); goto out_controller_put; } fsl_lpspi->base_phys = res->start; irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = irq; goto out_controller_put; } ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, 0, dev_name(&pdev->dev), fsl_lpspi); if (ret) { dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret); goto out_controller_put; } fsl_lpspi->clk_per = devm_clk_get(&pdev->dev, "per"); if (IS_ERR(fsl_lpspi->clk_per)) { ret = PTR_ERR(fsl_lpspi->clk_per); goto out_controller_put; } fsl_lpspi->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); if (IS_ERR(fsl_lpspi->clk_ipg)) { ret = PTR_ERR(fsl_lpspi->clk_ipg); goto out_controller_put; } /* enable the clock */ ret = fsl_lpspi_init_rpm(fsl_lpspi); if (ret) goto out_controller_put; ret = pm_runtime_get_sync(fsl_lpspi->dev); if (ret < 0) { dev_err(fsl_lpspi->dev, "failed to enable clock\n"); goto out_controller_put; } temp = readl(fsl_lpspi->base + IMX7ULP_PARAM); fsl_lpspi->txfifosize = 1 << (temp & 0x0f); fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f); ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller); if (ret == -EPROBE_DEFER) goto out_controller_put; if (ret < 0) dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret); ret = devm_spi_register_controller(&pdev->dev, controller); if (ret < 0) { dev_err(&pdev->dev, "spi_register_controller error.\n"); goto out_controller_put; } return 0; out_controller_put: spi_controller_put(controller); return ret; } static int fsl_lpspi_remove(struct platform_device *pdev) { struct spi_controller *controller = platform_get_drvdata(pdev); struct fsl_lpspi_data *fsl_lpspi = spi_controller_get_devdata(controller); pm_runtime_disable(fsl_lpspi->dev); spi_master_put(controller); return 0; } #ifdef CONFIG_PM_SLEEP static int fsl_lpspi_suspend(struct device *dev) { int ret; pinctrl_pm_select_sleep_state(dev); ret = pm_runtime_force_suspend(dev); return ret; } static int fsl_lpspi_resume(struct device *dev) { int ret; ret = pm_runtime_force_resume(dev); if (ret) { dev_err(dev, "Error in resume: %d\n", ret); return ret; } pinctrl_pm_select_default_state(dev); return 0; } #endif /* CONFIG_PM_SLEEP */ static const struct dev_pm_ops fsl_lpspi_pm_ops = { SET_RUNTIME_PM_OPS(fsl_lpspi_runtime_suspend, fsl_lpspi_runtime_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(fsl_lpspi_suspend, fsl_lpspi_resume) }; static struct platform_driver fsl_lpspi_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = fsl_lpspi_dt_ids, .pm = &fsl_lpspi_pm_ops, }, .probe = fsl_lpspi_probe, .remove = fsl_lpspi_remove, }; module_platform_driver(fsl_lpspi_driver); MODULE_DESCRIPTION("LPSPI Controller driver"); MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>"); MODULE_LICENSE("GPL");
./CrossVul/dataset_final_sorted/CWE-400/c/good_1254_0
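Editorial aside: the LPSPI driver in the record above bounds each DMA wait with a timeout derived from the transfer length (fsl_lpspi_calculate_timeout). The stand-alone sketch below illustrates that general idea, scale the budget by bits on the wire at the configured clock and add fixed slack; the constants, helper name, and demo values are assumptions for this example, not the driver's actual formula.

/* Illustrative sketch only: timeout proportional to transfer size plus slack.
 * The bits-per-byte scaling mirrors the intent of fsl_lpspi_calculate_timeout;
 * the 500 ms margin and the demo arguments are assumptions for this example. */
#include <stdio.h>

static unsigned long calc_timeout_ms(unsigned long len_bytes, unsigned long speed_hz)
{
	unsigned long bits = len_bytes * 8;
	unsigned long ms = (bits * 1000) / speed_hz;

	return ms + 500;	/* assumed fixed margin so short transfers are not cut off early */
}

int main(void)
{
	printf("4096 bytes at 1 MHz -> %lu ms budget\n", calc_timeout_ms(4096, 1000000));
	return 0;
}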
crossvul-cpp_data_bad_5496_0
/* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * Copyright (c) 2013 Red Hat, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_bit.h" #include "xfs_mount.h" #include "xfs_da_format.h" #include "xfs_da_btree.h" #include "xfs_inode.h" #include "xfs_trans.h" #include "xfs_inode_item.h" #include "xfs_bmap.h" #include "xfs_attr.h" #include "xfs_attr_sf.h" #include "xfs_attr_remote.h" #include "xfs_attr_leaf.h" #include "xfs_error.h" #include "xfs_trace.h" #include "xfs_buf_item.h" #include "xfs_cksum.h" #include "xfs_dir2.h" STATIC int xfs_attr_shortform_compare(const void *a, const void *b) { xfs_attr_sf_sort_t *sa, *sb; sa = (xfs_attr_sf_sort_t *)a; sb = (xfs_attr_sf_sort_t *)b; if (sa->hash < sb->hash) { return -1; } else if (sa->hash > sb->hash) { return 1; } else { return sa->entno - sb->entno; } } #define XFS_ISRESET_CURSOR(cursor) \ (!((cursor)->initted) && !((cursor)->hashval) && \ !((cursor)->blkno) && !((cursor)->offset)) /* * Copy out entries of shortform attribute lists for attr_list(). * Shortform attribute lists are not stored in hashval sorted order. * If the output buffer is not large enough to hold them all, then we * we have to calculate each entries' hashvalue and sort them before * we can begin returning them to the user. */ int xfs_attr_shortform_list(xfs_attr_list_context_t *context) { attrlist_cursor_kern_t *cursor; xfs_attr_sf_sort_t *sbuf, *sbp; xfs_attr_shortform_t *sf; xfs_attr_sf_entry_t *sfe; xfs_inode_t *dp; int sbsize, nsbuf, count, i; int error; ASSERT(context != NULL); dp = context->dp; ASSERT(dp != NULL); ASSERT(dp->i_afp != NULL); sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data; ASSERT(sf != NULL); if (!sf->hdr.count) return 0; cursor = context->cursor; ASSERT(cursor != NULL); trace_xfs_attr_list_sf(context); /* * If the buffer is large enough and the cursor is at the start, * do not bother with sorting since we will return everything in * one buffer and another call using the cursor won't need to be * made. * Note the generous fudge factor of 16 overhead bytes per entry. * If bufsize is zero then put_listent must be a search function * and can just scan through what we have. */ if (context->bufsize == 0 || (XFS_ISRESET_CURSOR(cursor) && (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) { for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) { error = context->put_listent(context, sfe->flags, sfe->nameval, (int)sfe->namelen, (int)sfe->valuelen, &sfe->nameval[sfe->namelen]); /* * Either search callback finished early or * didn't fit it all in the buffer after all. 
*/ if (context->seen_enough) break; if (error) return error; sfe = XFS_ATTR_SF_NEXTENTRY(sfe); } trace_xfs_attr_list_sf_all(context); return 0; } /* do no more for a search callback */ if (context->bufsize == 0) return 0; /* * It didn't all fit, so we have to sort everything on hashval. */ sbsize = sf->hdr.count * sizeof(*sbuf); sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS); /* * Scan the attribute list for the rest of the entries, storing * the relevant info from only those that match into a buffer. */ nsbuf = 0; for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) { if (unlikely( ((char *)sfe < (char *)sf) || ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) { XFS_CORRUPTION_ERROR("xfs_attr_shortform_list", XFS_ERRLEVEL_LOW, context->dp->i_mount, sfe); kmem_free(sbuf); return -EFSCORRUPTED; } sbp->entno = i; sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen); sbp->name = sfe->nameval; sbp->namelen = sfe->namelen; /* These are bytes, and both on-disk, don't endian-flip */ sbp->valuelen = sfe->valuelen; sbp->flags = sfe->flags; sfe = XFS_ATTR_SF_NEXTENTRY(sfe); sbp++; nsbuf++; } /* * Sort the entries on hash then entno. */ xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare); /* * Re-find our place IN THE SORTED LIST. */ count = 0; cursor->initted = 1; cursor->blkno = 0; for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) { if (sbp->hash == cursor->hashval) { if (cursor->offset == count) { break; } count++; } else if (sbp->hash > cursor->hashval) { break; } } if (i == nsbuf) { kmem_free(sbuf); return 0; } /* * Loop putting entries into the user buffer. */ for ( ; i < nsbuf; i++, sbp++) { if (cursor->hashval != sbp->hash) { cursor->hashval = sbp->hash; cursor->offset = 0; } error = context->put_listent(context, sbp->flags, sbp->name, sbp->namelen, sbp->valuelen, &sbp->name[sbp->namelen]); if (error) return error; if (context->seen_enough) break; cursor->offset++; } kmem_free(sbuf); return 0; } STATIC int xfs_attr_node_list(xfs_attr_list_context_t *context) { attrlist_cursor_kern_t *cursor; xfs_attr_leafblock_t *leaf; xfs_da_intnode_t *node; struct xfs_attr3_icleaf_hdr leafhdr; struct xfs_da3_icnode_hdr nodehdr; struct xfs_da_node_entry *btree; int error, i; struct xfs_buf *bp; struct xfs_inode *dp = context->dp; struct xfs_mount *mp = dp->i_mount; trace_xfs_attr_node_list(context); cursor = context->cursor; cursor->initted = 1; /* * Do all sorts of validation on the passed-in cursor structure. * If anything is amiss, ignore the cursor and look up the hashval * starting from the btree root. 
*/ bp = NULL; if (cursor->blkno > 0) { error = xfs_da3_node_read(NULL, dp, cursor->blkno, -1, &bp, XFS_ATTR_FORK); if ((error != 0) && (error != -EFSCORRUPTED)) return error; if (bp) { struct xfs_attr_leaf_entry *entries; node = bp->b_addr; switch (be16_to_cpu(node->hdr.info.magic)) { case XFS_DA_NODE_MAGIC: case XFS_DA3_NODE_MAGIC: trace_xfs_attr_list_wrong_blk(context); xfs_trans_brelse(NULL, bp); bp = NULL; break; case XFS_ATTR_LEAF_MAGIC: case XFS_ATTR3_LEAF_MAGIC: leaf = bp->b_addr; xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf); entries = xfs_attr3_leaf_entryp(leaf); if (cursor->hashval > be32_to_cpu( entries[leafhdr.count - 1].hashval)) { trace_xfs_attr_list_wrong_blk(context); xfs_trans_brelse(NULL, bp); bp = NULL; } else if (cursor->hashval <= be32_to_cpu( entries[0].hashval)) { trace_xfs_attr_list_wrong_blk(context); xfs_trans_brelse(NULL, bp); bp = NULL; } break; default: trace_xfs_attr_list_wrong_blk(context); xfs_trans_brelse(NULL, bp); bp = NULL; } } } /* * We did not find what we expected given the cursor's contents, * so we start from the top and work down based on the hash value. * Note that start of node block is same as start of leaf block. */ if (bp == NULL) { cursor->blkno = 0; for (;;) { __uint16_t magic; error = xfs_da3_node_read(NULL, dp, cursor->blkno, -1, &bp, XFS_ATTR_FORK); if (error) return error; node = bp->b_addr; magic = be16_to_cpu(node->hdr.info.magic); if (magic == XFS_ATTR_LEAF_MAGIC || magic == XFS_ATTR3_LEAF_MAGIC) break; if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC) { XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)", XFS_ERRLEVEL_LOW, context->dp->i_mount, node); xfs_trans_brelse(NULL, bp); return -EFSCORRUPTED; } dp->d_ops->node_hdr_from_disk(&nodehdr, node); btree = dp->d_ops->node_tree_p(node); for (i = 0; i < nodehdr.count; btree++, i++) { if (cursor->hashval <= be32_to_cpu(btree->hashval)) { cursor->blkno = be32_to_cpu(btree->before); trace_xfs_attr_list_node_descend(context, btree); break; } } if (i == nodehdr.count) { xfs_trans_brelse(NULL, bp); return 0; } xfs_trans_brelse(NULL, bp); } } ASSERT(bp != NULL); /* * Roll upward through the blocks, processing each leaf block in * order. As long as there is space in the result buffer, keep * adding the information. */ for (;;) { leaf = bp->b_addr; error = xfs_attr3_leaf_list_int(bp, context); if (error) { xfs_trans_brelse(NULL, bp); return error; } xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf); if (context->seen_enough || leafhdr.forw == 0) break; cursor->blkno = leafhdr.forw; xfs_trans_brelse(NULL, bp); error = xfs_attr3_leaf_read(NULL, dp, cursor->blkno, -1, &bp); if (error) return error; } xfs_trans_brelse(NULL, bp); return 0; } /* * Copy out attribute list entries for attr_list(), for leaf attribute lists. */ int xfs_attr3_leaf_list_int( struct xfs_buf *bp, struct xfs_attr_list_context *context) { struct attrlist_cursor_kern *cursor; struct xfs_attr_leafblock *leaf; struct xfs_attr3_icleaf_hdr ichdr; struct xfs_attr_leaf_entry *entries; struct xfs_attr_leaf_entry *entry; int retval; int i; struct xfs_mount *mp = context->dp->i_mount; trace_xfs_attr_list_leaf(context); leaf = bp->b_addr; xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf); entries = xfs_attr3_leaf_entryp(leaf); cursor = context->cursor; cursor->initted = 1; /* * Re-find our place in the leaf block if this is a new syscall. 
*/ if (context->resynch) { entry = &entries[0]; for (i = 0; i < ichdr.count; entry++, i++) { if (be32_to_cpu(entry->hashval) == cursor->hashval) { if (cursor->offset == context->dupcnt) { context->dupcnt = 0; break; } context->dupcnt++; } else if (be32_to_cpu(entry->hashval) > cursor->hashval) { context->dupcnt = 0; break; } } if (i == ichdr.count) { trace_xfs_attr_list_notfound(context); return 0; } } else { entry = &entries[0]; i = 0; } context->resynch = 0; /* * We have found our place, start copying out the new attributes. */ retval = 0; for (; i < ichdr.count; entry++, i++) { if (be32_to_cpu(entry->hashval) != cursor->hashval) { cursor->hashval = be32_to_cpu(entry->hashval); cursor->offset = 0; } if (entry->flags & XFS_ATTR_INCOMPLETE) continue; /* skip incomplete entries */ if (entry->flags & XFS_ATTR_LOCAL) { xfs_attr_leaf_name_local_t *name_loc = xfs_attr3_leaf_name_local(leaf, i); retval = context->put_listent(context, entry->flags, name_loc->nameval, (int)name_loc->namelen, be16_to_cpu(name_loc->valuelen), &name_loc->nameval[name_loc->namelen]); if (retval) return retval; } else { xfs_attr_leaf_name_remote_t *name_rmt = xfs_attr3_leaf_name_remote(leaf, i); int valuelen = be32_to_cpu(name_rmt->valuelen); if (context->put_value) { xfs_da_args_t args; memset((char *)&args, 0, sizeof(args)); args.geo = context->dp->i_mount->m_attr_geo; args.dp = context->dp; args.whichfork = XFS_ATTR_FORK; args.valuelen = valuelen; args.rmtvaluelen = valuelen; args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS); args.rmtblkno = be32_to_cpu(name_rmt->valueblk); args.rmtblkcnt = xfs_attr3_rmt_blocks( args.dp->i_mount, valuelen); retval = xfs_attr_rmtval_get(&args); if (retval) return retval; retval = context->put_listent(context, entry->flags, name_rmt->name, (int)name_rmt->namelen, valuelen, args.value); kmem_free(args.value); } else { retval = context->put_listent(context, entry->flags, name_rmt->name, (int)name_rmt->namelen, valuelen, NULL); } if (retval) return retval; } if (context->seen_enough) break; cursor->offset++; } trace_xfs_attr_list_leaf_end(context); return retval; } /* * Copy out attribute entries for attr_list(), for leaf attribute lists. */ STATIC int xfs_attr_leaf_list(xfs_attr_list_context_t *context) { int error; struct xfs_buf *bp; trace_xfs_attr_leaf_list(context); context->cursor->blkno = 0; error = xfs_attr3_leaf_read(NULL, context->dp, 0, -1, &bp); if (error) return error; error = xfs_attr3_leaf_list_int(bp, context); xfs_trans_brelse(NULL, bp); return error; } int xfs_attr_list_int( xfs_attr_list_context_t *context) { int error; xfs_inode_t *dp = context->dp; uint lock_mode; XFS_STATS_INC(dp->i_mount, xs_attr_list); if (XFS_FORCED_SHUTDOWN(dp->i_mount)) return -EIO; /* * Decide on what work routines to call based on the inode size. */ lock_mode = xfs_ilock_attr_map_shared(dp); if (!xfs_inode_hasattr(dp)) { error = 0; } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { error = xfs_attr_shortform_list(context); } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) { error = xfs_attr_leaf_list(context); } else { error = xfs_attr_node_list(context); } xfs_iunlock(dp, lock_mode); return error; } #define ATTR_ENTBASESIZE /* minimum bytes used by an attr */ \ (((struct attrlist_ent *) 0)->a_name - (char *) 0) #define ATTR_ENTSIZE(namelen) /* actual bytes used by an attr */ \ ((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(u_int32_t)-1) \ & ~(sizeof(u_int32_t)-1)) /* * Format an attribute and copy it out to the user's buffer. 
* Take care to check values and protect against them changing later, * we may be reading them directly out of a user buffer. */ STATIC int xfs_attr_put_listent( xfs_attr_list_context_t *context, int flags, unsigned char *name, int namelen, int valuelen, unsigned char *value) { struct attrlist *alist = (struct attrlist *)context->alist; attrlist_ent_t *aep; int arraytop; ASSERT(!(context->flags & ATTR_KERNOVAL)); ASSERT(context->count >= 0); ASSERT(context->count < (ATTR_MAX_VALUELEN/8)); ASSERT(context->firstu >= sizeof(*alist)); ASSERT(context->firstu <= context->bufsize); /* * Only list entries in the right namespace. */ if (((context->flags & ATTR_SECURE) == 0) != ((flags & XFS_ATTR_SECURE) == 0)) return 0; if (((context->flags & ATTR_ROOT) == 0) != ((flags & XFS_ATTR_ROOT) == 0)) return 0; arraytop = sizeof(*alist) + context->count * sizeof(alist->al_offset[0]); context->firstu -= ATTR_ENTSIZE(namelen); if (context->firstu < arraytop) { trace_xfs_attr_list_full(context); alist->al_more = 1; context->seen_enough = 1; return 1; } aep = (attrlist_ent_t *)&context->alist[context->firstu]; aep->a_valuelen = valuelen; memcpy(aep->a_name, name, namelen); aep->a_name[namelen] = 0; alist->al_offset[context->count++] = context->firstu; alist->al_count = context->count; trace_xfs_attr_list_add(context); return 0; } /* * Generate a list of extended attribute names and optionally * also value lengths. Positive return value follows the XFS * convention of being an error, zero or negative return code * is the length of the buffer returned (negated), indicating * success. */ int xfs_attr_list( xfs_inode_t *dp, char *buffer, int bufsize, int flags, attrlist_cursor_kern_t *cursor) { xfs_attr_list_context_t context; struct attrlist *alist; int error; /* * Validate the cursor. */ if (cursor->pad1 || cursor->pad2) return -EINVAL; if ((cursor->initted == 0) && (cursor->hashval || cursor->blkno || cursor->offset)) return -EINVAL; /* * Check for a properly aligned buffer. */ if (((long)buffer) & (sizeof(int)-1)) return -EFAULT; if (flags & ATTR_KERNOVAL) bufsize = 0; /* * Initialize the output buffer. */ memset(&context, 0, sizeof(context)); context.dp = dp; context.cursor = cursor; context.resynch = 1; context.flags = flags; context.alist = buffer; context.bufsize = (bufsize & ~(sizeof(int)-1)); /* align */ context.firstu = context.bufsize; context.put_listent = xfs_attr_put_listent; alist = (struct attrlist *)context.alist; alist->al_count = 0; alist->al_more = 0; alist->al_offset[0] = context.bufsize; error = xfs_attr_list_int(&context); ASSERT(error <= 0); return error; }
./CrossVul/dataset_final_sorted/CWE-400/c/bad_5496_0
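Editorial aside: the shortform listing path in the record above sorts entries with xfs_attr_shortform_compare, ordering primarily by name hash and breaking ties by original entry number. A self-contained restatement of that comparator with qsort, using invented sample values, looks like this.

/* Stand-alone illustration of the hash-then-entno ordering used by
 * xfs_attr_shortform_compare; the sample data is invented for the demo. */
#include <stdio.h>
#include <stdlib.h>

struct sf_sort {
	unsigned int hash;
	int entno;
};

static int sf_cmp(const void *a, const void *b)
{
	const struct sf_sort *sa = a, *sb = b;

	if (sa->hash < sb->hash)
		return -1;
	if (sa->hash > sb->hash)
		return 1;
	return sa->entno - sb->entno;	/* equal hashes keep original entry order */
}

int main(void)
{
	struct sf_sort v[] = { { 7, 2 }, { 3, 1 }, { 7, 0 } };
	size_t i;

	qsort(v, sizeof(v) / sizeof(v[0]), sizeof(v[0]), sf_cmp);
	for (i = 0; i < sizeof(v) / sizeof(v[0]); i++)
		printf("hash=%u entno=%d\n", v[i].hash, v[i].entno);
	return 0;
}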
crossvul-cpp_data_good_2562_0
/* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2016 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Authors: Rasmus Lerdorf <rasmus@lerdorf.on.ca> | | Zeev Suraski <zeev@zend.com> | +----------------------------------------------------------------------+ */ /* $Id$ */ #include <stdio.h> #include "php.h" #include "ext/standard/php_standard.h" #include "ext/standard/credits.h" #include "ext/standard/php_smart_str.h" #include "php_variables.h" #include "php_globals.h" #include "php_content_types.h" #include "SAPI.h" #include "zend_globals.h" #ifdef PHP_WIN32 # include "win32/php_inttypes.h" #endif /* for systems that need to override reading of environment variables */ void _php_import_environment_variables(zval *array_ptr TSRMLS_DC); PHPAPI void (*php_import_environment_variables)(zval *array_ptr TSRMLS_DC) = _php_import_environment_variables; PHPAPI void php_register_variable(char *var, char *strval, zval *track_vars_array TSRMLS_DC) { php_register_variable_safe(var, strval, strlen(strval), track_vars_array TSRMLS_CC); } /* binary-safe version */ PHPAPI void php_register_variable_safe(char *var, char *strval, int str_len, zval *track_vars_array TSRMLS_DC) { zval new_entry; assert(strval != NULL); /* Prepare value */ Z_STRLEN(new_entry) = str_len; Z_STRVAL(new_entry) = estrndup(strval, Z_STRLEN(new_entry)); Z_TYPE(new_entry) = IS_STRING; php_register_variable_ex(var, &new_entry, track_vars_array TSRMLS_CC); } PHPAPI void php_register_variable_ex(char *var_name, zval *val, zval *track_vars_array TSRMLS_DC) { char *p = NULL; char *ip = NULL; /* index pointer */ char *index; char *var, *var_orig; int var_len, index_len; zval *gpc_element, **gpc_element_p; zend_bool is_array = 0; HashTable *symtable1 = NULL; ALLOCA_FLAG(use_heap) assert(var_name != NULL); if (track_vars_array) { symtable1 = Z_ARRVAL_P(track_vars_array); } if (!symtable1) { /* Nothing to do */ zval_dtor(val); return; } /* ignore leading spaces in the variable name */ while (*var_name && *var_name==' ') { var_name++; } /* * Prepare variable name */ var_len = strlen(var_name); var = var_orig = do_alloca(var_len + 1, use_heap); memcpy(var_orig, var_name, var_len + 1); /* ensure that we don't have spaces or dots in the variable name (not binary safe) */ for (p = var; *p; p++) { if (*p == ' ' || *p == '.') { *p='_'; } else if (*p == '[') { is_array = 1; ip = p; *p = 0; break; } } var_len = p - var; if (var_len==0) { /* empty variable name, or variable name with a space in it */ zval_dtor(val); free_alloca(var_orig, use_heap); return; } /* GLOBALS hijack attempt, reject parameter */ if (symtable1 == EG(active_symbol_table) && var_len == sizeof("GLOBALS")-1 && !memcmp(var, "GLOBALS", sizeof("GLOBALS")-1)) { zval_dtor(val); free_alloca(var_orig, use_heap); return; } index = var; index_len = var_len; if (is_array) { int nest_level = 0; while (1) { char *index_s; 
int new_idx_len = 0; if(++nest_level > PG(max_input_nesting_level)) { HashTable *ht; /* too many levels of nesting */ if (track_vars_array) { ht = Z_ARRVAL_P(track_vars_array); zend_symtable_del(ht, var, var_len + 1); } zval_dtor(val); /* do not output the error message to the screen, this helps us to to avoid "information disclosure" */ if (!PG(display_errors)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Input variable nesting level exceeded %ld. To increase the limit change max_input_nesting_level in php.ini.", PG(max_input_nesting_level)); } free_alloca(var_orig, use_heap); return; } ip++; index_s = ip; if (isspace(*ip)) { ip++; } if (*ip==']') { index_s = NULL; } else { ip = strchr(ip, ']'); if (!ip) { /* PHP variables cannot contain '[' in their names, so we replace the character with a '_' */ *(index_s - 1) = '_'; index_len = 0; if (index) { index_len = strlen(index); } goto plain_var; return; } *ip = 0; new_idx_len = strlen(index_s); } if (!index) { MAKE_STD_ZVAL(gpc_element); array_init(gpc_element); if (zend_hash_next_index_insert(symtable1, &gpc_element, sizeof(zval *), (void **) &gpc_element_p) == FAILURE) { zval_ptr_dtor(&gpc_element); zval_dtor(val); free_alloca(var_orig, use_heap); return; } } else { if (zend_symtable_find(symtable1, index, index_len + 1, (void **) &gpc_element_p) == FAILURE || Z_TYPE_PP(gpc_element_p) != IS_ARRAY) { MAKE_STD_ZVAL(gpc_element); array_init(gpc_element); zend_symtable_update(symtable1, index, index_len + 1, &gpc_element, sizeof(zval *), (void **) &gpc_element_p); } } symtable1 = Z_ARRVAL_PP(gpc_element_p); /* ip pointed to the '[' character, now obtain the key */ index = index_s; index_len = new_idx_len; ip++; if (*ip == '[') { is_array = 1; *ip = 0; } else { goto plain_var; } } } else { plain_var: MAKE_STD_ZVAL(gpc_element); gpc_element->value = val->value; Z_TYPE_P(gpc_element) = Z_TYPE_P(val); if (!index) { if (zend_hash_next_index_insert(symtable1, &gpc_element, sizeof(zval *), (void **) &gpc_element_p) == FAILURE) { zval_ptr_dtor(&gpc_element); } } else { /* * According to rfc2965, more specific paths are listed above the less specific ones. * If we encounter a duplicate cookie name, we should skip it, since it is not possible * to have the same (plain text) cookie name for the same path and we should not overwrite * more specific cookies with the less specific ones. 
*/ if (PG(http_globals)[TRACK_VARS_COOKIE] && symtable1 == Z_ARRVAL_P(PG(http_globals)[TRACK_VARS_COOKIE]) && zend_symtable_exists(symtable1, index, index_len + 1)) { zval_ptr_dtor(&gpc_element); } else { zend_symtable_update(symtable1, index, index_len + 1, &gpc_element, sizeof(zval *), (void **) &gpc_element_p); } } } free_alloca(var_orig, use_heap); } typedef struct post_var_data { smart_str str; char *ptr; char *end; uint64_t cnt; /* Bytes in ptr that have already been scanned for '&' */ size_t already_scanned; } post_var_data_t; static zend_bool add_post_var(zval *arr, post_var_data_t *var, zend_bool eof TSRMLS_DC) { char *start, *ksep, *vsep, *val; size_t klen, vlen; /* FIXME: string-size_t */ unsigned int new_vlen; if (var->ptr >= var->end) { return 0; } start = var->ptr + var->already_scanned; vsep = memchr(start, '&', var->end - start); if (!vsep) { if (!eof) { var->already_scanned = var->end - var->ptr; return 0; } else { vsep = var->end; } } ksep = memchr(var->ptr, '=', vsep - var->ptr); if (ksep) { *ksep = '\0'; /* "foo=bar&" or "foo=&" */ klen = ksep - var->ptr; vlen = vsep - ++ksep; } else { ksep = ""; /* "foo&" */ klen = vsep - var->ptr; vlen = 0; } php_url_decode(var->ptr, klen); val = estrndup(ksep, vlen); if (vlen) { vlen = php_url_decode(val, vlen); } if (sapi_module.input_filter(PARSE_POST, var->ptr, &val, vlen, &new_vlen TSRMLS_CC)) { php_register_variable_safe(var->ptr, val, new_vlen, arr TSRMLS_CC); } efree(val); var->ptr = vsep + (vsep != var->end); var->already_scanned = 0; return 1; } static inline int add_post_vars(zval *arr, post_var_data_t *vars, zend_bool eof TSRMLS_DC) { uint64_t max_vars = PG(max_input_vars); vars->ptr = vars->str.c; vars->end = vars->str.c + vars->str.len; while (add_post_var(arr, vars, eof TSRMLS_CC)) { if (++vars->cnt > max_vars) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Input variables exceeded %" PRIu64 ". 
" "To increase the limit change max_input_vars in php.ini.", max_vars); return FAILURE; } } if (!eof && vars->str.c != vars->ptr) { memmove(vars->str.c, vars->ptr, vars->str.len = vars->end - vars->ptr); } return SUCCESS; } #ifdef PHP_WIN32 #define SAPI_POST_HANDLER_BUFSIZ 16384 #else # define SAPI_POST_HANDLER_BUFSIZ BUFSIZ #endif SAPI_API SAPI_POST_HANDLER_FUNC(php_std_post_handler) { zval *arr = (zval *) arg; php_stream *s = SG(request_info).request_body; post_var_data_t post_data; if (s && SUCCESS == php_stream_rewind(s)) { memset(&post_data, 0, sizeof(post_data)); while (!php_stream_eof(s)) { char buf[SAPI_POST_HANDLER_BUFSIZ] = {0}; size_t len = php_stream_read(s, buf, SAPI_POST_HANDLER_BUFSIZ); if (len && len != (size_t) -1) { smart_str_appendl(&post_data.str, buf, len); if (SUCCESS != add_post_vars(arr, &post_data, 0 TSRMLS_CC)) { if (post_data.str.c) { efree(post_data.str.c); } return; } } if (len != SAPI_POST_HANDLER_BUFSIZ){ break; } } add_post_vars(arr, &post_data, 1 TSRMLS_CC); if (post_data.str.c) { efree(post_data.str.c); } } } #undef SAPI_POST_HANDLER_BUFSIZ SAPI_API SAPI_INPUT_FILTER_FUNC(php_default_input_filter) { /* TODO: check .ini setting here and apply user-defined input filter */ if(new_val_len) *new_val_len = val_len; return 1; } SAPI_API SAPI_TREAT_DATA_FUNC(php_default_treat_data) { char *res = NULL, *var, *val, *separator = NULL; const char *c_var; zval *array_ptr; int free_buffer = 0; char *strtok_buf = NULL; long count = 0; switch (arg) { case PARSE_POST: case PARSE_GET: case PARSE_COOKIE: ALLOC_ZVAL(array_ptr); array_init(array_ptr); INIT_PZVAL(array_ptr); switch (arg) { case PARSE_POST: if (PG(http_globals)[TRACK_VARS_POST]) { zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_POST]); } PG(http_globals)[TRACK_VARS_POST] = array_ptr; break; case PARSE_GET: if (PG(http_globals)[TRACK_VARS_GET]) { zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_GET]); } PG(http_globals)[TRACK_VARS_GET] = array_ptr; break; case PARSE_COOKIE: if (PG(http_globals)[TRACK_VARS_COOKIE]) { zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_COOKIE]); } PG(http_globals)[TRACK_VARS_COOKIE] = array_ptr; break; } break; default: array_ptr = destArray; break; } if (arg == PARSE_POST) { sapi_handle_post(array_ptr TSRMLS_CC); return; } if (arg == PARSE_GET) { /* GET data */ c_var = SG(request_info).query_string; if (c_var && *c_var) { res = (char *) estrdup(c_var); free_buffer = 1; } else { free_buffer = 0; } } else if (arg == PARSE_COOKIE) { /* Cookie data */ c_var = SG(request_info).cookie_data; if (c_var && *c_var) { res = (char *) estrdup(c_var); free_buffer = 1; } else { free_buffer = 0; } } else if (arg == PARSE_STRING) { /* String data */ res = str; free_buffer = 1; } if (!res) { return; } switch (arg) { case PARSE_GET: case PARSE_STRING: separator = (char *) estrdup(PG(arg_separator).input); break; case PARSE_COOKIE: separator = ";\0"; break; } var = php_strtok_r(res, separator, &strtok_buf); while (var) { val = strchr(var, '='); if (arg == PARSE_COOKIE) { /* Remove leading spaces from cookie names, needed for multi-cookie header where ; can be followed by a space */ while (isspace(*var)) { var++; } if (var == val || *var == '\0') { goto next_cookie; } } if (++count > PG(max_input_vars)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Input variables exceeded %ld. 
To increase the limit change max_input_vars in php.ini.", PG(max_input_vars)); break; } if (val) { /* have a value */ int val_len; unsigned int new_val_len; *val++ = '\0'; php_url_decode(var, strlen(var)); val_len = php_url_decode(val, strlen(val)); val = estrndup(val, val_len); if (sapi_module.input_filter(arg, var, &val, val_len, &new_val_len TSRMLS_CC)) { php_register_variable_safe(var, val, new_val_len, array_ptr TSRMLS_CC); } efree(val); } else { int val_len; unsigned int new_val_len; php_url_decode(var, strlen(var)); val_len = 0; val = estrndup("", val_len); if (sapi_module.input_filter(arg, var, &val, val_len, &new_val_len TSRMLS_CC)) { php_register_variable_safe(var, val, new_val_len, array_ptr TSRMLS_CC); } efree(val); } next_cookie: var = php_strtok_r(NULL, separator, &strtok_buf); } if (arg != PARSE_COOKIE) { efree(separator); } if (free_buffer) { efree(res); } } void _php_import_environment_variables(zval *array_ptr TSRMLS_DC) { char buf[128]; char **env, *p, *t = buf; size_t alloc_size = sizeof(buf); unsigned long nlen; /* ptrdiff_t is not portable */ for (env = environ; env != NULL && *env != NULL; env++) { p = strchr(*env, '='); if (!p) { /* malformed entry? */ continue; } nlen = p - *env; if (nlen >= alloc_size) { alloc_size = nlen + 64; t = (t == buf ? emalloc(alloc_size): erealloc(t, alloc_size)); } memcpy(t, *env, nlen); t[nlen] = '\0'; php_register_variable(t, p + 1, array_ptr TSRMLS_CC); } if (t != buf && t != NULL) { efree(t); } } zend_bool php_std_auto_global_callback(char *name, uint name_len TSRMLS_DC) { zend_printf("%s\n", name); return 0; /* don't rearm */ } /* {{{ php_build_argv */ static void php_build_argv(char *s, zval *track_vars_array TSRMLS_DC) { zval *arr, *argc, *tmp; int count = 0; char *ss, *space; if (!(SG(request_info).argc || track_vars_array)) { return; } ALLOC_INIT_ZVAL(arr); array_init(arr); /* Prepare argv */ if (SG(request_info).argc) { /* are we in cli sapi? 
*/ int i; for (i = 0; i < SG(request_info).argc; i++) { ALLOC_ZVAL(tmp); Z_TYPE_P(tmp) = IS_STRING; Z_STRLEN_P(tmp) = strlen(SG(request_info).argv[i]); Z_STRVAL_P(tmp) = estrndup(SG(request_info).argv[i], Z_STRLEN_P(tmp)); INIT_PZVAL(tmp); if (zend_hash_next_index_insert(Z_ARRVAL_P(arr), &tmp, sizeof(zval *), NULL) == FAILURE) { if (Z_TYPE_P(tmp) == IS_STRING) { efree(Z_STRVAL_P(tmp)); } } } } else if (s && *s) { ss = s; while (ss) { space = strchr(ss, '+'); if (space) { *space = '\0'; } /* auto-type */ ALLOC_ZVAL(tmp); Z_TYPE_P(tmp) = IS_STRING; Z_STRLEN_P(tmp) = strlen(ss); Z_STRVAL_P(tmp) = estrndup(ss, Z_STRLEN_P(tmp)); INIT_PZVAL(tmp); count++; if (zend_hash_next_index_insert(Z_ARRVAL_P(arr), &tmp, sizeof(zval *), NULL) == FAILURE) { if (Z_TYPE_P(tmp) == IS_STRING) { efree(Z_STRVAL_P(tmp)); } } if (space) { *space = '+'; ss = space + 1; } else { ss = space; } } } /* prepare argc */ ALLOC_INIT_ZVAL(argc); if (SG(request_info).argc) { Z_LVAL_P(argc) = SG(request_info).argc; } else { Z_LVAL_P(argc) = count; } Z_TYPE_P(argc) = IS_LONG; if (SG(request_info).argc) { Z_ADDREF_P(arr); Z_ADDREF_P(argc); zend_hash_update(&EG(symbol_table), "argv", sizeof("argv"), &arr, sizeof(zval *), NULL); zend_hash_update(&EG(symbol_table), "argc", sizeof("argc"), &argc, sizeof(zval *), NULL); } if (track_vars_array) { Z_ADDREF_P(arr); Z_ADDREF_P(argc); zend_hash_update(Z_ARRVAL_P(track_vars_array), "argv", sizeof("argv"), &arr, sizeof(zval *), NULL); zend_hash_update(Z_ARRVAL_P(track_vars_array), "argc", sizeof("argc"), &argc, sizeof(zval *), NULL); } zval_ptr_dtor(&arr); zval_ptr_dtor(&argc); } /* }}} */ /* {{{ php_register_server_variables */ static inline void php_register_server_variables(TSRMLS_D) { zval *array_ptr = NULL; ALLOC_ZVAL(array_ptr); array_init(array_ptr); INIT_PZVAL(array_ptr); if (PG(http_globals)[TRACK_VARS_SERVER]) { zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_SERVER]); } PG(http_globals)[TRACK_VARS_SERVER] = array_ptr; /* Server variables */ if (sapi_module.register_server_variables) { sapi_module.register_server_variables(array_ptr TSRMLS_CC); } /* PHP Authentication support */ if (SG(request_info).auth_user) { php_register_variable("PHP_AUTH_USER", SG(request_info).auth_user, array_ptr TSRMLS_CC); } if (SG(request_info).auth_password) { php_register_variable("PHP_AUTH_PW", SG(request_info).auth_password, array_ptr TSRMLS_CC); } if (SG(request_info).auth_digest) { php_register_variable("PHP_AUTH_DIGEST", SG(request_info).auth_digest, array_ptr TSRMLS_CC); } /* store request init time */ { zval request_time_float, request_time_long; Z_TYPE(request_time_float) = IS_DOUBLE; Z_DVAL(request_time_float) = sapi_get_request_time(TSRMLS_C); php_register_variable_ex("REQUEST_TIME_FLOAT", &request_time_float, array_ptr TSRMLS_CC); Z_TYPE(request_time_long) = IS_LONG; Z_LVAL(request_time_long) = zend_dval_to_lval(Z_DVAL(request_time_float)); php_register_variable_ex("REQUEST_TIME", &request_time_long, array_ptr TSRMLS_CC); } } /* }}} */ /* {{{ php_autoglobal_merge */ static void php_autoglobal_merge(HashTable *dest, HashTable *src TSRMLS_DC) { zval **src_entry, **dest_entry; char *string_key; uint string_key_len; ulong num_key; HashPosition pos; int key_type; int globals_check = (dest == (&EG(symbol_table))); zend_hash_internal_pointer_reset_ex(src, &pos); while (zend_hash_get_current_data_ex(src, (void **)&src_entry, &pos) == SUCCESS) { key_type = zend_hash_get_current_key_ex(src, &string_key, &string_key_len, &num_key, 0, &pos); if (Z_TYPE_PP(src_entry) != IS_ARRAY || (key_type == 
HASH_KEY_IS_STRING && zend_hash_find(dest, string_key, string_key_len, (void **) &dest_entry) != SUCCESS) || (key_type == HASH_KEY_IS_LONG && zend_hash_index_find(dest, num_key, (void **)&dest_entry) != SUCCESS) || Z_TYPE_PP(dest_entry) != IS_ARRAY ) { Z_ADDREF_PP(src_entry); if (key_type == HASH_KEY_IS_STRING) { if (!globals_check || string_key_len != sizeof("GLOBALS") || memcmp(string_key, "GLOBALS", sizeof("GLOBALS") - 1)) { zend_hash_update(dest, string_key, string_key_len, src_entry, sizeof(zval *), NULL); } else { Z_DELREF_PP(src_entry); } } else { zend_hash_index_update(dest, num_key, src_entry, sizeof(zval *), NULL); } } else { SEPARATE_ZVAL(dest_entry); php_autoglobal_merge(Z_ARRVAL_PP(dest_entry), Z_ARRVAL_PP(src_entry) TSRMLS_CC); } zend_hash_move_forward_ex(src, &pos); } } /* }}} */ static zend_bool php_auto_globals_create_server(const char *name, uint name_len TSRMLS_DC); static zend_bool php_auto_globals_create_env(const char *name, uint name_len TSRMLS_DC); static zend_bool php_auto_globals_create_request(const char *name, uint name_len TSRMLS_DC); /* {{{ php_hash_environment */ PHPAPI int php_hash_environment(TSRMLS_D) { memset(PG(http_globals), 0, sizeof(PG(http_globals))); zend_activate_auto_globals(TSRMLS_C); if (PG(register_argc_argv)) { php_build_argv(SG(request_info).query_string, PG(http_globals)[TRACK_VARS_SERVER] TSRMLS_CC); } return SUCCESS; } /* }}} */ static zend_bool php_auto_globals_create_get(const char *name, uint name_len TSRMLS_DC) { zval *vars; if (PG(variables_order) && (strchr(PG(variables_order),'G') || strchr(PG(variables_order),'g'))) { sapi_module.treat_data(PARSE_GET, NULL, NULL TSRMLS_CC); vars = PG(http_globals)[TRACK_VARS_GET]; } else { ALLOC_ZVAL(vars); array_init(vars); INIT_PZVAL(vars); if (PG(http_globals)[TRACK_VARS_GET]) { zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_GET]); } PG(http_globals)[TRACK_VARS_GET] = vars; } zend_hash_update(&EG(symbol_table), name, name_len + 1, &vars, sizeof(zval *), NULL); Z_ADDREF_P(vars); return 0; /* don't rearm */ } static zend_bool php_auto_globals_create_post(const char *name, uint name_len TSRMLS_DC) { zval *vars; if (PG(variables_order) && (strchr(PG(variables_order),'P') || strchr(PG(variables_order),'p')) && SG(request_info).request_method && !strcasecmp(SG(request_info).request_method, "POST")) { sapi_module.treat_data(PARSE_POST, NULL, NULL TSRMLS_CC); vars = PG(http_globals)[TRACK_VARS_POST]; } else { ALLOC_ZVAL(vars); array_init(vars); INIT_PZVAL(vars); if (PG(http_globals)[TRACK_VARS_POST]) { zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_POST]); } PG(http_globals)[TRACK_VARS_POST] = vars; } zend_hash_update(&EG(symbol_table), name, name_len + 1, &vars, sizeof(zval *), NULL); Z_ADDREF_P(vars); return 0; /* don't rearm */ } static zend_bool php_auto_globals_create_cookie(const char *name, uint name_len TSRMLS_DC) { zval *vars; if (PG(variables_order) && (strchr(PG(variables_order),'C') || strchr(PG(variables_order),'c'))) { sapi_module.treat_data(PARSE_COOKIE, NULL, NULL TSRMLS_CC); vars = PG(http_globals)[TRACK_VARS_COOKIE]; } else { ALLOC_ZVAL(vars); array_init(vars); INIT_PZVAL(vars); if (PG(http_globals)[TRACK_VARS_COOKIE]) { zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_COOKIE]); } PG(http_globals)[TRACK_VARS_COOKIE] = vars; } zend_hash_update(&EG(symbol_table), name, name_len + 1, &vars, sizeof(zval *), NULL); Z_ADDREF_P(vars); return 0; /* don't rearm */ } static zend_bool php_auto_globals_create_files(const char *name, uint name_len TSRMLS_DC) { zval *vars; if (PG(http_globals)[TRACK_VARS_FILES]) 
{ vars = PG(http_globals)[TRACK_VARS_FILES]; } else { ALLOC_ZVAL(vars); array_init(vars); INIT_PZVAL(vars); PG(http_globals)[TRACK_VARS_FILES] = vars; } zend_hash_update(&EG(symbol_table), name, name_len + 1, &vars, sizeof(zval *), NULL); Z_ADDREF_P(vars); return 0; /* don't rearm */ } /* Upgly hack to fix HTTP_PROXY issue, see bug #72573 */ static void check_http_proxy(HashTable *var_table) { if (zend_hash_exists(var_table, "HTTP_PROXY", sizeof("HTTP_PROXY"))) { char *local_proxy = getenv("HTTP_PROXY"); if (!local_proxy) { zend_hash_del(var_table, "HTTP_PROXY", sizeof("HTTP_PROXY")); } else { zval *local_zval; ALLOC_INIT_ZVAL(local_zval); ZVAL_STRING(local_zval, local_proxy, 1); zend_hash_update(var_table, "HTTP_PROXY", sizeof("HTTP_PROXY"), &local_zval, sizeof(zval **), NULL); } } } static zend_bool php_auto_globals_create_server(const char *name, uint name_len TSRMLS_DC) { if (PG(variables_order) && (strchr(PG(variables_order),'S') || strchr(PG(variables_order),'s'))) { php_register_server_variables(TSRMLS_C); if (PG(register_argc_argv)) { if (SG(request_info).argc) { zval **argc, **argv; if (zend_hash_find(&EG(symbol_table), "argc", sizeof("argc"), (void**)&argc) == SUCCESS && zend_hash_find(&EG(symbol_table), "argv", sizeof("argv"), (void**)&argv) == SUCCESS) { Z_ADDREF_PP(argc); Z_ADDREF_PP(argv); zend_hash_update(Z_ARRVAL_P(PG(http_globals)[TRACK_VARS_SERVER]), "argv", sizeof("argv"), argv, sizeof(zval *), NULL); zend_hash_update(Z_ARRVAL_P(PG(http_globals)[TRACK_VARS_SERVER]), "argc", sizeof("argc"), argc, sizeof(zval *), NULL); } } else { php_build_argv(SG(request_info).query_string, PG(http_globals)[TRACK_VARS_SERVER] TSRMLS_CC); } } } else { zval *server_vars=NULL; ALLOC_ZVAL(server_vars); array_init(server_vars); INIT_PZVAL(server_vars); if (PG(http_globals)[TRACK_VARS_SERVER]) { zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_SERVER]); } PG(http_globals)[TRACK_VARS_SERVER] = server_vars; } check_http_proxy(Z_ARRVAL_P(PG(http_globals)[TRACK_VARS_SERVER])); zend_hash_update(&EG(symbol_table), name, name_len + 1, &PG(http_globals)[TRACK_VARS_SERVER], sizeof(zval *), NULL); Z_ADDREF_P(PG(http_globals)[TRACK_VARS_SERVER]); return 0; /* don't rearm */ } static zend_bool php_auto_globals_create_env(const char *name, uint name_len TSRMLS_DC) { zval *env_vars = NULL; ALLOC_ZVAL(env_vars); array_init(env_vars); INIT_PZVAL(env_vars); if (PG(http_globals)[TRACK_VARS_ENV]) { zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_ENV]); } PG(http_globals)[TRACK_VARS_ENV] = env_vars; if (PG(variables_order) && (strchr(PG(variables_order),'E') || strchr(PG(variables_order),'e'))) { php_import_environment_variables(PG(http_globals)[TRACK_VARS_ENV] TSRMLS_CC); } check_http_proxy(Z_ARRVAL_P(PG(http_globals)[TRACK_VARS_ENV])); zend_hash_update(&EG(symbol_table), name, name_len + 1, &PG(http_globals)[TRACK_VARS_ENV], sizeof(zval *), NULL); Z_ADDREF_P(PG(http_globals)[TRACK_VARS_ENV]); return 0; /* don't rearm */ } static zend_bool php_auto_globals_create_request(const char *name, uint name_len TSRMLS_DC) { zval *form_variables; unsigned char _gpc_flags[3] = {0, 0, 0}; char *p; ALLOC_ZVAL(form_variables); array_init(form_variables); INIT_PZVAL(form_variables); if (PG(request_order) != NULL) { p = PG(request_order); } else { p = PG(variables_order); } for (; p && *p; p++) { switch (*p) { case 'g': case 'G': if (!_gpc_flags[0]) { php_autoglobal_merge(Z_ARRVAL_P(form_variables), Z_ARRVAL_P(PG(http_globals)[TRACK_VARS_GET]) TSRMLS_CC); _gpc_flags[0] = 1; } break; case 'p': case 'P': if (!_gpc_flags[1]) { 
php_autoglobal_merge(Z_ARRVAL_P(form_variables), Z_ARRVAL_P(PG(http_globals)[TRACK_VARS_POST]) TSRMLS_CC); _gpc_flags[1] = 1; } break; case 'c': case 'C': if (!_gpc_flags[2]) { php_autoglobal_merge(Z_ARRVAL_P(form_variables), Z_ARRVAL_P(PG(http_globals)[TRACK_VARS_COOKIE]) TSRMLS_CC); _gpc_flags[2] = 1; } break; } } zend_hash_update(&EG(symbol_table), name, name_len + 1, &form_variables, sizeof(zval *), NULL); return 0; } void php_startup_auto_globals(TSRMLS_D) { zend_register_auto_global(ZEND_STRL("_GET"), 0, php_auto_globals_create_get TSRMLS_CC); zend_register_auto_global(ZEND_STRL("_POST"), 0, php_auto_globals_create_post TSRMLS_CC); zend_register_auto_global(ZEND_STRL("_COOKIE"), 0, php_auto_globals_create_cookie TSRMLS_CC); zend_register_auto_global(ZEND_STRL("_SERVER"), PG(auto_globals_jit), php_auto_globals_create_server TSRMLS_CC); zend_register_auto_global(ZEND_STRL("_ENV"), PG(auto_globals_jit), php_auto_globals_create_env TSRMLS_CC); zend_register_auto_global(ZEND_STRL("_REQUEST"), PG(auto_globals_jit), php_auto_globals_create_request TSRMLS_CC); zend_register_auto_global(ZEND_STRL("_FILES"), 0, php_auto_globals_create_files TSRMLS_CC); } /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: sw=4 ts=4 fdm=marker * vim<600: sw=4 ts=4 */
./CrossVul/dataset_final_sorted/CWE-400/c/good_2562_0
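Editorial aside: php_register_variable_ex in the record above walks names of the form name[idx1][idx2] bracket by bracket, descending into nested symbol tables as it goes. Purely as an illustration of that parsing step, and not of PHP's actual implementation, here is a tiny C sketch that splits such a name into its base and index components; error handling and the fallback to a plain variable are deliberately omitted.

/* Illustration only: split "a[b][c]" into a base name and bracketed indices,
 * roughly the walk php_register_variable_ex performs before inserting into
 * nested hash tables. Edge cases and the '_' fallback are omitted. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[] = "a[b][c]";
	char *p = strchr(name, '[');

	if (p) {
		*p++ = '\0';			/* terminate the base name */
		printf("base=%s\n", name);
		while (p) {
			char *end = strchr(p, ']');

			if (!end)
				break;		/* unterminated index: stop here */
			*end = '\0';
			printf("index=%s\n", p);
			p = strchr(end + 1, '[');
			if (p)
				p++;		/* step past the next '[' to the index text */
		}
	}
	return 0;
}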
crossvul-cpp_data_good_1257_0
/* * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/irqdomain.h> #include <linux/pci.h> #include <linux/pm_domain.h> #include <linux/platform_device.h> #include <sound/designware_i2s.h> #include <sound/pcm.h> #include "amdgpu.h" #include "atom.h" #include "amdgpu_acp.h" #include "acp_gfx_if.h" #define ACP_TILE_ON_MASK 0x03 #define ACP_TILE_OFF_MASK 0x02 #define ACP_TILE_ON_RETAIN_REG_MASK 0x1f #define ACP_TILE_OFF_RETAIN_REG_MASK 0x20 #define ACP_TILE_P1_MASK 0x3e #define ACP_TILE_P2_MASK 0x3d #define ACP_TILE_DSP0_MASK 0x3b #define ACP_TILE_DSP1_MASK 0x37 #define ACP_TILE_DSP2_MASK 0x2f #define ACP_DMA_REGS_END 0x146c0 #define ACP_I2S_PLAY_REGS_START 0x14840 #define ACP_I2S_PLAY_REGS_END 0x148b4 #define ACP_I2S_CAP_REGS_START 0x148b8 #define ACP_I2S_CAP_REGS_END 0x1496c #define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac #define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8 #define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c #define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68 #define ACP_BT_PLAY_REGS_START 0x14970 #define ACP_BT_PLAY_REGS_END 0x14a24 #define ACP_BT_COMP1_REG_OFFSET 0xac #define ACP_BT_COMP2_REG_OFFSET 0xa8 #define mmACP_PGFSM_RETAIN_REG 0x51c9 #define mmACP_PGFSM_CONFIG_REG 0x51ca #define mmACP_PGFSM_READ_REG_0 0x51cc #define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8 #define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9 #define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa #define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb #define mmACP_CONTROL 0x5131 #define mmACP_STATUS 0x5133 #define mmACP_SOFT_RESET 0x5134 #define ACP_CONTROL__ClkEn_MASK 0x1 #define ACP_SOFT_RESET__SoftResetAud_MASK 0x100 #define ACP_SOFT_RESET__SoftResetAudDone_MASK 0x1000000 #define ACP_CLOCK_EN_TIME_OUT_VALUE 0x000000FF #define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF #define ACP_TIMEOUT_LOOP 0x000000FF #define ACP_DEVS 4 #define ACP_SRC_ID 162 enum { ACP_TILE_P1 = 0, ACP_TILE_P2, ACP_TILE_DSP0, ACP_TILE_DSP1, ACP_TILE_DSP2, }; static int acp_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->acp.parent = adev->dev; adev->acp.cgs_device = amdgpu_cgs_create_device(adev); if (!adev->acp.cgs_device) return -EINVAL; return 0; } static int acp_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->acp.cgs_device) amdgpu_cgs_destroy_device(adev->acp.cgs_device); return 0; } struct acp_pm_domain { void *adev; struct generic_pm_domain gpd; }; static int acp_poweroff(struct generic_pm_domain *genpd) { struct acp_pm_domain *apd; 
struct amdgpu_device *adev; apd = container_of(genpd, struct acp_pm_domain, gpd); if (apd != NULL) { adev = apd->adev; /* call smu to POWER GATE ACP block * smu will * 1. turn off the acp clock * 2. power off the acp tiles * 3. check and enter ulv state */ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true); } return 0; } static int acp_poweron(struct generic_pm_domain *genpd) { struct acp_pm_domain *apd; struct amdgpu_device *adev; apd = container_of(genpd, struct acp_pm_domain, gpd); if (apd != NULL) { adev = apd->adev; /* call smu to UNGATE ACP block * smu will * 1. exit ulv * 2. turn on acp clock * 3. power on acp tiles */ if (adev->powerplay.pp_funcs->set_powergating_by_smu) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false); } return 0; } static struct device *get_mfd_cell_dev(const char *device_name, int r) { char auto_dev_name[25]; struct device *dev; snprintf(auto_dev_name, sizeof(auto_dev_name), "%s.%d.auto", device_name, r); dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name); dev_info(dev, "device %s added to pm domain\n", auto_dev_name); return dev; } /** * acp_hw_init - start and test ACP block * * @adev: amdgpu_device pointer * */ static int acp_hw_init(void *handle) { int r, i; uint64_t acp_base; u32 val = 0; u32 count = 0; struct device *dev; struct i2s_platform_data *i2s_pdata = NULL; struct amdgpu_device *adev = (struct amdgpu_device *)handle; const struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP); if (!ip_block) return -EINVAL; r = amd_acp_hw_init(adev->acp.cgs_device, ip_block->version->major, ip_block->version->minor); /* -ENODEV means board uses AZ rather than ACP */ if (r == -ENODEV) { amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true); return 0; } else if (r) { return r; } if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289) return -EINVAL; acp_base = adev->rmmio_base; adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL); if (adev->acp.acp_genpd == NULL) return -ENOMEM; adev->acp.acp_genpd->gpd.name = "ACP_AUDIO"; adev->acp.acp_genpd->gpd.power_off = acp_poweroff; adev->acp.acp_genpd->gpd.power_on = acp_poweron; adev->acp.acp_genpd->adev = adev; pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false); adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell), GFP_KERNEL); if (adev->acp.acp_cell == NULL) { r = -ENOMEM; goto failure; } adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL); if (adev->acp.acp_res == NULL) { r = -ENOMEM; goto failure; } i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL); if (i2s_pdata == NULL) { r = -ENOMEM; goto failure; } switch (adev->asic_type) { case CHIP_STONEY: i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | DW_I2S_QUIRK_16BIT_IDX_OVERRIDE; break; default: i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET; } i2s_pdata[0].cap = DWC_I2S_PLAY; i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000; i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET; i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET; switch (adev->asic_type) { case CHIP_STONEY: i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | DW_I2S_QUIRK_COMP_PARAM1 | DW_I2S_QUIRK_16BIT_IDX_OVERRIDE; break; default: i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | DW_I2S_QUIRK_COMP_PARAM1; } i2s_pdata[1].cap = DWC_I2S_RECORD; i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000; i2s_pdata[1].i2s_reg_comp1 
= ACP_I2S_COMP1_CAP_REG_OFFSET; i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET; i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET; switch (adev->asic_type) { case CHIP_STONEY: i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE; break; default: break; } i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD; i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000; i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET; i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET; adev->acp.acp_res[0].name = "acp2x_dma"; adev->acp.acp_res[0].flags = IORESOURCE_MEM; adev->acp.acp_res[0].start = acp_base; adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END; adev->acp.acp_res[1].name = "acp2x_dw_i2s_play"; adev->acp.acp_res[1].flags = IORESOURCE_MEM; adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START; adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END; adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap"; adev->acp.acp_res[2].flags = IORESOURCE_MEM; adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START; adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END; adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap"; adev->acp.acp_res[3].flags = IORESOURCE_MEM; adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START; adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END; adev->acp.acp_res[4].name = "acp2x_dma_irq"; adev->acp.acp_res[4].flags = IORESOURCE_IRQ; adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162); adev->acp.acp_res[4].end = adev->acp.acp_res[4].start; adev->acp.acp_cell[0].name = "acp_audio_dma"; adev->acp.acp_cell[0].num_resources = 5; adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0]; adev->acp.acp_cell[0].platform_data = &adev->asic_type; adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type); adev->acp.acp_cell[1].name = "designware-i2s"; adev->acp.acp_cell[1].num_resources = 1; adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1]; adev->acp.acp_cell[1].platform_data = &i2s_pdata[0]; adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data); adev->acp.acp_cell[2].name = "designware-i2s"; adev->acp.acp_cell[2].num_resources = 1; adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2]; adev->acp.acp_cell[2].platform_data = &i2s_pdata[1]; adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data); adev->acp.acp_cell[3].name = "designware-i2s"; adev->acp.acp_cell[3].num_resources = 1; adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3]; adev->acp.acp_cell[3].platform_data = &i2s_pdata[2]; adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data); r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, ACP_DEVS); if (r) goto failure; for (i = 0; i < ACP_DEVS ; i++) { dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); if (r) { dev_err(dev, "Failed to add dev to genpd\n"); goto failure; } } /* Assert Soft reset of ACP */ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET); val |= ACP_SOFT_RESET__SoftResetAud_MASK; cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val); count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE; while (true) { val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET); if (ACP_SOFT_RESET__SoftResetAudDone_MASK == (val & ACP_SOFT_RESET__SoftResetAudDone_MASK)) break; if (--count == 0) { dev_err(&adev->pdev->dev, "Failed to reset ACP\n"); r = -ETIMEDOUT; goto failure; } udelay(100); } /* Enable clock to ACP and wait until the clock is enabled */ val = 
cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL); val = val | ACP_CONTROL__ClkEn_MASK; cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val); count = ACP_CLOCK_EN_TIME_OUT_VALUE; while (true) { val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS); if (val & (u32) 0x1) break; if (--count == 0) { dev_err(&adev->pdev->dev, "Failed to reset ACP\n"); r = -ETIMEDOUT; goto failure; } udelay(100); } /* Deassert the SOFT RESET flags */ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET); val &= ~ACP_SOFT_RESET__SoftResetAud_MASK; cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val); return 0; failure: kfree(i2s_pdata); kfree(adev->acp.acp_res); kfree(adev->acp.acp_cell); kfree(adev->acp.acp_genpd); return r; } /** * acp_hw_fini - stop the hardware block * * @adev: amdgpu_device pointer * */ static int acp_hw_fini(void *handle) { int i, ret; u32 val = 0; u32 count = 0; struct device *dev; struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* return early if no ACP */ if (!adev->acp.acp_genpd) { amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false); return 0; } /* Assert Soft reset of ACP */ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET); val |= ACP_SOFT_RESET__SoftResetAud_MASK; cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val); count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE; while (true) { val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET); if (ACP_SOFT_RESET__SoftResetAudDone_MASK == (val & ACP_SOFT_RESET__SoftResetAudDone_MASK)) break; if (--count == 0) { dev_err(&adev->pdev->dev, "Failed to reset ACP\n"); return -ETIMEDOUT; } udelay(100); } /* Disable ACP clock */ val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL); val &= ~ACP_CONTROL__ClkEn_MASK; cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val); count = ACP_CLOCK_EN_TIME_OUT_VALUE; while (true) { val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS); if (val & (u32) 0x1) break; if (--count == 0) { dev_err(&adev->pdev->dev, "Failed to reset ACP\n"); return -ETIMEDOUT; } udelay(100); } for (i = 0; i < ACP_DEVS ; i++) { dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); ret = pm_genpd_remove_device(dev); /* If removal fails, dont giveup and try rest */ if (ret) dev_err(dev, "remove dev from genpd failed\n"); } mfd_remove_devices(adev->acp.parent); kfree(adev->acp.acp_res); kfree(adev->acp.acp_genpd); kfree(adev->acp.acp_cell); return 0; } static int acp_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* power up on suspend */ if (!adev->acp.acp_cell) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false); return 0; } static int acp_resume(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* power down again on resume */ if (!adev->acp.acp_cell) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true); return 0; } static int acp_early_init(void *handle) { return 0; } static bool acp_is_idle(void *handle) { return true; } static int acp_wait_for_idle(void *handle) { return 0; } static int acp_soft_reset(void *handle) { return 0; } static int acp_set_clockgating_state(void *handle, enum amd_clockgating_state state) { return 0; } static int acp_set_powergating_state(void *handle, enum amd_powergating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = state == AMD_PG_STATE_GATE ? 
true : false; if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable); return 0; } static const struct amd_ip_funcs acp_ip_funcs = { .name = "acp_ip", .early_init = acp_early_init, .late_init = NULL, .sw_init = acp_sw_init, .sw_fini = acp_sw_fini, .hw_init = acp_hw_init, .hw_fini = acp_hw_fini, .suspend = acp_suspend, .resume = acp_resume, .is_idle = acp_is_idle, .wait_for_idle = acp_wait_for_idle, .soft_reset = acp_soft_reset, .set_clockgating_state = acp_set_clockgating_state, .set_powergating_state = acp_set_powergating_state, }; const struct amdgpu_ip_block_version acp_ip_block = { .type = AMD_IP_BLOCK_TYPE_ACP, .major = 2, .minor = 2, .rev = 0, .funcs = &acp_ip_funcs, };
./CrossVul/dataset_final_sorted/CWE-400/c/good_1257_0
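/*
 * Illustrative sketch (not part of the dataset file above): the acp_hw_init()
 * and acp_hw_fini() code in good_1257_0 repeats a "poll a status bit until it
 * is set or time out" loop around cgs_read_register(), counting down from a
 * timeout value with udelay(100) between reads.  Below is a minimal,
 * self-contained userspace analogue of that pattern; reg_read(), POLL_RETRIES
 * and the simulated register are assumptions made only so the example runs.
 */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <errno.h>

#define POLL_RETRIES 100   /* analogous to ACP_SOFT_RESET_DONE_TIME_OUT_VALUE */
#define DONE_MASK    0x1u  /* analogous to the SoftResetAudDone mask */

static uint32_t fake_reg;          /* stands in for the MMIO status register */

static uint32_t reg_read(void)     /* stands in for cgs_read_register() */
{
    static int reads;
    /* pretend the hardware reports completion after a few reads */
    if (++reads > 3)
        fake_reg |= DONE_MASK;
    return fake_reg;
}

/* Poll until (reg & mask) == mask, or give up after POLL_RETRIES * 100us. */
static int poll_bit_set(uint32_t mask)
{
    int count = POLL_RETRIES;

    while ((reg_read() & mask) != mask) {
        if (--count == 0)
            return -ETIMEDOUT;
        usleep(100);               /* the kernel code uses udelay(100) */
    }
    return 0;
}

int main(void)
{
    int ret = poll_bit_set(DONE_MASK);

    printf("poll result: %d (%s)\n", ret, ret ? "timed out" : "bit set");
    return ret ? 1 : 0;
}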
crossvul-cpp_data_good_1263_0
/* * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "htc.h" static int htc_issue_send(struct htc_target *target, struct sk_buff* skb, u16 len, u8 flags, u8 epid) { struct htc_frame_hdr *hdr; struct htc_endpoint *endpoint = &target->endpoint[epid]; int status; hdr = skb_push(skb, sizeof(struct htc_frame_hdr)); hdr->endpoint_id = epid; hdr->flags = flags; hdr->payload_len = cpu_to_be16(len); status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb); return status; } static struct htc_endpoint *get_next_avail_ep(struct htc_endpoint *endpoint) { enum htc_endpoint_id avail_epid; for (avail_epid = (ENDPOINT_MAX - 1); avail_epid > ENDPOINT0; avail_epid--) if (endpoint[avail_epid].service_id == 0) return &endpoint[avail_epid]; return NULL; } static u8 service_to_ulpipe(u16 service_id) { switch (service_id) { case WMI_CONTROL_SVC: return 4; case WMI_BEACON_SVC: case WMI_CAB_SVC: case WMI_UAPSD_SVC: case WMI_MGMT_SVC: case WMI_DATA_VO_SVC: case WMI_DATA_VI_SVC: case WMI_DATA_BE_SVC: case WMI_DATA_BK_SVC: return 1; default: return 0; } } static u8 service_to_dlpipe(u16 service_id) { switch (service_id) { case WMI_CONTROL_SVC: return 3; case WMI_BEACON_SVC: case WMI_CAB_SVC: case WMI_UAPSD_SVC: case WMI_MGMT_SVC: case WMI_DATA_VO_SVC: case WMI_DATA_VI_SVC: case WMI_DATA_BE_SVC: case WMI_DATA_BK_SVC: return 2; default: return 0; } } static void htc_process_target_rdy(struct htc_target *target, void *buf) { struct htc_endpoint *endpoint; struct htc_ready_msg *htc_ready_msg = (struct htc_ready_msg *) buf; target->credit_size = be16_to_cpu(htc_ready_msg->credit_size); endpoint = &target->endpoint[ENDPOINT0]; endpoint->service_id = HTC_CTRL_RSVD_SVC; endpoint->max_msglen = HTC_MAX_CONTROL_MESSAGE_LENGTH; atomic_inc(&target->tgt_ready); complete(&target->target_wait); } static void htc_process_conn_rsp(struct htc_target *target, struct htc_frame_hdr *htc_hdr) { struct htc_conn_svc_rspmsg *svc_rspmsg; struct htc_endpoint *endpoint, *tmp_endpoint = NULL; u16 service_id; u16 max_msglen; enum htc_endpoint_id epid, tepid; svc_rspmsg = (struct htc_conn_svc_rspmsg *) ((void *) htc_hdr + sizeof(struct htc_frame_hdr)); if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) { epid = svc_rspmsg->endpoint_id; service_id = be16_to_cpu(svc_rspmsg->service_id); max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len); endpoint = &target->endpoint[epid]; for (tepid = (ENDPOINT_MAX - 1); tepid > ENDPOINT0; tepid--) { tmp_endpoint = &target->endpoint[tepid]; if (tmp_endpoint->service_id == service_id) { tmp_endpoint->service_id = 0; break; } } if (tepid == ENDPOINT0) return; endpoint->service_id = service_id; endpoint->max_txqdepth = tmp_endpoint->max_txqdepth; endpoint->ep_callbacks = tmp_endpoint->ep_callbacks; endpoint->ul_pipeid = tmp_endpoint->ul_pipeid; 
endpoint->dl_pipeid = tmp_endpoint->dl_pipeid; endpoint->max_msglen = max_msglen; target->conn_rsp_epid = epid; complete(&target->cmd_wait); } else { target->conn_rsp_epid = ENDPOINT_UNUSED; } } static int htc_config_pipe_credits(struct htc_target *target) { struct sk_buff *skb; struct htc_config_pipe_msg *cp_msg; int ret; unsigned long time_left; skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC); if (!skb) { dev_err(target->dev, "failed to allocate send buffer\n"); return -ENOMEM; } skb_reserve(skb, sizeof(struct htc_frame_hdr)); cp_msg = skb_put(skb, sizeof(struct htc_config_pipe_msg)); cp_msg->message_id = cpu_to_be16(HTC_MSG_CONFIG_PIPE_ID); cp_msg->pipe_id = USB_WLAN_TX_PIPE; cp_msg->credits = target->credits; target->htc_flags |= HTC_OP_CONFIG_PIPE_CREDITS; ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0); if (ret) goto err; time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); if (!time_left) { dev_err(target->dev, "HTC credit config timeout\n"); kfree_skb(skb); return -ETIMEDOUT; } return 0; err: kfree_skb(skb); return -EINVAL; } static int htc_setup_complete(struct htc_target *target) { struct sk_buff *skb; struct htc_comp_msg *comp_msg; int ret = 0; unsigned long time_left; skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC); if (!skb) { dev_err(target->dev, "failed to allocate send buffer\n"); return -ENOMEM; } skb_reserve(skb, sizeof(struct htc_frame_hdr)); comp_msg = skb_put(skb, sizeof(struct htc_comp_msg)); comp_msg->msg_id = cpu_to_be16(HTC_MSG_SETUP_COMPLETE_ID); target->htc_flags |= HTC_OP_START_WAIT; ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0); if (ret) goto err; time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); if (!time_left) { dev_err(target->dev, "HTC start timeout\n"); kfree_skb(skb); return -ETIMEDOUT; } return 0; err: kfree_skb(skb); return -EINVAL; } /* HTC APIs */ int htc_init(struct htc_target *target) { int ret; ret = htc_config_pipe_credits(target); if (ret) return ret; return htc_setup_complete(target); } int htc_connect_service(struct htc_target *target, struct htc_service_connreq *service_connreq, enum htc_endpoint_id *conn_rsp_epid) { struct sk_buff *skb; struct htc_endpoint *endpoint; struct htc_conn_svc_msg *conn_msg; int ret; unsigned long time_left; /* Find an available endpoint */ endpoint = get_next_avail_ep(target->endpoint); if (!endpoint) { dev_err(target->dev, "Endpoint is not available for service %d\n", service_connreq->service_id); return -EINVAL; } endpoint->service_id = service_connreq->service_id; endpoint->max_txqdepth = service_connreq->max_send_qdepth; endpoint->ul_pipeid = service_to_ulpipe(service_connreq->service_id); endpoint->dl_pipeid = service_to_dlpipe(service_connreq->service_id); endpoint->ep_callbacks = service_connreq->ep_callbacks; skb = alloc_skb(sizeof(struct htc_conn_svc_msg) + sizeof(struct htc_frame_hdr), GFP_ATOMIC); if (!skb) { dev_err(target->dev, "Failed to allocate buf to send" "service connect req\n"); return -ENOMEM; } skb_reserve(skb, sizeof(struct htc_frame_hdr)); conn_msg = skb_put(skb, sizeof(struct htc_conn_svc_msg)); conn_msg->service_id = cpu_to_be16(service_connreq->service_id); conn_msg->msg_id = cpu_to_be16(HTC_MSG_CONNECT_SERVICE_ID); conn_msg->con_flags = cpu_to_be16(service_connreq->con_flags); conn_msg->dl_pipeid = endpoint->dl_pipeid; conn_msg->ul_pipeid = endpoint->ul_pipeid; ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0); if (ret) goto err; time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); if 
(!time_left) { dev_err(target->dev, "Service connection timeout for: %d\n", service_connreq->service_id); kfree_skb(skb); return -ETIMEDOUT; } *conn_rsp_epid = target->conn_rsp_epid; return 0; err: kfree_skb(skb); return ret; } int htc_send(struct htc_target *target, struct sk_buff *skb) { struct ath9k_htc_tx_ctl *tx_ctl; tx_ctl = HTC_SKB_CB(skb); return htc_issue_send(target, skb, skb->len, 0, tx_ctl->epid); } int htc_send_epid(struct htc_target *target, struct sk_buff *skb, enum htc_endpoint_id epid) { return htc_issue_send(target, skb, skb->len, 0, epid); } void htc_stop(struct htc_target *target) { target->hif->stop(target->hif_dev); } void htc_start(struct htc_target *target) { target->hif->start(target->hif_dev); } void htc_sta_drain(struct htc_target *target, u8 idx) { target->hif->sta_drain(target->hif_dev, idx); } void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle, struct sk_buff *skb, bool txok) { struct htc_endpoint *endpoint; struct htc_frame_hdr *htc_hdr = NULL; if (htc_handle->htc_flags & HTC_OP_CONFIG_PIPE_CREDITS) { complete(&htc_handle->cmd_wait); htc_handle->htc_flags &= ~HTC_OP_CONFIG_PIPE_CREDITS; goto ret; } if (htc_handle->htc_flags & HTC_OP_START_WAIT) { complete(&htc_handle->cmd_wait); htc_handle->htc_flags &= ~HTC_OP_START_WAIT; goto ret; } if (skb) { htc_hdr = (struct htc_frame_hdr *) skb->data; endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id]; skb_pull(skb, sizeof(struct htc_frame_hdr)); if (endpoint->ep_callbacks.tx) { endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv, skb, htc_hdr->endpoint_id, txok); } else { kfree_skb(skb); } } return; ret: kfree_skb(skb); } static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle, struct sk_buff *skb) { uint32_t *pattern = (uint32_t *)skb->data; switch (*pattern) { case 0x33221199: { struct htc_panic_bad_vaddr *htc_panic; htc_panic = (struct htc_panic_bad_vaddr *) skb->data; dev_err(htc_handle->dev, "ath: firmware panic! " "exccause: 0x%08x; pc: 0x%08x; badvaddr: 0x%08x.\n", htc_panic->exccause, htc_panic->pc, htc_panic->badvaddr); break; } case 0x33221299: { struct htc_panic_bad_epid *htc_panic; htc_panic = (struct htc_panic_bad_epid *) skb->data; dev_err(htc_handle->dev, "ath: firmware panic! " "bad epid: 0x%08x\n", htc_panic->epid); break; } default: dev_err(htc_handle->dev, "ath: unknown panic pattern!\n"); break; } } /* * HTC Messages are handled directly here and the obtained SKB * is freed. * * Service messages (Data, WMI) passed to the corresponding * endpoint RX handlers, which have to free the SKB. 
*/ void ath9k_htc_rx_msg(struct htc_target *htc_handle, struct sk_buff *skb, u32 len, u8 pipe_id) { struct htc_frame_hdr *htc_hdr; enum htc_endpoint_id epid; struct htc_endpoint *endpoint; __be16 *msg_id; if (!htc_handle || !skb) return; htc_hdr = (struct htc_frame_hdr *) skb->data; epid = htc_hdr->endpoint_id; if (epid == 0x99) { ath9k_htc_fw_panic_report(htc_handle, skb); kfree_skb(skb); return; } if (epid < 0 || epid >= ENDPOINT_MAX) { if (pipe_id != USB_REG_IN_PIPE) dev_kfree_skb_any(skb); else kfree_skb(skb); return; } if (epid == ENDPOINT0) { /* Handle trailer */ if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) { if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000) /* Move past the Watchdog pattern */ htc_hdr = (struct htc_frame_hdr *)(skb->data + 4); } /* Get the message ID */ msg_id = (__be16 *) ((void *) htc_hdr + sizeof(struct htc_frame_hdr)); /* Now process HTC messages */ switch (be16_to_cpu(*msg_id)) { case HTC_MSG_READY_ID: htc_process_target_rdy(htc_handle, htc_hdr); break; case HTC_MSG_CONNECT_SERVICE_RESPONSE_ID: htc_process_conn_rsp(htc_handle, htc_hdr); break; default: break; } kfree_skb(skb); } else { if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) skb_trim(skb, len - htc_hdr->control[0]); skb_pull(skb, sizeof(struct htc_frame_hdr)); endpoint = &htc_handle->endpoint[epid]; if (endpoint->ep_callbacks.rx) endpoint->ep_callbacks.rx(endpoint->ep_callbacks.priv, skb, epid); } } struct htc_target *ath9k_htc_hw_alloc(void *hif_handle, struct ath9k_htc_hif *hif, struct device *dev) { struct htc_endpoint *endpoint; struct htc_target *target; target = kzalloc(sizeof(struct htc_target), GFP_KERNEL); if (!target) return NULL; init_completion(&target->target_wait); init_completion(&target->cmd_wait); target->hif = hif; target->hif_dev = hif_handle; target->dev = dev; /* Assign control endpoint pipe IDs */ endpoint = &target->endpoint[ENDPOINT0]; endpoint->ul_pipeid = hif->control_ul_pipe; endpoint->dl_pipeid = hif->control_dl_pipe; atomic_set(&target->tgt_ready, 0); return target; } void ath9k_htc_hw_free(struct htc_target *htc) { kfree(htc); } int ath9k_htc_hw_init(struct htc_target *target, struct device *dev, u16 devid, char *product, u32 drv_info) { if (ath9k_htc_probe_device(target, dev, devid, product, drv_info)) { pr_err("Failed to initialize the device\n"); return -ENODEV; } return 0; } void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug) { if (target) ath9k_htc_disconnect_device(target, hot_unplug); }
./CrossVul/dataset_final_sorted/CWE-400/c/good_1263_0
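/*
 * Illustrative sketch (not part of htc_hst.c above): htc_issue_send() in the
 * file above prepends a struct htc_frame_hdr to the outgoing skb with
 * skb_push() and stores the payload length big-endian via cpu_to_be16().  The
 * standalone userspace analogue below builds the same "header in front of
 * payload" layout into a flat buffer.  Only the fields actually referenced
 * above (endpoint_id, flags, payload_len) are modelled; the exact on-wire
 * struct layout and the helper names here are assumptions for illustration.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>   /* htons() for the big-endian length */

struct frame_hdr {
    uint8_t  endpoint_id;
    uint8_t  flags;
    uint16_t payload_len;          /* big-endian, like cpu_to_be16(len) */
} __attribute__((packed));

/* Write header + payload into out[]; return total length, or -1 if too small. */
static int build_frame(uint8_t *out, size_t out_len,
                       uint8_t epid, uint8_t flags,
                       const void *payload, uint16_t len)
{
    struct frame_hdr hdr;

    if (out_len < sizeof(hdr) + len)
        return -1;

    hdr.endpoint_id = epid;
    hdr.flags = flags;
    hdr.payload_len = htons(len);

    memcpy(out, &hdr, sizeof(hdr));       /* header goes in front, as skb_push() does */
    memcpy(out + sizeof(hdr), payload, len);
    return (int)(sizeof(hdr) + len);
}

int main(void)
{
    uint8_t buf[64];
    const char msg[] = "hello";
    int n = build_frame(buf, sizeof(buf), 1, 0, msg, sizeof(msg));

    printf("frame length: %d, first bytes: %02x %02x %02x %02x\n",
           n, buf[0], buf[1], buf[2], buf[3]);
    return n < 0;
}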
crossvul-cpp_data_bad_1263_0
/* * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "htc.h" static int htc_issue_send(struct htc_target *target, struct sk_buff* skb, u16 len, u8 flags, u8 epid) { struct htc_frame_hdr *hdr; struct htc_endpoint *endpoint = &target->endpoint[epid]; int status; hdr = skb_push(skb, sizeof(struct htc_frame_hdr)); hdr->endpoint_id = epid; hdr->flags = flags; hdr->payload_len = cpu_to_be16(len); status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb); return status; } static struct htc_endpoint *get_next_avail_ep(struct htc_endpoint *endpoint) { enum htc_endpoint_id avail_epid; for (avail_epid = (ENDPOINT_MAX - 1); avail_epid > ENDPOINT0; avail_epid--) if (endpoint[avail_epid].service_id == 0) return &endpoint[avail_epid]; return NULL; } static u8 service_to_ulpipe(u16 service_id) { switch (service_id) { case WMI_CONTROL_SVC: return 4; case WMI_BEACON_SVC: case WMI_CAB_SVC: case WMI_UAPSD_SVC: case WMI_MGMT_SVC: case WMI_DATA_VO_SVC: case WMI_DATA_VI_SVC: case WMI_DATA_BE_SVC: case WMI_DATA_BK_SVC: return 1; default: return 0; } } static u8 service_to_dlpipe(u16 service_id) { switch (service_id) { case WMI_CONTROL_SVC: return 3; case WMI_BEACON_SVC: case WMI_CAB_SVC: case WMI_UAPSD_SVC: case WMI_MGMT_SVC: case WMI_DATA_VO_SVC: case WMI_DATA_VI_SVC: case WMI_DATA_BE_SVC: case WMI_DATA_BK_SVC: return 2; default: return 0; } } static void htc_process_target_rdy(struct htc_target *target, void *buf) { struct htc_endpoint *endpoint; struct htc_ready_msg *htc_ready_msg = (struct htc_ready_msg *) buf; target->credit_size = be16_to_cpu(htc_ready_msg->credit_size); endpoint = &target->endpoint[ENDPOINT0]; endpoint->service_id = HTC_CTRL_RSVD_SVC; endpoint->max_msglen = HTC_MAX_CONTROL_MESSAGE_LENGTH; atomic_inc(&target->tgt_ready); complete(&target->target_wait); } static void htc_process_conn_rsp(struct htc_target *target, struct htc_frame_hdr *htc_hdr) { struct htc_conn_svc_rspmsg *svc_rspmsg; struct htc_endpoint *endpoint, *tmp_endpoint = NULL; u16 service_id; u16 max_msglen; enum htc_endpoint_id epid, tepid; svc_rspmsg = (struct htc_conn_svc_rspmsg *) ((void *) htc_hdr + sizeof(struct htc_frame_hdr)); if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) { epid = svc_rspmsg->endpoint_id; service_id = be16_to_cpu(svc_rspmsg->service_id); max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len); endpoint = &target->endpoint[epid]; for (tepid = (ENDPOINT_MAX - 1); tepid > ENDPOINT0; tepid--) { tmp_endpoint = &target->endpoint[tepid]; if (tmp_endpoint->service_id == service_id) { tmp_endpoint->service_id = 0; break; } } if (tepid == ENDPOINT0) return; endpoint->service_id = service_id; endpoint->max_txqdepth = tmp_endpoint->max_txqdepth; endpoint->ep_callbacks = tmp_endpoint->ep_callbacks; endpoint->ul_pipeid = tmp_endpoint->ul_pipeid; 
endpoint->dl_pipeid = tmp_endpoint->dl_pipeid; endpoint->max_msglen = max_msglen; target->conn_rsp_epid = epid; complete(&target->cmd_wait); } else { target->conn_rsp_epid = ENDPOINT_UNUSED; } } static int htc_config_pipe_credits(struct htc_target *target) { struct sk_buff *skb; struct htc_config_pipe_msg *cp_msg; int ret; unsigned long time_left; skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC); if (!skb) { dev_err(target->dev, "failed to allocate send buffer\n"); return -ENOMEM; } skb_reserve(skb, sizeof(struct htc_frame_hdr)); cp_msg = skb_put(skb, sizeof(struct htc_config_pipe_msg)); cp_msg->message_id = cpu_to_be16(HTC_MSG_CONFIG_PIPE_ID); cp_msg->pipe_id = USB_WLAN_TX_PIPE; cp_msg->credits = target->credits; target->htc_flags |= HTC_OP_CONFIG_PIPE_CREDITS; ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0); if (ret) goto err; time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); if (!time_left) { dev_err(target->dev, "HTC credit config timeout\n"); return -ETIMEDOUT; } return 0; err: kfree_skb(skb); return -EINVAL; } static int htc_setup_complete(struct htc_target *target) { struct sk_buff *skb; struct htc_comp_msg *comp_msg; int ret = 0; unsigned long time_left; skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC); if (!skb) { dev_err(target->dev, "failed to allocate send buffer\n"); return -ENOMEM; } skb_reserve(skb, sizeof(struct htc_frame_hdr)); comp_msg = skb_put(skb, sizeof(struct htc_comp_msg)); comp_msg->msg_id = cpu_to_be16(HTC_MSG_SETUP_COMPLETE_ID); target->htc_flags |= HTC_OP_START_WAIT; ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0); if (ret) goto err; time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); if (!time_left) { dev_err(target->dev, "HTC start timeout\n"); return -ETIMEDOUT; } return 0; err: kfree_skb(skb); return -EINVAL; } /* HTC APIs */ int htc_init(struct htc_target *target) { int ret; ret = htc_config_pipe_credits(target); if (ret) return ret; return htc_setup_complete(target); } int htc_connect_service(struct htc_target *target, struct htc_service_connreq *service_connreq, enum htc_endpoint_id *conn_rsp_epid) { struct sk_buff *skb; struct htc_endpoint *endpoint; struct htc_conn_svc_msg *conn_msg; int ret; unsigned long time_left; /* Find an available endpoint */ endpoint = get_next_avail_ep(target->endpoint); if (!endpoint) { dev_err(target->dev, "Endpoint is not available for service %d\n", service_connreq->service_id); return -EINVAL; } endpoint->service_id = service_connreq->service_id; endpoint->max_txqdepth = service_connreq->max_send_qdepth; endpoint->ul_pipeid = service_to_ulpipe(service_connreq->service_id); endpoint->dl_pipeid = service_to_dlpipe(service_connreq->service_id); endpoint->ep_callbacks = service_connreq->ep_callbacks; skb = alloc_skb(sizeof(struct htc_conn_svc_msg) + sizeof(struct htc_frame_hdr), GFP_ATOMIC); if (!skb) { dev_err(target->dev, "Failed to allocate buf to send" "service connect req\n"); return -ENOMEM; } skb_reserve(skb, sizeof(struct htc_frame_hdr)); conn_msg = skb_put(skb, sizeof(struct htc_conn_svc_msg)); conn_msg->service_id = cpu_to_be16(service_connreq->service_id); conn_msg->msg_id = cpu_to_be16(HTC_MSG_CONNECT_SERVICE_ID); conn_msg->con_flags = cpu_to_be16(service_connreq->con_flags); conn_msg->dl_pipeid = endpoint->dl_pipeid; conn_msg->ul_pipeid = endpoint->ul_pipeid; ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0); if (ret) goto err; time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); if (!time_left) { dev_err(target->dev, 
"Service connection timeout for: %d\n", service_connreq->service_id); return -ETIMEDOUT; } *conn_rsp_epid = target->conn_rsp_epid; return 0; err: kfree_skb(skb); return ret; } int htc_send(struct htc_target *target, struct sk_buff *skb) { struct ath9k_htc_tx_ctl *tx_ctl; tx_ctl = HTC_SKB_CB(skb); return htc_issue_send(target, skb, skb->len, 0, tx_ctl->epid); } int htc_send_epid(struct htc_target *target, struct sk_buff *skb, enum htc_endpoint_id epid) { return htc_issue_send(target, skb, skb->len, 0, epid); } void htc_stop(struct htc_target *target) { target->hif->stop(target->hif_dev); } void htc_start(struct htc_target *target) { target->hif->start(target->hif_dev); } void htc_sta_drain(struct htc_target *target, u8 idx) { target->hif->sta_drain(target->hif_dev, idx); } void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle, struct sk_buff *skb, bool txok) { struct htc_endpoint *endpoint; struct htc_frame_hdr *htc_hdr = NULL; if (htc_handle->htc_flags & HTC_OP_CONFIG_PIPE_CREDITS) { complete(&htc_handle->cmd_wait); htc_handle->htc_flags &= ~HTC_OP_CONFIG_PIPE_CREDITS; goto ret; } if (htc_handle->htc_flags & HTC_OP_START_WAIT) { complete(&htc_handle->cmd_wait); htc_handle->htc_flags &= ~HTC_OP_START_WAIT; goto ret; } if (skb) { htc_hdr = (struct htc_frame_hdr *) skb->data; endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id]; skb_pull(skb, sizeof(struct htc_frame_hdr)); if (endpoint->ep_callbacks.tx) { endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv, skb, htc_hdr->endpoint_id, txok); } else { kfree_skb(skb); } } return; ret: kfree_skb(skb); } static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle, struct sk_buff *skb) { uint32_t *pattern = (uint32_t *)skb->data; switch (*pattern) { case 0x33221199: { struct htc_panic_bad_vaddr *htc_panic; htc_panic = (struct htc_panic_bad_vaddr *) skb->data; dev_err(htc_handle->dev, "ath: firmware panic! " "exccause: 0x%08x; pc: 0x%08x; badvaddr: 0x%08x.\n", htc_panic->exccause, htc_panic->pc, htc_panic->badvaddr); break; } case 0x33221299: { struct htc_panic_bad_epid *htc_panic; htc_panic = (struct htc_panic_bad_epid *) skb->data; dev_err(htc_handle->dev, "ath: firmware panic! " "bad epid: 0x%08x\n", htc_panic->epid); break; } default: dev_err(htc_handle->dev, "ath: unknown panic pattern!\n"); break; } } /* * HTC Messages are handled directly here and the obtained SKB * is freed. * * Service messages (Data, WMI) passed to the corresponding * endpoint RX handlers, which have to free the SKB. 
*/ void ath9k_htc_rx_msg(struct htc_target *htc_handle, struct sk_buff *skb, u32 len, u8 pipe_id) { struct htc_frame_hdr *htc_hdr; enum htc_endpoint_id epid; struct htc_endpoint *endpoint; __be16 *msg_id; if (!htc_handle || !skb) return; htc_hdr = (struct htc_frame_hdr *) skb->data; epid = htc_hdr->endpoint_id; if (epid == 0x99) { ath9k_htc_fw_panic_report(htc_handle, skb); kfree_skb(skb); return; } if (epid < 0 || epid >= ENDPOINT_MAX) { if (pipe_id != USB_REG_IN_PIPE) dev_kfree_skb_any(skb); else kfree_skb(skb); return; } if (epid == ENDPOINT0) { /* Handle trailer */ if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) { if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000) /* Move past the Watchdog pattern */ htc_hdr = (struct htc_frame_hdr *)(skb->data + 4); } /* Get the message ID */ msg_id = (__be16 *) ((void *) htc_hdr + sizeof(struct htc_frame_hdr)); /* Now process HTC messages */ switch (be16_to_cpu(*msg_id)) { case HTC_MSG_READY_ID: htc_process_target_rdy(htc_handle, htc_hdr); break; case HTC_MSG_CONNECT_SERVICE_RESPONSE_ID: htc_process_conn_rsp(htc_handle, htc_hdr); break; default: break; } kfree_skb(skb); } else { if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) skb_trim(skb, len - htc_hdr->control[0]); skb_pull(skb, sizeof(struct htc_frame_hdr)); endpoint = &htc_handle->endpoint[epid]; if (endpoint->ep_callbacks.rx) endpoint->ep_callbacks.rx(endpoint->ep_callbacks.priv, skb, epid); } } struct htc_target *ath9k_htc_hw_alloc(void *hif_handle, struct ath9k_htc_hif *hif, struct device *dev) { struct htc_endpoint *endpoint; struct htc_target *target; target = kzalloc(sizeof(struct htc_target), GFP_KERNEL); if (!target) return NULL; init_completion(&target->target_wait); init_completion(&target->cmd_wait); target->hif = hif; target->hif_dev = hif_handle; target->dev = dev; /* Assign control endpoint pipe IDs */ endpoint = &target->endpoint[ENDPOINT0]; endpoint->ul_pipeid = hif->control_ul_pipe; endpoint->dl_pipeid = hif->control_dl_pipe; atomic_set(&target->tgt_ready, 0); return target; } void ath9k_htc_hw_free(struct htc_target *htc) { kfree(htc); } int ath9k_htc_hw_init(struct htc_target *target, struct device *dev, u16 devid, char *product, u32 drv_info) { if (ath9k_htc_probe_device(target, dev, devid, product, drv_info)) { pr_err("Failed to initialize the device\n"); return -ENODEV; } return 0; } void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug) { if (target) ath9k_htc_disconnect_device(target, hot_unplug); }
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1263_0
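/*
 * Illustrative sketch (not part of either file above): good_1263_0 and
 * bad_1263_0 differ in their wait_for_completion_timeout() paths -- the first
 * variant calls kfree_skb(skb) before returning -ETIMEDOUT in
 * htc_config_pipe_credits(), htc_setup_complete() and htc_connect_service(),
 * while the second returns with the buffer still allocated.  Below is a
 * minimal userspace sketch of the same "allocate, try, release on every exit"
 * shape; alloc_msg(), send_and_wait() and their behaviour are assumptions
 * made only for demonstration, not the driver's actual skb ownership rules.
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static void *alloc_msg(size_t len) { return malloc(len); }           /* stand-in for alloc_skb() */
static int send_and_wait(void *msg) { (void)msg; return -ETIMEDOUT; } /* pretend the wait times out */

static int do_request(void)
{
    void *msg = alloc_msg(64);
    int ret;

    if (!msg)
        return -ENOMEM;

    ret = send_and_wait(msg);
    if (ret) {
        free(msg);          /* release on the timeout/error path as well */
        return ret;
    }

    free(msg);              /* normal completion */
    return 0;
}

int main(void)
{
    printf("do_request() -> %d\n", do_request());
    return 0;
}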
crossvul-cpp_data_good_1876_1
/* $Id: fpm_children.c,v 1.32.2.2 2008/12/13 03:21:18 anight Exp $ */ /* (c) 2007,2008 Andrei Nigmatulin */ #include "fpm_config.h" #include <sys/types.h> #include <sys/wait.h> #include <time.h> #include <unistd.h> #include <string.h> #include <stdio.h> #include "fpm.h" #include "fpm_children.h" #include "fpm_signals.h" #include "fpm_worker_pool.h" #include "fpm_sockets.h" #include "fpm_process_ctl.h" #include "fpm_php.h" #include "fpm_conf.h" #include "fpm_cleanup.h" #include "fpm_events.h" #include "fpm_clock.h" #include "fpm_stdio.h" #include "fpm_unix.h" #include "fpm_env.h" #include "fpm_scoreboard.h" #include "fpm_status.h" #include "fpm_log.h" #include "zlog.h" static time_t *last_faults; static int fault; static void fpm_children_cleanup(int which, void *arg) /* {{{ */ { free(last_faults); } /* }}} */ static struct fpm_child_s *fpm_child_alloc() /* {{{ */ { struct fpm_child_s *ret; ret = malloc(sizeof(struct fpm_child_s)); if (!ret) { return 0; } memset(ret, 0, sizeof(*ret)); ret->scoreboard_i = -1; return ret; } /* }}} */ static void fpm_child_free(struct fpm_child_s *child) /* {{{ */ { free(child); } /* }}} */ static void fpm_child_close(struct fpm_child_s *child, int in_event_loop) /* {{{ */ { if (child->fd_stdout != -1) { if (in_event_loop) { fpm_event_fire(&child->ev_stdout); } if (child->fd_stdout != -1) { close(child->fd_stdout); } } if (child->fd_stderr != -1) { if (in_event_loop) { fpm_event_fire(&child->ev_stderr); } if (child->fd_stderr != -1) { close(child->fd_stderr); } } fpm_child_free(child); } /* }}} */ static void fpm_child_link(struct fpm_child_s *child) /* {{{ */ { struct fpm_worker_pool_s *wp = child->wp; ++wp->running_children; ++fpm_globals.running_children; child->next = wp->children; if (child->next) { child->next->prev = child; } child->prev = 0; wp->children = child; } /* }}} */ static void fpm_child_unlink(struct fpm_child_s *child) /* {{{ */ { --child->wp->running_children; --fpm_globals.running_children; if (child->prev) { child->prev->next = child->next; } else { child->wp->children = child->next; } if (child->next) { child->next->prev = child->prev; } } /* }}} */ static struct fpm_child_s *fpm_child_find(pid_t pid) /* {{{ */ { struct fpm_worker_pool_s *wp; struct fpm_child_s *child = 0; for (wp = fpm_worker_all_pools; wp; wp = wp->next) { for (child = wp->children; child; child = child->next) { if (child->pid == pid) { break; } } if (child) break; } if (!child) { return 0; } return child; } /* }}} */ static void fpm_child_init(struct fpm_worker_pool_s *wp) /* {{{ */ { fpm_globals.max_requests = wp->config->pm_max_requests; fpm_globals.listening_socket = dup(wp->listening_socket); if (0 > fpm_stdio_init_child(wp) || 0 > fpm_log_init_child(wp) || 0 > fpm_status_init_child(wp) || 0 > fpm_unix_init_child(wp) || 0 > fpm_signals_init_child() || 0 > fpm_env_init_child(wp) || 0 > fpm_php_init_child(wp)) { zlog(ZLOG_ERROR, "[pool %s] child failed to initialize", wp->config->name); exit(FPM_EXIT_SOFTWARE); } } /* }}} */ int fpm_children_free(struct fpm_child_s *child) /* {{{ */ { struct fpm_child_s *next; for (; child; child = next) { next = child->next; fpm_child_close(child, 0 /* in_event_loop */); } return 0; } /* }}} */ void fpm_children_bury() /* {{{ */ { int status; pid_t pid; struct fpm_child_s *child; while ( (pid = waitpid(-1, &status, WNOHANG | WUNTRACED)) > 0) { char buf[128]; int severity = ZLOG_NOTICE; int restart_child = 1; child = fpm_child_find(pid); if (WIFEXITED(status)) { snprintf(buf, sizeof(buf), "with code %d", WEXITSTATUS(status)); /* if 
it's been killed because of dynamic process management * don't restart it automaticaly */ if (child && child->idle_kill) { restart_child = 0; } if (WEXITSTATUS(status) != FPM_EXIT_OK) { severity = ZLOG_WARNING; } } else if (WIFSIGNALED(status)) { const char *signame = fpm_signal_names[WTERMSIG(status)]; const char *have_core = WCOREDUMP(status) ? " - core dumped" : ""; if (signame == NULL) { signame = ""; } snprintf(buf, sizeof(buf), "on signal %d (%s%s)", WTERMSIG(status), signame, have_core); /* if it's been killed because of dynamic process management * don't restart it automaticaly */ if (child && child->idle_kill && WTERMSIG(status) == SIGQUIT) { restart_child = 0; } if (WTERMSIG(status) != SIGQUIT) { /* possible request loss */ severity = ZLOG_WARNING; } } else if (WIFSTOPPED(status)) { zlog(ZLOG_NOTICE, "child %d stopped for tracing", (int) pid); if (child && child->tracer) { child->tracer(child); } continue; } if (child) { struct fpm_worker_pool_s *wp = child->wp; struct timeval tv1, tv2; fpm_child_unlink(child); fpm_scoreboard_proc_free(wp->scoreboard, child->scoreboard_i); fpm_clock_get(&tv1); timersub(&tv1, &child->started, &tv2); if (restart_child) { if (!fpm_pctl_can_spawn_children()) { severity = ZLOG_DEBUG; } zlog(severity, "[pool %s] child %d exited %s after %ld.%06d seconds from start", child->wp->config->name, (int) pid, buf, tv2.tv_sec, (int) tv2.tv_usec); } else { zlog(ZLOG_DEBUG, "[pool %s] child %d has been killed by the process management after %ld.%06d seconds from start", child->wp->config->name, (int) pid, tv2.tv_sec, (int) tv2.tv_usec); } fpm_child_close(child, 1 /* in event_loop */); fpm_pctl_child_exited(); if (last_faults && (WTERMSIG(status) == SIGSEGV || WTERMSIG(status) == SIGBUS)) { time_t now = tv1.tv_sec; int restart_condition = 1; int i; last_faults[fault++] = now; if (fault == fpm_global_config.emergency_restart_threshold) { fault = 0; } for (i = 0; i < fpm_global_config.emergency_restart_threshold; i++) { if (now - last_faults[i] > fpm_global_config.emergency_restart_interval) { restart_condition = 0; break; } } if (restart_condition) { zlog(ZLOG_WARNING, "failed processes threshold (%d in %d sec) is reached, initiating reload", fpm_global_config.emergency_restart_threshold, fpm_global_config.emergency_restart_interval); fpm_pctl(FPM_PCTL_STATE_RELOADING, FPM_PCTL_ACTION_SET); } } if (restart_child) { fpm_children_make(wp, 1 /* in event loop */, 1, 0); if (fpm_globals.is_child) { break; } } } else { zlog(ZLOG_ALERT, "oops, unknown child (%d) exited %s. 
Please open a bug report (https://bugs.php.net).", pid, buf); } } } /* }}} */ static struct fpm_child_s *fpm_resources_prepare(struct fpm_worker_pool_s *wp) /* {{{ */ { struct fpm_child_s *c; c = fpm_child_alloc(); if (!c) { zlog(ZLOG_ERROR, "[pool %s] unable to malloc new child", wp->config->name); return 0; } c->wp = wp; c->fd_stdout = -1; c->fd_stderr = -1; if (0 > fpm_stdio_prepare_pipes(c)) { fpm_child_free(c); return 0; } if (0 > fpm_scoreboard_proc_alloc(wp->scoreboard, &c->scoreboard_i)) { fpm_stdio_discard_pipes(c); fpm_child_free(c); return 0; } return c; } /* }}} */ static void fpm_resources_discard(struct fpm_child_s *child) /* {{{ */ { fpm_scoreboard_proc_free(child->wp->scoreboard, child->scoreboard_i); fpm_stdio_discard_pipes(child); fpm_child_free(child); } /* }}} */ static void fpm_child_resources_use(struct fpm_child_s *child) /* {{{ */ { struct fpm_worker_pool_s *wp; for (wp = fpm_worker_all_pools; wp; wp = wp->next) { if (wp == child->wp) { continue; } fpm_scoreboard_free(wp->scoreboard); } fpm_scoreboard_child_use(child->wp->scoreboard, child->scoreboard_i, getpid()); fpm_stdio_child_use_pipes(child); fpm_child_free(child); } /* }}} */ static void fpm_parent_resources_use(struct fpm_child_s *child) /* {{{ */ { fpm_stdio_parent_use_pipes(child); fpm_child_link(child); } /* }}} */ int fpm_children_make(struct fpm_worker_pool_s *wp, int in_event_loop, int nb_to_spawn, int is_debug) /* {{{ */ { pid_t pid; struct fpm_child_s *child; int max; static int warned = 0; if (wp->config->pm == PM_STYLE_DYNAMIC) { if (!in_event_loop) { /* starting */ max = wp->config->pm_start_servers; } else { max = wp->running_children + nb_to_spawn; } } else if (wp->config->pm == PM_STYLE_ONDEMAND) { if (!in_event_loop) { /* starting */ max = 0; /* do not create any child at startup */ } else { max = wp->running_children + nb_to_spawn; } } else { /* PM_STYLE_STATIC */ max = wp->config->pm_max_children; } /* * fork children while: * - fpm_pctl_can_spawn_children : FPM is running in a NORMAL state (aka not restart, stop or reload) * - wp->running_children < max : there is less than the max process for the current pool * - (fpm_global_config.process_max < 1 || fpm_globals.running_children < fpm_global_config.process_max): * if fpm_global_config.process_max is set, FPM has not fork this number of processes (globaly) */ while (fpm_pctl_can_spawn_children() && wp->running_children < max && (fpm_global_config.process_max < 1 || fpm_globals.running_children < fpm_global_config.process_max)) { warned = 0; child = fpm_resources_prepare(wp); if (!child) { return 2; } pid = fork(); switch (pid) { case 0 : fpm_child_resources_use(child); fpm_globals.is_child = 1; fpm_child_init(wp); return 0; case -1 : zlog(ZLOG_SYSERROR, "fork() failed"); fpm_resources_discard(child); return 2; default : child->pid = pid; fpm_clock_get(&child->started); fpm_parent_resources_use(child); zlog(is_debug ? ZLOG_DEBUG : ZLOG_NOTICE, "[pool %s] child %d started", wp->config->name, (int) pid); } } if (!warned && fpm_global_config.process_max > 0 && fpm_globals.running_children >= fpm_global_config.process_max) { if (wp->running_children < max) { warned = 1; zlog(ZLOG_WARNING, "The maximum number of processes has been reached. 
Please review your configuration and consider raising 'process.max'"); } } return 1; /* we are done */ } /* }}} */ int fpm_children_create_initial(struct fpm_worker_pool_s *wp) /* {{{ */ { if (wp->config->pm == PM_STYLE_ONDEMAND) { wp->ondemand_event = (struct fpm_event_s *)malloc(sizeof(struct fpm_event_s)); if (!wp->ondemand_event) { zlog(ZLOG_ERROR, "[pool %s] unable to malloc the ondemand socket event", wp->config->name); // FIXME handle crash return 1; } memset(wp->ondemand_event, 0, sizeof(struct fpm_event_s)); fpm_event_set(wp->ondemand_event, wp->listening_socket, FPM_EV_READ | FPM_EV_EDGE, fpm_pctl_on_socket_accept, wp); wp->socket_event_set = 1; fpm_event_add(wp->ondemand_event, 0); return 1; } return fpm_children_make(wp, 0 /* not in event loop yet */, 0, 1); } /* }}} */ int fpm_children_init_main() /* {{{ */ { if (fpm_global_config.emergency_restart_threshold && fpm_global_config.emergency_restart_interval) { last_faults = malloc(sizeof(time_t) * fpm_global_config.emergency_restart_threshold); if (!last_faults) { return -1; } memset(last_faults, 0, sizeof(time_t) * fpm_global_config.emergency_restart_threshold); } if (0 > fpm_cleanup_add(FPM_CLEANUP_ALL, fpm_children_cleanup, 0)) { return -1; } return 0; } /* }}} */
./CrossVul/dataset_final_sorted/CWE-400/c/good_1876_1
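/*
 * Illustrative sketch (not part of fpm_children.c above): fpm_children_bury()
 * records crash times in a fixed-size ring (last_faults) and initiates a
 * reload once emergency_restart_threshold faults all fall within
 * emergency_restart_interval seconds.  The self-contained userspace version
 * of that sliding-window check below mirrors the same wrap-around and "any
 * entry too old" test; the threshold/interval values and the simulated fault
 * times are assumptions chosen only for the demonstration.
 */
#include <stdio.h>
#include <time.h>

#define THRESHOLD 3        /* like emergency_restart_threshold */
#define INTERVAL  60       /* like emergency_restart_interval, in seconds */

static time_t last_faults[THRESHOLD];
static int fault_idx;

/* Record one fault at `now`; return 1 if THRESHOLD faults fell within INTERVAL. */
static int record_fault(time_t now)
{
    int i;

    last_faults[fault_idx++] = now;
    if (fault_idx == THRESHOLD)
        fault_idx = 0;                      /* same wrap-around as the original */

    for (i = 0; i < THRESHOLD; i++) {
        if (now - last_faults[i] > INTERVAL)
            return 0;                       /* at least one slot is empty or too old */
    }
    return 1;
}

int main(void)
{
    time_t base = time(NULL);
    /* three faults 10 seconds apart -> all inside the 60 second window */
    time_t faults[] = { base, base + 10, base + 20 };
    int i;

    for (i = 0; i < 3; i++)
        printf("fault at +%lds -> restart condition: %s\n",
               (long)(faults[i] - base),
               record_fault(faults[i]) ? "yes" : "no");
    return 0;
}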
crossvul-cpp_data_good_4472_0
/* This file is copied from the libsrs2 sources */ /* Modified by Timo Röhling <timo.roehling@gmx.de> */ /* Copyright (c) 2004 Shevek (srs@anarres.org) * All rights reserved. * * This file is a part of libsrs2 from http://www.libsrs2.org/ * * Redistribution and use in source and binary forms, with or without * modification, under the terms of either the GNU General Public * License version 2 or the BSD license, at the discretion of the * user. Copies of these licenses have been included in the libsrs2 * distribution. See the the file called LICENSE for more * information. */ #undef USE_OPENSSL #include <stdarg.h> #include <string.h> /* memcpy, strcpy, memset */ #ifdef HAVE_ALLOCA_H #include <alloca.h> #endif #ifdef USE_OPENSSL #include <openssl/hmac.h> #endif #include "srs2.h" #ifndef EVP_MAX_MD_SIZE #define EVP_MAX_MD_SIZE (16+20) /* The SSLv3 md5+sha1 type */ #endif #ifndef HAVE_STRCASECMP # ifdef HAVE__STRICMP # define strcasecmp _stricmp # endif #endif #ifndef HAVE_STRNCASECMP # ifdef HAVE__STRNICMP # define strncasecmp _strnicmp # endif #endif /* Use this */ #define STRINGP(s) ((s != NULL) && (*(s) != '\0')) static const char *srs_separators = "=-+"; static srs_malloc_t srs_f_malloc = malloc; static srs_realloc_t srs_f_realloc = realloc; static srs_free_t srs_f_free = free; int srs_set_malloc(srs_malloc_t m, srs_realloc_t r, srs_free_t f) { srs_f_malloc = m; srs_f_realloc = r; srs_f_free = f; return SRS_SUCCESS; } const char * srs_strerror(int code) { switch (code) { /* Simple errors */ case SRS_SUCCESS: return "Success"; case SRS_ENOTSRSADDRESS: return "Not an SRS address."; /* Config errors */ case SRS_ENOSECRETS: return "No secrets in SRS configuration."; case SRS_ESEPARATORINVALID: return "Invalid separator suggested."; /* Input errors */ case SRS_ENOSENDERATSIGN: return "No at sign in sender address"; case SRS_EBUFTOOSMALL: return "Buffer too small."; /* Syntax errors */ case SRS_ENOSRS0HOST: return "No host in SRS0 address."; case SRS_ENOSRS0USER: return "No user in SRS0 address."; case SRS_ENOSRS0HASH: return "No hash in SRS0 address."; case SRS_ENOSRS0STAMP: return "No timestamp in SRS0 address."; case SRS_ENOSRS1HOST: return "No host in SRS1 address."; case SRS_ENOSRS1USER: return "No user in SRS1 address."; case SRS_ENOSRS1HASH: return "No hash in SRS1 address."; case SRS_EBADTIMESTAMPCHAR: return "Bad base32 character in timestamp."; case SRS_EHASHTOOSHORT: return "Hash too short in SRS address."; /* SRS errors */ case SRS_ETIMESTAMPOUTOFDATE: return "Time stamp out of date."; case SRS_EHASHINVALID: return "Hash invalid in SRS address."; default: return "Unknown error in SRS library."; } } srs_t * srs_new() { srs_t *srs = (srs_t *)srs_f_malloc(sizeof(srs_t)); srs_init(srs); return srs; } void srs_init(srs_t *srs) { memset(srs, 0, sizeof(srs_t)); srs->secrets = NULL; srs->numsecrets = 0; srs->separator = '='; srs->maxage = 21; srs->hashlength = 4; srs->hashmin = srs->hashlength; srs->alwaysrewrite = FALSE; } void srs_free(srs_t *srs) { int i; for (i = 0; i < srs->numsecrets; i++) { memset(srs->secrets[i], 0, strlen(srs->secrets[i])); srs_f_free(srs->secrets[i]); srs->secrets[i] = 0; } srs_f_free(srs); } int srs_add_secret(srs_t *srs, const char *secret) { int newlen = (srs->numsecrets + 1) * sizeof(char *); srs->secrets = (char **)srs_f_realloc(srs->secrets, newlen); srs->secrets[srs->numsecrets++] = strdup(secret); return SRS_SUCCESS; } const char * srs_get_secret(srs_t *srs, int idx) { if (idx < srs->numsecrets) return srs->secrets[idx]; return NULL; } #define 
SRS_PARAM_DEFINE(n, t) \ int srs_set_ ## n (srs_t *srs, t value) { \ srs->n = value; \ return SRS_SUCCESS; \ } \ t srs_get_ ## n (srs_t *srs) { \ return srs->n; \ } int srs_set_separator(srs_t *srs, char value) { if (strchr(srs_separators, value) == NULL) return SRS_ESEPARATORINVALID; srs->separator = value; return SRS_SUCCESS; } char srs_get_separator(srs_t *srs) { return srs->separator; } SRS_PARAM_DEFINE(maxage, int) /* XXX Check hashlength >= hashmin */ SRS_PARAM_DEFINE(hashlength, int) SRS_PARAM_DEFINE(hashmin, int) SRS_PARAM_DEFINE(alwaysrewrite, srs_bool) SRS_PARAM_DEFINE(noforward, srs_bool) SRS_PARAM_DEFINE(noreverse, srs_bool) /* Don't mess with these unless you know what you're doing well * enough to rewrite the timestamp functions. These are based on * a 2 character timestamp. Changing these in the wild is probably * a bad idea. */ #define SRS_TIME_PRECISION (60 * 60 * 24) /* One day */ #define SRS_TIME_BASEBITS 5 /* 2^5 = 32 = strlen(CHARS) */ /* This had better be a real variable since we do arithmethic * with it. */ const char *SRS_TIME_BASECHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"; #define SRS_TIME_SIZE 2 #define SRS_TIME_SLOTS (1<<(SRS_TIME_BASEBITS<<(SRS_TIME_SIZE-1))) int srs_timestamp_create(srs_t *srs __attribute__((unused)), char *buf, time_t now) { now = now / SRS_TIME_PRECISION; buf[1] = SRS_TIME_BASECHARS[now & ((1 << SRS_TIME_BASEBITS) - 1)]; now = now >> SRS_TIME_BASEBITS; buf[0] = SRS_TIME_BASECHARS[now & ((1 << SRS_TIME_BASEBITS) - 1)]; buf[2] = '\0'; return SRS_SUCCESS; } int srs_timestamp_check(srs_t *srs, const char *stamp) { const char *sp; char *bp; int off; time_t now; time_t then; if (strlen(stamp) != 2) return SRS_ETIMESTAMPOUTOFDATE; /* We had better go around this loop exactly twice! */ then = 0; for (sp = stamp; *sp; sp++) { bp = strchr(SRS_TIME_BASECHARS, toupper(*sp)); if (bp == NULL) return SRS_EBADTIMESTAMPCHAR; off = bp - SRS_TIME_BASECHARS; then = (then << SRS_TIME_BASEBITS) | off; } time(&now); now = (now / SRS_TIME_PRECISION) % SRS_TIME_SLOTS; while (now < then) now = now + SRS_TIME_SLOTS; if (now <= then + srs->maxage) return SRS_SUCCESS; return SRS_ETIMESTAMPOUTOFDATE; } const char *SRS_HASH_BASECHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz" "0123456789+/"; static void srs_hash_create_v(srs_t *srs, int idx, char *buf, int nargs, va_list ap) { #ifdef USE_OPENSSL HMAC_CTX ctx; int srshashlen; char srshash[EVP_MAX_MD_SIZE + 1]; #else srs_hmac_ctx_t ctx; char srshash[SHA_DIGESTSIZE + 1]; #endif char *secret; char *data; int len; char *lcdata; unsigned char *hp; char *bp; int i; int j; secret = srs->secrets[idx]; #ifdef USE_OPENSSL HMAC_CTX_init(&ctx); HMAC_Init(&ctx, secret, strlen(secret), EVP_sha1()); #else srs_hmac_init(&ctx, secret, strlen(secret)); #endif for (i = 0; i < nargs; i++) { data = va_arg(ap, char *); len = strlen(data); lcdata = alloca(len + 1); for (j = 0; j < len; j++) { if (isupper(data[j])) lcdata[j] = tolower(data[j]); else lcdata[j] = data[j]; } #ifdef USE_OPENSSL HMAC_Update(&ctx, lcdata, len); #else srs_hmac_update(&ctx, lcdata, len); #endif } #ifdef USE_OPENSSL HMAC_Final(&ctx, srshash, &srshashlen); HMAC_CTX_cleanup(&ctx); srshash[EVP_MAX_MD_SIZE] = '\0'; #else srs_hmac_fini(&ctx, srshash); srshash[SHA_DIGESTSIZE] = '\0'; #endif /* A little base64 encoding. Just a little. 
*/ hp = (unsigned char *)srshash; bp = buf; for (i = 0; i < srs->hashlength; i++) { switch (i & 0x03) { default: /* NOTREACHED */ case 0: j = (*hp >> 2); break; case 1: j = ((*hp & 0x03) << 4) | ((*(hp + 1) & 0xF0) >> 4); hp++; break; case 2: j = ((*hp & 0x0F) << 2) | ((*(hp + 1) & 0xC0) >> 6); hp++; break; case 3: j = (*hp++ & 0x3F); break; } *bp++ = SRS_HASH_BASECHARS[j]; } *bp = '\0'; buf[srs->hashlength] = '\0'; } int srs_hash_create(srs_t *srs, char *buf, int nargs, ...) { va_list ap; if (srs->numsecrets == 0) return SRS_ENOSECRETS; if (srs->secrets == NULL) return SRS_ENOSECRETS; if (srs->secrets[0] == NULL) return SRS_ENOSECRETS; va_start(ap, nargs); srs_hash_create_v(srs, 0, buf, nargs, ap); va_end(ap); return SRS_SUCCESS; } int srs_hash_check(srs_t *srs, char *hash, int nargs, ...) { va_list ap; char *srshash; char *tmp; int len; int i; len = strlen(hash); if (len < srs->hashmin) return SRS_EHASHTOOSHORT; if (len > srs->hashlength) { tmp = alloca(srs->hashlength + 1); strncpy(tmp, hash, srs->hashlength); tmp[srs->hashlength] = '\0'; hash = tmp; len = srs->hashlength; } for (i = 0; i < srs->numsecrets; i++) { va_start(ap, nargs); srshash = alloca(srs->hashlength + 1); srs_hash_create_v(srs, i, srshash, nargs, ap); va_end(ap); if (strncasecmp(hash, srshash, len) == 0) return SRS_SUCCESS; } return SRS_EHASHINVALID; } int srs_compile_shortcut(srs_t *srs, char *buf, int buflen, char *sendhost, char *senduser, const char *aliashost) { char *srshash; char srsstamp[SRS_TIME_SIZE + 1]; int len; int ret; /* This never happens if we get called from guarded() */ if ((strncasecmp(senduser, SRS0TAG, 4) == 0) && (strchr(srs_separators, senduser[4]) != NULL)) { sendhost = senduser + 5; if (*sendhost == '\0') return SRS_ENOSRS0HOST; senduser = strchr(sendhost, SRSSEP); if ((senduser == NULL) || (*senduser == '\0')) return SRS_ENOSRS0USER; } len = strlen(SRS0TAG) + 1 + srs->hashlength + 1 + SRS_TIME_SIZE + 1 + strlen(sendhost) + 1 + strlen(senduser) + 1 + strlen(aliashost); if (len >= buflen) return SRS_EBUFTOOSMALL; ret = srs_timestamp_create(srs, srsstamp, time(NULL)); if (ret != SRS_SUCCESS) return ret; srshash = alloca(srs->hashlength + 1); ret = srs_hash_create(srs, srshash,3, srsstamp, sendhost, senduser); if (ret != SRS_SUCCESS) return ret; sprintf(buf, SRS0TAG "%c%s%c%s%c%s%c%s@%s", srs->separator, srshash, SRSSEP, srsstamp, SRSSEP, sendhost, SRSSEP, senduser, aliashost); return SRS_SUCCESS; } int srs_compile_guarded(srs_t *srs, char *buf, int buflen, char *sendhost, char *senduser, const char *aliashost) { char *srshost; char *srsuser; char *srshash; int len; int ret; if ((strncasecmp(senduser, SRS1TAG, 4) == 0) && (strchr(srs_separators, senduser[4]) != NULL)) { /* Used as a temporary convenience var */ srshash = senduser + 5; if (*srshash == '\0') return SRS_ENOSRS1HASH; /* Used as a temporary convenience var */ srshost = strchr(srshash, SRSSEP); if (!STRINGP(srshost)) return SRS_ENOSRS1HOST; *srshost++ = '\0'; srsuser = strchr(srshost, SRSSEP); if (!STRINGP(srsuser)) return SRS_ENOSRS1USER; *srsuser++ = '\0'; srshash = alloca(srs->hashlength + 1); ret = srs_hash_create(srs, srshash, 2, srshost, srsuser); if (ret != SRS_SUCCESS) return ret; len = strlen(SRS1TAG) + 1 + srs->hashlength + 1 + strlen(srshost) + 1 + strlen(srsuser) + 1 + strlen(aliashost); if (len >= buflen) return SRS_EBUFTOOSMALL; sprintf(buf, SRS1TAG "%c%s%c%s%c%s@%s", srs->separator, srshash, SRSSEP, srshost, SRSSEP, srsuser, aliashost); return SRS_SUCCESS; } else if ((strncasecmp(senduser, SRS0TAG, 4) == 0) && 
(strchr(srs_separators, senduser[4]) != NULL)) { srsuser = senduser + 4; srshost = sendhost; srshash = alloca(srs->hashlength + 1); ret = srs_hash_create(srs, srshash, 2, srshost, srsuser); if (ret != SRS_SUCCESS) return ret; len = strlen(SRS1TAG) + 1 + srs->hashlength + 1 + strlen(srshost) + 1 + strlen(srsuser) + 1 + strlen(aliashost); if (len >= buflen) return SRS_EBUFTOOSMALL; sprintf(buf, SRS1TAG "%c%s%c%s%c%s@%s", srs->separator, srshash, SRSSEP, srshost, SRSSEP, srsuser, aliashost); } else { return srs_compile_shortcut(srs, buf, buflen, sendhost, senduser, aliashost); } return SRS_SUCCESS; } int srs_parse_shortcut(srs_t *srs, char *buf, unsigned buflen, char *senduser) { char *srshash; char *srsstamp; char *srshost; char *srsuser; int ret; if (strncasecmp(senduser, SRS0TAG, 4) == 0) { srshash = senduser + 5; if (!STRINGP(srshash)) return SRS_ENOSRS0HASH; srsstamp = strchr(srshash, SRSSEP); if (!STRINGP(srsstamp)) return SRS_ENOSRS0STAMP; *srsstamp++ = '\0'; srshost = strchr(srsstamp, SRSSEP); if (!STRINGP(srshost)) return SRS_ENOSRS0HOST; *srshost++ = '\0'; srsuser = strchr(srshost, SRSSEP); if (!STRINGP(srsuser)) return SRS_ENOSRS0USER; *srsuser++ = '\0'; ret = srs_timestamp_check(srs, srsstamp); if (ret != SRS_SUCCESS) return ret; ret = srs_hash_check(srs, srshash, 3, srsstamp, srshost, srsuser); if (ret != SRS_SUCCESS) return ret; snprintf(buf, buflen, "%s@%s", srsuser, srshost); return SRS_SUCCESS; } return SRS_ENOTSRSADDRESS; } int srs_parse_guarded(srs_t *srs, char *buf, int buflen, char *senduser) { char *srshash; char *srshost; char *srsuser; int ret; if (strncasecmp(senduser, SRS1TAG, 4) == 0) { srshash = senduser + 5; if (!STRINGP(srshash)) return SRS_ENOSRS1HASH; srshost = strchr(srshash, SRSSEP); if (!STRINGP(srshost)) return SRS_ENOSRS1HOST; *srshost++ = '\0'; srsuser = strchr(srshost, SRSSEP); if (!STRINGP(srsuser)) return SRS_ENOSRS1USER; *srsuser++ = '\0'; ret = srs_hash_check(srs, srshash, 2, srshost, srsuser); if (ret != SRS_SUCCESS) return ret; sprintf(buf, SRS0TAG "%s@%s", srsuser, srshost); return SRS_SUCCESS; } else { return srs_parse_shortcut(srs, buf, buflen, senduser); } } int srs_forward(srs_t *srs, char *buf, unsigned buflen, const char *sender, const char *alias) { char *senduser; char *sendhost; char *tmp; unsigned len; if (srs->noforward) return SRS_ENOTREWRITTEN; /* This is allowed to be a plain domain */ while ((tmp = strchr(alias, '@')) != NULL) alias = tmp + 1; tmp = strchr(sender, '@'); if (tmp == NULL) return SRS_ENOSENDERATSIGN; sendhost = tmp + 1; len = strlen(sender); if (! srs->alwaysrewrite) { if (strcasecmp(sendhost, alias) == 0) { if (strlen(sender) >= buflen) return SRS_EBUFTOOSMALL; strcpy(buf, sender); return SRS_SUCCESS; } } /* Reconstruct the whole show into our alloca() buffer. 
*/ senduser = alloca(len + 1); strcpy(senduser, sender); tmp = (senduser + (tmp - sender)); sendhost = tmp + 1; *tmp = '\0'; return srs_compile_guarded(srs, buf, buflen, sendhost, senduser, alias); } int srs_forward_alloc(srs_t *srs, char **sptr, const char *sender, const char *alias) { char *buf; int slen; int alen; int len; int ret; if (srs->noforward) return SRS_ENOTREWRITTEN; slen = strlen(sender); alen = strlen(alias); /* strlen(SRSxTAG) + strlen("====+@") < 64 */ len = slen + alen + srs->hashlength + SRS_TIME_SIZE + 64; buf = (char *)srs_f_malloc(len); ret = srs_forward(srs, buf, len, sender, alias); if (ret == SRS_SUCCESS) *sptr = buf; else srs_f_free(buf); return ret; } int srs_reverse(srs_t *srs, char *buf, unsigned buflen, const char *sender) { char *senduser; char *tmp; unsigned len; if (!SRS_IS_SRS_ADDRESS(sender)) return SRS_ENOTSRSADDRESS; if (srs->noreverse) return SRS_ENOTREWRITTEN; len = strlen(sender); if (len >= buflen) return SRS_EBUFTOOSMALL; senduser = alloca(len + 1); strcpy(senduser, sender); /* We don't really care about the host for reversal. */ tmp = strchr(senduser, '@'); if (tmp != NULL) *tmp = '\0'; return srs_parse_guarded(srs, buf, buflen, senduser); } int srs_reverse_alloc(srs_t *srs, char **sptr, const char *sender) { char *buf; int len; int ret; *sptr = NULL; if (!SRS_IS_SRS_ADDRESS(sender)) return SRS_ENOTSRSADDRESS; if (srs->noreverse) return SRS_ENOTREWRITTEN; len = strlen(sender) + 1; buf = (char *)srs_f_malloc(len); ret = srs_reverse(srs, buf, len, sender); if (ret == SRS_SUCCESS) *sptr = buf; else srs_f_free(buf); return ret; }
./CrossVul/dataset_final_sorted/CWE-400/c/good_4472_0
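/*
 * Illustrative sketch (not part of srs2.c above): srs_timestamp_create() and
 * srs_timestamp_check() encode the current day number into two characters of
 * a 32-symbol alphabet (5 bits each, 1024 slots) and later compare the decoded
 * slot against "now" modulo the slot count.  The standalone encode/decode
 * below mirrors that scheme; it is a demonstration only and deliberately
 * omits the max-age window handling performed by srs_timestamp_check().
 */
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <time.h>

#define TIME_PRECISION (60 * 60 * 24)   /* one day per tick, as in srs2.c */
static const char BASE32[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567";

/* Encode the day number of `t` into two base32 characters. */
static void stamp_create(char buf[3], time_t t)
{
    time_t days = t / TIME_PRECISION;

    buf[1] = BASE32[days & 0x1f];
    buf[0] = BASE32[(days >> 5) & 0x1f];
    buf[2] = '\0';
}

/* Decode a two-character stamp back into a slot number (0..1023), -1 on error. */
static long stamp_decode(const char *stamp)
{
    long val = 0;
    int i;

    if (strlen(stamp) != 2)
        return -1;
    for (i = 0; i < 2; i++) {
        const char *p = strchr(BASE32, toupper((unsigned char)stamp[i]));
        if (!p)
            return -1;
        val = (val << 5) | (p - BASE32);
    }
    return val;
}

int main(void)
{
    char buf[3];
    time_t now = time(NULL);

    stamp_create(buf, now);
    printf("stamp: %s  decoded slot: %ld  expected slot: %ld\n",
           buf, stamp_decode(buf), (long)((now / TIME_PRECISION) % 1024));
    return 0;
}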
crossvul-cpp_data_bad_1362_0
/** * @file resolve.c * @author Michal Vasko <mvasko@cesnet.cz> * @brief libyang resolve functions * * Copyright (c) 2015 - 2018 CESNET, z.s.p.o. * * This source code is licensed under BSD 3-Clause License (the "License"). * You may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://opensource.org/licenses/BSD-3-Clause */ #define _GNU_SOURCE #include <stdlib.h> #include <assert.h> #include <string.h> #include <ctype.h> #include <limits.h> #include "libyang.h" #include "resolve.h" #include "common.h" #include "xpath.h" #include "parser.h" #include "parser_yang.h" #include "xml_internal.h" #include "hash_table.h" #include "tree_internal.h" #include "extensions.h" #include "validation.h" /* internal parsed predicate structure */ struct parsed_pred { const struct lys_node *schema; int len; struct { const char *mod_name; int mod_name_len; const char *name; int nam_len; const char *value; int val_len; } *pred; }; int parse_range_dec64(const char **str_num, uint8_t dig, int64_t *num) { const char *ptr; int minus = 0; int64_t ret = 0, prev_ret; int8_t str_exp, str_dig = -1, trailing_zeros = 0; ptr = *str_num; if (ptr[0] == '-') { minus = 1; ++ptr; } else if (ptr[0] == '+') { ++ptr; } if (!isdigit(ptr[0])) { /* there must be at least one */ return 1; } for (str_exp = 0; isdigit(ptr[0]) || ((ptr[0] == '.') && (str_dig < 0)); ++ptr) { if (str_exp > 18) { return 1; } if (ptr[0] == '.') { if (ptr[1] == '.') { /* it's the next interval */ break; } ++str_dig; } else { prev_ret = ret; if (minus) { ret = ret * 10 - (ptr[0] - '0'); if (ret > prev_ret) { return 1; } } else { ret = ret * 10 + (ptr[0] - '0'); if (ret < prev_ret) { return 1; } } if (str_dig > -1) { ++str_dig; if (ptr[0] == '0') { /* possibly trailing zero */ trailing_zeros++; } else { trailing_zeros = 0; } } ++str_exp; } } if (str_dig == 0) { /* no digits after '.' */ return 1; } else if (str_dig == -1) { /* there are 0 numbers after the floating point */ str_dig = 0; } /* remove trailing zeros */ if (trailing_zeros) { str_dig -= trailing_zeros; str_exp -= trailing_zeros; ret = ret / dec_pow(trailing_zeros); } /* it's parsed, now adjust the number based on fraction-digits, if needed */ if (str_dig < dig) { if ((str_exp - 1) + (dig - str_dig) > 18) { return 1; } prev_ret = ret; ret *= dec_pow(dig - str_dig); if ((minus && (ret > prev_ret)) || (!minus && (ret < prev_ret))) { return 1; } } if (str_dig > dig) { return 1; } *str_num = ptr; *num = ret; return 0; } /** * @brief Parse an identifier. * * ;; An identifier MUST NOT start with (('X'|'x') ('M'|'m') ('L'|'l')) * identifier = (ALPHA / "_") * *(ALPHA / DIGIT / "_" / "-" / ".") * * @param[in] id Identifier to use. * * @return Number of characters successfully parsed. */ unsigned int parse_identifier(const char *id) { unsigned int parsed = 0; assert(id); if (!isalpha(id[0]) && (id[0] != '_')) { return -parsed; } ++parsed; ++id; while (isalnum(id[0]) || (id[0] == '_') || (id[0] == '-') || (id[0] == '.')) { ++parsed; ++id; } return parsed; } /** * @brief Parse a node-identifier. * * node-identifier = [module-name ":"] identifier * * @param[in] id Identifier to use. * @param[out] mod_name Points to the module name, NULL if there is not any. * @param[out] mod_name_len Length of the module name, 0 if there is not any. * @param[out] name Points to the node name. * @param[out] nam_len Length of the node name. * @param[out] all_desc Whether the path starts with '/', only supported in extended paths. 
* @param[in] extended Whether to accept an extended path (support for [prefix:]*, /[prefix:]*, /[prefix:]., prefix:#identifier). * * @return Number of characters successfully parsed, * positive on success, negative on failure. */ static int parse_node_identifier(const char *id, const char **mod_name, int *mod_name_len, const char **name, int *nam_len, int *all_desc, int extended) { int parsed = 0, ret, all_desc_local = 0, first_id_len; const char *first_id; assert(id); assert((mod_name && mod_name_len) || (!mod_name && !mod_name_len)); assert((name && nam_len) || (!name && !nam_len)); if (mod_name) { *mod_name = NULL; *mod_name_len = 0; } if (name) { *name = NULL; *nam_len = 0; } if (extended) { /* try to parse only the extended expressions */ if (id[parsed] == '/') { if (all_desc) { *all_desc = 1; } all_desc_local = 1; } else { if (all_desc) { *all_desc = 0; } } /* is there a prefix? */ ret = parse_identifier(id + all_desc_local); if (ret > 0) { if (id[all_desc_local + ret] != ':') { /* this is not a prefix, so not an extended id */ goto standard_id; } if (mod_name) { *mod_name = id + all_desc_local; *mod_name_len = ret; } /* "/" and ":" */ ret += all_desc_local + 1; } else { ret = all_desc_local; } /* parse either "*" or "." */ if (*(id + ret) == '*') { if (name) { *name = id + ret; *nam_len = 1; } ++ret; return ret; } else if (*(id + ret) == '.') { if (!all_desc_local) { /* /. is redundant expression, we do not accept it */ return -ret; } if (name) { *name = id + ret; *nam_len = 1; } ++ret; return ret; } else if (*(id + ret) == '#') { if (all_desc_local || !ret) { /* no prefix */ return 0; } parsed = ret + 1; if ((ret = parse_identifier(id + parsed)) < 1) { return -parsed + ret; } *name = id + parsed - 1; *nam_len = ret + 1; return parsed + ret; } /* else a standard id, parse it all again */ } standard_id: if ((ret = parse_identifier(id)) < 1) { return ret; } first_id = id; first_id_len = ret; parsed += ret; id += ret; /* there is prefix */ if (id[0] == ':') { ++parsed; ++id; /* there isn't */ } else { if (name) { *name = first_id; *nam_len = first_id_len; } return parsed; } /* identifier (node name) */ if ((ret = parse_identifier(id)) < 1) { return -parsed + ret; } if (mod_name) { *mod_name = first_id; *mod_name_len = first_id_len; } if (name) { *name = id; *nam_len = ret; } return parsed + ret; } /** * @brief Parse a path-predicate (leafref). * * path-predicate = "[" *WSP path-equality-expr *WSP "]" * path-equality-expr = node-identifier *WSP "=" *WSP path-key-expr * * @param[in] id Identifier to use. * @param[out] prefix Points to the prefix, NULL if there is not any. * @param[out] pref_len Length of the prefix, 0 if there is not any. * @param[out] name Points to the node name. * @param[out] nam_len Length of the node name. * @param[out] path_key_expr Points to the path-key-expr. * @param[out] pke_len Length of the path-key-expr. * @param[out] has_predicate Flag to mark whether there is another predicate following. * * @return Number of characters successfully parsed, * positive on success, negative on failure. 
*/ static int parse_path_predicate(const char *id, const char **prefix, int *pref_len, const char **name, int *nam_len, const char **path_key_expr, int *pke_len, int *has_predicate) { const char *ptr; int parsed = 0, ret; assert(id); if (prefix) { *prefix = NULL; } if (pref_len) { *pref_len = 0; } if (name) { *name = NULL; } if (nam_len) { *nam_len = 0; } if (path_key_expr) { *path_key_expr = NULL; } if (pke_len) { *pke_len = 0; } if (has_predicate) { *has_predicate = 0; } if (id[0] != '[') { return -parsed; } ++parsed; ++id; while (isspace(id[0])) { ++parsed; ++id; } if ((ret = parse_node_identifier(id, prefix, pref_len, name, nam_len, NULL, 0)) < 1) { return -parsed+ret; } parsed += ret; id += ret; while (isspace(id[0])) { ++parsed; ++id; } if (id[0] != '=') { return -parsed; } ++parsed; ++id; while (isspace(id[0])) { ++parsed; ++id; } if ((ptr = strchr(id, ']')) == NULL) { return -parsed; } --ptr; while (isspace(ptr[0])) { --ptr; } ++ptr; ret = ptr-id; if (path_key_expr) { *path_key_expr = id; } if (pke_len) { *pke_len = ret; } parsed += ret; id += ret; while (isspace(id[0])) { ++parsed; ++id; } assert(id[0] == ']'); if (id[1] == '[') { *has_predicate = 1; } return parsed+1; } /** * @brief Parse a path-key-expr (leafref). First call parses "current()", all * the ".." and the first node-identifier, other calls parse a single * node-identifier each. * * path-key-expr = current-function-invocation *WSP "/" *WSP * rel-path-keyexpr * rel-path-keyexpr = 1*(".." *WSP "/" *WSP) * *(node-identifier *WSP "/" *WSP) * node-identifier * * @param[in] id Identifier to use. * @param[out] prefix Points to the prefix, NULL if there is not any. * @param[out] pref_len Length of the prefix, 0 if there is not any. * @param[out] name Points to the node name. * @param[out] nam_len Length of the node name. * @param[out] parent_times Number of ".." in the path. Must be 0 on the first call, * must not be changed between consecutive calls. * @return Number of characters successfully parsed, * positive on success, negative on failure. */ static int parse_path_key_expr(const char *id, const char **prefix, int *pref_len, const char **name, int *nam_len, int *parent_times) { int parsed = 0, ret, par_times = 0; assert(id); assert(parent_times); if (prefix) { *prefix = NULL; } if (pref_len) { *pref_len = 0; } if (name) { *name = NULL; } if (nam_len) { *nam_len = 0; } if (!*parent_times) { /* current-function-invocation *WSP "/" *WSP rel-path-keyexpr */ if (strncmp(id, "current()", 9)) { return -parsed; } parsed += 9; id += 9; while (isspace(id[0])) { ++parsed; ++id; } if (id[0] != '/') { return -parsed; } ++parsed; ++id; while (isspace(id[0])) { ++parsed; ++id; } /* rel-path-keyexpr */ if (strncmp(id, "..", 2)) { return -parsed; } ++par_times; parsed += 2; id += 2; while (isspace(id[0])) { ++parsed; ++id; } } /* 1*(".." 
*WSP "/" *WSP) *(node-identifier *WSP "/" *WSP) node-identifier * * first parent reference with whitespaces already parsed */ if (id[0] != '/') { return -parsed; } ++parsed; ++id; while (isspace(id[0])) { ++parsed; ++id; } while (!strncmp(id, "..", 2) && !*parent_times) { ++par_times; parsed += 2; id += 2; while (isspace(id[0])) { ++parsed; ++id; } if (id[0] != '/') { return -parsed; } ++parsed; ++id; while (isspace(id[0])) { ++parsed; ++id; } } if (!*parent_times) { *parent_times = par_times; } /* all parent references must be parsed at this point */ if ((ret = parse_node_identifier(id, prefix, pref_len, name, nam_len, NULL, 0)) < 1) { return -parsed + ret; } parsed += ret; id += ret; return parsed; } /** * @brief Parse path-arg (leafref). * * path-arg = absolute-path / relative-path * absolute-path = 1*("/" (node-identifier *path-predicate)) * relative-path = 1*(".." "/") descendant-path * * @param[in] mod Module of the context node to get correct prefix in case it is not explicitly specified * @param[in] id Identifier to use. * @param[out] prefix Points to the prefix, NULL if there is not any. * @param[out] pref_len Length of the prefix, 0 if there is not any. * @param[out] name Points to the node name. * @param[out] nam_len Length of the node name. * @param[out] parent_times Number of ".." in the path. Must be 0 on the first call, * must not be changed between consecutive calls. -1 if the * path is relative. * @param[out] has_predicate Flag to mark whether there is a predicate specified. * * @return Number of characters successfully parsed, * positive on success, negative on failure. */ static int parse_path_arg(const struct lys_module *mod, const char *id, const char **prefix, int *pref_len, const char **name, int *nam_len, int *parent_times, int *has_predicate) { int parsed = 0, ret, par_times = 0; assert(id); assert(parent_times); if (prefix) { *prefix = NULL; } if (pref_len) { *pref_len = 0; } if (name) { *name = NULL; } if (nam_len) { *nam_len = 0; } if (has_predicate) { *has_predicate = 0; } if (!*parent_times && !strncmp(id, "..", 2)) { ++par_times; parsed += 2; id += 2; while (!strncmp(id, "/..", 3)) { ++par_times; parsed += 3; id += 3; } } if (!*parent_times) { if (par_times) { *parent_times = par_times; } else { *parent_times = -1; } } if (id[0] != '/') { return -parsed; } /* skip '/' */ ++parsed; ++id; /* node-identifier ([prefix:]identifier) */ if ((ret = parse_node_identifier(id, prefix, pref_len, name, nam_len, NULL, 0)) < 1) { return -parsed - ret; } if (prefix && !(*prefix)) { /* actually we always need prefix even it is not specified */ *prefix = lys_main_module(mod)->name; *pref_len = strlen(*prefix); } parsed += ret; id += ret; /* there is no predicate */ if ((id[0] == '/') || !id[0]) { return parsed; } else if (id[0] != '[') { return -parsed; } if (has_predicate) { *has_predicate = 1; } return parsed; } /** * @brief Parse instance-identifier in JSON data format. That means that prefixes * are actually model names. * * instance-identifier = 1*("/" (node-identifier *predicate)) * * @param[in] id Identifier to use. * @param[out] model Points to the model name. * @param[out] mod_len Length of the model name. * @param[out] name Points to the node name. * @param[out] nam_len Length of the node name. * @param[out] has_predicate Flag to mark whether there is a predicate specified. * * @return Number of characters successfully parsed, * positive on success, negative on failure. 
*/ static int parse_instance_identifier(const char *id, const char **model, int *mod_len, const char **name, int *nam_len, int *has_predicate) { int parsed = 0, ret; assert(id && model && mod_len && name && nam_len); if (has_predicate) { *has_predicate = 0; } if (id[0] != '/') { return -parsed; } ++parsed; ++id; if ((ret = parse_identifier(id)) < 1) { return ret; } *name = id; *nam_len = ret; parsed += ret; id += ret; if (id[0] == ':') { /* we have prefix */ *model = *name; *mod_len = *nam_len; ++parsed; ++id; if ((ret = parse_identifier(id)) < 1) { return ret; } *name = id; *nam_len = ret; parsed += ret; id += ret; } if (id[0] == '[' && has_predicate) { *has_predicate = 1; } return parsed; } /** * @brief Parse predicate (instance-identifier) in JSON data format. That means that prefixes * (which are mandatory) are actually model names. * * predicate = "[" *WSP (predicate-expr / pos) *WSP "]" * predicate-expr = (node-identifier / ".") *WSP "=" *WSP * ((DQUOTE string DQUOTE) / * (SQUOTE string SQUOTE)) * pos = non-negative-integer-value * * @param[in] id Identifier to use. * @param[out] model Points to the model name. * @param[out] mod_len Length of the model name. * @param[out] name Points to the node name. Can be identifier (from node-identifier), "." or pos. * @param[out] nam_len Length of the node name. * @param[out] value Value the node-identifier must have (string from the grammar), * NULL if there is not any. * @param[out] val_len Length of the value, 0 if there is not any. * @param[out] has_predicate Flag to mark whether there is a predicate specified. * * @return Number of characters successfully parsed, * positive on success, negative on failure. */ static int parse_predicate(const char *id, const char **model, int *mod_len, const char **name, int *nam_len, const char **value, int *val_len, int *has_predicate) { const char *ptr; int parsed = 0, ret; char quote; assert(id); if (model) { assert(mod_len); *model = NULL; *mod_len = 0; } if (name) { assert(nam_len); *name = NULL; *nam_len = 0; } if (value) { assert(val_len); *value = NULL; *val_len = 0; } if (has_predicate) { *has_predicate = 0; } if (id[0] != '[') { return -parsed; } ++parsed; ++id; while (isspace(id[0])) { ++parsed; ++id; } /* pos */ if (isdigit(id[0])) { if (name) { *name = id; } if (id[0] == '0') { return -parsed; } while (isdigit(id[0])) { ++parsed; ++id; } if (nam_len) { *nam_len = id-(*name); } /* "." or node-identifier */ } else { if (id[0] == '.') { if (name) { *name = id; } if (nam_len) { *nam_len = 1; } ++parsed; ++id; } else { if ((ret = parse_node_identifier(id, model, mod_len, name, nam_len, NULL, 0)) < 1) { return -parsed + ret; } parsed += ret; id += ret; } while (isspace(id[0])) { ++parsed; ++id; } if (id[0] != '=') { return -parsed; } ++parsed; ++id; while (isspace(id[0])) { ++parsed; ++id; } /* ((DQUOTE string DQUOTE) / (SQUOTE string SQUOTE)) */ if ((id[0] == '\"') || (id[0] == '\'')) { quote = id[0]; ++parsed; ++id; if ((ptr = strchr(id, quote)) == NULL) { return -parsed; } ret = ptr - id; if (value) { *value = id; } if (val_len) { *val_len = ret; } parsed += ret + 1; id += ret + 1; } else { return -parsed; } } while (isspace(id[0])) { ++parsed; ++id; } if (id[0] != ']') { return -parsed; } ++parsed; ++id; if ((id[0] == '[') && has_predicate) { *has_predicate = 1; } return parsed; } /** * @brief Parse schema-nodeid. * * schema-nodeid = absolute-schema-nodeid / * descendant-schema-nodeid * absolute-schema-nodeid = 1*("/" node-identifier) * descendant-schema-nodeid = ["." 
"/"] * node-identifier * absolute-schema-nodeid * * @param[in] id Identifier to use. * @param[out] mod_name Points to the module name, NULL if there is not any. * @param[out] mod_name_len Length of the module name, 0 if there is not any. * @param[out] name Points to the node name. * @param[out] nam_len Length of the node name. * @param[out] is_relative Flag to mark whether the nodeid is absolute or descendant. Must be -1 * on the first call, must not be changed between consecutive calls. * @param[out] has_predicate Flag to mark whether there is a predicate specified. It cannot be * based on the grammar, in those cases use NULL. * @param[in] extended Whether to accept an extended path (support for /[prefix:]*, //[prefix:]*, //[prefix:].). * * @return Number of characters successfully parsed, * positive on success, negative on failure. */ int parse_schema_nodeid(const char *id, const char **mod_name, int *mod_name_len, const char **name, int *nam_len, int *is_relative, int *has_predicate, int *all_desc, int extended) { int parsed = 0, ret; assert(id); assert(is_relative); if (has_predicate) { *has_predicate = 0; } if (id[0] != '/') { if (*is_relative != -1) { return -parsed; } else { *is_relative = 1; } if (!strncmp(id, "./", 2)) { parsed += 2; id += 2; } } else { if (*is_relative == -1) { *is_relative = 0; } ++parsed; ++id; } if ((ret = parse_node_identifier(id, mod_name, mod_name_len, name, nam_len, all_desc, extended)) < 1) { return -parsed + ret; } parsed += ret; id += ret; if ((id[0] == '[') && has_predicate) { *has_predicate = 1; } return parsed; } /** * @brief Parse schema predicate (special format internally used). * * predicate = "[" *WSP predicate-expr *WSP "]" * predicate-expr = "." / [prefix:]identifier / positive-integer / key-with-value * key-with-value = identifier *WSP "=" *WSP * ((DQUOTE string DQUOTE) / * (SQUOTE string SQUOTE)) * * @param[in] id Identifier to use. * @param[out] mod_name Points to the list key module name. * @param[out] mod_name_len Length of \p mod_name. * @param[out] name Points to the list key name. * @param[out] nam_len Length of \p name. * @param[out] value Points to the key value. If specified, key-with-value is expected. * @param[out] val_len Length of \p value. * @param[out] has_predicate Flag to mark whether there is another predicate specified. 
*/ int parse_schema_json_predicate(const char *id, const char **mod_name, int *mod_name_len, const char **name, int *nam_len, const char **value, int *val_len, int *has_predicate) { const char *ptr; int parsed = 0, ret; char quote; assert(id); if (mod_name) { *mod_name = NULL; } if (mod_name_len) { *mod_name_len = 0; } if (name) { *name = NULL; } if (nam_len) { *nam_len = 0; } if (value) { *value = NULL; } if (val_len) { *val_len = 0; } if (has_predicate) { *has_predicate = 0; } if (id[0] != '[') { return -parsed; } ++parsed; ++id; while (isspace(id[0])) { ++parsed; ++id; } /* identifier */ if (id[0] == '.') { ret = 1; if (name) { *name = id; } if (nam_len) { *nam_len = ret; } } else if (isdigit(id[0])) { if (id[0] == '0') { return -parsed; } ret = 1; while (isdigit(id[ret])) { ++ret; } if (name) { *name = id; } if (nam_len) { *nam_len = ret; } } else if ((ret = parse_node_identifier(id, mod_name, mod_name_len, name, nam_len, NULL, 0)) < 1) { return -parsed + ret; } parsed += ret; id += ret; while (isspace(id[0])) { ++parsed; ++id; } /* there is value as well */ if (id[0] == '=') { if (name && isdigit(**name)) { return -parsed; } ++parsed; ++id; while (isspace(id[0])) { ++parsed; ++id; } /* ((DQUOTE string DQUOTE) / (SQUOTE string SQUOTE)) */ if ((id[0] == '\"') || (id[0] == '\'')) { quote = id[0]; ++parsed; ++id; if ((ptr = strchr(id, quote)) == NULL) { return -parsed; } ret = ptr - id; if (value) { *value = id; } if (val_len) { *val_len = ret; } parsed += ret + 1; id += ret + 1; } else { return -parsed; } while (isspace(id[0])) { ++parsed; ++id; } } if (id[0] != ']') { return -parsed; } ++parsed; ++id; if ((id[0] == '[') && has_predicate) { *has_predicate = 1; } return parsed; } #ifdef LY_ENABLED_CACHE static int resolve_hash_table_find_equal(void *val1_p, void *val2_p, int mod, void *UNUSED(cb_data)) { struct lyd_node *val2, *elem2; struct parsed_pred pp; const char *str; int i; assert(!mod); (void)mod; pp = *((struct parsed_pred *)val1_p); val2 = *((struct lyd_node **)val2_p); if (val2->schema != pp.schema) { return 0; } switch (val2->schema->nodetype) { case LYS_CONTAINER: case LYS_LEAF: case LYS_ANYXML: case LYS_ANYDATA: return 1; case LYS_LEAFLIST: str = ((struct lyd_node_leaf_list *)val2)->value_str; if (!strncmp(str, pp.pred[0].value, pp.pred[0].val_len) && !str[pp.pred[0].val_len]) { return 1; } return 0; case LYS_LIST: assert(((struct lys_node_list *)val2->schema)->keys_size); assert(((struct lys_node_list *)val2->schema)->keys_size == pp.len); /* lists with keys, their equivalence is based on their keys */ elem2 = val2->child; /* the exact data order is guaranteed */ for (i = 0; elem2 && (i < pp.len); ++i) { /* module check */ if (pp.pred[i].mod_name) { if (strncmp(lyd_node_module(elem2)->name, pp.pred[i].mod_name, pp.pred[i].mod_name_len) || lyd_node_module(elem2)->name[pp.pred[i].mod_name_len]) { break; } } else { if (lyd_node_module(elem2) != lys_node_module(pp.schema)) { break; } } /* name check */ if (strncmp(elem2->schema->name, pp.pred[i].name, pp.pred[i].nam_len) || elem2->schema->name[pp.pred[i].nam_len]) { break; } /* value check */ str = ((struct lyd_node_leaf_list *)elem2)->value_str; if (strncmp(str, pp.pred[i].value, pp.pred[i].val_len) || str[pp.pred[i].val_len]) { break; } /* next key */ elem2 = elem2->next; } if (i == pp.len) { return 1; } return 0; default: break; } LOGINT(val2->schema->module->ctx); return 0; } static struct lyd_node * resolve_json_data_node_hash(struct lyd_node *parent, struct parsed_pred pp) { values_equal_cb prev_cb; struct lyd_node **ret 
= NULL; uint32_t hash; int i; assert(parent && parent->hash); /* set our value equivalence callback that does not require data nodes */ prev_cb = lyht_set_cb(parent->ht, resolve_hash_table_find_equal); /* get the hash of the searched node */ hash = dict_hash_multi(0, lys_node_module(pp.schema)->name, strlen(lys_node_module(pp.schema)->name)); hash = dict_hash_multi(hash, pp.schema->name, strlen(pp.schema->name)); if (pp.schema->nodetype == LYS_LEAFLIST) { assert((pp.len == 1) && (pp.pred[0].name[0] == '.') && (pp.pred[0].nam_len == 1)); /* leaf-list value in predicate */ hash = dict_hash_multi(hash, pp.pred[0].value, pp.pred[0].val_len); } else if (pp.schema->nodetype == LYS_LIST) { /* list keys in predicates */ for (i = 0; i < pp.len; ++i) { hash = dict_hash_multi(hash, pp.pred[i].value, pp.pred[i].val_len); } } hash = dict_hash_multi(hash, NULL, 0); /* try to find the node */ i = lyht_find(parent->ht, &pp, hash, (void **)&ret); assert(i || *ret); /* restore the original callback */ lyht_set_cb(parent->ht, prev_cb); return (i ? NULL : *ret); } #endif /** * @brief Resolve (find) a feature definition. Logs directly. * * @param[in] feat_name Feature name to resolve. * @param[in] len Length of \p feat_name. * @param[in] node Node with the if-feature expression. * @param[out] feature Pointer to be set to point to the feature definition, if feature not found * (return code 1), the pointer is untouched. * * @return 0 on success, 1 on forward reference, -1 on error. */ static int resolve_feature(const char *feat_name, uint16_t len, const struct lys_node *node, struct lys_feature **feature) { char *str; const char *mod_name, *name; int mod_name_len, nam_len, i, j; const struct lys_module *module; assert(feature); /* check prefix */ if ((i = parse_node_identifier(feat_name, &mod_name, &mod_name_len, &name, &nam_len, NULL, 0)) < 1) { LOGVAL(node->module->ctx, LYE_INCHAR, LY_VLOG_NONE, NULL, feat_name[-i], &feat_name[-i]); return -1; } module = lyp_get_module(lys_node_module(node), NULL, 0, mod_name, mod_name_len, 0); if (!module) { /* identity refers unknown data model */ LOGVAL(node->module->ctx, LYE_INMOD_LEN, LY_VLOG_NONE, NULL, mod_name_len, mod_name); return -1; } if (module != node->module && module == lys_node_module(node)) { /* first, try to search directly in submodule where the feature was mentioned */ for (j = 0; j < node->module->features_size; j++) { if (!strncmp(name, node->module->features[j].name, nam_len) && !node->module->features[j].name[nam_len]) { /* check status */ if (lyp_check_status(node->flags, lys_node_module(node), node->name, node->module->features[j].flags, node->module->features[j].module, node->module->features[j].name, NULL)) { return -1; } *feature = &node->module->features[j]; return 0; } } } /* search in the identified module ... */ for (j = 0; j < module->features_size; j++) { if (!strncmp(name, module->features[j].name, nam_len) && !module->features[j].name[nam_len]) { /* check status */ if (lyp_check_status(node->flags, lys_node_module(node), node->name, module->features[j].flags, module->features[j].module, module->features[j].name, NULL)) { return -1; } *feature = &module->features[j]; return 0; } } /* ... 
and all its submodules */ for (i = 0; i < module->inc_size && module->inc[i].submodule; i++) { for (j = 0; j < module->inc[i].submodule->features_size; j++) { if (!strncmp(name, module->inc[i].submodule->features[j].name, nam_len) && !module->inc[i].submodule->features[j].name[nam_len]) { /* check status */ if (lyp_check_status(node->flags, lys_node_module(node), node->name, module->inc[i].submodule->features[j].flags, module->inc[i].submodule->features[j].module, module->inc[i].submodule->features[j].name, NULL)) { return -1; } *feature = &module->inc[i].submodule->features[j]; return 0; } } } /* not found */ str = strndup(feat_name, len); LOGVAL(node->module->ctx, LYE_INRESOLV, LY_VLOG_NONE, NULL, "feature", str); free(str); return 1; } /* * @return * - 1 if enabled * - 0 if disabled */ static int resolve_feature_value(const struct lys_feature *feat) { int i; for (i = 0; i < feat->iffeature_size; i++) { if (!resolve_iffeature(&feat->iffeature[i])) { return 0; } } return feat->flags & LYS_FENABLED ? 1 : 0; } static int resolve_iffeature_recursive(struct lys_iffeature *expr, int *index_e, int *index_f) { uint8_t op; int a, b; op = iff_getop(expr->expr, *index_e); (*index_e)++; switch (op) { case LYS_IFF_F: /* resolve feature */ return resolve_feature_value(expr->features[(*index_f)++]); case LYS_IFF_NOT: /* invert result */ return resolve_iffeature_recursive(expr, index_e, index_f) ? 0 : 1; case LYS_IFF_AND: case LYS_IFF_OR: a = resolve_iffeature_recursive(expr, index_e, index_f); b = resolve_iffeature_recursive(expr, index_e, index_f); if (op == LYS_IFF_AND) { return a && b; } else { /* LYS_IFF_OR */ return a || b; } } return 0; } int resolve_iffeature(struct lys_iffeature *expr) { int index_e = 0, index_f = 0; if (expr->expr) { return resolve_iffeature_recursive(expr, &index_e, &index_f); } return 0; } struct iff_stack { int size; int index; /* first empty item */ uint8_t *stack; }; static int iff_stack_push(struct iff_stack *stack, uint8_t value) { if (stack->index == stack->size) { stack->size += 4; stack->stack = ly_realloc(stack->stack, stack->size * sizeof *stack->stack); LY_CHECK_ERR_RETURN(!stack->stack, LOGMEM(NULL); stack->size = 0, EXIT_FAILURE); } stack->stack[stack->index++] = value; return EXIT_SUCCESS; } static uint8_t iff_stack_pop(struct iff_stack *stack) { stack->index--; return stack->stack[stack->index]; } static void iff_stack_clean(struct iff_stack *stack) { stack->size = 0; free(stack->stack); } static void iff_setop(uint8_t *list, uint8_t op, int pos) { uint8_t *item; uint8_t mask = 3; assert(pos >= 0); assert(op <= 3); /* max 2 bits */ item = &list[pos / 4]; mask = mask << 2 * (pos % 4); *item = (*item) & ~mask; *item = (*item) | (op << 2 * (pos % 4)); } uint8_t iff_getop(uint8_t *list, int pos) { uint8_t *item; uint8_t mask = 3, result; assert(pos >= 0); item = &list[pos / 4]; result = (*item) & (mask << 2 * (pos % 4)); return result >> 2 * (pos % 4); } #define LYS_IFF_LP 0x04 /* ( */ #define LYS_IFF_RP 0x08 /* ) */ /* internal structure for passing data for UNRES_IFFEAT */ struct unres_iffeat_data { struct lys_node *node; const char *fname; int infeature; }; void resolve_iffeature_getsizes(struct lys_iffeature *iffeat, unsigned int *expr_size, unsigned int *feat_size) { unsigned int e = 0, f = 0, r = 0; uint8_t op; assert(iffeat); if (!iffeat->expr) { goto result; } do { op = iff_getop(iffeat->expr, e++); switch (op) { case LYS_IFF_NOT: if (!r) { r += 1; } break; case LYS_IFF_AND: case LYS_IFF_OR: if (!r) { r += 2; } else { r += 1; } break; case LYS_IFF_F: f++; 
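            /* a feature operand satisfies one of the operand slots expected by the operators seen so far */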
if (r) { r--; } break; } } while(r); result: if (expr_size) { *expr_size = e; } if (feat_size) { *feat_size = f; } } int resolve_iffeature_compile(struct lys_iffeature *iffeat_expr, const char *value, struct lys_node *node, int infeature, struct unres_schema *unres) { const char *c = value; int r, rc = EXIT_FAILURE; int i, j, last_not, checkversion = 0; unsigned int f_size = 0, expr_size = 0, f_exp = 1; uint8_t op; struct iff_stack stack = {0, 0, NULL}; struct unres_iffeat_data *iff_data; struct ly_ctx *ctx = node->module->ctx; assert(c); if (isspace(c[0])) { LOGVAL(ctx, LYE_INCHAR, LY_VLOG_NONE, NULL, c[0], c); return EXIT_FAILURE; } /* pre-parse the expression to get sizes for arrays, also do some syntax checks of the expression */ for (i = j = last_not = 0; c[i]; i++) { if (c[i] == '(') { checkversion = 1; j++; continue; } else if (c[i] == ')') { j--; continue; } else if (isspace(c[i])) { continue; } if (!strncmp(&c[i], "not", r = 3) || !strncmp(&c[i], "and", r = 3) || !strncmp(&c[i], "or", r = 2)) { if (c[i + r] == '\0') { LOGVAL(ctx, LYE_INARG, LY_VLOG_NONE, NULL, value, "if-feature"); return EXIT_FAILURE; } else if (!isspace(c[i + r])) { /* feature name starting with the not/and/or */ last_not = 0; f_size++; } else if (c[i] == 'n') { /* not operation */ if (last_not) { /* double not */ expr_size = expr_size - 2; last_not = 0; } else { last_not = 1; } } else { /* and, or */ f_exp++; /* not a not operation */ last_not = 0; } i += r; } else { f_size++; last_not = 0; } expr_size++; while (!isspace(c[i])) { if (!c[i] || c[i] == ')') { i--; break; } i++; } } if (j || f_exp != f_size) { /* not matching count of ( and ) */ LOGVAL(ctx, LYE_INARG, LY_VLOG_NONE, NULL, value, "if-feature"); return EXIT_FAILURE; } if (checkversion || expr_size > 1) { /* check that we have 1.1 module */ if (node->module->version != LYS_VERSION_1_1) { LOGVAL(ctx, LYE_INARG, LY_VLOG_NONE, NULL, value, "if-feature"); LOGVAL(ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "YANG 1.1 if-feature expression found in 1.0 module."); return EXIT_FAILURE; } } /* allocate the memory */ iffeat_expr->expr = calloc((j = (expr_size / 4) + ((expr_size % 4) ? 
1 : 0)), sizeof *iffeat_expr->expr); iffeat_expr->features = calloc(f_size, sizeof *iffeat_expr->features); stack.stack = malloc(expr_size * sizeof *stack.stack); LY_CHECK_ERR_GOTO(!stack.stack || !iffeat_expr->expr || !iffeat_expr->features, LOGMEM(ctx), error); stack.size = expr_size; f_size--; expr_size--; /* used as indexes from now */ for (i--; i >= 0; i--) { if (c[i] == ')') { /* push it on stack */ iff_stack_push(&stack, LYS_IFF_RP); continue; } else if (c[i] == '(') { /* pop from the stack into result all operators until ) */ while((op = iff_stack_pop(&stack)) != LYS_IFF_RP) { iff_setop(iffeat_expr->expr, op, expr_size--); } continue; } else if (isspace(c[i])) { continue; } /* end operator or operand -> find beginning and get what is it */ j = i + 1; while (i >= 0 && !isspace(c[i]) && c[i] != '(') { i--; } i++; /* get back by one step */ if (!strncmp(&c[i], "not", 3) && isspace(c[i + 3])) { if (stack.index && stack.stack[stack.index - 1] == LYS_IFF_NOT) { /* double not */ iff_stack_pop(&stack); } else { /* not has the highest priority, so do not pop from the stack * as in case of AND and OR */ iff_stack_push(&stack, LYS_IFF_NOT); } } else if (!strncmp(&c[i], "and", 3) && isspace(c[i + 3])) { /* as for OR - pop from the stack all operators with the same or higher * priority and store them to the result, then push the AND to the stack */ while (stack.index && stack.stack[stack.index - 1] <= LYS_IFF_AND) { op = iff_stack_pop(&stack); iff_setop(iffeat_expr->expr, op, expr_size--); } iff_stack_push(&stack, LYS_IFF_AND); } else if (!strncmp(&c[i], "or", 2) && isspace(c[i + 2])) { while (stack.index && stack.stack[stack.index - 1] <= LYS_IFF_OR) { op = iff_stack_pop(&stack); iff_setop(iffeat_expr->expr, op, expr_size--); } iff_stack_push(&stack, LYS_IFF_OR); } else { /* feature name, length is j - i */ /* add it to the result */ iff_setop(iffeat_expr->expr, LYS_IFF_F, expr_size--); /* now get the link to the feature definition. Since it can be * forward referenced, we have to keep the feature name in auxiliary * structure passed into unres */ iff_data = malloc(sizeof *iff_data); LY_CHECK_ERR_GOTO(!iff_data, LOGMEM(ctx), error); iff_data->node = node; iff_data->fname = lydict_insert(node->module->ctx, &c[i], j - i); iff_data->infeature = infeature; r = unres_schema_add_node(node->module, unres, &iffeat_expr->features[f_size], UNRES_IFFEAT, (struct lys_node *)iff_data); f_size--; if (r == -1) { lydict_remove(node->module->ctx, iff_data->fname); free(iff_data); goto error; } } } while (stack.index) { op = iff_stack_pop(&stack); iff_setop(iffeat_expr->expr, op, expr_size--); } if (++expr_size || ++f_size) { /* not all expected operators and operands found */ LOGVAL(ctx, LYE_INARG, LY_VLOG_NONE, NULL, value, "if-feature"); rc = EXIT_FAILURE; } else { rc = EXIT_SUCCESS; } error: /* cleanup */ iff_stack_clean(&stack); return rc; } /** * @brief Resolve (find) a data node based on a schema-nodeid. * * Used for resolving unique statements - so id is expected to be relative and local (without reference to a different * module). 
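 *
 * A minimal illustration (hypothetical schema): for a list with
 * unique "con/leaf1 con/leaf2", this is called with nodeid "con/leaf1" and a list
 * instance as start, and it returns the matching leaf data node, or NULL when that
 * leaf is not instantiated.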
* */ struct lyd_node * resolve_data_descendant_schema_nodeid(const char *nodeid, struct lyd_node *start) { char *str, *token, *p; struct lyd_node *result = NULL, *iter; const struct lys_node *schema = NULL; assert(nodeid && start); if (nodeid[0] == '/') { return NULL; } str = p = strdup(nodeid); LY_CHECK_ERR_RETURN(!str, LOGMEM(start->schema->module->ctx), NULL); while (p) { token = p; p = strchr(p, '/'); if (p) { *p = '\0'; p++; } if (p) { /* inner node */ if (resolve_descendant_schema_nodeid(token, schema ? schema->child : start->schema, LYS_CONTAINER | LYS_CHOICE | LYS_CASE | LYS_LEAF, 0, &schema) || !schema) { result = NULL; break; } if (schema->nodetype & (LYS_CHOICE | LYS_CASE)) { continue; } } else { /* final node */ if (resolve_descendant_schema_nodeid(token, schema ? schema->child : start->schema, LYS_LEAF, 0, &schema) || !schema) { result = NULL; break; } } LY_TREE_FOR(result ? result->child : start, iter) { if (iter->schema == schema) { /* move in data tree according to returned schema */ result = iter; break; } } if (!iter) { /* instance not found */ result = NULL; break; } } free(str); return result; } int schema_nodeid_siblingcheck(const struct lys_node *sibling, const struct lys_module *cur_module, const char *mod_name, int mod_name_len, const char *name, int nam_len) { const struct lys_module *prefix_mod; /* handle special names */ if (name[0] == '*') { return 2; } else if (name[0] == '.') { return 3; } /* name check */ if (strncmp(name, sibling->name, nam_len) || sibling->name[nam_len]) { return 1; } /* module check */ if (mod_name) { prefix_mod = lyp_get_module(cur_module, NULL, 0, mod_name, mod_name_len, 0); if (!prefix_mod) { return -1; } } else { prefix_mod = cur_module; } if (prefix_mod != lys_node_module(sibling)) { return 1; } /* match */ return 0; } /* keys do not have to be ordered and do not have to be all of them */ static int resolve_extended_schema_nodeid_predicate(const char *nodeid, const struct lys_node *node, const struct lys_module *cur_module, int *nodeid_end) { int mod_len, nam_len, has_predicate, r, i; const char *model, *name; struct lys_node_list *list; if (!(node->nodetype & (LYS_LIST | LYS_LEAFLIST))) { return 1; } list = (struct lys_node_list *)node; do { r = parse_schema_json_predicate(nodeid, &model, &mod_len, &name, &nam_len, NULL, NULL, &has_predicate); if (r < 1) { LOGVAL(cur_module->ctx, LYE_PATH_INCHAR, LY_VLOG_NONE, NULL, nodeid[r], &nodeid[r]); return -1; } nodeid += r; if (node->nodetype == LYS_LEAFLIST) { /* just check syntax */ if (model || !name || (name[0] != '.') || has_predicate) { return 1; } break; } else { /* check the key */ for (i = 0; i < list->keys_size; ++i) { if (strncmp(list->keys[i]->name, name, nam_len) || list->keys[i]->name[nam_len]) { continue; } if (model) { if (strncmp(lys_node_module((struct lys_node *)list->keys[i])->name, model, mod_len) || lys_node_module((struct lys_node *)list->keys[i])->name[mod_len]) { continue; } } else { if (lys_node_module((struct lys_node *)list->keys[i]) != cur_module) { continue; } } /* match */ break; } if (i == list->keys_size) { return 1; } } } while (has_predicate); if (!nodeid[0]) { *nodeid_end = 1; } return 0; } /* start_parent - relative, module - absolute, -1 error (logged), EXIT_SUCCESS ok */ int resolve_schema_nodeid(const char *nodeid, const struct lys_node *start_parent, const struct lys_module *cur_module, struct ly_set **ret, int extended, int no_node_error) { const char *name, *mod_name, *id, *backup_mod_name = NULL, *yang_data_name = NULL; const struct lys_node 
*sibling, *next, *elem; struct lys_node_augment *last_aug; int r, nam_len, mod_name_len = 0, is_relative = -1, all_desc, has_predicate, nodeid_end = 0; int yang_data_name_len, backup_mod_name_len = 0; /* resolved import module from the start module, it must match the next node-name-match sibling */ const struct lys_module *start_mod, *aux_mod = NULL; char *str; struct ly_ctx *ctx; assert(nodeid && (start_parent || cur_module) && ret); *ret = NULL; if (!cur_module) { cur_module = lys_node_module(start_parent); } ctx = cur_module->ctx; id = nodeid; r = parse_schema_nodeid(id, &mod_name, &mod_name_len, &name, &nam_len, &is_relative, NULL, NULL, 1); if (r < 1) { LOGVAL(ctx, LYE_PATH_INCHAR, LY_VLOG_NONE, NULL, id[r], &id[r]); return -1; } if (name[0] == '#') { if (is_relative) { LOGVAL(ctx, LYE_PATH_INCHAR, LY_VLOG_NONE, NULL, '#', name); return -1; } yang_data_name = name + 1; yang_data_name_len = nam_len - 1; backup_mod_name = mod_name; backup_mod_name_len = mod_name_len; id += r; } else { is_relative = -1; } r = parse_schema_nodeid(id, &mod_name, &mod_name_len, &name, &nam_len, &is_relative, &has_predicate, (extended ? &all_desc : NULL), extended); if (r < 1) { LOGVAL(ctx, LYE_PATH_INCHAR, LY_VLOG_NONE, NULL, id[r], &id[r]); return -1; } id += r; if (backup_mod_name) { mod_name = backup_mod_name; mod_name_len = backup_mod_name_len; } if (is_relative && !start_parent) { LOGVAL(ctx, LYE_SPEC, LY_VLOG_STR, nodeid, "Starting node must be provided for relative paths."); return -1; } /* descendant-schema-nodeid */ if (is_relative) { cur_module = start_mod = lys_node_module(start_parent); /* absolute-schema-nodeid */ } else { start_mod = lyp_get_module(cur_module, NULL, 0, mod_name, mod_name_len, 0); if (!start_mod) { str = strndup(mod_name, mod_name_len); LOGVAL(ctx, LYE_PATH_INMOD, LY_VLOG_STR, str); free(str); return -1; } start_parent = NULL; if (yang_data_name) { start_parent = lyp_get_yang_data_template(start_mod, yang_data_name, yang_data_name_len); if (!start_parent) { str = strndup(nodeid, (yang_data_name + yang_data_name_len) - nodeid); LOGVAL(ctx, LYE_PATH_INNODE, LY_VLOG_STR, str); free(str); return -1; } } } while (1) { sibling = NULL; last_aug = NULL; if (start_parent) { if (mod_name && (strncmp(mod_name, cur_module->name, mod_name_len) || (mod_name_len != (signed)strlen(cur_module->name)))) { /* we are getting into another module (augment) */ aux_mod = lyp_get_module(cur_module, NULL, 0, mod_name, mod_name_len, 0); if (!aux_mod) { str = strndup(mod_name, mod_name_len); LOGVAL(ctx, LYE_PATH_INMOD, LY_VLOG_STR, str); free(str); return -1; } } else { /* there is no mod_name, so why are we checking augments again? * because this module may be not implemented and it augments something in another module and * there is another augment augmenting that previous one */ aux_mod = cur_module; } /* look into augments */ if (!extended) { get_next_augment: last_aug = lys_getnext_target_aug(last_aug, aux_mod, start_parent); } } while ((sibling = lys_getnext(sibling, (last_aug ? 
(struct lys_node *)last_aug : start_parent), start_mod, LYS_GETNEXT_WITHCHOICE | LYS_GETNEXT_WITHCASE | LYS_GETNEXT_WITHINOUT | LYS_GETNEXT_PARENTUSES | LYS_GETNEXT_NOSTATECHECK))) { r = schema_nodeid_siblingcheck(sibling, cur_module, mod_name, mod_name_len, name, nam_len); /* resolve predicate */ if (extended && ((r == 0) || (r == 2) || (r == 3)) && has_predicate) { r = resolve_extended_schema_nodeid_predicate(id, sibling, cur_module, &nodeid_end); if (r == 1) { continue; } else if (r == -1) { return -1; } } else if (!id[0]) { nodeid_end = 1; } if (r == 0) { /* one matching result */ if (nodeid_end) { *ret = ly_set_new(); LY_CHECK_ERR_RETURN(!*ret, LOGMEM(ctx), -1); ly_set_add(*ret, (void *)sibling, LY_SET_OPT_USEASLIST); } else { if (sibling->nodetype & (LYS_LEAF | LYS_LEAFLIST | LYS_ANYDATA)) { return -1; } start_parent = sibling; } break; } else if (r == 1) { continue; } else if (r == 2) { /* "*" */ if (!*ret) { *ret = ly_set_new(); LY_CHECK_ERR_RETURN(!*ret, LOGMEM(ctx), -1); } ly_set_add(*ret, (void *)sibling, LY_SET_OPT_USEASLIST); if (all_desc) { LY_TREE_DFS_BEGIN(sibling, next, elem) { if (elem != sibling) { ly_set_add(*ret, (void *)elem, LY_SET_OPT_USEASLIST); } LY_TREE_DFS_END(sibling, next, elem); } } } else if (r == 3) { /* "." */ if (!*ret) { *ret = ly_set_new(); LY_CHECK_ERR_RETURN(!*ret, LOGMEM(ctx), -1); ly_set_add(*ret, (void *)start_parent, LY_SET_OPT_USEASLIST); } ly_set_add(*ret, (void *)sibling, LY_SET_OPT_USEASLIST); if (all_desc) { LY_TREE_DFS_BEGIN(sibling, next, elem) { if (elem != sibling) { ly_set_add(*ret, (void *)elem, LY_SET_OPT_USEASLIST); } LY_TREE_DFS_END(sibling, next, elem); } } } else { LOGINT(ctx); return -1; } } /* skip predicate */ if (extended && has_predicate) { while (id[0] == '[') { id = strchr(id, ']'); if (!id) { LOGINT(ctx); return -1; } ++id; } } if (nodeid_end && ((r == 0) || (r == 2) || (r == 3))) { return EXIT_SUCCESS; } /* no match */ if (!sibling) { if (last_aug) { /* it still could be in another augment */ goto get_next_augment; } if (no_node_error) { str = strndup(nodeid, (name - nodeid) + nam_len); LOGVAL(ctx, LYE_PATH_INNODE, LY_VLOG_STR, str); free(str); return -1; } *ret = NULL; return EXIT_SUCCESS; } r = parse_schema_nodeid(id, &mod_name, &mod_name_len, &name, &nam_len, &is_relative, &has_predicate, (extended ? 
&all_desc : NULL), extended); if (r < 1) { LOGVAL(ctx, LYE_PATH_INCHAR, LY_VLOG_NONE, NULL, id[r], &id[r]); return -1; } id += r; } /* cannot get here */ LOGINT(ctx); return -1; } /* unique, refine, * >0 - unexpected char on position (ret - 1), * 0 - ok (but ret can still be NULL), * -1 - error, * -2 - violated no_innerlist */ int resolve_descendant_schema_nodeid(const char *nodeid, const struct lys_node *start, int ret_nodetype, int no_innerlist, const struct lys_node **ret) { const char *name, *mod_name, *id; const struct lys_node *sibling, *start_parent; int r, nam_len, mod_name_len, is_relative = -1; /* resolved import module from the start module, it must match the next node-name-match sibling */ const struct lys_module *module; assert(nodeid && ret); assert(!(ret_nodetype & (LYS_USES | LYS_AUGMENT | LYS_GROUPING))); if (!start) { /* leaf not found */ return 0; } id = nodeid; module = lys_node_module(start); if ((r = parse_schema_nodeid(id, &mod_name, &mod_name_len, &name, &nam_len, &is_relative, NULL, NULL, 0)) < 1) { return ((id - nodeid) - r) + 1; } id += r; if (!is_relative) { return -1; } start_parent = lys_parent(start); while ((start_parent->nodetype == LYS_USES) && lys_parent(start_parent)) { start_parent = lys_parent(start_parent); } while (1) { sibling = NULL; while ((sibling = lys_getnext(sibling, start_parent, module, LYS_GETNEXT_WITHCHOICE | LYS_GETNEXT_WITHCASE | LYS_GETNEXT_PARENTUSES | LYS_GETNEXT_NOSTATECHECK))) { r = schema_nodeid_siblingcheck(sibling, module, mod_name, mod_name_len, name, nam_len); if (r == 0) { if (!id[0]) { if (!(sibling->nodetype & ret_nodetype)) { /* wrong node type, too bad */ continue; } *ret = sibling; return EXIT_SUCCESS; } start_parent = sibling; break; } else if (r == 1) { continue; } else { return -1; } } /* no match */ if (!sibling) { *ret = NULL; return EXIT_SUCCESS; } else if (no_innerlist && sibling->nodetype == LYS_LIST) { *ret = NULL; return -2; } if ((r = parse_schema_nodeid(id, &mod_name, &mod_name_len, &name, &nam_len, &is_relative, NULL, NULL, 0)) < 1) { return ((id - nodeid) - r) + 1; } id += r; } /* cannot get here */ LOGINT(module->ctx); return -1; } /* choice default */ int resolve_choice_default_schema_nodeid(const char *nodeid, const struct lys_node *start, const struct lys_node **ret) { /* cannot actually be a path */ if (strchr(nodeid, '/')) { return -1; } return resolve_descendant_schema_nodeid(nodeid, start, LYS_NO_RPC_NOTIF_NODE, 0, ret); } /* uses, -1 error, EXIT_SUCCESS ok (but ret can still be NULL), >0 unexpected char on ret - 1 */ static int resolve_uses_schema_nodeid(const char *nodeid, const struct lys_node *start, const struct lys_node_grp **ret) { const struct lys_module *module; const char *mod_prefix, *name; int i, mod_prefix_len, nam_len; /* parse the identifier, it must be parsed on one call */ if (((i = parse_node_identifier(nodeid, &mod_prefix, &mod_prefix_len, &name, &nam_len, NULL, 0)) < 1) || nodeid[i]) { return -i + 1; } module = lyp_get_module(start->module, mod_prefix, mod_prefix_len, NULL, 0, 0); if (!module) { return -1; } if (module != lys_main_module(start->module)) { start = module->data; } *ret = lys_find_grouping_up(name, (struct lys_node *)start); return EXIT_SUCCESS; } int resolve_absolute_schema_nodeid(const char *nodeid, const struct lys_module *module, int ret_nodetype, const struct lys_node **ret) { const char *name, *mod_name, *id; const struct lys_node *sibling, *start_parent; int r, nam_len, mod_name_len, is_relative = -1; const struct lys_module *abs_start_mod; assert(nodeid && 
module && ret); assert(!(ret_nodetype & (LYS_USES | LYS_AUGMENT)) && ((ret_nodetype == LYS_GROUPING) || !(ret_nodetype & LYS_GROUPING))); id = nodeid; start_parent = NULL; if ((r = parse_schema_nodeid(id, &mod_name, &mod_name_len, &name, &nam_len, &is_relative, NULL, NULL, 0)) < 1) { return ((id - nodeid) - r) + 1; } id += r; if (is_relative) { return -1; } abs_start_mod = lyp_get_module(module, NULL, 0, mod_name, mod_name_len, 0); if (!abs_start_mod) { return -1; } while (1) { sibling = NULL; while ((sibling = lys_getnext(sibling, start_parent, abs_start_mod, LYS_GETNEXT_WITHCHOICE | LYS_GETNEXT_WITHCASE | LYS_GETNEXT_WITHINOUT | LYS_GETNEXT_WITHGROUPING | LYS_GETNEXT_NOSTATECHECK))) { r = schema_nodeid_siblingcheck(sibling, module, mod_name, mod_name_len, name, nam_len); if (r == 0) { if (!id[0]) { if (!(sibling->nodetype & ret_nodetype)) { /* wrong node type, too bad */ continue; } *ret = sibling; return EXIT_SUCCESS; } start_parent = sibling; break; } else if (r == 1) { continue; } else { return -1; } } /* no match */ if (!sibling) { *ret = NULL; return EXIT_SUCCESS; } if ((r = parse_schema_nodeid(id, &mod_name, &mod_name_len, &name, &nam_len, &is_relative, NULL, NULL, 0)) < 1) { return ((id - nodeid) - r) + 1; } id += r; } /* cannot get here */ LOGINT(module->ctx); return -1; } static int resolve_json_schema_list_predicate(const char *predicate, const struct lys_node_list *list, int *parsed) { const char *mod_name, *name; int mod_name_len, nam_len, has_predicate, i; struct lys_node *key; if (((i = parse_schema_json_predicate(predicate, &mod_name, &mod_name_len, &name, &nam_len, NULL, NULL, &has_predicate)) < 1) || !strncmp(name, ".", nam_len)) { LOGVAL(list->module->ctx, LYE_PATH_INCHAR, LY_VLOG_NONE, NULL, predicate[-i], &predicate[-i]); return -1; } predicate += i; *parsed += i; if (!isdigit(name[0])) { for (i = 0; i < list->keys_size; ++i) { key = (struct lys_node *)list->keys[i]; if (!strncmp(key->name, name, nam_len) && !key->name[nam_len]) { break; } } if (i == list->keys_size) { LOGVAL(list->module->ctx, LYE_PATH_INKEY, LY_VLOG_NONE, NULL, name); return -1; } } /* more predicates? 
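     * if so, recursively check the rest of the key predicates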
*/ if (has_predicate) { return resolve_json_schema_list_predicate(predicate, list, parsed); } return 0; } /* cannot return LYS_GROUPING, LYS_AUGMENT, LYS_USES, logs directly */ const struct lys_node * resolve_json_nodeid(const char *nodeid, struct ly_ctx *ctx, const struct lys_node *start, int output) { char *str; const char *name, *mod_name, *id, *backup_mod_name = NULL, *yang_data_name = NULL; const struct lys_node *sibling, *start_parent, *parent; int r, nam_len, mod_name_len, is_relative = -1, has_predicate; int yang_data_name_len, backup_mod_name_len; /* resolved import module from the start module, it must match the next node-name-match sibling */ const struct lys_module *prefix_mod, *module, *prev_mod; assert(nodeid && (ctx || start)); if (!ctx) { ctx = start->module->ctx; } id = nodeid; if ((r = parse_schema_nodeid(id, &mod_name, &mod_name_len, &name, &nam_len, &is_relative, NULL, NULL, 1)) < 1) { LOGVAL(ctx, LYE_PATH_INCHAR, LY_VLOG_NONE, NULL, id[-r], &id[-r]); return NULL; } if (name[0] == '#') { if (is_relative) { LOGVAL(ctx, LYE_PATH_INCHAR, LY_VLOG_NONE, NULL, '#', name); return NULL; } yang_data_name = name + 1; yang_data_name_len = nam_len - 1; backup_mod_name = mod_name; backup_mod_name_len = mod_name_len; id += r; } else { is_relative = -1; } if ((r = parse_schema_nodeid(id, &mod_name, &mod_name_len, &name, &nam_len, &is_relative, &has_predicate, NULL, 0)) < 1) { LOGVAL(ctx, LYE_PATH_INCHAR, LY_VLOG_NONE, NULL, id[-r], &id[-r]); return NULL; } id += r; if (backup_mod_name) { mod_name = backup_mod_name; mod_name_len = backup_mod_name_len; } if (is_relative) { assert(start); start_parent = start; while (start_parent && (start_parent->nodetype == LYS_USES)) { start_parent = lys_parent(start_parent); } module = start->module; } else { if (!mod_name) { str = strndup(nodeid, (name + nam_len) - nodeid); LOGVAL(ctx, LYE_PATH_MISSMOD, LY_VLOG_STR, nodeid); free(str); return NULL; } str = strndup(mod_name, mod_name_len); module = ly_ctx_get_module(ctx, str, NULL, 1); free(str); if (!module) { str = strndup(nodeid, (mod_name + mod_name_len) - nodeid); LOGVAL(ctx, LYE_PATH_INMOD, LY_VLOG_STR, str); free(str); return NULL; } start_parent = NULL; if (yang_data_name) { start_parent = lyp_get_yang_data_template(module, yang_data_name, yang_data_name_len); if (!start_parent) { str = strndup(nodeid, (yang_data_name + yang_data_name_len) - nodeid); LOGVAL(ctx, LYE_PATH_INNODE, LY_VLOG_STR, str); free(str); return NULL; } } /* now it's as if there was no module name */ mod_name = NULL; mod_name_len = 0; } prev_mod = module; while (1) { sibling = NULL; while ((sibling = lys_getnext(sibling, start_parent, module, 0))) { /* name match */ if (sibling->name && !strncmp(name, sibling->name, nam_len) && !sibling->name[nam_len]) { /* output check */ for (parent = lys_parent(sibling); parent && !(parent->nodetype & (LYS_INPUT | LYS_OUTPUT)); parent = lys_parent(parent)); if (parent) { if (output && (parent->nodetype == LYS_INPUT)) { continue; } else if (!output && (parent->nodetype == LYS_OUTPUT)) { continue; } } /* module check */ if (mod_name) { /* will also find an augment module */ prefix_mod = ly_ctx_nget_module(ctx, mod_name, mod_name_len, NULL, 1); if (!prefix_mod) { str = strndup(nodeid, (mod_name + mod_name_len) - nodeid); LOGVAL(ctx, LYE_PATH_INMOD, LY_VLOG_STR, str); free(str); return NULL; } } else { prefix_mod = prev_mod; } if (prefix_mod != lys_node_module(sibling)) { continue; } /* do we have some predicates on it? 
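     * leafs/leaf-lists may take a value predicate (only its syntax is checked here),
     * lists take key or position predicates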
*/ if (has_predicate) { r = 0; if (sibling->nodetype & (LYS_LEAF | LYS_LEAFLIST)) { if ((r = parse_schema_json_predicate(id, NULL, NULL, NULL, NULL, NULL, NULL, &has_predicate)) < 1) { LOGVAL(ctx, LYE_PATH_INCHAR, LY_VLOG_NONE, NULL, id[-r], &id[-r]); return NULL; } } else if (sibling->nodetype == LYS_LIST) { if (resolve_json_schema_list_predicate(id, (const struct lys_node_list *)sibling, &r)) { return NULL; } } else { LOGVAL(ctx, LYE_PATH_INCHAR, LY_VLOG_NONE, NULL, id[0], id); return NULL; } id += r; } /* the result node? */ if (!id[0]) { return sibling; } /* move down the tree, if possible */ if (sibling->nodetype & (LYS_LEAF | LYS_LEAFLIST | LYS_ANYDATA)) { LOGVAL(ctx, LYE_PATH_INCHAR, LY_VLOG_NONE, NULL, id[0], id); return NULL; } start_parent = sibling; /* update prev mod */ prev_mod = (start_parent->child ? lys_node_module(start_parent->child) : module); break; } } /* no match */ if (!sibling) { str = strndup(nodeid, (name + nam_len) - nodeid); LOGVAL(ctx, LYE_PATH_INNODE, LY_VLOG_STR, str); free(str); return NULL; } if ((r = parse_schema_nodeid(id, &mod_name, &mod_name_len, &name, &nam_len, &is_relative, &has_predicate, NULL, 0)) < 1) { LOGVAL(ctx, LYE_PATH_INCHAR, LY_VLOG_NONE, NULL, id[-r], &id[-r]); return NULL; } id += r; } /* cannot get here */ LOGINT(ctx); return NULL; } static int resolve_partial_json_data_list_predicate(struct parsed_pred pp, struct lyd_node *node, int position) { uint16_t i; struct lyd_node_leaf_list *key; struct lys_node_list *slist; struct ly_ctx *ctx; assert(node); assert(node->schema->nodetype == LYS_LIST); assert(pp.len); ctx = node->schema->module->ctx; slist = (struct lys_node_list *)node->schema; /* is the predicate a number? */ if (isdigit(pp.pred[0].name[0])) { if (position == atoi(pp.pred[0].name)) { /* match */ return 0; } else { /* not a match */ return 1; } } key = (struct lyd_node_leaf_list *)node->child; if (!key) { /* it is not a position, so we need a key for it to be a match */ return 1; } /* go through all the keys */ for (i = 0; i < slist->keys_size; ++i) { if (strncmp(key->schema->name, pp.pred[i].name, pp.pred[i].nam_len) || key->schema->name[pp.pred[i].nam_len]) { LOGVAL(ctx, LYE_PATH_INKEY, LY_VLOG_NONE, NULL, pp.pred[i].name); return -1; } if (pp.pred[i].mod_name) { /* specific module, check that the found key is from that module */ if (strncmp(lyd_node_module((struct lyd_node *)key)->name, pp.pred[i].mod_name, pp.pred[i].mod_name_len) || lyd_node_module((struct lyd_node *)key)->name[pp.pred[i].mod_name_len]) { LOGVAL(ctx, LYE_PATH_INKEY, LY_VLOG_NONE, NULL, pp.pred[i].name); return -1; } /* but if the module is the same as the parent, it should have been omitted */ if (lyd_node_module((struct lyd_node *)key) == lyd_node_module(node)) { LOGVAL(ctx, LYE_PATH_INKEY, LY_VLOG_NONE, NULL, pp.pred[i].name); return -1; } } else { /* no module, so it must be the same as the list (parent) */ if (lyd_node_module((struct lyd_node *)key) != lyd_node_module(node)) { LOGVAL(ctx, LYE_PATH_INKEY, LY_VLOG_NONE, NULL, pp.pred[i].name); return -1; } } /* value does not match */ if (strncmp(key->value_str, pp.pred[i].value, pp.pred[i].val_len) || key->value_str[pp.pred[i].val_len]) { return 1; } key = (struct lyd_node_leaf_list *)key->next; } return 0; } /** * @brief get the closest parent of the node (or the node itself) identified by the nodeid (path) * * @param[in] nodeid Node data path to find * @param[in] llist_value If the \p nodeid identifies leaf-list, this is expected value of the leaf-list instance. 
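 * @param[in] start Data node to start the search from; relative paths are resolved among its
 *            children, absolute paths from the top level of its data tree.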
* @param[in] options Bitmask of options flags, see @ref pathoptions. * @param[out] parsed Number of characters processed in \p id * @return The closes parent (or the node itself) from the path */ struct lyd_node * resolve_partial_json_data_nodeid(const char *nodeid, const char *llist_value, struct lyd_node *start, int options, int *parsed) { const char *id, *mod_name, *name, *data_val, *llval; int r, ret, mod_name_len, nam_len, is_relative = -1, list_instance_position; int has_predicate, last_parsed = 0, llval_len; struct lyd_node *sibling, *last_match = NULL; struct lyd_node_leaf_list *llist; const struct lys_module *prev_mod; struct ly_ctx *ctx; const struct lys_node *ssibling, *sparent; struct lys_node_list *slist; struct parsed_pred pp; assert(nodeid && start && parsed); memset(&pp, 0, sizeof pp); ctx = start->schema->module->ctx; id = nodeid; /* parse first nodeid in case it is yang-data extension */ if ((r = parse_schema_nodeid(id, &mod_name, &mod_name_len, &name, &nam_len, &is_relative, NULL, NULL, 1)) < 1) { LOGVAL(ctx, LYE_PATH_INCHAR, LY_VLOG_NONE, NULL, id[-r], &id[-r]); goto error; } if (name[0] == '#') { if (is_relative) { LOGVAL(ctx, LYE_PATH_INCHAR, LY_VLOG_NONE, NULL, '#', name); goto error; } id += r; last_parsed = r; } else { is_relative = -1; } /* parse first nodeid */ if ((r = parse_schema_nodeid(id, &mod_name, &mod_name_len, &name, &nam_len, &is_relative, &has_predicate, NULL, 0)) < 1) { LOGVAL(ctx, LYE_PATH_INCHAR, LY_VLOG_NONE, NULL, id[-r], &id[-r]); goto error; } id += r; /* add it to parsed only after the data node was actually found */ last_parsed += r; if (is_relative) { prev_mod = lyd_node_module(start); start = (start->schema->nodetype & (LYS_CONTAINER | LYS_LIST | LYS_RPC | LYS_ACTION | LYS_NOTIF)) ? start->child : NULL; } else { for (; start->parent; start = start->parent); prev_mod = lyd_node_module(start); } if (!start) { /* there are no siblings to search */ return NULL; } /* do not duplicate code, use predicate parsing from the loop */ goto parse_predicates; while (1) { /* find the correct schema node first */ ssibling = NULL; sparent = (start && start->parent) ? start->parent->schema : NULL; while ((ssibling = lys_getnext(ssibling, sparent, prev_mod, 0))) { /* skip invalid input/output nodes */ if (sparent && (sparent->nodetype & (LYS_RPC | LYS_ACTION))) { if (options & LYD_PATH_OPT_OUTPUT) { if (lys_parent(ssibling)->nodetype == LYS_INPUT) { continue; } } else { if (lys_parent(ssibling)->nodetype == LYS_OUTPUT) { continue; } } } if (!schema_nodeid_siblingcheck(ssibling, prev_mod, mod_name, mod_name_len, name, nam_len)) { break; } } if (!ssibling) { /* there is not even such a schema node */ free(pp.pred); return last_match; } pp.schema = ssibling; /* unify leaf-list value - it is possible to specify last-node value as both a predicate or parameter if * is a leaf-list, unify both cases and the value will in both cases be in the predicate structure */ if (!id[0] && !pp.len && (ssibling->nodetype == LYS_LEAFLIST)) { pp.len = 1; pp.pred = calloc(1, sizeof *pp.pred); LY_CHECK_ERR_GOTO(!pp.pred, LOGMEM(ctx), error); pp.pred[0].name = "."; pp.pred[0].nam_len = 1; pp.pred[0].value = (llist_value ? 
llist_value : ""); pp.pred[0].val_len = strlen(pp.pred[0].value); } if (ssibling->nodetype & (LYS_LEAFLIST | LYS_LEAF)) { /* check leaf/leaf-list predicate */ if (pp.len > 1) { LOGVAL(ctx, LYE_PATH_PREDTOOMANY, LY_VLOG_NONE, NULL); goto error; } else if (pp.len) { if ((pp.pred[0].name[0] != '.') || (pp.pred[0].nam_len != 1)) { LOGVAL(ctx, LYE_PATH_INCHAR, LY_VLOG_NONE, NULL, pp.pred[0].name[0], pp.pred[0].name); goto error; } if ((((struct lys_node_leaf *)ssibling)->type.base == LY_TYPE_IDENT) && !strnchr(pp.pred[0].value, ':', pp.pred[0].val_len)) { LOGVAL(ctx, LYE_PATH_INIDENTREF, LY_VLOG_LYS, ssibling, pp.pred[0].val_len, pp.pred[0].value); goto error; } } } else if (ssibling->nodetype == LYS_LIST) { /* list should have predicates for all the keys or position */ slist = (struct lys_node_list *)ssibling; if (!pp.len) { /* none match */ return last_match; } else if (!isdigit(pp.pred[0].name[0])) { /* list predicate is not a position, so there must be all the keys */ if (pp.len > slist->keys_size) { LOGVAL(ctx, LYE_PATH_PREDTOOMANY, LY_VLOG_NONE, NULL); goto error; } else if (pp.len < slist->keys_size) { LOGVAL(ctx, LYE_PATH_MISSKEY, LY_VLOG_NONE, NULL, slist->keys[pp.len]->name); goto error; } /* check that all identityrefs have module name, otherwise the hash of the list instance will never match!! */ for (r = 0; r < pp.len; ++r) { if ((slist->keys[r]->type.base == LY_TYPE_IDENT) && !strnchr(pp.pred[r].value, ':', pp.pred[r].val_len)) { LOGVAL(ctx, LYE_PATH_INIDENTREF, LY_VLOG_LYS, slist->keys[r], pp.pred[r].val_len, pp.pred[r].value); goto error; } } } } else if (pp.pred) { /* no other nodes allow predicates */ LOGVAL(ctx, LYE_PATH_PREDTOOMANY, LY_VLOG_NONE, NULL); goto error; } #ifdef LY_ENABLED_CACHE /* we will not be matching keyless lists or state leaf-lists this way */ if (start->parent && start->parent->ht && ((pp.schema->nodetype != LYS_LIST) || ((struct lys_node_list *)pp.schema)->keys_size) && ((pp.schema->nodetype != LYS_LEAFLIST) || (pp.schema->flags & LYS_CONFIG_W))) { sibling = resolve_json_data_node_hash(start->parent, pp); } else #endif { list_instance_position = 0; LY_TREE_FOR(start, sibling) { /* RPC/action data check, return simply invalid argument, because the data tree is invalid */ if (lys_parent(sibling->schema)) { if (options & LYD_PATH_OPT_OUTPUT) { if (lys_parent(sibling->schema)->nodetype == LYS_INPUT) { LOGERR(ctx, LY_EINVAL, "Provided data tree includes some RPC input nodes (%s).", sibling->schema->name); goto error; } } else { if (lys_parent(sibling->schema)->nodetype == LYS_OUTPUT) { LOGERR(ctx, LY_EINVAL, "Provided data tree includes some RPC output nodes (%s).", sibling->schema->name); goto error; } } } if (sibling->schema != ssibling) { /* wrong schema node */ continue; } /* leaf-list, did we find it with the correct value or not? 
*/ if (ssibling->nodetype == LYS_LEAFLIST) { if (ssibling->flags & LYS_CONFIG_R) { /* state leaf-lists will never match */ continue; } llist = (struct lyd_node_leaf_list *)sibling; /* get the expected leaf-list value */ llval = NULL; llval_len = 0; if (pp.pred) { /* it was already checked that it is correct */ llval = pp.pred[0].value; llval_len = pp.pred[0].val_len; } /* make value canonical (remove module name prefix) unless it was specified with it */ if (llval && !strchr(llval, ':') && (llist->value_type & LY_TYPE_IDENT) && !strncmp(llist->value_str, lyd_node_module(sibling)->name, strlen(lyd_node_module(sibling)->name)) && (llist->value_str[strlen(lyd_node_module(sibling)->name)] == ':')) { data_val = llist->value_str + strlen(lyd_node_module(sibling)->name) + 1; } else { data_val = llist->value_str; } if ((!llval && data_val && data_val[0]) || (llval && (strncmp(llval, data_val, llval_len) || data_val[llval_len]))) { continue; } } else if (ssibling->nodetype == LYS_LIST) { /* list, we likely need predicates'n'stuff then, but if without a predicate, we are always creating it */ ++list_instance_position; ret = resolve_partial_json_data_list_predicate(pp, sibling, list_instance_position); if (ret == -1) { goto error; } else if (ret == 1) { /* this list instance does not match */ continue; } } break; } } /* no match, return last match */ if (!sibling) { free(pp.pred); return last_match; } /* we found a next matching node */ *parsed += last_parsed; last_match = sibling; prev_mod = lyd_node_module(sibling); /* the result node? */ if (!id[0]) { free(pp.pred); return last_match; } /* move down the tree, if possible, and continue */ if (ssibling->nodetype & (LYS_LEAF | LYS_LEAFLIST | LYS_ANYDATA)) { /* there can be no children even through expected, error */ LOGVAL(ctx, LYE_PATH_INCHAR, LY_VLOG_NONE, NULL, id[0], id); goto error; } else if (!sibling->child) { /* there could be some children, but are not, return what we found so far */ free(pp.pred); return last_match; } start = sibling->child; /* parse nodeid */ if ((r = parse_schema_nodeid(id, &mod_name, &mod_name_len, &name, &nam_len, &is_relative, &has_predicate, NULL, 0)) < 1) { LOGVAL(ctx, LYE_PATH_INCHAR, LY_VLOG_NONE, NULL, id[-r], &id[-r]); goto error; } id += r; last_parsed = r; parse_predicates: /* parse all the predicates */ free(pp.pred); pp.schema = NULL; pp.len = 0; pp.pred = NULL; while (has_predicate) { ++pp.len; pp.pred = ly_realloc(pp.pred, pp.len * sizeof *pp.pred); LY_CHECK_ERR_GOTO(!pp.pred, LOGMEM(ctx), error); if ((r = parse_schema_json_predicate(id, &pp.pred[pp.len - 1].mod_name, &pp.pred[pp.len - 1].mod_name_len, &pp.pred[pp.len - 1].name, &pp.pred[pp.len - 1].nam_len, &pp.pred[pp.len - 1].value, &pp.pred[pp.len - 1].val_len, &has_predicate)) < 1) { LOGVAL(ctx, LYE_PATH_INCHAR, LY_VLOG_NONE, NULL, id[0], id); goto error; } id += r; last_parsed += r; } } error: *parsed = -1; free(pp.pred); return NULL; } /** * @brief Resolves length or range intervals. Does not log. * Syntax is assumed to be correct, *ret MUST be NULL. * * @param[in] ctx Context for errors. * @param[in] str_restr Restriction as a string. * @param[in] type Type of the restriction. * @param[out] ret Final interval structure that starts with * the interval of the initial type, continues with intervals * of any superior types derived from the initial one, and * finishes with intervals from our \p type. * * @return EXIT_SUCCESS on succes, -1 on error. 
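 *
 * A minimal illustration: for a uint8 type restricted with range "1..20 | 30..max",
 * the resulting list holds two intervals, [1, 20] and [30, 255].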
*/ int resolve_len_ran_interval(struct ly_ctx *ctx, const char *str_restr, struct lys_type *type, struct len_ran_intv **ret) { /* 0 - unsigned, 1 - signed, 2 - floating point */ int kind; int64_t local_smin = 0, local_smax = 0, local_fmin, local_fmax; uint64_t local_umin, local_umax = 0; uint8_t local_fdig = 0; const char *seg_ptr, *ptr; struct len_ran_intv *local_intv = NULL, *tmp_local_intv = NULL, *tmp_intv, *intv = NULL; switch (type->base) { case LY_TYPE_BINARY: kind = 0; local_umin = 0; local_umax = 18446744073709551615UL; if (!str_restr && type->info.binary.length) { str_restr = type->info.binary.length->expr; } break; case LY_TYPE_DEC64: kind = 2; local_fmin = __INT64_C(-9223372036854775807) - __INT64_C(1); local_fmax = __INT64_C(9223372036854775807); local_fdig = type->info.dec64.dig; if (!str_restr && type->info.dec64.range) { str_restr = type->info.dec64.range->expr; } break; case LY_TYPE_INT8: kind = 1; local_smin = __INT64_C(-128); local_smax = __INT64_C(127); if (!str_restr && type->info.num.range) { str_restr = type->info.num.range->expr; } break; case LY_TYPE_INT16: kind = 1; local_smin = __INT64_C(-32768); local_smax = __INT64_C(32767); if (!str_restr && type->info.num.range) { str_restr = type->info.num.range->expr; } break; case LY_TYPE_INT32: kind = 1; local_smin = __INT64_C(-2147483648); local_smax = __INT64_C(2147483647); if (!str_restr && type->info.num.range) { str_restr = type->info.num.range->expr; } break; case LY_TYPE_INT64: kind = 1; local_smin = __INT64_C(-9223372036854775807) - __INT64_C(1); local_smax = __INT64_C(9223372036854775807); if (!str_restr && type->info.num.range) { str_restr = type->info.num.range->expr; } break; case LY_TYPE_UINT8: kind = 0; local_umin = __UINT64_C(0); local_umax = __UINT64_C(255); if (!str_restr && type->info.num.range) { str_restr = type->info.num.range->expr; } break; case LY_TYPE_UINT16: kind = 0; local_umin = __UINT64_C(0); local_umax = __UINT64_C(65535); if (!str_restr && type->info.num.range) { str_restr = type->info.num.range->expr; } break; case LY_TYPE_UINT32: kind = 0; local_umin = __UINT64_C(0); local_umax = __UINT64_C(4294967295); if (!str_restr && type->info.num.range) { str_restr = type->info.num.range->expr; } break; case LY_TYPE_UINT64: kind = 0; local_umin = __UINT64_C(0); local_umax = __UINT64_C(18446744073709551615); if (!str_restr && type->info.num.range) { str_restr = type->info.num.range->expr; } break; case LY_TYPE_STRING: kind = 0; local_umin = __UINT64_C(0); local_umax = __UINT64_C(18446744073709551615); if (!str_restr && type->info.str.length) { str_restr = type->info.str.length->expr; } break; default: return -1; } /* process superior types */ if (type->der) { if (resolve_len_ran_interval(ctx, NULL, &type->der->type, &intv)) { return -1; } assert(!intv || (intv->kind == kind)); } if (!str_restr) { /* we do not have any restriction, return superior ones */ *ret = intv; return EXIT_SUCCESS; } /* adjust local min and max */ if (intv) { tmp_intv = intv; if (kind == 0) { local_umin = tmp_intv->value.uval.min; } else if (kind == 1) { local_smin = tmp_intv->value.sval.min; } else if (kind == 2) { local_fmin = tmp_intv->value.fval.min; } while (tmp_intv->next) { tmp_intv = tmp_intv->next; } if (kind == 0) { local_umax = tmp_intv->value.uval.max; } else if (kind == 1) { local_smax = tmp_intv->value.sval.max; } else if (kind == 2) { local_fmax = tmp_intv->value.fval.max; } } /* finally parse our restriction */ seg_ptr = str_restr; tmp_intv = NULL; while (1) { if (!tmp_local_intv) { assert(!local_intv); 
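/* first segment of the restriction being parsed, start the local interval chain */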
local_intv = malloc(sizeof *local_intv); tmp_local_intv = local_intv; } else { tmp_local_intv->next = malloc(sizeof *tmp_local_intv); tmp_local_intv = tmp_local_intv->next; } LY_CHECK_ERR_GOTO(!tmp_local_intv, LOGMEM(ctx), error); tmp_local_intv->kind = kind; tmp_local_intv->type = type; tmp_local_intv->next = NULL; /* min */ ptr = seg_ptr; while (isspace(ptr[0])) { ++ptr; } if (isdigit(ptr[0]) || (ptr[0] == '+') || (ptr[0] == '-')) { if (kind == 0) { tmp_local_intv->value.uval.min = strtoll(ptr, (char **)&ptr, 10); } else if (kind == 1) { tmp_local_intv->value.sval.min = strtoll(ptr, (char **)&ptr, 10); } else if (kind == 2) { if (parse_range_dec64(&ptr, local_fdig, &tmp_local_intv->value.fval.min)) { LOGVAL(ctx, LYE_INARG, LY_VLOG_NONE, NULL, ptr, "range"); goto error; } } } else if (!strncmp(ptr, "min", 3)) { if (kind == 0) { tmp_local_intv->value.uval.min = local_umin; } else if (kind == 1) { tmp_local_intv->value.sval.min = local_smin; } else if (kind == 2) { tmp_local_intv->value.fval.min = local_fmin; } ptr += 3; } else if (!strncmp(ptr, "max", 3)) { if (kind == 0) { tmp_local_intv->value.uval.min = local_umax; } else if (kind == 1) { tmp_local_intv->value.sval.min = local_smax; } else if (kind == 2) { tmp_local_intv->value.fval.min = local_fmax; } ptr += 3; } else { goto error; } while (isspace(ptr[0])) { ptr++; } /* no interval or interval */ if ((ptr[0] == '|') || !ptr[0]) { if (kind == 0) { tmp_local_intv->value.uval.max = tmp_local_intv->value.uval.min; } else if (kind == 1) { tmp_local_intv->value.sval.max = tmp_local_intv->value.sval.min; } else if (kind == 2) { tmp_local_intv->value.fval.max = tmp_local_intv->value.fval.min; } } else if (!strncmp(ptr, "..", 2)) { /* skip ".." */ ptr += 2; while (isspace(ptr[0])) { ++ptr; } /* max */ if (isdigit(ptr[0]) || (ptr[0] == '+') || (ptr[0] == '-')) { if (kind == 0) { tmp_local_intv->value.uval.max = strtoll(ptr, (char **)&ptr, 10); } else if (kind == 1) { tmp_local_intv->value.sval.max = strtoll(ptr, (char **)&ptr, 10); } else if (kind == 2) { if (parse_range_dec64(&ptr, local_fdig, &tmp_local_intv->value.fval.max)) { LOGVAL(ctx, LYE_INARG, LY_VLOG_NONE, NULL, ptr, "range"); goto error; } } } else if (!strncmp(ptr, "max", 3)) { if (kind == 0) { tmp_local_intv->value.uval.max = local_umax; } else if (kind == 1) { tmp_local_intv->value.sval.max = local_smax; } else if (kind == 2) { tmp_local_intv->value.fval.max = local_fmax; } } else { goto error; } } else { goto error; } /* check that min and max are in the correct order */ if (kind == 0) { /* current segment */ if (tmp_local_intv->value.uval.min > tmp_local_intv->value.uval.max) { goto error; } if (tmp_local_intv->value.uval.min < local_umin || tmp_local_intv->value.uval.max > local_umax) { goto error; } /* segments should be in ascending order */ if (tmp_intv && (tmp_intv->value.uval.max >= tmp_local_intv->value.uval.min)) { goto error; } } else if (kind == 1) { if (tmp_local_intv->value.sval.min > tmp_local_intv->value.sval.max) { goto error; } if (tmp_local_intv->value.sval.min < local_smin || tmp_local_intv->value.sval.max > local_smax) { goto error; } if (tmp_intv && (tmp_intv->value.sval.max >= tmp_local_intv->value.sval.min)) { goto error; } } else if (kind == 2) { if (tmp_local_intv->value.fval.min > tmp_local_intv->value.fval.max) { goto error; } if (tmp_local_intv->value.fval.min < local_fmin || tmp_local_intv->value.fval.max > local_fmax) { goto error; } if (tmp_intv && (tmp_intv->value.fval.max >= tmp_local_intv->value.fval.min)) { /* fraction-digits value is always the same (it cannot
be changed in derived types) */ goto error; } } /* next segment (next OR) */ seg_ptr = strchr(seg_ptr, '|'); if (!seg_ptr) { break; } seg_ptr++; tmp_intv = tmp_local_intv; } /* check local restrictions against superior ones */ if (intv) { tmp_intv = intv; tmp_local_intv = local_intv; while (tmp_local_intv && tmp_intv) { /* reuse local variables */ if (kind == 0) { local_umin = tmp_local_intv->value.uval.min; local_umax = tmp_local_intv->value.uval.max; /* it must be in this interval */ if ((local_umin >= tmp_intv->value.uval.min) && (local_umin <= tmp_intv->value.uval.max)) { /* this interval is covered, next one */ if (local_umax <= tmp_intv->value.uval.max) { tmp_local_intv = tmp_local_intv->next; continue; /* ascending order of restrictions -> fail */ } else { goto error; } } } else if (kind == 1) { local_smin = tmp_local_intv->value.sval.min; local_smax = tmp_local_intv->value.sval.max; if ((local_smin >= tmp_intv->value.sval.min) && (local_smin <= tmp_intv->value.sval.max)) { if (local_smax <= tmp_intv->value.sval.max) { tmp_local_intv = tmp_local_intv->next; continue; } else { goto error; } } } else if (kind == 2) { local_fmin = tmp_local_intv->value.fval.min; local_fmax = tmp_local_intv->value.fval.max; if ((dec64cmp(local_fmin, local_fdig, tmp_intv->value.fval.min, local_fdig) > -1) && (dec64cmp(local_fmin, local_fdig, tmp_intv->value.fval.max, local_fdig) < 1)) { if (dec64cmp(local_fmax, local_fdig, tmp_intv->value.fval.max, local_fdig) < 1) { tmp_local_intv = tmp_local_intv->next; continue; } else { goto error; } } } tmp_intv = tmp_intv->next; } /* some interval left uncovered -> fail */ if (tmp_local_intv) { goto error; } } /* append the local intervals to all the intervals of the superior types, return it all */ if (intv) { for (tmp_intv = intv; tmp_intv->next; tmp_intv = tmp_intv->next); tmp_intv->next = local_intv; } else { intv = local_intv; } *ret = intv; return EXIT_SUCCESS; error: while (intv) { tmp_intv = intv->next; free(intv); intv = tmp_intv; } while (local_intv) { tmp_local_intv = local_intv->next; free(local_intv); local_intv = tmp_local_intv; } return -1; } /** * @brief Resolve a typedef, return only resolved typedefs if derived. If leafref, it must be * resolved for this function to return it. Does not log. * * @param[in] name Typedef name. * @param[in] mod_name Typedef name module name. * @param[in] module Main module. * @param[in] parent Parent of the resolved type definition. * @param[out] ret Pointer to the resolved typedef. Can be NULL. * * @return EXIT_SUCCESS on success, EXIT_FAILURE on forward reference, -1 on error. 
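 *
 * Illustrative example (names are made up): an unprefixed type name such as "my-enum" is first
 * tried against the built-in YANG types, then against typedefs local to the parent chain
 * (grouping/container/rpc/...), and finally against the module's top-level typedefs and those of
 * its submodules; a name prefixed with another module, e.g. "et:my-enum", is searched only in that
 * module's top-level typedefs and submodules.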
*/ int resolve_superior_type(const char *name, const char *mod_name, const struct lys_module *module, const struct lys_node *parent, struct lys_tpdf **ret) { int i, j; struct lys_tpdf *tpdf, *match; int tpdf_size; if (!mod_name) { /* no prefix, try built-in types */ for (i = 1; i < LY_DATA_TYPE_COUNT; i++) { if (!strcmp(ly_types[i]->name, name)) { if (ret) { *ret = ly_types[i]; } return EXIT_SUCCESS; } } } else { if (!strcmp(mod_name, module->name)) { /* prefix refers to the current module, ignore it */ mod_name = NULL; } } if (!mod_name && parent) { /* search in local typedefs */ while (parent) { switch (parent->nodetype) { case LYS_CONTAINER: tpdf_size = ((struct lys_node_container *)parent)->tpdf_size; tpdf = ((struct lys_node_container *)parent)->tpdf; break; case LYS_LIST: tpdf_size = ((struct lys_node_list *)parent)->tpdf_size; tpdf = ((struct lys_node_list *)parent)->tpdf; break; case LYS_GROUPING: tpdf_size = ((struct lys_node_grp *)parent)->tpdf_size; tpdf = ((struct lys_node_grp *)parent)->tpdf; break; case LYS_RPC: case LYS_ACTION: tpdf_size = ((struct lys_node_rpc_action *)parent)->tpdf_size; tpdf = ((struct lys_node_rpc_action *)parent)->tpdf; break; case LYS_NOTIF: tpdf_size = ((struct lys_node_notif *)parent)->tpdf_size; tpdf = ((struct lys_node_notif *)parent)->tpdf; break; case LYS_INPUT: case LYS_OUTPUT: tpdf_size = ((struct lys_node_inout *)parent)->tpdf_size; tpdf = ((struct lys_node_inout *)parent)->tpdf; break; default: parent = lys_parent(parent); continue; } for (i = 0; i < tpdf_size; i++) { if (!strcmp(tpdf[i].name, name) && tpdf[i].type.base > 0) { match = &tpdf[i]; goto check_leafref; } } parent = lys_parent(parent); } } else { /* get module where to search */ module = lyp_get_module(module, NULL, 0, mod_name, 0, 0); if (!module) { return -1; } } /* search in top level typedefs */ for (i = 0; i < module->tpdf_size; i++) { if (!strcmp(module->tpdf[i].name, name) && module->tpdf[i].type.base > 0) { match = &module->tpdf[i]; goto check_leafref; } } /* search in submodules */ for (i = 0; i < module->inc_size && module->inc[i].submodule; i++) { for (j = 0; j < module->inc[i].submodule->tpdf_size; j++) { if (!strcmp(module->inc[i].submodule->tpdf[j].name, name) && module->inc[i].submodule->tpdf[j].type.base > 0) { match = &module->inc[i].submodule->tpdf[j]; goto check_leafref; } } } return EXIT_FAILURE; check_leafref: if (ret) { *ret = match; } if (match->type.base == LY_TYPE_LEAFREF) { while (!match->type.info.lref.path) { match = match->type.der; assert(match); } } return EXIT_SUCCESS; } /** * @brief Check the default \p value of the \p type. Logs directly. * * @param[in] type Type definition to use. * @param[in] value Default value to check. * @param[in] module Type module. * * @return EXIT_SUCCESS on success, EXIT_FAILURE on forward reference, -1 on error. 
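 *
 * Illustrative example (the typedef and values are made up): for a typedef "port-number"
 * restricting uint16 to "1024..65535" with default "8080", the default string is parsed against
 * the restricted type using a dummy leaf; a default inherited from a base typedef is re-checked
 * only when this type adds new restrictions (range, length, pattern, refined bits/enums, ...).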
*/ static int check_default(struct lys_type *type, const char **value, struct lys_module *module, int tpdf) { struct lys_tpdf *base_tpdf = NULL; struct lyd_node_leaf_list node; const char *dflt = NULL; char *s; int ret = EXIT_SUCCESS, r; struct ly_ctx *ctx = module->ctx; assert(value); memset(&node, 0, sizeof node); if (type->base <= LY_TYPE_DER) { /* the type was not resolved yet, nothing to do for now */ ret = EXIT_FAILURE; goto cleanup; } else if (!tpdf && !module->implemented) { /* do not check defaults in not implemented module's data */ goto cleanup; } else if (tpdf && !module->implemented && type->base == LY_TYPE_IDENT) { /* identityrefs are checked when instantiated in data instead of typedef, * but in typedef the value has to be modified to include the prefix */ if (*value) { if (strchr(*value, ':')) { dflt = transform_schema2json(module, *value); } else { /* default prefix of the module where the typedef is defined */ if (asprintf(&s, "%s:%s", lys_main_module(module)->name, *value) == -1) { LOGMEM(ctx); ret = -1; goto cleanup; } dflt = lydict_insert_zc(ctx, s); } lydict_remove(ctx, *value); *value = dflt; dflt = NULL; } goto cleanup; } else if (type->base == LY_TYPE_LEAFREF && tpdf) { /* leafref in typedef cannot be checked */ goto cleanup; } dflt = lydict_insert(ctx, *value, 0); if (!dflt) { /* we do not have a new default value, so is there any to check even, in some base type? */ for (base_tpdf = type->der; base_tpdf->type.der; base_tpdf = base_tpdf->type.der) { if (base_tpdf->dflt) { dflt = lydict_insert(ctx, base_tpdf->dflt, 0); break; } } if (!dflt) { /* no default value, nothing to check, all is well */ goto cleanup; } /* so there is a default value in a base type, but can the default value be no longer valid (did we define some new restrictions)? 
*/ switch (type->base) { case LY_TYPE_IDENT: if (lys_main_module(base_tpdf->type.parent->module)->implemented) { goto cleanup; } else { /* check the default value from typedef, but use also the typedef's module * due to possible searching in imported modules which is expected in * typedef's module instead of module where the typedef is used */ module = base_tpdf->module; } break; case LY_TYPE_INST: case LY_TYPE_LEAFREF: case LY_TYPE_BOOL: case LY_TYPE_EMPTY: /* these have no restrictions, so we would do the exact same work as the unres in the base typedef */ goto cleanup; case LY_TYPE_BITS: /* the default value must match the restricted list of values, if the type was restricted */ if (type->info.bits.count) { break; } goto cleanup; case LY_TYPE_ENUM: /* the default value must match the restricted list of values, if the type was restricted */ if (type->info.enums.count) { break; } goto cleanup; case LY_TYPE_DEC64: if (type->info.dec64.range) { break; } goto cleanup; case LY_TYPE_BINARY: if (type->info.binary.length) { break; } goto cleanup; case LY_TYPE_INT8: case LY_TYPE_INT16: case LY_TYPE_INT32: case LY_TYPE_INT64: case LY_TYPE_UINT8: case LY_TYPE_UINT16: case LY_TYPE_UINT32: case LY_TYPE_UINT64: if (type->info.num.range) { break; } goto cleanup; case LY_TYPE_STRING: if (type->info.str.length || type->info.str.patterns) { break; } goto cleanup; case LY_TYPE_UNION: /* way too much trouble learning whether we need to check the default again, so just do it */ break; default: LOGINT(ctx); ret = -1; goto cleanup; } } else if (type->base == LY_TYPE_EMPTY) { LOGVAL(ctx, LYE_INCHILDSTMT, LY_VLOG_NONE, NULL, "default", type->parent->name); LOGVAL(ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "The \"empty\" data type cannot have a default value."); ret = -1; goto cleanup; } /* dummy leaf */ memset(&node, 0, sizeof node); node.value_str = lydict_insert(ctx, dflt, 0); node.value_type = type->base; if (tpdf) { node.schema = calloc(1, sizeof (struct lys_node_leaf)); if (!node.schema) { LOGMEM(ctx); ret = -1; goto cleanup; } r = asprintf((char **)&node.schema->name, "typedef-%s-default", ((struct lys_tpdf *)type->parent)->name); if (r == -1) { LOGMEM(ctx); ret = -1; goto cleanup; } node.schema->module = module; memcpy(&((struct lys_node_leaf *)node.schema)->type, type, sizeof *type); } else { node.schema = (struct lys_node *)type->parent; } if (type->base == LY_TYPE_LEAFREF) { if (!type->info.lref.target) { ret = EXIT_FAILURE; LOGVAL(ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Default value \"%s\" cannot be checked in an unresolved leafref.", dflt); goto cleanup; } ret = check_default(&type->info.lref.target->type, &dflt, module, 0); if (!ret) { /* adopt possibly changed default value to its canonical form */ if (*value) { lydict_remove(ctx, *value); *value = dflt; dflt = NULL; } } } else { if (!lyp_parse_value(type, &node.value_str, NULL, &node, NULL, module, 1, 1, 0)) { /* possible forward reference */ ret = EXIT_FAILURE; if (base_tpdf) { /* default value is defined in some base typedef */ if ((type->base == LY_TYPE_BITS && type->der->type.der) || (type->base == LY_TYPE_ENUM && type->der->type.der)) { /* we have refined bits/enums */ LOGVAL(ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Invalid value \"%s\" of the default statement inherited to \"%s\" from \"%s\" base type.", dflt, type->parent->name, base_tpdf->name); } } } else { /* success - adopt canonical form from the node into the default value */ if (!ly_strequal(dflt, node.value_str, 1)) { /* this can happen only if we have non-inherited default value, * inherited 
default values are already in canonical form */ assert(ly_strequal(dflt, *value, 1)); lydict_remove(ctx, *value); *value = node.value_str; node.value_str = NULL; } } } cleanup: lyd_free_value(node.value, node.value_type, node.value_flags, type, NULL, NULL, NULL); lydict_remove(ctx, node.value_str); if (tpdf && node.schema) { free((char *)node.schema->name); free(node.schema); } lydict_remove(ctx, dflt); return ret; } /** * @brief Check a key for mandatory attributes. Logs directly. * * @param[in] key The key to check. * @param[in] flags What flags to check. * @param[in] list The list of all the keys. * @param[in] index Index of the key in the key list. * @param[in] name The name of the keys. * @param[in] len The name length. * * @return EXIT_SUCCESS on success, -1 on error. */ static int check_key(struct lys_node_list *list, int index, const char *name, int len) { struct lys_node_leaf *key = list->keys[index]; char *dup = NULL; int j; struct ly_ctx *ctx = list->module->ctx; /* existence */ if (!key) { if (name[len] != '\0') { dup = strdup(name); LY_CHECK_ERR_RETURN(!dup, LOGMEM(ctx), -1); dup[len] = '\0'; name = dup; } LOGVAL(ctx, LYE_KEY_MISS, LY_VLOG_LYS, list, name); free(dup); return -1; } /* uniqueness */ for (j = index - 1; j >= 0; j--) { if (key == list->keys[j]) { LOGVAL(ctx, LYE_KEY_DUP, LY_VLOG_LYS, list, key->name); return -1; } } /* key is a leaf */ if (key->nodetype != LYS_LEAF) { LOGVAL(ctx, LYE_KEY_NLEAF, LY_VLOG_LYS, list, key->name); return -1; } /* type of the leaf is not built-in empty */ if (key->type.base == LY_TYPE_EMPTY && key->module->version < LYS_VERSION_1_1) { LOGVAL(ctx, LYE_KEY_TYPE, LY_VLOG_LYS, list, key->name); return -1; } /* config attribute is the same as of the list */ if ((key->flags & LYS_CONFIG_MASK) && (list->flags & LYS_CONFIG_MASK) && ((list->flags & LYS_CONFIG_MASK) != (key->flags & LYS_CONFIG_MASK))) { LOGVAL(ctx, LYE_KEY_CONFIG, LY_VLOG_LYS, list, key->name); return -1; } /* key is not placed from augment */ if (key->parent->nodetype == LYS_AUGMENT) { LOGVAL(ctx, LYE_KEY_MISS, LY_VLOG_LYS, key, key->name); LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "Key inserted from augment."); return -1; } /* key is not when/if-feature -conditional */ j = 0; if (key->when || (key->iffeature_size && (j = 1))) { LOGVAL(ctx, LYE_INCHILDSTMT, LY_VLOG_LYS, key, j ? "if-feature" : "when", "leaf"); LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "Key definition cannot depend on a \"%s\" condition.", j ? "if-feature" : "when"); return -1; } return EXIT_SUCCESS; } /** * @brief Resolve (test the target exists) unique. Logs directly. * * @param[in] parent The parent node of the unique structure. * @param[in] uniq_str_path One path from the unique string. * * @return EXIT_SUCCESS on succes, EXIT_FAILURE on forward reference, -1 on error. 
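 *
 * Illustrative example (node names are made up): for a list with "unique \"ip port\"" this
 * function is called once per path ("ip", then "port"); each path is resolved as a descendant
 * leaf of the list and all referenced leafs are checked to share the same config value.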
*/ int resolve_unique(struct lys_node *parent, const char *uniq_str_path, uint8_t *trg_type) { int rc; const struct lys_node *leaf = NULL; struct ly_ctx *ctx = parent->module->ctx; rc = resolve_descendant_schema_nodeid(uniq_str_path, *lys_child(parent, LYS_LEAF), LYS_LEAF, 1, &leaf); if (rc || !leaf) { if (rc) { LOGVAL(ctx, LYE_INARG, LY_VLOG_LYS, parent, uniq_str_path, "unique"); if (rc > 0) { LOGVAL(ctx, LYE_INCHAR, LY_VLOG_PREV, NULL, uniq_str_path[rc - 1], &uniq_str_path[rc - 1]); } else if (rc == -2) { LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "Unique argument references list."); } rc = -1; } else { LOGVAL(ctx, LYE_INARG, LY_VLOG_LYS, parent, uniq_str_path, "unique"); LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "Target leaf not found."); rc = EXIT_FAILURE; } goto error; } if (leaf->nodetype != LYS_LEAF) { LOGVAL(ctx, LYE_INARG, LY_VLOG_LYS, parent, uniq_str_path, "unique"); LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "Target is not a leaf."); return -1; } /* check status */ if (parent->nodetype != LYS_EXT && lyp_check_status(parent->flags, parent->module, parent->name, leaf->flags, leaf->module, leaf->name, leaf)) { return -1; } /* check that all unique's targets are of the same config type */ if (*trg_type) { if (((*trg_type == 1) && (leaf->flags & LYS_CONFIG_R)) || ((*trg_type == 2) && (leaf->flags & LYS_CONFIG_W))) { LOGVAL(ctx, LYE_INARG, LY_VLOG_LYS, parent, uniq_str_path, "unique"); LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "Leaf \"%s\" referenced in unique statement is config %s, but previous referenced leaf is config %s.", uniq_str_path, *trg_type == 1 ? "false" : "true", *trg_type == 1 ? "true" : "false"); return -1; } } else { /* first unique */ if (leaf->flags & LYS_CONFIG_W) { *trg_type = 1; } else { *trg_type = 2; } } /* set leaf's unique flag */ ((struct lys_node_leaf *)leaf)->flags |= LYS_UNIQUE; return EXIT_SUCCESS; error: return rc; } void unres_data_del(struct unres_data *unres, uint32_t i) { /* there are items after the one deleted */ if (i+1 < unres->count) { /* we only move the data, memory is left allocated, why bother */ memmove(&unres->node[i], &unres->node[i+1], (unres->count-(i+1)) * sizeof *unres->node); /* deleting the last item */ } else if (i == 0) { free(unres->node); unres->node = NULL; } /* if there are no items after and it is not the last one, just move the counter */ --unres->count; } /** * @brief Resolve (find) a data node from a specific module. Does not log. * * @param[in] mod Module to search in. * @param[in] name Name of the data node. * @param[in] nam_len Length of the name. * @param[in] start Data node to start the search from. * @param[in,out] parents Resolved nodes. If there are some parents, * they are replaced (!!) with the resolvents. * * @return EXIT_SUCCESS on success, EXIT_FAILURE on forward reference, -1 on error. */ static int resolve_data(const struct lys_module *mod, const char *name, int nam_len, struct lyd_node *start, struct unres_data *parents) { struct lyd_node *node; int flag; uint32_t i; if (!parents->count) { parents->count = 1; parents->node = malloc(sizeof *parents->node); LY_CHECK_ERR_RETURN(!parents->node, LOGMEM(mod->ctx), -1); parents->node[0] = NULL; } for (i = 0; i < parents->count;) { if (parents->node[i] && (parents->node[i]->schema->nodetype & (LYS_LEAF | LYS_LEAFLIST | LYS_ANYDATA))) { /* skip */ ++i; continue; } flag = 0; LY_TREE_FOR(parents->node[i] ? 
parents->node[i]->child : start, node) { if (lyd_node_module(node) == mod && !strncmp(node->schema->name, name, nam_len) && node->schema->name[nam_len] == '\0') { /* matching target */ if (!flag) { /* put node instead of the current parent */ parents->node[i] = node; flag = 1; } else { /* multiple matching, so create a new node */ ++parents->count; parents->node = ly_realloc(parents->node, parents->count * sizeof *parents->node); LY_CHECK_ERR_RETURN(!parents->node, LOGMEM(mod->ctx), EXIT_FAILURE); parents->node[parents->count-1] = node; ++i; } } } if (!flag) { /* remove item from the parents list */ unres_data_del(parents, i); } else { ++i; } } return parents->count ? EXIT_SUCCESS : EXIT_FAILURE; } static int resolve_schema_leafref_valid_dep_flag(const struct lys_node *op_node, const struct lys_module *local_mod, const struct lys_node *first_node, int abs_path) { int dep1, dep2; const struct lys_node *node; if (!op_node) { /* leafref pointing to a different module */ if (local_mod != lys_node_module(first_node)) { return 1; } } else if (lys_parent(op_node)) { /* inner operation (notif/action) */ if (abs_path) { return 1; } else { /* compare depth of both nodes */ for (dep1 = 0, node = op_node; lys_parent(node); node = lys_parent(node)); for (dep2 = 0, node = first_node; lys_parent(node); node = lys_parent(node)); if ((dep2 > dep1) || ((dep2 == dep1) && (op_node != first_node))) { return 1; } } } else { /* top-level operation (notif/rpc) */ if (op_node != first_node) { return 1; } } return 0; } /** * @brief Resolve a path (leafref) predicate in JSON schema context. Logs directly. * * @param[in] path Path to use. * @param[in] context_node Predicate context node (where the predicate is placed). * @param[in] parent Path context node (where the path begins/is placed). * @param[in] op_node Optional node if the leafref is in an operation (action/rpc/notif). * * @return 0 on forward reference, otherwise the number * of characters successfully parsed, * positive on success, negative on failure. 
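 *
 * Illustrative example (names are made up): in a leafref path such as
 * "/if:interfaces/if:interface[if:name = current()/../if-name]/if:address", the predicate
 * "[if:name = current()/../if-name]" is what gets resolved here: the source ("if:name") must be
 * a leaf/leaf-list under the context node (the list), and the destination is found by walking
 * the "../" steps from the leafref's parent.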
*/ static int resolve_schema_leafref_predicate(const char *path, const struct lys_node *context_node, struct lys_node *parent) { const struct lys_module *trg_mod; const struct lys_node *src_node, *dst_node; const char *path_key_expr, *source, *sour_pref, *dest, *dest_pref; int pke_len, sour_len, sour_pref_len, dest_len, dest_pref_len, pke_parsed, parsed = 0; int has_predicate, dest_parent_times, i, rc; struct ly_ctx *ctx = context_node->module->ctx; do { if ((i = parse_path_predicate(path, &sour_pref, &sour_pref_len, &source, &sour_len, &path_key_expr, &pke_len, &has_predicate)) < 1) { LOGVAL(ctx, LYE_INCHAR, LY_VLOG_LYS, parent, path[-i], path-i); return -parsed+i; } parsed += i; path += i; /* source (must be leaf) */ if (sour_pref) { trg_mod = lyp_get_module(lys_node_module(parent), NULL, 0, sour_pref, sour_pref_len, 0); } else { trg_mod = lys_node_module(parent); } rc = lys_getnext_data(trg_mod, context_node, source, sour_len, LYS_LEAF | LYS_LEAFLIST, LYS_GETNEXT_NOSTATECHECK, &src_node); if (rc) { LOGVAL(ctx, LYE_NORESOLV, LY_VLOG_LYS, parent, "leafref predicate", path-parsed); return 0; } /* destination */ dest_parent_times = 0; pke_parsed = 0; if ((i = parse_path_key_expr(path_key_expr, &dest_pref, &dest_pref_len, &dest, &dest_len, &dest_parent_times)) < 1) { LOGVAL(ctx, LYE_INCHAR, LY_VLOG_LYS, parent, path_key_expr[-i], path_key_expr-i); return -parsed; } pke_parsed += i; for (i = 0, dst_node = parent; i < dest_parent_times; ++i) { if (!dst_node) { /* we went too much into parents, there is no parent anymore */ LOGVAL(ctx, LYE_NORESOLV, LY_VLOG_LYS, parent, "leafref predicate", path_key_expr); return 0; } if (dst_node->parent && (dst_node->parent->nodetype == LYS_AUGMENT) && !((struct lys_node_augment *)dst_node->parent)->target) { /* we are in an unresolved augment, cannot evaluate */ LOGVAL(ctx, LYE_SPEC, LY_VLOG_LYS, dst_node->parent, "Cannot resolve leafref predicate \"%s\" because it is in an unresolved augment.", path_key_expr); return 0; } /* path is supposed to be evaluated in data tree, so we have to skip * all schema nodes that cannot be instantiated in data tree */ for (dst_node = lys_parent(dst_node); dst_node && !(dst_node->nodetype & (LYS_CONTAINER | LYS_LIST | LYS_ACTION | LYS_NOTIF | LYS_RPC)); dst_node = lys_parent(dst_node)); } while (1) { if (dest_pref) { trg_mod = lyp_get_module(lys_node_module(parent), NULL, 0, dest_pref, dest_pref_len, 0); } else { trg_mod = lys_node_module(parent); } rc = lys_getnext_data(trg_mod, dst_node, dest, dest_len, LYS_CONTAINER | LYS_LIST | LYS_LEAF, LYS_GETNEXT_NOSTATECHECK, &dst_node); if (rc) { LOGVAL(ctx, LYE_NORESOLV, LY_VLOG_LYS, parent, "leafref predicate", path_key_expr); return 0; } if (pke_len == pke_parsed) { break; } if ((i = parse_path_key_expr(path_key_expr + pke_parsed, &dest_pref, &dest_pref_len, &dest, &dest_len, &dest_parent_times)) < 1) { LOGVAL(ctx, LYE_INCHAR, LY_VLOG_LYS, parent, (path_key_expr + pke_parsed)[-i], (path_key_expr + pke_parsed)-i); return -parsed; } pke_parsed += i; } /* check source - dest match */ if (dst_node->nodetype != src_node->nodetype) { LOGVAL(ctx, LYE_NORESOLV, LY_VLOG_LYS, parent, "leafref predicate", path - parsed); LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "Destination node is not a %s, but a %s.", strnodetype(src_node->nodetype), strnodetype(dst_node->nodetype)); return -parsed; } } while (has_predicate); return parsed; } static int check_leafref_features(struct lys_type *type) { struct lys_node *iter; struct ly_set *src_parents, *trg_parents, *features; struct lys_node_augment *aug; 
struct ly_ctx *ctx = ((struct lys_tpdf *)type->parent)->module->ctx; unsigned int i, j, size, x; int ret = EXIT_SUCCESS; assert(type->parent); src_parents = ly_set_new(); trg_parents = ly_set_new(); features = ly_set_new(); /* get parents chain of source (leafref) */ for (iter = (struct lys_node *)type->parent; iter; iter = lys_parent(iter)) { if (iter->nodetype & (LYS_INPUT | LYS_OUTPUT)) { continue; } if (iter->parent && (iter->parent->nodetype == LYS_AUGMENT)) { aug = (struct lys_node_augment *)iter->parent; if ((aug->module->implemented && (aug->flags & LYS_NOTAPPLIED)) || !aug->target) { /* unresolved augment, wait until it's resolved */ LOGVAL(ctx, LYE_SPEC, LY_VLOG_LYS, aug, "Cannot check leafref \"%s\" if-feature consistency because of an unresolved augment.", type->info.lref.path); ret = EXIT_FAILURE; goto cleanup; } /* also add this augment */ ly_set_add(src_parents, aug, LY_SET_OPT_USEASLIST); } ly_set_add(src_parents, iter, LY_SET_OPT_USEASLIST); } /* get parents chain of target */ for (iter = (struct lys_node *)type->info.lref.target; iter; iter = lys_parent(iter)) { if (iter->nodetype & (LYS_INPUT | LYS_OUTPUT)) { continue; } if (iter->parent && (iter->parent->nodetype == LYS_AUGMENT)) { aug = (struct lys_node_augment *)iter->parent; if ((aug->module->implemented && (aug->flags & LYS_NOTAPPLIED)) || !aug->target) { /* unresolved augment, wait until it's resolved */ LOGVAL(ctx, LYE_SPEC, LY_VLOG_LYS, aug, "Cannot check leafref \"%s\" if-feature consistency because of an unresolved augment.", type->info.lref.path); ret = EXIT_FAILURE; goto cleanup; } } ly_set_add(trg_parents, iter, LY_SET_OPT_USEASLIST); } /* compare the features used in if-feature statements in the rest of both * chains of parents. The set of features used for target must be a subset * of features used for the leafref. This is not a perfect, we should compare * the truth tables but it could require too much resources, so we simplify that */ for (i = 0; i < src_parents->number; i++) { iter = src_parents->set.s[i]; /* shortcut */ if (!iter->iffeature_size) { continue; } for (j = 0; j < iter->iffeature_size; j++) { resolve_iffeature_getsizes(&iter->iffeature[j], NULL, &size); for (; size; size--) { if (!iter->iffeature[j].features[size - 1]) { /* not yet resolved feature, postpone this check */ ret = EXIT_FAILURE; goto cleanup; } ly_set_add(features, iter->iffeature[j].features[size - 1], 0); } } } x = features->number; for (i = 0; i < trg_parents->number; i++) { iter = trg_parents->set.s[i]; /* shortcut */ if (!iter->iffeature_size) { continue; } for (j = 0; j < iter->iffeature_size; j++) { resolve_iffeature_getsizes(&iter->iffeature[j], NULL, &size); for (; size; size--) { if (!iter->iffeature[j].features[size - 1]) { /* not yet resolved feature, postpone this check */ ret = EXIT_FAILURE; goto cleanup; } if ((unsigned)ly_set_add(features, iter->iffeature[j].features[size - 1], 0) >= x) { /* the feature is not present in features set of target's parents chain */ LOGVAL(ctx, LYE_NORESOLV, LY_VLOG_LYS, type->parent, "leafref", type->info.lref.path); LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "Leafref is not conditional based on \"%s\" feature as its target.", iter->iffeature[j].features[size - 1]->name); ret = -1; goto cleanup; } } } } cleanup: ly_set_free(features); ly_set_free(src_parents); ly_set_free(trg_parents); return ret; } /** * @brief Resolve a path (leafref) in JSON schema context. Logs directly. * * @param[in] path Path to use. * @param[in] parent_node Parent of the leafref. 
* @param[out] ret Pointer to the resolved schema node. Can be NULL. * * @return EXIT_SUCCESS on success, EXIT_FAILURE on forward reference, -1 on error. */ static int resolve_schema_leafref(struct lys_type *type, struct lys_node *parent, struct unres_schema *unres) { const struct lys_node *node, *op_node = NULL, *tmp_parent; struct lys_node_augment *last_aug; const struct lys_module *tmp_mod, *cur_module; const char *id, *prefix, *name; int pref_len, nam_len, parent_times, has_predicate; int i, first_iter; struct ly_ctx *ctx = parent->module->ctx; if (!type->info.lref.target) { first_iter = 1; parent_times = 0; id = type->info.lref.path; /* find operation schema we are in */ for (op_node = lys_parent(parent); op_node && !(op_node->nodetype & (LYS_ACTION | LYS_NOTIF | LYS_RPC)); op_node = lys_parent(op_node)); cur_module = lys_node_module(parent); do { if ((i = parse_path_arg(cur_module, id, &prefix, &pref_len, &name, &nam_len, &parent_times, &has_predicate)) < 1) { LOGVAL(ctx, LYE_INCHAR, LY_VLOG_LYS, parent, id[-i], &id[-i]); return -1; } id += i; /* get the current module */ tmp_mod = prefix ? lyp_get_module(cur_module, NULL, 0, prefix, pref_len, 0) : cur_module; if (!tmp_mod) { LOGVAL(ctx, LYE_NORESOLV, LY_VLOG_LYS, parent, "leafref", type->info.lref.path); return EXIT_FAILURE; } last_aug = NULL; if (first_iter) { if (parent_times == -1) { /* use module data */ node = NULL; } else if (parent_times > 0) { /* we are looking for the right parent */ for (i = 0, node = parent; i < parent_times; i++) { if (node->parent && (node->parent->nodetype == LYS_AUGMENT) && !((struct lys_node_augment *)node->parent)->target) { /* we are in an unresolved augment, cannot evaluate */ LOGVAL(ctx, LYE_SPEC, LY_VLOG_LYS, node->parent, "Cannot resolve leafref \"%s\" because it is in an unresolved augment.", type->info.lref.path); return EXIT_FAILURE; } /* path is supposed to be evaluated in data tree, so we have to skip * all schema nodes that cannot be instantiated in data tree */ for (node = lys_parent(node); node && !(node->nodetype & (LYS_CONTAINER | LYS_LIST | LYS_ACTION | LYS_NOTIF | LYS_RPC)); node = lys_parent(node)); if (!node) { if (i == parent_times - 1) { /* top-level */ break; } /* higher than top-level */ LOGVAL(ctx, LYE_NORESOLV, LY_VLOG_LYS, parent, "leafref", type->info.lref.path); return EXIT_FAILURE; } } } else { LOGINT(ctx); return -1; } } /* find the next node (either in unconnected augment or as a schema sibling, node is NULL for top-level node - * - useless to search for that in augments) */ if (!tmp_mod->implemented && node) { get_next_augment: last_aug = lys_getnext_target_aug(last_aug, tmp_mod, node); } tmp_parent = (last_aug ? (struct lys_node *)last_aug : node); node = NULL; while ((node = lys_getnext(node, tmp_parent, tmp_mod, LYS_GETNEXT_NOSTATECHECK))) { if (lys_node_module(node) != lys_main_module(tmp_mod)) { continue; } if (strncmp(node->name, name, nam_len) || node->name[nam_len]) { continue; } /* match */ break; } if (!node) { if (last_aug) { /* restore the correct augment target */ node = last_aug->target; goto get_next_augment; } LOGVAL(ctx, LYE_NORESOLV, LY_VLOG_LYS, parent, "leafref", type->info.lref.path); return EXIT_FAILURE; } if (first_iter) { /* set external dependency flag, we can decide based on the first found node */ if (resolve_schema_leafref_valid_dep_flag(op_node, cur_module, node, (parent_times == -1 ? 
1 : 0))) { parent->flags |= LYS_LEAFREF_DEP; } first_iter = 0; } if (has_predicate) { /* we have predicate, so the current result must be list */ if (node->nodetype != LYS_LIST) { LOGVAL(ctx, LYE_NORESOLV, LY_VLOG_LYS, parent, "leafref", type->info.lref.path); return -1; } i = resolve_schema_leafref_predicate(id, node, parent); if (!i) { return EXIT_FAILURE; } else if (i < 0) { return -1; } id += i; has_predicate = 0; } } while (id[0]); /* the target must be leaf or leaf-list (in YANG 1.1 only) */ if ((node->nodetype != LYS_LEAF) && (node->nodetype != LYS_LEAFLIST)) { LOGVAL(ctx, LYE_NORESOLV, LY_VLOG_LYS, parent, "leafref", type->info.lref.path); LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "Leafref target \"%s\" is not a leaf nor a leaf-list.", type->info.lref.path); return -1; } /* check status */ if (lyp_check_status(parent->flags, parent->module, parent->name, node->flags, node->module, node->name, node)) { return -1; } /* assign */ type->info.lref.target = (struct lys_node_leaf *)node; } /* as the last thing traverse this leafref and make targets on the path implemented */ if (lys_node_module(parent)->implemented) { /* make all the modules in the path implemented */ for (node = (struct lys_node *)type->info.lref.target; node; node = lys_parent(node)) { if (!lys_node_module(node)->implemented) { lys_node_module(node)->implemented = 1; if (unres_schema_add_node(lys_node_module(node), unres, NULL, UNRES_MOD_IMPLEMENT, NULL) == -1) { return -1; } } } /* store the backlink from leafref target */ if (lys_leaf_add_leafref_target(type->info.lref.target, (struct lys_node *)type->parent)) { return -1; } } /* check if leafref and its target are under common if-features */ return check_leafref_features(type); } /** * @brief Compare 2 data node values. * * Comparison performed on canonical forms, the first value * is first transformed into canonical form. * * @param[in] node Leaf/leaf-list with these values. * @param[in] noncan_val Non-canonical value. * @param[in] noncan_val_len Length of \p noncal_val. * @param[in] can_val Canonical value. * @return 1 if equal, 0 if not, -1 on error (logged). */ static int valequal(struct lys_node *node, const char *noncan_val, int noncan_val_len, const char *can_val) { int ret; struct lyd_node_leaf_list leaf; struct lys_node_leaf *sleaf = (struct lys_node_leaf*)node; /* dummy leaf */ memset(&leaf, 0, sizeof leaf); leaf.value_str = lydict_insert(node->module->ctx, noncan_val, noncan_val_len); repeat: leaf.value_type = sleaf->type.base; leaf.schema = node; if (leaf.value_type == LY_TYPE_LEAFREF) { if (!sleaf->type.info.lref.target) { /* it should either be unresolved leafref (leaf.value_type are ORed flags) or it will be resolved */ LOGINT(node->module->ctx); ret = -1; goto finish; } sleaf = sleaf->type.info.lref.target; goto repeat; } else { if (!lyp_parse_value(&sleaf->type, &leaf.value_str, NULL, &leaf, NULL, NULL, 0, 0, 0)) { ret = -1; goto finish; } } if (!strcmp(leaf.value_str, can_val)) { ret = 1; } else { ret = 0; } finish: lydict_remove(node->module->ctx, leaf.value_str); return ret; } /** * @brief Resolve instance-identifier predicate in JSON data format. * Does not log. * * @param[in] prev_mod Previous module to use in case there is no prefix. * @param[in] pred Predicate to use. * @param[in,out] node Node matching the restriction without * the predicate. If it does not satisfy the predicate, * it is set to NULL. * * @return Number of characters successfully parsed, * positive on success, negative on failure. 
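 *
 * Illustrative examples (names are made up): "[name='eth0']" checks a list key, "[.='fd00::1']"
 * checks a leaf-list value and "[2]" checks the position of a keyless list instance; when the
 * predicate does not match, *node is set to NULL but the rest of the predicate string is still
 * parsed.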
*/ static int resolve_instid_predicate(const struct lys_module *prev_mod, const char *pred, struct lyd_node **node, int cur_idx) { /* ... /node[key=value] ... */ struct lyd_node_leaf_list *key; struct lys_node_leaf **list_keys = NULL; struct lys_node_list *slist = NULL; const char *model, *name, *value; int mod_len, nam_len, val_len, i, has_predicate, parsed; struct ly_ctx *ctx = prev_mod->ctx; assert(pred && node && *node); parsed = 0; do { if ((i = parse_predicate(pred + parsed, &model, &mod_len, &name, &nam_len, &value, &val_len, &has_predicate)) < 1) { return -parsed + i; } parsed += i; if (!(*node)) { /* just parse it all */ continue; } /* target */ if (name[0] == '.') { /* leaf-list value */ if ((*node)->schema->nodetype != LYS_LEAFLIST) { LOGVAL(ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Instance identifier expects leaf-list, but have %s \"%s\".", strnodetype((*node)->schema->nodetype), (*node)->schema->name); parsed = -1; goto cleanup; } /* check the value */ if (!valequal((*node)->schema, value, val_len, ((struct lyd_node_leaf_list *)*node)->value_str)) { *node = NULL; goto cleanup; } } else if (isdigit(name[0])) { assert(!value); /* keyless list position */ if ((*node)->schema->nodetype != LYS_LIST) { LOGVAL(ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Instance identifier expects list, but have %s \"%s\".", strnodetype((*node)->schema->nodetype), (*node)->schema->name); parsed = -1; goto cleanup; } if (((struct lys_node_list *)(*node)->schema)->keys) { LOGVAL(ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Instance identifier expects list without keys, but have list \"%s\".", (*node)->schema->name); parsed = -1; goto cleanup; } /* check the index */ if (atoi(name) != cur_idx) { *node = NULL; goto cleanup; } } else { /* list key value */ if ((*node)->schema->nodetype != LYS_LIST) { LOGVAL(ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Instance identifier expects list, but have %s \"%s\".", strnodetype((*node)->schema->nodetype), (*node)->schema->name); parsed = -1; goto cleanup; } slist = (struct lys_node_list *)(*node)->schema; /* prepare key array */ if (!list_keys) { list_keys = malloc(slist->keys_size * sizeof *list_keys); LY_CHECK_ERR_RETURN(!list_keys, LOGMEM(ctx), -1); for (i = 0; i < slist->keys_size; ++i) { list_keys[i] = slist->keys[i]; } } /* find the schema key leaf */ for (i = 0; i < slist->keys_size; ++i) { if (list_keys[i] && !strncmp(list_keys[i]->name, name, nam_len) && !list_keys[i]->name[nam_len]) { break; } } if (i == slist->keys_size) { /* this list has no such key */ LOGVAL(ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Instance identifier expects list with the key \"%.*s\"," " but list \"%s\" does not define it.", nam_len, name, slist->name); parsed = -1; goto cleanup; } /* check module */ if (model) { if (strncmp(list_keys[i]->module->name, model, mod_len) || list_keys[i]->module->name[mod_len]) { LOGVAL(ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Instance identifier expects key \"%s\" from module \"%.*s\", not \"%s\".", list_keys[i]->name, model, mod_len, list_keys[i]->module->name); parsed = -1; goto cleanup; } } else { if (list_keys[i]->module != prev_mod) { LOGVAL(ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Instance identifier expects key \"%s\" from module \"%s\", not \"%s\".", list_keys[i]->name, prev_mod->name, list_keys[i]->module->name); parsed = -1; goto cleanup; } } /* find the actual data key */ for (key = (struct lyd_node_leaf_list *)(*node)->child; key; key = (struct lyd_node_leaf_list *)key->next) { if (key->schema == (struct lys_node *)list_keys[i]) { break; } } if (!key) { /* list instance is missing a 
key? definitely should not happen */ LOGINT(ctx); parsed = -1; goto cleanup; } /* check the value */ if (!valequal(key->schema, value, val_len, key->value_str)) { *node = NULL; /* we still want to parse the whole predicate */ continue; } /* everything is fine, mark this key as resolved */ list_keys[i] = NULL; } } while (has_predicate); /* check that all list keys were specified */ if (*node && list_keys) { for (i = 0; i < slist->keys_size; ++i) { if (list_keys[i]) { LOGVAL(ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Instance identifier is missing list key \"%s\".", list_keys[i]->name); parsed = -1; goto cleanup; } } } cleanup: free(list_keys); return parsed; } static int check_xpath(struct lys_node *node, int check_place) { struct lys_node *parent; struct lyxp_set set; enum int_log_opts prev_ilo; if (check_place) { parent = node; while (parent) { if (parent->nodetype == LYS_GROUPING) { /* unresolved grouping, skip for now (will be checked later) */ return EXIT_SUCCESS; } if (parent->nodetype == LYS_AUGMENT) { if (!((struct lys_node_augment *)parent)->target) { /* unresolved augment, skip for now (will be checked later) */ return EXIT_FAILURE; } else { parent = ((struct lys_node_augment *)parent)->target; continue; } } parent = parent->parent; } } memset(&set, 0, sizeof set); /* produce just warnings */ ly_ilo_change(NULL, ILO_ERR2WRN, &prev_ilo, NULL); lyxp_node_atomize(node, &set, 1); ly_ilo_restore(NULL, prev_ilo, NULL, 0); if (set.val.snodes) { free(set.val.snodes); } return EXIT_SUCCESS; } static int check_leafref_config(struct lys_node_leaf *leaf, struct lys_type *type) { unsigned int i; if (type->base == LY_TYPE_LEAFREF) { if ((leaf->flags & LYS_CONFIG_W) && type->info.lref.target && type->info.lref.req != -1 && (type->info.lref.target->flags & LYS_CONFIG_R)) { LOGVAL(leaf->module->ctx, LYE_SPEC, LY_VLOG_LYS, leaf, "The leafref %s is config but refers to a non-config %s.", strnodetype(leaf->nodetype), strnodetype(type->info.lref.target->nodetype)); return -1; } /* we can skip the test in case the leafref is not yet resolved. In that case the test is done in the time * of leafref resolving (lys_leaf_add_leafref_target()) */ } else if (type->base == LY_TYPE_UNION) { for (i = 0; i < type->info.uni.count; i++) { if (check_leafref_config(leaf, &type->info.uni.types[i])) { return -1; } } } return 0; } /** * @brief Passes config flag down to children, skips nodes without config flags. * Logs. * * @param[in] node Siblings and their children to have flags changed. * @param[in] clear Flag to clear all config flags if parent is LYS_NOTIF, LYS_INPUT, LYS_OUTPUT, LYS_RPC. * @param[in] flags Flags to assign to all the nodes. * @param[in,out] unres List of unresolved items. * * @return 0 on success, -1 on error. 
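 *
 * Illustrative example: when called with LYS_CONFIG_R (e.g. below a "config false" container),
 * every descendant without an explicit config statement inherits the read-only flag, while an
 * explicit "config true" descendant is rejected because state nodes cannot have configuration
 * children.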
*/ int inherit_config_flag(struct lys_node *node, int flags, int clear) { struct lys_node_leaf *leaf; struct ly_ctx *ctx; if (!node) { return 0; } assert(!(flags ^ (flags & LYS_CONFIG_MASK))); ctx = node->module->ctx; LY_TREE_FOR(node, node) { if (clear) { node->flags &= ~LYS_CONFIG_MASK; node->flags &= ~LYS_CONFIG_SET; } else { if (node->flags & LYS_CONFIG_SET) { /* skip nodes with an explicit config value */ if ((flags & LYS_CONFIG_R) && (node->flags & LYS_CONFIG_W)) { LOGVAL(ctx, LYE_INARG, LY_VLOG_LYS, node, "true", "config"); LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "State nodes cannot have configuration nodes as children."); return -1; } continue; } if (!(node->nodetype & (LYS_USES | LYS_GROUPING))) { node->flags = (node->flags & ~LYS_CONFIG_MASK) | flags; /* check that configuration lists have keys */ if ((node->nodetype == LYS_LIST) && (node->flags & LYS_CONFIG_W) && !((struct lys_node_list *)node)->keys_size) { LOGVAL(ctx, LYE_MISSCHILDSTMT, LY_VLOG_LYS, node, "key", "list"); return -1; } } } if (!(node->nodetype & (LYS_LEAF | LYS_LEAFLIST | LYS_ANYDATA))) { if (inherit_config_flag(node->child, flags, clear)) { return -1; } } else if (node->nodetype & (LYS_LEAF | LYS_LEAFLIST)) { leaf = (struct lys_node_leaf *)node; if (check_leafref_config(leaf, &leaf->type)) { return -1; } } } return 0; } /** * @brief Resolve augment target. Logs directly. * * @param[in] aug Augment to use. * @param[in] uses Parent where to start the search in. If set, uses augment, if not, standalone augment. * @param[in,out] unres List of unresolved items. * * @return EXIT_SUCCESS on success, EXIT_FAILURE on forward reference, -1 on error. */ static int resolve_augment(struct lys_node_augment *aug, struct lys_node *uses, struct unres_schema *unres) { int rc; struct lys_node *sub; struct lys_module *mod; struct ly_set *set; struct ly_ctx *ctx; assert(aug); mod = lys_main_module(aug->module); ctx = mod->ctx; /* set it as not applied for now */ aug->flags |= LYS_NOTAPPLIED; /* it can already be resolved in case we returned EXIT_FAILURE from if block below */ if (!aug->target) { /* resolve target node */ rc = resolve_schema_nodeid(aug->target_name, uses, (uses ? 
NULL : lys_node_module((struct lys_node *)aug)), &set, 0, 0); if (rc == -1) { LOGVAL(ctx, LYE_PATH, LY_VLOG_LYS, aug); return -1; } if (!set) { LOGVAL(ctx, LYE_INRESOLV, LY_VLOG_LYS, aug, "augment", aug->target_name); return EXIT_FAILURE; } aug->target = set->set.s[0]; ly_set_free(set); } /* make this module implemented if the target module is (if the target is in an unimplemented module, * it is fine because when we will be making that module implemented, its augment will be applied * and that augment target module made implemented, recursively) */ if (mod->implemented && !lys_node_module(aug->target)->implemented) { lys_node_module(aug->target)->implemented = 1; if (unres_schema_add_node(lys_node_module(aug->target), unres, NULL, UNRES_MOD_IMPLEMENT, NULL) == -1) { return -1; } } /* check for mandatory nodes - if the target node is in another module * the added nodes cannot be mandatory */ if (!aug->parent && (lys_node_module((struct lys_node *)aug) != lys_node_module(aug->target)) && (rc = lyp_check_mandatory_augment(aug, aug->target))) { return rc; } /* check augment target type and then augment nodes type */ if (aug->target->nodetype & (LYS_CONTAINER | LYS_LIST)) { LY_TREE_FOR(aug->child, sub) { if (!(sub->nodetype & (LYS_ANYDATA | LYS_CONTAINER | LYS_LEAF | LYS_LIST | LYS_LEAFLIST | LYS_USES | LYS_CHOICE | LYS_ACTION | LYS_NOTIF))) { LOGVAL(ctx, LYE_INCHILDSTMT, LY_VLOG_LYS, aug, strnodetype(sub->nodetype), "augment"); LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "Cannot augment \"%s\" with a \"%s\".", strnodetype(aug->target->nodetype), strnodetype(sub->nodetype)); return -1; } } } else if (aug->target->nodetype & (LYS_CASE | LYS_INPUT | LYS_OUTPUT | LYS_NOTIF)) { LY_TREE_FOR(aug->child, sub) { if (!(sub->nodetype & (LYS_ANYDATA | LYS_CONTAINER | LYS_LEAF | LYS_LIST | LYS_LEAFLIST | LYS_USES | LYS_CHOICE))) { LOGVAL(ctx, LYE_INCHILDSTMT, LY_VLOG_LYS, aug, strnodetype(sub->nodetype), "augment"); LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "Cannot augment \"%s\" with a \"%s\".", strnodetype(aug->target->nodetype), strnodetype(sub->nodetype)); return -1; } } } else if (aug->target->nodetype == LYS_CHOICE) { LY_TREE_FOR(aug->child, sub) { if (!(sub->nodetype & (LYS_CASE | LYS_ANYDATA | LYS_CONTAINER | LYS_LEAF | LYS_LIST | LYS_LEAFLIST))) { LOGVAL(ctx, LYE_INCHILDSTMT, LY_VLOG_LYS, aug, strnodetype(sub->nodetype), "augment"); LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "Cannot augment \"%s\" with a \"%s\".", strnodetype(aug->target->nodetype), strnodetype(sub->nodetype)); return -1; } } } else { LOGVAL(ctx, LYE_INARG, LY_VLOG_LYS, aug, aug->target_name, "target-node"); LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "Invalid augment target node type \"%s\".", strnodetype(aug->target->nodetype)); return -1; } /* check identifier uniqueness as in lys_node_addchild() */ LY_TREE_FOR(aug->child, sub) { if (lys_check_id(sub, aug->target, NULL)) { return -1; } } if (!aug->child) { /* empty augment, nothing to connect, but it is techincally applied */ LOGWRN(ctx, "Augment \"%s\" without children.", aug->target_name); aug->flags &= ~LYS_NOTAPPLIED; } else if ((aug->parent || mod->implemented) && apply_aug(aug, unres)) { /* we try to connect the augment only in case the module is implemented or * the augment applies on the used grouping, anyway we failed here */ return -1; } return EXIT_SUCCESS; } static int resolve_extension(struct unres_ext *info, struct lys_ext_instance **ext, struct unres_schema *unres) { enum LY_VLOG_ELEM vlog_type; void *vlog_node; unsigned int i, j; struct lys_ext *e; char *ext_name, 
*ext_prefix, *tmp; struct lyxml_elem *next_yin, *yin; const struct lys_module *mod; struct lys_ext_instance *tmp_ext; struct ly_ctx *ctx = NULL; LYEXT_TYPE etype; switch (info->parent_type) { case LYEXT_PAR_NODE: vlog_node = info->parent; vlog_type = LY_VLOG_LYS; break; case LYEXT_PAR_MODULE: case LYEXT_PAR_IMPORT: case LYEXT_PAR_INCLUDE: vlog_node = NULL; vlog_type = LY_VLOG_LYS; break; default: vlog_node = NULL; vlog_type = LY_VLOG_NONE; break; } if (info->datatype == LYS_IN_YIN) { /* YIN */ /* get the module where the extension is supposed to be defined */ mod = lyp_get_import_module_ns(info->mod, info->data.yin->ns->value); if (!mod) { LOGVAL(ctx, LYE_INSTMT, vlog_type, vlog_node, info->data.yin->name); return EXIT_FAILURE; } ctx = mod->ctx; /* find the extension definition */ e = NULL; for (i = 0; i < mod->extensions_size; i++) { if (ly_strequal(mod->extensions[i].name, info->data.yin->name, 1)) { e = &mod->extensions[i]; break; } } /* try submodules */ for (j = 0; !e && j < mod->inc_size; j++) { for (i = 0; i < mod->inc[j].submodule->extensions_size; i++) { if (ly_strequal(mod->inc[j].submodule->extensions[i].name, info->data.yin->name, 1)) { e = &mod->inc[j].submodule->extensions[i]; break; } } } if (!e) { LOGVAL(ctx, LYE_INSTMT, vlog_type, vlog_node, info->data.yin->name); return EXIT_FAILURE; } /* we have the extension definition, so now it cannot be forward referenced and error is always fatal */ if (e->plugin && e->plugin->check_position) { /* common part - we have plugin with position checking function, use it first */ if ((*e->plugin->check_position)(info->parent, info->parent_type, info->substmt)) { /* extension is not allowed here */ LOGVAL(ctx, LYE_INSTMT, vlog_type, vlog_node, e->name); return -1; } } /* extension type-specific part - allocation */ if (e->plugin) { etype = e->plugin->type; } else { /* default type */ etype = LYEXT_FLAG; } switch (etype) { case LYEXT_FLAG: (*ext) = calloc(1, sizeof(struct lys_ext_instance)); break; case LYEXT_COMPLEX: (*ext) = calloc(1, ((struct lyext_plugin_complex*)e->plugin)->instance_size); break; case LYEXT_ERR: /* we never should be here */ LOGINT(ctx); return -1; } LY_CHECK_ERR_RETURN(!*ext, LOGMEM(ctx), -1); /* common part for all extension types */ (*ext)->def = e; (*ext)->parent = info->parent; (*ext)->parent_type = info->parent_type; (*ext)->insubstmt = info->substmt; (*ext)->insubstmt_index = info->substmt_index; (*ext)->ext_type = e->plugin ? e->plugin->type : LYEXT_FLAG; (*ext)->flags |= e->plugin ? 
e->plugin->flags : 0; if (e->argument) { if (!(e->flags & LYS_YINELEM)) { (*ext)->arg_value = lyxml_get_attr(info->data.yin, e->argument, NULL); if (!(*ext)->arg_value) { LOGVAL(ctx, LYE_MISSARG, LY_VLOG_NONE, NULL, e->argument, info->data.yin->name); return -1; } (*ext)->arg_value = lydict_insert(mod->ctx, (*ext)->arg_value, 0); } else { LY_TREE_FOR_SAFE(info->data.yin->child, next_yin, yin) { if (ly_strequal(yin->name, e->argument, 1)) { (*ext)->arg_value = lydict_insert(mod->ctx, yin->content, 0); lyxml_free(mod->ctx, yin); break; } } } } if ((*ext)->flags & LYEXT_OPT_VALID && (info->parent_type == LYEXT_PAR_NODE || info->parent_type == LYEXT_PAR_TPDF)) { ((struct lys_node *)info->parent)->flags |= LYS_VALID_EXT; } (*ext)->nodetype = LYS_EXT; (*ext)->module = info->mod; /* extension type-specific part - parsing content */ switch (etype) { case LYEXT_FLAG: LY_TREE_FOR_SAFE(info->data.yin->child, next_yin, yin) { if (!yin->ns) { /* garbage */ lyxml_free(mod->ctx, yin); continue; } else if (!strcmp(yin->ns->value, LY_NSYIN)) { /* standard YANG statements are not expected here */ LOGVAL(ctx, LYE_INCHILDSTMT, vlog_type, vlog_node, yin->name, info->data.yin->name); return -1; } else if (yin->ns == info->data.yin->ns && (e->flags & LYS_YINELEM) && ly_strequal(yin->name, e->argument, 1)) { /* we have the extension's argument */ if ((*ext)->arg_value) { LOGVAL(ctx, LYE_TOOMANY, vlog_type, vlog_node, yin->name, info->data.yin->name); return -1; } (*ext)->arg_value = yin->content; yin->content = NULL; lyxml_free(mod->ctx, yin); } else { /* extension instance */ if (lyp_yin_parse_subnode_ext(info->mod, *ext, LYEXT_PAR_EXTINST, yin, LYEXT_SUBSTMT_SELF, 0, unres)) { return -1; } continue; } } break; case LYEXT_COMPLEX: ((struct lys_ext_instance_complex*)(*ext))->substmt = ((struct lyext_plugin_complex*)e->plugin)->substmt; if (lyp_yin_parse_complex_ext(info->mod, (struct lys_ext_instance_complex*)(*ext), info->data.yin, unres)) { /* TODO memory cleanup */ return -1; } break; default: break; } /* TODO - lyext_check_result_clb, other than LYEXT_FLAG plugins */ } else { /* YANG */ ext_prefix = (char *)(*ext)->def; tmp = strchr(ext_prefix, ':'); if (!tmp) { LOGVAL(ctx, LYE_INSTMT, vlog_type, vlog_node, ext_prefix); goto error; } ext_name = tmp + 1; /* get the module where the extension is supposed to be defined */ mod = lyp_get_module(info->mod, ext_prefix, tmp - ext_prefix, NULL, 0, 0); if (!mod) { LOGVAL(ctx, LYE_INSTMT, vlog_type, vlog_node, ext_prefix); return EXIT_FAILURE; } ctx = mod->ctx; /* find the extension definition */ e = NULL; for (i = 0; i < mod->extensions_size; i++) { if (ly_strequal(mod->extensions[i].name, ext_name, 0)) { e = &mod->extensions[i]; break; } } /* try submodules */ for (j = 0; !e && j < mod->inc_size; j++) { for (i = 0; i < mod->inc[j].submodule->extensions_size; i++) { if (ly_strequal(mod->inc[j].submodule->extensions[i].name, ext_name, 0)) { e = &mod->inc[j].submodule->extensions[i]; break; } } } if (!e) { LOGVAL(ctx, LYE_INSTMT, vlog_type, vlog_node, ext_prefix); return EXIT_FAILURE; } (*ext)->flags &= ~LYEXT_OPT_YANG; (*ext)->def = NULL; /* we have the extension definition, so now it cannot be forward referenced and error is always fatal */ if (e->plugin && e->plugin->check_position) { /* common part - we have plugin with position checking function, use it first */ if ((*e->plugin->check_position)(info->parent, info->parent_type, info->substmt)) { /* extension is not allowed here */ LOGVAL(ctx, LYE_INSTMT, vlog_type, vlog_node, e->name); goto error; } } /* extension 
common part */ (*ext)->def = e; (*ext)->parent = info->parent; (*ext)->ext_type = e->plugin ? e->plugin->type : LYEXT_FLAG; (*ext)->flags |= e->plugin ? e->plugin->flags : 0; if (e->argument && !(*ext)->arg_value) { LOGVAL(ctx, LYE_MISSARG, LY_VLOG_NONE, NULL, e->argument, ext_name); goto error; } if ((*ext)->flags & LYEXT_OPT_VALID && (info->parent_type == LYEXT_PAR_NODE || info->parent_type == LYEXT_PAR_TPDF)) { ((struct lys_node *)info->parent)->flags |= LYS_VALID_EXT; } (*ext)->module = info->mod; (*ext)->nodetype = LYS_EXT; /* extension type-specific part */ if (e->plugin) { etype = e->plugin->type; } else { /* default type */ etype = LYEXT_FLAG; } switch (etype) { case LYEXT_FLAG: /* nothing change */ break; case LYEXT_COMPLEX: tmp_ext = realloc(*ext, ((struct lyext_plugin_complex*)e->plugin)->instance_size); LY_CHECK_ERR_GOTO(!tmp_ext, LOGMEM(ctx), error); memset((char *)tmp_ext + offsetof(struct lys_ext_instance_complex, content), 0, ((struct lyext_plugin_complex*)e->plugin)->instance_size - offsetof(struct lys_ext_instance_complex, content)); (*ext) = tmp_ext; ((struct lys_ext_instance_complex*)(*ext))->substmt = ((struct lyext_plugin_complex*)e->plugin)->substmt; if (info->data.yang) { *tmp = ':'; if (yang_parse_ext_substatement(info->mod, unres, info->data.yang->ext_substmt, ext_prefix, (struct lys_ext_instance_complex*)(*ext))) { goto error; } if (yang_fill_extcomplex_module(info->mod->ctx, (struct lys_ext_instance_complex*)(*ext), ext_prefix, info->data.yang->ext_modules, info->mod->implemented)) { goto error; } } if (lyp_mand_check_ext((struct lys_ext_instance_complex*)(*ext), ext_prefix)) { goto error; } break; case LYEXT_ERR: /* we never should be here */ LOGINT(ctx); goto error; } if (yang_check_ext_instance(info->mod, &(*ext)->ext, (*ext)->ext_size, *ext, unres)) { goto error; } free(ext_prefix); } return EXIT_SUCCESS; error: free(ext_prefix); return -1; } /** * @brief Resolve (find) choice default case. Does not log. * * @param[in] choic Choice to use. * @param[in] dflt Name of the default case. * * @return Pointer to the default node or NULL. */ static struct lys_node * resolve_choice_dflt(struct lys_node_choice *choic, const char *dflt) { struct lys_node *child, *ret; LY_TREE_FOR(choic->child, child) { if (child->nodetype == LYS_USES) { ret = resolve_choice_dflt((struct lys_node_choice *)child, dflt); if (ret) { return ret; } } if (ly_strequal(child->name, dflt, 1) && (child->nodetype & (LYS_ANYDATA | LYS_CASE | LYS_CONTAINER | LYS_LEAF | LYS_LEAFLIST | LYS_LIST | LYS_CHOICE))) { return child; } } return NULL; } /** * @brief Resolve uses, apply augments, refines. Logs directly. * * @param[in] uses Uses to use. * @param[in,out] unres List of unresolved items. * * @return EXIT_SUCCESS on success, -1 on error. 
*/ static int resolve_uses(struct lys_node_uses *uses, struct unres_schema *unres) { struct ly_ctx *ctx = uses->module->ctx; /* shortcut */ struct lys_node *node = NULL, *next, *iter, **refine_nodes = NULL; struct lys_node *node_aux, *parent, *tmp; struct lys_node_leaflist *llist; struct lys_node_leaf *leaf; struct lys_refine *rfn; struct lys_restr *must, **old_must; struct lys_iffeature *iff, **old_iff; int i, j, k, rc; uint8_t size, *old_size; unsigned int usize, usize1, usize2; assert(uses->grp); /* check that the grouping is resolved (no unresolved uses inside) */ assert(!uses->grp->unres_count); /* copy the data nodes from grouping into the uses context */ LY_TREE_FOR(uses->grp->child, node_aux) { if (node_aux->nodetype & LYS_GROUPING) { /* do not instantiate groupings from groupings */ continue; } node = lys_node_dup(uses->module, (struct lys_node *)uses, node_aux, unres, 0); if (!node) { LOGVAL(ctx, LYE_INARG, LY_VLOG_LYS, uses, uses->grp->name, "uses"); LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "Copying data from grouping failed."); goto fail; } /* test the name of siblings */ LY_TREE_FOR((uses->parent) ? *lys_child(uses->parent, LYS_USES) : lys_main_module(uses->module)->data, tmp) { if (!(tmp->nodetype & (LYS_USES | LYS_GROUPING | LYS_CASE)) && ly_strequal(tmp->name, node_aux->name, 1)) { goto fail; } } } /* we managed to copy the grouping, the rest must be possible to resolve */ if (uses->refine_size) { refine_nodes = malloc(uses->refine_size * sizeof *refine_nodes); LY_CHECK_ERR_GOTO(!refine_nodes, LOGMEM(ctx), fail); } /* apply refines */ for (i = 0; i < uses->refine_size; i++) { rfn = &uses->refine[i]; rc = resolve_descendant_schema_nodeid(rfn->target_name, uses->child, LYS_NO_RPC_NOTIF_NODE | LYS_ACTION | LYS_NOTIF, 0, (const struct lys_node **)&node); if (rc || !node) { LOGVAL(ctx, LYE_INARG, LY_VLOG_LYS, uses, rfn->target_name, "refine"); goto fail; } if (rfn->target_type && !(node->nodetype & rfn->target_type)) { LOGVAL(ctx, LYE_INARG, LY_VLOG_LYS, uses, rfn->target_name, "refine"); LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "Refine substatements not applicable to the target-node."); goto fail; } refine_nodes[i] = node; /* description on any nodetype */ if (rfn->dsc) { lydict_remove(ctx, node->dsc); node->dsc = lydict_insert(ctx, rfn->dsc, 0); } /* reference on any nodetype */ if (rfn->ref) { lydict_remove(ctx, node->ref); node->ref = lydict_insert(ctx, rfn->ref, 0); } /* config on any nodetype, * in case of notification or rpc/action, the config is not applicable (there is no config status) */ if ((rfn->flags & LYS_CONFIG_MASK) && (node->flags & LYS_CONFIG_MASK)) { node->flags &= ~LYS_CONFIG_MASK; node->flags |= (rfn->flags & LYS_CONFIG_MASK); } /* default value ... 
*/ if (rfn->dflt_size) { if (node->nodetype == LYS_LEAF) { /* leaf */ leaf = (struct lys_node_leaf *)node; /* replace default value */ lydict_remove(ctx, leaf->dflt); leaf->dflt = lydict_insert(ctx, rfn->dflt[0], 0); /* check the default value */ if (unres_schema_add_node(leaf->module, unres, &leaf->type, UNRES_TYPE_DFLT, (struct lys_node *)(&leaf->dflt)) == -1) { goto fail; } } else if (node->nodetype == LYS_LEAFLIST) { /* leaf-list */ llist = (struct lys_node_leaflist *)node; /* remove complete set of defaults in target */ for (j = 0; j < llist->dflt_size; j++) { lydict_remove(ctx, llist->dflt[j]); } free(llist->dflt); /* copy the default set from refine */ llist->dflt = malloc(rfn->dflt_size * sizeof *llist->dflt); LY_CHECK_ERR_GOTO(!llist->dflt, LOGMEM(ctx), fail); llist->dflt_size = rfn->dflt_size; for (j = 0; j < llist->dflt_size; j++) { llist->dflt[j] = lydict_insert(ctx, rfn->dflt[j], 0); } /* check default value */ for (j = 0; j < llist->dflt_size; j++) { if (unres_schema_add_node(llist->module, unres, &llist->type, UNRES_TYPE_DFLT, (struct lys_node *)(&llist->dflt[j])) == -1) { goto fail; } } } } /* mandatory on leaf, anyxml or choice */ if (rfn->flags & LYS_MAND_MASK) { /* remove current value */ node->flags &= ~LYS_MAND_MASK; /* set new value */ node->flags |= (rfn->flags & LYS_MAND_MASK); if (rfn->flags & LYS_MAND_TRUE) { /* check if node has default value */ if ((node->nodetype & LYS_LEAF) && ((struct lys_node_leaf *)node)->dflt) { LOGVAL(ctx, LYE_SPEC, LY_VLOG_LYS, uses, "The \"mandatory\" statement is forbidden on leaf with \"default\"."); goto fail; } if ((node->nodetype & LYS_CHOICE) && ((struct lys_node_choice *)node)->dflt) { LOGVAL(ctx, LYE_SPEC, LY_VLOG_LYS, uses, "The \"mandatory\" statement is forbidden on choices with \"default\"."); goto fail; } } } /* presence on container */ if ((node->nodetype & LYS_CONTAINER) && rfn->mod.presence) { lydict_remove(ctx, ((struct lys_node_container *)node)->presence); ((struct lys_node_container *)node)->presence = lydict_insert(ctx, rfn->mod.presence, 0); } /* min/max-elements on list or leaf-list */ if (node->nodetype == LYS_LIST) { if (rfn->flags & LYS_RFN_MINSET) { ((struct lys_node_list *)node)->min = rfn->mod.list.min; } if (rfn->flags & LYS_RFN_MAXSET) { ((struct lys_node_list *)node)->max = rfn->mod.list.max; } } else if (node->nodetype == LYS_LEAFLIST) { if (rfn->flags & LYS_RFN_MINSET) { ((struct lys_node_leaflist *)node)->min = rfn->mod.list.min; } if (rfn->flags & LYS_RFN_MAXSET) { ((struct lys_node_leaflist *)node)->max = rfn->mod.list.max; } } /* must in leaf, leaf-list, list, container or anyxml */ if (rfn->must_size) { switch (node->nodetype) { case LYS_LEAF: old_size = &((struct lys_node_leaf *)node)->must_size; old_must = &((struct lys_node_leaf *)node)->must; break; case LYS_LEAFLIST: old_size = &((struct lys_node_leaflist *)node)->must_size; old_must = &((struct lys_node_leaflist *)node)->must; break; case LYS_LIST: old_size = &((struct lys_node_list *)node)->must_size; old_must = &((struct lys_node_list *)node)->must; break; case LYS_CONTAINER: old_size = &((struct lys_node_container *)node)->must_size; old_must = &((struct lys_node_container *)node)->must; break; case LYS_ANYXML: case LYS_ANYDATA: old_size = &((struct lys_node_anydata *)node)->must_size; old_must = &((struct lys_node_anydata *)node)->must; break; default: LOGINT(ctx); goto fail; } size = *old_size + rfn->must_size; must = realloc(*old_must, size * sizeof *rfn->must); LY_CHECK_ERR_GOTO(!must, LOGMEM(ctx), fail); for (k = 0, j = *old_size; k < 
         rfn->must_size; k++, j++) {
                must[j].ext_size = rfn->must[k].ext_size;
                lys_ext_dup(ctx, rfn->module, rfn->must[k].ext, rfn->must[k].ext_size, &rfn->must[k], LYEXT_PAR_RESTR,
                            &must[j].ext, 0, unres);
                must[j].expr = lydict_insert(ctx, rfn->must[k].expr, 0);
                must[j].dsc = lydict_insert(ctx, rfn->must[k].dsc, 0);
                must[j].ref = lydict_insert(ctx, rfn->must[k].ref, 0);
                must[j].eapptag = lydict_insert(ctx, rfn->must[k].eapptag, 0);
                must[j].emsg = lydict_insert(ctx, rfn->must[k].emsg, 0);
                must[j].flags = rfn->must[k].flags;
            }

            *old_must = must;
            *old_size = size;

            /* check XPath dependencies again */
            if (unres_schema_add_node(node->module, unres, node, UNRES_XPATH, NULL) == -1) {
                goto fail;
            }
        }

        /* if-feature in leaf, leaf-list, list, container or anyxml */
        if (rfn->iffeature_size) {
            old_size = &node->iffeature_size;
            old_iff = &node->iffeature;

            size = *old_size + rfn->iffeature_size;
            iff = realloc(*old_iff, size * sizeof *rfn->iffeature);
            LY_CHECK_ERR_GOTO(!iff, LOGMEM(ctx), fail);
            *old_iff = iff;

            for (k = 0, j = *old_size; k < rfn->iffeature_size; k++, j++) {
                resolve_iffeature_getsizes(&rfn->iffeature[k], &usize1, &usize2);
                if (usize1) {
                    /* there is something to duplicate */
                    /* duplicate compiled expression, rounding the item count up to whole bytes */
                    usize = (usize1 / 4) + ((usize1 % 4) ? 1 : 0);
                    iff[j].expr = malloc(usize * sizeof *iff[j].expr);
                    LY_CHECK_ERR_GOTO(!iff[j].expr, LOGMEM(ctx), fail);
                    memcpy(iff[j].expr, rfn->iffeature[k].expr, usize * sizeof *iff[j].expr);

                    /* duplicate list of feature pointers */
                    iff[j].features = malloc(usize2 * sizeof *iff[j].features);
                    LY_CHECK_ERR_GOTO(!iff[j].features, LOGMEM(ctx), fail);
                    memcpy(iff[j].features, rfn->iffeature[k].features, usize2 * sizeof *iff[j].features);

                    /* duplicate extensions */
                    iff[j].ext_size = rfn->iffeature[k].ext_size;
                    lys_ext_dup(ctx, rfn->module, rfn->iffeature[k].ext, rfn->iffeature[k].ext_size,
                                &rfn->iffeature[k], LYEXT_PAR_IFFEATURE, &iff[j].ext, 0, unres);
                }
                (*old_size)++;
            }
            assert(*old_size == size);
        }
    }

    /* apply augments */
    for (i = 0; i < uses->augment_size; i++) {
        rc = resolve_augment(&uses->augment[i], (struct lys_node *)uses, unres);
        if (rc) {
            goto fail;
        }
    }

    /* check refines */
    for (i = 0; i < uses->refine_size; i++) {
        node = refine_nodes[i];
        rfn = &uses->refine[i];

        /* config on any nodetype */
        if ((rfn->flags & LYS_CONFIG_MASK) && (node->flags & LYS_CONFIG_MASK)) {
            for (parent = lys_parent(node); parent && parent->nodetype == LYS_USES; parent = lys_parent(parent));
            if (parent && parent->nodetype != LYS_GROUPING && (parent->flags & LYS_CONFIG_MASK)
                    && ((parent->flags & LYS_CONFIG_MASK) != (rfn->flags & LYS_CONFIG_MASK))
                    && (rfn->flags & LYS_CONFIG_W)) {
                /* setting config true under config false is prohibited */
                LOGVAL(ctx, LYE_INARG, LY_VLOG_LYS, uses, "config", "refine");
                LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "changing config from 'false' to 'true' is prohibited while "
                       "the target's parent is still config 'false'.");
                goto fail;
            }

            /* inherit config change to the target children */
            LY_TREE_DFS_BEGIN(node->child, next, iter) {
                if (rfn->flags & LYS_CONFIG_W) {
                    if (iter->flags & LYS_CONFIG_SET) {
                        /* config is set explicitly, go to next sibling */
                        next = NULL;
                        goto nextsibling;
                    }
                } else { /* LYS_CONFIG_R */
                    if ((iter->flags & LYS_CONFIG_SET) && (iter->flags & LYS_CONFIG_W)) {
                        /* error - we would have config data under status data */
                        LOGVAL(ctx, LYE_INARG, LY_VLOG_LYS, uses, "config", "refine");
                        LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "changing config from 'true' to 'false' is prohibited while the target "
                               "still has children with explicit config 'true'.");
                        goto fail;
                    }
} /* change config */ iter->flags &= ~LYS_CONFIG_MASK; iter->flags |= (rfn->flags & LYS_CONFIG_MASK); /* select next iter - modified LY_TREE_DFS_END */ if (iter->nodetype & (LYS_LEAF | LYS_LEAFLIST | LYS_ANYDATA)) { next = NULL; } else { next = iter->child; } nextsibling: if (!next) { /* try siblings */ next = iter->next; } while (!next) { /* parent is already processed, go to its sibling */ iter = lys_parent(iter); /* no siblings, go back through parents */ if (iter == node) { /* we are done, no next element to process */ break; } next = iter->next; } } } /* default value */ if (rfn->dflt_size) { if (node->nodetype == LYS_CHOICE) { /* choice */ ((struct lys_node_choice *)node)->dflt = resolve_choice_dflt((struct lys_node_choice *)node, rfn->dflt[0]); if (!((struct lys_node_choice *)node)->dflt) { LOGVAL(ctx, LYE_INARG, LY_VLOG_LYS, uses, rfn->dflt[0], "default"); goto fail; } if (lyp_check_mandatory_choice(node)) { goto fail; } } } /* min/max-elements on list or leaf-list */ if (node->nodetype == LYS_LIST && ((struct lys_node_list *)node)->max) { if (((struct lys_node_list *)node)->min > ((struct lys_node_list *)node)->max) { LOGVAL(ctx, LYE_SPEC, LY_VLOG_LYS, uses, "Invalid value \"%d\" of \"%s\".", rfn->mod.list.min, "min-elements"); LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "\"min-elements\" is bigger than \"max-elements\"."); goto fail; } } else if (node->nodetype == LYS_LEAFLIST && ((struct lys_node_leaflist *)node)->max) { if (((struct lys_node_leaflist *)node)->min > ((struct lys_node_leaflist *)node)->max) { LOGVAL(ctx, LYE_SPEC, LY_VLOG_LYS, uses, "Invalid value \"%d\" of \"%s\".", rfn->mod.list.min, "min-elements"); LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "\"min-elements\" is bigger than \"max-elements\"."); goto fail; } } /* additional checks */ /* default value with mandatory/min-elements */ if (node->nodetype == LYS_LEAFLIST) { llist = (struct lys_node_leaflist *)node; if (llist->dflt_size && llist->min) { LOGVAL(ctx, LYE_INCHILDSTMT, LY_VLOG_LYS, uses, rfn->dflt_size ? "default" : "min-elements", "refine"); LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "The \"min-elements\" statement with non-zero value is forbidden on leaf-lists with the \"default\" statement."); goto fail; } } else if (node->nodetype == LYS_LEAF) { leaf = (struct lys_node_leaf *)node; if (leaf->dflt && (leaf->flags & LYS_MAND_TRUE)) { LOGVAL(ctx, LYE_INCHILDSTMT, LY_VLOG_LYS, uses, rfn->dflt_size ? 
"default" : "mandatory", "refine"); LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "The \"mandatory\" statement is forbidden on leafs with the \"default\" statement."); goto fail; } } /* check for mandatory node in default case, first find the closest parent choice to the changed node */ if ((rfn->flags & LYS_MAND_TRUE) || rfn->mod.list.min) { for (parent = node->parent; parent && !(parent->nodetype & (LYS_CHOICE | LYS_GROUPING | LYS_ACTION | LYS_USES)); parent = parent->parent) { if (parent->nodetype == LYS_CONTAINER && ((struct lys_node_container *)parent)->presence) { /* stop also on presence containers */ break; } } /* and if it is a choice with the default case, check it for presence of a mandatory node in it */ if (parent && parent->nodetype == LYS_CHOICE && ((struct lys_node_choice *)parent)->dflt) { if (lyp_check_mandatory_choice(parent)) { goto fail; } } } } free(refine_nodes); return EXIT_SUCCESS; fail: LY_TREE_FOR_SAFE(uses->child, next, iter) { lys_node_free(iter, NULL, 0); } free(refine_nodes); return -1; } void resolve_identity_backlink_update(struct lys_ident *der, struct lys_ident *base) { int i; assert(der && base); if (!base->der) { /* create a set for backlinks if it does not exist */ base->der = ly_set_new(); } /* store backlink */ ly_set_add(base->der, der, LY_SET_OPT_USEASLIST); /* do it recursively */ for (i = 0; i < base->base_size; i++) { resolve_identity_backlink_update(der, base->base[i]); } } /** * @brief Resolve base identity recursively. Does not log. * * @param[in] module Main module. * @param[in] ident Identity to use. * @param[in] basename Base name of the identity. * @param[out] ret Pointer to the resolved identity. Can be NULL. * * @return EXIT_SUCCESS on success, EXIT_FAILURE on forward reference, -1 on crucial error. */ static int resolve_base_ident_sub(const struct lys_module *module, struct lys_ident *ident, const char *basename, struct unres_schema *unres, struct lys_ident **ret) { uint32_t i, j; struct lys_ident *base = NULL; struct ly_ctx *ctx = module->ctx; assert(ret); /* search module */ for (i = 0; i < module->ident_size; i++) { if (!strcmp(basename, module->ident[i].name)) { if (!ident) { /* just search for type, so do not modify anything, just return * the base identity pointer */ *ret = &module->ident[i]; return EXIT_SUCCESS; } base = &module->ident[i]; goto matchfound; } } /* search submodules */ for (j = 0; j < module->inc_size && module->inc[j].submodule; j++) { for (i = 0; i < module->inc[j].submodule->ident_size; i++) { if (!strcmp(basename, module->inc[j].submodule->ident[i].name)) { if (!ident) { *ret = &module->inc[j].submodule->ident[i]; return EXIT_SUCCESS; } base = &module->inc[j].submodule->ident[i]; goto matchfound; } } } matchfound: /* we found it somewhere */ if (base) { /* is it already completely resolved? 
*/ for (i = 0; i < unres->count; i++) { if ((unres->item[i] == base) && (unres->type[i] == UNRES_IDENT)) { /* identity found, but not yet resolved, so do not return it in *res and try it again later */ /* simple check for circular reference, * the complete check is done as a side effect of using only completely * resolved identities (previous check of unres content) */ if (ly_strequal((const char *)unres->str_snode[i], ident->name, 1)) { LOGVAL(ctx, LYE_INARG, LY_VLOG_NONE, NULL, basename, "base"); LOGVAL(ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Circular reference of \"%s\" identity.", basename); return -1; } return EXIT_FAILURE; } } /* checks done, store the result */ *ret = base; return EXIT_SUCCESS; } /* base not found (maybe a forward reference) */ return EXIT_FAILURE; } /** * @brief Resolve base identity. Logs directly. * * @param[in] module Main module. * @param[in] ident Identity to use. * @param[in] basename Base name of the identity. * @param[in] parent Either "type" or "identity". * @param[in,out] type Type structure where we want to resolve identity. Can be NULL. * * @return EXIT_SUCCESS on success, EXIT_FAILURE on forward reference, -1 on error. */ static int resolve_base_ident(const struct lys_module *module, struct lys_ident *ident, const char *basename, const char *parent, struct lys_type *type, struct unres_schema *unres) { const char *name; int mod_name_len = 0, rc; struct lys_ident *target, **ret; uint16_t flags; struct lys_module *mod; struct ly_ctx *ctx = module->ctx; assert((ident && !type) || (!ident && type)); if (!type) { /* have ident to resolve */ ret = &target; flags = ident->flags; mod = ident->module; } else { /* have type to fill */ ++type->info.ident.count; type->info.ident.ref = ly_realloc(type->info.ident.ref, type->info.ident.count * sizeof *type->info.ident.ref); LY_CHECK_ERR_RETURN(!type->info.ident.ref, LOGMEM(ctx), -1); ret = &type->info.ident.ref[type->info.ident.count - 1]; flags = type->parent->flags; mod = type->parent->module; } *ret = NULL; /* search for the base identity */ name = strchr(basename, ':'); if (name) { /* set name to correct position after colon */ mod_name_len = name - basename; name++; if (!strncmp(basename, module->name, mod_name_len) && !module->name[mod_name_len]) { /* prefix refers to the current module, ignore it */ mod_name_len = 0; } } else { name = basename; } /* get module where to search */ module = lyp_get_module(module, NULL, 0, mod_name_len ? basename : NULL, mod_name_len, 0); if (!module) { /* identity refers unknown data model */ LOGVAL(ctx, LYE_INMOD, LY_VLOG_NONE, NULL, basename); return -1; } /* search in the identified module ... */ rc = resolve_base_ident_sub(module, ident, name, unres, ret); if (!rc) { assert(*ret); /* check status */ if (lyp_check_status(flags, mod, ident ? ident->name : "of type", (*ret)->flags, (*ret)->module, (*ret)->name, NULL)) { rc = -1; } else if (ident) { ident->base[ident->base_size++] = *ret; if (lys_main_module(mod)->implemented) { /* in case of the implemented identity, maintain backlinks to it * from the base identities to make it available when resolving * data with the identity values (not implemented identity is not * allowed as an identityref value). 
*/ resolve_identity_backlink_update(ident, *ret); } } } else if (rc == EXIT_FAILURE) { LOGVAL(ctx, LYE_INRESOLV, LY_VLOG_NONE, NULL, parent, basename); if (type) { --type->info.ident.count; } } return rc; } /* * 1 - true (der is derived from base) * 0 - false (der is not derived from base) */ static int search_base_identity(struct lys_ident *der, struct lys_ident *base) { int i; if (der == base) { return 1; } else { for(i = 0; i < der->base_size; i++) { if (search_base_identity(der->base[i], base) == 1) { return 1; } } } return 0; } /** * @brief Resolve JSON data format identityref. Logs directly. * * @param[in] type Identityref type. * @param[in] ident_name Identityref name. * @param[in] node Node where the identityref is being resolved * @param[in] dflt flag if we are resolving default value in the schema * * @return Pointer to the identity resolvent, NULL on error. */ struct lys_ident * resolve_identref(struct lys_type *type, const char *ident_name, struct lyd_node *node, struct lys_module *mod, int dflt) { const char *mod_name, *name; char *str; int mod_name_len, nam_len, rc; int need_implemented = 0; unsigned int i, j; struct lys_ident *der, *cur; struct lys_module *imod = NULL, *m, *tmod; struct ly_ctx *ctx; assert(type && ident_name && mod); ctx = mod->ctx; if (!type || (!type->info.ident.count && !type->der) || !ident_name) { return NULL; } rc = parse_node_identifier(ident_name, &mod_name, &mod_name_len, &name, &nam_len, NULL, 0); if (rc < 1) { LOGVAL(ctx, LYE_INCHAR, node ? LY_VLOG_LYD : LY_VLOG_NONE, node, ident_name[-rc], &ident_name[-rc]); return NULL; } else if (rc < (signed)strlen(ident_name)) { LOGVAL(ctx, LYE_INCHAR, node ? LY_VLOG_LYD : LY_VLOG_NONE, node, ident_name[rc], &ident_name[rc]); return NULL; } m = lys_main_module(mod); /* shortcut */ if (!mod_name || (!strncmp(mod_name, m->name, mod_name_len) && !m->name[mod_name_len])) { /* identity is defined in the same module as node */ imod = m; } else if (dflt) { /* solving identityref in default definition in schema - * find the identity's module in the imported modules list to have a correct revision */ for (i = 0; i < mod->imp_size; i++) { if (!strncmp(mod_name, mod->imp[i].module->name, mod_name_len) && !mod->imp[i].module->name[mod_name_len]) { imod = mod->imp[i].module; break; } } /* We may need to pull it from the module that the typedef came from */ if (!imod && type && type->der) { tmod = type->der->module; for (i = 0; i < tmod->imp_size; i++) { if (!strncmp(mod_name, tmod->imp[i].module->name, mod_name_len) && !tmod->imp[i].module->name[mod_name_len]) { imod = tmod->imp[i].module; break; } } } } else { /* solving identityref in data - get the module from the context */ for (i = 0; i < (unsigned)mod->ctx->models.used; ++i) { imod = mod->ctx->models.list[i]; if (!strncmp(mod_name, imod->name, mod_name_len) && !imod->name[mod_name_len]) { break; } imod = NULL; } if (!imod && mod->ctx->models.parsing_sub_modules_count) { /* we are currently parsing some module and checking XPath or a default value, * so take this module into account */ for (i = 0; i < mod->ctx->models.parsing_sub_modules_count; i++) { imod = mod->ctx->models.parsing_sub_modules[i]; if (imod->type) { /* skip submodules */ continue; } if (!strncmp(mod_name, imod->name, mod_name_len) && !imod->name[mod_name_len]) { break; } imod = NULL; } } } if (!dflt && (!imod || !imod->implemented) && ctx->data_clb) { /* the needed module was not found, but it may have been expected so call the data callback */ if (imod) { ctx->data_clb(ctx, imod->name, imod->ns, 
LY_MODCLB_NOT_IMPLEMENTED, ctx->data_clb_data); } else if (mod_name) { str = strndup(mod_name, mod_name_len); imod = (struct lys_module *)ctx->data_clb(ctx, str, NULL, 0, ctx->data_clb_data); free(str); } } if (!imod) { goto fail; } if (m != imod || lys_main_module(type->parent->module) != mod) { /* the type is not referencing the same schema, * THEN, we may need to make the module with the identity implemented, but only if it really * contains the identity */ if (!imod->implemented) { cur = NULL; /* get the identity in the module */ for (i = 0; i < imod->ident_size; i++) { if (!strcmp(name, imod->ident[i].name)) { cur = &imod->ident[i]; break; } } if (!cur) { /* go through includes */ for (j = 0; j < imod->inc_size; j++) { for (i = 0; i < imod->inc[j].submodule->ident_size; i++) { if (!strcmp(name, imod->inc[j].submodule->ident[i].name)) { cur = &imod->inc[j].submodule->ident[i]; break; } } } if (!cur) { goto fail; } } /* check that identity is derived from one of the type's base */ while (type->der) { for (i = 0; i < type->info.ident.count; i++) { if (search_base_identity(cur, type->info.ident.ref[i])) { /* cur's base matches the type's base */ need_implemented = 1; goto match; } } type = &type->der->type; } /* matching base not found */ LOGVAL(ctx, LYE_SPEC, node ? LY_VLOG_LYD : LY_VLOG_NONE, node, "Identity used as identityref value is not implemented."); goto fail; } } /* go through all the derived types of all the bases */ while (type->der) { for (i = 0; i < type->info.ident.count; ++i) { cur = type->info.ident.ref[i]; if (cur->der) { /* there are some derived identities */ for (j = 0; j < cur->der->number; j++) { der = (struct lys_ident *)cur->der->set.g[j]; /* shortcut */ if (!strcmp(der->name, name) && lys_main_module(der->module) == imod) { /* we have match */ cur = der; goto match; } } } } type = &type->der->type; } fail: LOGVAL(ctx, LYE_INRESOLV, node ? LY_VLOG_LYD : LY_VLOG_NONE, node, "identityref", ident_name); return NULL; match: for (i = 0; i < cur->iffeature_size; i++) { if (!resolve_iffeature(&cur->iffeature[i])) { if (node) { LOGVAL(ctx, LYE_INVAL, LY_VLOG_LYD, node, cur->name, node->schema->name); } LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "Identity \"%s\" is disabled by its if-feature condition.", cur->name); return NULL; } } if (need_implemented) { if (dflt) { /* later try to make the module implemented */ LOGVRB("Making \"%s\" module implemented because of identityref default value \"%s\" used in the implemented \"%s\" module", imod->name, cur->name, mod->name); /* to be more effective we should use UNRES_MOD_IMPLEMENT but that would require changing prototype of * several functions with little gain */ if (lys_set_implemented(imod)) { LOGERR(ctx, ly_errno, "Setting the module \"%s\" implemented because of used default identity \"%s\" failed.", imod->name, cur->name); goto fail; } } else { /* just say that it was found, but in a non-implemented module */ LOGVAL(ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Identity found, but in a non-implemented module \"%s\".", lys_main_module(cur->module)->name); goto fail; } } return cur; } /** * @brief Resolve unresolved uses. Logs directly. * * @param[in] uses Uses to use. * @param[in] unres Specific unres item. * * @return EXIT_SUCCESS on success, EXIT_FAILURE on forward reference, -1 on error. 
*/ static int resolve_unres_schema_uses(struct lys_node_uses *uses, struct unres_schema *unres) { int rc; struct lys_node *par_grp; struct ly_ctx *ctx = uses->module->ctx; /* HACK: when a grouping has uses inside, all such uses have to be resolved before the grouping itself is used * in some uses. When we see such a uses, the grouping's unres counter is used to store number of so far * unresolved uses. The grouping cannot be used unless this counter is decreased back to 0. To remember * that the uses already increased grouping's counter, the LYS_USESGRP flag is used. */ for (par_grp = lys_parent((struct lys_node *)uses); par_grp && (par_grp->nodetype != LYS_GROUPING); par_grp = lys_parent(par_grp)); if (par_grp && ly_strequal(par_grp->name, uses->name, 1)) { LOGVAL(ctx, LYE_INRESOLV, LY_VLOG_LYS, uses, "uses", uses->name); return -1; } if (!uses->grp) { rc = resolve_uses_schema_nodeid(uses->name, (const struct lys_node *)uses, (const struct lys_node_grp **)&uses->grp); if (rc == -1) { LOGVAL(ctx, LYE_INRESOLV, LY_VLOG_LYS, uses, "uses", uses->name); return -1; } else if (rc > 0) { LOGVAL(ctx, LYE_INCHAR, LY_VLOG_LYS, uses, uses->name[rc - 1], &uses->name[rc - 1]); return -1; } else if (!uses->grp) { if (par_grp && !(uses->flags & LYS_USESGRP)) { if (++((struct lys_node_grp *)par_grp)->unres_count == 0) { LOGERR(ctx, LY_EINT, "Too many unresolved items (uses) inside a grouping."); return -1; } uses->flags |= LYS_USESGRP; } LOGVAL(ctx, LYE_INRESOLV, LY_VLOG_LYS, uses, "uses", uses->name); return EXIT_FAILURE; } } if (uses->grp->unres_count) { if (par_grp && !(uses->flags & LYS_USESGRP)) { if (++((struct lys_node_grp *)par_grp)->unres_count == 0) { LOGERR(ctx, LY_EINT, "Too many unresolved items (uses) inside a grouping."); return -1; } uses->flags |= LYS_USESGRP; } else { /* instantiate grouping only when it is completely resolved */ uses->grp = NULL; } LOGVAL(ctx, LYE_INRESOLV, LY_VLOG_LYS, uses, "uses", uses->name); return EXIT_FAILURE; } rc = resolve_uses(uses, unres); if (!rc) { /* decrease unres count only if not first try */ if (par_grp && (uses->flags & LYS_USESGRP)) { assert(((struct lys_node_grp *)par_grp)->unres_count); ((struct lys_node_grp *)par_grp)->unres_count--; uses->flags &= ~LYS_USESGRP; } /* check status */ if (lyp_check_status(uses->flags, uses->module, "of uses", uses->grp->flags, uses->grp->module, uses->grp->name, (struct lys_node *)uses)) { return -1; } return EXIT_SUCCESS; } return rc; } /** * @brief Resolve list keys. Logs directly. * * @param[in] list List to use. * @param[in] keys_str Keys node value. * * @return EXIT_SUCCESS on success, EXIT_FAILURE on forward reference, -1 on error. 
*/ static int resolve_list_keys(struct lys_node_list *list, const char *keys_str) { int i, len, rc; const char *value; char *s = NULL; struct ly_ctx *ctx = list->module->ctx; for (i = 0; i < list->keys_size; ++i) { assert(keys_str); if (!list->child) { /* no child, possible forward reference */ LOGVAL(ctx, LYE_INRESOLV, LY_VLOG_LYS, list, "list keys", keys_str); return EXIT_FAILURE; } /* get the key name */ if ((value = strpbrk(keys_str, " \t\n"))) { len = value - keys_str; while (isspace(value[0])) { value++; } } else { len = strlen(keys_str); } rc = lys_getnext_data(lys_node_module((struct lys_node *)list), (struct lys_node *)list, keys_str, len, LYS_LEAF, LYS_GETNEXT_NOSTATECHECK, (const struct lys_node **)&list->keys[i]); if (rc) { LOGVAL(ctx, LYE_INRESOLV, LY_VLOG_LYS, list, "list key", keys_str); return EXIT_FAILURE; } if (check_key(list, i, keys_str, len)) { /* check_key logs */ return -1; } /* check status */ if (lyp_check_status(list->flags, list->module, list->name, list->keys[i]->flags, list->keys[i]->module, list->keys[i]->name, (struct lys_node *)list->keys[i])) { return -1; } /* default value - is ignored, keep it but print a warning */ if (list->keys[i]->dflt) { /* log is not hidden only in case this resolving fails and in such a case * we cannot get here */ assert(log_opt == ILO_STORE); log_opt = ILO_LOG; LOGWRN(ctx, "Default value \"%s\" in the list key \"%s\" is ignored. (%s)", list->keys[i]->dflt, list->keys[i]->name, s = lys_path((struct lys_node*)list, LYS_PATH_FIRST_PREFIX)); log_opt = ILO_STORE; free(s); } /* prepare for next iteration */ while (value && isspace(value[0])) { value++; } keys_str = value; } return EXIT_SUCCESS; } /** * @brief Resolve (check) all must conditions of \p node. * Logs directly. * * @param[in] node Data node with optional must statements. * @param[in] inout_parent If set, must in input or output parent of node->schema will be resolved. * * @return EXIT_SUCCESS on pass, EXIT_FAILURE on fail, -1 on error. 
*/ static int resolve_must(struct lyd_node *node, int inout_parent, int ignore_fail) { uint8_t i, must_size; struct lys_node *schema; struct lys_restr *must; struct lyxp_set set; struct ly_ctx *ctx = node->schema->module->ctx; assert(node); memset(&set, 0, sizeof set); if (inout_parent) { for (schema = lys_parent(node->schema); schema && (schema->nodetype & (LYS_CHOICE | LYS_CASE | LYS_USES)); schema = lys_parent(schema)); if (!schema || !(schema->nodetype & (LYS_INPUT | LYS_OUTPUT))) { LOGINT(ctx); return -1; } must_size = ((struct lys_node_inout *)schema)->must_size; must = ((struct lys_node_inout *)schema)->must; /* context node is the RPC/action */ node = node->parent; if (!(node->schema->nodetype & (LYS_RPC | LYS_ACTION))) { LOGINT(ctx); return -1; } } else { switch (node->schema->nodetype) { case LYS_CONTAINER: must_size = ((struct lys_node_container *)node->schema)->must_size; must = ((struct lys_node_container *)node->schema)->must; break; case LYS_LEAF: must_size = ((struct lys_node_leaf *)node->schema)->must_size; must = ((struct lys_node_leaf *)node->schema)->must; break; case LYS_LEAFLIST: must_size = ((struct lys_node_leaflist *)node->schema)->must_size; must = ((struct lys_node_leaflist *)node->schema)->must; break; case LYS_LIST: must_size = ((struct lys_node_list *)node->schema)->must_size; must = ((struct lys_node_list *)node->schema)->must; break; case LYS_ANYXML: case LYS_ANYDATA: must_size = ((struct lys_node_anydata *)node->schema)->must_size; must = ((struct lys_node_anydata *)node->schema)->must; break; case LYS_NOTIF: must_size = ((struct lys_node_notif *)node->schema)->must_size; must = ((struct lys_node_notif *)node->schema)->must; break; default: must_size = 0; break; } } for (i = 0; i < must_size; ++i) { if (lyxp_eval(must[i].expr, node, LYXP_NODE_ELEM, lyd_node_module(node), &set, LYXP_MUST)) { return -1; } lyxp_set_cast(&set, LYXP_SET_BOOLEAN, node, lyd_node_module(node), LYXP_MUST); if (!set.val.bool) { if ((ignore_fail == 1) || ((must[i].flags & (LYS_XPCONF_DEP | LYS_XPSTATE_DEP)) && (ignore_fail == 2))) { LOGVRB("Must condition \"%s\" not satisfied, but it is not required.", must[i].expr); } else { LOGVAL(ctx, LYE_NOMUST, LY_VLOG_LYD, node, must[i].expr); if (must[i].emsg) { ly_vlog_str(ctx, LY_VLOG_PREV, must[i].emsg); } if (must[i].eapptag) { ly_err_last_set_apptag(ctx, must[i].eapptag); } return 1; } } } return EXIT_SUCCESS; } /** * @brief Resolve (find) when condition schema context node. Does not log. * * @param[in] schema Schema node with the when condition. * @param[out] ctx_snode When schema context node. * @param[out] ctx_snode_type Schema context node type. 
*/ void resolve_when_ctx_snode(const struct lys_node *schema, struct lys_node **ctx_snode, enum lyxp_node_type *ctx_snode_type) { const struct lys_node *sparent; /* find a not schema-only node */ *ctx_snode_type = LYXP_NODE_ELEM; while (schema->nodetype & (LYS_USES | LYS_CHOICE | LYS_CASE | LYS_AUGMENT | LYS_INPUT | LYS_OUTPUT)) { if (schema->nodetype == LYS_AUGMENT) { sparent = ((struct lys_node_augment *)schema)->target; } else { sparent = schema->parent; } if (!sparent) { /* context node is the document root (fake root in our case) */ if (schema->flags & LYS_CONFIG_W) { *ctx_snode_type = LYXP_NODE_ROOT_CONFIG; } else { *ctx_snode_type = LYXP_NODE_ROOT; } /* we need the first top-level sibling, but no uses or groupings */ schema = lys_getnext(NULL, NULL, lys_node_module(schema), LYS_GETNEXT_NOSTATECHECK); break; } schema = sparent; } *ctx_snode = (struct lys_node *)schema; } /** * @brief Resolve (find) when condition context node. Does not log. * * @param[in] node Data node, whose conditional definition is being decided. * @param[in] schema Schema node with the when condition. * @param[out] ctx_node Context node. * @param[out] ctx_node_type Context node type. * * @return EXIT_SUCCESS on success, -1 on error. */ static int resolve_when_ctx_node(struct lyd_node *node, struct lys_node *schema, struct lyd_node **ctx_node, enum lyxp_node_type *ctx_node_type) { struct lyd_node *parent; struct lys_node *sparent; enum lyxp_node_type node_type; uint16_t i, data_depth, schema_depth; resolve_when_ctx_snode(schema, &schema, &node_type); if (node_type == LYXP_NODE_ELEM) { /* standard element context node */ for (parent = node, data_depth = 0; parent; parent = parent->parent, ++data_depth); for (sparent = schema, schema_depth = 0; sparent; sparent = (sparent->nodetype == LYS_AUGMENT ? ((struct lys_node_augment *)sparent)->target : sparent->parent)) { if (sparent->nodetype & (LYS_CONTAINER | LYS_LEAF | LYS_LEAFLIST | LYS_LIST | LYS_ANYDATA | LYS_NOTIF | LYS_RPC)) { ++schema_depth; } } if (data_depth < schema_depth) { return -1; } /* find the corresponding data node */ for (i = 0; i < data_depth - schema_depth; ++i) { node = node->parent; } if (node->schema != schema) { return -1; } } else { /* root context node */ while (node->parent) { node = node->parent; } while (node->prev->next) { node = node->prev; } } *ctx_node = node; *ctx_node_type = node_type; return EXIT_SUCCESS; } /** * @brief Temporarily unlink nodes as per YANG 1.1 RFC section 7.21.5 for when XPath evaluation. * The context node is adjusted if needed. * * @param[in] snode Schema node, whose children instances need to be unlinked. * @param[in,out] node Data siblings where to look for the children of \p snode. If it is unlinked, * it is moved to point to another sibling still in the original tree. * @param[in,out] ctx_node When context node, adjusted if needed. * @param[in] ctx_node_type Context node type, just for information to detect invalid situations. * @param[out] unlinked_nodes Unlinked siblings. Can be safely appended to \p node afterwards. * Ordering may change, but there will be no semantic change. * * @return EXIT_SUCCESS on success, -1 on error. 
*/ static int resolve_when_unlink_nodes(struct lys_node *snode, struct lyd_node **node, struct lyd_node **ctx_node, enum lyxp_node_type ctx_node_type, struct lyd_node **unlinked_nodes) { struct lyd_node *next, *elem; const struct lys_node *slast; struct ly_ctx *ctx = snode->module->ctx; switch (snode->nodetype) { case LYS_AUGMENT: case LYS_USES: case LYS_CHOICE: case LYS_CASE: slast = NULL; while ((slast = lys_getnext(slast, snode, NULL, LYS_GETNEXT_PARENTUSES))) { if (slast->nodetype & (LYS_ACTION | LYS_NOTIF)) { continue; } if (resolve_when_unlink_nodes((struct lys_node *)slast, node, ctx_node, ctx_node_type, unlinked_nodes)) { return -1; } } break; case LYS_CONTAINER: case LYS_LIST: case LYS_LEAF: case LYS_LEAFLIST: case LYS_ANYXML: case LYS_ANYDATA: LY_TREE_FOR_SAFE(lyd_first_sibling(*node), next, elem) { if (elem->schema == snode) { if (elem == *ctx_node) { /* We are going to unlink our context node! This normally cannot happen, * but we use normal top-level data nodes for faking a document root node, * so if this is the context node, we just use the next top-level node. * Additionally, it can even happen that there are no top-level data nodes left, * all were unlinked, so in this case we pass NULL as the context node/data tree, * lyxp_eval() can handle this special situation. */ if (ctx_node_type == LYXP_NODE_ELEM) { LOGINT(ctx); return -1; } if (elem->prev == elem) { /* unlinking last top-level element, use an empty data tree */ *ctx_node = NULL; } else { /* in this case just use the previous/last top-level data node */ *ctx_node = elem->prev; } } else if (elem == *node) { /* We are going to unlink the currently processed node. This does not matter that * much, but we would lose access to the original data tree, so just move our * pointer somewhere still inside it. */ if ((*node)->prev != *node) { *node = (*node)->prev; } else { /* the processed node with sibings were all unlinked, oh well */ *node = NULL; } } /* temporarily unlink the node */ lyd_unlink_internal(elem, 0); if (*unlinked_nodes) { if (lyd_insert_after((*unlinked_nodes)->prev, elem)) { LOGINT(ctx); return -1; } } else { *unlinked_nodes = elem; } if (snode->nodetype & (LYS_CONTAINER | LYS_LEAF | LYS_ANYDATA)) { /* there can be only one instance */ break; } } } break; default: LOGINT(ctx); return -1; } return EXIT_SUCCESS; } /** * @brief Relink the unlinked nodes back. * * @param[in] node Data node to link the nodes back to. It can actually be the adjusted context node, * we simply need a sibling from the original data tree. * @param[in] unlinked_nodes Unlinked nodes to relink to \p node. * @param[in] ctx_node_type Context node type to distinguish between \p node being the parent * or the sibling of \p unlinked_nodes. * * @return EXIT_SUCCESS on success, -1 on error. 
*/ static int resolve_when_relink_nodes(struct lyd_node *node, struct lyd_node *unlinked_nodes, enum lyxp_node_type ctx_node_type) { struct lyd_node *elem; LY_TREE_FOR_SAFE(unlinked_nodes, unlinked_nodes, elem) { lyd_unlink_internal(elem, 0); if (ctx_node_type == LYXP_NODE_ELEM) { if (lyd_insert_common(node, NULL, elem, 0)) { return -1; } } else { if (lyd_insert_nextto(node, elem, 0, 0)) { return -1; } } } return EXIT_SUCCESS; } int resolve_applies_must(const struct lyd_node *node) { int ret = 0; uint8_t must_size; struct lys_node *schema, *iter; assert(node); schema = node->schema; /* their own must */ switch (schema->nodetype) { case LYS_CONTAINER: must_size = ((struct lys_node_container *)schema)->must_size; break; case LYS_LEAF: must_size = ((struct lys_node_leaf *)schema)->must_size; break; case LYS_LEAFLIST: must_size = ((struct lys_node_leaflist *)schema)->must_size; break; case LYS_LIST: must_size = ((struct lys_node_list *)schema)->must_size; break; case LYS_ANYXML: case LYS_ANYDATA: must_size = ((struct lys_node_anydata *)schema)->must_size; break; case LYS_NOTIF: must_size = ((struct lys_node_notif *)schema)->must_size; break; default: must_size = 0; break; } if (must_size) { ++ret; } /* schema may be a direct data child of input/output with must (but it must be first, it needs to be evaluated only once) */ if (!node->prev->next) { for (iter = lys_parent(schema); iter && (iter->nodetype & (LYS_CHOICE | LYS_CASE | LYS_USES)); iter = lys_parent(iter)); if (iter && (iter->nodetype & (LYS_INPUT | LYS_OUTPUT))) { ret += 0x2; } } return ret; } static struct lys_when * snode_get_when(const struct lys_node *schema) { switch (schema->nodetype) { case LYS_CONTAINER: return ((struct lys_node_container *)schema)->when; case LYS_CHOICE: return ((struct lys_node_choice *)schema)->when; case LYS_LEAF: return ((struct lys_node_leaf *)schema)->when; case LYS_LEAFLIST: return ((struct lys_node_leaflist *)schema)->when; case LYS_LIST: return ((struct lys_node_list *)schema)->when; case LYS_ANYDATA: case LYS_ANYXML: return ((struct lys_node_anydata *)schema)->when; case LYS_CASE: return ((struct lys_node_case *)schema)->when; case LYS_USES: return ((struct lys_node_uses *)schema)->when; case LYS_AUGMENT: return ((struct lys_node_augment *)schema)->when; default: return NULL; } } int resolve_applies_when(const struct lys_node *schema, int mode, const struct lys_node *stop) { const struct lys_node *parent; assert(schema); if (!(schema->nodetype & (LYS_NOTIF | LYS_RPC)) && snode_get_when(schema)) { return 1; } parent = schema; goto check_augment; while (parent) { /* stop conditions */ if (!mode) { /* stop on node that can be instantiated in data tree */ if (!(parent->nodetype & (LYS_USES | LYS_CHOICE | LYS_CASE))) { break; } } else { /* stop on the specified node */ if (parent == stop) { break; } } if (snode_get_when(parent)) { return 1; } check_augment: if (parent->parent && (parent->parent->nodetype == LYS_AUGMENT) && snode_get_when(parent->parent)) { return 1; } parent = lys_parent(parent); } return 0; } /** * @brief Resolve (check) all when conditions relevant for \p node. * Logs directly. * * @param[in] node Data node, whose conditional reference, if such, is being decided. * @param[in] ignore_fail 1 if when does not have to be satisfied, 2 if it does not have to be satisfied * only when requiring external dependencies. 
* * @return * -1 - error, ly_errno is set * 0 - all "when" statements true * 0, ly_vecode = LYVE_NOWHEN - some "when" statement false, returned in failed_when * 1, ly_vecode = LYVE_INWHEN - nodes needed to resolve are conditional and not yet resolved (under another "when") */ int resolve_when(struct lyd_node *node, int ignore_fail, struct lys_when **failed_when) { struct lyd_node *ctx_node = NULL, *unlinked_nodes, *tmp_node; struct lys_node *sparent; struct lyxp_set set; enum lyxp_node_type ctx_node_type; struct ly_ctx *ctx = node->schema->module->ctx; int rc = 0; assert(node); memset(&set, 0, sizeof set); if (!(node->schema->nodetype & (LYS_NOTIF | LYS_RPC | LYS_ACTION)) && snode_get_when(node->schema)) { /* make the node dummy for the evaluation */ node->validity |= LYD_VAL_INUSE; rc = lyxp_eval(snode_get_when(node->schema)->cond, node, LYXP_NODE_ELEM, lyd_node_module(node), &set, LYXP_WHEN); node->validity &= ~LYD_VAL_INUSE; if (rc) { if (rc == 1) { LOGVAL(ctx, LYE_INWHEN, LY_VLOG_LYD, node, snode_get_when(node->schema)->cond); } goto cleanup; } /* set boolean result of the condition */ lyxp_set_cast(&set, LYXP_SET_BOOLEAN, node, lyd_node_module(node), LYXP_WHEN); if (!set.val.bool) { node->when_status |= LYD_WHEN_FALSE; if ((ignore_fail == 1) || ((snode_get_when(node->schema)->flags & (LYS_XPCONF_DEP | LYS_XPSTATE_DEP)) && (ignore_fail == 2))) { LOGVRB("When condition \"%s\" is not satisfied, but it is not required.", snode_get_when(node->schema)->cond); } else { LOGVAL(ctx, LYE_NOWHEN, LY_VLOG_LYD, node, snode_get_when(node->schema)->cond); if (failed_when) { *failed_when = snode_get_when(node->schema); } goto cleanup; } } /* free xpath set content */ lyxp_set_cast(&set, LYXP_SET_EMPTY, node, lyd_node_module(node), 0); } sparent = node->schema; goto check_augment; /* check when in every schema node that affects node */ while (sparent && (sparent->nodetype & (LYS_USES | LYS_CHOICE | LYS_CASE))) { if (snode_get_when(sparent)) { if (!ctx_node) { rc = resolve_when_ctx_node(node, sparent, &ctx_node, &ctx_node_type); if (rc) { LOGINT(ctx); goto cleanup; } } unlinked_nodes = NULL; /* we do not want our node pointer to change */ tmp_node = node; rc = resolve_when_unlink_nodes(sparent, &tmp_node, &ctx_node, ctx_node_type, &unlinked_nodes); if (rc) { goto cleanup; } rc = lyxp_eval(snode_get_when(sparent)->cond, ctx_node, ctx_node_type, lys_node_module(sparent), &set, LYXP_WHEN); if (unlinked_nodes && ctx_node) { if (resolve_when_relink_nodes(ctx_node, unlinked_nodes, ctx_node_type)) { rc = -1; goto cleanup; } } if (rc) { if (rc == 1) { LOGVAL(ctx, LYE_INWHEN, LY_VLOG_LYD, node, snode_get_when(sparent)->cond); } goto cleanup; } lyxp_set_cast(&set, LYXP_SET_BOOLEAN, ctx_node, lys_node_module(sparent), LYXP_WHEN); if (!set.val.bool) { if ((ignore_fail == 1) || ((snode_get_when(sparent)->flags & (LYS_XPCONF_DEP | LYS_XPSTATE_DEP)) && (ignore_fail == 2))) { LOGVRB("When condition \"%s\" is not satisfied, but it is not required.", snode_get_when(sparent)->cond); } else { node->when_status |= LYD_WHEN_FALSE; LOGVAL(ctx, LYE_NOWHEN, LY_VLOG_LYD, node, snode_get_when(sparent)->cond); if (failed_when) { *failed_when = snode_get_when(sparent); } goto cleanup; } } /* free xpath set content */ lyxp_set_cast(&set, LYXP_SET_EMPTY, ctx_node, lys_node_module(sparent), 0); } check_augment: if ((sparent->parent && (sparent->parent->nodetype == LYS_AUGMENT) && snode_get_when(sparent->parent))) { if (!ctx_node) { rc = resolve_when_ctx_node(node, sparent->parent, &ctx_node, &ctx_node_type); if (rc) { LOGINT(ctx); 
goto cleanup; } } unlinked_nodes = NULL; tmp_node = node; rc = resolve_when_unlink_nodes(sparent->parent, &tmp_node, &ctx_node, ctx_node_type, &unlinked_nodes); if (rc) { goto cleanup; } rc = lyxp_eval(snode_get_when(sparent->parent)->cond, ctx_node, ctx_node_type, lys_node_module(sparent->parent), &set, LYXP_WHEN); /* reconnect nodes, if ctx_node is NULL then all the nodes were unlinked, but linked together, * so the tree did not actually change and there is nothing for us to do */ if (unlinked_nodes && ctx_node) { if (resolve_when_relink_nodes(ctx_node, unlinked_nodes, ctx_node_type)) { rc = -1; goto cleanup; } } if (rc) { if (rc == 1) { LOGVAL(ctx, LYE_INWHEN, LY_VLOG_LYD, node, snode_get_when(sparent->parent)->cond); } goto cleanup; } lyxp_set_cast(&set, LYXP_SET_BOOLEAN, ctx_node, lys_node_module(sparent->parent), LYXP_WHEN); if (!set.val.bool) { node->when_status |= LYD_WHEN_FALSE; if ((ignore_fail == 1) || ((snode_get_when(sparent->parent)->flags & (LYS_XPCONF_DEP | LYS_XPSTATE_DEP)) && (ignore_fail == 2))) { LOGVRB("When condition \"%s\" is not satisfied, but it is not required.", snode_get_when(sparent->parent)->cond); } else { LOGVAL(ctx, LYE_NOWHEN, LY_VLOG_LYD, node, snode_get_when(sparent->parent)->cond); if (failed_when) { *failed_when = snode_get_when(sparent->parent); } goto cleanup; } } /* free xpath set content */ lyxp_set_cast(&set, LYXP_SET_EMPTY, ctx_node, lys_node_module(sparent->parent), 0); } sparent = lys_parent(sparent); } node->when_status |= LYD_WHEN_TRUE; cleanup: /* free xpath set content */ lyxp_set_cast(&set, LYXP_SET_EMPTY, ctx_node ? ctx_node : node, NULL, 0); return rc; } static int check_type_union_leafref(struct lys_type *type) { uint8_t i; if ((type->base == LY_TYPE_UNION) && type->info.uni.count) { /* go through unions and look for leafref */ for (i = 0; i < type->info.uni.count; ++i) { switch (type->info.uni.types[i].base) { case LY_TYPE_LEAFREF: return 1; case LY_TYPE_UNION: if (check_type_union_leafref(&type->info.uni.types[i])) { return 1; } break; default: break; } } return 0; } /* just inherit the flag value */ return type->der->has_union_leafref; } /** * @brief Resolve a single unres schema item. Logs indirectly. * * @param[in] mod Main module. * @param[in] item Item to resolve. Type determined by \p type. * @param[in] type Type of the unresolved item. * @param[in] str_snode String, a schema node, or NULL. * @param[in] unres Unres schema structure to use. * @param[in] final_fail Whether we are just printing errors of the failed unres items. * * @return EXIT_SUCCESS on success, EXIT_FAILURE on forward reference, -1 on error. 
*/ static int resolve_unres_schema_item(struct lys_module *mod, void *item, enum UNRES_ITEM type, void *str_snode, struct unres_schema *unres) { /* has_str - whether the str_snode is a string in a dictionary that needs to be freed */ int rc = -1, has_str = 0, parent_type = 0, i, k; unsigned int j; struct ly_ctx * ctx = mod->ctx; struct lys_node *root, *next, *node, *par_grp; const char *expr; uint8_t *u; struct ly_set *refs, *procs; struct lys_feature *ref, *feat; struct lys_ident *ident; struct lys_type *stype; struct lys_node_choice *choic; struct lyxml_elem *yin; struct yang_type *yang; struct unres_list_uniq *unique_info; struct unres_iffeat_data *iff_data; struct unres_ext *ext_data; struct lys_ext_instance *ext, **extlist; struct lyext_plugin *eplugin; switch (type) { case UNRES_IDENT: expr = str_snode; has_str = 1; ident = item; rc = resolve_base_ident(mod, ident, expr, "identity", NULL, unres); break; case UNRES_TYPE_IDENTREF: expr = str_snode; has_str = 1; stype = item; rc = resolve_base_ident(mod, NULL, expr, "type", stype, unres); break; case UNRES_TYPE_LEAFREF: node = str_snode; stype = item; rc = resolve_schema_leafref(stype, node, unres); break; case UNRES_TYPE_DER_EXT: parent_type++; /* falls through */ case UNRES_TYPE_DER_TPDF: parent_type++; /* falls through */ case UNRES_TYPE_DER: /* parent */ node = str_snode; stype = item; /* HACK type->der is temporarily unparsed type statement */ yin = (struct lyxml_elem *)stype->der; stype->der = NULL; if (yin->flags & LY_YANG_STRUCTURE_FLAG) { yang = (struct yang_type *)yin; rc = yang_check_type(mod, node, yang, stype, parent_type, unres); if (rc) { /* may try again later */ stype->der = (struct lys_tpdf *)yang; } else { /* we need to always be able to free this, it's safe only in this case */ lydict_remove(ctx, yang->name); free(yang); } } else { rc = fill_yin_type(mod, node, yin, stype, parent_type, unres); if (!rc || rc == -1) { /* we need to always be able to free this, it's safe only in this case */ lyxml_free(ctx, yin); } else { /* may try again later, put all back how it was */ stype->der = (struct lys_tpdf *)yin; } } if (rc == EXIT_SUCCESS) { /* it does not make sense to have leaf-list of empty type */ if (!parent_type && node->nodetype == LYS_LEAFLIST && stype->base == LY_TYPE_EMPTY) { LOGWRN(ctx, "The leaf-list \"%s\" is of \"empty\" type, which does not make sense.", node->name); } if ((type == UNRES_TYPE_DER_TPDF) && (stype->base == LY_TYPE_UNION)) { /* fill typedef union leafref flag */ ((struct lys_tpdf *)stype->parent)->has_union_leafref = check_type_union_leafref(stype); } else if ((type == UNRES_TYPE_DER) && stype->der->has_union_leafref) { /* copy the type in case it has union leafref flag */ if (lys_copy_union_leafrefs(mod, node, stype, NULL, unres)) { LOGERR(ctx, LY_EINT, "Failed to duplicate type."); return -1; } } } else if (rc == EXIT_FAILURE && !(stype->value_flags & LY_VALUE_UNRESGRP)) { /* forward reference - in case the type is in grouping, we have to make the grouping unusable * by uses statement until the type is resolved. We do that the same way as uses statements inside * grouping. The grouping cannot be used unless the unres counter is 0. * To remember that the grouping already increased the counter, the LYTYPE_GRP is used as value * of the type's base member. 
*/ for (par_grp = node; par_grp && (par_grp->nodetype != LYS_GROUPING); par_grp = lys_parent(par_grp)); if (par_grp) { if (++((struct lys_node_grp *)par_grp)->unres_count == 0) { LOGERR(ctx, LY_EINT, "Too many unresolved items (type) inside a grouping."); return -1; } stype->value_flags |= LY_VALUE_UNRESGRP; } } break; case UNRES_IFFEAT: iff_data = str_snode; rc = resolve_feature(iff_data->fname, strlen(iff_data->fname), iff_data->node, item); if (!rc) { /* success */ if (iff_data->infeature) { /* store backlink into the target feature to allow reverse changes in case of changing feature status */ feat = *((struct lys_feature **)item); if (!feat->depfeatures) { feat->depfeatures = ly_set_new(); } ly_set_add(feat->depfeatures, iff_data->node, LY_SET_OPT_USEASLIST); } /* cleanup temporary data */ lydict_remove(ctx, iff_data->fname); free(iff_data); } break; case UNRES_FEATURE: feat = (struct lys_feature *)item; if (feat->iffeature_size) { refs = ly_set_new(); procs = ly_set_new(); ly_set_add(procs, feat, 0); while (procs->number) { ref = procs->set.g[procs->number - 1]; ly_set_rm_index(procs, procs->number - 1); for (i = 0; i < ref->iffeature_size; i++) { resolve_iffeature_getsizes(&ref->iffeature[i], NULL, &j); for (; j > 0 ; j--) { if (ref->iffeature[i].features[j - 1]) { if (ref->iffeature[i].features[j - 1] == feat) { LOGVAL(ctx, LYE_CIRC_FEATURES, LY_VLOG_NONE, NULL, feat->name); goto featurecheckdone; } if (ref->iffeature[i].features[j - 1]->iffeature_size) { k = refs->number; if (ly_set_add(refs, ref->iffeature[i].features[j - 1], 0) == k) { /* not yet seen feature, add it for processing */ ly_set_add(procs, ref->iffeature[i].features[j - 1], 0); } } } else { /* forward reference */ rc = EXIT_FAILURE; goto featurecheckdone; } } } } rc = EXIT_SUCCESS; featurecheckdone: ly_set_free(refs); ly_set_free(procs); } break; case UNRES_USES: rc = resolve_unres_schema_uses(item, unres); break; case UNRES_TYPEDEF_DFLT: parent_type++; /* falls through */ case UNRES_TYPE_DFLT: stype = item; rc = check_default(stype, (const char **)str_snode, mod, parent_type); if ((rc == EXIT_FAILURE) && !parent_type && (stype->base == LY_TYPE_LEAFREF)) { for (par_grp = (struct lys_node *)stype->parent; par_grp && (par_grp->nodetype != LYS_GROUPING); par_grp = lys_parent(par_grp)); if (par_grp) { /* checking default value in a grouping finished with forward reference means we cannot check the value */ rc = EXIT_SUCCESS; } } break; case UNRES_CHOICE_DFLT: expr = str_snode; has_str = 1; choic = item; if (!choic->dflt) { choic->dflt = resolve_choice_dflt(choic, expr); } if (choic->dflt) { rc = lyp_check_mandatory_choice((struct lys_node *)choic); } else { rc = EXIT_FAILURE; } break; case UNRES_LIST_KEYS: rc = resolve_list_keys(item, ((struct lys_node_list *)item)->keys_str); break; case UNRES_LIST_UNIQ: unique_info = (struct unres_list_uniq *)item; rc = resolve_unique(unique_info->list, unique_info->expr, unique_info->trg_type); break; case UNRES_AUGMENT: rc = resolve_augment(item, NULL, unres); break; case UNRES_XPATH: node = (struct lys_node *)item; rc = check_xpath(node, 1); break; case UNRES_MOD_IMPLEMENT: rc = lys_make_implemented_r(mod, unres); break; case UNRES_EXT: ext_data = (struct unres_ext *)str_snode; extlist = &(*(struct lys_ext_instance ***)item)[ext_data->ext_index]; rc = resolve_extension(ext_data, extlist, unres); if (!rc) { /* success */ /* is there a callback to be done to finalize the extension? 
*/ eplugin = extlist[0]->def->plugin; if (eplugin) { if (eplugin->check_result || (eplugin->flags & LYEXT_OPT_INHERIT)) { u = malloc(sizeof *u); LY_CHECK_ERR_RETURN(!u, LOGMEM(ctx), -1); (*u) = ext_data->ext_index; if (unres_schema_add_node(mod, unres, item, UNRES_EXT_FINALIZE, (struct lys_node *)u) == -1) { /* something really bad happend since the extension finalization is not actually * being resolved while adding into unres, so something more serious with the unres * list itself must happened */ return -1; } } } } if (!rc || rc == -1) { /* cleanup on success or fatal error */ if (ext_data->datatype == LYS_IN_YIN) { /* YIN */ lyxml_free(ctx, ext_data->data.yin); } else { /* YANG */ yang_free_ext_data(ext_data->data.yang); } free(ext_data); } break; case UNRES_EXT_FINALIZE: u = (uint8_t *)str_snode; ext = (*(struct lys_ext_instance ***)item)[*u]; free(u); eplugin = ext->def->plugin; /* inherit */ if ((eplugin->flags & LYEXT_OPT_INHERIT) && (ext->parent_type == LYEXT_PAR_NODE)) { root = (struct lys_node *)ext->parent; if (!(root->nodetype & (LYS_LEAF | LYS_LEAFLIST | LYS_ANYDATA))) { LY_TREE_DFS_BEGIN(root->child, next, node) { /* first, check if the node already contain instance of the same extension, * in such a case we won't inherit. In case the node was actually defined as * augment data, we are supposed to check the same way also the augment node itself */ if (lys_ext_instance_presence(ext->def, node->ext, node->ext_size) != -1) { goto inherit_dfs_sibling; } else if (node->parent != root && node->parent->nodetype == LYS_AUGMENT && lys_ext_instance_presence(ext->def, node->parent->ext, node->parent->ext_size) != -1) { goto inherit_dfs_sibling; } if (eplugin->check_inherit) { /* we have a callback to check the inheritance, use it */ switch ((rc = (*eplugin->check_inherit)(ext, node))) { case 0: /* yes - continue with the inheriting code */ break; case 1: /* no - continue with the node's sibling */ goto inherit_dfs_sibling; case 2: /* no, but continue with the children, just skip the inheriting code for this node */ goto inherit_dfs_child; default: LOGERR(ctx, LY_EINT, "Plugin's (%s:%s) check_inherit callback returns invalid value (%d),", ext->def->module->name, ext->def->name, rc); } } /* inherit the extension */ extlist = realloc(node->ext, (node->ext_size + 1) * sizeof *node->ext); LY_CHECK_ERR_RETURN(!extlist, LOGMEM(ctx), -1); extlist[node->ext_size] = malloc(sizeof **extlist); LY_CHECK_ERR_RETURN(!extlist[node->ext_size], LOGMEM(ctx); node->ext = extlist, -1); memcpy(extlist[node->ext_size], ext, sizeof *ext); extlist[node->ext_size]->flags |= LYEXT_OPT_INHERIT; node->ext = extlist; node->ext_size++; inherit_dfs_child: /* modification of - select element for the next run - children first */ if (node->nodetype & (LYS_LEAF | LYS_LEAFLIST | LYS_ANYDATA)) { next = NULL; } else { next = node->child; } if (!next) { inherit_dfs_sibling: /* no children, try siblings */ next = node->next; } while (!next) { /* go to the parent */ node = lys_parent(node); /* we are done if we are back in the root (the starter's parent */ if (node == root) { break; } /* parent is already processed, go to its sibling */ next = node->next; } } } } /* final check */ if (eplugin->check_result) { if ((*eplugin->check_result)(ext)) { LOGERR(ctx, LY_EPLUGIN, "Resolving extension failed."); return -1; } } rc = 0; break; default: LOGINT(ctx); break; } if (has_str && !rc) { /* the string is no more needed in case of success. 
* In case of forward reference, we will try to resolve the string later */ lydict_remove(ctx, str_snode); } return rc; } /* logs directly */ static void print_unres_schema_item_fail(void *item, enum UNRES_ITEM type, void *str_node) { struct lyxml_elem *xml; struct lyxml_attr *attr; struct unres_iffeat_data *iff_data; const char *name = NULL; struct unres_ext *extinfo; switch (type) { case UNRES_IDENT: LOGVRB("Resolving %s \"%s\" failed, it will be attempted later.", "identity", (char *)str_node); break; case UNRES_TYPE_IDENTREF: LOGVRB("Resolving %s \"%s\" failed, it will be attempted later.", "identityref", (char *)str_node); break; case UNRES_TYPE_LEAFREF: LOGVRB("Resolving %s \"%s\" failed, it will be attempted later.", "leafref", ((struct lys_type *)item)->info.lref.path); break; case UNRES_TYPE_DER_EXT: case UNRES_TYPE_DER_TPDF: case UNRES_TYPE_DER: xml = (struct lyxml_elem *)((struct lys_type *)item)->der; if (xml->flags & LY_YANG_STRUCTURE_FLAG) { name = ((struct yang_type *)xml)->name; } else { LY_TREE_FOR(xml->attr, attr) { if ((attr->type == LYXML_ATTR_STD) && !strcmp(attr->name, "name")) { name = attr->value; break; } } assert(attr); } LOGVRB("Resolving %s \"%s\" failed, it will be attempted later.", "derived type", name); break; case UNRES_IFFEAT: iff_data = str_node; LOGVRB("Resolving %s \"%s\" failed, it will be attempted later.", "if-feature", iff_data->fname); break; case UNRES_FEATURE: LOGVRB("There are unresolved if-features for \"%s\" feature circular dependency check, it will be attempted later", ((struct lys_feature *)item)->name); break; case UNRES_USES: LOGVRB("Resolving %s \"%s\" failed, it will be attempted later.", "uses", ((struct lys_node_uses *)item)->name); break; case UNRES_TYPEDEF_DFLT: case UNRES_TYPE_DFLT: if (*(char **)str_node) { LOGVRB("Resolving %s \"%s\" failed, it will be attempted later.", "type default", *(char **)str_node); } /* else no default value in the type itself, but we are checking some restrictions against * possible default value of some base type. The failure is caused by not resolved base type, * so it was already reported */ break; case UNRES_CHOICE_DFLT: LOGVRB("Resolving %s \"%s\" failed, it will be attempted later.", "choice default", (char *)str_node); break; case UNRES_LIST_KEYS: LOGVRB("Resolving %s \"%s\" failed, it will be attempted later.", "list keys", (char *)str_node); break; case UNRES_LIST_UNIQ: LOGVRB("Resolving %s \"%s\" failed, it will be attempted later.", "list unique", (char *)str_node); break; case UNRES_AUGMENT: LOGVRB("Resolving %s \"%s\" failed, it will be attempted later.", "augment target", ((struct lys_node_augment *)item)->target_name); break; case UNRES_XPATH: LOGVRB("Resolving %s \"%s\" failed, it will be attempted later.", "XPath expressions of", ((struct lys_node *)item)->name); break; case UNRES_EXT: extinfo = (struct unres_ext *)str_node; name = extinfo->datatype == LYS_IN_YIN ? 
extinfo->data.yin->name : NULL; /* TODO YANG extension */ LOGVRB("Resolving extension \"%s\" failed, it will be attempted later.", name); break; default: LOGINT(NULL); break; } } static int resolve_unres_schema_types(struct unres_schema *unres, enum UNRES_ITEM types, struct ly_ctx *ctx, int forward_ref, int print_all_errors, uint32_t *resolved) { uint32_t i, unres_count, res_count; int ret = 0, rc; struct ly_err_item *prev_eitem; enum int_log_opts prev_ilo; LY_ERR prev_ly_errno; /* if there can be no forward references, every failure is final, so we can print it directly */ if (forward_ref) { prev_ly_errno = ly_errno; ly_ilo_change(ctx, ILO_STORE, &prev_ilo, &prev_eitem); } do { unres_count = 0; res_count = 0; for (i = 0; i < unres->count; ++i) { /* UNRES_TYPE_LEAFREF must be resolved (for storing leafref target pointers); * if-features are resolved here to make sure that we will have all if-features for * later check of feature circular dependency */ if (unres->type[i] & types) { ++unres_count; rc = resolve_unres_schema_item(unres->module[i], unres->item[i], unres->type[i], unres->str_snode[i], unres); if (unres->type[i] == UNRES_EXT_FINALIZE) { /* to avoid double free */ unres->type[i] = UNRES_RESOLVED; } if (!rc || (unres->type[i] == UNRES_XPATH)) { /* invalid XPath can never cause an error, only a warning */ if (unres->type[i] == UNRES_LIST_UNIQ) { /* free the allocated structure */ free(unres->item[i]); } unres->type[i] = UNRES_RESOLVED; ++(*resolved); ++res_count; } else if ((rc == EXIT_FAILURE) && forward_ref) { /* forward reference, erase errors */ ly_err_free_next(ctx, prev_eitem); } else if (print_all_errors) { /* just so that we quit the loop */ ++res_count; ret = -1; } else { if (forward_ref) { ly_ilo_restore(ctx, prev_ilo, prev_eitem, 1); } return -1; } } } } while (res_count && (res_count < unres_count)); if (res_count < unres_count) { assert(forward_ref); /* just print the errors (but we must free the ones we have and get them again :-/ ) */ ly_ilo_restore(ctx, prev_ilo, prev_eitem, 0); for (i = 0; i < unres->count; ++i) { if (unres->type[i] & types) { resolve_unres_schema_item(unres->module[i], unres->item[i], unres->type[i], unres->str_snode[i], unres); } } return -1; } if (forward_ref) { /* restore log */ ly_ilo_restore(ctx, prev_ilo, prev_eitem, 0); ly_errno = prev_ly_errno; } return ret; } /** * @brief Resolve every unres schema item in the structure. Logs directly. * * @param[in] mod Main module. * @param[in] unres Unres schema structure to use. * * @return EXIT_SUCCESS on success, -1 on error. 
*/ int resolve_unres_schema(struct lys_module *mod, struct unres_schema *unres) { uint32_t resolved = 0; assert(unres); LOGVRB("Resolving \"%s\" unresolved schema nodes and their constraints...", mod->name); /* UNRES_TYPE_LEAFREF must be resolved (for storing leafref target pointers); * if-features are resolved here to make sure that we will have all if-features for * later check of feature circular dependency */ if (resolve_unres_schema_types(unres, UNRES_USES | UNRES_IFFEAT | UNRES_TYPE_DER | UNRES_TYPE_DER_TPDF | UNRES_TYPE_DER_TPDF | UNRES_TYPE_LEAFREF | UNRES_MOD_IMPLEMENT | UNRES_AUGMENT | UNRES_CHOICE_DFLT | UNRES_IDENT, mod->ctx, 1, 0, &resolved)) { return -1; } /* another batch of resolved items */ if (resolve_unres_schema_types(unres, UNRES_TYPE_IDENTREF | UNRES_FEATURE | UNRES_TYPEDEF_DFLT | UNRES_TYPE_DFLT | UNRES_LIST_KEYS | UNRES_LIST_UNIQ | UNRES_EXT, mod->ctx, 1, 0, &resolved)) { return -1; } /* print xpath warnings and finalize extensions, keep it last to provide the complete schema tree information to the plugin's checkers */ if (resolve_unres_schema_types(unres, UNRES_XPATH | UNRES_EXT_FINALIZE, mod->ctx, 0, 1, &resolved)) { return -1; } LOGVRB("All \"%s\" schema nodes and constraints resolved.", mod->name); unres->count = 0; return EXIT_SUCCESS; } /** * @brief Try to resolve an unres schema item with a string argument. Logs indirectly. * * @param[in] mod Main module. * @param[in] unres Unres schema structure to use. * @param[in] item Item to resolve. Type determined by \p type. * @param[in] type Type of the unresolved item. * @param[in] str String argument. * * @return EXIT_SUCCESS on success, EXIT_FAILURE on storing the item in unres, -1 on error. */ int unres_schema_add_str(struct lys_module *mod, struct unres_schema *unres, void *item, enum UNRES_ITEM type, const char *str) { int rc; const char *dictstr; dictstr = lydict_insert(mod->ctx, str, 0); rc = unres_schema_add_node(mod, unres, item, type, (struct lys_node *)dictstr); if (rc < 0) { lydict_remove(mod->ctx, dictstr); } return rc; } /** * @brief Try to resolve an unres schema item with a schema node argument. Logs indirectly. * * @param[in] mod Main module. * @param[in] unres Unres schema structure to use. * @param[in] item Item to resolve. Type determined by \p type. * @param[in] type Type of the unresolved item. UNRES_TYPE_DER is handled specially! * @param[in] snode Schema node argument. * * @return EXIT_SUCCESS on success, EXIT_FAILURE on storing the item in unres, -1 on error. 
*/ int unres_schema_add_node(struct lys_module *mod, struct unres_schema *unres, void *item, enum UNRES_ITEM type, struct lys_node *snode) { int rc; uint32_t u; enum int_log_opts prev_ilo; struct ly_err_item *prev_eitem; LY_ERR prev_ly_errno; struct lyxml_elem *yin; struct ly_ctx *ctx = mod->ctx; assert(unres && (item || (type == UNRES_MOD_IMPLEMENT)) && ((type != UNRES_LEAFREF) && (type != UNRES_INSTID) && (type != UNRES_WHEN) && (type != UNRES_MUST))); /* check for duplicities in unres */ for (u = 0; u < unres->count; u++) { if (unres->type[u] == type && unres->item[u] == item && unres->str_snode[u] == snode && unres->module[u] == mod) { /* duplication can happen when the node contains multiple statements of the same type to check, * this can happen for example when refinement is being applied, so we just postpone the processing * and do not duplicate the information */ return EXIT_FAILURE; } } if ((type == UNRES_EXT_FINALIZE) || (type == UNRES_XPATH) || (type == UNRES_MOD_IMPLEMENT)) { /* extension finalization is not even tried when adding the item into the inres list, * xpath is not tried because it would hide some potential warnings, * implementing module must be deferred because some other nodes can be added that will need to be traversed * and their targets made implemented */ rc = EXIT_FAILURE; } else { prev_ly_errno = ly_errno; ly_ilo_change(ctx, ILO_STORE, &prev_ilo, &prev_eitem); rc = resolve_unres_schema_item(mod, item, type, snode, unres); if (rc != EXIT_FAILURE) { ly_ilo_restore(ctx, prev_ilo, prev_eitem, rc == -1 ? 1 : 0); if (rc != -1) { ly_errno = prev_ly_errno; } if (type == UNRES_LIST_UNIQ) { /* free the allocated structure */ free(item); } else if (rc == -1 && type == UNRES_IFFEAT) { /* free the allocated resources */ free(*((char **)item)); } return rc; } else { /* erase info about validation errors */ ly_ilo_restore(ctx, prev_ilo, prev_eitem, 0); ly_errno = prev_ly_errno; } print_unres_schema_item_fail(item, type, snode); /* HACK unlinking is performed here so that we do not do any (NS) copying in vain */ if (type == UNRES_TYPE_DER || type == UNRES_TYPE_DER_TPDF) { yin = (struct lyxml_elem *)((struct lys_type *)item)->der; if (!(yin->flags & LY_YANG_STRUCTURE_FLAG)) { lyxml_unlink_elem(mod->ctx, yin, 1); ((struct lys_type *)item)->der = (struct lys_tpdf *)yin; } } } unres->count++; unres->item = ly_realloc(unres->item, unres->count*sizeof *unres->item); LY_CHECK_ERR_RETURN(!unres->item, LOGMEM(ctx), -1); unres->item[unres->count-1] = item; unres->type = ly_realloc(unres->type, unres->count*sizeof *unres->type); LY_CHECK_ERR_RETURN(!unres->type, LOGMEM(ctx), -1); unres->type[unres->count-1] = type; unres->str_snode = ly_realloc(unres->str_snode, unres->count*sizeof *unres->str_snode); LY_CHECK_ERR_RETURN(!unres->str_snode, LOGMEM(ctx), -1); unres->str_snode[unres->count-1] = snode; unres->module = ly_realloc(unres->module, unres->count*sizeof *unres->module); LY_CHECK_ERR_RETURN(!unres->module, LOGMEM(ctx), -1); unres->module[unres->count-1] = mod; return rc; } /** * @brief Duplicate an unres schema item. Logs indirectly. * * @param[in] mod Main module. * @param[in] unres Unres schema structure to use. * @param[in] item Old item to be resolved. * @param[in] type Type of the old unresolved item. * @param[in] new_item New item to use in the duplicate. * * @return EXIT_SUCCESS on success, EXIT_FAILURE if item is not in unres, -1 on error. 
*/ int unres_schema_dup(struct lys_module *mod, struct unres_schema *unres, void *item, enum UNRES_ITEM type, void *new_item) { int i; struct unres_list_uniq aux_uniq; struct unres_iffeat_data *iff_data; assert(item && new_item && ((type != UNRES_LEAFREF) && (type != UNRES_INSTID) && (type != UNRES_WHEN))); /* hack for UNRES_LIST_UNIQ, which stores multiple items behind its item */ if (type == UNRES_LIST_UNIQ) { aux_uniq.list = item; aux_uniq.expr = ((struct unres_list_uniq *)new_item)->expr; item = &aux_uniq; } i = unres_schema_find(unres, -1, item, type); if (i == -1) { if (type == UNRES_LIST_UNIQ) { free(new_item); } return EXIT_FAILURE; } if ((type == UNRES_TYPE_LEAFREF) || (type == UNRES_USES) || (type == UNRES_TYPE_DFLT) || (type == UNRES_FEATURE) || (type == UNRES_LIST_UNIQ)) { if (unres_schema_add_node(mod, unres, new_item, type, unres->str_snode[i]) == -1) { LOGINT(mod->ctx); return -1; } } else if (type == UNRES_IFFEAT) { /* duplicate unres_iffeature_data */ iff_data = malloc(sizeof *iff_data); LY_CHECK_ERR_RETURN(!iff_data, LOGMEM(mod->ctx), -1); iff_data->fname = lydict_insert(mod->ctx, ((struct unres_iffeat_data *)unres->str_snode[i])->fname, 0); iff_data->node = ((struct unres_iffeat_data *)unres->str_snode[i])->node; if (unres_schema_add_node(mod, unres, new_item, type, (struct lys_node *)iff_data) == -1) { LOGINT(mod->ctx); return -1; } } else { if (unres_schema_add_str(mod, unres, new_item, type, unres->str_snode[i]) == -1) { LOGINT(mod->ctx); return -1; } } return EXIT_SUCCESS; } /* does not log */ int unres_schema_find(struct unres_schema *unres, int start_on_backwards, void *item, enum UNRES_ITEM type) { int i; struct unres_list_uniq *aux_uniq1, *aux_uniq2; if (!unres->count) { return -1; } if (start_on_backwards >= 0) { i = start_on_backwards; } else { i = unres->count - 1; } for (; i > -1; i--) { if (unres->type[i] != type) { continue; } if (type != UNRES_LIST_UNIQ) { if (unres->item[i] == item) { break; } } else { aux_uniq1 = (struct unres_list_uniq *)unres->item[i]; aux_uniq2 = (struct unres_list_uniq *)item; if ((aux_uniq1->list == aux_uniq2->list) && ly_strequal(aux_uniq1->expr, aux_uniq2->expr, 0)) { break; } } } return i; } static void unres_schema_free_item(struct ly_ctx *ctx, struct unres_schema *unres, uint32_t i) { struct lyxml_elem *yin; struct yang_type *yang; struct unres_iffeat_data *iff_data; switch (unres->type[i]) { case UNRES_TYPE_DER_TPDF: case UNRES_TYPE_DER: yin = (struct lyxml_elem *)((struct lys_type *)unres->item[i])->der; if (yin->flags & LY_YANG_STRUCTURE_FLAG) { yang =(struct yang_type *)yin; ((struct lys_type *)unres->item[i])->base = yang->base; lydict_remove(ctx, yang->name); free(yang); if (((struct lys_type *)unres->item[i])->base == LY_TYPE_UNION) { yang_free_type_union(ctx, (struct lys_type *)unres->item[i]); } } else { lyxml_free(ctx, yin); } break; case UNRES_IFFEAT: iff_data = (struct unres_iffeat_data *)unres->str_snode[i]; lydict_remove(ctx, iff_data->fname); free(unres->str_snode[i]); break; case UNRES_IDENT: case UNRES_TYPE_IDENTREF: case UNRES_CHOICE_DFLT: case UNRES_LIST_KEYS: lydict_remove(ctx, (const char *)unres->str_snode[i]); break; case UNRES_LIST_UNIQ: free(unres->item[i]); break; case UNRES_EXT: free(unres->str_snode[i]); break; case UNRES_EXT_FINALIZE: free(unres->str_snode[i]); default: break; } unres->type[i] = UNRES_RESOLVED; } void unres_schema_free(struct lys_module *module, struct unres_schema **unres, int all) { uint32_t i; unsigned int unresolved = 0; if (!unres || !(*unres)) { return; } assert(module || 
((*unres)->count == 0)); for (i = 0; i < (*unres)->count; ++i) { if (!all && ((*unres)->module[i] != module)) { if ((*unres)->type[i] != UNRES_RESOLVED) { unresolved++; } continue; } /* free heap memory for the specific item */ unres_schema_free_item(module->ctx, *unres, i); } /* free it all */ if (!module || all || (!unresolved && !module->type)) { free((*unres)->item); free((*unres)->type); free((*unres)->str_snode); free((*unres)->module); free((*unres)); (*unres) = NULL; } } /* check whether instance-identifier points outside its data subtree (for operation it is any node * outside the operation subtree, otherwise it is a node from a foreign model) */ static int check_instid_ext_dep(const struct lys_node *sleaf, const char *json_instid) { const struct lys_node *op_node, *first_node; enum int_log_opts prev_ilo; char *buf, *tmp; if (!json_instid || !json_instid[0]) { /* no/empty value */ return 0; } for (op_node = lys_parent(sleaf); op_node && !(op_node->nodetype & (LYS_NOTIF | LYS_RPC | LYS_ACTION)); op_node = lys_parent(op_node)); if (op_node && lys_parent(op_node)) { /* nested operation - any absolute path is external */ return 1; } /* get the first node from the instid */ tmp = strchr(json_instid + 1, '/'); buf = strndup(json_instid, tmp ? (size_t)(tmp - json_instid) : strlen(json_instid)); if (!buf) { /* so that we do not have to bother with logging, say it is not external */ return 0; } /* find the first schema node, do not log */ ly_ilo_change(NULL, ILO_IGNORE, &prev_ilo, NULL); first_node = ly_ctx_get_node(NULL, sleaf, buf, 0); ly_ilo_restore(NULL, prev_ilo, NULL, 0); free(buf); if (!first_node) { /* unknown path, say it is external */ return 1; } /* based on the first schema node in the path we can decide whether it points to an external tree or not */ if (op_node) { if (op_node != first_node) { /* it is a top-level operation, so we're good if it points somewhere inside it */ return 1; } } else { if (lys_node_module(sleaf) != lys_node_module(first_node)) { /* modules differ */ return 1; } } return 0; } /** * @brief Resolve instance-identifier in JSON data format. Logs directly. * * @param[in] data Data node where the path is used * @param[in] path Instance-identifier node value. * @param[in,out] ret Resolved instance or NULL. * * @return 0 on success (even if unresolved and \p ret is NULL), -1 on error. 
*/ static int resolve_instid(struct lyd_node *data, const char *path, int req_inst, struct lyd_node **ret) { int i = 0, j, parsed, cur_idx; const struct lys_module *mod, *prev_mod = NULL; struct ly_ctx *ctx = data->schema->module->ctx; struct lyd_node *root, *node; const char *model = NULL, *name; char *str; int mod_len, name_len, has_predicate; struct unres_data node_match; memset(&node_match, 0, sizeof node_match); *ret = NULL; /* we need root to resolve absolute path */ for (root = data; root->parent; root = root->parent); /* we're still parsing it and the pointer is not correct yet */ if (root->prev) { for (; root->prev->next; root = root->prev); } /* search for the instance node */ while (path[i]) { j = parse_instance_identifier(&path[i], &model, &mod_len, &name, &name_len, &has_predicate); if (j <= 0) { LOGVAL(ctx, LYE_INCHAR, LY_VLOG_LYD, data, path[i-j], &path[i-j]); goto error; } i += j; if (model) { str = strndup(model, mod_len); if (!str) { LOGMEM(ctx); goto error; } mod = ly_ctx_get_module(ctx, str, NULL, 1); if (ctx->data_clb) { if (!mod) { mod = ctx->data_clb(ctx, str, NULL, 0, ctx->data_clb_data); } else if (!mod->implemented) { mod = ctx->data_clb(ctx, mod->name, mod->ns, LY_MODCLB_NOT_IMPLEMENTED, ctx->data_clb_data); } } free(str); if (!mod || !mod->implemented || mod->disabled) { break; } } else if (!prev_mod) { /* first iteration and we are missing module name */ LOGVAL(ctx, LYE_INELEM_LEN, LY_VLOG_LYD, data, name_len, name); LOGVAL(ctx, LYE_SPEC, LY_VLOG_PREV, NULL, "Instance-identifier is missing prefix in the first node."); goto error; } else { mod = prev_mod; } if (resolve_data(mod, name, name_len, root, &node_match)) { /* no instance exists */ break; } if (has_predicate) { /* we have predicate, so the current results must be list or leaf-list */ parsed = j = 0; /* index of the current node (for lists with position predicates) */ cur_idx = 1; while (j < (signed)node_match.count) { node = node_match.node[j]; parsed = resolve_instid_predicate(mod, &path[i], &node, cur_idx); if (parsed < 1) { LOGVAL(ctx, LYE_INPRED, LY_VLOG_LYD, data, &path[i - parsed]); goto error; } if (!node) { /* current node does not satisfy the predicate */ unres_data_del(&node_match, j); } else { ++j; } ++cur_idx; } i += parsed; } else if (node_match.count) { /* check that we are not addressing lists */ for (j = 0; (unsigned)j < node_match.count; ++j) { if (node_match.node[j]->schema->nodetype == LYS_LIST) { unres_data_del(&node_match, j--); } } if (!node_match.count) { LOGVAL(ctx, LYE_SPEC, LY_VLOG_LYD, data, "Instance identifier is missing list keys."); } } prev_mod = mod; } if (!node_match.count) { /* no instance exists */ if (req_inst > -1) { LOGVAL(ctx, LYE_NOREQINS, LY_VLOG_LYD, data, path); return EXIT_FAILURE; } LOGVRB("There is no instance of \"%s\", but it is not required.", path); return EXIT_SUCCESS; } else if (node_match.count > 1) { /* instance identifier must resolve to a single node */ LOGVAL(ctx, LYE_TOOMANY, LY_VLOG_LYD, data, path, "data tree"); goto error; } else { /* we have required result, remember it and cleanup */ *ret = node_match.node[0]; free(node_match.node); return EXIT_SUCCESS; } error: /* cleanup */ free(node_match.node); return -1; } static int resolve_leafref(struct lyd_node_leaf_list *leaf, const char *path, int req_inst, struct lyd_node **ret) { struct lyxp_set xp_set; uint32_t i; memset(&xp_set, 0, sizeof xp_set); *ret = NULL; /* syntax was already checked, so just evaluate the path using standard XPath */ if (lyxp_eval(path, (struct lyd_node *)leaf, 
LYXP_NODE_ELEM, lyd_node_module((struct lyd_node *)leaf), &xp_set, 0) != EXIT_SUCCESS) { return -1; } if (xp_set.type == LYXP_SET_NODE_SET) { for (i = 0; i < xp_set.used; ++i) { if ((xp_set.val.nodes[i].type != LYXP_NODE_ELEM) || !(xp_set.val.nodes[i].node->schema->nodetype & (LYS_LEAF | LYS_LEAFLIST))) { continue; } /* not that the value is already in canonical form since the parsers does the conversion, * so we can simply compare just the values */ if (ly_strequal(leaf->value_str, ((struct lyd_node_leaf_list *)xp_set.val.nodes[i].node)->value_str, 1)) { /* we have the match */ *ret = xp_set.val.nodes[i].node; break; } } } lyxp_set_cast(&xp_set, LYXP_SET_EMPTY, (struct lyd_node *)leaf, NULL, 0); if (!*ret) { /* reference not found */ if (req_inst > -1) { LOGVAL(leaf->schema->module->ctx, LYE_NOLEAFREF, LY_VLOG_LYD, leaf, path, leaf->value_str); return EXIT_FAILURE; } else { LOGVRB("There is no leafref \"%s\" with the value \"%s\", but it is not required.", path, leaf->value_str); } } return EXIT_SUCCESS; } /* ignore fail because we are parsing edit-config, get, or get-config - but only if the union includes leafref or instid */ int resolve_union(struct lyd_node_leaf_list *leaf, struct lys_type *type, int store, int ignore_fail, struct lys_type **resolved_type) { struct ly_ctx *ctx = leaf->schema->module->ctx; struct lys_type *t; struct lyd_node *ret; enum int_log_opts prev_ilo; int found, success = 0, ext_dep, req_inst; const char *json_val = NULL; assert(type->base == LY_TYPE_UNION); if ((leaf->value_type == LY_TYPE_UNION) || ((leaf->value_type == LY_TYPE_INST) && (leaf->value_flags & LY_VALUE_UNRES))) { /* either NULL or instid previously converted to JSON */ json_val = lydict_insert(ctx, leaf->value.string, 0); } if (store) { lyd_free_value(leaf->value, leaf->value_type, leaf->value_flags, &((struct lys_node_leaf *)leaf->schema)->type, NULL, NULL, NULL); memset(&leaf->value, 0, sizeof leaf->value); } /* turn logging off, we are going to try to validate the value with all the types in order */ ly_ilo_change(NULL, ILO_IGNORE, &prev_ilo, 0); t = NULL; found = 0; while ((t = lyp_get_next_union_type(type, t, &found))) { found = 0; switch (t->base) { case LY_TYPE_LEAFREF: if ((ignore_fail == 1) || ((leaf->schema->flags & LYS_LEAFREF_DEP) && (ignore_fail == 2))) { req_inst = -1; } else { req_inst = t->info.lref.req; } if (!resolve_leafref(leaf, t->info.lref.path, req_inst, &ret)) { if (store) { if (ret && !(leaf->schema->flags & LYS_LEAFREF_DEP)) { /* valid resolved */ leaf->value.leafref = ret; leaf->value_type = LY_TYPE_LEAFREF; } else { /* valid unresolved */ ly_ilo_restore(NULL, prev_ilo, NULL, 0); if (!lyp_parse_value(t, &leaf->value_str, NULL, leaf, NULL, NULL, 1, 0, 0)) { return -1; } ly_ilo_change(NULL, ILO_IGNORE, &prev_ilo, NULL); } } success = 1; } break; case LY_TYPE_INST: ext_dep = check_instid_ext_dep(leaf->schema, (json_val ? json_val : leaf->value_str)); if ((ignore_fail == 1) || (ext_dep && (ignore_fail == 2))) { req_inst = -1; } else { req_inst = t->info.inst.req; } if (!resolve_instid((struct lyd_node *)leaf, (json_val ? 
json_val : leaf->value_str), req_inst, &ret)) { if (store) { if (ret && !ext_dep) { /* valid resolved */ leaf->value.instance = ret; leaf->value_type = LY_TYPE_INST; if (json_val) { lydict_remove(leaf->schema->module->ctx, leaf->value_str); leaf->value_str = json_val; json_val = NULL; } } else { /* valid unresolved */ if (json_val) { /* put the JSON val back */ leaf->value.string = json_val; json_val = NULL; } else { leaf->value.instance = NULL; } leaf->value_type = LY_TYPE_INST; leaf->value_flags |= LY_VALUE_UNRES; } } success = 1; } break; default: if (lyp_parse_value(t, &leaf->value_str, NULL, leaf, NULL, NULL, store, 0, 0)) { success = 1; } break; } if (success) { break; } /* erase possible present and invalid value data */ if (store) { lyd_free_value(leaf->value, leaf->value_type, leaf->value_flags, t, NULL, NULL, NULL); memset(&leaf->value, 0, sizeof leaf->value); } } /* turn logging back on */ ly_ilo_restore(NULL, prev_ilo, NULL, 0); if (json_val) { if (!success) { /* put the value back for now */ assert(leaf->value_type == LY_TYPE_UNION); leaf->value.string = json_val; } else { /* value was ultimately useless, but we could not have known */ lydict_remove(leaf->schema->module->ctx, json_val); } } if (success) { if (resolved_type) { *resolved_type = t; } } else if (!ignore_fail || !type->info.uni.has_ptr_type) { /* not found and it is required */ LOGVAL(ctx, LYE_INVAL, LY_VLOG_LYD, leaf, leaf->value_str ? leaf->value_str : "", leaf->schema->name); return EXIT_FAILURE; } return EXIT_SUCCESS; } /** * @brief Resolve a single unres data item. Logs directly. * * @param[in] node Data node to resolve. * @param[in] type Type of the unresolved item. * @param[in] ignore_fail 0 - no, 1 - yes, 2 - yes, but only for external dependencies. * * @return EXIT_SUCCESS on success, EXIT_FAILURE on forward reference, -1 on error. 
*/ int resolve_unres_data_item(struct lyd_node *node, enum UNRES_ITEM type, int ignore_fail, struct lys_when **failed_when) { int rc, req_inst, ext_dep; struct lyd_node_leaf_list *leaf; struct lyd_node *ret; struct lys_node_leaf *sleaf; leaf = (struct lyd_node_leaf_list *)node; sleaf = (struct lys_node_leaf *)leaf->schema; switch (type) { case UNRES_LEAFREF: assert(sleaf->type.base == LY_TYPE_LEAFREF); assert(leaf->validity & LYD_VAL_LEAFREF); if ((ignore_fail == 1) || ((leaf->schema->flags & LYS_LEAFREF_DEP) && (ignore_fail == 2))) { req_inst = -1; } else { req_inst = sleaf->type.info.lref.req; } rc = resolve_leafref(leaf, sleaf->type.info.lref.path, req_inst, &ret); if (!rc) { if (ret && !(leaf->schema->flags & LYS_LEAFREF_DEP)) { /* valid resolved */ if (leaf->value_type == LY_TYPE_BITS) { free(leaf->value.bit); } leaf->value.leafref = ret; leaf->value_type = LY_TYPE_LEAFREF; leaf->value_flags &= ~LY_VALUE_UNRES; } else { /* valid unresolved */ if (!(leaf->value_flags & LY_VALUE_UNRES)) { if (!lyp_parse_value(&sleaf->type, &leaf->value_str, NULL, leaf, NULL, NULL, 1, 0, 0)) { return -1; } } } leaf->validity &= ~LYD_VAL_LEAFREF; } else { return rc; } break; case UNRES_INSTID: assert(sleaf->type.base == LY_TYPE_INST); ext_dep = check_instid_ext_dep(leaf->schema, leaf->value_str); if (ext_dep == -1) { return -1; } if ((ignore_fail == 1) || (ext_dep && (ignore_fail == 2))) { req_inst = -1; } else { req_inst = sleaf->type.info.inst.req; } rc = resolve_instid(node, leaf->value_str, req_inst, &ret); if (!rc) { if (ret && !ext_dep) { /* valid resolved */ leaf->value.instance = ret; leaf->value_type = LY_TYPE_INST; leaf->value_flags &= ~LY_VALUE_UNRES; } else { /* valid unresolved */ leaf->value.instance = NULL; leaf->value_type = LY_TYPE_INST; leaf->value_flags |= LY_VALUE_UNRES; } } else { return rc; } break; case UNRES_UNION: assert(sleaf->type.base == LY_TYPE_UNION); return resolve_union(leaf, &sleaf->type, 1, ignore_fail, NULL); case UNRES_WHEN: if ((rc = resolve_when(node, ignore_fail, failed_when))) { return rc; } break; case UNRES_MUST: if ((rc = resolve_must(node, 0, ignore_fail))) { return rc; } break; case UNRES_MUST_INOUT: if ((rc = resolve_must(node, 1, ignore_fail))) { return rc; } break; case UNRES_UNIQ_LEAVES: if (lyv_data_unique(node)) { return -1; } break; default: LOGINT(NULL); return -1; } return EXIT_SUCCESS; } /** * @brief add data unres item * * @param[in] unres Unres data structure to use. * @param[in] node Data node to use. * * @return 0 on success, -1 on error. 
*/ int unres_data_add(struct unres_data *unres, struct lyd_node *node, enum UNRES_ITEM type) { assert(unres && node); assert((type == UNRES_LEAFREF) || (type == UNRES_INSTID) || (type == UNRES_WHEN) || (type == UNRES_MUST) || (type == UNRES_MUST_INOUT) || (type == UNRES_UNION) || (type == UNRES_UNIQ_LEAVES)); unres->count++; unres->node = ly_realloc(unres->node, unres->count * sizeof *unres->node); LY_CHECK_ERR_RETURN(!unres->node, LOGMEM(NULL), -1); unres->node[unres->count - 1] = node; unres->type = ly_realloc(unres->type, unres->count * sizeof *unres->type); LY_CHECK_ERR_RETURN(!unres->type, LOGMEM(NULL), -1); unres->type[unres->count - 1] = type; return 0; } static void resolve_unres_data_autodel_diff(struct unres_data *unres, uint32_t unres_i) { struct lyd_node *next, *child, *parent; uint32_t i; for (i = 0; i < unres->diff_idx; ++i) { if (unres->diff->type[i] == LYD_DIFF_DELETED) { /* only leaf(-list) default could be removed and there is nothing to be checked in that case */ continue; } if (unres->diff->second[i] == unres->node[unres_i]) { /* 1) default value was supposed to be created, but is disabled by when * -> remove it from diff altogether */ unres_data_diff_rem(unres, i); /* if diff type is CREATED, the value was just a pointer, it can be freed normally (unlike in 4) */ return; } else { parent = unres->diff->second[i]->parent; while (parent && (parent != unres->node[unres_i])) { parent = parent->parent; } if (parent) { /* 2) default value was supposed to be created but is disabled by when in some parent * -> remove this default subtree and add the rest into diff as deleted instead in 4) */ unres_data_diff_rem(unres, i); break; } LY_TREE_DFS_BEGIN(unres->diff->second[i]->parent, next, child) { if (child == unres->node[unres_i]) { /* 3) some default child of a default value was supposed to be created but has false when * -> the subtree will be freed later and automatically disconnected from the diff parent node */ return; } LY_TREE_DFS_END(unres->diff->second[i]->parent, next, child); } } } /* 4) it does not overlap with created default values in any way * -> just add it into diff as deleted */ unres_data_diff_new(unres, unres->node[unres_i], unres->node[unres_i]->parent, 0); lyd_unlink(unres->node[unres_i]); /* should not be freed anymore */ unres->node[unres_i] = NULL; } /** * @brief Resolve every unres data item in the structure. Logs directly. * * If options include #LYD_OPT_TRUSTED, the data are considered trusted (must conditions are not expected, * unresolved leafrefs/instids are accepted, when conditions are normally resolved because at least some implicit * non-presence containers may need to be deleted). * * If options includes #LYD_OPT_WHENAUTODEL, the non-default nodes with false when conditions are auto-deleted. * * @param[in] ctx Context used. * @param[in] unres Unres data structure to use. * @param[in,out] root Root node of the data tree, can be changed due to autodeletion. * @param[in] options Data options as described above. * * @return EXIT_SUCCESS on success, -1 on error. 
*/ int resolve_unres_data(struct ly_ctx *ctx, struct unres_data *unres, struct lyd_node **root, int options) { uint32_t i, j, first, resolved, del_items, stmt_count; uint8_t prev_when_status; int rc, progress, ignore_fail; enum int_log_opts prev_ilo; struct ly_err_item *prev_eitem; LY_ERR prev_ly_errno = ly_errno; struct lyd_node *parent; struct lys_when *when; assert(root); assert(unres); if (!unres->count) { return EXIT_SUCCESS; } if (options & (LYD_OPT_NOTIF_FILTER | LYD_OPT_GET | LYD_OPT_GETCONFIG | LYD_OPT_EDIT)) { ignore_fail = 1; } else if (options & LYD_OPT_NOEXTDEPS) { ignore_fail = 2; } else { ignore_fail = 0; } LOGVRB("Resolving unresolved data nodes and their constraints..."); if (!ignore_fail) { /* remember logging state only if errors are generated and valid */ ly_ilo_change(ctx, ILO_STORE, &prev_ilo, &prev_eitem); } /* * when-stmt first */ first = 1; stmt_count = 0; resolved = 0; del_items = 0; do { if (!ignore_fail) { ly_err_free_next(ctx, prev_eitem); } progress = 0; for (i = 0; i < unres->count; i++) { if (unres->type[i] != UNRES_WHEN) { continue; } if (first) { /* count when-stmt nodes in unres list */ stmt_count++; } /* resolve when condition only when all parent when conditions are already resolved */ for (parent = unres->node[i]->parent; parent && LYD_WHEN_DONE(parent->when_status); parent = parent->parent) { if (!parent->parent && (parent->when_status & LYD_WHEN_FALSE)) { /* the parent node was already unlinked, do not resolve this node, * it will be removed anyway, so just mark it as resolved */ unres->node[i]->when_status |= LYD_WHEN_FALSE; unres->type[i] = UNRES_RESOLVED; resolved++; break; } } if (parent) { continue; } prev_when_status = unres->node[i]->when_status; rc = resolve_unres_data_item(unres->node[i], unres->type[i], ignore_fail, &when); if (!rc) { /* finish with error/delete the node only if when was changed from true to false, an external * dependency was not required, or it was not provided (the flag would not be passed down otherwise, * checked in upper functions) */ if ((unres->node[i]->when_status & LYD_WHEN_FALSE) && (!(when->flags & (LYS_XPCONF_DEP | LYS_XPSTATE_DEP)) || !(options & LYD_OPT_NOEXTDEPS))) { if ((!(prev_when_status & LYD_WHEN_TRUE) || !(options & LYD_OPT_WHENAUTODEL)) && !unres->node[i]->dflt) { /* false when condition */ goto error; } /* follows else */ /* auto-delete */ LOGVRB("Auto-deleting node \"%s\" due to when condition (%s)", ly_errpath(ctx), when->cond); /* only unlink now, the subtree can contain another nodes stored in the unres list */ /* if it has parent non-presence containers that would be empty, we should actually * remove the container */ for (parent = unres->node[i]; parent->parent && parent->parent->schema->nodetype == LYS_CONTAINER; parent = parent->parent) { if (((struct lys_node_container *)parent->parent->schema)->presence) { /* presence container */ break; } if (parent->next || parent->prev != parent) { /* non empty (the child we are in and we are going to remove is not the only child) */ break; } } unres->node[i] = parent; if (*root && *root == unres->node[i]) { *root = (*root)->next; } lyd_unlink(unres->node[i]); unres->type[i] = UNRES_DELETE; del_items++; /* update the rest of unres items */ for (j = 0; j < unres->count; j++) { if (unres->type[j] == UNRES_RESOLVED || unres->type[j] == UNRES_DELETE) { continue; } /* test if the node is in subtree to be deleted */ for (parent = unres->node[j]; parent; parent = parent->parent) { if (parent == unres->node[i]) { /* yes, it is */ unres->type[j] = UNRES_RESOLVED; 
                                resolved++;
                                break;
                            }
                        }
                    }
                } else {
                    unres->type[i] = UNRES_RESOLVED;
                }

                if (!ignore_fail) {
                    ly_err_free_next(ctx, prev_eitem);
                }
                resolved++;
                progress = 1;
            } else if (rc == -1) {
                goto error;
            } /* else forward reference */
        }
        first = 0;
    } while (progress && resolved < stmt_count);

    /* do we have some unresolved when-stmt? */
    if (stmt_count > resolved) {
        goto error;
    }

    for (i = 0; del_items && i < unres->count; i++) {
        /* some when-stmt evaluated to false, so now we have to sanitize the unres list */
        if (unres->type[i] != UNRES_DELETE) {
            continue;
        }
        if (!unres->node[i]) {
            unres->type[i] = UNRES_RESOLVED;
            del_items--;
            continue;
        }

        if (unres->store_diff) {
            resolve_unres_data_autodel_diff(unres, i);
        }

        /* really remove the complete subtree */
        lyd_free(unres->node[i]);
        unres->type[i] = UNRES_RESOLVED;
        del_items--;
    }

    /*
     * now leafrefs
     */
    if (options & LYD_OPT_TRUSTED) {
        /* we want to attempt to resolve leafrefs */
        assert(!ignore_fail);
        ignore_fail = 1;
        ly_ilo_restore(ctx, prev_ilo, prev_eitem, 0);
        ly_errno = prev_ly_errno;
    }
    first = 1;
    stmt_count = 0;
    resolved = 0;
    do {
        progress = 0;
        for (i = 0; i < unres->count; i++) {
            if (unres->type[i] != UNRES_LEAFREF) {
                continue;
            }
            if (first) {
                /* count leafref nodes in unres list */
                stmt_count++;
            }

            rc = resolve_unres_data_item(unres->node[i], unres->type[i], ignore_fail, NULL);
            if (!rc) {
                unres->type[i] = UNRES_RESOLVED;
                if (!ignore_fail) {
                    ly_err_free_next(ctx, prev_eitem);
                }
                resolved++;
                progress = 1;
            } else if (rc == -1) {
                goto error;
            } /* else forward reference */
        }
        first = 0;
    } while (progress && resolved < stmt_count);

    /* do we have some unresolved leafrefs? */
    if (stmt_count > resolved) {
        goto error;
    }

    if (!ignore_fail) {
        /* log normally now, throw away irrelevant errors */
        ly_ilo_restore(ctx, prev_ilo, prev_eitem, 0);
        ly_errno = prev_ly_errno;
    }

    /*
     * rest
     */
    for (i = 0; i < unres->count; ++i) {
        if (unres->type[i] == UNRES_RESOLVED) {
            continue;
        }
        assert(!(options & LYD_OPT_TRUSTED) || ((unres->type[i] != UNRES_MUST) && (unres->type[i] != UNRES_MUST_INOUT)));

        rc = resolve_unres_data_item(unres->node[i], unres->type[i], ignore_fail, NULL);
        if (rc) {
            /* since when was already resolved, a forward reference is an error */
            return -1;
        }

        unres->type[i] = UNRES_RESOLVED;
    }

    LOGVRB("All data nodes and constraints resolved.");
    unres->count = 0;
    return EXIT_SUCCESS;

error:
    if (!ignore_fail) {
        /* print all the new errors */
        ly_ilo_restore(ctx, prev_ilo, prev_eitem, 1);
        /* do not restore ly_errno, it was updated properly */
    }
    return -1;
}
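/*
 * Illustrative sketch only -- not part of libyang. It isolates the generic
 * "retry while progress is made" loop that resolve_unres_schema_types() and
 * resolve_unres_data() above use for forward references: a failure on a single
 * item only postpones it to a later pass, and a full pass that makes no
 * progress turns the remaining items into real errors. All names below
 * (toy_item, toy_resolve, TOY_*) are hypothetical and exist only for this
 * example.
 */
#include <stdio.h>

enum toy_state { TOY_UNRESOLVED, TOY_RESOLVED };

struct toy_item {
    int dep;              /* index of the item this one depends on, or -1 */
    enum toy_state state;
};

/* 0 on success, 1 on forward reference (dependency not resolved yet) */
static int toy_resolve(struct toy_item *items, int i)
{
    if (items[i].dep >= 0 && items[items[i].dep].state != TOY_RESOLVED) {
        return 1;
    }
    items[i].state = TOY_RESOLVED;
    return 0;
}

int main(void)
{
    /* item 0 depends on item 2, item 2 depends on item 1 */
    struct toy_item items[3] = {{2, TOY_UNRESOLVED}, {-1, TOY_UNRESOLVED}, {1, TOY_UNRESOLVED}};
    int i, unres_count, res_count;

    do {
        unres_count = 0;
        res_count = 0;
        for (i = 0; i < 3; i++) {
            if (items[i].state == TOY_RESOLVED) {
                continue;
            }
            ++unres_count;
            if (!toy_resolve(items, i)) {
                ++res_count;
            }
        }
        /* keep iterating only while this pass resolved something */
    } while (res_count && (res_count < unres_count));

    /* whatever is left now is a genuine error, not a forward reference */
    printf("still unresolved: %d\n", unres_count - res_count);
    return 0;
}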
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1362_0
crossvul-cpp_data_bad_1240_0
// SPDX-License-Identifier: GPL-2.0 /* * Crypto user configuration API. * * Copyright (C) 2017-2018 Corentin Labbe <clabbe@baylibre.com> * */ #include <linux/crypto.h> #include <linux/cryptouser.h> #include <linux/sched.h> #include <net/netlink.h> #include <net/sock.h> #include <crypto/internal/skcipher.h> #include <crypto/internal/rng.h> #include <crypto/akcipher.h> #include <crypto/kpp.h> #include <crypto/internal/cryptouser.h> #include "internal.h" #define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x)) struct crypto_dump_info { struct sk_buff *in_skb; struct sk_buff *out_skb; u32 nlmsg_seq; u16 nlmsg_flags; }; static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_stat_aead raead; memset(&raead, 0, sizeof(raead)); strscpy(raead.type, "aead", sizeof(raead.type)); raead.stat_encrypt_cnt = atomic64_read(&alg->stats.aead.encrypt_cnt); raead.stat_encrypt_tlen = atomic64_read(&alg->stats.aead.encrypt_tlen); raead.stat_decrypt_cnt = atomic64_read(&alg->stats.aead.decrypt_cnt); raead.stat_decrypt_tlen = atomic64_read(&alg->stats.aead.decrypt_tlen); raead.stat_err_cnt = atomic64_read(&alg->stats.aead.err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead); } static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_stat_cipher rcipher; memset(&rcipher, 0, sizeof(rcipher)); strscpy(rcipher.type, "cipher", sizeof(rcipher.type)); rcipher.stat_encrypt_cnt = atomic64_read(&alg->stats.cipher.encrypt_cnt); rcipher.stat_encrypt_tlen = atomic64_read(&alg->stats.cipher.encrypt_tlen); rcipher.stat_decrypt_cnt = atomic64_read(&alg->stats.cipher.decrypt_cnt); rcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.cipher.decrypt_tlen); rcipher.stat_err_cnt = atomic64_read(&alg->stats.cipher.err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher); } static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_stat_compress rcomp; memset(&rcomp, 0, sizeof(rcomp)); strscpy(rcomp.type, "compression", sizeof(rcomp.type)); rcomp.stat_compress_cnt = atomic64_read(&alg->stats.compress.compress_cnt); rcomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen); rcomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt); rcomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen); rcomp.stat_err_cnt = atomic64_read(&alg->stats.compress.err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp); } static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_stat_compress racomp; memset(&racomp, 0, sizeof(racomp)); strscpy(racomp.type, "acomp", sizeof(racomp.type)); racomp.stat_compress_cnt = atomic64_read(&alg->stats.compress.compress_cnt); racomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen); racomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt); racomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen); racomp.stat_err_cnt = atomic64_read(&alg->stats.compress.err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp); } static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_stat_akcipher rakcipher; memset(&rakcipher, 0, sizeof(rakcipher)); strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type)); rakcipher.stat_encrypt_cnt = atomic64_read(&alg->stats.akcipher.encrypt_cnt); rakcipher.stat_encrypt_tlen = 
atomic64_read(&alg->stats.akcipher.encrypt_tlen); rakcipher.stat_decrypt_cnt = atomic64_read(&alg->stats.akcipher.decrypt_cnt); rakcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.akcipher.decrypt_tlen); rakcipher.stat_sign_cnt = atomic64_read(&alg->stats.akcipher.sign_cnt); rakcipher.stat_verify_cnt = atomic64_read(&alg->stats.akcipher.verify_cnt); rakcipher.stat_err_cnt = atomic64_read(&alg->stats.akcipher.err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER, sizeof(rakcipher), &rakcipher); } static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_stat_kpp rkpp; memset(&rkpp, 0, sizeof(rkpp)); strscpy(rkpp.type, "kpp", sizeof(rkpp.type)); rkpp.stat_setsecret_cnt = atomic64_read(&alg->stats.kpp.setsecret_cnt); rkpp.stat_generate_public_key_cnt = atomic64_read(&alg->stats.kpp.generate_public_key_cnt); rkpp.stat_compute_shared_secret_cnt = atomic64_read(&alg->stats.kpp.compute_shared_secret_cnt); rkpp.stat_err_cnt = atomic64_read(&alg->stats.kpp.err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp); } static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_stat_hash rhash; memset(&rhash, 0, sizeof(rhash)); strscpy(rhash.type, "ahash", sizeof(rhash.type)); rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt); rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen); rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash); } static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_stat_hash rhash; memset(&rhash, 0, sizeof(rhash)); strscpy(rhash.type, "shash", sizeof(rhash.type)); rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt); rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen); rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash); } static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_stat_rng rrng; memset(&rrng, 0, sizeof(rrng)); strscpy(rrng.type, "rng", sizeof(rrng.type)); rrng.stat_generate_cnt = atomic64_read(&alg->stats.rng.generate_cnt); rrng.stat_generate_tlen = atomic64_read(&alg->stats.rng.generate_tlen); rrng.stat_seed_cnt = atomic64_read(&alg->stats.rng.seed_cnt); rrng.stat_err_cnt = atomic64_read(&alg->stats.rng.err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng); } static int crypto_reportstat_one(struct crypto_alg *alg, struct crypto_user_alg *ualg, struct sk_buff *skb) { memset(ualg, 0, sizeof(*ualg)); strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name)); strscpy(ualg->cru_driver_name, alg->cra_driver_name, sizeof(ualg->cru_driver_name)); strscpy(ualg->cru_module_name, module_name(alg->cra_module), sizeof(ualg->cru_module_name)); ualg->cru_type = 0; ualg->cru_mask = 0; ualg->cru_flags = alg->cra_flags; ualg->cru_refcnt = refcount_read(&alg->cra_refcnt); if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority)) goto nla_put_failure; if (alg->cra_flags & CRYPTO_ALG_LARVAL) { struct crypto_stat_larval rl; memset(&rl, 0, sizeof(rl)); strscpy(rl.type, "larval", sizeof(rl.type)); if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL, sizeof(rl), &rl)) goto nla_put_failure; goto out; } switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) { case CRYPTO_ALG_TYPE_AEAD: if (crypto_report_aead(skb, alg)) goto nla_put_failure; break; case CRYPTO_ALG_TYPE_SKCIPHER: if (crypto_report_cipher(skb, 
alg)) goto nla_put_failure; break; case CRYPTO_ALG_TYPE_BLKCIPHER: if (crypto_report_cipher(skb, alg)) goto nla_put_failure; break; case CRYPTO_ALG_TYPE_CIPHER: if (crypto_report_cipher(skb, alg)) goto nla_put_failure; break; case CRYPTO_ALG_TYPE_COMPRESS: if (crypto_report_comp(skb, alg)) goto nla_put_failure; break; case CRYPTO_ALG_TYPE_ACOMPRESS: if (crypto_report_acomp(skb, alg)) goto nla_put_failure; break; case CRYPTO_ALG_TYPE_SCOMPRESS: if (crypto_report_acomp(skb, alg)) goto nla_put_failure; break; case CRYPTO_ALG_TYPE_AKCIPHER: if (crypto_report_akcipher(skb, alg)) goto nla_put_failure; break; case CRYPTO_ALG_TYPE_KPP: if (crypto_report_kpp(skb, alg)) goto nla_put_failure; break; case CRYPTO_ALG_TYPE_AHASH: if (crypto_report_ahash(skb, alg)) goto nla_put_failure; break; case CRYPTO_ALG_TYPE_HASH: if (crypto_report_shash(skb, alg)) goto nla_put_failure; break; case CRYPTO_ALG_TYPE_RNG: if (crypto_report_rng(skb, alg)) goto nla_put_failure; break; default: pr_err("ERROR: Unhandled alg %d in %s\n", alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL), __func__); } out: return 0; nla_put_failure: return -EMSGSIZE; } static int crypto_reportstat_alg(struct crypto_alg *alg, struct crypto_dump_info *info) { struct sk_buff *in_skb = info->in_skb; struct sk_buff *skb = info->out_skb; struct nlmsghdr *nlh; struct crypto_user_alg *ualg; int err = 0; nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq, CRYPTO_MSG_GETSTAT, sizeof(*ualg), info->nlmsg_flags); if (!nlh) { err = -EMSGSIZE; goto out; } ualg = nlmsg_data(nlh); err = crypto_reportstat_one(alg, ualg, skb); if (err) { nlmsg_cancel(skb, nlh); goto out; } nlmsg_end(skb, nlh); out: return err; } int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs) { struct net *net = sock_net(in_skb->sk); struct crypto_user_alg *p = nlmsg_data(in_nlh); struct crypto_alg *alg; struct sk_buff *skb; struct crypto_dump_info info; int err; if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name)) return -EINVAL; alg = crypto_alg_match(p, 0); if (!alg) return -ENOENT; err = -ENOMEM; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) goto drop_alg; info.in_skb = in_skb; info.out_skb = skb; info.nlmsg_seq = in_nlh->nlmsg_seq; info.nlmsg_flags = 0; err = crypto_reportstat_alg(alg, &info); drop_alg: crypto_mod_put(alg); if (err) return err; return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid); } MODULE_LICENSE("GPL");
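/*
 * Illustrative userspace sketch only -- not kernel code. It demonstrates the
 * same bounded-length check as the null_terminated() macro used by
 * crypto_reportstat() above: strnlen() never scans past the fixed-size field,
 * so a name received without a terminating NUL is rejected instead of being
 * overread. The struct and function names here (msg_hdr, name_is_terminated)
 * are made up for the example.
 */
#include <stdio.h>
#include <string.h>

struct msg_hdr {
    char name[16];  /* copied verbatim from an untrusted source, may lack '\0' */
};

static int name_is_terminated(const struct msg_hdr *h)
{
    /* sizeof bounds the scan; reaching the bound means no NUL inside the field */
    return strnlen(h->name, sizeof(h->name)) < sizeof(h->name);
}

int main(void)
{
    struct msg_hdr ok, bad;

    memset(&ok, 0, sizeof(ok));
    strcpy(ok.name, "sha256");                  /* properly terminated */
    memset(bad.name, 'A', sizeof(bad.name));    /* all 16 bytes used, no '\0' */

    printf("ok:  %d\n", name_is_terminated(&ok));   /* 1 - safe to treat as a C string */
    printf("bad: %d\n", name_is_terminated(&bad));  /* 0 - reject before any strcmp()/strlen() */
    return 0;
}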
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1240_0
crossvul-cpp_data_good_1071_0
/********************************************************************** regparse.c - Oniguruma (regular expression library) **********************************************************************/ /*- * Copyright (c) 2002-2019 K.Kosako <sndgk393 AT ybb DOT ne DOT jp> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "regparse.h" #include "st.h" #ifdef DEBUG_NODE_FREE #include <stdio.h> #endif #define INIT_TAG_NAMES_ALLOC_NUM 5 #define WARN_BUFSIZE 256 #define CASE_FOLD_IS_APPLIED_INSIDE_NEGATIVE_CCLASS #define IS_ALLOWED_CODE_IN_CALLOUT_NAME(c) \ ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '_' /* || c == '!' */) #define IS_ALLOWED_CODE_IN_CALLOUT_TAG_NAME(c) \ ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '_') OnigSyntaxType OnigSyntaxOniguruma = { (( SYN_GNU_REGEX_OP | ONIG_SYN_OP_QMARK_NON_GREEDY | ONIG_SYN_OP_ESC_OCTAL3 | ONIG_SYN_OP_ESC_X_HEX2 | ONIG_SYN_OP_ESC_X_BRACE_HEX8 | ONIG_SYN_OP_ESC_O_BRACE_OCTAL | ONIG_SYN_OP_ESC_CONTROL_CHARS | ONIG_SYN_OP_ESC_C_CONTROL ) & ~ONIG_SYN_OP_ESC_LTGT_WORD_BEGIN_END ) , ( ONIG_SYN_OP2_QMARK_GROUP_EFFECT | ONIG_SYN_OP2_OPTION_ONIGURUMA | ONIG_SYN_OP2_QMARK_LT_NAMED_GROUP | ONIG_SYN_OP2_ESC_K_NAMED_BACKREF | ONIG_SYN_OP2_QMARK_LPAREN_IF_ELSE | ONIG_SYN_OP2_QMARK_TILDE_ABSENT_GROUP | ONIG_SYN_OP2_QMARK_BRACE_CALLOUT_CONTENTS | ONIG_SYN_OP2_ASTERISK_CALLOUT_NAME | ONIG_SYN_OP2_ESC_X_Y_TEXT_SEGMENT | ONIG_SYN_OP2_ESC_CAPITAL_R_GENERAL_NEWLINE | ONIG_SYN_OP2_ESC_CAPITAL_N_O_SUPER_DOT | ONIG_SYN_OP2_ESC_CAPITAL_K_KEEP | ONIG_SYN_OP2_ESC_G_SUBEXP_CALL | ONIG_SYN_OP2_ESC_P_BRACE_CHAR_PROPERTY | ONIG_SYN_OP2_ESC_P_BRACE_CIRCUMFLEX_NOT | ONIG_SYN_OP2_PLUS_POSSESSIVE_REPEAT | ONIG_SYN_OP2_CCLASS_SET_OP | ONIG_SYN_OP2_ESC_CAPITAL_C_BAR_CONTROL | ONIG_SYN_OP2_ESC_CAPITAL_M_BAR_META | ONIG_SYN_OP2_ESC_V_VTAB | ONIG_SYN_OP2_ESC_H_XDIGIT | ONIG_SYN_OP2_ESC_U_HEX4 ) , ( SYN_GNU_REGEX_BV | ONIG_SYN_ALLOW_INTERVAL_LOW_ABBREV | ONIG_SYN_DIFFERENT_LEN_ALT_LOOK_BEHIND | ONIG_SYN_CAPTURE_ONLY_NAMED_GROUP | ONIG_SYN_ALLOW_MULTIPLEX_DEFINITION_NAME | ONIG_SYN_FIXED_INTERVAL_IS_GREEDY_ONLY | ONIG_SYN_WARN_CC_OP_NOT_ESCAPED | ONIG_SYN_WARN_REDUNDANT_NESTED_REPEAT ) , ONIG_OPTION_NONE , { (OnigCodePoint )'\\' /* esc */ , (OnigCodePoint )ONIG_INEFFECTIVE_META_CHAR /* anychar '.' 
*/ , (OnigCodePoint )ONIG_INEFFECTIVE_META_CHAR /* anytime '*' */ , (OnigCodePoint )ONIG_INEFFECTIVE_META_CHAR /* zero or one time '?' */ , (OnigCodePoint )ONIG_INEFFECTIVE_META_CHAR /* one or more time '+' */ , (OnigCodePoint )ONIG_INEFFECTIVE_META_CHAR /* anychar anytime */ } }; OnigSyntaxType OnigSyntaxRuby = { (( SYN_GNU_REGEX_OP | ONIG_SYN_OP_QMARK_NON_GREEDY | ONIG_SYN_OP_ESC_OCTAL3 | ONIG_SYN_OP_ESC_X_HEX2 | ONIG_SYN_OP_ESC_X_BRACE_HEX8 | ONIG_SYN_OP_ESC_O_BRACE_OCTAL | ONIG_SYN_OP_ESC_CONTROL_CHARS | ONIG_SYN_OP_ESC_C_CONTROL ) & ~ONIG_SYN_OP_ESC_LTGT_WORD_BEGIN_END ) , ( ONIG_SYN_OP2_QMARK_GROUP_EFFECT | ONIG_SYN_OP2_OPTION_RUBY | ONIG_SYN_OP2_QMARK_LT_NAMED_GROUP | ONIG_SYN_OP2_ESC_K_NAMED_BACKREF | ONIG_SYN_OP2_QMARK_LPAREN_IF_ELSE | ONIG_SYN_OP2_QMARK_TILDE_ABSENT_GROUP | ONIG_SYN_OP2_ESC_X_Y_TEXT_SEGMENT | ONIG_SYN_OP2_ESC_CAPITAL_R_GENERAL_NEWLINE | ONIG_SYN_OP2_ESC_CAPITAL_K_KEEP | ONIG_SYN_OP2_ESC_G_SUBEXP_CALL | ONIG_SYN_OP2_ESC_P_BRACE_CHAR_PROPERTY | ONIG_SYN_OP2_ESC_P_BRACE_CIRCUMFLEX_NOT | ONIG_SYN_OP2_PLUS_POSSESSIVE_REPEAT | ONIG_SYN_OP2_CCLASS_SET_OP | ONIG_SYN_OP2_ESC_CAPITAL_C_BAR_CONTROL | ONIG_SYN_OP2_ESC_CAPITAL_M_BAR_META | ONIG_SYN_OP2_ESC_V_VTAB | ONIG_SYN_OP2_ESC_H_XDIGIT | ONIG_SYN_OP2_ESC_U_HEX4 ) , ( SYN_GNU_REGEX_BV | ONIG_SYN_ALLOW_INTERVAL_LOW_ABBREV | ONIG_SYN_DIFFERENT_LEN_ALT_LOOK_BEHIND | ONIG_SYN_CAPTURE_ONLY_NAMED_GROUP | ONIG_SYN_ALLOW_MULTIPLEX_DEFINITION_NAME | ONIG_SYN_FIXED_INTERVAL_IS_GREEDY_ONLY | ONIG_SYN_WARN_CC_OP_NOT_ESCAPED | ONIG_SYN_WARN_REDUNDANT_NESTED_REPEAT ) , ONIG_OPTION_NONE , { (OnigCodePoint )'\\' /* esc */ , (OnigCodePoint )ONIG_INEFFECTIVE_META_CHAR /* anychar '.' */ , (OnigCodePoint )ONIG_INEFFECTIVE_META_CHAR /* anytime '*' */ , (OnigCodePoint )ONIG_INEFFECTIVE_META_CHAR /* zero or one time '?' 
*/ , (OnigCodePoint )ONIG_INEFFECTIVE_META_CHAR /* one or more time '+' */ , (OnigCodePoint )ONIG_INEFFECTIVE_META_CHAR /* anychar anytime */ } }; OnigSyntaxType* OnigDefaultSyntax = ONIG_SYNTAX_ONIGURUMA; extern void onig_null_warn(const char* s ARG_UNUSED) { } #ifdef DEFAULT_WARN_FUNCTION static OnigWarnFunc onig_warn = (OnigWarnFunc )DEFAULT_WARN_FUNCTION; #else static OnigWarnFunc onig_warn = onig_null_warn; #endif #ifdef DEFAULT_VERB_WARN_FUNCTION static OnigWarnFunc onig_verb_warn = (OnigWarnFunc )DEFAULT_VERB_WARN_FUNCTION; #else static OnigWarnFunc onig_verb_warn = onig_null_warn; #endif extern void onig_set_warn_func(OnigWarnFunc f) { onig_warn = f; } extern void onig_set_verb_warn_func(OnigWarnFunc f) { onig_verb_warn = f; } extern void onig_warning(const char* s) { if (onig_warn == onig_null_warn) return ; (*onig_warn)(s); } #define DEFAULT_MAX_CAPTURE_NUM 32767 static int MaxCaptureNum = DEFAULT_MAX_CAPTURE_NUM; extern int onig_set_capture_num_limit(int num) { if (num < 0) return -1; MaxCaptureNum = num; return 0; } static unsigned int ParseDepthLimit = DEFAULT_PARSE_DEPTH_LIMIT; extern unsigned int onig_get_parse_depth_limit(void) { return ParseDepthLimit; } extern int onig_set_parse_depth_limit(unsigned int depth) { if (depth == 0) ParseDepthLimit = DEFAULT_PARSE_DEPTH_LIMIT; else ParseDepthLimit = depth; return 0; } static int bbuf_init(BBuf* buf, int size) { if (size <= 0) { size = 0; buf->p = NULL; } else { buf->p = (UChar* )xmalloc(size); if (IS_NULL(buf->p)) return(ONIGERR_MEMORY); } buf->alloc = size; buf->used = 0; return 0; } static void bbuf_free(BBuf* bbuf) { if (IS_NOT_NULL(bbuf)) { if (IS_NOT_NULL(bbuf->p)) xfree(bbuf->p); xfree(bbuf); } } static int bbuf_clone(BBuf** rto, BBuf* from) { int r; BBuf *to; *rto = to = (BBuf* )xmalloc(sizeof(BBuf)); CHECK_NULL_RETURN_MEMERR(to); r = BB_INIT(to, from->alloc); if (r != 0) { xfree(to->p); *rto = 0; return r; } to->used = from->used; xmemcpy(to->p, from->p, from->used); return 0; } static int backref_rel_to_abs(int rel_no, ScanEnv* env) { if (rel_no > 0) { return env->num_mem + rel_no; } else { return env->num_mem + 1 + rel_no; } } #define OPTION_ON(v,f) ((v) |= (f)) #define OPTION_OFF(v,f) ((v) &= ~(f)) #define OPTION_NEGATE(v,f,negative) (negative) ? ((v) &= ~(f)) : ((v) |= (f)) #define MBCODE_START_POS(enc) \ (OnigCodePoint )(ONIGENC_MBC_MINLEN(enc) > 1 ? 0 : 0x80) #define SET_ALL_MULTI_BYTE_RANGE(enc, pbuf) \ add_code_range_to_buf(pbuf, MBCODE_START_POS(enc), ~((OnigCodePoint )0)) #define ADD_ALL_MULTI_BYTE_RANGE(enc, mbuf) do {\ if (! 
ONIGENC_IS_SINGLEBYTE(enc)) {\ r = SET_ALL_MULTI_BYTE_RANGE(enc, &(mbuf));\ if (r != 0) return r;\ }\ } while (0) #define BITSET_IS_EMPTY(bs,empty) do {\ int i;\ empty = 1;\ for (i = 0; i < (int )BITSET_SIZE; i++) {\ if ((bs)[i] != 0) {\ empty = 0; break;\ }\ }\ } while (0) static void bitset_set_range(BitSetRef bs, int from, int to) { int i; for (i = from; i <= to && i < SINGLE_BYTE_SIZE; i++) { BITSET_SET_BIT(bs, i); } } #if 0 static void bitset_set_all(BitSetRef bs) { int i; for (i = 0; i < BITSET_SIZE; i++) { bs[i] = ~((Bits )0); } } #endif static void bitset_invert(BitSetRef bs) { int i; for (i = 0; i < (int )BITSET_SIZE; i++) { bs[i] = ~(bs[i]); } } static void bitset_invert_to(BitSetRef from, BitSetRef to) { int i; for (i = 0; i < (int )BITSET_SIZE; i++) { to[i] = ~(from[i]); } } static void bitset_and(BitSetRef dest, BitSetRef bs) { int i; for (i = 0; i < (int )BITSET_SIZE; i++) { dest[i] &= bs[i]; } } static void bitset_or(BitSetRef dest, BitSetRef bs) { int i; for (i = 0; i < (int )BITSET_SIZE; i++) { dest[i] |= bs[i]; } } static void bitset_copy(BitSetRef dest, BitSetRef bs) { int i; for (i = 0; i < (int )BITSET_SIZE; i++) { dest[i] = bs[i]; } } extern int onig_strncmp(const UChar* s1, const UChar* s2, int n) { int x; while (n-- > 0) { x = *s2++ - *s1++; if (x) return x; } return 0; } extern void onig_strcpy(UChar* dest, const UChar* src, const UChar* end) { int len = (int )(end - src); if (len > 0) { xmemcpy(dest, src, len); dest[len] = (UChar )0; } } static int save_entry(ScanEnv* env, enum SaveType type, int* id) { int nid = env->save_num; #if 0 if (IS_NULL(env->saves)) { int n = 10; env->saves = (SaveItem* )xmalloc(sizeof(SaveItem) * n); CHECK_NULL_RETURN_MEMERR(env->saves); env->save_alloc_num = n; } else if (env->save_alloc_num <= nid) { int n = env->save_alloc_num * 2; SaveItem* p = (SaveItem* )xrealloc(env->saves, sizeof(SaveItem) * n); CHECK_NULL_RETURN_MEMERR(p); env->saves = p; env->save_alloc_num = n; } env->saves[nid].type = type; #endif env->save_num++; *id = nid; return 0; } /* scan pattern methods */ #define PEND_VALUE 0 #define PFETCH_READY UChar* pfetch_prev #define PEND (p < end ? 0 : 1) #define PUNFETCH p = pfetch_prev #define PINC do { \ pfetch_prev = p; \ p += ONIGENC_MBC_ENC_LEN(enc, p); \ } while (0) #define PFETCH(c) do { \ c = ONIGENC_MBC_TO_CODE(enc, p, end); \ pfetch_prev = p; \ p += ONIGENC_MBC_ENC_LEN(enc, p); \ } while (0) #define PINC_S do { \ p += ONIGENC_MBC_ENC_LEN(enc, p); \ } while (0) #define PFETCH_S(c) do { \ c = ONIGENC_MBC_TO_CODE(enc, p, end); \ p += ONIGENC_MBC_ENC_LEN(enc, p); \ } while (0) #define PPEEK (p < end ? 
ONIGENC_MBC_TO_CODE(enc, p, end) : PEND_VALUE) #define PPEEK_IS(c) (PPEEK == (OnigCodePoint )c) static UChar* strcat_capa(UChar* dest, UChar* dest_end, const UChar* src, const UChar* src_end, int capa) { UChar* r; if (dest) r = (UChar* )xrealloc(dest, capa + 1); else r = (UChar* )xmalloc(capa + 1); CHECK_NULL_RETURN(r); onig_strcpy(r + (dest_end - dest), src, src_end); return r; } /* dest on static area */ static UChar* strcat_capa_from_static(UChar* dest, UChar* dest_end, const UChar* src, const UChar* src_end, int capa) { UChar* r; r = (UChar* )xmalloc(capa + 1); CHECK_NULL_RETURN(r); onig_strcpy(r, dest, dest_end); onig_strcpy(r + (dest_end - dest), src, src_end); return r; } #ifdef USE_ST_LIBRARY typedef struct { UChar* s; UChar* end; } st_str_end_key; static int str_end_cmp(st_str_end_key* x, st_str_end_key* y) { UChar *p, *q; int c; if ((x->end - x->s) != (y->end - y->s)) return 1; p = x->s; q = y->s; while (p < x->end) { c = (int )*p - (int )*q; if (c != 0) return c; p++; q++; } return 0; } static int str_end_hash(st_str_end_key* x) { UChar *p; int val = 0; p = x->s; while (p < x->end) { val = val * 997 + (int )*p++; } return val + (val >> 5); } extern hash_table_type* onig_st_init_strend_table_with_size(int size) { static struct st_hash_type hashType = { str_end_cmp, str_end_hash, }; return (hash_table_type* ) onig_st_init_table_with_size(&hashType, size); } extern int onig_st_lookup_strend(hash_table_type* table, const UChar* str_key, const UChar* end_key, hash_data_type *value) { st_str_end_key key; key.s = (UChar* )str_key; key.end = (UChar* )end_key; return onig_st_lookup(table, (st_data_t )(&key), value); } extern int onig_st_insert_strend(hash_table_type* table, const UChar* str_key, const UChar* end_key, hash_data_type value) { st_str_end_key* key; int result; key = (st_str_end_key* )xmalloc(sizeof(st_str_end_key)); CHECK_NULL_RETURN_MEMERR(key); key->s = (UChar* )str_key; key->end = (UChar* )end_key; result = onig_st_insert(table, (st_data_t )key, value); if (result) { xfree(key); } return result; } #ifdef USE_CALLOUT typedef struct { OnigEncoding enc; int type; /* callout type: single or not */ UChar* s; UChar* end; } st_callout_name_key; static int callout_name_table_cmp(st_callout_name_key* x, st_callout_name_key* y) { UChar *p, *q; int c; if (x->enc != y->enc) return 1; if (x->type != y->type) return 1; if ((x->end - x->s) != (y->end - y->s)) return 1; p = x->s; q = y->s; while (p < x->end) { c = (int )*p - (int )*q; if (c != 0) return c; p++; q++; } return 0; } static int callout_name_table_hash(st_callout_name_key* x) { UChar *p; int val = 0; p = x->s; while (p < x->end) { val = val * 997 + (int )*p++; } /* use intptr_t for escape warning in Windows */ return val + (val >> 5) + ((intptr_t )x->enc & 0xffff) + x->type; } extern hash_table_type* onig_st_init_callout_name_table_with_size(int size) { static struct st_hash_type hashType = { callout_name_table_cmp, callout_name_table_hash, }; return (hash_table_type* ) onig_st_init_table_with_size(&hashType, size); } extern int onig_st_lookup_callout_name_table(hash_table_type* table, OnigEncoding enc, int type, const UChar* str_key, const UChar* end_key, hash_data_type *value) { st_callout_name_key key; key.enc = enc; key.type = type; key.s = (UChar* )str_key; key.end = (UChar* )end_key; return onig_st_lookup(table, (st_data_t )(&key), value); } static int st_insert_callout_name_table(hash_table_type* table, OnigEncoding enc, int type, UChar* str_key, UChar* end_key, hash_data_type value) { st_callout_name_key* key; int 
result; key = (st_callout_name_key* )xmalloc(sizeof(st_callout_name_key)); CHECK_NULL_RETURN_MEMERR(key); /* key->s: don't duplicate, because str_key is duped in callout_name_entry() */ key->enc = enc; key->type = type; key->s = str_key; key->end = end_key; result = onig_st_insert(table, (st_data_t )key, value); if (result) { xfree(key); } return result; } #endif #endif /* USE_ST_LIBRARY */ #define INIT_NAME_BACKREFS_ALLOC_NUM 8 typedef struct { UChar* name; int name_len; /* byte length */ int back_num; /* number of backrefs */ int back_alloc; int back_ref1; int* back_refs; } NameEntry; #ifdef USE_ST_LIBRARY #define INIT_NAMES_ALLOC_NUM 5 typedef st_table NameTable; typedef st_data_t HashDataType; /* 1.6 st.h doesn't define st_data_t type */ #define NAMEBUF_SIZE 24 #define NAMEBUF_SIZE_1 25 #ifdef ONIG_DEBUG static int i_print_name_entry(UChar* key, NameEntry* e, void* arg) { int i; FILE* fp = (FILE* )arg; fprintf(fp, "%s: ", e->name); if (e->back_num == 0) fputs("-", fp); else if (e->back_num == 1) fprintf(fp, "%d", e->back_ref1); else { for (i = 0; i < e->back_num; i++) { if (i > 0) fprintf(fp, ", "); fprintf(fp, "%d", e->back_refs[i]); } } fputs("\n", fp); return ST_CONTINUE; } extern int onig_print_names(FILE* fp, regex_t* reg) { NameTable* t = (NameTable* )reg->name_table; if (IS_NOT_NULL(t)) { fprintf(fp, "name table\n"); onig_st_foreach(t, i_print_name_entry, (HashDataType )fp); fputs("\n", fp); } return 0; } #endif /* ONIG_DEBUG */ static int i_free_name_entry(UChar* key, NameEntry* e, void* arg ARG_UNUSED) { xfree(e->name); if (IS_NOT_NULL(e->back_refs)) xfree(e->back_refs); xfree(key); xfree(e); return ST_DELETE; } static int names_clear(regex_t* reg) { NameTable* t = (NameTable* )reg->name_table; if (IS_NOT_NULL(t)) { onig_st_foreach(t, i_free_name_entry, 0); } return 0; } extern int onig_names_free(regex_t* reg) { int r; NameTable* t; r = names_clear(reg); if (r != 0) return r; t = (NameTable* )reg->name_table; if (IS_NOT_NULL(t)) onig_st_free_table(t); reg->name_table = (void* )NULL; return 0; } static NameEntry* name_find(regex_t* reg, const UChar* name, const UChar* name_end) { NameEntry* e; NameTable* t = (NameTable* )reg->name_table; e = (NameEntry* )NULL; if (IS_NOT_NULL(t)) { onig_st_lookup_strend(t, name, name_end, (HashDataType* )((void* )(&e))); } return e; } typedef struct { int (*func)(const UChar*, const UChar*,int,int*,regex_t*,void*); regex_t* reg; void* arg; int ret; OnigEncoding enc; } INamesArg; static int i_names(UChar* key ARG_UNUSED, NameEntry* e, INamesArg* arg) { int r = (*(arg->func))(e->name, e->name + e->name_len, e->back_num, (e->back_num > 1 ? e->back_refs : &(e->back_ref1)), arg->reg, arg->arg); if (r != 0) { arg->ret = r; return ST_STOP; } return ST_CONTINUE; } extern int onig_foreach_name(regex_t* reg, int (*func)(const UChar*, const UChar*,int,int*,regex_t*,void*), void* arg) { INamesArg narg; NameTable* t = (NameTable* )reg->name_table; narg.ret = 0; if (IS_NOT_NULL(t)) { narg.func = func; narg.reg = reg; narg.arg = arg; narg.enc = reg->enc; /* should be pattern encoding. 
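       Illustrative only (not part of the original source): a minimal,
       hypothetical callback for onig_foreach_name().  Each registered group
       name is passed together with the group numbers bound to it; a non-zero
       return value stops the iteration and becomes the return value of
       onig_foreach_name().

         static int dump_name(const UChar* name, const UChar* name_end,
                              int n_groups, int* group_nums,
                              regex_t* reg, void* arg)
         {
           fprintf((FILE* )arg, "%.*s -> %d group(s)\n",
                   (int )(name_end - name), (const char* )name, n_groups);
           return 0;
         }

         onig_foreach_name(reg, dump_name, stdout);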
*/ onig_st_foreach(t, i_names, (HashDataType )&narg); } return narg.ret; } static int i_renumber_name(UChar* key ARG_UNUSED, NameEntry* e, GroupNumRemap* map) { int i; if (e->back_num > 1) { for (i = 0; i < e->back_num; i++) { e->back_refs[i] = map[e->back_refs[i]].new_val; } } else if (e->back_num == 1) { e->back_ref1 = map[e->back_ref1].new_val; } return ST_CONTINUE; } extern int onig_renumber_name_table(regex_t* reg, GroupNumRemap* map) { NameTable* t = (NameTable* )reg->name_table; if (IS_NOT_NULL(t)) { onig_st_foreach(t, i_renumber_name, (HashDataType )map); } return 0; } extern int onig_number_of_names(regex_t* reg) { NameTable* t = (NameTable* )reg->name_table; if (IS_NOT_NULL(t)) return t->num_entries; else return 0; } #else /* USE_ST_LIBRARY */ #define INIT_NAMES_ALLOC_NUM 8 typedef struct { NameEntry* e; int num; int alloc; } NameTable; #ifdef ONIG_DEBUG extern int onig_print_names(FILE* fp, regex_t* reg) { int i, j; NameEntry* e; NameTable* t = (NameTable* )reg->name_table; if (IS_NOT_NULL(t) && t->num > 0) { fprintf(fp, "name table\n"); for (i = 0; i < t->num; i++) { e = &(t->e[i]); fprintf(fp, "%s: ", e->name); if (e->back_num == 0) { fputs("-", fp); } else if (e->back_num == 1) { fprintf(fp, "%d", e->back_ref1); } else { for (j = 0; j < e->back_num; j++) { if (j > 0) fprintf(fp, ", "); fprintf(fp, "%d", e->back_refs[j]); } } fputs("\n", fp); } fputs("\n", fp); } return 0; } #endif static int names_clear(regex_t* reg) { int i; NameEntry* e; NameTable* t = (NameTable* )reg->name_table; if (IS_NOT_NULL(t)) { for (i = 0; i < t->num; i++) { e = &(t->e[i]); if (IS_NOT_NULL(e->name)) { xfree(e->name); e->name = NULL; e->name_len = 0; e->back_num = 0; e->back_alloc = 0; if (IS_NOT_NULL(e->back_refs)) xfree(e->back_refs); e->back_refs = (int* )NULL; } } if (IS_NOT_NULL(t->e)) { xfree(t->e); t->e = NULL; } t->num = 0; } return 0; } extern int onig_names_free(regex_t* reg) { int r; NameTable* t; r = names_clear(reg); if (r != 0) return r; t = (NameTable* )reg->name_table; if (IS_NOT_NULL(t)) xfree(t); reg->name_table = NULL; return 0; } static NameEntry* name_find(regex_t* reg, UChar* name, UChar* name_end) { int i, len; NameEntry* e; NameTable* t = (NameTable* )reg->name_table; if (IS_NOT_NULL(t)) { len = name_end - name; for (i = 0; i < t->num; i++) { e = &(t->e[i]); if (len == e->name_len && onig_strncmp(name, e->name, len) == 0) return e; } } return (NameEntry* )NULL; } extern int onig_foreach_name(regex_t* reg, int (*func)(const UChar*, const UChar*,int,int*,regex_t*,void*), void* arg) { int i, r; NameEntry* e; NameTable* t = (NameTable* )reg->name_table; if (IS_NOT_NULL(t)) { for (i = 0; i < t->num; i++) { e = &(t->e[i]); r = (*func)(e->name, e->name + e->name_len, e->back_num, (e->back_num > 1 ? 
e->back_refs : &(e->back_ref1)), reg, arg); if (r != 0) return r; } } return 0; } extern int onig_number_of_names(regex_t* reg) { NameTable* t = (NameTable* )reg->name_table; if (IS_NOT_NULL(t)) return t->num; else return 0; } #endif /* else USE_ST_LIBRARY */ static int name_add(regex_t* reg, UChar* name, UChar* name_end, int backref, ScanEnv* env) { int r; int alloc; NameEntry* e; NameTable* t = (NameTable* )reg->name_table; if (name_end - name <= 0) return ONIGERR_EMPTY_GROUP_NAME; e = name_find(reg, name, name_end); if (IS_NULL(e)) { #ifdef USE_ST_LIBRARY if (IS_NULL(t)) { t = onig_st_init_strend_table_with_size(INIT_NAMES_ALLOC_NUM); CHECK_NULL_RETURN_MEMERR(t); reg->name_table = (void* )t; } e = (NameEntry* )xmalloc(sizeof(NameEntry)); CHECK_NULL_RETURN_MEMERR(e); e->name = onigenc_strdup(reg->enc, name, name_end); if (IS_NULL(e->name)) { xfree(e); return ONIGERR_MEMORY; } r = onig_st_insert_strend(t, e->name, (e->name + (name_end - name)), (HashDataType )e); if (r < 0) return r; e->name_len = (int )(name_end - name); e->back_num = 0; e->back_alloc = 0; e->back_refs = (int* )NULL; #else if (IS_NULL(t)) { alloc = INIT_NAMES_ALLOC_NUM; t = (NameTable* )xmalloc(sizeof(NameTable)); CHECK_NULL_RETURN_MEMERR(t); t->e = NULL; t->alloc = 0; t->num = 0; t->e = (NameEntry* )xmalloc(sizeof(NameEntry) * alloc); if (IS_NULL(t->e)) { xfree(t); return ONIGERR_MEMORY; } t->alloc = alloc; reg->name_table = t; goto clear; } else if (t->num == t->alloc) { int i; alloc = t->alloc * 2; t->e = (NameEntry* )xrealloc(t->e, sizeof(NameEntry) * alloc); CHECK_NULL_RETURN_MEMERR(t->e); t->alloc = alloc; clear: for (i = t->num; i < t->alloc; i++) { t->e[i].name = NULL; t->e[i].name_len = 0; t->e[i].back_num = 0; t->e[i].back_alloc = 0; t->e[i].back_refs = (int* )NULL; } } e = &(t->e[t->num]); t->num++; e->name = onigenc_strdup(reg->enc, name, name_end); if (IS_NULL(e->name)) return ONIGERR_MEMORY; e->name_len = name_end - name; #endif } if (e->back_num >= 1 && ! 
IS_SYNTAX_BV(env->syntax, ONIG_SYN_ALLOW_MULTIPLEX_DEFINITION_NAME)) { onig_scan_env_set_error_string(env, ONIGERR_MULTIPLEX_DEFINED_NAME, name, name_end); return ONIGERR_MULTIPLEX_DEFINED_NAME; } e->back_num++; if (e->back_num == 1) { e->back_ref1 = backref; } else { if (e->back_num == 2) { alloc = INIT_NAME_BACKREFS_ALLOC_NUM; e->back_refs = (int* )xmalloc(sizeof(int) * alloc); CHECK_NULL_RETURN_MEMERR(e->back_refs); e->back_alloc = alloc; e->back_refs[0] = e->back_ref1; e->back_refs[1] = backref; } else { if (e->back_num > e->back_alloc) { alloc = e->back_alloc * 2; e->back_refs = (int* )xrealloc(e->back_refs, sizeof(int) * alloc); CHECK_NULL_RETURN_MEMERR(e->back_refs); e->back_alloc = alloc; } e->back_refs[e->back_num - 1] = backref; } } return 0; } extern int onig_name_to_group_numbers(regex_t* reg, const UChar* name, const UChar* name_end, int** nums) { NameEntry* e = name_find(reg, name, name_end); if (IS_NULL(e)) return ONIGERR_UNDEFINED_NAME_REFERENCE; switch (e->back_num) { case 0: break; case 1: *nums = &(e->back_ref1); break; default: *nums = e->back_refs; break; } return e->back_num; } extern int onig_name_to_backref_number(regex_t* reg, const UChar* name, const UChar* name_end, OnigRegion *region) { int i, n, *nums; n = onig_name_to_group_numbers(reg, name, name_end, &nums); if (n < 0) return n; else if (n == 0) return ONIGERR_PARSER_BUG; else if (n == 1) return nums[0]; else { if (IS_NOT_NULL(region)) { for (i = n - 1; i >= 0; i--) { if (region->beg[nums[i]] != ONIG_REGION_NOTPOS) return nums[i]; } } return nums[n - 1]; } } extern int onig_noname_group_capture_is_active(regex_t* reg) { if (ONIG_IS_OPTION_ON(reg->options, ONIG_OPTION_DONT_CAPTURE_GROUP)) return 0; if (onig_number_of_names(reg) > 0 && IS_SYNTAX_BV(reg->syntax, ONIG_SYN_CAPTURE_ONLY_NAMED_GROUP) && !ONIG_IS_OPTION_ON(reg->options, ONIG_OPTION_CAPTURE_GROUP)) { return 0; } return 1; } #ifdef USE_CALLOUT typedef struct { OnigCalloutType type; int in; OnigCalloutFunc start_func; OnigCalloutFunc end_func; int arg_num; int opt_arg_num; unsigned int arg_types[ONIG_CALLOUT_MAX_ARGS_NUM]; OnigValue opt_defaults[ONIG_CALLOUT_MAX_ARGS_NUM]; UChar* name; /* reference to GlobalCalloutNameTable entry: e->name */ } CalloutNameListEntry; typedef struct { int n; int alloc; CalloutNameListEntry* v; } CalloutNameListType; static CalloutNameListType* GlobalCalloutNameList; static int make_callout_func_list(CalloutNameListType** rs, int init_size) { CalloutNameListType* s; CalloutNameListEntry* v; *rs = 0; s = xmalloc(sizeof(*s)); if (IS_NULL(s)) return ONIGERR_MEMORY; v = (CalloutNameListEntry* )xmalloc(sizeof(CalloutNameListEntry) * init_size); if (IS_NULL(v)) { xfree(s); return ONIGERR_MEMORY; } s->n = 0; s->alloc = init_size; s->v = v; *rs = s; return ONIG_NORMAL; } static void free_callout_func_list(CalloutNameListType* s) { if (IS_NOT_NULL(s)) { if (IS_NOT_NULL(s->v)) { int i, j; for (i = 0; i < s->n; i++) { CalloutNameListEntry* e = s->v + i; for (j = e->arg_num - e->opt_arg_num; j < e->arg_num; j++) { if (e->arg_types[j] == ONIG_TYPE_STRING) { UChar* p = e->opt_defaults[j].s.start; if (IS_NOT_NULL(p)) xfree(p); } } } xfree(s->v); } xfree(s); } } static int callout_func_list_add(CalloutNameListType* s, int* rid) { if (s->n >= s->alloc) { int new_size = s->alloc * 2; CalloutNameListEntry* nv = (CalloutNameListEntry* ) xrealloc(s->v, sizeof(CalloutNameListEntry) * new_size); if (IS_NULL(nv)) return ONIGERR_MEMORY; s->alloc = new_size; s->v = nv; } *rid = s->n; xmemset(&(s->v[s->n]), 0, sizeof(*(s->v))); s->n++; return 
ONIG_NORMAL; } typedef struct { UChar* name; int name_len; /* byte length */ int id; } CalloutNameEntry; #ifdef USE_ST_LIBRARY typedef st_table CalloutNameTable; #else typedef struct { CalloutNameEntry* e; int num; int alloc; } CalloutNameTable; #endif static CalloutNameTable* GlobalCalloutNameTable; static int CalloutNameIDCounter; #ifdef USE_ST_LIBRARY static int i_free_callout_name_entry(st_callout_name_key* key, CalloutNameEntry* e, void* arg ARG_UNUSED) { xfree(e->name); /*xfree(key->s); */ /* is same as e->name */ xfree(key); xfree(e); return ST_DELETE; } static int callout_name_table_clear(CalloutNameTable* t) { if (IS_NOT_NULL(t)) { onig_st_foreach(t, i_free_callout_name_entry, 0); } return 0; } static int global_callout_name_table_free(void) { if (IS_NOT_NULL(GlobalCalloutNameTable)) { int r = callout_name_table_clear(GlobalCalloutNameTable); if (r != 0) return r; onig_st_free_table(GlobalCalloutNameTable); GlobalCalloutNameTable = 0; CalloutNameIDCounter = 0; } return 0; } static CalloutNameEntry* callout_name_find(OnigEncoding enc, int is_not_single, const UChar* name, const UChar* name_end) { int r; CalloutNameEntry* e; CalloutNameTable* t = GlobalCalloutNameTable; e = (CalloutNameEntry* )NULL; if (IS_NOT_NULL(t)) { r = onig_st_lookup_callout_name_table(t, enc, is_not_single, name, name_end, (HashDataType* )((void* )(&e))); if (r == 0) { /* not found */ if (enc != ONIG_ENCODING_ASCII && ONIGENC_IS_ASCII_COMPATIBLE_ENCODING(enc)) { enc = ONIG_ENCODING_ASCII; onig_st_lookup_callout_name_table(t, enc, is_not_single, name, name_end, (HashDataType* )((void* )(&e))); } } } return e; } #else static int callout_name_table_clear(CalloutNameTable* t) { int i; CalloutNameEntry* e; if (IS_NOT_NULL(t)) { for (i = 0; i < t->num; i++) { e = &(t->e[i]); if (IS_NOT_NULL(e->name)) { xfree(e->name); e->name = NULL; e->name_len = 0; e->id = 0; e->func = 0; } } if (IS_NOT_NULL(t->e)) { xfree(t->e); t->e = NULL; } t->num = 0; } return 0; } static int global_callout_name_table_free(void) { if (IS_NOT_NULL(GlobalCalloutNameTable)) { int r = callout_name_table_clear(GlobalCalloutNameTable); if (r != 0) return r; xfree(GlobalCalloutNameTable); GlobalCalloutNameTable = 0; CalloutNameIDCounter = 0; } return 0; } static CalloutNameEntry* callout_name_find(UChar* name, UChar* name_end) { int i, len; CalloutNameEntry* e; CalloutNameTable* t = Calloutnames; if (IS_NOT_NULL(t)) { len = name_end - name; for (i = 0; i < t->num; i++) { e = &(t->e[i]); if (len == e->name_len && onig_strncmp(name, e->name, len) == 0) return e; } } return (CalloutNameEntry* )NULL; } #endif /* name string must be single byte char string. 
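   A hedged usage sketch (not part of the original source): a caller-defined
   callout function my_callout_func could be registered under the name "foo"
   roughly as follows; no end function and no callout arguments are declared,
   and the non-negative return value is the name id that indexes
   GlobalCalloutNameList.

     UChar* name = (UChar* )"foo";
     int id = onig_set_callout_of_name(ONIG_ENCODING_ASCII,
                                       ONIG_CALLOUT_TYPE_SINGLE,
                                       name, name + 3,
                                       ONIG_CALLOUT_IN_PROGRESS,
                                       my_callout_func, 0,
                                       0, 0, 0, 0);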
*/ static int callout_name_entry(CalloutNameEntry** rentry, OnigEncoding enc, int is_not_single, UChar* name, UChar* name_end) { int r; CalloutNameEntry* e; CalloutNameTable* t = GlobalCalloutNameTable; *rentry = 0; if (name_end - name <= 0) return ONIGERR_INVALID_CALLOUT_NAME; e = callout_name_find(enc, is_not_single, name, name_end); if (IS_NULL(e)) { #ifdef USE_ST_LIBRARY if (IS_NULL(t)) { t = onig_st_init_callout_name_table_with_size(INIT_NAMES_ALLOC_NUM); CHECK_NULL_RETURN_MEMERR(t); GlobalCalloutNameTable = t; } e = (CalloutNameEntry* )xmalloc(sizeof(CalloutNameEntry)); CHECK_NULL_RETURN_MEMERR(e); e->name = onigenc_strdup(enc, name, name_end); if (IS_NULL(e->name)) { xfree(e); return ONIGERR_MEMORY; } r = st_insert_callout_name_table(t, enc, is_not_single, e->name, (e->name + (name_end - name)), (HashDataType )e); if (r < 0) return r; #else int alloc; if (IS_NULL(t)) { alloc = INIT_NAMES_ALLOC_NUM; t = (CalloutNameTable* )xmalloc(sizeof(CalloutNameTable)); CHECK_NULL_RETURN_MEMERR(t); t->e = NULL; t->alloc = 0; t->num = 0; t->e = (CalloutNameEntry* )xmalloc(sizeof(CalloutNameEntry) * alloc); if (IS_NULL(t->e)) { xfree(t); return ONIGERR_MEMORY; } t->alloc = alloc; GlobalCalloutNameTable = t; goto clear; } else if (t->num == t->alloc) { int i; alloc = t->alloc * 2; t->e = (CalloutNameEntry* )xrealloc(t->e, sizeof(CalloutNameEntry) * alloc); CHECK_NULL_RETURN_MEMERR(t->e); t->alloc = alloc; clear: for (i = t->num; i < t->alloc; i++) { t->e[i].name = NULL; t->e[i].name_len = 0; t->e[i].id = 0; } } e = &(t->e[t->num]); t->num++; e->name = onigenc_strdup(enc, name, name_end); if (IS_NULL(e->name)) return ONIGERR_MEMORY; #endif CalloutNameIDCounter++; e->id = CalloutNameIDCounter; e->name_len = (int )(name_end - name); } *rentry = e; return e->id; } static int is_allowed_callout_name(OnigEncoding enc, UChar* name, UChar* name_end) { UChar* p; OnigCodePoint c; if (name >= name_end) return 0; p = name; while (p < name_end) { c = ONIGENC_MBC_TO_CODE(enc, p, name_end); if (! IS_ALLOWED_CODE_IN_CALLOUT_NAME(c)) return 0; if (p == name) { if (c >= '0' && c <= '9') return 0; } p += ONIGENC_MBC_ENC_LEN(enc, p); } return 1; } static int is_allowed_callout_tag_name(OnigEncoding enc, UChar* name, UChar* name_end) { UChar* p; OnigCodePoint c; if (name >= name_end) return 0; p = name; while (p < name_end) { c = ONIGENC_MBC_TO_CODE(enc, p, name_end); if (! 
IS_ALLOWED_CODE_IN_CALLOUT_TAG_NAME(c)) return 0; if (p == name) { if (c >= '0' && c <= '9') return 0; } p += ONIGENC_MBC_ENC_LEN(enc, p); } return 1; } extern int onig_set_callout_of_name(OnigEncoding enc, OnigCalloutType callout_type, UChar* name, UChar* name_end, int in, OnigCalloutFunc start_func, OnigCalloutFunc end_func, int arg_num, unsigned int arg_types[], int opt_arg_num, OnigValue opt_defaults[]) { int r; int i; int j; int id; int is_not_single; CalloutNameEntry* e; CalloutNameListEntry* fe; if (callout_type != ONIG_CALLOUT_TYPE_SINGLE) return ONIGERR_INVALID_ARGUMENT; if (arg_num < 0 || arg_num > ONIG_CALLOUT_MAX_ARGS_NUM) return ONIGERR_INVALID_CALLOUT_ARG; if (opt_arg_num < 0 || opt_arg_num > arg_num) return ONIGERR_INVALID_CALLOUT_ARG; if (start_func == 0 && end_func == 0) return ONIGERR_INVALID_CALLOUT_ARG; if ((in & ONIG_CALLOUT_IN_PROGRESS) == 0 && (in & ONIG_CALLOUT_IN_RETRACTION) == 0) return ONIGERR_INVALID_CALLOUT_ARG; for (i = 0; i < arg_num; i++) { unsigned int t = arg_types[i]; if (t == ONIG_TYPE_VOID) return ONIGERR_INVALID_CALLOUT_ARG; else { if (i >= arg_num - opt_arg_num) { if (t != ONIG_TYPE_LONG && t != ONIG_TYPE_CHAR && t != ONIG_TYPE_STRING && t != ONIG_TYPE_TAG) return ONIGERR_INVALID_CALLOUT_ARG; } else { if (t != ONIG_TYPE_LONG) { t = t & ~ONIG_TYPE_LONG; if (t != ONIG_TYPE_CHAR && t != ONIG_TYPE_STRING && t != ONIG_TYPE_TAG) return ONIGERR_INVALID_CALLOUT_ARG; } } } } if (! is_allowed_callout_name(enc, name, name_end)) { return ONIGERR_INVALID_CALLOUT_NAME; } is_not_single = (callout_type != ONIG_CALLOUT_TYPE_SINGLE); id = callout_name_entry(&e, enc, is_not_single, name, name_end); if (id < 0) return id; r = ONIG_NORMAL; if (IS_NULL(GlobalCalloutNameList)) { r = make_callout_func_list(&GlobalCalloutNameList, 10); if (r != ONIG_NORMAL) return r; } while (id >= GlobalCalloutNameList->n) { int rid; r = callout_func_list_add(GlobalCalloutNameList, &rid); if (r != ONIG_NORMAL) return r; } fe = GlobalCalloutNameList->v + id; fe->type = callout_type; fe->in = in; fe->start_func = start_func; fe->end_func = end_func; fe->arg_num = arg_num; fe->opt_arg_num = opt_arg_num; fe->name = e->name; for (i = 0; i < arg_num; i++) { fe->arg_types[i] = arg_types[i]; } for (i = arg_num - opt_arg_num, j = 0; i < arg_num; i++, j++) { if (fe->arg_types[i] == ONIG_TYPE_STRING) { OnigValue* val; UChar* ds; if (IS_NULL(opt_defaults)) return ONIGERR_INVALID_ARGUMENT; val = opt_defaults + j; ds = onigenc_strdup(enc, val->s.start, val->s.end); CHECK_NULL_RETURN_MEMERR(ds); fe->opt_defaults[i].s.start = ds; fe->opt_defaults[i].s.end = ds + (val->s.end - val->s.start); } else { fe->opt_defaults[i] = opt_defaults[j]; } } r = id; return r; } static int get_callout_name_id_by_name(OnigEncoding enc, int is_not_single, UChar* name, UChar* name_end, int* rid) { int r; CalloutNameEntry* e; if (! is_allowed_callout_name(enc, name, name_end)) { return ONIGERR_INVALID_CALLOUT_NAME; } e = callout_name_find(enc, is_not_single, name, name_end); if (IS_NULL(e)) { return ONIGERR_UNDEFINED_CALLOUT_NAME; } r = ONIG_NORMAL; *rid = e->id; return r; } extern OnigCalloutFunc onig_get_callout_start_func(regex_t* reg, int callout_num) { /* If used for callouts of contents, return 0. 
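   Callout list entries are created with start_func initialized to 0 (see
   reg_callout_list_entry() below); for callouts of contents it is expected
   to stay 0, hence the 0 result noted above.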
*/ CalloutListEntry* e; e = onig_reg_callout_list_at(reg, callout_num); CHECK_NULL_RETURN(e); return e->start_func; } extern const UChar* onig_get_callout_tag_start(regex_t* reg, int callout_num) { CalloutListEntry* e = onig_reg_callout_list_at(reg, callout_num); CHECK_NULL_RETURN(e); return e->tag_start; } extern const UChar* onig_get_callout_tag_end(regex_t* reg, int callout_num) { CalloutListEntry* e = onig_reg_callout_list_at(reg, callout_num); CHECK_NULL_RETURN(e); return e->tag_end; } extern OnigCalloutType onig_get_callout_type_by_name_id(int name_id) { if (name_id < 0 || name_id >= GlobalCalloutNameList->n) return 0; return GlobalCalloutNameList->v[name_id].type; } extern OnigCalloutFunc onig_get_callout_start_func_by_name_id(int name_id) { if (name_id < 0 || name_id >= GlobalCalloutNameList->n) return 0; return GlobalCalloutNameList->v[name_id].start_func; } extern OnigCalloutFunc onig_get_callout_end_func_by_name_id(int name_id) { if (name_id < 0 || name_id >= GlobalCalloutNameList->n) return 0; return GlobalCalloutNameList->v[name_id].end_func; } extern int onig_get_callout_in_by_name_id(int name_id) { if (name_id < 0 || name_id >= GlobalCalloutNameList->n) return 0; return GlobalCalloutNameList->v[name_id].in; } static int get_callout_arg_num_by_name_id(int name_id) { return GlobalCalloutNameList->v[name_id].arg_num; } static int get_callout_opt_arg_num_by_name_id(int name_id) { return GlobalCalloutNameList->v[name_id].opt_arg_num; } static unsigned int get_callout_arg_type_by_name_id(int name_id, int index) { return GlobalCalloutNameList->v[name_id].arg_types[index]; } static OnigValue get_callout_opt_default_by_name_id(int name_id, int index) { return GlobalCalloutNameList->v[name_id].opt_defaults[index]; } extern UChar* onig_get_callout_name_by_name_id(int name_id) { if (name_id < 0 || name_id >= GlobalCalloutNameList->n) return 0; return GlobalCalloutNameList->v[name_id].name; } extern int onig_global_callout_names_free(void) { free_callout_func_list(GlobalCalloutNameList); GlobalCalloutNameList = 0; global_callout_name_table_free(); return ONIG_NORMAL; } typedef st_table CalloutTagTable; typedef intptr_t CalloutTagVal; #define CALLOUT_TAG_LIST_FLAG_TAG_EXIST (1<<0) static int i_callout_callout_list_set(UChar* key, CalloutTagVal e, void* arg) { int num; RegexExt* ext = (RegexExt* )arg; num = (int )e - 1; ext->callout_list[num].flag |= CALLOUT_TAG_LIST_FLAG_TAG_EXIST; return ST_CONTINUE; } static int setup_ext_callout_list_values(regex_t* reg) { int i, j; RegexExt* ext; ext = reg->extp; if (IS_NOT_NULL(ext->tag_table)) { onig_st_foreach((CalloutTagTable *)ext->tag_table, i_callout_callout_list_set, (st_data_t )ext); } for (i = 0; i < ext->callout_num; i++) { CalloutListEntry* e = ext->callout_list + i; if (e->of == ONIG_CALLOUT_OF_NAME) { for (j = 0; j < e->u.arg.num; j++) { if (e->u.arg.types[j] == ONIG_TYPE_TAG) { UChar* start; UChar* end; int num; start = e->u.arg.vals[j].s.start; end = e->u.arg.vals[j].s.end; num = onig_get_callout_num_by_tag(reg, start, end); if (num < 0) return num; e->u.arg.vals[j].tag = num; } } } } return ONIG_NORMAL; } extern int onig_callout_tag_is_exist_at_callout_num(regex_t* reg, int callout_num) { RegexExt* ext = reg->extp; if (IS_NULL(ext) || IS_NULL(ext->callout_list)) return 0; if (callout_num > ext->callout_num) return 0; return (ext->callout_list[callout_num].flag & CALLOUT_TAG_LIST_FLAG_TAG_EXIST) != 0; } static int i_free_callout_tag_entry(UChar* key, CalloutTagVal e, void* arg ARG_UNUSED) { xfree(key); return ST_DELETE; } static int 
callout_tag_table_clear(CalloutTagTable* t) { if (IS_NOT_NULL(t)) { onig_st_foreach(t, i_free_callout_tag_entry, 0); } return 0; } extern int onig_callout_tag_table_free(void* table) { CalloutTagTable* t = (CalloutTagTable* )table; if (IS_NOT_NULL(t)) { int r = callout_tag_table_clear(t); if (r != 0) return r; onig_st_free_table(t); } return 0; } extern int onig_get_callout_num_by_tag(regex_t* reg, const UChar* tag, const UChar* tag_end) { int r; RegexExt* ext; CalloutTagVal e; ext = reg->extp; if (IS_NULL(ext) || IS_NULL(ext->tag_table)) return ONIGERR_INVALID_CALLOUT_TAG_NAME; r = onig_st_lookup_strend(ext->tag_table, tag, tag_end, (HashDataType* )((void* )(&e))); if (r == 0) return ONIGERR_INVALID_CALLOUT_TAG_NAME; return (int )e; } static CalloutTagVal callout_tag_find(CalloutTagTable* t, const UChar* name, const UChar* name_end) { CalloutTagVal e; e = -1; if (IS_NOT_NULL(t)) { onig_st_lookup_strend(t, name, name_end, (HashDataType* )((void* )(&e))); } return e; } static int callout_tag_table_new(CalloutTagTable** rt) { CalloutTagTable* t; *rt = 0; t = onig_st_init_strend_table_with_size(INIT_TAG_NAMES_ALLOC_NUM); CHECK_NULL_RETURN_MEMERR(t); *rt = t; return ONIG_NORMAL; } static int callout_tag_entry_raw(CalloutTagTable* t, UChar* name, UChar* name_end, CalloutTagVal entry_val) { int r; CalloutTagVal val; if (name_end - name <= 0) return ONIGERR_INVALID_CALLOUT_TAG_NAME; val = callout_tag_find(t, name, name_end); if (val >= 0) return ONIGERR_MULTIPLEX_DEFINED_NAME; r = onig_st_insert_strend(t, name, name_end, (HashDataType )entry_val); if (r < 0) return r; return ONIG_NORMAL; } static int ext_ensure_tag_table(regex_t* reg) { int r; RegexExt* ext; CalloutTagTable* t; ext = onig_get_regex_ext(reg); CHECK_NULL_RETURN_MEMERR(ext); if (IS_NULL(ext->tag_table)) { r = callout_tag_table_new(&t); if (r != ONIG_NORMAL) return r; ext->tag_table = t; } return ONIG_NORMAL; } static int callout_tag_entry(regex_t* reg, UChar* name, UChar* name_end, CalloutTagVal entry_val) { int r; RegexExt* ext; CalloutListEntry* e; r = ext_ensure_tag_table(reg); if (r != ONIG_NORMAL) return r; ext = onig_get_regex_ext(reg); CHECK_NULL_RETURN_MEMERR(ext); r = callout_tag_entry_raw(ext->tag_table, name, name_end, entry_val); e = onig_reg_callout_list_at(reg, (int )entry_val); CHECK_NULL_RETURN_MEMERR(e); e->tag_start = name; e->tag_end = name_end; return r; } #endif /* USE_CALLOUT */ #define INIT_SCANENV_MEMENV_ALLOC_SIZE 16 static void scan_env_clear(ScanEnv* env) { MEM_STATUS_CLEAR(env->capture_history); MEM_STATUS_CLEAR(env->bt_mem_start); MEM_STATUS_CLEAR(env->bt_mem_end); MEM_STATUS_CLEAR(env->backrefed_mem); env->error = (UChar* )NULL; env->error_end = (UChar* )NULL; env->num_call = 0; #ifdef USE_CALL env->unset_addr_list = NULL; env->has_call_zero = 0; #endif env->num_mem = 0; env->num_named = 0; env->mem_alloc = 0; env->mem_env_dynamic = (MemEnv* )NULL; xmemset(env->mem_env_static, 0, sizeof(env->mem_env_static)); env->parse_depth = 0; env->keep_num = 0; env->save_num = 0; env->save_alloc_num = 0; env->saves = 0; } static int scan_env_add_mem_entry(ScanEnv* env) { int i, need, alloc; MemEnv* p; need = env->num_mem + 1; if (need > MaxCaptureNum && MaxCaptureNum != 0) return ONIGERR_TOO_MANY_CAPTURES; if (need >= SCANENV_MEMENV_SIZE) { if (env->mem_alloc <= need) { if (IS_NULL(env->mem_env_dynamic)) { alloc = INIT_SCANENV_MEMENV_ALLOC_SIZE; p = (MemEnv* )xmalloc(sizeof(MemEnv) * alloc); CHECK_NULL_RETURN_MEMERR(p); xmemcpy(p, env->mem_env_static, sizeof(env->mem_env_static)); } else { alloc = env->mem_alloc * 
2; p = (MemEnv* )xrealloc(env->mem_env_dynamic, sizeof(MemEnv) * alloc); CHECK_NULL_RETURN_MEMERR(p); } for (i = env->num_mem + 1; i < alloc; i++) { p[i].node = NULL_NODE; #if 0 p[i].in = 0; p[i].recursion = 0; #endif } env->mem_env_dynamic = p; env->mem_alloc = alloc; } } env->num_mem++; return env->num_mem; } static int scan_env_set_mem_node(ScanEnv* env, int num, Node* node) { if (env->num_mem >= num) SCANENV_MEMENV(env)[num].node = node; else return ONIGERR_PARSER_BUG; return 0; } extern void onig_node_free(Node* node) { start: if (IS_NULL(node)) return ; #ifdef DEBUG_NODE_FREE fprintf(stderr, "onig_node_free: %p\n", node); #endif switch (NODE_TYPE(node)) { case NODE_STRING: if (STR_(node)->capacity != 0 && IS_NOT_NULL(STR_(node)->s) && STR_(node)->s != STR_(node)->buf) { xfree(STR_(node)->s); } break; case NODE_LIST: case NODE_ALT: onig_node_free(NODE_CAR(node)); { Node* next_node = NODE_CDR(node); xfree(node); node = next_node; goto start; } break; case NODE_CCLASS: { CClassNode* cc = CCLASS_(node); if (cc->mbuf) bbuf_free(cc->mbuf); } break; case NODE_BACKREF: if (IS_NOT_NULL(BACKREF_(node)->back_dynamic)) xfree(BACKREF_(node)->back_dynamic); break; case NODE_BAG: if (NODE_BODY(node)) onig_node_free(NODE_BODY(node)); { BagNode* en = BAG_(node); if (en->type == BAG_IF_ELSE) { onig_node_free(en->te.Then); onig_node_free(en->te.Else); } } break; case NODE_QUANT: case NODE_ANCHOR: if (NODE_BODY(node)) onig_node_free(NODE_BODY(node)); break; case NODE_CTYPE: case NODE_CALL: case NODE_GIMMICK: break; } xfree(node); } static void cons_node_free_alone(Node* node) { NODE_CAR(node) = 0; NODE_CDR(node) = 0; onig_node_free(node); } static Node* node_new(void) { Node* node; node = (Node* )xmalloc(sizeof(Node)); CHECK_NULL_RETURN(node); xmemset(node, 0, sizeof(*node)); #ifdef DEBUG_NODE_FREE fprintf(stderr, "node_new: %p\n", node); #endif return node; } static void initialize_cclass(CClassNode* cc) { BITSET_CLEAR(cc->bs); cc->flags = 0; cc->mbuf = NULL; } static Node* node_new_cclass(void) { Node* node = node_new(); CHECK_NULL_RETURN(node); NODE_SET_TYPE(node, NODE_CCLASS); initialize_cclass(CCLASS_(node)); return node; } static Node* node_new_ctype(int type, int not, OnigOptionType options) { Node* node = node_new(); CHECK_NULL_RETURN(node); NODE_SET_TYPE(node, NODE_CTYPE); CTYPE_(node)->ctype = type; CTYPE_(node)->not = not; CTYPE_(node)->options = options; CTYPE_(node)->ascii_mode = IS_ASCII_MODE_CTYPE_OPTION(type, options); return node; } static Node* node_new_anychar(void) { Node* node = node_new_ctype(CTYPE_ANYCHAR, 0, ONIG_OPTION_NONE); return node; } static Node* node_new_anychar_with_fixed_option(OnigOptionType option) { CtypeNode* ct; Node* node; node = node_new_anychar(); CHECK_NULL_RETURN(node); ct = CTYPE_(node); ct->options = option; NODE_STATUS_ADD(node, FIXED_OPTION); return node; } static int node_new_no_newline(Node** node, ScanEnv* env) { Node* n; n = node_new_anychar_with_fixed_option(ONIG_OPTION_NONE); CHECK_NULL_RETURN_MEMERR(n); *node = n; return 0; } static int node_new_true_anychar(Node** node, ScanEnv* env) { Node* n; n = node_new_anychar_with_fixed_option(ONIG_OPTION_MULTILINE); CHECK_NULL_RETURN_MEMERR(n); *node = n; return 0; } static Node* node_new_list(Node* left, Node* right) { Node* node = node_new(); CHECK_NULL_RETURN(node); NODE_SET_TYPE(node, NODE_LIST); NODE_CAR(node) = left; NODE_CDR(node) = right; return node; } extern Node* onig_node_new_list(Node* left, Node* right) { return node_new_list(left, right); } extern Node* onig_node_list_add(Node* list, Node* x) 
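/* Appends a fresh list cell holding x at the tail of list.  Note that the
   return value is the newly created cell (or NULL_NODE on allocation
   failure), not the head of the list; when list is NULL_NODE the cell is
   created but not linked, so the caller must keep the returned cell as the
   new head. */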
{ Node *n; n = onig_node_new_list(x, NULL); if (IS_NULL(n)) return NULL_NODE; if (IS_NOT_NULL(list)) { while (IS_NOT_NULL(NODE_CDR(list))) list = NODE_CDR(list); NODE_CDR(list) = n; } return n; } extern Node* onig_node_new_alt(Node* left, Node* right) { Node* node = node_new(); CHECK_NULL_RETURN(node); NODE_SET_TYPE(node, NODE_ALT); NODE_CAR(node) = left; NODE_CDR(node) = right; return node; } static Node* make_list_or_alt(NodeType type, int n, Node* ns[]) { Node* r; if (n <= 0) return NULL_NODE; if (n == 1) { r = node_new(); CHECK_NULL_RETURN(r); NODE_SET_TYPE(r, type); NODE_CAR(r) = ns[0]; NODE_CDR(r) = NULL_NODE; } else { Node* right; r = node_new(); CHECK_NULL_RETURN(r); right = make_list_or_alt(type, n - 1, ns + 1); if (IS_NULL(right)) { onig_node_free(r); return NULL_NODE; } NODE_SET_TYPE(r, type); NODE_CAR(r) = ns[0]; NODE_CDR(r) = right; } return r; } static Node* make_list(int n, Node* ns[]) { return make_list_or_alt(NODE_LIST, n, ns); } static Node* make_alt(int n, Node* ns[]) { return make_list_or_alt(NODE_ALT, n, ns); } extern Node* onig_node_new_anchor(int type, int ascii_mode) { Node* node = node_new(); CHECK_NULL_RETURN(node); NODE_SET_TYPE(node, NODE_ANCHOR); ANCHOR_(node)->type = type; ANCHOR_(node)->char_len = -1; ANCHOR_(node)->ascii_mode = ascii_mode; return node; } static Node* node_new_backref(int back_num, int* backrefs, int by_name, #ifdef USE_BACKREF_WITH_LEVEL int exist_level, int nest_level, #endif ScanEnv* env) { int i; Node* node = node_new(); CHECK_NULL_RETURN(node); NODE_SET_TYPE(node, NODE_BACKREF); BACKREF_(node)->back_num = back_num; BACKREF_(node)->back_dynamic = (int* )NULL; if (by_name != 0) NODE_STATUS_ADD(node, BY_NAME); #ifdef USE_BACKREF_WITH_LEVEL if (exist_level != 0) { NODE_STATUS_ADD(node, NEST_LEVEL); BACKREF_(node)->nest_level = nest_level; } #endif for (i = 0; i < back_num; i++) { if (backrefs[i] <= env->num_mem && IS_NULL(SCANENV_MEMENV(env)[backrefs[i]].node)) { NODE_STATUS_ADD(node, RECURSION); /* /...(\1).../ */ break; } } if (back_num <= NODE_BACKREFS_SIZE) { for (i = 0; i < back_num; i++) BACKREF_(node)->back_static[i] = backrefs[i]; } else { int* p = (int* )xmalloc(sizeof(int) * back_num); if (IS_NULL(p)) { onig_node_free(node); return NULL; } BACKREF_(node)->back_dynamic = p; for (i = 0; i < back_num; i++) p[i] = backrefs[i]; } return node; } static Node* node_new_backref_checker(int back_num, int* backrefs, int by_name, #ifdef USE_BACKREF_WITH_LEVEL int exist_level, int nest_level, #endif ScanEnv* env) { Node* node; node = node_new_backref(back_num, backrefs, by_name, #ifdef USE_BACKREF_WITH_LEVEL exist_level, nest_level, #endif env); CHECK_NULL_RETURN(node); NODE_STATUS_ADD(node, CHECKER); return node; } #ifdef USE_CALL static Node* node_new_call(UChar* name, UChar* name_end, int gnum, int by_number) { Node* node = node_new(); CHECK_NULL_RETURN(node); NODE_SET_TYPE(node, NODE_CALL); CALL_(node)->by_number = by_number; CALL_(node)->name = name; CALL_(node)->name_end = name_end; CALL_(node)->group_num = gnum; CALL_(node)->entry_count = 1; return node; } #endif static Node* node_new_quantifier(int lower, int upper, int by_number) { Node* node = node_new(); CHECK_NULL_RETURN(node); NODE_SET_TYPE(node, NODE_QUANT); QUANT_(node)->lower = lower; QUANT_(node)->upper = upper; QUANT_(node)->greedy = 1; QUANT_(node)->emptiness = BODY_IS_NOT_EMPTY; QUANT_(node)->head_exact = NULL_NODE; QUANT_(node)->next_head_exact = NULL_NODE; QUANT_(node)->is_refered = 0; if (by_number != 0) NODE_STATUS_ADD(node, BY_NUMBER); return node; } static Node* 
node_new_bag(enum BagType type) { Node* node = node_new(); CHECK_NULL_RETURN(node); NODE_SET_TYPE(node, NODE_BAG); BAG_(node)->type = type; switch (type) { case BAG_MEMORY: BAG_(node)->m.regnum = 0; BAG_(node)->m.called_addr = -1; BAG_(node)->m.entry_count = 1; BAG_(node)->m.called_state = 0; break; case BAG_OPTION: BAG_(node)->o.options = 0; break; case BAG_STOP_BACKTRACK: break; case BAG_IF_ELSE: BAG_(node)->te.Then = 0; BAG_(node)->te.Else = 0; break; } BAG_(node)->opt_count = 0; return node; } extern Node* onig_node_new_bag(enum BagType type) { return node_new_bag(type); } static Node* node_new_bag_if_else(Node* cond, Node* Then, Node* Else) { Node* n; n = node_new_bag(BAG_IF_ELSE); CHECK_NULL_RETURN(n); NODE_BODY(n) = cond; BAG_(n)->te.Then = Then; BAG_(n)->te.Else = Else; return n; } static Node* node_new_memory(int is_named) { Node* node = node_new_bag(BAG_MEMORY); CHECK_NULL_RETURN(node); if (is_named != 0) NODE_STATUS_ADD(node, NAMED_GROUP); return node; } static Node* node_new_option(OnigOptionType option) { Node* node = node_new_bag(BAG_OPTION); CHECK_NULL_RETURN(node); BAG_(node)->o.options = option; return node; } static Node* node_new_group(Node* content) { Node* node; node = node_new(); CHECK_NULL_RETURN(node); NODE_SET_TYPE(node, NODE_LIST); NODE_CAR(node) = content; NODE_CDR(node) = NULL_NODE; return node; } static Node* node_drop_group(Node* group) { Node* content; content = NODE_CAR(group); NODE_CAR(group) = NULL_NODE; onig_node_free(group); return content; } static int node_new_fail(Node** node, ScanEnv* env) { *node = node_new(); CHECK_NULL_RETURN_MEMERR(*node); NODE_SET_TYPE(*node, NODE_GIMMICK); GIMMICK_(*node)->type = GIMMICK_FAIL; return ONIG_NORMAL; } static int node_new_save_gimmick(Node** node, enum SaveType save_type, ScanEnv* env) { int id; int r; r = save_entry(env, save_type, &id); if (r != ONIG_NORMAL) return r; *node = node_new(); CHECK_NULL_RETURN_MEMERR(*node); NODE_SET_TYPE(*node, NODE_GIMMICK); GIMMICK_(*node)->id = id; GIMMICK_(*node)->type = GIMMICK_SAVE; GIMMICK_(*node)->detail_type = (int )save_type; return ONIG_NORMAL; } static int node_new_update_var_gimmick(Node** node, enum UpdateVarType update_var_type, int id, ScanEnv* env) { *node = node_new(); CHECK_NULL_RETURN_MEMERR(*node); NODE_SET_TYPE(*node, NODE_GIMMICK); GIMMICK_(*node)->id = id; GIMMICK_(*node)->type = GIMMICK_UPDATE_VAR; GIMMICK_(*node)->detail_type = (int )update_var_type; return ONIG_NORMAL; } static int node_new_keep(Node** node, ScanEnv* env) { int r; r = node_new_save_gimmick(node, SAVE_KEEP, env); if (r != 0) return r; env->keep_num++; return ONIG_NORMAL; } #ifdef USE_CALLOUT extern void onig_free_reg_callout_list(int n, CalloutListEntry* list) { int i; int j; if (IS_NULL(list)) return ; for (i = 0; i < n; i++) { if (list[i].of == ONIG_CALLOUT_OF_NAME) { for (j = 0; j < list[i].u.arg.passed_num; j++) { if (list[i].u.arg.types[j] == ONIG_TYPE_STRING) { if (IS_NOT_NULL(list[i].u.arg.vals[j].s.start)) xfree(list[i].u.arg.vals[j].s.start); } } } else { /* ONIG_CALLOUT_OF_CONTENTS */ if (IS_NOT_NULL(list[i].u.content.start)) { xfree((void* )list[i].u.content.start); } } } xfree(list); } extern CalloutListEntry* onig_reg_callout_list_at(regex_t* reg, int num) { RegexExt* ext = reg->extp; CHECK_NULL_RETURN(ext); if (num <= 0 || num > ext->callout_num) return 0; num--; return ext->callout_list + num; } static int reg_callout_list_entry(ScanEnv* env, int* rnum) { #define INIT_CALLOUT_LIST_NUM 3 int num; CalloutListEntry* list; CalloutListEntry* e; RegexExt* ext; ext = 
onig_get_regex_ext(env->reg); CHECK_NULL_RETURN_MEMERR(ext); if (IS_NULL(ext->callout_list)) { list = (CalloutListEntry* )xmalloc(sizeof(*list) * INIT_CALLOUT_LIST_NUM); CHECK_NULL_RETURN_MEMERR(list); ext->callout_list = list; ext->callout_list_alloc = INIT_CALLOUT_LIST_NUM; ext->callout_num = 0; } num = ext->callout_num + 1; if (num > ext->callout_list_alloc) { int alloc = ext->callout_list_alloc * 2; list = (CalloutListEntry* )xrealloc(ext->callout_list, sizeof(CalloutListEntry) * alloc); CHECK_NULL_RETURN_MEMERR(list); ext->callout_list = list; ext->callout_list_alloc = alloc; } e = ext->callout_list + (num - 1); e->flag = 0; e->of = 0; e->in = ONIG_CALLOUT_OF_CONTENTS; e->type = 0; e->tag_start = 0; e->tag_end = 0; e->start_func = 0; e->end_func = 0; e->u.arg.num = 0; e->u.arg.passed_num = 0; ext->callout_num = num; *rnum = num; return ONIG_NORMAL; } static int node_new_callout(Node** node, OnigCalloutOf callout_of, int num, int id, ScanEnv* env) { *node = node_new(); CHECK_NULL_RETURN_MEMERR(*node); NODE_SET_TYPE(*node, NODE_GIMMICK); GIMMICK_(*node)->id = id; GIMMICK_(*node)->num = num; GIMMICK_(*node)->type = GIMMICK_CALLOUT; GIMMICK_(*node)->detail_type = (int )callout_of; return ONIG_NORMAL; } #endif static int make_text_segment(Node** node, ScanEnv* env) { int r; int i; Node* x; Node* ns[2]; /* \X == (?>\O(?:\Y\O)*) */ ns[1] = NULL_NODE; r = ONIGERR_MEMORY; ns[0] = onig_node_new_anchor(ANCR_NO_TEXT_SEGMENT_BOUNDARY, 0); if (IS_NULL(ns[0])) goto err; r = node_new_true_anychar(&ns[1], env); if (r != 0) goto err1; x = make_list(2, ns); if (IS_NULL(x)) goto err; ns[0] = x; ns[1] = NULL_NODE; x = node_new_quantifier(0, INFINITE_REPEAT, 1); if (IS_NULL(x)) goto err; NODE_BODY(x) = ns[0]; ns[0] = NULL_NODE; ns[1] = x; r = node_new_true_anychar(&ns[0], env); if (r != 0) goto err1; x = make_list(2, ns); if (IS_NULL(x)) goto err; ns[0] = x; ns[1] = NULL_NODE; x = node_new_bag(BAG_STOP_BACKTRACK); if (IS_NULL(x)) goto err; NODE_BODY(x) = ns[0]; *node = x; return ONIG_NORMAL; err: r = ONIGERR_MEMORY; err1: for (i = 0; i < 2; i++) onig_node_free(ns[i]); return r; } static int make_absent_engine(Node** node, int pre_save_right_id, Node* absent, Node* step_one, int lower, int upper, int possessive, int is_range_cutter, ScanEnv* env) { int r; int i; int id; Node* x; Node* ns[4]; for (i = 0; i < 4; i++) ns[i] = NULL_NODE; ns[1] = absent; ns[3] = step_one; /* for err */ r = node_new_save_gimmick(&ns[0], SAVE_S, env); if (r != 0) goto err; id = GIMMICK_(ns[0])->id; r = node_new_update_var_gimmick(&ns[2], UPDATE_VAR_RIGHT_RANGE_FROM_S_STACK, id, env); if (r != 0) goto err; r = node_new_fail(&ns[3], env); if (r != 0) goto err; x = make_list(4, ns); if (IS_NULL(x)) goto err0; ns[0] = x; ns[1] = step_one; ns[2] = ns[3] = NULL_NODE; x = make_alt(2, ns); if (IS_NULL(x)) goto err0; ns[0] = x; x = node_new_quantifier(lower, upper, 0); if (IS_NULL(x)) goto err0; NODE_BODY(x) = ns[0]; ns[0] = x; if (possessive != 0) { x = node_new_bag(BAG_STOP_BACKTRACK); if (IS_NULL(x)) goto err0; NODE_BODY(x) = ns[0]; ns[0] = x; } r = node_new_update_var_gimmick(&ns[1], UPDATE_VAR_RIGHT_RANGE_FROM_STACK, pre_save_right_id, env); if (r != 0) goto err; r = node_new_fail(&ns[2], env); if (r != 0) goto err; x = make_list(2, ns + 1); if (IS_NULL(x)) goto err0; ns[1] = x; ns[2] = NULL_NODE; x = make_alt(2, ns); if (IS_NULL(x)) goto err0; if (is_range_cutter != 0) NODE_STATUS_ADD(x, SUPER); *node = x; return ONIG_NORMAL; err0: r = ONIGERR_MEMORY; err: for (i = 0; i < 4; i++) onig_node_free(ns[i]); return r; } static int 
make_absent_tail(Node** node1, Node** node2, int pre_save_right_id, ScanEnv* env) { int r; int id; Node* save; Node* x; Node* ns[2]; *node1 = *node2 = NULL_NODE; save = ns[0] = ns[1] = NULL_NODE; r = node_new_save_gimmick(&save, SAVE_RIGHT_RANGE, env); if (r != 0) goto err; id = GIMMICK_(save)->id; r = node_new_update_var_gimmick(&ns[0], UPDATE_VAR_RIGHT_RANGE_FROM_STACK, id, env); if (r != 0) goto err; r = node_new_fail(&ns[1], env); if (r != 0) goto err; x = make_list(2, ns); if (IS_NULL(x)) goto err0; ns[0] = NULL_NODE; ns[1] = x; r = node_new_update_var_gimmick(&ns[0], UPDATE_VAR_RIGHT_RANGE_FROM_STACK, pre_save_right_id, env); if (r != 0) goto err; x = make_alt(2, ns); if (IS_NULL(x)) goto err0; *node1 = save; *node2 = x; return ONIG_NORMAL; err0: r = ONIGERR_MEMORY; err: onig_node_free(save); onig_node_free(ns[0]); onig_node_free(ns[1]); return r; } static int make_range_clear(Node** node, ScanEnv* env) { int r; int id; Node* save; Node* x; Node* ns[2]; *node = NULL_NODE; save = ns[0] = ns[1] = NULL_NODE; r = node_new_save_gimmick(&save, SAVE_RIGHT_RANGE, env); if (r != 0) goto err; id = GIMMICK_(save)->id; r = node_new_update_var_gimmick(&ns[0], UPDATE_VAR_RIGHT_RANGE_FROM_STACK, id, env); if (r != 0) goto err; r = node_new_fail(&ns[1], env); if (r != 0) goto err; x = make_list(2, ns); if (IS_NULL(x)) goto err0; ns[0] = NULL_NODE; ns[1] = x; r = node_new_update_var_gimmick(&ns[0], UPDATE_VAR_RIGHT_RANGE_INIT, 0, env); if (r != 0) goto err; x = make_alt(2, ns); if (IS_NULL(x)) goto err0; NODE_STATUS_ADD(x, SUPER); ns[0] = save; ns[1] = x; save = NULL_NODE; x = make_list(2, ns); if (IS_NULL(x)) goto err0; *node = x; return ONIG_NORMAL; err0: r = ONIGERR_MEMORY; err: onig_node_free(save); onig_node_free(ns[0]); onig_node_free(ns[1]); return r; } static int is_simple_one_char_repeat(Node* node, Node** rquant, Node** rbody, int* is_possessive, ScanEnv* env) { Node* quant; Node* body; *rquant = *rbody = 0; *is_possessive = 0; if (NODE_TYPE(node) == NODE_QUANT) { quant = node; } else { if (NODE_TYPE(node) == NODE_BAG) { BagNode* en = BAG_(node); if (en->type == BAG_STOP_BACKTRACK) { *is_possessive = 1; quant = NODE_BAG_BODY(en); if (NODE_TYPE(quant) != NODE_QUANT) return 0; } else return 0; } else return 0; } if (QUANT_(quant)->greedy == 0) return 0; body = NODE_BODY(quant); switch (NODE_TYPE(body)) { case NODE_STRING: { int len; StrNode* sn = STR_(body); UChar *s = sn->s; len = 0; while (s < sn->end) { s += enclen(env->enc, s); len++; } if (len != 1) return 0; } case NODE_CCLASS: break; default: return 0; break; } if (node != quant) { NODE_BODY(node) = 0; onig_node_free(node); } NODE_BODY(quant) = NULL_NODE; *rquant = quant; *rbody = body; return 1; } static int make_absent_tree_for_simple_one_char_repeat(Node** node, Node* absent, Node* quant, Node* body, int possessive, ScanEnv* env) { int r; int i; int id1; int lower, upper; Node* x; Node* ns[4]; *node = NULL_NODE; r = ONIGERR_MEMORY; ns[0] = ns[1] = NULL_NODE; ns[2] = body, ns[3] = absent; lower = QUANT_(quant)->lower; upper = QUANT_(quant)->upper; onig_node_free(quant); r = node_new_save_gimmick(&ns[0], SAVE_RIGHT_RANGE, env); if (r != 0) goto err; id1 = GIMMICK_(ns[0])->id; r = make_absent_engine(&ns[1], id1, absent, body, lower, upper, possessive, 0, env); if (r != 0) goto err; ns[2] = ns[3] = NULL_NODE; r = node_new_update_var_gimmick(&ns[2], UPDATE_VAR_RIGHT_RANGE_FROM_STACK, id1, env); if (r != 0) goto err; x = make_list(3, ns); if (IS_NULL(x)) goto err0; *node = x; return ONIG_NORMAL; err0: r = ONIGERR_MEMORY; err: for (i = 0; 
i < 4; i++) onig_node_free(ns[i]); return r; } static int make_absent_tree(Node** node, Node* absent, Node* expr, int is_range_cutter, ScanEnv* env) { int r; int i; int id1, id2; int possessive; Node* x; Node* ns[7]; r = ONIGERR_MEMORY; for (i = 0; i < 7; i++) ns[i] = NULL_NODE; ns[4] = expr; ns[5] = absent; if (is_range_cutter == 0) { Node* quant; Node* body; if (expr == NULL_NODE) { /* default expr \O* */ quant = node_new_quantifier(0, INFINITE_REPEAT, 0); if (IS_NULL(quant)) goto err0; r = node_new_true_anychar(&body, env); if (r != 0) { onig_node_free(quant); goto err; } possessive = 0; goto simple; } else { if (is_simple_one_char_repeat(expr, &quant, &body, &possessive, env)) { simple: r = make_absent_tree_for_simple_one_char_repeat(node, absent, quant, body, possessive, env); if (r != 0) { ns[4] = NULL_NODE; onig_node_free(quant); onig_node_free(body); goto err; } return ONIG_NORMAL; } } } r = node_new_save_gimmick(&ns[0], SAVE_RIGHT_RANGE, env); if (r != 0) goto err; id1 = GIMMICK_(ns[0])->id; r = node_new_save_gimmick(&ns[1], SAVE_S, env); if (r != 0) goto err; id2 = GIMMICK_(ns[1])->id; r = node_new_true_anychar(&ns[3], env); if (r != 0) goto err; possessive = 1; r = make_absent_engine(&ns[2], id1, absent, ns[3], 0, INFINITE_REPEAT, possessive, is_range_cutter, env); if (r != 0) goto err; ns[3] = NULL_NODE; ns[5] = NULL_NODE; r = node_new_update_var_gimmick(&ns[3], UPDATE_VAR_S_FROM_STACK, id2, env); if (r != 0) goto err; if (is_range_cutter != 0) { x = make_list(4, ns); if (IS_NULL(x)) goto err0; } else { r = make_absent_tail(&ns[5], &ns[6], id1, env); if (r != 0) goto err; x = make_list(7, ns); if (IS_NULL(x)) goto err0; } *node = x; return ONIG_NORMAL; err0: r = ONIGERR_MEMORY; err: for (i = 0; i < 7; i++) onig_node_free(ns[i]); return r; } extern int onig_node_str_cat(Node* node, const UChar* s, const UChar* end) { int addlen = (int )(end - s); if (addlen > 0) { int len = (int )(STR_(node)->end - STR_(node)->s); if (STR_(node)->capacity > 0 || (len + addlen > NODE_STRING_BUF_SIZE - 1)) { UChar* p; int capa = len + addlen + NODE_STRING_MARGIN; if (capa <= STR_(node)->capacity) { onig_strcpy(STR_(node)->s + len, s, end); } else { if (STR_(node)->s == STR_(node)->buf) p = strcat_capa_from_static(STR_(node)->s, STR_(node)->end, s, end, capa); else p = strcat_capa(STR_(node)->s, STR_(node)->end, s, end, capa); CHECK_NULL_RETURN_MEMERR(p); STR_(node)->s = p; STR_(node)->capacity = capa; } } else { onig_strcpy(STR_(node)->s + len, s, end); } STR_(node)->end = STR_(node)->s + len + addlen; } return 0; } extern int onig_node_str_set(Node* node, const UChar* s, const UChar* end) { onig_node_str_clear(node); return onig_node_str_cat(node, s, end); } static int node_str_cat_char(Node* node, UChar c) { UChar s[1]; s[0] = c; return onig_node_str_cat(node, s, s + 1); } extern void onig_node_conv_to_str_node(Node* node, int flag) { NODE_SET_TYPE(node, NODE_STRING); STR_(node)->flag = flag; STR_(node)->capacity = 0; STR_(node)->s = STR_(node)->buf; STR_(node)->end = STR_(node)->buf; } extern void onig_node_str_clear(Node* node) { if (STR_(node)->capacity != 0 && IS_NOT_NULL(STR_(node)->s) && STR_(node)->s != STR_(node)->buf) { xfree(STR_(node)->s); } STR_(node)->capacity = 0; STR_(node)->flag = 0; STR_(node)->s = STR_(node)->buf; STR_(node)->end = STR_(node)->buf; } static Node* node_new_str(const UChar* s, const UChar* end) { Node* node = node_new(); CHECK_NULL_RETURN(node); NODE_SET_TYPE(node, NODE_STRING); STR_(node)->capacity = 0; STR_(node)->flag = 0; STR_(node)->s = STR_(node)->buf; 
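  /* Short strings are kept in the node's inline buf[]; capacity stays 0
     until onig_node_str_cat() above has to spill into a heap buffer. */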
  STR_(node)->end = STR_(node)->buf;

  if (onig_node_str_cat(node, s, end)) {
    onig_node_free(node);
    return NULL;
  }
  return node;
}

extern Node*
onig_node_new_str(const UChar* s, const UChar* end)
{
  return node_new_str(s, end);
}

static Node*
node_new_str_raw(UChar* s, UChar* end)
{
  Node* node = node_new_str(s, end);
  CHECK_NULL_RETURN(node);
  NODE_STRING_SET_RAW(node);
  return node;
}

static Node*
node_new_empty(void)
{
  return node_new_str(NULL, NULL);
}

static Node*
node_new_str_raw_char(UChar c)
{
  int i;
  UChar p[1];
  Node* node;

  p[0] = c;
  node = node_new_str_raw(p, p + 1);
  /* don't touch the node buffer if allocation failed */
  CHECK_NULL_RETURN(node);

  /* clear buf tail */
  for (i = 1; i < NODE_STRING_BUF_SIZE; i++)
    STR_(node)->buf[i] = '\0';

  return node;
}

static Node*
str_node_split_last_char(Node* node, OnigEncoding enc)
{
  const UChar *p;
  Node* rn;
  StrNode* sn;

  sn = STR_(node);
  rn = NULL_NODE;
  if (sn->end > sn->s) {
    p = onigenc_get_prev_char_head(enc, sn->s, sn->end);
    if (p && p > sn->s) { /* can be split. */
      rn = node_new_str(p, sn->end);
      CHECK_NULL_RETURN(rn);
      if (NODE_STRING_IS_RAW(node))
        NODE_STRING_SET_RAW(rn);
      sn->end = (UChar* )p;
    }
  }

  return rn;
}

static int
str_node_can_be_split(Node* node, OnigEncoding enc)
{
  StrNode* sn = STR_(node);
  if (sn->end > sn->s) {
    return ((enclen(enc, sn->s) < sn->end - sn->s)  ?  1 : 0);
  }
  return 0;
}

extern int
onig_scan_unsigned_number(UChar** src, const UChar* end, OnigEncoding enc)
{
  unsigned int num, val;
  OnigCodePoint c;
  UChar* p = *src;
  PFETCH_READY;

  num = 0;
  while (! PEND) {
    PFETCH(c);
    if (IS_CODE_DIGIT_ASCII(enc, c)) {
      val = (unsigned int )DIGITVAL(c);
      if ((INT_MAX_LIMIT - val) / 10UL < num)
        return -1;  /* overflow */

      num = num * 10 + val;
    }
    else {
      PUNFETCH;
      break;
    }
  }
  *src = p;
  return num;
}

static int
scan_unsigned_hexadecimal_number(UChar** src, UChar* end, int minlen,
                                 int maxlen, OnigEncoding enc)
{
  OnigCodePoint c;
  unsigned int num, val;
  int n;
  UChar* p = *src;
  PFETCH_READY;

  num = 0;
  n = 0;
  while (! PEND && n < maxlen) {
    PFETCH(c);
    if (IS_CODE_XDIGIT_ASCII(enc, c)) {
      n++;
      val = (unsigned int )XDIGITVAL(enc,c);
      if ((INT_MAX_LIMIT - val) / 16UL < num)
        return ONIGERR_TOO_BIG_NUMBER; /* overflow */

      num = (num << 4) + XDIGITVAL(enc,c);
    }
    else {
      PUNFETCH;
      break;
    }
  }

  if (n < minlen)
    return ONIGERR_INVALID_CODE_POINT_VALUE;

  *src = p;
  return num;
}

static int
scan_unsigned_octal_number(UChar** src, UChar* end, int maxlen,
                           OnigEncoding enc)
{
  OnigCodePoint c;
  unsigned int num, val;
  UChar* p = *src;
  PFETCH_READY;

  num = 0;
  while (! PEND && maxlen-- != 0) {
    PFETCH(c);
    if (IS_CODE_DIGIT_ASCII(enc, c) && c < '8') {
      val = ODIGITVAL(c);
      if ((INT_MAX_LIMIT - val) / 8UL < num)
        return -1;  /* overflow */

      num = (num << 3) + val;
    }
    else {
      PUNFETCH;
      break;
    }
  }
  *src = p;
  return num;
}


#define BB_WRITE_CODE_POINT(bbuf,pos,code) \
    BB_WRITE(bbuf, pos, &(code), SIZE_CODE_POINT)

/* illustration: the two ranges 0x3041..0x3096 and 0x30A1..0x30FA are stored
   as the five code points 2, 0x3041, 0x3096, 0x30A1, 0x30FA, following the
   layout described below. */
/* data format: [n][from-1][to-1][from-2][to-2] ...
[from-n][to-n] (all data size is OnigCodePoint) */ static int new_code_range(BBuf** pbuf) { #define INIT_MULTI_BYTE_RANGE_SIZE (SIZE_CODE_POINT * 5) int r; OnigCodePoint n; BBuf* bbuf; bbuf = *pbuf = (BBuf* )xmalloc(sizeof(BBuf)); CHECK_NULL_RETURN_MEMERR(bbuf); r = BB_INIT(bbuf, INIT_MULTI_BYTE_RANGE_SIZE); if (r != 0) { xfree(bbuf); *pbuf = 0; return r; } n = 0; BB_WRITE_CODE_POINT(bbuf, 0, n); return 0; } static int add_code_range_to_buf(BBuf** pbuf, OnigCodePoint from, OnigCodePoint to) { int r, inc_n, pos; int low, high, bound, x; OnigCodePoint n, *data; BBuf* bbuf; if (from > to) { n = from; from = to; to = n; } if (IS_NULL(*pbuf)) { r = new_code_range(pbuf); if (r != 0) return r; bbuf = *pbuf; n = 0; } else { bbuf = *pbuf; GET_CODE_POINT(n, bbuf->p); } data = (OnigCodePoint* )(bbuf->p); data++; for (low = 0, bound = n; low < bound; ) { x = (low + bound) >> 1; if (from > data[x*2 + 1]) low = x + 1; else bound = x; } high = (to == ~((OnigCodePoint )0)) ? n : low; for (bound = n; high < bound; ) { x = (high + bound) >> 1; if (to + 1 >= data[x*2]) high = x + 1; else bound = x; } inc_n = low + 1 - high; if (n + inc_n > ONIG_MAX_MULTI_BYTE_RANGES_NUM) return ONIGERR_TOO_MANY_MULTI_BYTE_RANGES; if (inc_n != 1) { if (from > data[low*2]) from = data[low*2]; if (to < data[(high - 1)*2 + 1]) to = data[(high - 1)*2 + 1]; } if (inc_n != 0 && (OnigCodePoint )high < n) { int from_pos = SIZE_CODE_POINT * (1 + high * 2); int to_pos = SIZE_CODE_POINT * (1 + (low + 1) * 2); int size = (n - high) * 2 * SIZE_CODE_POINT; if (inc_n > 0) { BB_MOVE_RIGHT(bbuf, from_pos, to_pos, size); } else { BB_MOVE_LEFT_REDUCE(bbuf, from_pos, to_pos); } } pos = SIZE_CODE_POINT * (1 + low * 2); BB_ENSURE_SIZE(bbuf, pos + SIZE_CODE_POINT * 2); BB_WRITE_CODE_POINT(bbuf, pos, from); BB_WRITE_CODE_POINT(bbuf, pos + SIZE_CODE_POINT, to); n += inc_n; BB_WRITE_CODE_POINT(bbuf, 0, n); return 0; } static int add_code_range(BBuf** pbuf, ScanEnv* env, OnigCodePoint from, OnigCodePoint to) { if (from > to) { if (IS_SYNTAX_BV(env->syntax, ONIG_SYN_ALLOW_EMPTY_RANGE_IN_CC)) return 0; else return ONIGERR_EMPTY_RANGE_IN_CHAR_CLASS; } return add_code_range_to_buf(pbuf, from, to); } static int not_code_range_buf(OnigEncoding enc, BBuf* bbuf, BBuf** pbuf) { int r, i, n; OnigCodePoint pre, from, *data, to = 0; *pbuf = (BBuf* )NULL; if (IS_NULL(bbuf)) { set_all: return SET_ALL_MULTI_BYTE_RANGE(enc, pbuf); } data = (OnigCodePoint* )(bbuf->p); GET_CODE_POINT(n, data); data++; if (n <= 0) goto set_all; r = 0; pre = MBCODE_START_POS(enc); for (i = 0; i < n; i++) { from = data[i*2]; to = data[i*2+1]; if (pre <= from - 1) { r = add_code_range_to_buf(pbuf, pre, from - 1); if (r != 0) return r; } if (to == ~((OnigCodePoint )0)) break; pre = to + 1; } if (to < ~((OnigCodePoint )0)) { r = add_code_range_to_buf(pbuf, to + 1, ~((OnigCodePoint )0)); } return r; } #define SWAP_BB_NOT(bbuf1, not1, bbuf2, not2) do {\ BBuf *tbuf; \ int tnot; \ tnot = not1; not1 = not2; not2 = tnot; \ tbuf = bbuf1; bbuf1 = bbuf2; bbuf2 = tbuf; \ } while (0) static int or_code_range_buf(OnigEncoding enc, BBuf* bbuf1, int not1, BBuf* bbuf2, int not2, BBuf** pbuf) { int r; OnigCodePoint i, n1, *data1; OnigCodePoint from, to; *pbuf = (BBuf* )NULL; if (IS_NULL(bbuf1) && IS_NULL(bbuf2)) { if (not1 != 0 || not2 != 0) return SET_ALL_MULTI_BYTE_RANGE(enc, pbuf); return 0; } r = 0; if (IS_NULL(bbuf2)) SWAP_BB_NOT(bbuf1, not1, bbuf2, not2); if (IS_NULL(bbuf1)) { if (not1 != 0) { return SET_ALL_MULTI_BYTE_RANGE(enc, pbuf); } else { if (not2 == 0) { return bbuf_clone(pbuf, bbuf2); } else { 
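  /* bbuf1 is empty here, so the union is just the complement of bbuf2. */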
return not_code_range_buf(enc, bbuf2, pbuf); } } } if (not1 != 0) SWAP_BB_NOT(bbuf1, not1, bbuf2, not2); data1 = (OnigCodePoint* )(bbuf1->p); GET_CODE_POINT(n1, data1); data1++; if (not2 == 0 && not1 == 0) { /* 1 OR 2 */ r = bbuf_clone(pbuf, bbuf2); } else if (not1 == 0) { /* 1 OR (not 2) */ r = not_code_range_buf(enc, bbuf2, pbuf); } if (r != 0) return r; for (i = 0; i < n1; i++) { from = data1[i*2]; to = data1[i*2+1]; r = add_code_range_to_buf(pbuf, from, to); if (r != 0) return r; } return 0; } static int and_code_range1(BBuf** pbuf, OnigCodePoint from1, OnigCodePoint to1, OnigCodePoint* data, int n) { int i, r; OnigCodePoint from2, to2; for (i = 0; i < n; i++) { from2 = data[i*2]; to2 = data[i*2+1]; if (from2 < from1) { if (to2 < from1) continue; else { from1 = to2 + 1; } } else if (from2 <= to1) { if (to2 < to1) { if (from1 <= from2 - 1) { r = add_code_range_to_buf(pbuf, from1, from2-1); if (r != 0) return r; } from1 = to2 + 1; } else { to1 = from2 - 1; } } else { from1 = from2; } if (from1 > to1) break; } if (from1 <= to1) { r = add_code_range_to_buf(pbuf, from1, to1); if (r != 0) return r; } return 0; } static int and_code_range_buf(BBuf* bbuf1, int not1, BBuf* bbuf2, int not2, BBuf** pbuf) { int r; OnigCodePoint i, j, n1, n2, *data1, *data2; OnigCodePoint from, to, from1, to1, from2, to2; *pbuf = (BBuf* )NULL; if (IS_NULL(bbuf1)) { if (not1 != 0 && IS_NOT_NULL(bbuf2)) /* not1 != 0 -> not2 == 0 */ return bbuf_clone(pbuf, bbuf2); return 0; } else if (IS_NULL(bbuf2)) { if (not2 != 0) return bbuf_clone(pbuf, bbuf1); return 0; } if (not1 != 0) SWAP_BB_NOT(bbuf1, not1, bbuf2, not2); data1 = (OnigCodePoint* )(bbuf1->p); data2 = (OnigCodePoint* )(bbuf2->p); GET_CODE_POINT(n1, data1); GET_CODE_POINT(n2, data2); data1++; data2++; if (not2 == 0 && not1 == 0) { /* 1 AND 2 */ for (i = 0; i < n1; i++) { from1 = data1[i*2]; to1 = data1[i*2+1]; for (j = 0; j < n2; j++) { from2 = data2[j*2]; to2 = data2[j*2+1]; if (from2 > to1) break; if (to2 < from1) continue; from = MAX(from1, from2); to = MIN(to1, to2); r = add_code_range_to_buf(pbuf, from, to); if (r != 0) return r; } } } else if (not1 == 0) { /* 1 AND (not 2) */ for (i = 0; i < n1; i++) { from1 = data1[i*2]; to1 = data1[i*2+1]; r = and_code_range1(pbuf, from1, to1, data2, n2); if (r != 0) return r; } } return 0; } static int and_cclass(CClassNode* dest, CClassNode* cc, OnigEncoding enc) { int r, not1, not2; BBuf *buf1, *buf2, *pbuf; BitSetRef bsr1, bsr2; BitSet bs1, bs2; not1 = IS_NCCLASS_NOT(dest); bsr1 = dest->bs; buf1 = dest->mbuf; not2 = IS_NCCLASS_NOT(cc); bsr2 = cc->bs; buf2 = cc->mbuf; if (not1 != 0) { bitset_invert_to(bsr1, bs1); bsr1 = bs1; } if (not2 != 0) { bitset_invert_to(bsr2, bs2); bsr2 = bs2; } bitset_and(bsr1, bsr2); if (bsr1 != dest->bs) { bitset_copy(dest->bs, bsr1); } if (not1 != 0) { bitset_invert(dest->bs); } if (! 
ONIGENC_IS_SINGLEBYTE(enc)) { if (not1 != 0 && not2 != 0) { r = or_code_range_buf(enc, buf1, 0, buf2, 0, &pbuf); } else { r = and_code_range_buf(buf1, not1, buf2, not2, &pbuf); if (r == 0 && not1 != 0) { BBuf *tbuf; r = not_code_range_buf(enc, pbuf, &tbuf); if (r != 0) { bbuf_free(pbuf); return r; } bbuf_free(pbuf); pbuf = tbuf; } } if (r != 0) return r; dest->mbuf = pbuf; bbuf_free(buf1); return r; } return 0; } static int or_cclass(CClassNode* dest, CClassNode* cc, OnigEncoding enc) { int r, not1, not2; BBuf *buf1, *buf2, *pbuf; BitSetRef bsr1, bsr2; BitSet bs1, bs2; not1 = IS_NCCLASS_NOT(dest); bsr1 = dest->bs; buf1 = dest->mbuf; not2 = IS_NCCLASS_NOT(cc); bsr2 = cc->bs; buf2 = cc->mbuf; if (not1 != 0) { bitset_invert_to(bsr1, bs1); bsr1 = bs1; } if (not2 != 0) { bitset_invert_to(bsr2, bs2); bsr2 = bs2; } bitset_or(bsr1, bsr2); if (bsr1 != dest->bs) { bitset_copy(dest->bs, bsr1); } if (not1 != 0) { bitset_invert(dest->bs); } if (! ONIGENC_IS_SINGLEBYTE(enc)) { if (not1 != 0 && not2 != 0) { r = and_code_range_buf(buf1, 0, buf2, 0, &pbuf); } else { r = or_code_range_buf(enc, buf1, not1, buf2, not2, &pbuf); if (r == 0 && not1 != 0) { BBuf *tbuf; r = not_code_range_buf(enc, pbuf, &tbuf); if (r != 0) { bbuf_free(pbuf); return r; } bbuf_free(pbuf); pbuf = tbuf; } } if (r != 0) return r; dest->mbuf = pbuf; bbuf_free(buf1); return r; } else return 0; } static OnigCodePoint conv_backslash_value(OnigCodePoint c, ScanEnv* env) { if (IS_SYNTAX_OP(env->syntax, ONIG_SYN_OP_ESC_CONTROL_CHARS)) { switch (c) { case 'n': return '\n'; case 't': return '\t'; case 'r': return '\r'; case 'f': return '\f'; case 'a': return '\007'; case 'b': return '\010'; case 'e': return '\033'; case 'v': if (IS_SYNTAX_OP2(env->syntax, ONIG_SYN_OP2_ESC_V_VTAB)) return '\v'; break; default: break; } } return c; } static int is_invalid_quantifier_target(Node* node) { switch (NODE_TYPE(node)) { case NODE_ANCHOR: case NODE_GIMMICK: return 1; break; case NODE_BAG: /* allow enclosed elements */ /* return is_invalid_quantifier_target(NODE_BODY(node)); */ break; case NODE_LIST: do { if (! is_invalid_quantifier_target(NODE_CAR(node))) return 0; } while (IS_NOT_NULL(node = NODE_CDR(node))); return 0; break; case NODE_ALT: do { if (is_invalid_quantifier_target(NODE_CAR(node))) return 1; } while (IS_NOT_NULL(node = NODE_CDR(node))); break; default: break; } return 0; } /* ?:0, *:1, +:2, ??:3, *?:4, +?:5 */ static int quantifier_type_num(QuantNode* q) { if (q->greedy) { if (q->lower == 0) { if (q->upper == 1) return 0; else if (IS_INFINITE_REPEAT(q->upper)) return 1; } else if (q->lower == 1) { if (IS_INFINITE_REPEAT(q->upper)) return 2; } } else { if (q->lower == 0) { if (q->upper == 1) return 3; else if (IS_INFINITE_REPEAT(q->upper)) return 4; } else if (q->lower == 1) { if (IS_INFINITE_REPEAT(q->upper)) return 5; } } return -1; } enum ReduceType { RQ_ASIS = 0, /* as is */ RQ_DEL = 1, /* delete parent */ RQ_A, /* to '*' */ RQ_AQ, /* to '*?' */ RQ_QQ, /* to '??' */ RQ_P_QQ, /* to '+)??' */ RQ_PQ_Q /* to '+?)?' */ }; static enum ReduceType ReduceTypeTable[6][6] = { {RQ_DEL, RQ_A, RQ_A, RQ_QQ, RQ_AQ, RQ_ASIS}, /* '?' */ {RQ_DEL, RQ_DEL, RQ_DEL, RQ_P_QQ, RQ_P_QQ, RQ_DEL}, /* '*' */ {RQ_A, RQ_A, RQ_DEL, RQ_ASIS, RQ_P_QQ, RQ_DEL}, /* '+' */ {RQ_DEL, RQ_AQ, RQ_AQ, RQ_DEL, RQ_AQ, RQ_AQ}, /* '??' */ {RQ_DEL, RQ_DEL, RQ_DEL, RQ_DEL, RQ_DEL, RQ_DEL}, /* '*?' */ {RQ_ASIS, RQ_PQ_Q, RQ_DEL, RQ_AQ, RQ_AQ, RQ_DEL} /* '+?' 
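   The table is indexed as ReduceTypeTable[inner][outer]: the nested (inner)
   quantifier selects the row, the enclosing (outer) one the column.  For
   example, an inner '*' under an outer '*' yields RQ_DEL, so (a*)* is
   reduced to a*; an inner '+' under an outer '?' yields RQ_A, so (a+)?
   becomes a*.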
*/ }; extern void onig_reduce_nested_quantifier(Node* pnode, Node* cnode) { int pnum, cnum; QuantNode *p, *c; p = QUANT_(pnode); c = QUANT_(cnode); pnum = quantifier_type_num(p); cnum = quantifier_type_num(c); if (pnum < 0 || cnum < 0) { if ((p->lower == p->upper) && ! IS_INFINITE_REPEAT(p->upper)) { if ((c->lower == c->upper) && ! IS_INFINITE_REPEAT(c->upper)) { int n = onig_positive_int_multiply(p->lower, c->lower); if (n >= 0) { p->lower = p->upper = n; NODE_BODY(pnode) = NODE_BODY(cnode); goto remove_cnode; } } } return ; } switch(ReduceTypeTable[cnum][pnum]) { case RQ_DEL: *pnode = *cnode; break; case RQ_A: NODE_BODY(pnode) = NODE_BODY(cnode); p->lower = 0; p->upper = INFINITE_REPEAT; p->greedy = 1; break; case RQ_AQ: NODE_BODY(pnode) = NODE_BODY(cnode); p->lower = 0; p->upper = INFINITE_REPEAT; p->greedy = 0; break; case RQ_QQ: NODE_BODY(pnode) = NODE_BODY(cnode); p->lower = 0; p->upper = 1; p->greedy = 0; break; case RQ_P_QQ: NODE_BODY(pnode) = cnode; p->lower = 0; p->upper = 1; p->greedy = 0; c->lower = 1; c->upper = INFINITE_REPEAT; c->greedy = 1; return ; break; case RQ_PQ_Q: NODE_BODY(pnode) = cnode; p->lower = 0; p->upper = 1; p->greedy = 1; c->lower = 1; c->upper = INFINITE_REPEAT; c->greedy = 0; return ; break; case RQ_ASIS: NODE_BODY(pnode) = cnode; return ; break; } remove_cnode: NODE_BODY(cnode) = NULL_NODE; onig_node_free(cnode); } static int node_new_general_newline(Node** node, ScanEnv* env) { int r; int dlen, alen; UChar buf[ONIGENC_CODE_TO_MBC_MAXLEN * 2]; Node* crnl; Node* ncc; Node* x; CClassNode* cc; dlen = ONIGENC_CODE_TO_MBC(env->enc, 0x0d, buf); if (dlen < 0) return dlen; alen = ONIGENC_CODE_TO_MBC(env->enc, 0x0a, buf + dlen); if (alen < 0) return alen; crnl = node_new_str_raw(buf, buf + dlen + alen); CHECK_NULL_RETURN_MEMERR(crnl); ncc = node_new_cclass(); if (IS_NULL(ncc)) goto err2; cc = CCLASS_(ncc); if (dlen == 1) { bitset_set_range(cc->bs, 0x0a, 0x0d); } else { r = add_code_range(&(cc->mbuf), env, 0x0a, 0x0d); if (r != 0) { err1: onig_node_free(ncc); err2: onig_node_free(crnl); return ONIGERR_MEMORY; } } if (ONIGENC_IS_UNICODE_ENCODING(env->enc)) { r = add_code_range(&(cc->mbuf), env, 0x85, 0x85); if (r != 0) goto err1; r = add_code_range(&(cc->mbuf), env, 0x2028, 0x2029); if (r != 0) goto err1; } x = node_new_bag_if_else(crnl, 0, ncc); if (IS_NULL(x)) goto err1; *node = x; return 0; } enum TokenSyms { TK_EOT = 0, /* end of token */ TK_RAW_BYTE = 1, TK_CHAR, TK_STRING, TK_CODE_POINT, TK_ANYCHAR, TK_CHAR_TYPE, TK_BACKREF, TK_CALL, TK_ANCHOR, TK_REPEAT, TK_INTERVAL, TK_ANYCHAR_ANYTIME, /* SQL '%' == .* */ TK_ALT, TK_SUBEXP_OPEN, TK_SUBEXP_CLOSE, TK_CC_OPEN, TK_QUOTE_OPEN, TK_CHAR_PROPERTY, /* \p{...}, \P{...} */ TK_KEEP, /* \K */ TK_GENERAL_NEWLINE, /* \R */ TK_NO_NEWLINE, /* \N */ TK_TRUE_ANYCHAR, /* \O */ TK_TEXT_SEGMENT, /* \X */ /* in cc */ TK_CC_CLOSE, TK_CC_RANGE, TK_POSIX_BRACKET_OPEN, TK_CC_AND, /* && */ TK_CC_CC_OPEN /* [ */ }; typedef struct { enum TokenSyms type; int escaped; int base; /* is number: 8, 16 (used in [....]) */ UChar* backp; union { UChar* s; int c; OnigCodePoint code; int anchor; int subtype; struct { int lower; int upper; int greedy; int possessive; } repeat; struct { int num; int ref1; int* refs; int by_name; #ifdef USE_BACKREF_WITH_LEVEL int exist_level; int level; /* \k<name+n> */ #endif } backref; struct { UChar* name; UChar* name_end; int gnum; int by_number; } call; struct { int ctype; int not; } prop; } u; } PToken; static int fetch_interval_quantifier(UChar** src, UChar* end, PToken* tok, ScanEnv* env) { int low, up, 
syn_allow, non_low = 0;
  int r = 0;
  OnigCodePoint c;
  OnigEncoding enc = env->enc;
  UChar* p = *src;
  PFETCH_READY;

  syn_allow = IS_SYNTAX_BV(env->syntax, ONIG_SYN_ALLOW_INVALID_INTERVAL);

  if (PEND) {
    if (syn_allow)
      return 1;  /* "....{" : OK! */
    else
      return ONIGERR_END_PATTERN_AT_LEFT_BRACE;  /* "....{" syntax error */
  }

  if (! syn_allow) {
    c = PPEEK;
    if (c == ')' || c == '(' || c == '|') {
      return ONIGERR_END_PATTERN_AT_LEFT_BRACE;
    }
  }

  low = onig_scan_unsigned_number(&p, end, env->enc);
  if (low < 0) return ONIGERR_TOO_BIG_NUMBER_FOR_REPEAT_RANGE;
  if (low > ONIG_MAX_REPEAT_NUM)
    return ONIGERR_TOO_BIG_NUMBER_FOR_REPEAT_RANGE;

  if (p == *src) { /* can't read low */
    if (IS_SYNTAX_BV(env->syntax, ONIG_SYN_ALLOW_INTERVAL_LOW_ABBREV)) {
      /* allow {,n} as {0,n} */
      low = 0;
      non_low = 1;
    }
    else
      goto invalid;
  }

  if (PEND) goto invalid;
  PFETCH(c);
  if (c == ',') {
    UChar* prev = p;
    up = onig_scan_unsigned_number(&p, end, env->enc);
    if (up < 0) return ONIGERR_TOO_BIG_NUMBER_FOR_REPEAT_RANGE;
    if (up > ONIG_MAX_REPEAT_NUM)
      return ONIGERR_TOO_BIG_NUMBER_FOR_REPEAT_RANGE;

    if (p == prev) {
      if (non_low != 0) goto invalid;
      up = INFINITE_REPEAT;  /* {n,} : {n,infinite} */
    }
  }
  else {
    if (non_low != 0) goto invalid;
    PUNFETCH;
    up = low;  /* {n} : exact n times */
    r = 2;     /* fixed */
  }

  if (PEND) goto invalid;
  PFETCH(c);
  if (IS_SYNTAX_OP(env->syntax, ONIG_SYN_OP_ESC_BRACE_INTERVAL)) {
    /* also fail if the pattern ends here: the PFETCH below must not read
       past 'end' */
    if (c != MC_ESC(env->syntax) || PEND) goto invalid;
    PFETCH(c);
  }
  if (c != '}') goto invalid;

  if (!IS_INFINITE_REPEAT(up) && low > up) {
    /* {n,m}+ supported case */
    if (IS_SYNTAX_OP2(env->syntax, ONIG_SYN_OP2_PLUS_POSSESSIVE_INTERVAL))
      return ONIGERR_UPPER_SMALLER_THAN_LOWER_IN_REPEAT_RANGE;

    tok->u.repeat.possessive = 1;
    { int tmp; tmp = low; low = up; up = tmp; }
  }
  else
    tok->u.repeat.possessive = 0;

  tok->type = TK_INTERVAL;
  tok->u.repeat.lower = low;
  tok->u.repeat.upper = up;
  *src = p;
  return r;  /* 0: normal {n,m}, 2: fixed {n} */

 invalid:
  if (syn_allow) {
    /* *src = p; */ /* !!! Don't do this line !!! */
    return 1;  /* OK */
  }
  else
    return ONIGERR_INVALID_REPEAT_RANGE_PATTERN;
}

/* \M-, \C-, \c, or \... 
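   Roughly: \M-x sets the 0x80 (meta) bit on x, \C-x and \cx mask x with
   0x9f to yield a control code (\c? gives 0177), and any other escape
   falls through to conv_backslash_value().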
*/ static int fetch_escaped_value(UChar** src, UChar* end, ScanEnv* env, OnigCodePoint* val) { int v; OnigCodePoint c; OnigEncoding enc = env->enc; UChar* p = *src; if (PEND) return ONIGERR_END_PATTERN_AT_ESCAPE; PFETCH_S(c); switch (c) { case 'M': if (IS_SYNTAX_OP2(env->syntax, ONIG_SYN_OP2_ESC_CAPITAL_M_BAR_META)) { if (PEND) return ONIGERR_END_PATTERN_AT_META; PFETCH_S(c); if (c != '-') return ONIGERR_META_CODE_SYNTAX; if (PEND) return ONIGERR_END_PATTERN_AT_META; PFETCH_S(c); if (c == MC_ESC(env->syntax)) { v = fetch_escaped_value(&p, end, env, &c); if (v < 0) return v; } c = ((c & 0xff) | 0x80); } else goto backslash; break; case 'C': if (IS_SYNTAX_OP2(env->syntax, ONIG_SYN_OP2_ESC_CAPITAL_C_BAR_CONTROL)) { if (PEND) return ONIGERR_END_PATTERN_AT_CONTROL; PFETCH_S(c); if (c != '-') return ONIGERR_CONTROL_CODE_SYNTAX; goto control; } else goto backslash; case 'c': if (IS_SYNTAX_OP(env->syntax, ONIG_SYN_OP_ESC_C_CONTROL)) { control: if (PEND) return ONIGERR_END_PATTERN_AT_CONTROL; PFETCH_S(c); if (c == '?') { c = 0177; } else { if (c == MC_ESC(env->syntax)) { v = fetch_escaped_value(&p, end, env, &c); if (v < 0) return v; } c &= 0x9f; } break; } /* fall through */ default: { backslash: c = conv_backslash_value(c, env); } break; } *src = p; *val = c; return 0; } static int fetch_token(PToken* tok, UChar** src, UChar* end, ScanEnv* env); static OnigCodePoint get_name_end_code_point(OnigCodePoint start) { switch (start) { case '<': return (OnigCodePoint )'>'; break; case '\'': return (OnigCodePoint )'\''; break; case '(': return (OnigCodePoint )')'; break; default: break; } return (OnigCodePoint )0; } enum REF_NUM { IS_NOT_NUM = 0, IS_ABS_NUM = 1, IS_REL_NUM = 2 }; #ifdef USE_BACKREF_WITH_LEVEL /* \k<name+n>, \k<name-n> \k<num+n>, \k<num-n> \k<-num+n>, \k<-num-n> \k<+num+n>, \k<+num-n> */ static int fetch_name_with_level(OnigCodePoint start_code, UChar** src, UChar* end, UChar** rname_end, ScanEnv* env, int* rback_num, int* rlevel, enum REF_NUM* num_type) { int r, sign, exist_level; int digit_count; OnigCodePoint end_code; OnigCodePoint c = 0; OnigEncoding enc = env->enc; UChar *name_end; UChar *pnum_head; UChar *p = *src; PFETCH_READY; *rback_num = 0; exist_level = 0; *num_type = IS_NOT_NUM; sign = 1; pnum_head = *src; end_code = get_name_end_code_point(start_code); digit_count = 0; name_end = end; r = 0; if (PEND) { return ONIGERR_EMPTY_GROUP_NAME; } else { PFETCH(c); if (c == end_code) return ONIGERR_EMPTY_GROUP_NAME; if (IS_CODE_DIGIT_ASCII(enc, c)) { *num_type = IS_ABS_NUM; digit_count++; } else if (c == '-') { *num_type = IS_REL_NUM; sign = -1; pnum_head = p; } else if (c == '+') { *num_type = IS_REL_NUM; sign = 1; pnum_head = p; } else if (!ONIGENC_IS_CODE_WORD(enc, c)) { r = ONIGERR_INVALID_CHAR_IN_GROUP_NAME; } } while (!PEND) { name_end = p; PFETCH(c); if (c == end_code || c == ')' || c == '+' || c == '-') { if (*num_type != IS_NOT_NUM && digit_count == 0) r = ONIGERR_INVALID_GROUP_NAME; break; } if (*num_type != IS_NOT_NUM) { if (IS_CODE_DIGIT_ASCII(enc, c)) { digit_count++; } else { r = ONIGERR_INVALID_GROUP_NAME; *num_type = IS_NOT_NUM; } } else if (!ONIGENC_IS_CODE_WORD(enc, c)) { r = ONIGERR_INVALID_CHAR_IN_GROUP_NAME; } } if (r == 0 && c != end_code) { if (c == '+' || c == '-') { int level; int flag = (c == '-' ? -1 : 1); if (PEND) { r = ONIGERR_INVALID_CHAR_IN_GROUP_NAME; goto end; } PFETCH(c); if (! 
IS_CODE_DIGIT_ASCII(enc, c)) goto err; PUNFETCH; level = onig_scan_unsigned_number(&p, end, enc); if (level < 0) return ONIGERR_TOO_BIG_NUMBER; *rlevel = (level * flag); exist_level = 1; if (!PEND) { PFETCH(c); if (c == end_code) goto end; } } err: name_end = end; err2: r = ONIGERR_INVALID_GROUP_NAME; } end: if (r == 0) { if (*num_type != IS_NOT_NUM) { *rback_num = onig_scan_unsigned_number(&pnum_head, name_end, enc); if (*rback_num < 0) return ONIGERR_TOO_BIG_NUMBER; else if (*rback_num == 0) { if (*num_type == IS_REL_NUM) goto err2; } *rback_num *= sign; } *rname_end = name_end; *src = p; return (exist_level ? 1 : 0); } else { onig_scan_env_set_error_string(env, r, *src, name_end); return r; } } #endif /* USE_BACKREF_WITH_LEVEL */ /* ref: 0 -> define name (don't allow number name) 1 -> reference name (allow number name) */ static int fetch_name(OnigCodePoint start_code, UChar** src, UChar* end, UChar** rname_end, ScanEnv* env, int* rback_num, enum REF_NUM* num_type, int ref) { int r, sign; int digit_count; OnigCodePoint end_code; OnigCodePoint c = 0; OnigEncoding enc = env->enc; UChar *name_end; UChar *pnum_head; UChar *p = *src; *rback_num = 0; end_code = get_name_end_code_point(start_code); digit_count = 0; name_end = end; pnum_head = *src; r = 0; *num_type = IS_NOT_NUM; sign = 1; if (PEND) { return ONIGERR_EMPTY_GROUP_NAME; } else { PFETCH_S(c); if (c == end_code) return ONIGERR_EMPTY_GROUP_NAME; if (IS_CODE_DIGIT_ASCII(enc, c)) { if (ref == 1) *num_type = IS_ABS_NUM; else { r = ONIGERR_INVALID_GROUP_NAME; } digit_count++; } else if (c == '-') { if (ref == 1) { *num_type = IS_REL_NUM; sign = -1; pnum_head = p; } else { r = ONIGERR_INVALID_GROUP_NAME; } } else if (c == '+') { if (ref == 1) { *num_type = IS_REL_NUM; sign = 1; pnum_head = p; } else { r = ONIGERR_INVALID_GROUP_NAME; } } else if (!ONIGENC_IS_CODE_WORD(enc, c)) { r = ONIGERR_INVALID_CHAR_IN_GROUP_NAME; } } if (r == 0) { while (!PEND) { name_end = p; PFETCH_S(c); if (c == end_code || c == ')') { if (*num_type != IS_NOT_NUM && digit_count == 0) r = ONIGERR_INVALID_GROUP_NAME; break; } if (*num_type != IS_NOT_NUM) { if (IS_CODE_DIGIT_ASCII(enc, c)) { digit_count++; } else { if (!ONIGENC_IS_CODE_WORD(enc, c)) r = ONIGERR_INVALID_CHAR_IN_GROUP_NAME; else r = ONIGERR_INVALID_GROUP_NAME; *num_type = IS_NOT_NUM; } } else { if (!ONIGENC_IS_CODE_WORD(enc, c)) { r = ONIGERR_INVALID_CHAR_IN_GROUP_NAME; } } } if (c != end_code) { r = ONIGERR_INVALID_GROUP_NAME; goto err; } if (*num_type != IS_NOT_NUM) { *rback_num = onig_scan_unsigned_number(&pnum_head, name_end, enc); if (*rback_num < 0) return ONIGERR_TOO_BIG_NUMBER; else if (*rback_num == 0) { if (*num_type == IS_REL_NUM) { r = ONIGERR_INVALID_GROUP_NAME; goto err; } } *rback_num *= sign; } *rname_end = name_end; *src = p; return 0; } else { while (!PEND) { name_end = p; PFETCH_S(c); if (c == end_code || c == ')') break; } if (PEND) name_end = end; err: onig_scan_env_set_error_string(env, r, *src, name_end); return r; } } static void CC_ESC_WARN(ScanEnv* env, UChar *c) { if (onig_warn == onig_null_warn) return ; if (IS_SYNTAX_BV(env->syntax, ONIG_SYN_WARN_CC_OP_NOT_ESCAPED) && IS_SYNTAX_BV(env->syntax, ONIG_SYN_BACKSLASH_ESCAPE_IN_CC)) { UChar buf[WARN_BUFSIZE]; onig_snprintf_with_pattern(buf, WARN_BUFSIZE, env->enc, env->pattern, env->pattern_end, (UChar* )"character class has '%s' without escape", c); (*onig_warn)((char* )buf); } } static void CLOSE_BRACKET_WITHOUT_ESC_WARN(ScanEnv* env, UChar* c) { if (onig_warn == onig_null_warn) return ; if (IS_SYNTAX_BV((env)->syntax, 
ONIG_SYN_WARN_CC_OP_NOT_ESCAPED)) { UChar buf[WARN_BUFSIZE]; onig_snprintf_with_pattern(buf, WARN_BUFSIZE, (env)->enc, (env)->pattern, (env)->pattern_end, (UChar* )"regular expression has '%s' without escape", c); (*onig_warn)((char* )buf); } } static UChar* find_str_position(OnigCodePoint s[], int n, UChar* from, UChar* to, UChar **next, OnigEncoding enc) { int i; OnigCodePoint x; UChar *q; UChar *p = from; while (p < to) { x = ONIGENC_MBC_TO_CODE(enc, p, to); q = p + enclen(enc, p); if (x == s[0]) { for (i = 1; i < n && q < to; i++) { x = ONIGENC_MBC_TO_CODE(enc, q, to); if (x != s[i]) break; q += enclen(enc, q); } if (i >= n) { if (IS_NOT_NULL(next)) *next = q; return p; } } p = q; } return NULL_UCHARP; } static int str_exist_check_with_esc(OnigCodePoint s[], int n, UChar* from, UChar* to, OnigCodePoint bad, OnigEncoding enc, OnigSyntaxType* syn) { int i, in_esc; OnigCodePoint x; UChar *q; UChar *p = from; in_esc = 0; while (p < to) { if (in_esc) { in_esc = 0; p += enclen(enc, p); } else { x = ONIGENC_MBC_TO_CODE(enc, p, to); q = p + enclen(enc, p); if (x == s[0]) { for (i = 1; i < n && q < to; i++) { x = ONIGENC_MBC_TO_CODE(enc, q, to); if (x != s[i]) break; q += enclen(enc, q); } if (i >= n) return 1; p += enclen(enc, p); } else { x = ONIGENC_MBC_TO_CODE(enc, p, to); if (x == bad) return 0; else if (x == MC_ESC(syn)) in_esc = 1; p = q; } } } return 0; } static int fetch_token_in_cc(PToken* tok, UChar** src, UChar* end, ScanEnv* env) { int num; OnigCodePoint c, c2; OnigSyntaxType* syn = env->syntax; OnigEncoding enc = env->enc; UChar* prev; UChar* p = *src; PFETCH_READY; if (PEND) { tok->type = TK_EOT; return tok->type; } PFETCH(c); tok->type = TK_CHAR; tok->base = 0; tok->u.c = c; tok->escaped = 0; if (c == ']') { tok->type = TK_CC_CLOSE; } else if (c == '-') { tok->type = TK_CC_RANGE; } else if (c == MC_ESC(syn)) { if (! IS_SYNTAX_BV(syn, ONIG_SYN_BACKSLASH_ESCAPE_IN_CC)) goto end; if (PEND) return ONIGERR_END_PATTERN_AT_ESCAPE; PFETCH(c); tok->escaped = 1; tok->u.c = c; switch (c) { case 'w': tok->type = TK_CHAR_TYPE; tok->u.prop.ctype = ONIGENC_CTYPE_WORD; tok->u.prop.not = 0; break; case 'W': tok->type = TK_CHAR_TYPE; tok->u.prop.ctype = ONIGENC_CTYPE_WORD; tok->u.prop.not = 1; break; case 'd': tok->type = TK_CHAR_TYPE; tok->u.prop.ctype = ONIGENC_CTYPE_DIGIT; tok->u.prop.not = 0; break; case 'D': tok->type = TK_CHAR_TYPE; tok->u.prop.ctype = ONIGENC_CTYPE_DIGIT; tok->u.prop.not = 1; break; case 's': tok->type = TK_CHAR_TYPE; tok->u.prop.ctype = ONIGENC_CTYPE_SPACE; tok->u.prop.not = 0; break; case 'S': tok->type = TK_CHAR_TYPE; tok->u.prop.ctype = ONIGENC_CTYPE_SPACE; tok->u.prop.not = 1; break; case 'h': if (! IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_H_XDIGIT)) break; tok->type = TK_CHAR_TYPE; tok->u.prop.ctype = ONIGENC_CTYPE_XDIGIT; tok->u.prop.not = 0; break; case 'H': if (! 
IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_H_XDIGIT)) break; tok->type = TK_CHAR_TYPE; tok->u.prop.ctype = ONIGENC_CTYPE_XDIGIT; tok->u.prop.not = 1; break; case 'p': case 'P': if (PEND) break; c2 = PPEEK; if (c2 == '{' && IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_P_BRACE_CHAR_PROPERTY)) { PINC; tok->type = TK_CHAR_PROPERTY; tok->u.prop.not = c == 'P'; if (!PEND && IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_P_BRACE_CIRCUMFLEX_NOT)) { PFETCH(c2); if (c2 == '^') { tok->u.prop.not = tok->u.prop.not == 0; } else PUNFETCH; } } break; case 'o': if (PEND) break; prev = p; if (PPEEK_IS('{') && IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_O_BRACE_OCTAL)) { PINC; num = scan_unsigned_octal_number(&p, end, 11, enc); if (num < 0) return ONIGERR_TOO_BIG_WIDE_CHAR_VALUE; if (!PEND) { c2 = PPEEK; if (IS_CODE_DIGIT_ASCII(enc, c2)) return ONIGERR_TOO_LONG_WIDE_CHAR_VALUE; } if (p > prev + enclen(enc, prev) && !PEND && (PPEEK_IS('}'))) { PINC; tok->type = TK_CODE_POINT; tok->base = 8; tok->u.code = (OnigCodePoint )num; } else { /* can't read nothing or invalid format */ p = prev; } } break; case 'x': if (PEND) break; prev = p; if (PPEEK_IS('{') && IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_X_BRACE_HEX8)) { PINC; num = scan_unsigned_hexadecimal_number(&p, end, 0, 8, enc); if (num < 0) { if (num == ONIGERR_TOO_BIG_NUMBER) return ONIGERR_TOO_BIG_WIDE_CHAR_VALUE; else return num; } if (!PEND) { c2 = PPEEK; if (IS_CODE_XDIGIT_ASCII(enc, c2)) return ONIGERR_TOO_LONG_WIDE_CHAR_VALUE; } if (p > prev + enclen(enc, prev) && !PEND && (PPEEK_IS('}'))) { PINC; tok->type = TK_CODE_POINT; tok->base = 16; tok->u.code = (OnigCodePoint )num; } else { /* can't read nothing or invalid format */ p = prev; } } else if (IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_X_HEX2)) { num = scan_unsigned_hexadecimal_number(&p, end, 0, 2, enc); if (num < 0) return num; if (p == prev) { /* can't read nothing. */ num = 0; /* but, it's not error */ } tok->type = TK_RAW_BYTE; tok->base = 16; tok->u.c = num; } break; case 'u': if (PEND) break; prev = p; if (IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_U_HEX4)) { num = scan_unsigned_hexadecimal_number(&p, end, 4, 4, enc); if (num < 0) return num; if (p == prev) { /* can't read nothing. */ num = 0; /* but, it's not error */ } tok->type = TK_CODE_POINT; tok->base = 16; tok->u.code = (OnigCodePoint )num; } break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': if (IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_OCTAL3)) { PUNFETCH; prev = p; num = scan_unsigned_octal_number(&p, end, 3, enc); if (num < 0 || num >= 256) return ONIGERR_TOO_BIG_NUMBER; if (p == prev) { /* can't read nothing. 
*/ num = 0; /* but, it's not error */ } tok->type = TK_RAW_BYTE; tok->base = 8; tok->u.c = num; } break; default: PUNFETCH; num = fetch_escaped_value(&p, end, env, &c2); if (num < 0) return num; if (tok->u.c != c2) { tok->u.code = c2; tok->type = TK_CODE_POINT; } break; } } else if (c == '[') { if (IS_SYNTAX_OP(syn, ONIG_SYN_OP_POSIX_BRACKET) && (PPEEK_IS(':'))) { OnigCodePoint send[] = { (OnigCodePoint )':', (OnigCodePoint )']' }; tok->backp = p; /* point at '[' is read */ PINC; if (str_exist_check_with_esc(send, 2, p, end, (OnigCodePoint )']', enc, syn)) { tok->type = TK_POSIX_BRACKET_OPEN; } else { PUNFETCH; goto cc_in_cc; } } else { cc_in_cc: if (IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_CCLASS_SET_OP)) { tok->type = TK_CC_CC_OPEN; } else { CC_ESC_WARN(env, (UChar* )"["); } } } else if (c == '&') { if (IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_CCLASS_SET_OP) && !PEND && (PPEEK_IS('&'))) { PINC; tok->type = TK_CC_AND; } } end: *src = p; return tok->type; } static int fetch_token(PToken* tok, UChar** src, UChar* end, ScanEnv* env) { int r, num; OnigCodePoint c; OnigEncoding enc = env->enc; OnigSyntaxType* syn = env->syntax; UChar* prev; UChar* p = *src; PFETCH_READY; start: if (PEND) { tok->type = TK_EOT; return tok->type; } tok->type = TK_STRING; tok->base = 0; tok->backp = p; PFETCH(c); if (IS_MC_ESC_CODE(c, syn)) { if (PEND) return ONIGERR_END_PATTERN_AT_ESCAPE; tok->backp = p; PFETCH(c); tok->u.c = c; tok->escaped = 1; switch (c) { case '*': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_ASTERISK_ZERO_INF)) break; tok->type = TK_REPEAT; tok->u.repeat.lower = 0; tok->u.repeat.upper = INFINITE_REPEAT; goto greedy_check; break; case '+': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_PLUS_ONE_INF)) break; tok->type = TK_REPEAT; tok->u.repeat.lower = 1; tok->u.repeat.upper = INFINITE_REPEAT; goto greedy_check; break; case '?': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_QMARK_ZERO_ONE)) break; tok->type = TK_REPEAT; tok->u.repeat.lower = 0; tok->u.repeat.upper = 1; greedy_check: tok->u.repeat.possessive = 0; greedy_check2: if (!PEND && PPEEK_IS('?') && IS_SYNTAX_OP(syn, ONIG_SYN_OP_QMARK_NON_GREEDY) && tok->u.repeat.possessive == 0) { PFETCH(c); tok->u.repeat.greedy = 0; tok->u.repeat.possessive = 0; } else { possessive_check: tok->u.repeat.greedy = 1; if (!PEND && PPEEK_IS('+') && ((IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_PLUS_POSSESSIVE_REPEAT) && tok->type != TK_INTERVAL) || (IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_PLUS_POSSESSIVE_INTERVAL) && tok->type == TK_INTERVAL)) && tok->u.repeat.possessive == 0) { PFETCH(c); tok->u.repeat.possessive = 1; } } break; case '{': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_BRACE_INTERVAL)) break; r = fetch_interval_quantifier(&p, end, tok, env); if (r < 0) return r; /* error */ if (r == 0) goto greedy_check2; else if (r == 2) { /* {n} */ if (IS_SYNTAX_BV(syn, ONIG_SYN_FIXED_INTERVAL_IS_GREEDY_ONLY)) goto possessive_check; goto greedy_check2; } /* r == 1 : normal char */ break; case '|': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_VBAR_ALT)) break; tok->type = TK_ALT; break; case '(': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_LPAREN_SUBEXP)) break; tok->type = TK_SUBEXP_OPEN; break; case ')': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_LPAREN_SUBEXP)) break; tok->type = TK_SUBEXP_CLOSE; break; case 'w': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_W_WORD)) break; tok->type = TK_CHAR_TYPE; tok->u.prop.ctype = ONIGENC_CTYPE_WORD; tok->u.prop.not = 0; break; case 'W': if (! 
IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_W_WORD)) break; tok->type = TK_CHAR_TYPE; tok->u.prop.ctype = ONIGENC_CTYPE_WORD; tok->u.prop.not = 1; break; case 'b': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_B_WORD_BOUND)) break; tok->type = TK_ANCHOR; tok->u.anchor = ANCR_WORD_BOUNDARY; break; case 'B': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_B_WORD_BOUND)) break; tok->type = TK_ANCHOR; tok->u.anchor = ANCR_NO_WORD_BOUNDARY; break; case 'y': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP2_ESC_X_Y_TEXT_SEGMENT)) break; tok->type = TK_ANCHOR; tok->u.anchor = ANCR_TEXT_SEGMENT_BOUNDARY; break; case 'Y': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP2_ESC_X_Y_TEXT_SEGMENT)) break; tok->type = TK_ANCHOR; tok->u.anchor = ANCR_NO_TEXT_SEGMENT_BOUNDARY; break; #ifdef USE_WORD_BEGIN_END case '<': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_LTGT_WORD_BEGIN_END)) break; tok->type = TK_ANCHOR; tok->u.anchor = ANCR_WORD_BEGIN; break; case '>': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_LTGT_WORD_BEGIN_END)) break; tok->type = TK_ANCHOR; tok->u.anchor = ANCR_WORD_END; break; #endif case 's': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_S_WHITE_SPACE)) break; tok->type = TK_CHAR_TYPE; tok->u.prop.ctype = ONIGENC_CTYPE_SPACE; tok->u.prop.not = 0; break; case 'S': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_S_WHITE_SPACE)) break; tok->type = TK_CHAR_TYPE; tok->u.prop.ctype = ONIGENC_CTYPE_SPACE; tok->u.prop.not = 1; break; case 'd': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_D_DIGIT)) break; tok->type = TK_CHAR_TYPE; tok->u.prop.ctype = ONIGENC_CTYPE_DIGIT; tok->u.prop.not = 0; break; case 'D': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_D_DIGIT)) break; tok->type = TK_CHAR_TYPE; tok->u.prop.ctype = ONIGENC_CTYPE_DIGIT; tok->u.prop.not = 1; break; case 'h': if (! IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_H_XDIGIT)) break; tok->type = TK_CHAR_TYPE; tok->u.prop.ctype = ONIGENC_CTYPE_XDIGIT; tok->u.prop.not = 0; break; case 'H': if (! IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_H_XDIGIT)) break; tok->type = TK_CHAR_TYPE; tok->u.prop.ctype = ONIGENC_CTYPE_XDIGIT; tok->u.prop.not = 1; break; case 'K': if (! IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_CAPITAL_K_KEEP)) break; tok->type = TK_KEEP; break; case 'R': if (! IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_CAPITAL_R_GENERAL_NEWLINE)) break; tok->type = TK_GENERAL_NEWLINE; break; case 'N': if (! IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_CAPITAL_N_O_SUPER_DOT)) break; tok->type = TK_NO_NEWLINE; break; case 'O': if (! IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_CAPITAL_N_O_SUPER_DOT)) break; tok->type = TK_TRUE_ANYCHAR; break; case 'X': if (! IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_X_Y_TEXT_SEGMENT)) break; tok->type = TK_TEXT_SEGMENT; break; case 'A': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_AZ_BUF_ANCHOR)) break; begin_buf: tok->type = TK_ANCHOR; tok->u.subtype = ANCR_BEGIN_BUF; break; case 'Z': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_AZ_BUF_ANCHOR)) break; tok->type = TK_ANCHOR; tok->u.subtype = ANCR_SEMI_END_BUF; break; case 'z': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_AZ_BUF_ANCHOR)) break; end_buf: tok->type = TK_ANCHOR; tok->u.subtype = ANCR_END_BUF; break; case 'G': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_CAPITAL_G_BEGIN_ANCHOR)) break; tok->type = TK_ANCHOR; tok->u.subtype = ANCR_BEGIN_POSITION; break; case '`': if (! IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_GNU_BUF_ANCHOR)) break; goto begin_buf; break; case '\'': if (! 
IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_GNU_BUF_ANCHOR)) break; goto end_buf; break; case 'o': if (PEND) break; prev = p; if (PPEEK_IS('{') && IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_O_BRACE_OCTAL)) { PINC; num = scan_unsigned_octal_number(&p, end, 11, enc); if (num < 0) return ONIGERR_TOO_BIG_WIDE_CHAR_VALUE; if (!PEND) { if (IS_CODE_DIGIT_ASCII(enc, PPEEK)) return ONIGERR_TOO_LONG_WIDE_CHAR_VALUE; } if ((p > prev + enclen(enc, prev)) && !PEND && PPEEK_IS('}')) { PINC; tok->type = TK_CODE_POINT; tok->u.code = (OnigCodePoint )num; } else { /* can't read nothing or invalid format */ p = prev; } } break; case 'x': if (PEND) break; prev = p; if (PPEEK_IS('{') && IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_X_BRACE_HEX8)) { PINC; num = scan_unsigned_hexadecimal_number(&p, end, 0, 8, enc); if (num < 0) { if (num == ONIGERR_TOO_BIG_NUMBER) return ONIGERR_TOO_BIG_WIDE_CHAR_VALUE; else return num; } if (!PEND) { if (IS_CODE_XDIGIT_ASCII(enc, PPEEK)) return ONIGERR_TOO_LONG_WIDE_CHAR_VALUE; } if ((p > prev + enclen(enc, prev)) && !PEND && PPEEK_IS('}')) { PINC; tok->type = TK_CODE_POINT; tok->u.code = (OnigCodePoint )num; } else { /* can't read nothing or invalid format */ p = prev; } } else if (IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_X_HEX2)) { num = scan_unsigned_hexadecimal_number(&p, end, 0, 2, enc); if (num < 0) return num; if (p == prev) { /* can't read nothing. */ num = 0; /* but, it's not error */ } tok->type = TK_RAW_BYTE; tok->base = 16; tok->u.c = num; } break; case 'u': if (PEND) break; prev = p; if (IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_U_HEX4)) { num = scan_unsigned_hexadecimal_number(&p, end, 4, 4, enc); if (num < 0) return num; if (p == prev) { /* can't read nothing. */ num = 0; /* but, it's not error */ } tok->type = TK_CODE_POINT; tok->base = 16; tok->u.code = (OnigCodePoint )num; } break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': PUNFETCH; prev = p; num = onig_scan_unsigned_number(&p, end, enc); if (num < 0 || num > ONIG_MAX_BACKREF_NUM) { goto skip_backref; } if (IS_SYNTAX_OP(syn, ONIG_SYN_OP_DECIMAL_BACKREF) && (num <= env->num_mem || num <= 9)) { /* This spec. from GNU regex */ if (IS_SYNTAX_BV(syn, ONIG_SYN_STRICT_CHECK_BACKREF)) { if (num > env->num_mem || IS_NULL(SCANENV_MEMENV(env)[num].node)) return ONIGERR_INVALID_BACKREF; } tok->type = TK_BACKREF; tok->u.backref.num = 1; tok->u.backref.ref1 = num; tok->u.backref.by_name = 0; #ifdef USE_BACKREF_WITH_LEVEL tok->u.backref.exist_level = 0; #endif break; } skip_backref: if (c == '8' || c == '9') { /* normal char */ p = prev; PINC; break; } p = prev; /* fall through */ case '0': if (IS_SYNTAX_OP(syn, ONIG_SYN_OP_ESC_OCTAL3)) { prev = p; num = scan_unsigned_octal_number(&p, end, (c == '0' ? 2:3), enc); if (num < 0 || num >= 256) return ONIGERR_TOO_BIG_NUMBER; if (p == prev) { /* can't read nothing. */ num = 0; /* but, it's not error */ } tok->type = TK_RAW_BYTE; tok->base = 8; tok->u.c = num; } else if (c != '0') { PINC; } break; case 'k': if (!PEND && IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_K_NAMED_BACKREF)) { PFETCH(c); if (c == '<' || c == '\'') { UChar* name_end; int* backs; int back_num; enum REF_NUM num_type; prev = p; #ifdef USE_BACKREF_WITH_LEVEL name_end = NULL_UCHARP; /* no need. escape gcc warning. 
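   (fetch_name_with_level() below parses \k<name+n> / \k<name-n>; a return
    value of 1 means an explicit nest level was given.)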
*/ r = fetch_name_with_level((OnigCodePoint )c, &p, end, &name_end, env, &back_num, &tok->u.backref.level, &num_type); if (r == 1) tok->u.backref.exist_level = 1; else tok->u.backref.exist_level = 0; #else r = fetch_name(c, &p, end, &name_end, env, &back_num, &num_type, 1); #endif if (r < 0) return r; if (num_type != IS_NOT_NUM) { if (num_type == IS_REL_NUM) { back_num = backref_rel_to_abs(back_num, env); } if (back_num <= 0) return ONIGERR_INVALID_BACKREF; if (IS_SYNTAX_BV(syn, ONIG_SYN_STRICT_CHECK_BACKREF)) { if (back_num > env->num_mem || IS_NULL(SCANENV_MEMENV(env)[back_num].node)) return ONIGERR_INVALID_BACKREF; } tok->type = TK_BACKREF; tok->u.backref.by_name = 0; tok->u.backref.num = 1; tok->u.backref.ref1 = back_num; } else { num = onig_name_to_group_numbers(env->reg, prev, name_end, &backs); if (num <= 0) { onig_scan_env_set_error_string(env, ONIGERR_UNDEFINED_NAME_REFERENCE, prev, name_end); return ONIGERR_UNDEFINED_NAME_REFERENCE; } if (IS_SYNTAX_BV(syn, ONIG_SYN_STRICT_CHECK_BACKREF)) { int i; for (i = 0; i < num; i++) { if (backs[i] > env->num_mem || IS_NULL(SCANENV_MEMENV(env)[backs[i]].node)) return ONIGERR_INVALID_BACKREF; } } tok->type = TK_BACKREF; tok->u.backref.by_name = 1; if (num == 1) { tok->u.backref.num = 1; tok->u.backref.ref1 = backs[0]; } else { tok->u.backref.num = num; tok->u.backref.refs = backs; } } } else PUNFETCH; } break; #ifdef USE_CALL case 'g': if (!PEND && IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_G_SUBEXP_CALL)) { PFETCH(c); if (c == '<' || c == '\'') { int gnum; UChar* name_end; enum REF_NUM num_type; prev = p; r = fetch_name((OnigCodePoint )c, &p, end, &name_end, env, &gnum, &num_type, 1); if (r < 0) return r; if (num_type != IS_NOT_NUM) { if (num_type == IS_REL_NUM) { gnum = backref_rel_to_abs(gnum, env); if (gnum < 0) { onig_scan_env_set_error_string(env, ONIGERR_UNDEFINED_NAME_REFERENCE, prev, name_end); return ONIGERR_UNDEFINED_GROUP_REFERENCE; } } tok->u.call.by_number = 1; tok->u.call.gnum = gnum; } else { tok->u.call.by_number = 0; tok->u.call.gnum = 0; } tok->type = TK_CALL; tok->u.call.name = prev; tok->u.call.name_end = name_end; } else PUNFETCH; } break; #endif case 'Q': if (IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_CAPITAL_Q_QUOTE)) { tok->type = TK_QUOTE_OPEN; } break; case 'p': case 'P': if (!PEND && PPEEK_IS('{') && IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_P_BRACE_CHAR_PROPERTY)) { PINC; tok->type = TK_CHAR_PROPERTY; tok->u.prop.not = c == 'P'; if (!PEND && IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_ESC_P_BRACE_CIRCUMFLEX_NOT)) { PFETCH(c); if (c == '^') { tok->u.prop.not = tok->u.prop.not == 0; } else PUNFETCH; } } break; default: { OnigCodePoint c2; PUNFETCH; num = fetch_escaped_value(&p, end, env, &c2); if (num < 0) return num; /* set_raw: */ if (tok->u.c != c2) { tok->type = TK_CODE_POINT; tok->u.code = c2; } else { /* string */ p = tok->backp + enclen(enc, tok->backp); } } break; } } else { tok->u.c = c; tok->escaped = 0; #ifdef USE_VARIABLE_META_CHARS if ((c != ONIG_INEFFECTIVE_META_CHAR) && IS_SYNTAX_OP(syn, ONIG_SYN_OP_VARIABLE_META_CHARACTERS)) { if (c == MC_ANYCHAR(syn)) goto any_char; else if (c == MC_ANYTIME(syn)) goto anytime; else if (c == MC_ZERO_OR_ONE_TIME(syn)) goto zero_or_one_time; else if (c == MC_ONE_OR_MORE_TIME(syn)) goto one_or_more_time; else if (c == MC_ANYCHAR_ANYTIME(syn)) { tok->type = TK_ANYCHAR_ANYTIME; goto out; } } #endif switch (c) { case '.': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_DOT_ANYCHAR)) break; #ifdef USE_VARIABLE_META_CHARS any_char: #endif tok->type = TK_ANYCHAR; break; case '*': if (! 
IS_SYNTAX_OP(syn, ONIG_SYN_OP_ASTERISK_ZERO_INF)) break; #ifdef USE_VARIABLE_META_CHARS anytime: #endif tok->type = TK_REPEAT; tok->u.repeat.lower = 0; tok->u.repeat.upper = INFINITE_REPEAT; goto greedy_check; break; case '+': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_PLUS_ONE_INF)) break; #ifdef USE_VARIABLE_META_CHARS one_or_more_time: #endif tok->type = TK_REPEAT; tok->u.repeat.lower = 1; tok->u.repeat.upper = INFINITE_REPEAT; goto greedy_check; break; case '?': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_QMARK_ZERO_ONE)) break; #ifdef USE_VARIABLE_META_CHARS zero_or_one_time: #endif tok->type = TK_REPEAT; tok->u.repeat.lower = 0; tok->u.repeat.upper = 1; goto greedy_check; break; case '{': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_BRACE_INTERVAL)) break; r = fetch_interval_quantifier(&p, end, tok, env); if (r < 0) return r; /* error */ if (r == 0) goto greedy_check2; else if (r == 2) { /* {n} */ if (IS_SYNTAX_BV(syn, ONIG_SYN_FIXED_INTERVAL_IS_GREEDY_ONLY)) goto possessive_check; goto greedy_check2; } /* r == 1 : normal char */ break; case '|': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_VBAR_ALT)) break; tok->type = TK_ALT; break; case '(': if (!PEND && PPEEK_IS('?') && IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_QMARK_GROUP_EFFECT)) { PINC; if (! PEND) { c = PPEEK; if (c == '#') { PFETCH(c); while (1) { if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; PFETCH(c); if (c == MC_ESC(syn)) { if (! PEND) PFETCH(c); } else { if (c == ')') break; } } goto start; } else if (IS_SYNTAX_OP2(syn, ONIG_SYN_OP2_QMARK_PERL_SUBEXP_CALL)) { int gnum; UChar* name; UChar* name_end; enum REF_NUM num_type; switch (c) { case '&': { PINC; name = p; r = fetch_name((OnigCodePoint )'(', &p, end, &name_end, env, &gnum, &num_type, 0); if (r < 0) return r; tok->type = TK_CALL; tok->u.call.by_number = 0; tok->u.call.gnum = 0; tok->u.call.name = name; tok->u.call.name_end = name_end; } break; case 'R': tok->type = TK_CALL; tok->u.call.by_number = 1; tok->u.call.gnum = 0; tok->u.call.name = p; PINC; if (! PPEEK_IS(')')) return ONIGERR_INVALID_GROUP_NAME; tok->u.call.name_end = p; break; case '-': case '+': goto lparen_qmark_num; break; default: if (! ONIGENC_IS_CODE_DIGIT(enc, c)) goto lparen_qmark_end; lparen_qmark_num: { name = p; r = fetch_name((OnigCodePoint )'(', &p, end, &name_end, env, &gnum, &num_type, 1); if (r < 0) return r; if (num_type == IS_NOT_NUM) { return ONIGERR_INVALID_GROUP_NAME; } else { if (num_type == IS_REL_NUM) { gnum = backref_rel_to_abs(gnum, env); if (gnum < 0) { onig_scan_env_set_error_string(env, ONIGERR_UNDEFINED_NAME_REFERENCE, name, name_end); return ONIGERR_UNDEFINED_GROUP_REFERENCE; } } tok->u.call.by_number = 1; tok->u.call.gnum = gnum; } tok->type = TK_CALL; tok->u.call.name = name; tok->u.call.name_end = name_end; } break; } } } lparen_qmark_end: PUNFETCH; } if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_LPAREN_SUBEXP)) break; tok->type = TK_SUBEXP_OPEN; break; case ')': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_LPAREN_SUBEXP)) break; tok->type = TK_SUBEXP_CLOSE; break; case '^': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_LINE_ANCHOR)) break; tok->type = TK_ANCHOR; tok->u.subtype = (IS_SINGLELINE(env->options) ? ANCR_BEGIN_BUF : ANCR_BEGIN_LINE); break; case '$': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_LINE_ANCHOR)) break; tok->type = TK_ANCHOR; tok->u.subtype = (IS_SINGLELINE(env->options) ? ANCR_SEMI_END_BUF : ANCR_END_LINE); break; case '[': if (! IS_SYNTAX_OP(syn, ONIG_SYN_OP_BRACKET_CC)) break; tok->type = TK_CC_OPEN; break; case ']': if (*src > env->pattern) /* /].../ is allowed. 
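   An unescaped ']' that does not close a character class is taken as a
   literal; the warning below only fires when it appears past the start of
   the pattern.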
*/ CLOSE_BRACKET_WITHOUT_ESC_WARN(env, (UChar* )"]"); break; case '#': if (IS_EXTEND(env->options)) { while (!PEND) { PFETCH(c); if (ONIGENC_IS_CODE_NEWLINE(enc, c)) break; } goto start; break; } break; case ' ': case '\t': case '\n': case '\r': case '\f': if (IS_EXTEND(env->options)) goto start; break; default: /* string */ break; } } #ifdef USE_VARIABLE_META_CHARS out: #endif *src = p; return tok->type; } static int add_ctype_to_cc_by_range(CClassNode* cc, int ctype ARG_UNUSED, int not, OnigEncoding enc ARG_UNUSED, OnigCodePoint sb_out, const OnigCodePoint mbr[]) { int i, r; OnigCodePoint j; int n = ONIGENC_CODE_RANGE_NUM(mbr); if (not == 0) { for (i = 0; i < n; i++) { for (j = ONIGENC_CODE_RANGE_FROM(mbr, i); j <= ONIGENC_CODE_RANGE_TO(mbr, i); j++) { if (j >= sb_out) { if (j > ONIGENC_CODE_RANGE_FROM(mbr, i)) { r = add_code_range_to_buf(&(cc->mbuf), j, ONIGENC_CODE_RANGE_TO(mbr, i)); if (r != 0) return r; i++; } goto sb_end; } BITSET_SET_BIT(cc->bs, j); } } sb_end: for ( ; i < n; i++) { r = add_code_range_to_buf(&(cc->mbuf), ONIGENC_CODE_RANGE_FROM(mbr, i), ONIGENC_CODE_RANGE_TO(mbr, i)); if (r != 0) return r; } } else { OnigCodePoint prev = 0; for (i = 0; i < n; i++) { for (j = prev; j < ONIGENC_CODE_RANGE_FROM(mbr, i); j++) { if (j >= sb_out) { goto sb_end2; } BITSET_SET_BIT(cc->bs, j); } prev = ONIGENC_CODE_RANGE_TO(mbr, i) + 1; } for (j = prev; j < sb_out; j++) { BITSET_SET_BIT(cc->bs, j); } sb_end2: prev = sb_out; for (i = 0; i < n; i++) { if (prev < ONIGENC_CODE_RANGE_FROM(mbr, i)) { r = add_code_range_to_buf(&(cc->mbuf), prev, ONIGENC_CODE_RANGE_FROM(mbr, i) - 1); if (r != 0) return r; } prev = ONIGENC_CODE_RANGE_TO(mbr, i) + 1; if (prev == 0) goto end; } r = add_code_range_to_buf(&(cc->mbuf), prev, MAX_CODE_POINT); if (r != 0) return r; } end: return 0; } static int add_ctype_to_cc_by_range_limit(CClassNode* cc, int ctype ARG_UNUSED, int not, OnigEncoding enc ARG_UNUSED, OnigCodePoint sb_out, const OnigCodePoint mbr[], OnigCodePoint limit) { int i, r; OnigCodePoint j; OnigCodePoint from; OnigCodePoint to; int n = ONIGENC_CODE_RANGE_NUM(mbr); if (not == 0) { for (i = 0; i < n; i++) { for (j = ONIGENC_CODE_RANGE_FROM(mbr, i); j <= ONIGENC_CODE_RANGE_TO(mbr, i); j++) { if (j > limit) goto end; if (j >= sb_out) { if (j > ONIGENC_CODE_RANGE_FROM(mbr, i)) { to = ONIGENC_CODE_RANGE_TO(mbr, i); if (to > limit) to = limit; r = add_code_range_to_buf(&(cc->mbuf), j, to); if (r != 0) return r; i++; } goto sb_end; } BITSET_SET_BIT(cc->bs, j); } } sb_end: for ( ; i < n; i++) { from = ONIGENC_CODE_RANGE_FROM(mbr, i); to = ONIGENC_CODE_RANGE_TO(mbr, i); if (from > limit) break; if (to > limit) to = limit; r = add_code_range_to_buf(&(cc->mbuf), from, to); if (r != 0) return r; } } else { OnigCodePoint prev = 0; for (i = 0; i < n; i++) { from = ONIGENC_CODE_RANGE_FROM(mbr, i); if (from > limit) { for (j = prev; j < sb_out; j++) { BITSET_SET_BIT(cc->bs, j); } goto sb_end2; } for (j = prev; j < from; j++) { if (j >= sb_out) goto sb_end2; BITSET_SET_BIT(cc->bs, j); } prev = ONIGENC_CODE_RANGE_TO(mbr, i); if (prev > limit) prev = limit; prev++; if (prev == 0) goto end; } for (j = prev; j < sb_out; j++) { BITSET_SET_BIT(cc->bs, j); } sb_end2: prev = sb_out; for (i = 0; i < n; i++) { from = ONIGENC_CODE_RANGE_FROM(mbr, i); if (from > limit) goto last; if (prev < from) { r = add_code_range_to_buf(&(cc->mbuf), prev, from - 1); if (r != 0) return r; } prev = ONIGENC_CODE_RANGE_TO(mbr, i); if (prev > limit) prev = limit; prev++; if (prev == 0) goto end; } last: r = add_code_range_to_buf(&(cc->mbuf), prev, 
MAX_CODE_POINT); if (r != 0) return r; } end: return 0; } static int add_ctype_to_cc(CClassNode* cc, int ctype, int not, ScanEnv* env) { #define ASCII_LIMIT 127 int c, r; int ascii_mode; const OnigCodePoint *ranges; OnigCodePoint limit; OnigCodePoint sb_out; OnigEncoding enc = env->enc; ascii_mode = IS_ASCII_MODE_CTYPE_OPTION(ctype, env->options); r = ONIGENC_GET_CTYPE_CODE_RANGE(enc, ctype, &sb_out, &ranges); if (r == 0) { if (ascii_mode == 0) r = add_ctype_to_cc_by_range(cc, ctype, not, env->enc, sb_out, ranges); else r = add_ctype_to_cc_by_range_limit(cc, ctype, not, env->enc, sb_out, ranges, ASCII_LIMIT); return r; } else if (r != ONIG_NO_SUPPORT_CONFIG) { return r; } r = 0; limit = ascii_mode ? ASCII_LIMIT : SINGLE_BYTE_SIZE; switch (ctype) { case ONIGENC_CTYPE_ALPHA: case ONIGENC_CTYPE_BLANK: case ONIGENC_CTYPE_CNTRL: case ONIGENC_CTYPE_DIGIT: case ONIGENC_CTYPE_LOWER: case ONIGENC_CTYPE_PUNCT: case ONIGENC_CTYPE_SPACE: case ONIGENC_CTYPE_UPPER: case ONIGENC_CTYPE_XDIGIT: case ONIGENC_CTYPE_ASCII: case ONIGENC_CTYPE_ALNUM: if (not != 0) { for (c = 0; c < (int )limit; c++) { if (! ONIGENC_IS_CODE_CTYPE(enc, (OnigCodePoint )c, ctype)) BITSET_SET_BIT(cc->bs, c); } for (c = limit; c < SINGLE_BYTE_SIZE; c++) { BITSET_SET_BIT(cc->bs, c); } ADD_ALL_MULTI_BYTE_RANGE(enc, cc->mbuf); } else { for (c = 0; c < (int )limit; c++) { if (ONIGENC_IS_CODE_CTYPE(enc, (OnigCodePoint )c, ctype)) BITSET_SET_BIT(cc->bs, c); } } break; case ONIGENC_CTYPE_GRAPH: case ONIGENC_CTYPE_PRINT: case ONIGENC_CTYPE_WORD: if (not != 0) { for (c = 0; c < (int )limit; c++) { if (ONIGENC_CODE_TO_MBCLEN(enc, c) > 0 /* check invalid code point */ && ! ONIGENC_IS_CODE_CTYPE(enc, (OnigCodePoint )c, ctype)) BITSET_SET_BIT(cc->bs, c); } for (c = limit; c < SINGLE_BYTE_SIZE; c++) { if (ONIGENC_CODE_TO_MBCLEN(enc, c) > 0) BITSET_SET_BIT(cc->bs, c); } } else { for (c = 0; c < (int )limit; c++) { if (ONIGENC_IS_CODE_CTYPE(enc, (OnigCodePoint )c, ctype)) BITSET_SET_BIT(cc->bs, c); } if (ascii_mode == 0) ADD_ALL_MULTI_BYTE_RANGE(enc, cc->mbuf); } break; default: return ONIGERR_PARSER_BUG; break; } return r; } static int parse_posix_bracket(CClassNode* cc, UChar** src, UChar* end, ScanEnv* env) { #define POSIX_BRACKET_CHECK_LIMIT_LENGTH 20 #define POSIX_BRACKET_NAME_MIN_LEN 4 static PosixBracketEntryType PBS[] = { { (UChar* )"alnum", ONIGENC_CTYPE_ALNUM, 5 }, { (UChar* )"alpha", ONIGENC_CTYPE_ALPHA, 5 }, { (UChar* )"blank", ONIGENC_CTYPE_BLANK, 5 }, { (UChar* )"cntrl", ONIGENC_CTYPE_CNTRL, 5 }, { (UChar* )"digit", ONIGENC_CTYPE_DIGIT, 5 }, { (UChar* )"graph", ONIGENC_CTYPE_GRAPH, 5 }, { (UChar* )"lower", ONIGENC_CTYPE_LOWER, 5 }, { (UChar* )"print", ONIGENC_CTYPE_PRINT, 5 }, { (UChar* )"punct", ONIGENC_CTYPE_PUNCT, 5 }, { (UChar* )"space", ONIGENC_CTYPE_SPACE, 5 }, { (UChar* )"upper", ONIGENC_CTYPE_UPPER, 5 }, { (UChar* )"xdigit", ONIGENC_CTYPE_XDIGIT, 6 }, { (UChar* )"ascii", ONIGENC_CTYPE_ASCII, 5 }, { (UChar* )"word", ONIGENC_CTYPE_WORD, 4 }, { (UChar* )NULL, -1, 0 } }; PosixBracketEntryType *pb; int not, i, r; OnigCodePoint c; OnigEncoding enc = env->enc; UChar *p = *src; if (PPEEK_IS('^')) { PINC_S; not = 1; } else not = 0; if (onigenc_strlen(enc, p, end) < POSIX_BRACKET_NAME_MIN_LEN + 3) goto not_posix_bracket; for (pb = PBS; IS_NOT_NULL(pb->name); pb++) { if (onigenc_with_ascii_strncmp(enc, p, end, pb->name, pb->len) == 0) { p = (UChar* )onigenc_step(enc, p, end, pb->len); if (onigenc_with_ascii_strncmp(enc, p, end, (UChar* )":]", 2) != 0) return ONIGERR_INVALID_POSIX_BRACKET_TYPE; r = add_ctype_to_cc(cc, pb->ctype, not, env); 
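      /* 'not' is 1 for a negated class like [:^alpha:] (leading '^' above) */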
if (r != 0) return r; PINC_S; PINC_S; *src = p; return 0; } } not_posix_bracket: c = 0; i = 0; while (!PEND && ((c = PPEEK) != ':') && c != ']') { PINC_S; if (++i > POSIX_BRACKET_CHECK_LIMIT_LENGTH) break; } if (c == ':' && ! PEND) { PINC_S; if (! PEND) { PFETCH_S(c); if (c == ']') return ONIGERR_INVALID_POSIX_BRACKET_TYPE; } } return 1; /* 1: is not POSIX bracket, but no error. */ } static int fetch_char_property_to_ctype(UChar** src, UChar* end, ScanEnv* env) { int r; OnigCodePoint c; OnigEncoding enc = env->enc; UChar *prev, *start, *p = *src; r = 0; start = prev = p; while (!PEND) { prev = p; PFETCH_S(c); if (c == '}') { r = ONIGENC_PROPERTY_NAME_TO_CTYPE(enc, start, prev); if (r < 0) break; *src = p; return r; } else if (c == '(' || c == ')' || c == '{' || c == '|') { r = ONIGERR_INVALID_CHAR_PROPERTY_NAME; break; } } onig_scan_env_set_error_string(env, r, *src, prev); return r; } static int parse_char_property(Node** np, PToken* tok, UChar** src, UChar* end, ScanEnv* env) { int r, ctype; CClassNode* cc; ctype = fetch_char_property_to_ctype(src, end, env); if (ctype < 0) return ctype; *np = node_new_cclass(); CHECK_NULL_RETURN_MEMERR(*np); cc = CCLASS_(*np); r = add_ctype_to_cc(cc, ctype, 0, env); if (r != 0) return r; if (tok->u.prop.not != 0) NCCLASS_SET_NOT(cc); return 0; } enum CCSTATE { CCS_VALUE, CCS_RANGE, CCS_COMPLETE, CCS_START }; enum CCVALTYPE { CCV_SB, CCV_CODE_POINT, CCV_CLASS }; static int next_state_class(CClassNode* cc, OnigCodePoint* vs, enum CCVALTYPE* type, enum CCSTATE* state, ScanEnv* env) { int r; if (*state == CCS_RANGE) return ONIGERR_CHAR_CLASS_VALUE_AT_END_OF_RANGE; if (*state == CCS_VALUE && *type != CCV_CLASS) { if (*type == CCV_SB) BITSET_SET_BIT(cc->bs, (int )(*vs)); else if (*type == CCV_CODE_POINT) { r = add_code_range(&(cc->mbuf), env, *vs, *vs); if (r < 0) return r; } } *state = CCS_VALUE; *type = CCV_CLASS; return 0; } static int next_state_val(CClassNode* cc, OnigCodePoint *from, OnigCodePoint to, int* from_israw, int to_israw, enum CCVALTYPE intype, enum CCVALTYPE* type, enum CCSTATE* state, ScanEnv* env) { int r; switch (*state) { case CCS_VALUE: if (*type == CCV_SB) { if (*from > 0xff) return ONIGERR_INVALID_CODE_POINT_VALUE; BITSET_SET_BIT(cc->bs, (int )(*from)); } else if (*type == CCV_CODE_POINT) { r = add_code_range(&(cc->mbuf), env, *from, *from); if (r < 0) return r; } break; case CCS_RANGE: if (intype == *type) { if (intype == CCV_SB) { if (*from > 0xff || to > 0xff) return ONIGERR_INVALID_CODE_POINT_VALUE; if (*from > to) { if (IS_SYNTAX_BV(env->syntax, ONIG_SYN_ALLOW_EMPTY_RANGE_IN_CC)) goto ccs_range_end; else return ONIGERR_EMPTY_RANGE_IN_CHAR_CLASS; } bitset_set_range(cc->bs, (int )*from, (int )to); } else { r = add_code_range(&(cc->mbuf), env, *from, to); if (r < 0) return r; } } else { if (*from > to) { if (IS_SYNTAX_BV(env->syntax, ONIG_SYN_ALLOW_EMPTY_RANGE_IN_CC)) goto ccs_range_end; else return ONIGERR_EMPTY_RANGE_IN_CHAR_CLASS; } bitset_set_range(cc->bs, (int )*from, (int )(to < 0xff ? to : 0xff)); r = add_code_range(&(cc->mbuf), env, (OnigCodePoint )*from, to); if (r < 0) return r; } ccs_range_end: *state = CCS_COMPLETE; break; case CCS_COMPLETE: case CCS_START: *state = CCS_VALUE; break; default: break; } *from_israw = to_israw; *from = to; *type = intype; return 0; } static int code_exist_check(OnigCodePoint c, UChar* from, UChar* end, int ignore_escaped, ScanEnv* env) { int in_esc; OnigCodePoint code; OnigEncoding enc = env->enc; UChar* p = from; in_esc = 0; while (! 
PEND) { if (ignore_escaped && in_esc) { in_esc = 0; } else { PFETCH_S(code); if (code == c) return 1; if (code == MC_ESC(env->syntax)) in_esc = 1; } } return 0; } static int parse_char_class(Node** np, PToken* tok, UChar** src, UChar* end, ScanEnv* env) { int r, neg, len, fetched, and_start; OnigCodePoint v, vs; UChar *p; Node* node; CClassNode *cc, *prev_cc; CClassNode work_cc; enum CCSTATE state; enum CCVALTYPE val_type, in_type; int val_israw, in_israw; *np = NULL_NODE; env->parse_depth++; if (env->parse_depth > ParseDepthLimit) return ONIGERR_PARSE_DEPTH_LIMIT_OVER; prev_cc = (CClassNode* )NULL; r = fetch_token_in_cc(tok, src, end, env); if (r == TK_CHAR && tok->u.c == '^' && tok->escaped == 0) { neg = 1; r = fetch_token_in_cc(tok, src, end, env); } else { neg = 0; } if (r < 0) return r; if (r == TK_CC_CLOSE) { if (! code_exist_check((OnigCodePoint )']', *src, env->pattern_end, 1, env)) return ONIGERR_EMPTY_CHAR_CLASS; CC_ESC_WARN(env, (UChar* )"]"); r = tok->type = TK_CHAR; /* allow []...] */ } *np = node = node_new_cclass(); CHECK_NULL_RETURN_MEMERR(node); cc = CCLASS_(node); and_start = 0; state = CCS_START; p = *src; while (r != TK_CC_CLOSE) { fetched = 0; switch (r) { case TK_CHAR: any_char_in: len = ONIGENC_CODE_TO_MBCLEN(env->enc, tok->u.c); if (len > 1) { in_type = CCV_CODE_POINT; } else if (len < 0) { r = len; goto err; } else { /* sb_char: */ in_type = CCV_SB; } v = (OnigCodePoint )tok->u.c; in_israw = 0; goto val_entry2; break; case TK_RAW_BYTE: /* tok->base != 0 : octal or hexadec. */ if (! ONIGENC_IS_SINGLEBYTE(env->enc) && tok->base != 0) { int i, j; UChar buf[ONIGENC_CODE_TO_MBC_MAXLEN]; UChar* bufe = buf + ONIGENC_CODE_TO_MBC_MAXLEN; UChar* psave = p; int base = tok->base; buf[0] = tok->u.c; for (i = 1; i < ONIGENC_MBC_MAXLEN(env->enc); i++) { r = fetch_token_in_cc(tok, &p, end, env); if (r < 0) goto err; if (r != TK_RAW_BYTE || tok->base != base) { fetched = 1; break; } buf[i] = tok->u.c; } if (i < ONIGENC_MBC_MINLEN(env->enc)) { r = ONIGERR_TOO_SHORT_MULTI_BYTE_STRING; goto err; } /* clear buf tail */ for (j = i; j < ONIGENC_CODE_TO_MBC_MAXLEN; j++) buf[j] = '\0'; len = enclen(env->enc, buf); if (i < len) { r = ONIGERR_TOO_SHORT_MULTI_BYTE_STRING; goto err; } else if (i > len) { /* fetch back */ p = psave; for (i = 1; i < len; i++) { r = fetch_token_in_cc(tok, &p, end, env); } fetched = 0; } if (i == 1) { v = (OnigCodePoint )buf[0]; goto raw_single; } else { v = ONIGENC_MBC_TO_CODE(env->enc, buf, bufe); in_type = CCV_CODE_POINT; } } else { v = (OnigCodePoint )tok->u.c; raw_single: in_type = CCV_SB; } in_israw = 1; goto val_entry2; break; case TK_CODE_POINT: v = tok->u.code; in_israw = 1; val_entry: len = ONIGENC_CODE_TO_MBCLEN(env->enc, v); if (len < 0) { r = len; goto err; } in_type = (len == 1 ? 
CCV_SB : CCV_CODE_POINT); val_entry2: r = next_state_val(cc, &vs, v, &val_israw, in_israw, in_type, &val_type, &state, env); if (r != 0) goto err; break; case TK_POSIX_BRACKET_OPEN: r = parse_posix_bracket(cc, &p, end, env); if (r < 0) goto err; if (r == 1) { /* is not POSIX bracket */ CC_ESC_WARN(env, (UChar* )"["); p = tok->backp; v = (OnigCodePoint )tok->u.c; in_israw = 0; goto val_entry; } goto next_class; break; case TK_CHAR_TYPE: r = add_ctype_to_cc(cc, tok->u.prop.ctype, tok->u.prop.not, env); if (r != 0) goto err; next_class: r = next_state_class(cc, &vs, &val_type, &state, env); if (r != 0) goto err; break; case TK_CHAR_PROPERTY: { int ctype = fetch_char_property_to_ctype(&p, end, env); if (ctype < 0) { r = ctype; goto err; } r = add_ctype_to_cc(cc, ctype, tok->u.prop.not, env); if (r != 0) goto err; goto next_class; } break; case TK_CC_RANGE: if (state == CCS_VALUE) { r = fetch_token_in_cc(tok, &p, end, env); if (r < 0) goto err; fetched = 1; if (r == TK_CC_CLOSE) { /* allow [x-] */ range_end_val: v = (OnigCodePoint )'-'; in_israw = 0; goto val_entry; } else if (r == TK_CC_AND) { CC_ESC_WARN(env, (UChar* )"-"); goto range_end_val; } if (val_type == CCV_CLASS) { r = ONIGERR_UNMATCHED_RANGE_SPECIFIER_IN_CHAR_CLASS; goto err; } state = CCS_RANGE; } else if (state == CCS_START) { /* [-xa] is allowed */ v = (OnigCodePoint )tok->u.c; in_israw = 0; r = fetch_token_in_cc(tok, &p, end, env); if (r < 0) goto err; fetched = 1; /* [--x] or [a&&-x] is warned. */ if (r == TK_CC_RANGE || and_start != 0) CC_ESC_WARN(env, (UChar* )"-"); goto val_entry; } else if (state == CCS_RANGE) { CC_ESC_WARN(env, (UChar* )"-"); goto any_char_in; /* [!--x] is allowed */ } else { /* CCS_COMPLETE */ r = fetch_token_in_cc(tok, &p, end, env); if (r < 0) goto err; fetched = 1; if (r == TK_CC_CLOSE) goto range_end_val; /* allow [a-b-] */ else if (r == TK_CC_AND) { CC_ESC_WARN(env, (UChar* )"-"); goto range_end_val; } if (IS_SYNTAX_BV(env->syntax, ONIG_SYN_ALLOW_DOUBLE_RANGE_OP_IN_CC)) { CC_ESC_WARN(env, (UChar* )"-"); goto range_end_val; /* [0-9-a] is allowed as [0-9\-a] */ } r = ONIGERR_UNMATCHED_RANGE_SPECIFIER_IN_CHAR_CLASS; goto err; } break; case TK_CC_CC_OPEN: /* [ */ { Node *anode; CClassNode* acc; r = parse_char_class(&anode, tok, &p, end, env); if (r != 0) { onig_node_free(anode); goto cc_open_err; } acc = CCLASS_(anode); r = or_cclass(cc, acc, env->enc); onig_node_free(anode); cc_open_err: if (r != 0) goto err; } break; case TK_CC_AND: /* && */ { if (state == CCS_VALUE) { r = next_state_val(cc, &vs, 0, &val_israw, 0, val_type, &val_type, &state, env); if (r != 0) goto err; } /* initialize local variables */ and_start = 1; state = CCS_START; if (IS_NOT_NULL(prev_cc)) { r = and_cclass(prev_cc, cc, env->enc); if (r != 0) goto err; bbuf_free(cc->mbuf); } else { prev_cc = cc; cc = &work_cc; } initialize_cclass(cc); } break; case TK_EOT: r = ONIGERR_PREMATURE_END_OF_CHAR_CLASS; goto err; break; default: r = ONIGERR_PARSER_BUG; goto err; break; } if (fetched) r = tok->type; else { r = fetch_token_in_cc(tok, &p, end, env); if (r < 0) goto err; } } if (state == CCS_VALUE) { r = next_state_val(cc, &vs, 0, &val_israw, 0, val_type, &val_type, &state, env); if (r != 0) goto err; } if (IS_NOT_NULL(prev_cc)) { r = and_cclass(prev_cc, cc, env->enc); if (r != 0) goto err; bbuf_free(cc->mbuf); cc = prev_cc; } if (neg != 0) NCCLASS_SET_NOT(cc); else NCCLASS_CLEAR_NOT(cc); if (IS_NCCLASS_NOT(cc) && IS_SYNTAX_BV(env->syntax, ONIG_SYN_NOT_NEWLINE_IN_NEGATIVE_CC)) { int is_empty = (IS_NULL(cc->mbuf) ? 
1 : 0); if (is_empty != 0) BITSET_IS_EMPTY(cc->bs, is_empty); if (is_empty == 0) { #define NEWLINE_CODE 0x0a if (ONIGENC_IS_CODE_NEWLINE(env->enc, NEWLINE_CODE)) { if (ONIGENC_CODE_TO_MBCLEN(env->enc, NEWLINE_CODE) == 1) BITSET_SET_BIT(cc->bs, NEWLINE_CODE); else add_code_range(&(cc->mbuf), env, NEWLINE_CODE, NEWLINE_CODE); } } } *src = p; env->parse_depth--; return 0; err: if (cc != CCLASS_(*np)) bbuf_free(cc->mbuf); return r; } static int parse_subexp(Node** top, PToken* tok, int term, UChar** src, UChar* end, ScanEnv* env, int group_head); #ifdef USE_CALLOUT /* (?{...}[tag][+-]) (?{{...}}[tag][+-]) */ static int parse_callout_of_contents(Node** np, int cterm, UChar** src, UChar* end, ScanEnv* env) { int r; int i; int in; int num; OnigCodePoint c; UChar* code_start; UChar* code_end; UChar* contents; UChar* tag_start; UChar* tag_end; int brace_nest; CalloutListEntry* e; RegexExt* ext; OnigEncoding enc = env->enc; UChar* p = *src; if (PEND) return ONIGERR_INVALID_CALLOUT_PATTERN; brace_nest = 0; while (PPEEK_IS('{')) { brace_nest++; PINC_S; if (PEND) return ONIGERR_INVALID_CALLOUT_PATTERN; } in = ONIG_CALLOUT_IN_PROGRESS; code_start = p; while (1) { if (PEND) return ONIGERR_INVALID_CALLOUT_PATTERN; code_end = p; PFETCH_S(c); if (c == '}') { i = brace_nest; while (i > 0) { if (PEND) return ONIGERR_INVALID_CALLOUT_PATTERN; PFETCH_S(c); if (c == '}') i--; else break; } if (i == 0) break; } } if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; PFETCH_S(c); if (c == '[') { if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; tag_end = tag_start = p; while (! PEND) { if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; tag_end = p; PFETCH_S(c); if (c == ']') break; } if (! is_allowed_callout_tag_name(enc, tag_start, tag_end)) return ONIGERR_INVALID_CALLOUT_TAG_NAME; if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; PFETCH_S(c); } else { tag_start = tag_end = 0; } if (c == 'X') { in |= ONIG_CALLOUT_IN_RETRACTION; if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; PFETCH_S(c); } else if (c == '<') { in = ONIG_CALLOUT_IN_RETRACTION; if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; PFETCH_S(c); } else if (c == '>') { /* no needs (default) */ if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; PFETCH_S(c); } if (c != cterm) return ONIGERR_INVALID_CALLOUT_PATTERN; r = reg_callout_list_entry(env, &num); if (r != 0) return r; ext = onig_get_regex_ext(env->reg); CHECK_NULL_RETURN_MEMERR(ext); if (IS_NULL(ext->pattern)) { r = onig_ext_set_pattern(env->reg, env->pattern, env->pattern_end); if (r != ONIG_NORMAL) return r; } if (tag_start != tag_end) { r = callout_tag_entry(env->reg, tag_start, tag_end, num); if (r != ONIG_NORMAL) return r; } contents = onigenc_strdup(enc, code_start, code_end); CHECK_NULL_RETURN_MEMERR(contents); r = node_new_callout(np, ONIG_CALLOUT_OF_CONTENTS, num, ONIG_NON_NAME_ID, env); if (r != 0) { xfree(contents); return r; } e = onig_reg_callout_list_at(env->reg, num); if (IS_NULL(e)) { xfree(contents); return ONIGERR_MEMORY; } e->of = ONIG_CALLOUT_OF_CONTENTS; e->in = in; e->name_id = ONIG_NON_NAME_ID; e->u.content.start = contents; e->u.content.end = contents + (code_end - code_start); *src = p; return 0; } static long parse_long(OnigEncoding enc, UChar* s, UChar* end, int sign_on, long max, long* rl) { long v; long d; int flag; UChar* p; OnigCodePoint c; if (s >= end) return ONIGERR_INVALID_CALLOUT_ARG; flag = 1; v = 0; p = s; while (p < end) { c = ONIGENC_MBC_TO_CODE(enc, p, end); p += ONIGENC_MBC_ENC_LEN(enc, p); if (c >= '0' && c <= '9') { d = (long )(c - '0'); if (v > (max - d) / 10) return 
ONIGERR_INVALID_CALLOUT_ARG; v = v * 10 + d; } else if (sign_on != 0 && (c == '-' || c == '+')) { if (c == '-') flag = -1; } else return ONIGERR_INVALID_CALLOUT_ARG; sign_on = 0; } *rl = flag * v; return ONIG_NORMAL; } static int parse_callout_args(int skip_mode, int cterm, UChar** src, UChar* end, unsigned int types[], OnigValue vals[], ScanEnv* env) { #define MAX_CALLOUT_ARG_BYTE_LENGTH 128 int r; int n; int esc; int cn; UChar* s; UChar* e; UChar* eesc; OnigCodePoint c; UChar* bufend; UChar buf[MAX_CALLOUT_ARG_BYTE_LENGTH]; OnigEncoding enc = env->enc; UChar* p = *src; if (PEND) return ONIGERR_INVALID_CALLOUT_PATTERN; n = 0; while (n < ONIG_CALLOUT_MAX_ARGS_NUM) { c = 0; cn = 0; esc = 0; eesc = 0; bufend = buf; s = e = p; while (1) { if (PEND) return ONIGERR_INVALID_CALLOUT_PATTERN; e = p; PFETCH_S(c); if (esc != 0) { esc = 0; if (c == '\\' || c == cterm || c == ',') { /* */ } else { e = eesc; cn++; } goto add_char; } else { if (c == '\\') { esc = 1; eesc = e; } else if (c == cterm || c == ',') break; else { size_t clen; add_char: if (skip_mode == 0) { clen = p - e; if (bufend + clen > buf + MAX_CALLOUT_ARG_BYTE_LENGTH) return ONIGERR_INVALID_CALLOUT_ARG; /* too long argument */ xmemcpy(bufend, e, clen); bufend += clen; } cn++; } } } if (cn != 0) { if (skip_mode == 0) { if ((types[n] & ONIG_TYPE_LONG) != 0) { int fixed = 0; if (cn > 0) { long rl; r = parse_long(enc, buf, bufend, 1, LONG_MAX, &rl); if (r == ONIG_NORMAL) { vals[n].l = rl; fixed = 1; types[n] = ONIG_TYPE_LONG; } } if (fixed == 0) { types[n] = (types[n] & ~ONIG_TYPE_LONG); if (types[n] == ONIG_TYPE_VOID) return ONIGERR_INVALID_CALLOUT_ARG; } } switch (types[n]) { case ONIG_TYPE_LONG: break; case ONIG_TYPE_CHAR: if (cn != 1) return ONIGERR_INVALID_CALLOUT_ARG; vals[n].c = ONIGENC_MBC_TO_CODE(enc, buf, bufend); break; case ONIG_TYPE_STRING: { UChar* rs = onigenc_strdup(enc, buf, bufend); CHECK_NULL_RETURN_MEMERR(rs); vals[n].s.start = rs; vals[n].s.end = rs + (e - s); } break; case ONIG_TYPE_TAG: if (eesc != 0 || ! is_allowed_callout_tag_name(enc, s, e)) return ONIGERR_INVALID_CALLOUT_TAG_NAME; vals[n].s.start = s; vals[n].s.end = e; break; case ONIG_TYPE_VOID: case ONIG_TYPE_POINTER: return ONIGERR_PARSER_BUG; break; } } n++; } if (c == cterm) break; } if (c != cterm) return ONIGERR_INVALID_CALLOUT_PATTERN; *src = p; return n; } /* (*name[TAG]) (*name[TAG]{a,b,..}) */ static int parse_callout_of_name(Node** np, int cterm, UChar** src, UChar* end, ScanEnv* env) { int r; int i; int in; int num; int name_id; int arg_num; int max_arg_num; int opt_arg_num; int is_not_single; OnigCodePoint c; UChar* name_start; UChar* name_end; UChar* tag_start; UChar* tag_end; Node* node; CalloutListEntry* e; RegexExt* ext; unsigned int types[ONIG_CALLOUT_MAX_ARGS_NUM]; OnigValue vals[ONIG_CALLOUT_MAX_ARGS_NUM]; OnigEncoding enc = env->enc; UChar* p = *src; /* PFETCH_READY; */ if (PEND) return ONIGERR_INVALID_CALLOUT_PATTERN; node = 0; name_start = p; while (1) { if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; name_end = p; PFETCH_S(c); if (c == cterm || c == '[' || c == '{') break; } if (! is_allowed_callout_name(enc, name_start, name_end)) return ONIGERR_INVALID_CALLOUT_NAME; if (c == '[') { if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; tag_end = tag_start = p; while (! PEND) { if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; tag_end = p; PFETCH_S(c); if (c == ']') break; } if (! 
is_allowed_callout_tag_name(enc, tag_start, tag_end)) return ONIGERR_INVALID_CALLOUT_TAG_NAME; if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; PFETCH_S(c); } else { tag_start = tag_end = 0; } if (c == '{') { UChar* save; if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; /* read for single check only */ save = p; arg_num = parse_callout_args(1, '}', &p, end, 0, 0, env); if (arg_num < 0) return arg_num; is_not_single = PPEEK_IS(cterm) ? 0 : 1; p = save; r = get_callout_name_id_by_name(enc, is_not_single, name_start, name_end, &name_id); if (r != ONIG_NORMAL) return r; max_arg_num = get_callout_arg_num_by_name_id(name_id); for (i = 0; i < max_arg_num; i++) { types[i] = get_callout_arg_type_by_name_id(name_id, i); } arg_num = parse_callout_args(0, '}', &p, end, types, vals, env); if (arg_num < 0) return arg_num; if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; PFETCH_S(c); } else { arg_num = 0; is_not_single = 0; r = get_callout_name_id_by_name(enc, is_not_single, name_start, name_end, &name_id); if (r != ONIG_NORMAL) return r; max_arg_num = get_callout_arg_num_by_name_id(name_id); for (i = 0; i < max_arg_num; i++) { types[i] = get_callout_arg_type_by_name_id(name_id, i); } } in = onig_get_callout_in_by_name_id(name_id); opt_arg_num = get_callout_opt_arg_num_by_name_id(name_id); if (arg_num > max_arg_num || arg_num < (max_arg_num - opt_arg_num)) return ONIGERR_INVALID_CALLOUT_ARG; if (c != cterm) return ONIGERR_INVALID_CALLOUT_PATTERN; r = reg_callout_list_entry(env, &num); if (r != 0) return r; ext = onig_get_regex_ext(env->reg); CHECK_NULL_RETURN_MEMERR(ext); if (IS_NULL(ext->pattern)) { r = onig_ext_set_pattern(env->reg, env->pattern, env->pattern_end); if (r != ONIG_NORMAL) return r; } if (tag_start != tag_end) { r = callout_tag_entry(env->reg, tag_start, tag_end, num); if (r != ONIG_NORMAL) return r; } r = node_new_callout(&node, ONIG_CALLOUT_OF_NAME, num, name_id, env); if (r != ONIG_NORMAL) return r; e = onig_reg_callout_list_at(env->reg, num); CHECK_NULL_RETURN_MEMERR(e); e->of = ONIG_CALLOUT_OF_NAME; e->in = in; e->name_id = name_id; e->type = onig_get_callout_type_by_name_id(name_id); e->start_func = onig_get_callout_start_func_by_name_id(name_id); e->end_func = onig_get_callout_end_func_by_name_id(name_id); e->u.arg.num = max_arg_num; e->u.arg.passed_num = arg_num; for (i = 0; i < max_arg_num; i++) { e->u.arg.types[i] = types[i]; if (i < arg_num) e->u.arg.vals[i] = vals[i]; else e->u.arg.vals[i] = get_callout_opt_default_by_name_id(name_id, i); } *np = node; *src = p; return 0; } #endif static int parse_bag(Node** np, PToken* tok, int term, UChar** src, UChar* end, ScanEnv* env) { int r, num; Node *target; OnigOptionType option; OnigCodePoint c; int list_capture; OnigEncoding enc = env->enc; UChar* p = *src; PFETCH_READY; *np = NULL; if (PEND) return ONIGERR_END_PATTERN_WITH_UNMATCHED_PARENTHESIS; option = env->options; c = PPEEK; if (c == '?' && IS_SYNTAX_OP2(env->syntax, ONIG_SYN_OP2_QMARK_GROUP_EFFECT)) { PINC; if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; PFETCH(c); switch (c) { case ':': /* (?:...) grouping only */ group: r = fetch_token(tok, &p, end, env); if (r < 0) return r; r = parse_subexp(np, tok, term, &p, end, env, 0); if (r < 0) return r; *src = p; return 1; /* group */ break; case '=': *np = onig_node_new_anchor(ANCR_PREC_READ, 0); break; case '!': /* preceding read */ *np = onig_node_new_anchor(ANCR_PREC_READ_NOT, 0); break; case '>': /* (?>...) 
stop backtrack */ *np = node_new_bag(BAG_STOP_BACKTRACK); break; case '\'': if (IS_SYNTAX_OP2(env->syntax, ONIG_SYN_OP2_QMARK_LT_NAMED_GROUP)) { goto named_group1; } else return ONIGERR_UNDEFINED_GROUP_OPTION; break; case '<': /* look behind (?<=...), (?<!...) */ if (PEND) return ONIGERR_END_PATTERN_WITH_UNMATCHED_PARENTHESIS; PFETCH(c); if (c == '=') *np = onig_node_new_anchor(ANCR_LOOK_BEHIND, 0); else if (c == '!') *np = onig_node_new_anchor(ANCR_LOOK_BEHIND_NOT, 0); else { if (IS_SYNTAX_OP2(env->syntax, ONIG_SYN_OP2_QMARK_LT_NAMED_GROUP)) { UChar *name; UChar *name_end; enum REF_NUM num_type; PUNFETCH; c = '<'; named_group1: list_capture = 0; named_group2: name = p; r = fetch_name((OnigCodePoint )c, &p, end, &name_end, env, &num, &num_type, 0); if (r < 0) return r; num = scan_env_add_mem_entry(env); if (num < 0) return num; if (list_capture != 0 && num >= (int )MEM_STATUS_BITS_NUM) return ONIGERR_GROUP_NUMBER_OVER_FOR_CAPTURE_HISTORY; r = name_add(env->reg, name, name_end, num, env); if (r != 0) return r; *np = node_new_memory(1); CHECK_NULL_RETURN_MEMERR(*np); BAG_(*np)->m.regnum = num; if (list_capture != 0) MEM_STATUS_ON_SIMPLE(env->capture_history, num); env->num_named++; } else { return ONIGERR_UNDEFINED_GROUP_OPTION; } } break; case '~': if (IS_SYNTAX_OP2(env->syntax, ONIG_SYN_OP2_QMARK_TILDE_ABSENT_GROUP)) { Node* absent; Node* expr; int head_bar; int is_range_cutter; if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; if (PPEEK_IS('|')) { /* (?~|generator|absent) */ PINC; if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; head_bar = 1; if (PPEEK_IS(')')) { /* (?~|) : range clear */ PINC; r = make_range_clear(np, env); if (r != 0) return r; goto end; } } else head_bar = 0; r = fetch_token(tok, &p, end, env); if (r < 0) return r; r = parse_subexp(&absent, tok, term, &p, end, env, 1); if (r < 0) { onig_node_free(absent); return r; } expr = NULL_NODE; is_range_cutter = 0; if (head_bar != 0) { Node* top = absent; if (NODE_TYPE(top) != NODE_ALT || IS_NULL(NODE_CDR(top))) { expr = NULL_NODE; is_range_cutter = 1; /* return ONIGERR_INVALID_ABSENT_GROUP_GENERATOR_PATTERN; */ } else { absent = NODE_CAR(top); expr = NODE_CDR(top); NODE_CAR(top) = NULL_NODE; NODE_CDR(top) = NULL_NODE; onig_node_free(top); if (IS_NULL(NODE_CDR(expr))) { top = expr; expr = NODE_CAR(top); NODE_CAR(top) = NULL_NODE; onig_node_free(top); } } } r = make_absent_tree(np, absent, expr, is_range_cutter, env); if (r != 0) { return r; } goto end; } else { return ONIGERR_UNDEFINED_GROUP_OPTION; } break; #ifdef USE_CALLOUT case '{': if (! IS_SYNTAX_OP2(env->syntax, ONIG_SYN_OP2_QMARK_BRACE_CALLOUT_CONTENTS)) return ONIGERR_UNDEFINED_GROUP_OPTION; r = parse_callout_of_contents(np, ')', &p, end, env); if (r != 0) return r; goto end; break; #endif case '(': /* (?()...) */ if (IS_SYNTAX_OP2(env->syntax, ONIG_SYN_OP2_QMARK_LPAREN_IF_ELSE)) { UChar *prev; Node* condition; int condition_is_checker; if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; PFETCH(c); if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; if (IS_CODE_DIGIT_ASCII(enc, c) || c == '-' || c == '+' || c == '<' || c == '\'') { UChar* name_end; int back_num; int exist_level; int level; enum REF_NUM num_type; int is_enclosed; is_enclosed = (c == '<' || c == '\'') ? 1 : 0; if (! is_enclosed) PUNFETCH; prev = p; exist_level = 0; #ifdef USE_BACKREF_WITH_LEVEL name_end = NULL_UCHARP; /* no need. escape gcc warning. */ r = fetch_name_with_level( (OnigCodePoint )(is_enclosed != 0 ? 
c : '('), &p, end, &name_end, env, &back_num, &level, &num_type); if (r == 1) exist_level = 1; #else r = fetch_name((OnigCodePoint )(is_enclosed != 0 ? c : '('), &p, end, &name_end, env, &back_num, &num_type, 1); #endif if (r < 0) { if (is_enclosed == 0) { goto any_condition; } else return r; } condition_is_checker = 1; if (num_type != IS_NOT_NUM) { if (num_type == IS_REL_NUM) { back_num = backref_rel_to_abs(back_num, env); } if (back_num <= 0) return ONIGERR_INVALID_BACKREF; if (IS_SYNTAX_BV(env->syntax, ONIG_SYN_STRICT_CHECK_BACKREF)) { if (back_num > env->num_mem || IS_NULL(SCANENV_MEMENV(env)[back_num].node)) return ONIGERR_INVALID_BACKREF; } condition = node_new_backref_checker(1, &back_num, 0, #ifdef USE_BACKREF_WITH_LEVEL exist_level, level, #endif env); } else { int num; int* backs; num = onig_name_to_group_numbers(env->reg, prev, name_end, &backs); if (num <= 0) { onig_scan_env_set_error_string(env, ONIGERR_UNDEFINED_NAME_REFERENCE, prev, name_end); return ONIGERR_UNDEFINED_NAME_REFERENCE; } if (IS_SYNTAX_BV(env->syntax, ONIG_SYN_STRICT_CHECK_BACKREF)) { int i; for (i = 0; i < num; i++) { if (backs[i] > env->num_mem || IS_NULL(SCANENV_MEMENV(env)[backs[i]].node)) return ONIGERR_INVALID_BACKREF; } } condition = node_new_backref_checker(num, backs, 1, #ifdef USE_BACKREF_WITH_LEVEL exist_level, level, #endif env); } if (is_enclosed != 0) { if (PEND) goto err_if_else; PFETCH(c); if (c != ')') goto err_if_else; } } #ifdef USE_CALLOUT else if (c == '?') { if (IS_SYNTAX_OP2(env->syntax, ONIG_SYN_OP2_QMARK_BRACE_CALLOUT_CONTENTS)) { if (! PEND && PPEEK_IS('{')) { /* condition part is callouts of contents: (?(?{...})THEN|ELSE) */ condition_is_checker = 0; PFETCH(c); r = parse_callout_of_contents(&condition, ')', &p, end, env); if (r != 0) return r; goto end_condition; } } goto any_condition; } else if (c == '*' && IS_SYNTAX_OP2(env->syntax, ONIG_SYN_OP2_ASTERISK_CALLOUT_NAME)) { condition_is_checker = 0; r = parse_callout_of_name(&condition, ')', &p, end, env); if (r != 0) return r; goto end_condition; } #endif else { any_condition: PUNFETCH; condition_is_checker = 0; r = fetch_token(tok, &p, end, env); if (r < 0) return r; r = parse_subexp(&condition, tok, term, &p, end, env, 0); if (r < 0) { onig_node_free(condition); return r; } } #ifdef USE_CALLOUT end_condition: #endif CHECK_NULL_RETURN_MEMERR(condition); if (PEND) { err_if_else: onig_node_free(condition); return ONIGERR_END_PATTERN_IN_GROUP; } if (PPEEK_IS(')')) { /* case: empty body: make backref checker */ if (condition_is_checker == 0) { onig_node_free(condition); return ONIGERR_INVALID_IF_ELSE_SYNTAX; } PFETCH(c); *np = condition; } else { /* if-else */ int then_is_empty; Node *Then, *Else; Then = 0; if (PPEEK_IS('|')) { PFETCH(c); then_is_empty = 1; } else then_is_empty = 0; r = fetch_token(tok, &p, end, env); if (r < 0) { onig_node_free(condition); return r; } r = parse_subexp(&target, tok, term, &p, end, env, 1); if (r < 0) { onig_node_free(condition); onig_node_free(target); return r; } if (then_is_empty != 0) { Else = target; } else { if (NODE_TYPE(target) == NODE_ALT) { Then = NODE_CAR(target); if (NODE_CDR(NODE_CDR(target)) == NULL_NODE) { Else = NODE_CAR(NODE_CDR(target)); cons_node_free_alone(NODE_CDR(target)); } else { Else = NODE_CDR(target); } cons_node_free_alone(target); } else { Then = target; Else = 0; } } *np = node_new_bag_if_else(condition, Then, Else); if (IS_NULL(*np)) { onig_node_free(condition); onig_node_free(Then); onig_node_free(Else); return ONIGERR_MEMORY; } } goto end; } else { return 
ONIGERR_UNDEFINED_GROUP_OPTION; } break; case '@': if (IS_SYNTAX_OP2(env->syntax, ONIG_SYN_OP2_ATMARK_CAPTURE_HISTORY)) { if (IS_SYNTAX_OP2(env->syntax, ONIG_SYN_OP2_QMARK_LT_NAMED_GROUP)) { PFETCH(c); if (c == '<' || c == '\'') { list_capture = 1; goto named_group2; /* (?@<name>...) */ } PUNFETCH; } *np = node_new_memory(0); CHECK_NULL_RETURN_MEMERR(*np); num = scan_env_add_mem_entry(env); if (num < 0) { return num; } else if (num >= (int )MEM_STATUS_BITS_NUM) { return ONIGERR_GROUP_NUMBER_OVER_FOR_CAPTURE_HISTORY; } BAG_(*np)->m.regnum = num; MEM_STATUS_ON_SIMPLE(env->capture_history, num); } else { return ONIGERR_UNDEFINED_GROUP_OPTION; } break; #ifdef USE_POSIXLINE_OPTION case 'p': #endif case '-': case 'i': case 'm': case 's': case 'x': case 'W': case 'D': case 'S': case 'P': case 'y': { int neg = 0; while (1) { switch (c) { case ':': case ')': break; case '-': neg = 1; break; case 'x': OPTION_NEGATE(option, ONIG_OPTION_EXTEND, neg); break; case 'i': OPTION_NEGATE(option, ONIG_OPTION_IGNORECASE, neg); break; case 's': if (IS_SYNTAX_OP2(env->syntax, ONIG_SYN_OP2_OPTION_PERL)) { OPTION_NEGATE(option, ONIG_OPTION_MULTILINE, neg); } else return ONIGERR_UNDEFINED_GROUP_OPTION; break; case 'm': if (IS_SYNTAX_OP2(env->syntax, ONIG_SYN_OP2_OPTION_PERL)) { OPTION_NEGATE(option, ONIG_OPTION_SINGLELINE, (neg == 0 ? 1 : 0)); } else if (IS_SYNTAX_OP2(env->syntax, ONIG_SYN_OP2_OPTION_ONIGURUMA|ONIG_SYN_OP2_OPTION_RUBY)) { OPTION_NEGATE(option, ONIG_OPTION_MULTILINE, neg); } else return ONIGERR_UNDEFINED_GROUP_OPTION; break; #ifdef USE_POSIXLINE_OPTION case 'p': OPTION_NEGATE(option, ONIG_OPTION_MULTILINE|ONIG_OPTION_SINGLELINE, neg); break; #endif case 'W': OPTION_NEGATE(option, ONIG_OPTION_WORD_IS_ASCII, neg); break; case 'D': OPTION_NEGATE(option, ONIG_OPTION_DIGIT_IS_ASCII, neg); break; case 'S': OPTION_NEGATE(option, ONIG_OPTION_SPACE_IS_ASCII, neg); break; case 'P': OPTION_NEGATE(option, ONIG_OPTION_POSIX_IS_ASCII, neg); break; case 'y': /* y{g}, y{w} */ { if (! IS_SYNTAX_OP2(env->syntax, ONIG_SYN_OP2_OPTION_ONIGURUMA)) return ONIGERR_UNDEFINED_GROUP_OPTION; if (neg != 0) return ONIGERR_UNDEFINED_GROUP_OPTION; if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; if (! PPEEK_IS('{')) return ONIGERR_UNDEFINED_GROUP_OPTION; PFETCH(c); if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; PFETCH(c); switch (c) { case 'g': if (! ONIGENC_IS_UNICODE_ENCODING(enc)) return ONIGERR_UNDEFINED_GROUP_OPTION; OPTION_NEGATE(option, ONIG_OPTION_TEXT_SEGMENT_EXTENDED_GRAPHEME_CLUSTER, 0); OPTION_NEGATE(option, ONIG_OPTION_TEXT_SEGMENT_WORD, 1); break; #ifdef USE_UNICODE_WORD_BREAK case 'w': if (! 
ONIGENC_IS_UNICODE_ENCODING(enc)) return ONIGERR_UNDEFINED_GROUP_OPTION; OPTION_NEGATE(option, ONIG_OPTION_TEXT_SEGMENT_WORD, 0); OPTION_NEGATE(option, ONIG_OPTION_TEXT_SEGMENT_EXTENDED_GRAPHEME_CLUSTER, 1); break; #endif default: return ONIGERR_UNDEFINED_GROUP_OPTION; break; } if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; PFETCH(c); if (c != '}') return ONIGERR_UNDEFINED_GROUP_OPTION; break; } /* case 'y' */ default: return ONIGERR_UNDEFINED_GROUP_OPTION; } if (c == ')') { *np = node_new_option(option); CHECK_NULL_RETURN_MEMERR(*np); *src = p; return 2; /* option only */ } else if (c == ':') { OnigOptionType prev = env->options; env->options = option; r = fetch_token(tok, &p, end, env); if (r < 0) return r; r = parse_subexp(&target, tok, term, &p, end, env, 0); env->options = prev; if (r < 0) { onig_node_free(target); return r; } *np = node_new_option(option); CHECK_NULL_RETURN_MEMERR(*np); NODE_BODY(*np) = target; *src = p; return 0; } if (PEND) return ONIGERR_END_PATTERN_IN_GROUP; PFETCH(c); } /* while (1) */ } break; default: return ONIGERR_UNDEFINED_GROUP_OPTION; } } #ifdef USE_CALLOUT else if (c == '*' && IS_SYNTAX_OP2(env->syntax, ONIG_SYN_OP2_ASTERISK_CALLOUT_NAME)) { PINC; r = parse_callout_of_name(np, ')', &p, end, env); if (r != 0) return r; goto end; } #endif else { if (ONIG_IS_OPTION_ON(env->options, ONIG_OPTION_DONT_CAPTURE_GROUP)) goto group; *np = node_new_memory(0); CHECK_NULL_RETURN_MEMERR(*np); num = scan_env_add_mem_entry(env); if (num < 0) return num; BAG_(*np)->m.regnum = num; } CHECK_NULL_RETURN_MEMERR(*np); r = fetch_token(tok, &p, end, env); if (r < 0) return r; r = parse_subexp(&target, tok, term, &p, end, env, 0); if (r < 0) { onig_node_free(target); return r; } NODE_BODY(*np) = target; if (NODE_TYPE(*np) == NODE_BAG) { if (BAG_(*np)->type == BAG_MEMORY) { /* Don't move this to previous of parse_subexp() */ r = scan_env_set_mem_node(env, BAG_(*np)->m.regnum, *np); if (r != 0) return r; } } end: *src = p; return 0; } static const char* PopularQStr[] = { "?", "*", "+", "??", "*?", "+?" }; static const char* ReduceQStr[] = { "", "", "*", "*?", "??", "+ and ??", "+? and ?" }; static int set_quantifier(Node* qnode, Node* target, int group, ScanEnv* env) { QuantNode* qn; qn = QUANT_(qnode); if (qn->lower == 1 && qn->upper == 1) return 1; switch (NODE_TYPE(target)) { case NODE_STRING: if (group == 0) { if (str_node_can_be_split(target, env->enc)) { Node* n = str_node_split_last_char(target, env->enc); if (IS_NOT_NULL(n)) { NODE_BODY(qnode) = n; return 2; } } } break; case NODE_QUANT: { /* check redundant double repeat. */ /* verbose warn (?:.?)? etc... but not warn (.?)? etc... 
*/ QuantNode* qnt = QUANT_(target); int nestq_num = quantifier_type_num(qn); int targetq_num = quantifier_type_num(qnt); #ifdef USE_WARNING_REDUNDANT_NESTED_REPEAT_OPERATOR if (targetq_num >= 0 && nestq_num >= 0 && IS_SYNTAX_BV(env->syntax, ONIG_SYN_WARN_REDUNDANT_NESTED_REPEAT)) { UChar buf[WARN_BUFSIZE]; switch(ReduceTypeTable[targetq_num][nestq_num]) { case RQ_ASIS: break; case RQ_DEL: if (onig_verb_warn != onig_null_warn) { onig_snprintf_with_pattern(buf, WARN_BUFSIZE, env->enc, env->pattern, env->pattern_end, (UChar* )"redundant nested repeat operator"); (*onig_verb_warn)((char* )buf); } goto warn_exit; break; default: if (onig_verb_warn != onig_null_warn) { onig_snprintf_with_pattern(buf, WARN_BUFSIZE, env->enc, env->pattern, env->pattern_end, (UChar* )"nested repeat operator %s and %s was replaced with '%s'", PopularQStr[targetq_num], PopularQStr[nestq_num], ReduceQStr[ReduceTypeTable[targetq_num][nestq_num]]); (*onig_verb_warn)((char* )buf); } goto warn_exit; break; } } warn_exit: #endif if (targetq_num >= 0 && nestq_num < 0) { if (targetq_num == 1 || targetq_num == 2) { /* * or + */ /* (?:a*){n,m}, (?:a+){n,m} => (?:a*){n,n}, (?:a+){n,n} */ if (! IS_INFINITE_REPEAT(qn->upper) && qn->upper > 1 && qn->greedy) { qn->upper = (qn->lower == 0 ? 1 : qn->lower); } } } else { NODE_BODY(qnode) = target; onig_reduce_nested_quantifier(qnode, target); goto q_exit; } } break; default: break; } NODE_BODY(qnode) = target; q_exit: return 0; } #ifndef CASE_FOLD_IS_APPLIED_INSIDE_NEGATIVE_CCLASS static int clear_not_flag_cclass(CClassNode* cc, OnigEncoding enc) { BBuf *tbuf; int r; if (IS_NCCLASS_NOT(cc)) { bitset_invert(cc->bs); if (! ONIGENC_IS_SINGLEBYTE(enc)) { r = not_code_range_buf(enc, cc->mbuf, &tbuf); if (r != 0) return r; bbuf_free(cc->mbuf); cc->mbuf = tbuf; } NCCLASS_CLEAR_NOT(cc); } return 0; } #endif /* CASE_FOLD_IS_APPLIED_INSIDE_NEGATIVE_CCLASS */ typedef struct { ScanEnv* env; CClassNode* cc; Node* alt_root; Node** ptail; } IApplyCaseFoldArg; static int i_apply_case_fold(OnigCodePoint from, OnigCodePoint to[], int to_len, void* arg) { IApplyCaseFoldArg* iarg; ScanEnv* env; CClassNode* cc; BitSetRef bs; iarg = (IApplyCaseFoldArg* )arg; env = iarg->env; cc = iarg->cc; bs = cc->bs; if (to_len == 1) { int is_in = onig_is_code_in_cc(env->enc, from, cc); #ifdef CASE_FOLD_IS_APPLIED_INSIDE_NEGATIVE_CCLASS if ((is_in != 0 && !IS_NCCLASS_NOT(cc)) || (is_in == 0 && IS_NCCLASS_NOT(cc))) { if (ONIGENC_MBC_MINLEN(env->enc) > 1 || *to >= SINGLE_BYTE_SIZE) { add_code_range(&(cc->mbuf), env, *to, *to); } else { BITSET_SET_BIT(bs, *to); } } #else if (is_in != 0) { if (ONIGENC_MBC_MINLEN(env->enc) > 1 || *to >= SINGLE_BYTE_SIZE) { if (IS_NCCLASS_NOT(cc)) clear_not_flag_cclass(cc, env->enc); add_code_range(&(cc->mbuf), env, *to, *to); } else { if (IS_NCCLASS_NOT(cc)) { BITSET_CLEAR_BIT(bs, *to); } else BITSET_SET_BIT(bs, *to); } } #endif /* CASE_FOLD_IS_APPLIED_INSIDE_NEGATIVE_CCLASS */ } else { int r, i, len; UChar buf[ONIGENC_CODE_TO_MBC_MAXLEN]; Node *snode = NULL_NODE; if (onig_is_code_in_cc(env->enc, from, cc) #ifdef CASE_FOLD_IS_APPLIED_INSIDE_NEGATIVE_CCLASS && !IS_NCCLASS_NOT(cc) #endif ) { for (i = 0; i < to_len; i++) { len = ONIGENC_CODE_TO_MBC(env->enc, to[i], buf); if (i == 0) { snode = onig_node_new_str(buf, buf + len); CHECK_NULL_RETURN_MEMERR(snode); /* char-class expanded multi-char only compare with string folded at match time. 
*/ NODE_STRING_SET_AMBIG(snode); } else { r = onig_node_str_cat(snode, buf, buf + len); if (r < 0) { onig_node_free(snode); return r; } } } *(iarg->ptail) = onig_node_new_alt(snode, NULL_NODE); CHECK_NULL_RETURN_MEMERR(*(iarg->ptail)); iarg->ptail = &(NODE_CDR((*(iarg->ptail)))); } } return 0; } static int parse_exp(Node** np, PToken* tok, int term, UChar** src, UChar* end, ScanEnv* env, int group_head) { int r, len, group; Node* qn; Node** tp; unsigned int parse_depth; group = 0; *np = NULL; if (tok->type == (enum TokenSyms )term) goto end_of_token; parse_depth = env->parse_depth; switch (tok->type) { case TK_ALT: case TK_EOT: end_of_token: *np = node_new_empty(); CHECK_NULL_RETURN_MEMERR(*np); return tok->type; break; case TK_SUBEXP_OPEN: r = parse_bag(np, tok, TK_SUBEXP_CLOSE, src, end, env); if (r < 0) return r; if (r == 1) { /* group */ if (group_head == 0) group = 1; else { Node* target = *np; *np = node_new_group(target); if (IS_NULL(*np)) { onig_node_free(target); return ONIGERR_MEMORY; } group = 2; } } else if (r == 2) { /* option only */ Node* target; OnigOptionType prev = env->options; env->options = BAG_(*np)->o.options; r = fetch_token(tok, src, end, env); if (r < 0) return r; r = parse_subexp(&target, tok, term, src, end, env, 0); env->options = prev; if (r < 0) { onig_node_free(target); return r; } NODE_BODY(*np) = target; return tok->type; } break; case TK_SUBEXP_CLOSE: if (! IS_SYNTAX_BV(env->syntax, ONIG_SYN_ALLOW_UNMATCHED_CLOSE_SUBEXP)) return ONIGERR_UNMATCHED_CLOSE_PARENTHESIS; if (tok->escaped) goto tk_raw_byte; else goto tk_byte; break; case TK_STRING: tk_byte: { *np = node_new_str(tok->backp, *src); CHECK_NULL_RETURN_MEMERR(*np); while (1) { r = fetch_token(tok, src, end, env); if (r < 0) return r; if (r != TK_STRING) break; r = onig_node_str_cat(*np, tok->backp, *src); if (r < 0) return r; } string_end: tp = np; goto repeat; } break; case TK_RAW_BYTE: tk_raw_byte: { *np = node_new_str_raw_char((UChar )tok->u.c); CHECK_NULL_RETURN_MEMERR(*np); len = 1; while (1) { if (len >= ONIGENC_MBC_MINLEN(env->enc)) { if (len == enclen(env->enc, STR_(*np)->s)) { r = fetch_token(tok, src, end, env); goto tk_raw_byte_end; } } r = fetch_token(tok, src, end, env); if (r < 0) return r; if (r != TK_RAW_BYTE) return ONIGERR_TOO_SHORT_MULTI_BYTE_STRING; r = node_str_cat_char(*np, (UChar )tok->u.c); if (r < 0) return r; len++; } tk_raw_byte_end: if (! 
ONIGENC_IS_VALID_MBC_STRING(env->enc, STR_(*np)->s, STR_(*np)->end)) return ONIGERR_INVALID_WIDE_CHAR_VALUE; NODE_STRING_CLEAR_RAW(*np); goto string_end; } break; case TK_CODE_POINT: { UChar buf[ONIGENC_CODE_TO_MBC_MAXLEN]; len = ONIGENC_CODE_TO_MBC(env->enc, tok->u.code, buf); if (len < 0) return len; #ifdef NUMBERED_CHAR_IS_NOT_CASE_AMBIG *np = node_new_str_raw(buf, buf + len); #else *np = node_new_str(buf, buf + len); #endif CHECK_NULL_RETURN_MEMERR(*np); } break; case TK_QUOTE_OPEN: { OnigCodePoint end_op[2]; UChar *qstart, *qend, *nextp; end_op[0] = (OnigCodePoint )MC_ESC(env->syntax); end_op[1] = (OnigCodePoint )'E'; qstart = *src; qend = find_str_position(end_op, 2, qstart, end, &nextp, env->enc); if (IS_NULL(qend)) { nextp = qend = end; } *np = node_new_str(qstart, qend); CHECK_NULL_RETURN_MEMERR(*np); *src = nextp; } break; case TK_CHAR_TYPE: { switch (tok->u.prop.ctype) { case ONIGENC_CTYPE_WORD: *np = node_new_ctype(tok->u.prop.ctype, tok->u.prop.not, env->options); CHECK_NULL_RETURN_MEMERR(*np); break; case ONIGENC_CTYPE_SPACE: case ONIGENC_CTYPE_DIGIT: case ONIGENC_CTYPE_XDIGIT: { CClassNode* cc; *np = node_new_cclass(); CHECK_NULL_RETURN_MEMERR(*np); cc = CCLASS_(*np); add_ctype_to_cc(cc, tok->u.prop.ctype, 0, env); if (tok->u.prop.not != 0) NCCLASS_SET_NOT(cc); } break; default: return ONIGERR_PARSER_BUG; break; } } break; case TK_CHAR_PROPERTY: r = parse_char_property(np, tok, src, end, env); if (r != 0) return r; break; case TK_CC_OPEN: { CClassNode* cc; r = parse_char_class(np, tok, src, end, env); if (r != 0) return r; cc = CCLASS_(*np); if (IS_IGNORECASE(env->options)) { IApplyCaseFoldArg iarg; iarg.env = env; iarg.cc = cc; iarg.alt_root = NULL_NODE; iarg.ptail = &(iarg.alt_root); r = ONIGENC_APPLY_ALL_CASE_FOLD(env->enc, env->case_fold_flag, i_apply_case_fold, &iarg); if (r != 0) { onig_node_free(iarg.alt_root); return r; } if (IS_NOT_NULL(iarg.alt_root)) { Node* work = onig_node_new_alt(*np, iarg.alt_root); if (IS_NULL(work)) { onig_node_free(iarg.alt_root); return ONIGERR_MEMORY; } *np = work; } } } break; case TK_ANYCHAR: *np = node_new_anychar(); CHECK_NULL_RETURN_MEMERR(*np); break; case TK_ANYCHAR_ANYTIME: *np = node_new_anychar(); CHECK_NULL_RETURN_MEMERR(*np); qn = node_new_quantifier(0, INFINITE_REPEAT, 0); CHECK_NULL_RETURN_MEMERR(qn); NODE_BODY(qn) = *np; *np = qn; break; case TK_BACKREF: len = tok->u.backref.num; *np = node_new_backref(len, (len > 1 ? tok->u.backref.refs : &(tok->u.backref.ref1)), tok->u.backref.by_name, #ifdef USE_BACKREF_WITH_LEVEL tok->u.backref.exist_level, tok->u.backref.level, #endif env); CHECK_NULL_RETURN_MEMERR(*np); break; #ifdef USE_CALL case TK_CALL: { int gnum = tok->u.call.gnum; *np = node_new_call(tok->u.call.name, tok->u.call.name_end, gnum, tok->u.call.by_number); CHECK_NULL_RETURN_MEMERR(*np); env->num_call++; if (tok->u.call.by_number != 0 && gnum == 0) { env->has_call_zero = 1; } } break; #endif case TK_ANCHOR: { int ascii_mode = IS_WORD_ASCII(env->options) && IS_WORD_ANCHOR_TYPE(tok->u.anchor) ? 
1 : 0; *np = onig_node_new_anchor(tok->u.anchor, ascii_mode); CHECK_NULL_RETURN_MEMERR(*np); } break; case TK_REPEAT: case TK_INTERVAL: if (IS_SYNTAX_BV(env->syntax, ONIG_SYN_CONTEXT_INDEP_REPEAT_OPS)) { if (IS_SYNTAX_BV(env->syntax, ONIG_SYN_CONTEXT_INVALID_REPEAT_OPS)) return ONIGERR_TARGET_OF_REPEAT_OPERATOR_NOT_SPECIFIED; else { *np = node_new_empty(); CHECK_NULL_RETURN_MEMERR(*np); } } else { goto tk_byte; } break; case TK_KEEP: r = node_new_keep(np, env); if (r < 0) return r; break; case TK_GENERAL_NEWLINE: r = node_new_general_newline(np, env); if (r < 0) return r; break; case TK_NO_NEWLINE: r = node_new_no_newline(np, env); if (r < 0) return r; break; case TK_TRUE_ANYCHAR: r = node_new_true_anychar(np, env); if (r < 0) return r; break; case TK_TEXT_SEGMENT: r = make_text_segment(np, env); if (r < 0) return r; break; default: return ONIGERR_PARSER_BUG; break; } { tp = np; re_entry: r = fetch_token(tok, src, end, env); if (r < 0) return r; repeat: if (r == TK_REPEAT || r == TK_INTERVAL) { Node* target; if (is_invalid_quantifier_target(*tp)) return ONIGERR_TARGET_OF_REPEAT_OPERATOR_INVALID; parse_depth++; if (parse_depth > ParseDepthLimit) return ONIGERR_PARSE_DEPTH_LIMIT_OVER; qn = node_new_quantifier(tok->u.repeat.lower, tok->u.repeat.upper, r == TK_INTERVAL); CHECK_NULL_RETURN_MEMERR(qn); QUANT_(qn)->greedy = tok->u.repeat.greedy; if (group == 2) { target = node_drop_group(*tp); *tp = NULL_NODE; } else { target = *tp; } r = set_quantifier(qn, target, group, env); if (r < 0) { onig_node_free(qn); return r; } if (tok->u.repeat.possessive != 0) { Node* en; en = node_new_bag(BAG_STOP_BACKTRACK); if (IS_NULL(en)) { onig_node_free(qn); return ONIGERR_MEMORY; } NODE_BODY(en) = qn; qn = en; } if (r == 0) { *tp = qn; } else if (r == 1) { /* x{1,1} ==> x */ onig_node_free(qn); *tp = target; } else if (r == 2) { /* split case: /abc+/ */ Node *tmp; *tp = node_new_list(*tp, NULL); if (IS_NULL(*tp)) { onig_node_free(qn); return ONIGERR_MEMORY; } tmp = NODE_CDR(*tp) = node_new_list(qn, NULL); if (IS_NULL(tmp)) { onig_node_free(qn); return ONIGERR_MEMORY; } tp = &(NODE_CAR(tmp)); } group = 0; goto re_entry; } } return r; } static int parse_branch(Node** top, PToken* tok, int term, UChar** src, UChar* end, ScanEnv* env, int group_head) { int r; Node *node, **headp; *top = NULL; r = parse_exp(&node, tok, term, src, end, env, group_head); if (r < 0) { onig_node_free(node); return r; } if (r == TK_EOT || r == term || r == TK_ALT) { *top = node; } else { *top = node_new_list(node, NULL); if (IS_NULL(*top)) { onig_node_free(node); return ONIGERR_MEMORY; } headp = &(NODE_CDR(*top)); while (r != TK_EOT && r != term && r != TK_ALT) { r = parse_exp(&node, tok, term, src, end, env, 0); if (r < 0) { onig_node_free(node); return r; } if (NODE_TYPE(node) == NODE_LIST) { *headp = node; while (IS_NOT_NULL(NODE_CDR(node))) node = NODE_CDR(node); headp = &(NODE_CDR(node)); } else { *headp = node_new_list(node, NULL); headp = &(NODE_CDR(*headp)); } } } return r; } /* term_tok: TK_EOT or TK_SUBEXP_CLOSE */ static int parse_subexp(Node** top, PToken* tok, int term, UChar** src, UChar* end, ScanEnv* env, int group_head) { int r; Node *node, **headp; *top = NULL; env->parse_depth++; if (env->parse_depth > ParseDepthLimit) return ONIGERR_PARSE_DEPTH_LIMIT_OVER; r = parse_branch(&node, tok, term, src, end, env, group_head); if (r < 0) { onig_node_free(node); return r; } if (r == term) { *top = node; } else if (r == TK_ALT) { *top = onig_node_new_alt(node, NULL); if (IS_NULL(*top)) { onig_node_free(node); return 
ONIGERR_MEMORY; } headp = &(NODE_CDR(*top)); while (r == TK_ALT) { r = fetch_token(tok, src, end, env); if (r < 0) return r; r = parse_branch(&node, tok, term, src, end, env, 0); if (r < 0) { onig_node_free(node); return r; } *headp = onig_node_new_alt(node, NULL); if (IS_NULL(*headp)) { onig_node_free(node); onig_node_free(*top); return ONIGERR_MEMORY; } headp = &(NODE_CDR(*headp)); } if (tok->type != (enum TokenSyms )term) goto err; } else { onig_node_free(node); err: if (term == TK_SUBEXP_CLOSE) return ONIGERR_END_PATTERN_WITH_UNMATCHED_PARENTHESIS; else return ONIGERR_PARSER_BUG; } env->parse_depth--; return r; } static int parse_regexp(Node** top, UChar** src, UChar* end, ScanEnv* env) { int r; PToken tok; r = fetch_token(&tok, src, end, env); if (r < 0) return r; r = parse_subexp(top, &tok, TK_EOT, src, end, env, 0); if (r < 0) return r; return 0; } #ifdef USE_CALL static int make_call_zero_body(Node* node, ScanEnv* env, Node** rnode) { int r; Node* x = node_new_memory(0 /* 0: is not named */); CHECK_NULL_RETURN_MEMERR(x); NODE_BODY(x) = node; BAG_(x)->m.regnum = 0; r = scan_env_set_mem_node(env, 0, x); if (r != 0) { onig_node_free(x); return r; } *rnode = x; return 0; } #endif extern int onig_parse_tree(Node** root, const UChar* pattern, const UChar* end, regex_t* reg, ScanEnv* env) { int r; UChar* p; #ifdef USE_CALLOUT RegexExt* ext; #endif names_clear(reg); scan_env_clear(env); env->options = reg->options; env->case_fold_flag = reg->case_fold_flag; env->enc = reg->enc; env->syntax = reg->syntax; env->pattern = (UChar* )pattern; env->pattern_end = (UChar* )end; env->reg = reg; *root = NULL; if (! ONIGENC_IS_VALID_MBC_STRING(env->enc, pattern, end)) return ONIGERR_INVALID_WIDE_CHAR_VALUE; p = (UChar* )pattern; r = parse_regexp(root, &p, (UChar* )end, env); #ifdef USE_CALL if (r != 0) return r; if (env->has_call_zero != 0) { Node* zero_node; r = make_call_zero_body(*root, env, &zero_node); if (r != 0) return r; *root = zero_node; } #endif reg->num_mem = env->num_mem; #ifdef USE_CALLOUT ext = reg->extp; if (IS_NOT_NULL(ext) && ext->callout_num > 0) { r = setup_ext_callout_list_values(reg); } #endif return r; } extern void onig_scan_env_set_error_string(ScanEnv* env, int ecode ARG_UNUSED, UChar* arg, UChar* arg_end) { env->error = arg; env->error_end = arg_end; }
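/*
 * A note on the recursion guard used throughout this parser: parse_char_class()
 * and parse_subexp() both increment env->parse_depth on entry and fail with
 * ONIGERR_PARSE_DEPTH_LIMIT_OVER once ParseDepthLimit is exceeded, which bounds
 * recursion on pathologically nested patterns (the resource-exhaustion concern
 * this record is filed under).  The standalone sketch below shows the same
 * guard pattern in isolation; the names parse_ctx_t, MAX_PARSE_DEPTH and
 * parse_group are illustrative only and are not part of the Oniguruma API.
 */
#include <stdio.h>

#define MAX_PARSE_DEPTH 4096   /* analogous role to ParseDepthLimit */

typedef struct {
  unsigned int depth;          /* analogous role to env->parse_depth */
} parse_ctx_t;

/* Returns 0 on success, -1 when the nesting limit is hit. */
static int parse_group(parse_ctx_t *ctx, const char **p)
{
  int r = 0;

  if (++ctx->depth > MAX_PARSE_DEPTH)
    return -1;                 /* refuse to recurse any deeper */

  while (**p != '\0' && **p != ')') {
    if (**p == '(') {
      (*p)++;
      r = parse_group(ctx, p); /* recursive descent, guarded above */
      if (r != 0) break;
    }
    (*p)++;
  }

  ctx->depth--;
  return r;
}

int main(void)
{
  parse_ctx_t ctx = { 0 };
  const char *pattern = "((a)(b(c)))";
  const char *p = pattern;

  printf("parse result: %d\n", parse_group(&ctx, &p));
  return 0;
}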
./CrossVul/dataset_final_sorted/CWE-400/c/good_1071_0
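/*
 * For context, applications do not call onig_parse_tree() directly; the parser
 * in the file above is reached when a pattern is compiled through the public
 * Oniguruma API.  The sketch below assumes the standard public entry points
 * (onig_initialize, onig_new, onig_search, onig_region_new/onig_region_free,
 * onig_free, onig_error_code_to_str, onig_end) and a UTF-8 build; it only
 * illustrates how a parse error such as ONIGERR_PARSE_DEPTH_LIMIT_OVER would
 * surface to a caller as a failed compile.
 */
#include <stdio.h>
#include <string.h>
#include <oniguruma.h>

static int compile_and_search(const char *pattern, const char *subject)
{
  regex_t *reg;
  OnigErrorInfo einfo;
  OnigRegion *region;
  int r;

  r = onig_new(&reg,
               (const UChar *)pattern,
               (const UChar *)(pattern + strlen(pattern)),
               ONIG_OPTION_DEFAULT, ONIG_ENCODING_UTF8,
               ONIG_SYNTAX_DEFAULT, &einfo);
  if (r != ONIG_NORMAL) {
    /* Any error returned by the parse/compile path, including a
       nesting-depth overflow, is reported here as a negative code. */
    UChar msg[ONIG_MAX_ERROR_MESSAGE_LEN];
    onig_error_code_to_str(msg, r, &einfo);
    fprintf(stderr, "compile failed: %s\n", (char *)msg);
    return r;
  }

  region = onig_region_new();
  r = onig_search(reg,
                  (const UChar *)subject,
                  (const UChar *)(subject + strlen(subject)),
                  (const UChar *)subject,
                  (const UChar *)(subject + strlen(subject)),
                  region, ONIG_OPTION_NONE);
  if (r >= 0)
    printf("match at byte offset %d\n", r);

  onig_region_free(region, 1);
  onig_free(reg);
  return r;
}

int main(void)
{
  OnigEncoding encs[] = { ONIG_ENCODING_UTF8 };

  onig_initialize(encs, 1);   /* required by recent Oniguruma releases */
  compile_and_search("a(b|c)+d", "xxabcbcd");
  onig_end();
  return 0;
}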
crossvul-cpp_data_good_1272_1
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/slab.h> #include "dm_services.h" #include "link_encoder.h" #include "stream_encoder.h" #include "resource.h" #include "dce110/dce110_resource.h" #include "include/irq_service_interface.h" #include "dce/dce_audio.h" #include "dce110/dce110_timing_generator.h" #include "irq/dce110/irq_service_dce110.h" #include "dce110/dce110_timing_generator_v.h" #include "dce/dce_link_encoder.h" #include "dce/dce_stream_encoder.h" #include "dce/dce_mem_input.h" #include "dce110/dce110_mem_input_v.h" #include "dce/dce_ipp.h" #include "dce/dce_transform.h" #include "dce110/dce110_transform_v.h" #include "dce/dce_opp.h" #include "dce110/dce110_opp_v.h" #include "dce/dce_clock_source.h" #include "dce/dce_hwseq.h" #include "dce110/dce110_hw_sequencer.h" #include "dce/dce_aux.h" #include "dce/dce_abm.h" #include "dce/dce_dmcu.h" #include "dce/dce_i2c.h" #define DC_LOGGER \ dc->ctx->logger #include "dce110/dce110_compressor.h" #include "reg_helper.h" #include "dce/dce_11_0_d.h" #include "dce/dce_11_0_sh_mask.h" #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT #include "gmc/gmc_8_2_d.h" #include "gmc/gmc_8_2_sh_mask.h" #endif #ifndef mmDP_DPHY_INTERNAL_CTRL #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7 #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7 #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7 #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7 #endif #ifndef mmBIOS_SCRATCH_2 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF #endif #ifndef mmDP_DPHY_BS_SR_SWAP_CNTL #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC #endif #ifndef mmDP_DPHY_FAST_TRAINING #define mmDP_DPHY_FAST_TRAINING 0x4ABC #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC #define 
mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC #endif #ifndef DPHY_RX_FAST_TRAINING_CAPABLE #define DPHY_RX_FAST_TRAINING_CAPABLE 0x1 #endif static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = { { .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL), }, { .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL), } }; /* set register offset */ #define SR(reg_name)\ .reg_name = mm ## reg_name /* set register offset with instance */ #define SRI(reg_name, block, id)\ .reg_name = mm ## block ## id ## _ ## reg_name static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCE110_COMMON_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { DMCU_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_dmcu_mask dmcu_mask = { DMCU_MASK_SH_LIST_DCE110(_MASK) }; static const struct dce_abm_registers abm_regs = { ABM_DCE110_COMMON_REG_LIST() }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCE110(_MASK) }; #define ipp_regs(id)\ [id] = {\ IPP_DCE110_REG_LIST_DCE_BASE(id)\ } static const struct dce_ipp_registers ipp_regs[] = { ipp_regs(0), ipp_regs(1), ipp_regs(2) }; static const struct dce_ipp_shift ipp_shift = { IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce_ipp_mask ipp_mask = { IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; #define transform_regs(id)\ [id] = {\ XFM_COMMON_REG_LIST_DCE110(id)\ } static const struct dce_transform_registers xfm_regs[] = { transform_regs(0), transform_regs(1), transform_regs(2) }; static const struct dce_transform_shift xfm_shift = { XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_transform_mask xfm_mask = { XFM_COMMON_MASK_SH_LIST_DCE110(_MASK) }; #define aux_regs(id)\ [id] = {\ AUX_REG_LIST(id)\ } static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3), aux_regs(4), aux_regs(5) }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3), hpd_regs(4), hpd_regs(5) }; #define link_regs(id)\ [id] = {\ LE_DCE110_REG_LIST(id)\ } static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(0), link_regs(1), link_regs(2), link_regs(3), link_regs(4), link_regs(5), link_regs(6), }; #define stream_enc_regs(id)\ [id] = {\ SE_COMMON_REG_LIST(id),\ .TMDS_CNTL = 0,\ } static const struct dce110_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2) }; static const struct dce_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE110(_MASK) }; #define opp_regs(id)\ [id] = {\ OPP_DCE_110_REG_LIST(id),\ } static const struct dce_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), opp_regs(3), opp_regs(4), opp_regs(5) }; static const 
struct dce_opp_shift opp_shift = { OPP_COMMON_MASK_SH_LIST_DCE_110(__SHIFT) }; static const struct dce_opp_mask opp_mask = { OPP_COMMON_MASK_SH_LIST_DCE_110(_MASK) }; #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ .AUX_RESET_MASK = 0 \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4), aux_engine_regs(5) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), audio_regs(2), audio_regs(3), audio_regs(4), audio_regs(5), audio_regs(6), }; static const struct dce_audio_shift audio_shift = { AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { AUD_COMMON_MASK_SH_LIST(_MASK) }; /* AG TBD Needs to be reduced back to 3 pipes once dce10 hw sequencer implemented. */ #define clk_src_regs(id)\ [id] = {\ CS_COMMON_REG_LIST_DCE_100_110(id),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0), clk_src_regs(1), clk_src_regs(2) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; static const struct bios_registers bios_regs = { .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; static const struct resource_caps carrizo_resource_cap = { .num_timing_generator = 3, .num_video_plane = 1, .num_audio = 3, .num_stream_encoder = 3, .num_pll = 2, .num_ddc = 3, }; static const struct resource_caps stoney_resource_cap = { .num_timing_generator = 2, .num_video_plane = 1, .num_audio = 3, .num_stream_encoder = 3, .num_pll = 2, .num_ddc = 3, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCE_RGB, .blends_with_below = true, .blends_with_above = true, .per_pixel_alpha = 1, .pixel_format_support = { .argb8888 = true, .nv12 = false, .fp16 = false }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 1, .fp16 = 1 }, .max_downscale_factor = { .argb8888 = 250, .nv12 = 1, .fp16 = 1 } }; static const struct dc_plane_cap underlay_plane_cap = { .type = DC_PLANE_TYPE_DCE_UNDERLAY, .blends_with_above = true, .per_pixel_alpha = 1, .pixel_format_support = { .argb8888 = false, .nv12 = true, .fp16 = false }, .max_upscale_factor = { .argb8888 = 1, .nv12 = 16000, .fp16 = 1 }, .max_downscale_factor = { .argb8888 = 1, .nv12 = 250, .fp16 = 1 } }; #define CTX ctx #define REG(reg) mm ## reg #ifndef mmCC_DC_HDMI_STRAPS #define mmCC_DC_HDMI_STRAPS 0x4819 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { REG_GET_2(CC_DC_HDMI_STRAPS, HDMI_DISABLE, &straps->hdmi_disable, AUDIO_STREAM_NUMBER, &straps->audio_stream_number); REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio); } static struct audio *create_audio( struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } static struct timing_generator *dce110_timing_generator_create( struct dc_context *ctx, uint32_t instance, const struct dce110_timing_generator_offsets *offsets) { struct dce110_timing_generator *tg110 = kzalloc(sizeof(struct dce110_timing_generator), 
GFP_KERNEL); if (!tg110) return NULL; dce110_timing_generator_construct(tg110, ctx, instance, offsets); return &tg110->base; } static struct stream_encoder *dce110_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dce110_stream_encoder *enc110 = kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); if (!enc110) return NULL; dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc110->base; } #define SRII(reg_name, block, id)\ .reg_name[id] = mm ## block ## id ## _ ## reg_name static const struct dce_hwseq_registers hwseq_stoney_reg = { HWSEQ_ST_REG_LIST() }; static const struct dce_hwseq_registers hwseq_cz_reg = { HWSEQ_CZ_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCE11_MASK_SH_LIST(__SHIFT), }; static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCE11_MASK_SH_LIST(_MASK), }; static struct dce_hwseq *dce110_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = ASIC_REV_IS_STONEY(ctx->asic_id.hw_internal_rev) ? &hwseq_stoney_reg : &hwseq_cz_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; hws->wa.blnd_crtc_trigger = true; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, .create_stream_encoder = dce110_stream_encoder_create, .create_hwseq = dce110_hwseq_create, }; #define mi_inst_regs(id) { \ MI_DCE11_REG_LIST(id), \ .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \ } static const struct dce_mem_input_registers mi_regs[] = { mi_inst_regs(0), mi_inst_regs(1), mi_inst_regs(2), }; static const struct dce_mem_input_shift mi_shifts = { MI_DCE11_MASK_SH_LIST(__SHIFT), .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT }; static const struct dce_mem_input_mask mi_masks = { MI_DCE11_MASK_SH_LIST(_MASK), .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK }; static struct mem_input *dce110_mem_input_create( struct dc_context *ctx, uint32_t inst) { struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), GFP_KERNEL); if (!dce_mi) { BREAK_TO_DEBUGGER(); return NULL; } dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); dce_mi->wa.single_head_rdreq_dmif_limit = 3; return &dce_mi->base; } static void dce110_transform_destroy(struct transform **xfm) { kfree(TO_DCE_TRANSFORM(*xfm)); *xfm = NULL; } static struct transform *dce110_transform_create( struct dc_context *ctx, uint32_t inst) { struct dce_transform *transform = kzalloc(sizeof(struct dce_transform), GFP_KERNEL); if (!transform) return NULL; dce_transform_construct(transform, ctx, inst, &xfm_regs[inst], &xfm_shift, &xfm_mask); return &transform->base; } static struct input_pixel_processor *dce110_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); return NULL; } dce_ipp_construct(ipp, ctx, inst, &ipp_regs[inst], &ipp_shift, &ipp_mask); return &ipp->base; } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 300000, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true }; static struct link_encoder *dce110_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); if (!enc110) 
return NULL; dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; } static struct output_pixel_processor *dce110_opp_create( struct dc_context *ctx, uint32_t inst) { struct dce110_opp *opp = kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); if (!opp) return NULL; dce110_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } struct dce_aux *dce110_aux_engine_create( struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst]); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5), i2c_inst_regs(6), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) }; struct dce_i2c_hw *dce110_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dce100_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } struct clock_source *dce110_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dce110_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } BREAK_TO_DEBUGGER(); return NULL; } void dce110_clock_source_destroy(struct clock_source **clk_src) { struct dce110_clk_src *dce110_clk_src; if (!clk_src) return; dce110_clk_src = TO_DCE110_CLK_SRC(*clk_src); kfree(dce110_clk_src->dp_ss_params); kfree(dce110_clk_src->hdmi_ss_params); kfree(dce110_clk_src->dvi_ss_params); kfree(dce110_clk_src); *clk_src = NULL; } static void destruct(struct dce110_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.opps[i] != NULL) dce110_opp_destroy(&pool->base.opps[i]); if (pool->base.transforms[i] != NULL) dce110_transform_destroy(&pool->base.transforms[i]); if (pool->base.ipps[i] != NULL) dce_ipp_destroy(&pool->base.ipps[i]); if (pool->base.mis[i] != NULL) { kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); pool->base.mis[i] = NULL; } if (pool->base.timing_generators[i] != NULL) { kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); } for (i = 0; i < 
pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) { dce110_clock_source_destroy(&pool->base.clock_sources[i]); } } if (pool->base.dp_clock_source != NULL) dce110_clock_source_destroy(&pool->base.dp_clock_source); for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i] != NULL) { dce_aud_destroy(&pool->base.audios[i]); } } if (pool->base.abm != NULL) dce_abm_destroy(&pool->base.abm); if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); } } static void get_pixel_clock_parameters( const struct pipe_ctx *pipe_ctx, struct pixel_clk_params *pixel_clk_params) { const struct dc_stream_state *stream = pipe_ctx->stream; /*TODO: is this halved for YCbCr 420? in that case we might want to move * the pixel clock normalization for hdmi up to here instead of doing it * in pll_adjust_pix_clk */ pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; pixel_clk_params->encoder_object_id = stream->link->link_enc->id; pixel_clk_params->signal_type = pipe_ctx->stream->signal; pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; /* TODO: un-hardcode*/ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * LINK_RATE_REF_FREQ_IN_KHZ; pixel_clk_params->flags.ENABLE_SS = 0; pixel_clk_params->color_depth = stream->timing.display_color_depth; pixel_clk_params->flags.DISPLAY_BLANKED = 1; pixel_clk_params->flags.SUPPORT_YCBCR420 = (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420); pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding; if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) { pixel_clk_params->color_depth = COLOR_DEPTH_888; } if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) { pixel_clk_params->requested_pix_clk_100hz = pixel_clk_params->requested_pix_clk_100hz / 2; } if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) pixel_clk_params->requested_pix_clk_100hz *= 2; } void dce110_resource_build_pipe_hw_param(struct pipe_ctx *pipe_ctx) { get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params); pipe_ctx->clock_source->funcs->get_pix_clk_dividers( pipe_ctx->clock_source, &pipe_ctx->stream_res.pix_clk_params, &pipe_ctx->pll_settings); resource_build_bit_depth_reduction_params(pipe_ctx->stream, &pipe_ctx->stream->bit_depth_params); pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding; } static bool is_surface_pixel_format_supported(struct pipe_ctx *pipe_ctx, unsigned int underlay_idx) { if (pipe_ctx->pipe_idx != underlay_idx) return true; if (!pipe_ctx->plane_state) return false; if (pipe_ctx->plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) return false; return true; } static enum dc_status build_mapped_resource( const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) { struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream); if (!pipe_ctx) return DC_ERROR_UNEXPECTED; if (!is_surface_pixel_format_supported(pipe_ctx, dc->res_pool->underlay_pipe_index)) return DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED; dce110_resource_build_pipe_hw_param(pipe_ctx); /* TODO: validate audio ASIC caps, encoder */ resource_build_info_frame(pipe_ctx); return DC_OK; } static bool dce110_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate) { bool result = false; DC_LOG_BANDWIDTH_CALCS( "%s: start", __func__); if (bw_calcs( dc->ctx, dc->bw_dceip, dc->bw_vbios, context->res_ctx.pipe_ctx, 
dc->res_pool->pipe_count, &context->bw_ctx.bw.dce)) result = true; if (!result) DC_LOG_BANDWIDTH_VALIDATION("%s: %dx%d@%d Bandwidth validation failed!\n", __func__, context->streams[0]->timing.h_addressable, context->streams[0]->timing.v_addressable, context->streams[0]->timing.pix_clk_100hz / 10); if (memcmp(&dc->current_state->bw_ctx.bw.dce, &context->bw_ctx.bw.dce, sizeof(context->bw_ctx.bw.dce))) { DC_LOG_BANDWIDTH_CALCS( "%s: finish,\n" "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" "stutMark_b: %d stutMark_a: %d\n" "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" "stutMark_b: %d stutMark_a: %d\n" "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n" "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n" "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n" , __func__, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].b_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].a_mark, context->bw_ctx.bw.dce.urgent_wm_ns[0].b_mark, context->bw_ctx.bw.dce.urgent_wm_ns[0].a_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].b_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].a_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].b_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].a_mark, context->bw_ctx.bw.dce.urgent_wm_ns[1].b_mark, context->bw_ctx.bw.dce.urgent_wm_ns[1].a_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].b_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].a_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].b_mark, context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].a_mark, context->bw_ctx.bw.dce.urgent_wm_ns[2].b_mark, context->bw_ctx.bw.dce.urgent_wm_ns[2].a_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].b_mark, context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].a_mark, context->bw_ctx.bw.dce.stutter_mode_enable, context->bw_ctx.bw.dce.cpuc_state_change_enable, context->bw_ctx.bw.dce.cpup_state_change_enable, context->bw_ctx.bw.dce.nbp_state_change_enable, context->bw_ctx.bw.dce.all_displays_in_sync, context->bw_ctx.bw.dce.dispclk_khz, context->bw_ctx.bw.dce.sclk_khz, context->bw_ctx.bw.dce.sclk_deep_sleep_khz, context->bw_ctx.bw.dce.yclk_khz, context->bw_ctx.bw.dce.blackout_recovery_time_us); } return result; } enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps) { if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) || ((plane_state->dst_rect.height * 2) < plane_state->src_rect.height)) return DC_FAIL_SURFACE_VALIDATE; return DC_OK; } static bool dce110_validate_surface_sets( struct dc_state *context) { int i, j; for (i = 0; i < context->stream_count; i++) { if (context->stream_status[i].plane_count == 0) continue; if (context->stream_status[i].plane_count > 2) return false; for (j = 0; j < context->stream_status[i].plane_count; j++) { struct dc_plane_state *plane = context->stream_status[i].plane_states[j]; /* underlay validation */ if (plane->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { if ((plane->src_rect.width > 1920 || plane->src_rect.height > 1080)) return false; /* we don't have the logic to support underlay * only yet so block the use case where we get * NV12 plane as top layer */ if (j == 0) return false; /* irrespective of plane format, * stream should be RGB encoded */ if (context->streams[i]->timing.pixel_encoding != PIXEL_ENCODING_RGB) return false; } } } return true; } enum dc_status dce110_validate_global( struct dc *dc, struct dc_state *context) { if 
(!dce110_validate_surface_sets(context)) return DC_FAIL_SURFACE_VALIDATE; return DC_OK; } static enum dc_status dce110_add_stream_to_ctx( struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) { enum dc_status result = DC_ERROR_UNEXPECTED; result = resource_map_pool_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = resource_map_clock_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = build_mapped_resource(dc, new_ctx, dc_stream); return result; } static struct pipe_ctx *dce110_acquire_underlay( struct dc_state *context, const struct resource_pool *pool, struct dc_stream_state *stream) { struct dc *dc = stream->ctx->dc; struct resource_context *res_ctx = &context->res_ctx; unsigned int underlay_idx = pool->underlay_pipe_index; struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx]; if (res_ctx->pipe_ctx[underlay_idx].stream) return NULL; pipe_ctx->stream_res.tg = pool->timing_generators[underlay_idx]; pipe_ctx->plane_res.mi = pool->mis[underlay_idx]; /*pipe_ctx->plane_res.ipp = res_ctx->pool->ipps[underlay_idx];*/ pipe_ctx->plane_res.xfm = pool->transforms[underlay_idx]; pipe_ctx->stream_res.opp = pool->opps[underlay_idx]; pipe_ctx->pipe_idx = underlay_idx; pipe_ctx->stream = stream; if (!dc->current_state->res_ctx.pipe_ctx[underlay_idx].stream) { struct tg_color black_color = {0}; struct dc_bios *dcb = dc->ctx->dc_bios; dc->hwss.enable_display_power_gating( dc, pipe_ctx->stream_res.tg->inst, dcb, PIPE_GATING_CONTROL_DISABLE); /* * This is for powering on underlay, so crtc does not * need to be enabled */ pipe_ctx->stream_res.tg->funcs->program_timing(pipe_ctx->stream_res.tg, &stream->timing, 0, 0, 0, 0, pipe_ctx->stream->signal, false); pipe_ctx->stream_res.tg->funcs->enable_advanced_request( pipe_ctx->stream_res.tg, true, &stream->timing); pipe_ctx->plane_res.mi->funcs->allocate_mem_input(pipe_ctx->plane_res.mi, stream->timing.h_total, stream->timing.v_total, stream->timing.pix_clk_100hz / 10, context->stream_count); color_space_to_black_color(dc, COLOR_SPACE_YCBCR601, &black_color); pipe_ctx->stream_res.tg->funcs->set_blank_color( pipe_ctx->stream_res.tg, &black_color); } return pipe_ctx; } static void dce110_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } struct stream_encoder *dce110_find_first_free_match_stream_enc_for_link( struct resource_context *res_ctx, const struct resource_pool *pool, struct dc_stream_state *stream) { int i; int j = -1; struct dc_link *link = stream->link; for (i = 0; i < pool->stream_enc_count; i++) { if (!res_ctx->is_stream_enc_acquired[i] && pool->stream_enc[i]) { /* Store first available for MST second display * in daisy chain use case */ j = i; if (pool->stream_enc[i]->id == link->link_enc->preferred_engine) return pool->stream_enc[i]; } } /* * For CZ and later, we can allow DIG FE and BE to differ for all display types */ if (j >= 0) return pool->stream_enc[j]; return NULL; } static const struct resource_funcs dce110_res_pool_funcs = { .destroy = dce110_destroy_resource_pool, .link_enc_create = dce110_link_encoder_create, .validate_bandwidth = dce110_validate_bandwidth, .validate_plane = dce110_validate_plane, .acquire_idle_pipe_for_layer = dce110_acquire_underlay, .add_stream_to_ctx = dce110_add_stream_to_ctx, .validate_global = dce110_validate_global, .find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link }; static bool 
underlay_create(struct dc_context *ctx, struct resource_pool *pool) { struct dce110_timing_generator *dce110_tgv = kzalloc(sizeof(*dce110_tgv), GFP_KERNEL); struct dce_transform *dce110_xfmv = kzalloc(sizeof(*dce110_xfmv), GFP_KERNEL); struct dce_mem_input *dce110_miv = kzalloc(sizeof(*dce110_miv), GFP_KERNEL); struct dce110_opp *dce110_oppv = kzalloc(sizeof(*dce110_oppv), GFP_KERNEL); if (!dce110_tgv || !dce110_xfmv || !dce110_miv || !dce110_oppv) { kfree(dce110_tgv); kfree(dce110_xfmv); kfree(dce110_miv); kfree(dce110_oppv); return false; } dce110_opp_v_construct(dce110_oppv, ctx); dce110_timing_generator_v_construct(dce110_tgv, ctx); dce110_mem_input_v_construct(dce110_miv, ctx); dce110_transform_v_construct(dce110_xfmv, ctx); pool->opps[pool->pipe_count] = &dce110_oppv->base; pool->timing_generators[pool->pipe_count] = &dce110_tgv->base; pool->mis[pool->pipe_count] = &dce110_miv->base; pool->transforms[pool->pipe_count] = &dce110_xfmv->base; pool->pipe_count++; /* update the public caps to indicate an underlay is available */ ctx->dc->caps.max_slave_planes = 1; ctx->dc->caps.max_slave_planes = 1; return true; } static void bw_calcs_data_update_from_pplib(struct dc *dc) { struct dm_pp_clock_levels clks = {0}; /*do system clock*/ dm_pp_get_clock_levels_by_type( dc->ctx, DM_PP_CLOCK_TYPE_ENGINE_CLK, &clks); /* convert all the clocks from kHz to fixed point MHz */ dc->bw_vbios->high_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels-1], 1000); dc->bw_vbios->mid1_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels/8], 1000); dc->bw_vbios->mid2_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*2/8], 1000); dc->bw_vbios->mid3_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*3/8], 1000); dc->bw_vbios->mid4_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*4/8], 1000); dc->bw_vbios->mid5_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*5/8], 1000); dc->bw_vbios->mid6_sclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels*6/8], 1000); dc->bw_vbios->low_sclk = bw_frc_to_fixed( clks.clocks_in_khz[0], 1000); dc->sclk_lvls = clks; /*do display clock*/ dm_pp_get_clock_levels_by_type( dc->ctx, DM_PP_CLOCK_TYPE_DISPLAY_CLK, &clks); dc->bw_vbios->high_voltage_max_dispclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels-1], 1000); dc->bw_vbios->mid_voltage_max_dispclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels>>1], 1000); dc->bw_vbios->low_voltage_max_dispclk = bw_frc_to_fixed( clks.clocks_in_khz[0], 1000); /*do memory clock*/ dm_pp_get_clock_levels_by_type( dc->ctx, DM_PP_CLOCK_TYPE_MEMORY_CLK, &clks); dc->bw_vbios->low_yclk = bw_frc_to_fixed( clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); } const struct resource_caps *dce110_resource_cap( struct hw_asic_id *asic_id) { if (ASIC_REV_IS_STONEY(asic_id->hw_internal_rev)) return &stoney_resource_cap; else return &carrizo_resource_cap; } static bool construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool, struct hw_asic_id asic_id) { unsigned int i; struct dc_context *ctx = dc->ctx; struct dc_bios *bp; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = dce110_resource_cap(&ctx->asic_id); pool->base.funcs = &dce110_res_pool_funcs; /************************************************* * Resource + asic cap hardcoding *
*************************************************/ pool->base.pipe_count = pool->base.res_cap->num_timing_generator; pool->base.underlay_pipe_index = pool->base.pipe_count; pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; dc->caps.max_downscale_ratio = 150; dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 128; dc->caps.is_apu = true; /************************************************* * Create resources * *************************************************/ bp = ctx->dc_bios; if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); pool->base.clock_sources[0] = dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[1] = dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clk_src_count = 2; /* TODO: find out if CZ support 3 PLLs */ } if (pool->base.dp_clock_source == NULL) { dm_error("DC: failed to create dp clock source!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } } pool->base.dmcu = dce_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dce110_create(&init_data); if (!pool->base.irqs) goto res_create_fail; } for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = dce110_timing_generator_create( ctx, i, &dce110_tg_offsets[i]); if (pool->base.timing_generators[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto res_create_fail; } pool->base.mis[i] = dce110_mem_input_create(ctx, i); if (pool->base.mis[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create memory input!\n"); goto res_create_fail; } pool->base.ipps[i] = dce110_ipp_create(ctx, i); if (pool->base.ipps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create input pixel processor!\n"); goto res_create_fail; } pool->base.transforms[i] = dce110_transform_create(ctx, i); if (pool->base.transforms[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create transform!\n"); goto res_create_fail; } pool->base.opps[i] = dce110_opp_create(ctx, i); if (pool->base.opps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create output pixel processor!\n"); goto res_create_fail; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce110_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto res_create_fail; } pool->base.hw_i2cs[i] = dce110_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create i2c engine!!\n"); goto res_create_fail; } pool->base.sw_i2cs[i] = NULL; } if (dc->config.fbc_support) dc->fbc_compressor = dce110_compressor_create(ctx); if (!underlay_create(ctx, &pool->base)) goto res_create_fail; if 
(!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) goto res_create_fail; /* Create hardware sequencer */ dce110_hw_sequencer_construct(dc); dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < pool->base.underlay_pipe_index; ++i) dc->caps.planes[i] = plane_cap; dc->caps.planes[pool->base.underlay_pipe_index] = underlay_plane_cap; bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id); bw_calcs_data_update_from_pplib(dc); return true; res_create_fail: destruct(pool); return false; } struct resource_pool *dce110_create_resource_pool( uint8_t num_virtual_links, struct dc *dc, struct hw_asic_id asic_id) { struct dce110_resource_pool *pool = kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (construct(num_virtual_links, dc, pool, asic_id)) return &pool->base; kfree(pool); BREAK_TO_DEBUGGER(); return NULL; }
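/*
 * Illustrative sketch only (not part of the upstream driver source): how a
 * caller might drive the factory above.  The wrapper name and the way
 * num_virtual_links is obtained are assumptions made for this example; only
 * dce110_create_resource_pool() and the pool's funcs->destroy hook are taken
 * from the code above.  Wrapped in #if 0 so it cannot affect any build.
 */
#if 0
static void example_dce110_pool_lifetime(struct dc *dc, uint8_t num_virtual_links)
{
	struct resource_pool *pool;

	/* construct() either fully builds the pool or unwinds via destruct(),
	 * so a NULL check is the only error handling a caller needs here. */
	pool = dce110_create_resource_pool(num_virtual_links, dc, dc->ctx->asic_id);
	if (!pool)
		return;

	/* ... use pool->pipe_count, pool->funcs->validate_bandwidth(), ... */

	/* release through the pool's own destroy hook; it NULLs the pointer */
	pool->funcs->destroy(&pool);
}
#endif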
./CrossVul/dataset_final_sorted/CWE-400/c/good_1272_1
crossvul-cpp_data_bad_4780_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/channel.h" #include "magick/colormap.h" #include "magick/colormap-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/constitute.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel.h" #include "magick/pixel-accessor.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/registry.h" #include "magick/quantum-private.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #ifdef MAGICKCORE_ZLIB_DELEGATE #include <zlib.h> #endif #include "psd-private.h" /* Define declaractions. */ #define MaxPSDChannels 56 #define PSDQuantum(x) (((ssize_t) (x)+1) & -2) /* Enumerated declaractions. */ typedef enum { Raw = 0, RLE = 1, ZipWithoutPrediction = 2, ZipWithPrediction = 3 } PSDCompressionType; typedef enum { BitmapMode = 0, GrayscaleMode = 1, IndexedMode = 2, RGBMode = 3, CMYKMode = 4, MultichannelMode = 7, DuotoneMode = 8, LabMode = 9 } PSDImageType; /* Typedef declaractions. */ typedef struct _ChannelInfo { short int type; size_t size; } ChannelInfo; typedef struct _MaskInfo { Image *image; RectangleInfo page; unsigned char background, flags; } MaskInfo; typedef struct _LayerInfo { ChannelInfo channel_info[MaxPSDChannels]; char blendkey[4]; Image *image; MaskInfo mask; Quantum opacity; RectangleInfo page; size_t offset_x, offset_y; unsigned char clipping, flags, name[256], visible; unsigned short channels; StringInfo *info; } LayerInfo; /* Forward declarations. */ static MagickBooleanType WritePSDImage(const ImageInfo *,Image *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P S D % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPSD()() returns MagickTrue if the image format type, identified by the % magick string, is PSD. 
% % The format of the IsPSD method is: % % MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((const char *) magick,"8BPS",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPSDImage() reads an Adobe Photoshop image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadPSDImage method is: % % Image *ReadPSDImage(image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static const char *CompositeOperatorToPSDBlendMode(CompositeOperator op) { const char *blend_mode; switch (op) { case ColorBurnCompositeOp: blend_mode = "idiv"; break; case ColorDodgeCompositeOp: blend_mode = "div "; break; case ColorizeCompositeOp: blend_mode = "colr"; break; case DarkenCompositeOp: blend_mode = "dark"; break; case DifferenceCompositeOp: blend_mode = "diff"; break; case DissolveCompositeOp: blend_mode = "diss"; break; case ExclusionCompositeOp: blend_mode = "smud"; break; case HardLightCompositeOp: blend_mode = "hLit"; break; case HardMixCompositeOp: blend_mode = "hMix"; break; case HueCompositeOp: blend_mode = "hue "; break; case LightenCompositeOp: blend_mode = "lite"; break; case LinearBurnCompositeOp: blend_mode = "lbrn"; break; case LinearDodgeCompositeOp:blend_mode = "lddg"; break; case LinearLightCompositeOp:blend_mode = "lLit"; break; case LuminizeCompositeOp: blend_mode = "lum "; break; case MultiplyCompositeOp: blend_mode = "mul "; break; case OverCompositeOp: blend_mode = "norm"; break; case OverlayCompositeOp: blend_mode = "over"; break; case PinLightCompositeOp: blend_mode = "pLit"; break; case SaturateCompositeOp: blend_mode = "sat "; break; case ScreenCompositeOp: blend_mode = "scrn"; break; case SoftLightCompositeOp: blend_mode = "sLit"; break; case VividLightCompositeOp: blend_mode = "vLit"; break; default: blend_mode = "norm"; break; } return(blend_mode); } /* For some reason Photoshop seems to blend semi-transparent pixels with white. This method reverts the blending. This can be disabled by setting the option 'psd:alpha-unblend' to off. 
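  In other words (a sketch of the arithmetic, assuming straight alpha): the
  stored channel value is observed = alpha*color + (1-alpha)*QuantumRange, so
  CorrectPSDAlphaBlend() below recovers color = (observed -
  (1-alpha)*QuantumRange)/alpha for each channel, skipping pixels whose alpha
  is exactly 0 or 1.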
*/ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info, Image *image, ExceptionInfo* exception) { const char *option; MagickBooleanType status; ssize_t y; if (image->matte == MagickFalse || image->colorspace != sRGBColorspace) return(MagickTrue); option=GetImageOption(image_info,"psd:alpha-unblend"); if (IsStringNotFalse(option) == MagickFalse) return(MagickTrue); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; gamma=QuantumScale*GetPixelAlpha(q); if (gamma != 0.0 && gamma != 1.0) { SetPixelRed(q,(GetPixelRed(q)-((1.0-gamma)*QuantumRange))/gamma); SetPixelGreen(q,(GetPixelGreen(q)-((1.0-gamma)*QuantumRange))/gamma); SetPixelBlue(q,(GetPixelBlue(q)-((1.0-gamma)*QuantumRange))/gamma); } q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity, MagickBooleanType revert,ExceptionInfo *exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying layer opacity %.20g", (double) opacity); if (opacity == QuantumRange) return(MagickTrue); image->matte=MagickTrue; status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(q,(Quantum) (QuantumScale*(GetPixelAlpha(q)*opacity))); else if (opacity > 0) SetPixelAlpha(q,(Quantum) (QuantumRange*(GetPixelAlpha(q)/ (MagickRealType) opacity))); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask, Quantum background,MagickBooleanType revert,ExceptionInfo *exception) { Image *complete_mask; MagickBooleanType status; MagickPixelPacket color; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying opacity mask"); complete_mask=CloneImage(image,image->columns,image->rows,MagickTrue, exception); complete_mask->matte=MagickTrue; GetMagickPixelPacket(complete_mask,&color); color.red=background; SetImageColor(complete_mask,&color); status=CompositeImage(complete_mask,OverCompositeOp,mask, mask->page.x-image->page.x,mask->page.y-image->page.y); if (status == MagickFalse) { complete_mask=DestroyImage(complete_mask); return(status); } image->matte=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for 
schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register PixelPacket *p; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception); if ((q == (PixelPacket *) NULL) || (p == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha=GetPixelAlpha(q); intensity=GetPixelIntensity(complete_mask,p); if (revert == MagickFalse) SetPixelAlpha(q,ClampToQuantum(intensity*(QuantumScale*alpha))); else if (intensity > 0) SetPixelAlpha(q,ClampToQuantum((alpha/intensity)*QuantumRange)); q++; p++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } complete_mask=DestroyImage(complete_mask); return(status); } static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info, ExceptionInfo *exception) { char *key; RandomInfo *random_info; StringInfo *key_info; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " preserving opacity mask"); random_info=AcquireRandomInfo(); key_info=GetRandomKey(random_info,2+1); key=(char *) GetStringInfoDatum(key_info); key[8]=layer_info->mask.background; key[9]='\0'; layer_info->mask.image->page.x+=layer_info->page.x; layer_info->mask.image->page.y+=layer_info->page.y; (void) SetImageRegistry(ImageRegistryType,(const char *) key, layer_info->mask.image,exception); (void) SetImageArtifact(layer_info->image,"psd:opacity-mask", (const char *) key); key_info=DestroyStringInfo(key_info); random_info=DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 
0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(*compact_pixels >> 6) & 0x03; *pixels++=(*compact_pixels >> 4) & 0x03; *pixels++=(*compact_pixels >> 2) & 0x03; *pixels++=(*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++=(*compact_pixels >> 4) & 0xff; *pixels++=(*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++=(*compact_pixels); break; } } compact_pixels++; } } return(i); } static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info, const ssize_t number_layers) { ssize_t i; for (i=0; i<number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image=DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(Image *image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return(2); else if (image->depth > 8) return(2); } else if (image->depth > 8) return(2); return(1); } static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image) { if (psd_info->version == 1) return((MagickSizeType) ReadBlobLong(image)); return((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image *image) { if (image->depth == 1) return(((image->columns+7)/8)*GetPSDPacketSize(image)); else return(image->columns*GetPSDPacketSize(image)); } static const char *ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static void ParseImageResourceBlocks(Image *image, const unsigned char *blocks,size_t length, MagickBooleanType *has_merged_image) { const unsigned char *p; StringInfo *profile; unsigned int count, long_sans; unsigned short id, short_sans; if (length < 16) return; profile=BlobToStringInfo((const void *) NULL,length); SetStringInfoDatum(profile,blocks); (void) SetImageProfile(image,"8bim",profile); profile=DestroyStringInfo(profile); for (p=blocks; (p >= blocks) && (p < (blocks+length-16)); ) { if (LocaleNCompare((const char *) p,"8BIM",4) != 0) 
break; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); if ((p+count) > (blocks+length-16)) return; switch (id) { case 0x03ed: { char value[MaxTextExtent]; unsigned short resolution; /* Resolution info. */ p=PushShortPixel(MSBEndian,p,&resolution); image->x_resolution=(double) resolution; (void) FormatLocaleString(value,MaxTextExtent,"%g", image->x_resolution); (void) SetImageProperty(image,"tiff:XResolution",value); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&resolution); image->y_resolution=(double) resolution; (void) FormatLocaleString(value,MaxTextExtent,"%g", image->y_resolution); (void) SetImageProperty(image,"tiff:YResolution",value); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); image->units=PixelsPerInchResolution; break; } case 0x0421: { if (*(p+4) == 0) *has_merged_image=MagickFalse; p+=count; break; } default: { p+=count; break; } } if ((count & 0x01) != 0) p++; } return; } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char *) NULL) return(OverCompositeOp); if (LocaleNCompare(mode,"norm",4) == 0) return(OverCompositeOp); if (LocaleNCompare(mode,"mul ",4) == 0) return(MultiplyCompositeOp); if (LocaleNCompare(mode,"diss",4) == 0) return(DissolveCompositeOp); if (LocaleNCompare(mode,"diff",4) == 0) return(DifferenceCompositeOp); if (LocaleNCompare(mode,"dark",4) == 0) return(DarkenCompositeOp); if (LocaleNCompare(mode,"lite",4) == 0) return(LightenCompositeOp); if (LocaleNCompare(mode,"hue ",4) == 0) return(HueCompositeOp); if (LocaleNCompare(mode,"sat ",4) == 0) return(SaturateCompositeOp); if (LocaleNCompare(mode,"colr",4) == 0) return(ColorizeCompositeOp); if (LocaleNCompare(mode,"lum ",4) == 0) return(LuminizeCompositeOp); if (LocaleNCompare(mode,"scrn",4) == 0) return(ScreenCompositeOp); if (LocaleNCompare(mode,"over",4) == 0) return(OverlayCompositeOp); if (LocaleNCompare(mode,"hLit",4) == 0) return(HardLightCompositeOp); if (LocaleNCompare(mode,"sLit",4) == 0) return(SoftLightCompositeOp); if (LocaleNCompare(mode,"smud",4) == 0) return(ExclusionCompositeOp); if (LocaleNCompare(mode,"div ",4) == 0) return(ColorDodgeCompositeOp); if (LocaleNCompare(mode,"idiv",4) == 0) return(ColorBurnCompositeOp); if (LocaleNCompare(mode,"lbrn",4) == 0) return(LinearBurnCompositeOp); if (LocaleNCompare(mode,"lddg",4) == 0) return(LinearDodgeCompositeOp); if (LocaleNCompare(mode,"lLit",4) == 0) return(LinearLightCompositeOp); if (LocaleNCompare(mode,"vLit",4) == 0) return(VividLightCompositeOp); if (LocaleNCompare(mode,"pLit",4) == 0) return(PinLightCompositeOp); if (LocaleNCompare(mode,"hMix",4) == 0) return(HardMixCompositeOp); return(OverCompositeOp); } static inline void ReversePSDString(Image *image,char *p,size_t length) { char *q; if (image->endian == MSBEndian) return; q=p+length; for(--q; p < q; ++p, --q) { *p = *p ^ *q, *q = *p ^ *q, *p = *p ^ *q; } } static inline void SetPSDPixel(Image *image,const size_t channels, const ssize_t type,const size_t packet_size,const Quantum pixel, PixelPacket *q,IndexPacket *indexes,ssize_t x) { if (image->storage_class == PseudoClass) { if (packet_size == 1) SetPixelIndex(indexes+x,ScaleQuantumToChar(pixel)); else SetPixelIndex(indexes+x,ScaleQuantumToShort(pixel)); 
SetPixelRGBO(q,image->colormap+(ssize_t) ConstrainColormapIndex(image,GetPixelIndex(indexes+x))); return; } switch (type) { case -1: { SetPixelAlpha(q,pixel); break; } case -2: case 0: { SetPixelRed(q,pixel); if (channels == 1 || type == -2) { SetPixelGreen(q,GetPixelRed(q)); SetPixelBlue(q,GetPixelRed(q)); } break; } case 1: { if (image->storage_class == PseudoClass) SetPixelAlpha(q,pixel); else SetPixelGreen(q,pixel); break; } case 2: { if (image->storage_class == PseudoClass) SetPixelAlpha(q,pixel); else SetPixelBlue(q,pixel); break; } case 3: { if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,pixel); else if (image->matte != MagickFalse) SetPixelAlpha(q,pixel); break; } case 4: { if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) && (channels > 3)) break; if (image->matte != MagickFalse) SetPixelAlpha(q,pixel); break; } } } static MagickBooleanType ReadPSDChannelPixels(Image *image,const size_t channels, const size_t row,const ssize_t type,const unsigned char *pixels, ExceptionInfo *exception) { Quantum pixel; register const unsigned char *p; register IndexPacket *indexes; register PixelPacket *q; register ssize_t x; size_t packet_size; unsigned short nibble; p=pixels; q=GetAuthenticPixels(image,0,row,image->columns,1,exception); if (q == (PixelPacket *) NULL) return MagickFalse; indexes=GetAuthenticIndexQueue(image); packet_size=GetPSDPacketSize(image); for (x=0; x < (ssize_t) image->columns; x++) { if (packet_size == 1) pixel=ScaleCharToQuantum(*p++); else { p=PushShortPixel(MSBEndian,p,&nibble); pixel=ScaleShortToQuantum(nibble); } if (image->depth > 1) { SetPSDPixel(image,channels,type,packet_size,pixel,q++,indexes,x); } else { ssize_t bit, number_bits; number_bits=image->columns-x; if (number_bits > 8) number_bits=8; for (bit=0; bit < number_bits; bit++) { SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel) & (0x01 << (7-bit))) != 0 ? 
0 : QuantumRange,q++,indexes,x++); } if (x != (ssize_t) image->columns) x--; continue; } } return(SyncAuthenticPixels(image,exception)); } static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels, const ssize_t type,ExceptionInfo *exception) { MagickBooleanType status; size_t count, row_size; ssize_t y; unsigned char *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RAW"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,row_size,pixels); if (count != row_size) break; status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception); if (status == MagickFalse) break; } pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } static inline MagickOffsetType *ReadPSDRLESizes(Image *image, const PSDInfo *psd_info,const size_t size) { MagickOffsetType *sizes; ssize_t y; sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes)); if(sizes != (MagickOffsetType *) NULL) { for (y=0; y < (ssize_t) size; y++) { if (psd_info->version == 1) sizes[y]=(MagickOffsetType) ReadBlobShort(image); else sizes[y]=(MagickOffsetType) ReadBlobLong(image); } } return sizes; } static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info, const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception) { MagickBooleanType status; size_t length, row_size; ssize_t count, y; unsigned char *compact_pixels, *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RLE compressed"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); length=0; for (y=0; y < (ssize_t) image->rows; y++) if ((MagickOffsetType) length < sizes[y]) length=(size_t) sizes[y]; compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels)); if (compact_pixels == (unsigned char *) NULL) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) ResetMagickMemory(compact_pixels,0,length*sizeof(*compact_pixels)); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,(size_t) sizes[y],compact_pixels); if (count != (ssize_t) sizes[y]) break; count=DecodePSDPixels((size_t) sizes[y],compact_pixels, (ssize_t) (image->depth == 1 ? 
123456 : image->depth),row_size,pixels); if (count != (ssize_t) row_size) break; status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels, exception); if (status == MagickFalse) break; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #ifdef MAGICKCORE_ZLIB_DELEGATE static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels, const ssize_t type,const PSDCompressionType compression, const size_t compact_size,ExceptionInfo *exception) { MagickBooleanType status; register unsigned char *p; size_t count, length, packet_size, row_size; ssize_t y; unsigned char *compact_pixels, *pixels; z_stream stream; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is ZIP compressed"); compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size, sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); packet_size=GetPSDPacketSize(image); row_size=image->columns*packet_size; count=image->rows*row_size; pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) { compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } ResetMagickMemory(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; (void) ReadBlob(image,compact_size,compact_pixels); stream.next_in=(Bytef *)compact_pixels; stream.avail_in=(uInt) compact_size; stream.next_out=(Bytef *)pixels; stream.avail_out=(uInt) count; if (inflateInit(&stream) == Z_OK) { int ret; while (stream.avail_out > 0) { ret=inflate(&stream, Z_SYNC_FLUSH); if ((ret != Z_OK) && (ret != Z_STREAM_END)) { compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(MagickFalse); } } } if (compression == ZipWithPrediction) { p=pixels; while (count > 0) { length=image->columns; while (--length) { if (packet_size == 2) { p[2]+=p[0]+((p[1]+p[3]) >> 8); p[3]+=p[1]; } else *(p+1)+=*p; p+=packet_size; } p+=packet_size; count-=row_size; } } status=MagickTrue; p=pixels; for (y=0; y < (ssize_t) image->rows; y++) { status=ReadPSDChannelPixels(image,channels,y,type,p,exception); if (status == MagickFalse) break; p+=row_size; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #endif static MagickBooleanType ReadPSDChannel(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info, const size_t channel,const PSDCompressionType compression, ExceptionInfo *exception) { Image *channel_image, *mask; MagickOffsetType offset; MagickBooleanType status; channel_image=image; mask=(Image *) NULL; if (layer_info->channel_info[channel].type < -1) { const char *option; /* Ignore mask that is not a user supplied layer mask, if the mask is disabled or if the flags have unsupported values. 
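        For reference, as the checks below interpret these fields: channel
        type -2 is the user supplied layer mask, mask flag bit 0x02 means the
        mask is disabled (such a mask is only read when the
        psd:preserve-opacity-mask option asks for it), and flag values above 2
        are treated as unsupported.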
*/ option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if ((layer_info->channel_info[channel].type != -2) || (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) && (IsStringTrue(option) == MagickFalse))) { SeekBlob(image,layer_info->channel_info[channel].size-2,SEEK_CUR); return(MagickTrue); } mask=CloneImage(image,layer_info->mask.page.width, layer_info->mask.page.height,MagickFalse,exception); mask->matte=MagickFalse; channel_image=mask; } offset=TellBlob(image); status=MagickTrue; switch(compression) { case Raw: status=ReadPSDChannelRaw(channel_image,psd_info->channels, layer_info->channel_info[channel].type,exception); break; case RLE: { MagickOffsetType *sizes; sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ReadPSDChannelRLE(channel_image,psd_info, layer_info->channel_info[channel].type,sizes,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); } break; case ZipWithPrediction: case ZipWithoutPrediction: #ifdef MAGICKCORE_ZLIB_DELEGATE status=ReadPSDChannelZip(channel_image,layer_info->channels, layer_info->channel_info[channel].type,compression, layer_info->channel_info[channel].size-2,exception); #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (ZLIB)",image->filename); #endif break; default: (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning, "CompressionNotSupported","'%.20g'",(double) compression); break; } SeekBlob(image,offset+layer_info->channel_info[channel].size-2,SEEK_SET); if (status == MagickFalse) { if (mask != (Image *) NULL) DestroyImage(mask); ThrowBinaryException(CoderError,"UnableToDecompressImage", image->filename); } if (mask != (Image *) NULL) { if (status != MagickFalse) layer_info->mask.image=mask; else mask=DestroyImage(mask); } return(status); } static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info, const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception) { char message[MaxTextExtent]; MagickBooleanType status; PSDCompressionType compression; ssize_t j; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " setting up new layer image"); if (psd_info->mode != IndexedMode) (void) SetImageBackgroundColor(layer_info->image); layer_info->image->compose=PSDBlendModeToCompositeOperator( layer_info->blendkey); if (layer_info->visible == MagickFalse) { layer_info->image->compose=NoCompositeOp; (void) SetImageArtifact(layer_info->image,"psd:layer.invisible","true"); } if (psd_info->mode == CMYKMode) SetImageColorspace(layer_info->image,CMYKColorspace); else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) || (psd_info->mode == GrayscaleMode)) SetImageColorspace(layer_info->image,GRAYColorspace); /* Set up some hidden attributes for folks that need them. 
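    Concretely, the code below records the layer offset and opacity in the
    psd:layer.x, psd:layer.y and psd:layer.opacity artifacts and stores the
    layer name in the label property, mirroring the values parsed from the
    layer records above.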
*/ (void) FormatLocaleString(message,MaxTextExtent,"%.20g", (double) layer_info->page.x); (void) SetImageArtifact(layer_info->image,"psd:layer.x",message); (void) FormatLocaleString(message,MaxTextExtent,"%.20g", (double) layer_info->page.y); (void) SetImageArtifact(layer_info->image,"psd:layer.y",message); (void) FormatLocaleString(message,MaxTextExtent,"%.20g",(double) layer_info->opacity); (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message); (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name); status=MagickTrue; for (j=0; j < (ssize_t) layer_info->channels; j++) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for channel %.20g",(double) j); compression=(PSDCompressionType) ReadBlobShort(layer_info->image); layer_info->image->compression=ConvertPSDCompression(compression); if (layer_info->channel_info[j].type == -1) layer_info->image->matte=MagickTrue; status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,j, compression,exception); InheritException(exception,&layer_info->image->exception); if (status == MagickFalse) break; } if (status != MagickFalse) status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity, MagickFalse,exception); if ((status != MagickFalse) && (layer_info->image->colorspace == CMYKColorspace)) status=NegateImage(layer_info->image,MagickFalse); if (status != MagickFalse && layer_info->mask.image != (Image *) NULL) { const char *option; layer_info->mask.image->page.x=layer_info->mask.page.x; layer_info->mask.image->page.y=layer_info->mask.page.y; /* Do not composite the mask when it is disabled */ if ((layer_info->mask.flags & 0x02) == 0x02) layer_info->mask.image->compose=NoCompositeOp; else status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image, layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse, exception); option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if (IsStringTrue(option) != MagickFalse) PreservePSDOpacityMask(image,layer_info,exception); layer_info->mask.image=DestroyImage(layer_info->mask.image); } return(status); } ModuleExport MagickBooleanType ReadPSDLayers(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info, const MagickBooleanType skip_layers,ExceptionInfo *exception) { char type[4]; LayerInfo *layer_info; MagickSizeType size; MagickBooleanType status; register ssize_t i; ssize_t count, j, number_layers; size=GetPSDSize(psd_info,image); if (size == 0) { /* Skip layers & masks. */ (void) ReadBlobLong(image); count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); status=MagickFalse; if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0)) return(MagickTrue); else { count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); if ((count != 0) && (LocaleNCompare(type,"Lr16",4) == 0)) size=GetPSDSize(psd_info,image); else return(MagickTrue); } } status=MagickTrue; if (size != 0) { layer_info=(LayerInfo *) NULL; number_layers=(short) ReadBlobShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. 
*/ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->matte=MagickTrue; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) ResetMagickMemory(layer_info,0,(size_t) number_layers* sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t x, y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); layer_info[i].page.y=ReadBlobSignedLong(image); layer_info[i].page.x=ReadBlobSignedLong(image); y=ReadBlobSignedLong(image); x=ReadBlobSignedLong(image); layer_info[i].page.width=(size_t) (x-layer_info[i].page.x); layer_info[i].page.height=(size_t) (y-layer_info[i].page.y); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) layer_info[i].channels); for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type=(short) ReadBlobShort(image); layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].type, (double) layer_info[i].channel_info[j].size); } count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } (void) ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey); ReversePSDString(image,layer_info[i].blendkey,4); layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping=(unsigned char) ReadBlobByte(image); layer_info[i].flags=(unsigned char) ReadBlobByte(image); layer_info[i].visible=!(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey,(double) layer_info[i].opacity, layer_info[i].clipping ? "true" : "false",layer_info[i].flags, layer_info[i].visible ? 
"true" : "false"); (void) ReadBlobByte(image); /* filler */ size=ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer contains additional info"); length=ReadBlobLong(image); combined_length=length+4; if (length != 0) { /* Layer mask info. */ layer_info[i].mask.page.y=ReadBlobSignedLong(image); layer_info[i].mask.page.x=ReadBlobSignedLong(image); layer_info[i].mask.page.height=(size_t) (ReadBlobLong(image)- layer_info[i].mask.page.y); layer_info[i].mask.page.width=(size_t) (ReadBlobLong(image)- layer_info[i].mask.page.x); layer_info[i].mask.background=(unsigned char) ReadBlobByte( image); layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y=layer_info[i].mask.page.y- layer_info[i].page.y; layer_info[i].mask.page.x=layer_info[i].mask.page.x- layer_info[i].page.x; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double) layer_info[i].mask.page.x,(double) layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width, (double) layer_info[i].mask.page.height,(double) ((MagickOffsetType) length)-18); /* Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); } } length=ReadBlobLong(image); combined_length+=length+4; if (length != 0) { /* Layer blending ranges info. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer blending ranges: length=%.20g",(double) ((MagickOffsetType) length)); /* We read it, but don't use it... */ for (j=0; j < (ssize_t) length; j+=8) { size_t blend_source=ReadBlobLong(image); size_t blend_dest=ReadBlobLong(image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " source(%x), dest(%x)",(unsigned int) blend_source,(unsigned int) blend_dest); } } /* Layer name. */ length=(MagickSizeType) ReadBlobByte(image); combined_length+=length+1; if (length > 0) (void) ReadBlob(image,(size_t) length++,layer_info[i].name); layer_info[i].name[length]='\0'; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer name: %s",layer_info[i].name); if ((length % 4) != 0) { length=4-(length % 4); combined_length+=length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=(MagickSizeType) size-combined_length; if (length > 0) { unsigned char *info; layer_info[i].info=AcquireStringInfo((const size_t) length); info=GetStringInfoDatum(layer_info[i].info); (void) ReadBlob(image,(const size_t) length,info); } } } for (i=0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is empty"); continue; } /* Allocate layered image. 
*/ layer_info[i].image=CloneImage(image,layer_info[i].page.width, layer_info[i].page.height,MagickFalse,exception); if (layer_info[i].image == (Image *) NULL) { layer_info=DestroyLayerInfo(layer_info,number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of image for layer %.20g failed",(double) i); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void) SetImageProfile(layer_info[i].image,"psd:additional-info", layer_info[i].info); layer_info[i].info=DestroyStringInfo(layer_info[i].info); } } if (image_info->ping == MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=0; j < layer_info[i].channels; j++) { if (DiscardBlobBytes(image,(MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } continue; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for layer %.20g",(double) i); status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i], exception); if (status == MagickFalse) break; status=SetImageProgress(image,LoadImagesTag,i,(MagickSizeType) number_layers); if (status == MagickFalse) break; } } if (status != MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=i; j < number_layers - 1; j++) layer_info[j] = layer_info[j+1]; number_layers--; i--; } } if (number_layers > 0) { for (i=0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous=layer_info[i-1].image; if (i < (number_layers-1)) layer_info[i].image->next=layer_info[i+1].image; layer_info[i].image->page=layer_info[i].page; } image->next=layer_info[0].image; layer_info[0].image->previous=image; } layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); } else layer_info=DestroyLayerInfo(layer_info,number_layers); } return(status); } static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info, Image* image,const PSDInfo* psd_info,ExceptionInfo *exception) { MagickOffsetType *sizes; MagickBooleanType status; PSDCompressionType compression; register ssize_t i; compression=(PSDCompressionType) ReadBlobMSBShort(image); image->compression=ConvertPSDCompression(compression); if (compression != Raw && compression != RLE) { (void) ThrowMagickException(exception,GetMagickModule(), TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression); return(MagickFalse); } sizes=(MagickOffsetType *) NULL; if (compression == RLE) { sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } status=MagickTrue; for (i=0; i < (ssize_t) psd_info->channels; i++) { if (compression == RLE) status=ReadPSDChannelRLE(image,psd_info,i,sizes+(i*image->rows), exception); else status=ReadPSDChannelRaw(image,psd_info->channels,i,exception); if (status != MagickFalse) status=SetImageProgress(image,LoadImagesTag,i,psd_info->channels); if (status == MagickFalse) break; } if ((status != MagickFalse) && (image->colorspace == CMYKColorspace)) status=NegateImage(image,MagickFalse); if (status != MagickFalse) status=CorrectPSDAlphaBlend(image_info,image,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); return(status); } static Image 
*ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType has_merged_image, skip_layers; MagickOffsetType offset; MagickSizeType length; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; ssize_t count; unsigned char *data; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read image header. */ image->endian=MSBEndian; count=ReadBlob(image,4,(unsigned char *) psd_info.signature); psd_info.version=ReadBlobMSBShort(image); if ((count == 0) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) || ((psd_info.version != 1) && (psd_info.version != 2))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlob(image,6,psd_info.reserved); psd_info.channels=ReadBlobMSBShort(image); if (psd_info.channels > MaxPSDChannels) ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded"); psd_info.rows=ReadBlobMSBLong(image); psd_info.columns=ReadBlobMSBLong(image); if ((psd_info.version == 1) && ((psd_info.rows > 30000) || (psd_info.columns > 30000))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.depth=ReadBlobMSBShort(image); if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.mode=ReadBlobMSBShort(image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s", (double) psd_info.columns,(double) psd_info.rows,(double) psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType) psd_info.mode)); /* Initialize image. */ image->depth=psd_info.depth; image->columns=psd_info.columns; image->rows=psd_info.rows; status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } if (SetImageBackgroundColor(image) == MagickFalse) { InheritException(exception,&image->exception); image=DestroyImageList(image); return((Image *) NULL); } if (psd_info.mode == LabMode) SetImageColorspace(image,LabColorspace); if (psd_info.mode == CMYKMode) { SetImageColorspace(image,CMYKColorspace); image->matte=psd_info.channels > 4 ? MagickTrue : MagickFalse; } else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) || (psd_info.mode == DuotoneMode)) { status=AcquireImageColormap(image,psd_info.depth != 16 ? 256 : 65536); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image colormap allocated"); SetImageColorspace(image,GRAYColorspace); image->matte=psd_info.channels > 1 ? MagickTrue : MagickFalse; } else image->matte=psd_info.channels > 3 ? MagickTrue : MagickFalse; /* Read PSD raster colormap only present for indexed and duotone images. 
*/ length=ReadBlobMSBLong(image); if (length != 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading colormap"); if (psd_info.mode == DuotoneMode) { /* Duotone image data; the format of this data is undocumented. */ data=(unsigned char *) AcquireQuantumMemory((size_t) length, sizeof(*data)); if (data == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); (void) ReadBlob(image,(size_t) length,data); data=(unsigned char *) RelinquishMagickMemory(data); } else { size_t number_colors; /* Read PSD raster colormap. */ number_colors=length/3; if (number_colors > 65536) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireImageColormap(image,number_colors) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].red=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].green=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].blue=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->matte=MagickFalse; } } if ((image->depth == 1) && (image->storage_class != PseudoClass)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); has_merged_image=MagickTrue; length=ReadBlobMSBLong(image); if (length != 0) { unsigned char *blocks; /* Image resources block. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading image resource blocks - %.20g bytes",(double) ((MagickOffsetType) length)); blocks=(unsigned char *) AcquireQuantumMemory((size_t) length, sizeof(*blocks)); if (blocks == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,(size_t) length,blocks); if ((count != (ssize_t) length) || (length < 4) || (LocaleNCompare((char *) blocks,"8BIM",4) != 0)) { blocks=(unsigned char *) RelinquishMagickMemory(blocks); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } ParseImageResourceBlocks(image,blocks,(size_t) length,&has_merged_image); blocks=(unsigned char *) RelinquishMagickMemory(blocks); } /* Layer and mask block. */ length=GetPSDSize(&psd_info,image); if (length == 8) { length=ReadBlobMSBLong(image); length=ReadBlobMSBLong(image); } offset=TellBlob(image); skip_layers=MagickFalse; if ((image_info->number_scenes == 1) && (image_info->scene == 0) && (has_merged_image != MagickFalse)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " read composite only"); skip_layers=MagickTrue; } if (length == 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has no layers"); } else { if (ReadPSDLayers(image,image_info,&psd_info,skip_layers,exception) != MagickTrue) { (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } /* Skip the rest of the layer and mask information. */ SeekBlob(image,offset+length,SEEK_SET); } /* If we are only "pinging" the image, then we're done - so return. */ if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* Read the precombined layer, present for PSD < 4 compatibility. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading the precombined layer"); if (has_merged_image != MagickFalse || GetImageListLength(image) == 1) has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image, &psd_info,exception); if ((has_merged_image == MagickFalse) && (GetImageListLength(image) == 1) && (length != 0)) { SeekBlob(image,offset,SEEK_SET); status=ReadPSDLayers(image,image_info,&psd_info,MagickFalse,exception); if (status != MagickTrue) { (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } } if (has_merged_image == MagickFalse) { Image *merged; if (GetImageListLength(image) == 1) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); SetImageAlphaChannel(image,TransparentAlphaChannel); image->background_color.opacity=TransparentOpacity; merged=MergeImageLayers(image,FlattenLayer,exception); ReplaceImageInList(&image,merged); } (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPSDImage() adds properties for the PSD image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterPSDImage method is: % % size_t RegisterPSDImage(void) % */ ModuleExport size_t RegisterPSDImage(void) { MagickInfo *entry; entry=SetMagickInfo("PSB"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->seekable_stream=MagickTrue; entry->description=ConstantString("Adobe Large Document Format"); entry->module=ConstantString("PSD"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PSD"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->seekable_stream=MagickTrue; entry->description=ConstantString("Adobe Photoshop bitmap"); entry->module=ConstantString("PSD"); (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterPSDImage() removes format registrations made by the % PSD module from the list of supported formats. % % The format of the UnregisterPSDImage method is: % % UnregisterPSDImage(void) % */ ModuleExport void UnregisterPSDImage(void) { (void) UnregisterMagickInfo("PSB"); (void) UnregisterMagickInfo("PSD"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePSDImage() writes an image in the Adobe Photoshop encoded image format. % % The format of the WritePSDImage method is: % % MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. 
% % o image: The image. % */ static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image, const size_t offset) { if (psd_info->version == 1) return(WriteBlobMSBShort(image,(unsigned short) offset)); return(WriteBlobMSBLong(image,(unsigned short) offset)); } static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickSizeType offset) { MagickSizeType current_offset; ssize_t result; current_offset=TellBlob(image); SeekBlob(image,offset,SEEK_SET); if (psd_info->version == 1) result=WriteBlobMSBShort(image,(unsigned short) size); else result=(WriteBlobMSBLong(image,(unsigned short) size)); SeekBlob(image,current_offset,SEEK_SET); return(result); } static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size) { if (psd_info->version == 1) return(WriteBlobMSBLong(image,(unsigned int) size)); return(WriteBlobMSBLongLong(image,size)); } static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickSizeType offset) { MagickSizeType current_offset; ssize_t result; current_offset=TellBlob(image); SeekBlob(image,offset,SEEK_SET); if (psd_info->version == 1) result=WriteBlobMSBLong(image,(unsigned int) size); else result=WriteBlobMSBLongLong(image,size); SeekBlob(image,current_offset,SEEK_SET); return(result); } static size_t PSDPackbitsEncodeImage(Image *image,const size_t length, const unsigned char *pixels,unsigned char *compact_pixels) { int count; register ssize_t i, j; register unsigned char *q; unsigned char *packbits; /* Compress pixels with Packbits encoding. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pixels != (unsigned char *) NULL); assert(compact_pixels != (unsigned char *) NULL); packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits)); if (packbits == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); q=compact_pixels; for (i=(ssize_t) length; i != 0; ) { switch (i) { case 1: { i--; *q++=(unsigned char) 0; *q++=(*pixels); break; } case 2: { i-=2; *q++=(unsigned char) 1; *q++=(*pixels); *q++=pixels[1]; break; } case 3: { i-=3; if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { *q++=(unsigned char) ((256-3)+1); *q++=(*pixels); break; } *q++=(unsigned char) 2; *q++=(*pixels); *q++=pixels[1]; *q++=pixels[2]; break; } default: { if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { /* Packed run. */ count=3; while (((ssize_t) count < i) && (*pixels == *(pixels+count))) { count++; if (count >= 127) break; } i-=count; *q++=(unsigned char) ((256-count)+1); *q++=(*pixels); pixels+=count; break; } /* Literal run. 
*/ count=0; while ((*(pixels+count) != *(pixels+count+1)) || (*(pixels+count+1) != *(pixels+count+2))) { packbits[count+1]=pixels[count]; count++; if (((ssize_t) count >= (i-3)) || (count >= 127)) break; } i-=count; *packbits=(unsigned char) (count-1); for (j=0; j <= (ssize_t) count; j++) *q++=packbits[j]; pixels+=count; break; } } } *q++=(unsigned char) 128; /* EOD marker */ packbits=(unsigned char *) RelinquishMagickMemory(packbits); return((size_t) (q-compact_pixels)); } static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image, const Image *next_image,const ssize_t channels) { size_t length; ssize_t i, y; if (next_image->compression == RLECompression) { length=WriteBlobMSBShort(image,RLE); for (i=0; i < channels; i++) for (y=0; y < (ssize_t) next_image->rows; y++) length+=SetPSDOffset(psd_info,image,0); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (next_image->compression == ZipCompression) length=WriteBlobMSBShort(image,ZipWithoutPrediction); #endif else length=WriteBlobMSBShort(image,Raw); return(length); } static size_t WritePSDChannel(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, const QuantumType quantum_type, unsigned char *compact_pixels, MagickOffsetType size_offset,const MagickBooleanType separate) { int y; MagickBooleanType monochrome; QuantumInfo *quantum_info; register const PixelPacket *p; register ssize_t i; size_t count, length; unsigned char *pixels; #ifdef MAGICKCORE_ZLIB_DELEGATE #define CHUNK 16384 int flush, level; unsigned char *compressed_pixels; z_stream stream; compressed_pixels=(unsigned char *) NULL; flush=Z_NO_FLUSH; #endif count=0; if (separate != MagickFalse) { size_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,1); } if (next_image->depth > 8) next_image->depth=16; monochrome=IsMonochromeImage(image,&image->exception) && (image->depth == 1) ? 
MagickTrue : MagickFalse; quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) return(0); pixels=GetQuantumPixels(quantum_info); #ifdef MAGICKCORE_ZLIB_DELEGATE if (next_image->compression == ZipCompression) { compressed_pixels=AcquireQuantumMemory(CHUNK,sizeof(*compressed_pixels)); if (compressed_pixels == (unsigned char *) NULL) { quantum_info=DestroyQuantumInfo(quantum_info); return(0); } ResetMagickMemory(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; level=Z_DEFAULT_COMPRESSION; if ((image_info->quality > 0 && image_info->quality < 10)) level=(int) image_info->quality; if (deflateInit(&stream,level) != Z_OK) { quantum_info=DestroyQuantumInfo(quantum_info); return(0); } } #endif for (y=0; y < (ssize_t) next_image->rows; y++) { p=GetVirtualPixels(next_image,0,y,next_image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info, quantum_type,pixels,&image->exception); if (monochrome != MagickFalse) for (i=0; i < (ssize_t) length; i++) pixels[i]=(~pixels[i]); if (next_image->compression == RLECompression) { length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels); count+=WriteBlob(image,length,compact_pixels); size_offset+=WritePSDOffset(psd_info,image,length,size_offset); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (next_image->compression == ZipCompression) { stream.avail_in=(uInt) length; stream.next_in=(Bytef *) pixels; if (y == (ssize_t) next_image->rows-1) flush=Z_FINISH; do { stream.avail_out=(uInt) CHUNK; stream.next_out=(Bytef *) compressed_pixels; if (deflate(&stream,flush) == Z_STREAM_ERROR) break; length=(size_t) CHUNK-stream.avail_out; if (length > 0) count+=WriteBlob(image,length,compressed_pixels); } while (stream.avail_out == 0); } #endif else count+=WriteBlob(image,length,pixels); } #ifdef MAGICKCORE_ZLIB_DELEGATE if (next_image->compression == ZipCompression) { (void) deflateEnd(&stream); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); } #endif quantum_info=DestroyQuantumInfo(quantum_info); return(count); } static unsigned char *AcquireCompactPixels(Image *image) { size_t packet_size; unsigned char *compact_pixels; packet_size=image->depth > 8UL ? 2UL : 1UL; compact_pixels=(unsigned char *) AcquireQuantumMemory((9* image->columns)+1,packet_size*sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); } return(compact_pixels); } static MagickBooleanType WritePSDChannels(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, MagickOffsetType size_offset,const MagickBooleanType separate) { Image *mask; MagickOffsetType rows_offset; size_t channels, count, length, offset_length; unsigned char *compact_pixels; count=0; offset_length=0; rows_offset=0; compact_pixels=(unsigned char *) NULL; if (next_image->compression == RLECompression) { compact_pixels=AcquireCompactPixels(image); if (compact_pixels == (unsigned char *) NULL) return(0); } channels=1; if (separate == MagickFalse) { if (next_image->storage_class != PseudoClass) { if (IsGrayImage(next_image,&next_image->exception) == MagickFalse) channels=next_image->colorspace == CMYKColorspace ? 
4 : 3; if (next_image->matte != MagickFalse) channels++; } rows_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,channels); offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4)); } size_offset+=2; if (next_image->storage_class == PseudoClass) { length=WritePSDChannel(psd_info,image_info,image,next_image, IndexQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (IsGrayImage(next_image,&next_image->exception) != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, GrayQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (next_image->colorspace == CMYKColorspace) (void) NegateImage(next_image,MagickFalse); length=WritePSDChannel(psd_info,image_info,image,next_image, RedQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, GreenQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, BlueQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; if (next_image->colorspace == CMYKColorspace) { length=WritePSDChannel(psd_info,image_info,image,next_image, BlackQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } if (next_image->matte != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, AlphaQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void) NegateImage(next_image,MagickFalse); if (separate != MagickFalse) { const char *property; property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, &image->exception); if (mask != (Image *) NULL) { if (mask->compression == RLECompression) { compact_pixels=AcquireCompactPixels(mask); if (compact_pixels == (unsigned char *) NULL) return(0); } length=WritePSDChannel(psd_info,image_info,image,mask, RedQuantum,compact_pixels,rows_offset,MagickTrue); (void) WritePSDSize(psd_info,image,length,size_offset); count+=length; compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); } } } return(count); } static size_t WritePascalString(Image *image,const char *value,size_t padding) { size_t count, length; register ssize_t i; /* Max length is 255. */ count=0; length=(strlen(value) > 255UL ) ? 
255UL : strlen(value); if (length == 0) count+=WriteBlobByte(image,0); else { count+=WriteBlobByte(image,(unsigned char) length); count+=WriteBlob(image,length,(const unsigned char *) value); } length++; if ((length % padding) == 0) return(count); for (i=0; i < (ssize_t) (padding-(length % padding)); i++) count+=WriteBlobByte(image,0); return(count); } static void WriteResolutionResourceBlock(Image *image) { double x_resolution, y_resolution; unsigned short units; if (image->units == PixelsPerCentimeterResolution) { x_resolution=2.54*65536.0*image->x_resolution+0.5; y_resolution=2.54*65536.0*image->y_resolution+0.5; units=2; } else { x_resolution=65536.0*image->x_resolution+0.5; y_resolution=65536.0*image->y_resolution+0.5; units=1; } (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x03ED); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,16); /* resource size */ (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */ (void) WriteBlobMSBShort(image,units); /* width unit */ (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */ (void) WriteBlobMSBShort(image,units); /* height unit */ } static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image, const signed short channel) { size_t count; count=WriteBlobMSBSignedShort(image,channel); count+=SetPSDSize(psd_info,image,0); return(count); } static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { register unsigned char *q; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); if (id == 0x0000040f) { ssize_t quantum; quantum=PSDQuantum(count)+12; if ((quantum >= 12) && (quantum < length)) { if ((q+quantum < (datum+length-16))) (void) CopyMagickMemory(q,q+quantum,length-quantum-(q-datum)); SetStringInfoLength(bim_profile,length-quantum); } break; } p+=count; if ((count & 0x01) != 0) p++; } } static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { register unsigned char *q; ssize_t cnt; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) return; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); cnt=PSDQuantum(count); if (cnt < 0) return; if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12))) { (void) CopyMagickMemory(q,q+cnt+12,length-(cnt+12)-(q-datum)); SetStringInfoLength(bim_profile,length-(cnt+12)); break; } p+=count; if ((count & 0x01) != 0) p++; } } static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info, Image *image) { #define PSDKeySize 5 #define PSDAllowedLength 
36 char key[PSDKeySize]; /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */ const char allowed[PSDAllowedLength][PSDKeySize] = { "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk", "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr", "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl", "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA" }, *option; const StringInfo *info; MagickBooleanType found; register size_t i; size_t remaining_length, length; StringInfo *profile; unsigned char *p; unsigned int size; info=GetImageProfile(image,"psd:additional-info"); if (info == (const StringInfo *) NULL) return((const StringInfo *) NULL); option=GetImageOption(image_info,"psd:additional-info"); if (LocaleCompare(option,"all") == 0) return(info); if (LocaleCompare(option,"selective") != 0) { profile=RemoveImageProfile(image,"psd:additional-info"); return(DestroyStringInfo(profile)); } length=GetStringInfoLength(info); p=GetStringInfoDatum(info); remaining_length=length; length=0; while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(*p++); key[1]=(*p++); key[2]=(*p++); key[3]=(*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) return((const StringInfo *) NULL); found=MagickFalse; for (i=0; i < PSDAllowedLength; i++) { if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0) continue; found=MagickTrue; break; } remaining_length-=(size_t) size; if (found == MagickFalse) { if (remaining_length > 0) p=(unsigned char *) CopyMagickMemory(p-12,p+size,remaining_length); continue; } length+=(size_t) size+12; p+=size; } profile=RemoveImageProfile(image,"psd:additional-info"); if (length == 0) return(DestroyStringInfo(profile)); SetStringInfoLength(profile,(const size_t) length); SetImageProfile(image,"psd:additional-info",info); return(profile); } static MagickBooleanType WritePSDImage(const ImageInfo *image_info, Image *image) { char layer_name[MaxTextExtent]; const char *property; const StringInfo *icc_profile, *info; Image *base_image, *next_image; MagickBooleanType status; MagickOffsetType *layer_size_offsets, size_offset; PSDInfo psd_info; register ssize_t i; size_t layer_count, layer_index, length, name_length, num_channels, packet_size, rounded_size, size; StringInfo *bim_profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); packet_size=(size_t) (image->depth > 8 ? 6 : 3); if (image->matte != MagickFalse) packet_size+=image->depth > 8 ? 2 : 1; psd_info.version=1; if ((LocaleCompare(image_info->magick,"PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version=2; (void) WriteBlob(image,4,(const unsigned char *) "8BPS"); (void) WriteBlobMSBShort(image,psd_info.version); /* version */ for (i=1; i <= 6; i++) (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */ if (SetImageGray(image,&image->exception) != MagickFalse) num_channels=(image->matte != MagickFalse ? 
2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorMatteType) && (image->storage_class == PseudoClass)) num_channels=(image->matte != MagickFalse ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void) SetImageStorageClass(image,DirectClass); if (image->colorspace != CMYKColorspace) num_channels=(image->matte != MagickFalse ? 4UL : 3UL); else num_channels=(image->matte != MagickFalse ? 5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsGrayImage(image,&image->exception) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. */ monochrome=IsMonochromeImage(image,&image->exception) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace); (void) WriteBlobMSBShort(image,CMYKMode); } } if ((IsGrayImage(image,&image->exception) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. */ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar( image->colormap[i].green)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. 
*/ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((MagickOffsetType) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); } base_image=GetNextImageInList(image); if (base_image == (Image *)NULL) base_image=image; size=0; size_offset=TellBlob(image); SetPSDSize(&psd_info,image,0); SetPSDSize(&psd_info,image,0); layer_count=0; for (next_image=base_image; next_image != NULL; ) { layer_count++; next_image=GetNextImageInList(next_image); } if (image->matte != MagickFalse) size+=WriteBlobMSBShort(image,-(unsigned short) layer_count); else size+=WriteBlobMSBShort(image,(unsigned short) layer_count); layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count,sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); layer_index=0; for (next_image=base_image; next_image != NULL; ) { Image *mask; unsigned char default_color; unsigned short channels, total_channels; mask=(Image *) NULL; property=GetImageArtifact(next_image,"psd:opacity-mask"); default_color=0; if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, &image->exception); default_color=strlen(property) == 9 ? 255 : 0; } size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.y); size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.x); size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.y+ next_image->rows)); size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.x+ next_image->columns)); channels=1U; if ((next_image->storage_class != PseudoClass) && (IsGrayImage(next_image,&next_image->exception) == MagickFalse)) channels=next_image->colorspace == CMYKColorspace ? 
4U : 3U; total_channels=channels; if (next_image->matte != MagickFalse) total_channels++; if (mask != (Image *) NULL) total_channels++; size+=WriteBlobMSBShort(image,total_channels); layer_size_offsets[layer_index++]=TellBlob(image); for (i=0; i < (ssize_t) channels; i++) size+=WriteChannelSize(&psd_info,image,(signed short) i); if (next_image->matte != MagickFalse) size+=WriteChannelSize(&psd_info,image,-1); if (mask != (Image *) NULL) size+=WriteChannelSize(&psd_info,image,-2); size+=WriteBlob(image,4,(const unsigned char *) "8BIM"); size+=WriteBlob(image,4,(const unsigned char *) CompositeOperatorToPSDBlendMode(next_image->compose)); property=GetImageArtifact(next_image,"psd:layer.opacity"); if (property != (const char *) NULL) { Quantum opacity; opacity=(Quantum) StringToInteger(property); size+=WriteBlobByte(image,ScaleQuantumToChar(opacity)); (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue, &image->exception); } else size+=WriteBlobByte(image,255); size+=WriteBlobByte(image,0); size+=WriteBlobByte(image,next_image->compose==NoCompositeOp ? 1 << 0x02 : 1); /* layer properties - visible, etc. */ size+=WriteBlobByte(image,0); info=GetAdditionalInformation(image_info,next_image); property=(const char *) GetImageProperty(next_image,"label"); if (property == (const char *) NULL) { (void) FormatLocaleString(layer_name,MaxTextExtent,"L%.20g", (double) layer_index); property=layer_name; } name_length=strlen(property)+1; if ((name_length % 4) != 0) name_length+=(4-(name_length % 4)); if (info != (const StringInfo *) NULL) name_length+=GetStringInfoLength(info); name_length+=8; if (mask != (Image *) NULL) name_length+=20; size+=WriteBlobMSBLong(image,(unsigned int) name_length); if (mask == (Image *) NULL) size+=WriteBlobMSBLong(image,0); else { if (mask->compose != NoCompositeOp) (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum( default_color),MagickTrue,&image->exception); mask->page.y+=image->page.y; mask->page.x+=image->page.x; size+=WriteBlobMSBLong(image,20); size+=WriteBlobMSBSignedLong(image,mask->page.y); size+=WriteBlobMSBSignedLong(image,mask->page.x); size+=WriteBlobMSBLong(image,mask->rows+mask->page.y); size+=WriteBlobMSBLong(image,mask->columns+mask->page.x); size+=WriteBlobByte(image,default_color); size+=WriteBlobByte(image,mask->compose == NoCompositeOp ? 2 : 0); size+=WriteBlobMSBShort(image,0); } size+=WriteBlobMSBLong(image,0); size+=WritePascalString(image,property,4); if (info != (const StringInfo *) NULL) size+=WriteBlob(image,GetStringInfoLength(info), GetStringInfoDatum(info)); next_image=GetNextImageInList(next_image); } /* Now the image data! */ next_image=base_image; layer_index=0; while (next_image != NULL) { length=WritePSDChannels(&psd_info,image_info,image,next_image, layer_size_offsets[layer_index++],MagickTrue); if (length == 0) { status=MagickFalse; break; } size+=length; next_image=GetNextImageInList(next_image); } (void) WriteBlobMSBLong(image,0); /* user mask data */ /* Remove the opacity mask from the registry */ next_image=base_image; while (next_image != (Image *) NULL) { property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) DeleteImageRegistry(property); next_image=GetNextImageInList(next_image); } /* Write the total size */ size_offset+=WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 
    8 : 16),size_offset);
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(&psd_info,image,rounded_size,size_offset);
  layer_size_offsets=RelinquishMagickMemory(layer_size_offsets);
  /*
    Write composite image.
  */
  if (status != MagickFalse)
    {
      CompressionType
        compression;

      compression=image->compression;
      if (image->compression == ZipCompression)
        image->compression=RLECompression;
      if (WritePSDChannels(&psd_info,image_info,image,image,0,
          MagickFalse) == 0)
        status=MagickFalse;
      image->compression=compression;
    }
  (void) CloseBlob(image);
  return(status);
}
./CrossVul/dataset_final_sorted/CWE-400/c/bad_4780_0
crossvul-cpp_data_good_1273_4
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/slab.h> #include "dce/dce_8_0_d.h" #include "dce/dce_8_0_sh_mask.h" #include "dm_services.h" #include "link_encoder.h" #include "stream_encoder.h" #include "resource.h" #include "include/irq_service_interface.h" #include "irq/dce80/irq_service_dce80.h" #include "dce110/dce110_timing_generator.h" #include "dce110/dce110_resource.h" #include "dce80/dce80_timing_generator.h" #include "dce/dce_mem_input.h" #include "dce/dce_link_encoder.h" #include "dce/dce_stream_encoder.h" #include "dce/dce_ipp.h" #include "dce/dce_transform.h" #include "dce/dce_opp.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "dce80/dce80_hw_sequencer.h" #include "dce100/dce100_resource.h" #include "reg_helper.h" #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" #include "dce/dce_abm.h" #include "dce/dce_i2c.h" /* TODO remove this include */ #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT #include "gmc/gmc_7_1_d.h" #include "gmc/gmc_7_1_sh_mask.h" #endif #ifndef mmDP_DPHY_INTERNAL_CTRL #define mmDP_DPHY_INTERNAL_CTRL 0x1CDE #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x1CDE #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x1FDE #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x42DE #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x45DE #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x48DE #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4BDE #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x4EDE #endif #ifndef mmBIOS_SCRATCH_2 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF #endif #ifndef mmDP_DPHY_FAST_TRAINING #define mmDP_DPHY_FAST_TRAINING 0x1CCE #define mmDP0_DP_DPHY_FAST_TRAINING 0x1CCE #define mmDP1_DP_DPHY_FAST_TRAINING 0x1FCE #define mmDP2_DP_DPHY_FAST_TRAINING 0x42CE #define mmDP3_DP_DPHY_FAST_TRAINING 0x45CE #define mmDP4_DP_DPHY_FAST_TRAINING 0x48CE #define mmDP5_DP_DPHY_FAST_TRAINING 0x4BCE #define mmDP6_DP_DPHY_FAST_TRAINING 0x4ECE #endif #ifndef mmHPD_DC_HPD_CONTROL #define mmHPD_DC_HPD_CONTROL 0x189A #define mmHPD0_DC_HPD_CONTROL 0x189A #define mmHPD1_DC_HPD_CONTROL 0x18A2 #define mmHPD2_DC_HPD_CONTROL 0x18AA #define mmHPD3_DC_HPD_CONTROL 0x18B2 #define mmHPD4_DC_HPD_CONTROL 0x18BA #define mmHPD5_DC_HPD_CONTROL 0x18C2 #endif #define DCE11_DIG_FE_CNTL 0x4a00 #define DCE11_DIG_BE_CNTL 0x4a47 #define DCE11_DP_SEC 0x4ac3 static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = { { .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmGRPH_CONTROL - 
mmGRPH_CONTROL), .dmif = (mmDMIF_PG0_DPG_WATERMARK_MASK_CONTROL - mmDPG_WATERMARK_MASK_CONTROL), }, { .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL), .dmif = (mmDMIF_PG1_DPG_WATERMARK_MASK_CONTROL - mmDPG_WATERMARK_MASK_CONTROL), }, { .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL), .dmif = (mmDMIF_PG2_DPG_WATERMARK_MASK_CONTROL - mmDPG_WATERMARK_MASK_CONTROL), }, { .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL), .dmif = (mmDMIF_PG3_DPG_WATERMARK_MASK_CONTROL - mmDPG_WATERMARK_MASK_CONTROL), }, { .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL), .dmif = (mmDMIF_PG4_DPG_WATERMARK_MASK_CONTROL - mmDPG_WATERMARK_MASK_CONTROL), }, { .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL), .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL), .dmif = (mmDMIF_PG5_DPG_WATERMARK_MASK_CONTROL - mmDPG_WATERMARK_MASK_CONTROL), } }; /* set register offset */ #define SR(reg_name)\ .reg_name = mm ## reg_name /* set register offset with instance */ #define SRI(reg_name, block, id)\ .reg_name = mm ## block ## id ## _ ## reg_name #define ipp_regs(id)\ [id] = {\ IPP_COMMON_REG_LIST_DCE_BASE(id)\ } static const struct dce_ipp_registers ipp_regs[] = { ipp_regs(0), ipp_regs(1), ipp_regs(2), ipp_regs(3), ipp_regs(4), ipp_regs(5) }; static const struct dce_ipp_shift ipp_shift = { IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce_ipp_mask ipp_mask = { IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; #define transform_regs(id)\ [id] = {\ XFM_COMMON_REG_LIST_DCE80(id)\ } static const struct dce_transform_registers xfm_regs[] = { transform_regs(0), transform_regs(1), transform_regs(2), transform_regs(3), transform_regs(4), transform_regs(5) }; static const struct dce_transform_shift xfm_shift = { XFM_COMMON_MASK_SH_LIST_DCE80(__SHIFT) }; static const struct dce_transform_mask xfm_mask = { XFM_COMMON_MASK_SH_LIST_DCE80(_MASK) }; #define aux_regs(id)\ [id] = {\ AUX_REG_LIST(id)\ } static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3), aux_regs(4), aux_regs(5) }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3), hpd_regs(4), hpd_regs(5) }; #define link_regs(id)\ [id] = {\ LE_DCE80_REG_LIST(id)\ } static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(0), link_regs(1), link_regs(2), link_regs(3), link_regs(4), link_regs(5), link_regs(6), }; #define stream_enc_regs(id)\ [id] = {\ SE_COMMON_REG_LIST_DCE_BASE(id),\ .AFMT_CNTL = 0,\ } static const struct dce110_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), stream_enc_regs(3), stream_enc_regs(4), stream_enc_regs(5), stream_enc_regs(6) }; static const struct dce_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCE80_100(__SHIFT) }; static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK) }; #define opp_regs(id)\ [id] = {\ OPP_DCE_80_REG_LIST(id),\ } static const struct dce_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), opp_regs(3), opp_regs(4), opp_regs(5) }; static const struct dce_opp_shift opp_shift = { OPP_COMMON_MASK_SH_LIST_DCE_80(__SHIFT) }; static const struct dce_opp_mask opp_mask = { OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK) }; 
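/* Per-DDC-instance AUX engine register tables follow; aux_engine_regs[] is
 * indexed by instance and consumed by dce80_aux_engine_create() below. */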
#define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ .AUX_RESET_MASK = 0 \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4), aux_engine_regs(5) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), audio_regs(2), audio_regs(3), audio_regs(4), audio_regs(5), audio_regs(6), }; static const struct dce_audio_shift audio_shift = { AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { AUD_COMMON_MASK_SH_LIST(_MASK) }; #define clk_src_regs(id)\ [id] = {\ CS_COMMON_REG_LIST_DCE_80(id),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0), clk_src_regs(1), clk_src_regs(2) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; static const struct bios_registers bios_regs = { .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 6, .num_stream_encoder = 6, .num_pll = 3, .num_ddc = 6, }; static const struct resource_caps res_cap_81 = { .num_timing_generator = 4, .num_audio = 7, .num_stream_encoder = 7, .num_pll = 3, .num_ddc = 6, }; static const struct resource_caps res_cap_83 = { .num_timing_generator = 2, .num_audio = 6, .num_stream_encoder = 6, .num_pll = 2, .num_ddc = 2, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCE_RGB, .pixel_format_support = { .argb8888 = true, .nv12 = false, .fp16 = false }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 1, .fp16 = 1 }, .max_downscale_factor = { .argb8888 = 250, .nv12 = 1, .fp16 = 1 } }; static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCE80_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { DMCU_MASK_SH_LIST_DCE80(__SHIFT) }; static const struct dce_dmcu_mask dmcu_mask = { DMCU_MASK_SH_LIST_DCE80(_MASK) }; static const struct dce_abm_registers abm_regs = { ABM_DCE110_COMMON_REG_LIST() }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCE110(_MASK) }; #define CTX ctx #define REG(reg) mm ## reg #ifndef mmCC_DC_HDMI_STRAPS #define mmCC_DC_HDMI_STRAPS 0x1918 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40 #define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { REG_GET_2(CC_DC_HDMI_STRAPS, HDMI_DISABLE, &straps->hdmi_disable, AUDIO_STREAM_NUMBER, &straps->audio_stream_number); REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio); } static struct audio *create_audio( struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } static struct timing_generator *dce80_timing_generator_create( struct dc_context *ctx, uint32_t instance, const struct dce110_timing_generator_offsets *offsets) { struct dce110_timing_generator *tg110 = kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); if (!tg110) return NULL; dce80_timing_generator_construct(tg110, ctx, instance, offsets); return 
&tg110->base; } static struct output_pixel_processor *dce80_opp_create( struct dc_context *ctx, uint32_t inst) { struct dce110_opp *opp = kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); if (!opp) return NULL; dce110_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } struct dce_aux *dce80_aux_engine_create( struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst]); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5), i2c_inst_regs(6), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; struct dce_i2c_hw *dce80_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dce_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } struct dce_i2c_sw *dce80_i2c_sw_create( struct dc_context *ctx) { struct dce_i2c_sw *dce_i2c_sw = kzalloc(sizeof(struct dce_i2c_sw), GFP_KERNEL); if (!dce_i2c_sw) return NULL; dce_i2c_sw_construct(dce_i2c_sw, ctx); return dce_i2c_sw; } static struct stream_encoder *dce80_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dce110_stream_encoder *enc110 = kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); if (!enc110) return NULL; dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc110->base; } #define SRII(reg_name, block, id)\ .reg_name[id] = mm ## block ## id ## _ ## reg_name static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCE8_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCE8_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCE8_MASK_SH_LIST(_MASK) }; static struct dce_hwseq *dce80_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, .create_stream_encoder = dce80_stream_encoder_create, .create_hwseq = dce80_hwseq_create, }; #define mi_inst_regs(id) { \ MI_DCE8_REG_LIST(id), \ .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \ } static const struct dce_mem_input_registers mi_regs[] = { mi_inst_regs(0), mi_inst_regs(1), mi_inst_regs(2), mi_inst_regs(3), mi_inst_regs(4), mi_inst_regs(5), }; static const struct dce_mem_input_shift mi_shifts = { MI_DCE8_MASK_SH_LIST(__SHIFT), .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT }; static const struct dce_mem_input_mask mi_masks = { MI_DCE8_MASK_SH_LIST(_MASK), .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK }; static struct mem_input *dce80_mem_input_create( struct dc_context *ctx, uint32_t inst) { struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), GFP_KERNEL); if 
(!dce_mi) { BREAK_TO_DEBUGGER(); return NULL; } dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); dce_mi->wa.single_head_rdreq_dmif_limit = 2; return &dce_mi->base; } static void dce80_transform_destroy(struct transform **xfm) { kfree(TO_DCE_TRANSFORM(*xfm)); *xfm = NULL; } static struct transform *dce80_transform_create( struct dc_context *ctx, uint32_t inst) { struct dce_transform *transform = kzalloc(sizeof(struct dce_transform), GFP_KERNEL); if (!transform) return NULL; dce_transform_construct(transform, ctx, inst, &xfm_regs[inst], &xfm_shift, &xfm_mask); transform->prescaler_on = false; return &transform->base; } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 297000, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true }; struct link_encoder *dce80_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); if (!enc110) return NULL; dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; } struct clock_source *dce80_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dce110_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } void dce80_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); *clk_src = NULL; } static struct input_pixel_processor *dce80_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); return NULL; } dce_ipp_construct(ipp, ctx, inst, &ipp_regs[inst], &ipp_shift, &ipp_mask); return &ipp->base; } static void destruct(struct dce110_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.opps[i] != NULL) dce110_opp_destroy(&pool->base.opps[i]); if (pool->base.transforms[i] != NULL) dce80_transform_destroy(&pool->base.transforms[i]); if (pool->base.ipps[i] != NULL) dce_ipp_destroy(&pool->base.ipps[i]); if (pool->base.mis[i] != NULL) { kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); pool->base.mis[i] = NULL; } if (pool->base.timing_generators[i] != NULL) { kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) { dce80_clock_source_destroy(&pool->base.clock_sources[i]); } } if (pool->base.abm != NULL) dce_abm_destroy(&pool->base.abm); if 
(pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); if (pool->base.dp_clock_source != NULL) dce80_clock_source_destroy(&pool->base.dp_clock_source); for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i] != NULL) { dce_aud_destroy(&pool->base.audios[i]); } } if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); } } bool dce80_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate) { int i; bool at_least_one_pipe = false; for (i = 0; i < dc->res_pool->pipe_count; i++) { if (context->res_ctx.pipe_ctx[i].stream) at_least_one_pipe = true; } if (at_least_one_pipe) { /* TODO implement when needed but for now hardcode max value*/ context->bw_ctx.bw.dce.dispclk_khz = 681000; context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; } else { context->bw_ctx.bw.dce.dispclk_khz = 0; context->bw_ctx.bw.dce.yclk_khz = 0; } return true; } static bool dce80_validate_surface_sets( struct dc_state *context) { int i; for (i = 0; i < context->stream_count; i++) { if (context->stream_status[i].plane_count == 0) continue; if (context->stream_status[i].plane_count > 1) return false; if (context->stream_status[i].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) return false; } return true; } enum dc_status dce80_validate_global( struct dc *dc, struct dc_state *context) { if (!dce80_validate_surface_sets(context)) return DC_FAIL_SURFACE_VALIDATE; return DC_OK; } static void dce80_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } static const struct resource_funcs dce80_res_pool_funcs = { .destroy = dce80_destroy_resource_pool, .link_enc_create = dce80_link_encoder_create, .validate_bandwidth = dce80_validate_bandwidth, .validate_plane = dce100_validate_plane, .add_stream_to_ctx = dce100_add_stream_to_ctx, .validate_global = dce80_validate_global, .find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link }; static bool dce80_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) { unsigned int i; struct dc_context *ctx = dc->ctx; struct dc_bios *bp; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap; pool->base.funcs = &dce80_res_pool_funcs; /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; pool->base.pipe_count = res_cap.num_timing_generator; pool->base.timing_generator_count = res_cap.num_timing_generator; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 40; dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; /************************************************* * Create resources * *************************************************/ bp = ctx->dc_bios; if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); pool->base.clock_sources[0] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[1] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[2] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 3; } else { pool->base.dp_clock_source = 
dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); pool->base.clock_sources[0] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[1] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 2; } if (pool->base.dp_clock_source == NULL) { dm_error("DC: failed to create dp clock source!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } } pool->base.dmcu = dce_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dce80_create(&init_data); if (!pool->base.irqs) goto res_create_fail; } for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = dce80_timing_generator_create( ctx, i, &dce80_tg_offsets[i]); if (pool->base.timing_generators[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto res_create_fail; } pool->base.mis[i] = dce80_mem_input_create(ctx, i); if (pool->base.mis[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create memory input!\n"); goto res_create_fail; } pool->base.ipps[i] = dce80_ipp_create(ctx, i); if (pool->base.ipps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create input pixel processor!\n"); goto res_create_fail; } pool->base.transforms[i] = dce80_transform_create(ctx, i); if (pool->base.transforms[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create transform!\n"); goto res_create_fail; } pool->base.opps[i] = dce80_opp_create(ctx, i); if (pool->base.opps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create output pixel processor!\n"); goto res_create_fail; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce80_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto res_create_fail; } pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create i2c engine!!\n"); goto res_create_fail; } pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx); if (pool->base.sw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create sw i2c!!\n"); goto res_create_fail; } } dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; dc->caps.disable_dp_clk_share = true; if (!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) goto res_create_fail; /* Create hardware sequencer */ dce80_hw_sequencer_construct(dc); return true; res_create_fail: destruct(pool); return false; } struct resource_pool *dce80_create_resource_pool( uint8_t num_virtual_links, struct dc *dc) { struct dce110_resource_pool *pool = kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (dce80_construct(num_virtual_links, dc, pool)) return &pool->base; BREAK_TO_DEBUGGER(); return NULL; } 
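/*
 * A minimal, stand-alone sketch of the error-handling idiom used by
 * dce80_construct() and dce80_create_resource_pool() above: every allocation
 * failure jumps to a single res_create_fail label, and the teardown routine
 * copes with a partially built pool by treating NULL members as "not yet
 * created".  The hypothetical_pool type, its members and the malloc() sizes
 * below are invented for illustration only and are not part of the driver;
 * the real code gets a zeroed pool from kzalloc() and frees with kfree().
 */
#include <stdlib.h>
#include <string.h>

struct hypothetical_pool {
	void *clock_source;
	void *timing_generator;
	void *mem_input;
};

/* NULL-tolerant teardown: safe to call on a partially constructed pool. */
static void hypothetical_pool_destruct(struct hypothetical_pool *pool)
{
	free(pool->mem_input);          /* free(NULL) is defined as a no-op */
	free(pool->timing_generator);
	free(pool->clock_source);
	memset(pool, 0, sizeof(*pool));
}

/* Single-exit construction: every failure funnels into one cleanup label. */
static int hypothetical_pool_construct(struct hypothetical_pool *pool)
{
	memset(pool, 0, sizeof(*pool)); /* the driver gets this from kzalloc() */

	pool->clock_source = malloc(64);
	if (pool->clock_source == NULL)
		goto res_create_fail;

	pool->timing_generator = malloc(64);
	if (pool->timing_generator == NULL)
		goto res_create_fail;

	pool->mem_input = malloc(64);
	if (pool->mem_input == NULL)
		goto res_create_fail;

	return 0;

res_create_fail:
	hypothetical_pool_destruct(pool);
	return -1;
}

int main(void)
{
	struct hypothetical_pool pool;
	int ret = hypothetical_pool_construct(&pool);

	if (ret == 0)
		hypothetical_pool_destruct(&pool);
	return ret;
}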
static bool dce81_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) { unsigned int i; struct dc_context *ctx = dc->ctx; struct dc_bios *bp; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap_81; pool->base.funcs = &dce80_res_pool_funcs; /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; pool->base.pipe_count = res_cap_81.num_timing_generator; pool->base.timing_generator_count = res_cap_81.num_timing_generator; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 40; dc->caps.max_cursor_size = 128; dc->caps.is_apu = true; /************************************************* * Create resources * *************************************************/ bp = ctx->dc_bios; if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); pool->base.clock_sources[0] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[1] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[2] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 3; } else { pool->base.dp_clock_source = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); pool->base.clock_sources[0] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[1] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 2; } if (pool->base.dp_clock_source == NULL) { dm_error("DC: failed to create dp clock source!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } } pool->base.dmcu = dce_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dce80_create(&init_data); if (!pool->base.irqs) goto res_create_fail; } for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = dce80_timing_generator_create( ctx, i, &dce80_tg_offsets[i]); if (pool->base.timing_generators[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto res_create_fail; } pool->base.mis[i] = dce80_mem_input_create(ctx, i); if (pool->base.mis[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create memory input!\n"); goto res_create_fail; } pool->base.ipps[i] = dce80_ipp_create(ctx, i); if (pool->base.ipps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create input pixel processor!\n"); goto res_create_fail; } pool->base.transforms[i] = dce80_transform_create(ctx, i); if (pool->base.transforms[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create transform!\n"); goto res_create_fail; } pool->base.opps[i] = 
dce80_opp_create(ctx, i); if (pool->base.opps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create output pixel processor!\n"); goto res_create_fail; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce80_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto res_create_fail; } pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create i2c engine!!\n"); goto res_create_fail; } pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx); if (pool->base.sw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create sw i2c!!\n"); goto res_create_fail; } } dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; dc->caps.disable_dp_clk_share = true; if (!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) goto res_create_fail; /* Create hardware sequencer */ dce80_hw_sequencer_construct(dc); return true; res_create_fail: destruct(pool); return false; } struct resource_pool *dce81_create_resource_pool( uint8_t num_virtual_links, struct dc *dc) { struct dce110_resource_pool *pool = kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (dce81_construct(num_virtual_links, dc, pool)) return &pool->base; BREAK_TO_DEBUGGER(); return NULL; } static bool dce83_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) { unsigned int i; struct dc_context *ctx = dc->ctx; struct dc_bios *bp; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap_83; pool->base.funcs = &dce80_res_pool_funcs; /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; pool->base.pipe_count = res_cap_83.num_timing_generator; pool->base.timing_generator_count = res_cap_83.num_timing_generator; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 40; dc->caps.max_cursor_size = 128; dc->caps.is_apu = true; /************************************************* * Create resources * *************************************************/ bp = ctx->dc_bios; if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); pool->base.clock_sources[0] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], false); pool->base.clock_sources[1] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false); pool->base.clk_src_count = 2; } else { pool->base.dp_clock_source = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], true); pool->base.clock_sources[0] = dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false); pool->base.clk_src_count = 1; } if (pool->base.dp_clock_source == NULL) { dm_error("DC: failed to create dp clock source!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } } pool->base.dmcu = dce_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto 
res_create_fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dce80_create(&init_data); if (!pool->base.irqs) goto res_create_fail; } for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = dce80_timing_generator_create( ctx, i, &dce80_tg_offsets[i]); if (pool->base.timing_generators[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto res_create_fail; } pool->base.mis[i] = dce80_mem_input_create(ctx, i); if (pool->base.mis[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create memory input!\n"); goto res_create_fail; } pool->base.ipps[i] = dce80_ipp_create(ctx, i); if (pool->base.ipps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create input pixel processor!\n"); goto res_create_fail; } pool->base.transforms[i] = dce80_transform_create(ctx, i); if (pool->base.transforms[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create transform!\n"); goto res_create_fail; } pool->base.opps[i] = dce80_opp_create(ctx, i); if (pool->base.opps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create output pixel processor!\n"); goto res_create_fail; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce80_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto res_create_fail; } pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create i2c engine!!\n"); goto res_create_fail; } pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx); if (pool->base.sw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create sw i2c!!\n"); goto res_create_fail; } } dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; dc->caps.disable_dp_clk_share = true; if (!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) goto res_create_fail; /* Create hardware sequencer */ dce80_hw_sequencer_construct(dc); return true; res_create_fail: destruct(pool); return false; } struct resource_pool *dce83_create_resource_pool( uint8_t num_virtual_links, struct dc *dc) { struct dce110_resource_pool *pool = kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (dce83_construct(num_virtual_links, dc, pool)) return &pool->base; BREAK_TO_DEBUGGER(); return NULL; }
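/*
 * A hedged sketch of the per-instance register-table technique that the
 * i2c_inst_regs()/mi_inst_regs() macros above rely on: one macro emits a
 * designated initializer per hardware instance, the initializers are
 * collected into a const array, and each *_create(ctx, inst) constructor
 * simply indexes that array.  The example_regs layout, the EXAMPLE_INST_REGS
 * macro and the 0x100 stride are made up for this sketch; the real macros
 * expand to mm-prefixed register offsets taken from the hardware headers.
 */
#include <stdint.h>
#include <stdio.h>

struct example_regs {
	uint32_t control;
	uint32_t status;
};

/* One designated initializer per instance, kept in lockstep by the macro. */
#define EXAMPLE_INST_REGS(id) \
	{ .control = 0x1000 + (id) * 0x100, .status = 0x1004 + (id) * 0x100 }

static const struct example_regs example_regs_table[] = {
	EXAMPLE_INST_REGS(0),
	EXAMPLE_INST_REGS(1),
	EXAMPLE_INST_REGS(2),
};

/* Constructors just index the table by instance number. */
static const struct example_regs *example_get_regs(uint32_t inst)
{
	if (inst >= sizeof(example_regs_table) / sizeof(example_regs_table[0]))
		return NULL;
	return &example_regs_table[inst];
}

int main(void)
{
	const struct example_regs *regs = example_get_regs(1);

	if (regs != NULL)
		printf("inst 1 control reg at 0x%x\n",
		       (unsigned int)regs->control);
	return 0;
}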
./CrossVul/dataset_final_sorted/CWE-400/c/good_1273_4
crossvul-cpp_data_bad_1235_0
/* * Copyright (c) 2017 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <net/addrconf.h> #include <linux/etherdevice.h> #include <linux/mlx5/vport.h> #include "mlx5_core.h" #include "lib/mlx5.h" #include "fpga/conn.h" #define MLX5_FPGA_PKEY 0xFFFF #define MLX5_FPGA_PKEY_INDEX 0 /* RoCE PKEY 0xFFFF is always at index 0 */ #define MLX5_FPGA_RECV_SIZE 2048 #define MLX5_FPGA_PORT_NUM 1 #define MLX5_FPGA_CQ_BUDGET 64 static int mlx5_fpga_conn_map_buf(struct mlx5_fpga_conn *conn, struct mlx5_fpga_dma_buf *buf) { struct device *dma_device; int err = 0; if (unlikely(!buf->sg[0].data)) goto out; dma_device = &conn->fdev->mdev->pdev->dev; buf->sg[0].dma_addr = dma_map_single(dma_device, buf->sg[0].data, buf->sg[0].size, buf->dma_dir); err = dma_mapping_error(dma_device, buf->sg[0].dma_addr); if (unlikely(err)) { mlx5_fpga_warn(conn->fdev, "DMA error on sg 0: %d\n", err); err = -ENOMEM; goto out; } if (!buf->sg[1].data) goto out; buf->sg[1].dma_addr = dma_map_single(dma_device, buf->sg[1].data, buf->sg[1].size, buf->dma_dir); err = dma_mapping_error(dma_device, buf->sg[1].dma_addr); if (unlikely(err)) { mlx5_fpga_warn(conn->fdev, "DMA error on sg 1: %d\n", err); dma_unmap_single(dma_device, buf->sg[0].dma_addr, buf->sg[0].size, buf->dma_dir); err = -ENOMEM; } out: return err; } static void mlx5_fpga_conn_unmap_buf(struct mlx5_fpga_conn *conn, struct mlx5_fpga_dma_buf *buf) { struct device *dma_device; dma_device = &conn->fdev->mdev->pdev->dev; if (buf->sg[1].data) dma_unmap_single(dma_device, buf->sg[1].dma_addr, buf->sg[1].size, buf->dma_dir); if (likely(buf->sg[0].data)) dma_unmap_single(dma_device, buf->sg[0].dma_addr, buf->sg[0].size, buf->dma_dir); } static int mlx5_fpga_conn_post_recv(struct mlx5_fpga_conn *conn, struct mlx5_fpga_dma_buf *buf) { struct mlx5_wqe_data_seg *data; unsigned int ix; int err = 0; err = mlx5_fpga_conn_map_buf(conn, buf); if (unlikely(err)) goto out; if (unlikely(conn->qp.rq.pc - conn->qp.rq.cc >= conn->qp.rq.size)) { mlx5_fpga_conn_unmap_buf(conn, buf); return -EBUSY; } ix = conn->qp.rq.pc & (conn->qp.rq.size - 1); data = mlx5_wq_cyc_get_wqe(&conn->qp.wq.rq, ix); data->byte_count = cpu_to_be32(buf->sg[0].size); data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key); data->addr = 
cpu_to_be64(buf->sg[0].dma_addr); conn->qp.rq.pc++; conn->qp.rq.bufs[ix] = buf; /* Make sure that descriptors are written before doorbell record. */ dma_wmb(); *conn->qp.wq.rq.db = cpu_to_be32(conn->qp.rq.pc & 0xffff); out: return err; } static void mlx5_fpga_conn_notify_hw(struct mlx5_fpga_conn *conn, void *wqe) { /* ensure wqe is visible to device before updating doorbell record */ dma_wmb(); *conn->qp.wq.sq.db = cpu_to_be32(conn->qp.sq.pc); /* Make sure that doorbell record is visible before ringing */ wmb(); mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET); } static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn, struct mlx5_fpga_dma_buf *buf) { struct mlx5_wqe_ctrl_seg *ctrl; struct mlx5_wqe_data_seg *data; unsigned int ix, sgi; int size = 1; ix = conn->qp.sq.pc & (conn->qp.sq.size - 1); ctrl = mlx5_wq_cyc_get_wqe(&conn->qp.wq.sq, ix); data = (void *)(ctrl + 1); for (sgi = 0; sgi < ARRAY_SIZE(buf->sg); sgi++) { if (!buf->sg[sgi].data) break; data->byte_count = cpu_to_be32(buf->sg[sgi].size); data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key); data->addr = cpu_to_be64(buf->sg[sgi].dma_addr); data++; size++; } ctrl->imm = 0; ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; ctrl->opmod_idx_opcode = cpu_to_be32(((conn->qp.sq.pc & 0xffff) << 8) | MLX5_OPCODE_SEND); ctrl->qpn_ds = cpu_to_be32(size | (conn->qp.mqp.qpn << 8)); conn->qp.sq.pc++; conn->qp.sq.bufs[ix] = buf; mlx5_fpga_conn_notify_hw(conn, ctrl); } int mlx5_fpga_conn_send(struct mlx5_fpga_conn *conn, struct mlx5_fpga_dma_buf *buf) { unsigned long flags; int err; if (!conn->qp.active) return -ENOTCONN; buf->dma_dir = DMA_TO_DEVICE; err = mlx5_fpga_conn_map_buf(conn, buf); if (err) return err; spin_lock_irqsave(&conn->qp.sq.lock, flags); if (conn->qp.sq.pc - conn->qp.sq.cc >= conn->qp.sq.size) { list_add_tail(&buf->list, &conn->qp.sq.backlog); goto out_unlock; } mlx5_fpga_conn_post_send(conn, buf); out_unlock: spin_unlock_irqrestore(&conn->qp.sq.lock, flags); return err; } static int mlx5_fpga_conn_post_recv_buf(struct mlx5_fpga_conn *conn) { struct mlx5_fpga_dma_buf *buf; int err; buf = kzalloc(sizeof(*buf) + MLX5_FPGA_RECV_SIZE, 0); if (!buf) return -ENOMEM; buf->sg[0].data = (void *)(buf + 1); buf->sg[0].size = MLX5_FPGA_RECV_SIZE; buf->dma_dir = DMA_FROM_DEVICE; err = mlx5_fpga_conn_post_recv(conn, buf); if (err) kfree(buf); return err; } static int mlx5_fpga_conn_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, struct mlx5_core_mkey *mkey) { int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); void *mkc; u32 *in; int err; in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA); MLX5_SET(mkc, mkc, lw, 1); MLX5_SET(mkc, mkc, lr, 1); MLX5_SET(mkc, mkc, pd, pdn); MLX5_SET(mkc, mkc, length64, 1); MLX5_SET(mkc, mkc, qpn, 0xffffff); err = mlx5_core_create_mkey(mdev, mkey, in, inlen); kvfree(in); return err; } static void mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn, struct mlx5_cqe64 *cqe, u8 status) { struct mlx5_fpga_dma_buf *buf; int ix, err; ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1); buf = conn->qp.rq.bufs[ix]; conn->qp.rq.bufs[ix] = NULL; conn->qp.rq.cc++; if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR))) mlx5_fpga_warn(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n", buf, conn->fpga_qpn, status); else mlx5_fpga_dbg(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n", buf, conn->fpga_qpn, status); 
mlx5_fpga_conn_unmap_buf(conn, buf); if (unlikely(status || !conn->qp.active)) { conn->qp.active = false; kfree(buf); return; } buf->sg[0].size = be32_to_cpu(cqe->byte_cnt); mlx5_fpga_dbg(conn->fdev, "Message with %u bytes received successfully\n", buf->sg[0].size); conn->recv_cb(conn->cb_arg, buf); buf->sg[0].size = MLX5_FPGA_RECV_SIZE; err = mlx5_fpga_conn_post_recv(conn, buf); if (unlikely(err)) { mlx5_fpga_warn(conn->fdev, "Failed to re-post recv buf: %d\n", err); kfree(buf); } } static void mlx5_fpga_conn_sq_cqe(struct mlx5_fpga_conn *conn, struct mlx5_cqe64 *cqe, u8 status) { struct mlx5_fpga_dma_buf *buf, *nextbuf; unsigned long flags; int ix; spin_lock_irqsave(&conn->qp.sq.lock, flags); ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.sq.size - 1); buf = conn->qp.sq.bufs[ix]; conn->qp.sq.bufs[ix] = NULL; conn->qp.sq.cc++; /* Handle backlog still under the spinlock to ensure message post order */ if (unlikely(!list_empty(&conn->qp.sq.backlog))) { if (likely(conn->qp.active)) { nextbuf = list_first_entry(&conn->qp.sq.backlog, struct mlx5_fpga_dma_buf, list); list_del(&nextbuf->list); mlx5_fpga_conn_post_send(conn, nextbuf); } } spin_unlock_irqrestore(&conn->qp.sq.lock, flags); if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR))) mlx5_fpga_warn(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n", buf, conn->fpga_qpn, status); else mlx5_fpga_dbg(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n", buf, conn->fpga_qpn, status); mlx5_fpga_conn_unmap_buf(conn, buf); if (likely(buf->complete)) buf->complete(conn, conn->fdev, buf, status); if (unlikely(status)) conn->qp.active = false; } static void mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn, struct mlx5_cqe64 *cqe) { u8 opcode, status = 0; opcode = get_cqe_opcode(cqe); switch (opcode) { case MLX5_CQE_REQ_ERR: status = ((struct mlx5_err_cqe *)cqe)->syndrome; /* Fall through */ case MLX5_CQE_REQ: mlx5_fpga_conn_sq_cqe(conn, cqe, status); break; case MLX5_CQE_RESP_ERR: status = ((struct mlx5_err_cqe *)cqe)->syndrome; /* Fall through */ case MLX5_CQE_RESP_SEND: mlx5_fpga_conn_rq_cqe(conn, cqe, status); break; default: mlx5_fpga_warn(conn->fdev, "Unexpected cqe opcode %u\n", opcode); } } static void mlx5_fpga_conn_arm_cq(struct mlx5_fpga_conn *conn) { mlx5_cq_arm(&conn->cq.mcq, MLX5_CQ_DB_REQ_NOT, conn->fdev->conn_res.uar->map, conn->cq.wq.cc); } static void mlx5_fpga_conn_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event event) { struct mlx5_fpga_conn *conn; conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq); mlx5_fpga_warn(conn->fdev, "CQ event %u on CQ #%u\n", event, mcq->cqn); } static void mlx5_fpga_conn_event(struct mlx5_core_qp *mqp, int event) { struct mlx5_fpga_conn *conn; conn = container_of(mqp, struct mlx5_fpga_conn, qp.mqp); mlx5_fpga_warn(conn->fdev, "QP event %u on QP #%u\n", event, mqp->qpn); } static inline void mlx5_fpga_conn_cqes(struct mlx5_fpga_conn *conn, unsigned int budget) { struct mlx5_cqe64 *cqe; while (budget) { cqe = mlx5_cqwq_get_cqe(&conn->cq.wq); if (!cqe) break; budget--; mlx5_cqwq_pop(&conn->cq.wq); mlx5_fpga_conn_handle_cqe(conn, cqe); mlx5_cqwq_update_db_record(&conn->cq.wq); } if (!budget) { tasklet_schedule(&conn->cq.tasklet); return; } mlx5_fpga_dbg(conn->fdev, "Re-arming CQ with cc# %u\n", conn->cq.wq.cc); /* ensure cq space is freed before enabling more cqes */ wmb(); mlx5_fpga_conn_arm_cq(conn); } static void mlx5_fpga_conn_cq_tasklet(unsigned long data) { struct mlx5_fpga_conn *conn = (void *)data; if (unlikely(!conn->qp.active)) return; 
mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET); } static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe) { struct mlx5_fpga_conn *conn; conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq); if (unlikely(!conn->qp.active)) return; mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET); } static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size) { struct mlx5_fpga_device *fdev = conn->fdev; struct mlx5_core_dev *mdev = fdev->mdev; u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {0}; u32 out[MLX5_ST_SZ_DW(create_cq_out)]; struct mlx5_wq_param wqp; struct mlx5_cqe64 *cqe; int inlen, err, eqn; unsigned int irqn; void *cqc, *in; __be64 *pas; u32 i; cq_size = roundup_pow_of_two(cq_size); MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size)); wqp.buf_numa_node = mdev->priv.numa_node; wqp.db_numa_node = mdev->priv.numa_node; err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &conn->cq.wq, &conn->cq.wq_ctrl); if (err) return err; for (i = 0; i < mlx5_cqwq_get_size(&conn->cq.wq); i++) { cqe = mlx5_cqwq_get_wqe(&conn->cq.wq, i); cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK; } inlen = MLX5_ST_SZ_BYTES(create_cq_in) + sizeof(u64) * conn->cq.wq_ctrl.buf.npages; in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto err_cqwq; } err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn); if (err) goto err_cqwq; cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size)); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index); MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma); pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas); mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.buf, pas); err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen, out, sizeof(out)); kvfree(in); if (err) goto err_cqwq; conn->cq.mcq.cqe_sz = 64; conn->cq.mcq.set_ci_db = conn->cq.wq_ctrl.db.db; conn->cq.mcq.arm_db = conn->cq.wq_ctrl.db.db + 1; *conn->cq.mcq.set_ci_db = 0; *conn->cq.mcq.arm_db = 0; conn->cq.mcq.vector = 0; conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete; conn->cq.mcq.event = mlx5_fpga_conn_cq_event; conn->cq.mcq.irqn = irqn; conn->cq.mcq.uar = fdev->conn_res.uar; tasklet_init(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet, (unsigned long)conn); mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn); goto out; err_cqwq: mlx5_wq_destroy(&conn->cq.wq_ctrl); out: return err; } static void mlx5_fpga_conn_destroy_cq(struct mlx5_fpga_conn *conn) { tasklet_disable(&conn->cq.tasklet); tasklet_kill(&conn->cq.tasklet); mlx5_core_destroy_cq(conn->fdev->mdev, &conn->cq.mcq); mlx5_wq_destroy(&conn->cq.wq_ctrl); } static int mlx5_fpga_conn_create_wq(struct mlx5_fpga_conn *conn, void *qpc) { struct mlx5_fpga_device *fdev = conn->fdev; struct mlx5_core_dev *mdev = fdev->mdev; struct mlx5_wq_param wqp; wqp.buf_numa_node = mdev->priv.numa_node; wqp.db_numa_node = mdev->priv.numa_node; return mlx5_wq_qp_create(mdev, &wqp, qpc, &conn->qp.wq, &conn->qp.wq_ctrl); } static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn, unsigned int tx_size, unsigned int rx_size) { struct mlx5_fpga_device *fdev = conn->fdev; struct mlx5_core_dev *mdev = fdev->mdev; u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {0}; void *in = NULL, *qpc; int err, inlen; conn->qp.rq.pc = 0; conn->qp.rq.cc = 0; conn->qp.rq.size = roundup_pow_of_two(rx_size); conn->qp.sq.pc = 0; conn->qp.sq.cc = 0; conn->qp.sq.size = roundup_pow_of_two(tx_size); MLX5_SET(qpc, 
temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4); MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(conn->qp.rq.size)); MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(conn->qp.sq.size)); err = mlx5_fpga_conn_create_wq(conn, temp_qpc); if (err) goto out; conn->qp.rq.bufs = kvcalloc(conn->qp.rq.size, sizeof(conn->qp.rq.bufs[0]), GFP_KERNEL); if (!conn->qp.rq.bufs) { err = -ENOMEM; goto err_wq; } conn->qp.sq.bufs = kvcalloc(conn->qp.sq.size, sizeof(conn->qp.sq.bufs[0]), GFP_KERNEL); if (!conn->qp.sq.bufs) { err = -ENOMEM; goto err_rq_bufs; } inlen = MLX5_ST_SZ_BYTES(create_qp_in) + MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * conn->qp.wq_ctrl.buf.npages; in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto err_sq_bufs; } qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); MLX5_SET(qpc, qpc, uar_page, fdev->conn_res.uar->index); MLX5_SET(qpc, qpc, log_page_size, conn->qp.wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET(qpc, qpc, fre, 1); MLX5_SET(qpc, qpc, rlky, 1); MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC); MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); MLX5_SET(qpc, qpc, pd, fdev->conn_res.pdn); MLX5_SET(qpc, qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4); MLX5_SET(qpc, qpc, log_rq_size, ilog2(conn->qp.rq.size)); MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ); MLX5_SET(qpc, qpc, log_sq_size, ilog2(conn->qp.sq.size)); MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn); MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn); MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma); if (MLX5_CAP_GEN(mdev, cqe_version) == 1) MLX5_SET(qpc, qpc, user_index, 0xFFFFFF); mlx5_fill_page_frag_array(&conn->qp.wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas)); err = mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen); if (err) goto err_sq_bufs; conn->qp.mqp.event = mlx5_fpga_conn_event; mlx5_fpga_dbg(fdev, "Created QP #0x%x\n", conn->qp.mqp.qpn); goto out; err_sq_bufs: kvfree(conn->qp.sq.bufs); err_rq_bufs: kvfree(conn->qp.rq.bufs); err_wq: mlx5_wq_destroy(&conn->qp.wq_ctrl); out: kvfree(in); return err; } static void mlx5_fpga_conn_free_recv_bufs(struct mlx5_fpga_conn *conn) { int ix; for (ix = 0; ix < conn->qp.rq.size; ix++) { if (!conn->qp.rq.bufs[ix]) continue; mlx5_fpga_conn_unmap_buf(conn, conn->qp.rq.bufs[ix]); kfree(conn->qp.rq.bufs[ix]); conn->qp.rq.bufs[ix] = NULL; } } static void mlx5_fpga_conn_flush_send_bufs(struct mlx5_fpga_conn *conn) { struct mlx5_fpga_dma_buf *buf, *temp; int ix; for (ix = 0; ix < conn->qp.sq.size; ix++) { buf = conn->qp.sq.bufs[ix]; if (!buf) continue; conn->qp.sq.bufs[ix] = NULL; mlx5_fpga_conn_unmap_buf(conn, buf); if (!buf->complete) continue; buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR); } list_for_each_entry_safe(buf, temp, &conn->qp.sq.backlog, list) { mlx5_fpga_conn_unmap_buf(conn, buf); if (!buf->complete) continue; buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR); } } static void mlx5_fpga_conn_destroy_qp(struct mlx5_fpga_conn *conn) { mlx5_core_destroy_qp(conn->fdev->mdev, &conn->qp.mqp); mlx5_fpga_conn_free_recv_bufs(conn); mlx5_fpga_conn_flush_send_bufs(conn); kvfree(conn->qp.sq.bufs); kvfree(conn->qp.rq.bufs); mlx5_wq_destroy(&conn->qp.wq_ctrl); } static inline int mlx5_fpga_conn_reset_qp(struct mlx5_fpga_conn *conn) { struct mlx5_core_dev *mdev = conn->fdev->mdev; mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to RST\n", conn->qp.mqp.qpn); return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, NULL, &conn->qp.mqp); } static inline int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn) { struct mlx5_fpga_device 
*fdev = conn->fdev; struct mlx5_core_dev *mdev = fdev->mdev; u32 *qpc = NULL; int err; mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to INIT\n", conn->qp.mqp.qpn); qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL); if (!qpc) { err = -ENOMEM; goto out; } MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC); MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX); MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM); MLX5_SET(qpc, qpc, pd, conn->fdev->conn_res.pdn); MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn); MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn); MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma); err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc, &conn->qp.mqp); if (err) { mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err); goto out; } out: kfree(qpc); return err; } static inline int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn) { struct mlx5_fpga_device *fdev = conn->fdev; struct mlx5_core_dev *mdev = fdev->mdev; u32 *qpc = NULL; int err; mlx5_fpga_dbg(conn->fdev, "QP RTR\n"); qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL); if (!qpc) { err = -ENOMEM; goto out; } MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_1K_BYTES); MLX5_SET(qpc, qpc, log_msg_max, (u8)MLX5_CAP_GEN(mdev, log_max_msg)); MLX5_SET(qpc, qpc, remote_qpn, conn->fpga_qpn); MLX5_SET(qpc, qpc, next_rcv_psn, MLX5_GET(fpga_qpc, conn->fpga_qpc, next_send_psn)); MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX); MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM); ether_addr_copy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32), MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_mac_47_32)); MLX5_SET(qpc, qpc, primary_address_path.udp_sport, MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port)); MLX5_SET(qpc, qpc, primary_address_path.src_addr_index, conn->qp.sgid_index); MLX5_SET(qpc, qpc, primary_address_path.hop_limit, 0); memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rgid_rip), MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_ip), MLX5_FLD_SZ_BYTES(qpc, primary_address_path.rgid_rip)); err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc, &conn->qp.mqp); if (err) { mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err); goto out; } out: kfree(qpc); return err; } static inline int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn) { struct mlx5_fpga_device *fdev = conn->fdev; struct mlx5_core_dev *mdev = fdev->mdev; u32 *qpc = NULL; u32 opt_mask; int err; mlx5_fpga_dbg(conn->fdev, "QP RTS\n"); qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL); if (!qpc) { err = -ENOMEM; goto out; } MLX5_SET(qpc, qpc, log_ack_req_freq, 8); MLX5_SET(qpc, qpc, min_rnr_nak, 0x12); MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x12); /* ~1.07s */ MLX5_SET(qpc, qpc, next_send_psn, MLX5_GET(fpga_qpc, conn->fpga_qpc, next_rcv_psn)); MLX5_SET(qpc, qpc, retry_count, 7); MLX5_SET(qpc, qpc, rnr_retry, 7); /* Infinite retry if RNR NACK */ opt_mask = MLX5_QP_OPTPAR_RNR_TIMEOUT; err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, opt_mask, qpc, &conn->qp.mqp); if (err) { mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err); goto out; } out: kfree(qpc); return err; } static int mlx5_fpga_conn_connect(struct mlx5_fpga_conn *conn) { struct mlx5_fpga_device *fdev = conn->fdev; int err; MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_ACTIVE); err = mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn, MLX5_FPGA_QPC_STATE, &conn->fpga_qpc); if (err) { mlx5_fpga_err(fdev, 
"Failed to activate FPGA RC QP: %d\n", err); goto out; } err = mlx5_fpga_conn_reset_qp(conn); if (err) { mlx5_fpga_err(fdev, "Failed to change QP state to reset\n"); goto err_fpga_qp; } err = mlx5_fpga_conn_init_qp(conn); if (err) { mlx5_fpga_err(fdev, "Failed to modify QP from RESET to INIT\n"); goto err_fpga_qp; } conn->qp.active = true; while (!mlx5_fpga_conn_post_recv_buf(conn)) ; err = mlx5_fpga_conn_rtr_qp(conn); if (err) { mlx5_fpga_err(fdev, "Failed to change QP state from INIT to RTR\n"); goto err_recv_bufs; } err = mlx5_fpga_conn_rts_qp(conn); if (err) { mlx5_fpga_err(fdev, "Failed to change QP state from RTR to RTS\n"); goto err_recv_bufs; } goto out; err_recv_bufs: mlx5_fpga_conn_free_recv_bufs(conn); err_fpga_qp: MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT); if (mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn, MLX5_FPGA_QPC_STATE, &conn->fpga_qpc)) mlx5_fpga_err(fdev, "Failed to revert FPGA QP to INIT\n"); out: return err; } struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev, struct mlx5_fpga_conn_attr *attr, enum mlx5_ifc_fpga_qp_type qp_type) { struct mlx5_fpga_conn *ret, *conn; u8 *remote_mac, *remote_ip; int err; if (!attr->recv_cb) return ERR_PTR(-EINVAL); conn = kzalloc(sizeof(*conn), GFP_KERNEL); if (!conn) return ERR_PTR(-ENOMEM); conn->fdev = fdev; INIT_LIST_HEAD(&conn->qp.sq.backlog); spin_lock_init(&conn->qp.sq.lock); conn->recv_cb = attr->recv_cb; conn->cb_arg = attr->cb_arg; remote_mac = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_mac_47_32); err = mlx5_query_mac_address(fdev->mdev, remote_mac); if (err) { mlx5_fpga_err(fdev, "Failed to query local MAC: %d\n", err); ret = ERR_PTR(err); goto err; } /* Build Modified EUI-64 IPv6 address from the MAC address */ remote_ip = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_ip); remote_ip[0] = 0xfe; remote_ip[1] = 0x80; addrconf_addr_eui48(&remote_ip[8], remote_mac); err = mlx5_core_reserved_gid_alloc(fdev->mdev, &conn->qp.sgid_index); if (err) { mlx5_fpga_err(fdev, "Failed to allocate SGID: %d\n", err); ret = ERR_PTR(err); goto err; } err = mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index, MLX5_ROCE_VERSION_2, MLX5_ROCE_L3_TYPE_IPV6, remote_ip, remote_mac, true, 0, MLX5_FPGA_PORT_NUM); if (err) { mlx5_fpga_err(fdev, "Failed to set SGID: %d\n", err); ret = ERR_PTR(err); goto err_rsvd_gid; } mlx5_fpga_dbg(fdev, "Reserved SGID index %u\n", conn->qp.sgid_index); /* Allow for one cqe per rx/tx wqe, plus one cqe for the next wqe, * created during processing of the cqe */ err = mlx5_fpga_conn_create_cq(conn, (attr->tx_size + attr->rx_size) * 2); if (err) { mlx5_fpga_err(fdev, "Failed to create CQ: %d\n", err); ret = ERR_PTR(err); goto err_gid; } mlx5_fpga_conn_arm_cq(conn); err = mlx5_fpga_conn_create_qp(conn, attr->tx_size, attr->rx_size); if (err) { mlx5_fpga_err(fdev, "Failed to create QP: %d\n", err); ret = ERR_PTR(err); goto err_cq; } MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT); MLX5_SET(fpga_qpc, conn->fpga_qpc, qp_type, qp_type); MLX5_SET(fpga_qpc, conn->fpga_qpc, st, MLX5_FPGA_QPC_ST_RC); MLX5_SET(fpga_qpc, conn->fpga_qpc, ether_type, ETH_P_8021Q); MLX5_SET(fpga_qpc, conn->fpga_qpc, vid, 0); MLX5_SET(fpga_qpc, conn->fpga_qpc, next_rcv_psn, 1); MLX5_SET(fpga_qpc, conn->fpga_qpc, next_send_psn, 0); MLX5_SET(fpga_qpc, conn->fpga_qpc, pkey, MLX5_FPGA_PKEY); MLX5_SET(fpga_qpc, conn->fpga_qpc, remote_qpn, conn->qp.mqp.qpn); MLX5_SET(fpga_qpc, conn->fpga_qpc, rnr_retry, 7); MLX5_SET(fpga_qpc, conn->fpga_qpc, retry_count, 7); err = 
mlx5_fpga_create_qp(fdev->mdev, &conn->fpga_qpc, &conn->fpga_qpn); if (err) { mlx5_fpga_err(fdev, "Failed to create FPGA RC QP: %d\n", err); ret = ERR_PTR(err); goto err_qp; } err = mlx5_fpga_conn_connect(conn); if (err) { ret = ERR_PTR(err); goto err_conn; } mlx5_fpga_dbg(fdev, "FPGA QPN is %u\n", conn->fpga_qpn); ret = conn; goto out; err_conn: mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn); err_qp: mlx5_fpga_conn_destroy_qp(conn); err_cq: mlx5_fpga_conn_destroy_cq(conn); err_gid: mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index, 0, 0, NULL, NULL, false, 0, MLX5_FPGA_PORT_NUM); err_rsvd_gid: mlx5_core_reserved_gid_free(fdev->mdev, conn->qp.sgid_index); err: kfree(conn); out: return ret; } void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn) { struct mlx5_fpga_device *fdev = conn->fdev; struct mlx5_core_dev *mdev = fdev->mdev; int err = 0; conn->qp.active = false; tasklet_disable(&conn->cq.tasklet); synchronize_irq(conn->cq.mcq.irqn); mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn); err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, NULL, &conn->qp.mqp); if (err) mlx5_fpga_warn(fdev, "qp_modify 2ERR failed: %d\n", err); mlx5_fpga_conn_destroy_qp(conn); mlx5_fpga_conn_destroy_cq(conn); mlx5_core_roce_gid_set(conn->fdev->mdev, conn->qp.sgid_index, 0, 0, NULL, NULL, false, 0, MLX5_FPGA_PORT_NUM); mlx5_core_reserved_gid_free(conn->fdev->mdev, conn->qp.sgid_index); kfree(conn); } int mlx5_fpga_conn_device_init(struct mlx5_fpga_device *fdev) { int err; err = mlx5_nic_vport_enable_roce(fdev->mdev); if (err) { mlx5_fpga_err(fdev, "Failed to enable RoCE: %d\n", err); goto out; } fdev->conn_res.uar = mlx5_get_uars_page(fdev->mdev); if (IS_ERR(fdev->conn_res.uar)) { err = PTR_ERR(fdev->conn_res.uar); mlx5_fpga_err(fdev, "get_uars_page failed, %d\n", err); goto err_roce; } mlx5_fpga_dbg(fdev, "Allocated UAR index %u\n", fdev->conn_res.uar->index); err = mlx5_core_alloc_pd(fdev->mdev, &fdev->conn_res.pdn); if (err) { mlx5_fpga_err(fdev, "alloc pd failed, %d\n", err); goto err_uar; } mlx5_fpga_dbg(fdev, "Allocated PD %u\n", fdev->conn_res.pdn); err = mlx5_fpga_conn_create_mkey(fdev->mdev, fdev->conn_res.pdn, &fdev->conn_res.mkey); if (err) { mlx5_fpga_err(fdev, "create mkey failed, %d\n", err); goto err_dealloc_pd; } mlx5_fpga_dbg(fdev, "Created mkey 0x%x\n", fdev->conn_res.mkey.key); return 0; err_dealloc_pd: mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn); err_uar: mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar); err_roce: mlx5_nic_vport_disable_roce(fdev->mdev); out: return err; } void mlx5_fpga_conn_device_cleanup(struct mlx5_fpga_device *fdev) { mlx5_core_destroy_mkey(fdev->mdev, &fdev->conn_res.mkey); mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn); mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar); mlx5_nic_vport_disable_roce(fdev->mdev); }
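/*
 * A minimal sketch of the ring accounting used by the SQ/RQ handling above:
 * monotonically increasing producer (pc) and consumer (cc) counters, a
 * power-of-two ring size so that "counter & (size - 1)" wraps without a
 * modulo, and a fullness test of "pc - cc >= size" that stays correct across
 * unsigned overflow.  The example_ring type is invented for illustration; in
 * the driver the completion index comes from the CQE's wqe_counter rather
 * than from advancing cc in order, and access is guarded by locks.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8u /* must be a power of two for the mask trick */

struct example_ring {
	void *bufs[RING_SIZE];
	uint32_t pc; /* producer counter, increments forever */
	uint32_t cc; /* consumer counter, increments forever */
};

/* Full when the producer has run RING_SIZE entries ahead of the consumer. */
static bool example_ring_full(const struct example_ring *r)
{
	return r->pc - r->cc >= RING_SIZE;
}

static bool example_ring_post(struct example_ring *r, void *buf)
{
	uint32_t ix;

	if (example_ring_full(r))
		return false;
	ix = r->pc & (RING_SIZE - 1); /* wrap without a modulo */
	r->bufs[ix] = buf;
	r->pc++;
	return true;
}

static void *example_ring_complete(struct example_ring *r)
{
	uint32_t ix;
	void *buf;

	if (r->pc == r->cc)
		return NULL; /* empty */
	ix = r->cc & (RING_SIZE - 1);
	buf = r->bufs[ix];
	r->bufs[ix] = NULL;
	r->cc++;
	return buf;
}

int main(void)
{
	struct example_ring ring = { { NULL }, 0, 0 };
	int payload = 42;

	if (example_ring_post(&ring, &payload))
		printf("completed %p\n", example_ring_complete(&ring));
	return 0;
}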
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1235_0
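/*
 * A hedged sketch of the budget-limited completion polling that
 * mlx5_fpga_conn_cqes() in the file above implements: drain at most `budget`
 * completions per pass; if the budget runs out, defer the remainder (the
 * driver reschedules its tasklet) instead of re-arming, and only re-enable
 * completion notification once the queue ran dry within budget.  The
 * example_cq structure and its helpers are invented stand-ins, not kernel
 * APIs.
 */
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_BUDGET 64

struct example_cq {
	int pending;       /* completions waiting to be processed */
	bool rearm_needed; /* safe to re-enable the next notification */
	bool deferred;     /* more work was left for a later pass */
};

static bool example_poll_one(struct example_cq *cq)
{
	if (cq->pending == 0)
		return false;
	cq->pending--; /* "handle" one completion */
	return true;
}

/* Drain at most `budget` completions, then either defer or re-arm. */
static void example_poll(struct example_cq *cq, unsigned int budget)
{
	while (budget && example_poll_one(cq))
		budget--;

	if (budget == 0) {
		/* Budget exhausted: leave the rest for a later pass. */
		cq->deferred = true;
		return;
	}
	/* Ran dry within budget: safe to re-enable completion notification. */
	cq->rearm_needed = true;
}

int main(void)
{
	struct example_cq cq = { 100, false, false };

	example_poll(&cq, EXAMPLE_BUDGET);
	printf("pending=%d deferred=%d\n", cq.pending, (int)cq.deferred);
	return 0;
}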
crossvul-cpp_data_good_4780_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/channel.h" #include "magick/colormap.h" #include "magick/colormap-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/constitute.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel.h" #include "magick/pixel-accessor.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/registry.h" #include "magick/quantum-private.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #ifdef MAGICKCORE_ZLIB_DELEGATE #include <zlib.h> #endif #include "psd-private.h" /* Define declaractions. */ #define MaxPSDChannels 56 #define PSDQuantum(x) (((ssize_t) (x)+1) & -2) /* Enumerated declaractions. */ typedef enum { Raw = 0, RLE = 1, ZipWithoutPrediction = 2, ZipWithPrediction = 3 } PSDCompressionType; typedef enum { BitmapMode = 0, GrayscaleMode = 1, IndexedMode = 2, RGBMode = 3, CMYKMode = 4, MultichannelMode = 7, DuotoneMode = 8, LabMode = 9 } PSDImageType; /* Typedef declaractions. */ typedef struct _ChannelInfo { short int type; size_t size; } ChannelInfo; typedef struct _MaskInfo { Image *image; RectangleInfo page; unsigned char background, flags; } MaskInfo; typedef struct _LayerInfo { ChannelInfo channel_info[MaxPSDChannels]; char blendkey[4]; Image *image; MaskInfo mask; Quantum opacity; RectangleInfo page; size_t offset_x, offset_y; unsigned char clipping, flags, name[256], visible; unsigned short channels; StringInfo *info; } LayerInfo; /* Forward declarations. */ static MagickBooleanType WritePSDImage(const ImageInfo *,Image *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P S D % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPSD()() returns MagickTrue if the image format type, identified by the % magick string, is PSD. 
% % The format of the IsPSD method is: % % MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((const char *) magick,"8BPS",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPSDImage() reads an Adobe Photoshop image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadPSDImage method is: % % Image *ReadPSDImage(image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static const char *CompositeOperatorToPSDBlendMode(CompositeOperator op) { const char *blend_mode; switch (op) { case ColorBurnCompositeOp: blend_mode = "idiv"; break; case ColorDodgeCompositeOp: blend_mode = "div "; break; case ColorizeCompositeOp: blend_mode = "colr"; break; case DarkenCompositeOp: blend_mode = "dark"; break; case DifferenceCompositeOp: blend_mode = "diff"; break; case DissolveCompositeOp: blend_mode = "diss"; break; case ExclusionCompositeOp: blend_mode = "smud"; break; case HardLightCompositeOp: blend_mode = "hLit"; break; case HardMixCompositeOp: blend_mode = "hMix"; break; case HueCompositeOp: blend_mode = "hue "; break; case LightenCompositeOp: blend_mode = "lite"; break; case LinearBurnCompositeOp: blend_mode = "lbrn"; break; case LinearDodgeCompositeOp:blend_mode = "lddg"; break; case LinearLightCompositeOp:blend_mode = "lLit"; break; case LuminizeCompositeOp: blend_mode = "lum "; break; case MultiplyCompositeOp: blend_mode = "mul "; break; case OverCompositeOp: blend_mode = "norm"; break; case OverlayCompositeOp: blend_mode = "over"; break; case PinLightCompositeOp: blend_mode = "pLit"; break; case SaturateCompositeOp: blend_mode = "sat "; break; case ScreenCompositeOp: blend_mode = "scrn"; break; case SoftLightCompositeOp: blend_mode = "sLit"; break; case VividLightCompositeOp: blend_mode = "vLit"; break; default: blend_mode = "norm"; break; } return(blend_mode); } /* For some reason Photoshop seems to blend semi-transparent pixels with white. This method reverts the blending. This can be disabled by setting the option 'psd:alpha-unblend' to off. 
*/ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info, Image *image, ExceptionInfo* exception) { const char *option; MagickBooleanType status; ssize_t y; if (image->matte == MagickFalse || image->colorspace != sRGBColorspace) return(MagickTrue); option=GetImageOption(image_info,"psd:alpha-unblend"); if (IsStringNotFalse(option) == MagickFalse) return(MagickTrue); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; gamma=QuantumScale*GetPixelAlpha(q); if (gamma != 0.0 && gamma != 1.0) { SetPixelRed(q,(GetPixelRed(q)-((1.0-gamma)*QuantumRange))/gamma); SetPixelGreen(q,(GetPixelGreen(q)-((1.0-gamma)*QuantumRange))/gamma); SetPixelBlue(q,(GetPixelBlue(q)-((1.0-gamma)*QuantumRange))/gamma); } q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity, MagickBooleanType revert,ExceptionInfo *exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying layer opacity %.20g", (double) opacity); if (opacity == QuantumRange) return(MagickTrue); image->matte=MagickTrue; status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(q,(Quantum) (QuantumScale*(GetPixelAlpha(q)*opacity))); else if (opacity > 0) SetPixelAlpha(q,(Quantum) (QuantumRange*(GetPixelAlpha(q)/ (MagickRealType) opacity))); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask, Quantum background,MagickBooleanType revert,ExceptionInfo *exception) { Image *complete_mask; MagickBooleanType status; MagickPixelPacket color; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying opacity mask"); complete_mask=CloneImage(image,image->columns,image->rows,MagickTrue, exception); complete_mask->matte=MagickTrue; GetMagickPixelPacket(complete_mask,&color); color.red=background; SetImageColor(complete_mask,&color); status=CompositeImage(complete_mask,OverCompositeOp,mask, mask->page.x-image->page.x,mask->page.y-image->page.y); if (status == MagickFalse) { complete_mask=DestroyImage(complete_mask); return(status); } image->matte=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for 
schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register PixelPacket *p; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception); if ((q == (PixelPacket *) NULL) || (p == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha=GetPixelAlpha(q); intensity=GetPixelIntensity(complete_mask,p); if (revert == MagickFalse) SetPixelAlpha(q,ClampToQuantum(intensity*(QuantumScale*alpha))); else if (intensity > 0) SetPixelAlpha(q,ClampToQuantum((alpha/intensity)*QuantumRange)); q++; p++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } complete_mask=DestroyImage(complete_mask); return(status); } static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info, ExceptionInfo *exception) { char *key; RandomInfo *random_info; StringInfo *key_info; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " preserving opacity mask"); random_info=AcquireRandomInfo(); key_info=GetRandomKey(random_info,2+1); key=(char *) GetStringInfoDatum(key_info); key[8]=layer_info->mask.background; key[9]='\0'; layer_info->mask.image->page.x+=layer_info->page.x; layer_info->mask.image->page.y+=layer_info->page.y; (void) SetImageRegistry(ImageRegistryType,(const char *) key, layer_info->mask.image,exception); (void) SetImageArtifact(layer_info->image,"psd:opacity-mask", (const char *) key); key_info=DestroyStringInfo(key_info); random_info=DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 
0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(*compact_pixels >> 6) & 0x03; *pixels++=(*compact_pixels >> 4) & 0x03; *pixels++=(*compact_pixels >> 2) & 0x03; *pixels++=(*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++=(*compact_pixels >> 4) & 0xff; *pixels++=(*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++=(*compact_pixels); break; } } compact_pixels++; } } return(i); } static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info, const ssize_t number_layers) { ssize_t i; for (i=0; i<number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image=DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(Image *image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return(2); else if (image->depth > 8) return(2); } else if (image->depth > 8) return(2); return(1); } static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image) { if (psd_info->version == 1) return((MagickSizeType) ReadBlobLong(image)); return((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image *image) { if (image->depth == 1) return(((image->columns+7)/8)*GetPSDPacketSize(image)); else return(image->columns*GetPSDPacketSize(image)); } static const char *ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static void ParseImageResourceBlocks(Image *image, const unsigned char *blocks,size_t length, MagickBooleanType *has_merged_image) { const unsigned char *p; StringInfo *profile; unsigned int count, long_sans; unsigned short id, short_sans; if (length < 16) return; profile=BlobToStringInfo((const void *) NULL,length); SetStringInfoDatum(profile,blocks); (void) SetImageProfile(image,"8bim",profile); profile=DestroyStringInfo(profile); for (p=blocks; (p >= blocks) && (p < (blocks+length-16)); ) { if (LocaleNCompare((const char *) p,"8BIM",4) != 0) 
break; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); if ((p+count) > (blocks+length-16)) return; switch (id) { case 0x03ed: { char value[MaxTextExtent]; unsigned short resolution; /* Resolution info. */ p=PushShortPixel(MSBEndian,p,&resolution); image->x_resolution=(double) resolution; (void) FormatLocaleString(value,MaxTextExtent,"%g", image->x_resolution); (void) SetImageProperty(image,"tiff:XResolution",value); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&resolution); image->y_resolution=(double) resolution; (void) FormatLocaleString(value,MaxTextExtent,"%g", image->y_resolution); (void) SetImageProperty(image,"tiff:YResolution",value); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); image->units=PixelsPerInchResolution; break; } case 0x0421: { if (*(p+4) == 0) *has_merged_image=MagickFalse; p+=count; break; } default: { p+=count; break; } } if ((count & 0x01) != 0) p++; } return; } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char *) NULL) return(OverCompositeOp); if (LocaleNCompare(mode,"norm",4) == 0) return(OverCompositeOp); if (LocaleNCompare(mode,"mul ",4) == 0) return(MultiplyCompositeOp); if (LocaleNCompare(mode,"diss",4) == 0) return(DissolveCompositeOp); if (LocaleNCompare(mode,"diff",4) == 0) return(DifferenceCompositeOp); if (LocaleNCompare(mode,"dark",4) == 0) return(DarkenCompositeOp); if (LocaleNCompare(mode,"lite",4) == 0) return(LightenCompositeOp); if (LocaleNCompare(mode,"hue ",4) == 0) return(HueCompositeOp); if (LocaleNCompare(mode,"sat ",4) == 0) return(SaturateCompositeOp); if (LocaleNCompare(mode,"colr",4) == 0) return(ColorizeCompositeOp); if (LocaleNCompare(mode,"lum ",4) == 0) return(LuminizeCompositeOp); if (LocaleNCompare(mode,"scrn",4) == 0) return(ScreenCompositeOp); if (LocaleNCompare(mode,"over",4) == 0) return(OverlayCompositeOp); if (LocaleNCompare(mode,"hLit",4) == 0) return(HardLightCompositeOp); if (LocaleNCompare(mode,"sLit",4) == 0) return(SoftLightCompositeOp); if (LocaleNCompare(mode,"smud",4) == 0) return(ExclusionCompositeOp); if (LocaleNCompare(mode,"div ",4) == 0) return(ColorDodgeCompositeOp); if (LocaleNCompare(mode,"idiv",4) == 0) return(ColorBurnCompositeOp); if (LocaleNCompare(mode,"lbrn",4) == 0) return(LinearBurnCompositeOp); if (LocaleNCompare(mode,"lddg",4) == 0) return(LinearDodgeCompositeOp); if (LocaleNCompare(mode,"lLit",4) == 0) return(LinearLightCompositeOp); if (LocaleNCompare(mode,"vLit",4) == 0) return(VividLightCompositeOp); if (LocaleNCompare(mode,"pLit",4) == 0) return(PinLightCompositeOp); if (LocaleNCompare(mode,"hMix",4) == 0) return(HardMixCompositeOp); return(OverCompositeOp); } static inline void ReversePSDString(Image *image,char *p,size_t length) { char *q; if (image->endian == MSBEndian) return; q=p+length; for(--q; p < q; ++p, --q) { *p = *p ^ *q, *q = *p ^ *q, *p = *p ^ *q; } } static inline void SetPSDPixel(Image *image,const size_t channels, const ssize_t type,const size_t packet_size,const Quantum pixel, PixelPacket *q,IndexPacket *indexes,ssize_t x) { if (image->storage_class == PseudoClass) { if (packet_size == 1) SetPixelIndex(indexes+x,ScaleQuantumToChar(pixel)); else SetPixelIndex(indexes+x,ScaleQuantumToShort(pixel)); 
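      /*
        After the index has been stored, the matching colormap entry is also
        copied into the pixel's RGB channels so they stay in sync with the
        index channel.
      */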
SetPixelRGBO(q,image->colormap+(ssize_t) ConstrainColormapIndex(image,GetPixelIndex(indexes+x))); return; } switch (type) { case -1: { SetPixelAlpha(q,pixel); break; } case -2: case 0: { SetPixelRed(q,pixel); if (channels == 1 || type == -2) { SetPixelGreen(q,GetPixelRed(q)); SetPixelBlue(q,GetPixelRed(q)); } break; } case 1: { if (image->storage_class == PseudoClass) SetPixelAlpha(q,pixel); else SetPixelGreen(q,pixel); break; } case 2: { if (image->storage_class == PseudoClass) SetPixelAlpha(q,pixel); else SetPixelBlue(q,pixel); break; } case 3: { if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,pixel); else if (image->matte != MagickFalse) SetPixelAlpha(q,pixel); break; } case 4: { if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) && (channels > 3)) break; if (image->matte != MagickFalse) SetPixelAlpha(q,pixel); break; } } } static MagickBooleanType ReadPSDChannelPixels(Image *image,const size_t channels, const size_t row,const ssize_t type,const unsigned char *pixels, ExceptionInfo *exception) { Quantum pixel; register const unsigned char *p; register IndexPacket *indexes; register PixelPacket *q; register ssize_t x; size_t packet_size; unsigned short nibble; p=pixels; q=GetAuthenticPixels(image,0,row,image->columns,1,exception); if (q == (PixelPacket *) NULL) return MagickFalse; indexes=GetAuthenticIndexQueue(image); packet_size=GetPSDPacketSize(image); for (x=0; x < (ssize_t) image->columns; x++) { if (packet_size == 1) pixel=ScaleCharToQuantum(*p++); else { p=PushShortPixel(MSBEndian,p,&nibble); pixel=ScaleShortToQuantum(nibble); } if (image->depth > 1) { SetPSDPixel(image,channels,type,packet_size,pixel,q++,indexes,x); } else { ssize_t bit, number_bits; number_bits=image->columns-x; if (number_bits > 8) number_bits=8; for (bit=0; bit < number_bits; bit++) { SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel) & (0x01 << (7-bit))) != 0 ? 
0 : QuantumRange,q++,indexes,x++); } if (x != (ssize_t) image->columns) x--; continue; } } return(SyncAuthenticPixels(image,exception)); } static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels, const ssize_t type,ExceptionInfo *exception) { MagickBooleanType status; size_t count, row_size; ssize_t y; unsigned char *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RAW"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,row_size,pixels); if (count != row_size) break; status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception); if (status == MagickFalse) break; } pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } static inline MagickOffsetType *ReadPSDRLESizes(Image *image, const PSDInfo *psd_info,const size_t size) { MagickOffsetType *sizes; ssize_t y; sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes)); if(sizes != (MagickOffsetType *) NULL) { for (y=0; y < (ssize_t) size; y++) { if (psd_info->version == 1) sizes[y]=(MagickOffsetType) ReadBlobShort(image); else sizes[y]=(MagickOffsetType) ReadBlobLong(image); } } return sizes; } static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info, const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception) { MagickBooleanType status; size_t length, row_size; ssize_t count, y; unsigned char *compact_pixels, *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RLE compressed"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); length=0; for (y=0; y < (ssize_t) image->rows; y++) if ((MagickOffsetType) length < sizes[y]) length=(size_t) sizes[y]; compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels)); if (compact_pixels == (unsigned char *) NULL) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) ResetMagickMemory(compact_pixels,0,length*sizeof(*compact_pixels)); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,(size_t) sizes[y],compact_pixels); if (count != (ssize_t) sizes[y]) break; count=DecodePSDPixels((size_t) sizes[y],compact_pixels, (ssize_t) (image->depth == 1 ? 
123456 : image->depth),row_size,pixels); if (count != (ssize_t) row_size) break; status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels, exception); if (status == MagickFalse) break; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #ifdef MAGICKCORE_ZLIB_DELEGATE static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels, const ssize_t type,const PSDCompressionType compression, const size_t compact_size,ExceptionInfo *exception) { MagickBooleanType status; register unsigned char *p; size_t count, length, packet_size, row_size; ssize_t y; unsigned char *compact_pixels, *pixels; z_stream stream; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is ZIP compressed"); compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size, sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); packet_size=GetPSDPacketSize(image); row_size=image->columns*packet_size; count=image->rows*row_size; pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) { compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } ResetMagickMemory(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; (void) ReadBlob(image,compact_size,compact_pixels); stream.next_in=(Bytef *)compact_pixels; stream.avail_in=(uInt) compact_size; stream.next_out=(Bytef *)pixels; stream.avail_out=(uInt) count; if (inflateInit(&stream) == Z_OK) { int ret; while (stream.avail_out > 0) { ret=inflate(&stream, Z_SYNC_FLUSH); if ((ret != Z_OK) && (ret != Z_STREAM_END)) { compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(MagickFalse); } } } if (compression == ZipWithPrediction) { p=pixels; while (count > 0) { length=image->columns; while (--length) { if (packet_size == 2) { p[2]+=p[0]+((p[1]+p[3]) >> 8); p[3]+=p[1]; } else *(p+1)+=*p; p+=packet_size; } p+=packet_size; count-=row_size; } } status=MagickTrue; p=pixels; for (y=0; y < (ssize_t) image->rows; y++) { status=ReadPSDChannelPixels(image,channels,y,type,p,exception); if (status == MagickFalse) break; p+=row_size; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #endif static MagickBooleanType ReadPSDChannel(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info, const size_t channel,const PSDCompressionType compression, ExceptionInfo *exception) { Image *channel_image, *mask; MagickOffsetType offset; MagickBooleanType status; channel_image=image; mask=(Image *) NULL; if (layer_info->channel_info[channel].type < -1) { const char *option; /* Ignore mask that is not a user supplied layer mask, if the mask is disabled or if the flags have unsupported values. 
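        Per the Adobe PSD specification, bit 0 of the mask flags records that
        the mask position is relative to the layer and bit 1 records that the
        mask is disabled; any flags value above 2 is treated as unsupported
        here.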
*/ option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if ((layer_info->channel_info[channel].type != -2) || (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) && (IsStringTrue(option) == MagickFalse))) { SeekBlob(image,layer_info->channel_info[channel].size-2,SEEK_CUR); return(MagickTrue); } mask=CloneImage(image,layer_info->mask.page.width, layer_info->mask.page.height,MagickFalse,exception); mask->matte=MagickFalse; channel_image=mask; } offset=TellBlob(image); status=MagickTrue; switch(compression) { case Raw: status=ReadPSDChannelRaw(channel_image,psd_info->channels, layer_info->channel_info[channel].type,exception); break; case RLE: { MagickOffsetType *sizes; sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ReadPSDChannelRLE(channel_image,psd_info, layer_info->channel_info[channel].type,sizes,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); } break; case ZipWithPrediction: case ZipWithoutPrediction: #ifdef MAGICKCORE_ZLIB_DELEGATE status=ReadPSDChannelZip(channel_image,layer_info->channels, layer_info->channel_info[channel].type,compression, layer_info->channel_info[channel].size-2,exception); #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (ZLIB)",image->filename); #endif break; default: (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning, "CompressionNotSupported","'%.20g'",(double) compression); break; } SeekBlob(image,offset+layer_info->channel_info[channel].size-2,SEEK_SET); if (status == MagickFalse) { if (mask != (Image *) NULL) DestroyImage(mask); ThrowBinaryException(CoderError,"UnableToDecompressImage", image->filename); } if (mask != (Image *) NULL) { if (status != MagickFalse) layer_info->mask.image=mask; else mask=DestroyImage(mask); } return(status); } static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info, const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception) { char message[MaxTextExtent]; MagickBooleanType status; PSDCompressionType compression; ssize_t j; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " setting up new layer image"); if (psd_info->mode != IndexedMode) (void) SetImageBackgroundColor(layer_info->image); layer_info->image->compose=PSDBlendModeToCompositeOperator( layer_info->blendkey); if (layer_info->visible == MagickFalse) { layer_info->image->compose=NoCompositeOp; (void) SetImageArtifact(layer_info->image,"psd:layer.invisible","true"); } if (psd_info->mode == CMYKMode) SetImageColorspace(layer_info->image,CMYKColorspace); else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) || (psd_info->mode == GrayscaleMode)) SetImageColorspace(layer_info->image,GRAYColorspace); /* Set up some hidden attributes for folks that need them. 
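        The layer offset and opacity are exposed through the psd:layer.x,
        psd:layer.y and psd:layer.opacity artifacts, and the layer name becomes
        the image's label property.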
*/ (void) FormatLocaleString(message,MaxTextExtent,"%.20g", (double) layer_info->page.x); (void) SetImageArtifact(layer_info->image,"psd:layer.x",message); (void) FormatLocaleString(message,MaxTextExtent,"%.20g", (double) layer_info->page.y); (void) SetImageArtifact(layer_info->image,"psd:layer.y",message); (void) FormatLocaleString(message,MaxTextExtent,"%.20g",(double) layer_info->opacity); (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message); (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name); status=MagickTrue; for (j=0; j < (ssize_t) layer_info->channels; j++) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for channel %.20g",(double) j); compression=(PSDCompressionType) ReadBlobShort(layer_info->image); layer_info->image->compression=ConvertPSDCompression(compression); if (layer_info->channel_info[j].type == -1) layer_info->image->matte=MagickTrue; status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,j, compression,exception); InheritException(exception,&layer_info->image->exception); if (status == MagickFalse) break; } if (status != MagickFalse) status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity, MagickFalse,exception); if ((status != MagickFalse) && (layer_info->image->colorspace == CMYKColorspace)) status=NegateImage(layer_info->image,MagickFalse); if (status != MagickFalse && layer_info->mask.image != (Image *) NULL) { const char *option; layer_info->mask.image->page.x=layer_info->mask.page.x; layer_info->mask.image->page.y=layer_info->mask.page.y; /* Do not composite the mask when it is disabled */ if ((layer_info->mask.flags & 0x02) == 0x02) layer_info->mask.image->compose=NoCompositeOp; else status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image, layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse, exception); option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if (IsStringTrue(option) != MagickFalse) PreservePSDOpacityMask(image,layer_info,exception); layer_info->mask.image=DestroyImage(layer_info->mask.image); } return(status); } ModuleExport MagickBooleanType ReadPSDLayers(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info, const MagickBooleanType skip_layers,ExceptionInfo *exception) { char type[4]; LayerInfo *layer_info; MagickSizeType size; MagickBooleanType status; register ssize_t i; ssize_t count, j, number_layers; size=GetPSDSize(psd_info,image); if (size == 0) { /* Skip layers & masks. */ (void) ReadBlobLong(image); count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); status=MagickFalse; if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0)) return(MagickTrue); else { count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); if ((count != 0) && (LocaleNCompare(type,"Lr16",4) == 0)) size=GetPSDSize(psd_info,image); else return(MagickTrue); } } status=MagickTrue; if (size != 0) { layer_info=(LayerInfo *) NULL; number_layers=(short) ReadBlobShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. 
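          Photoshop signals this with a negative layer count, so the absolute
          value is used and the image is marked as carrying an alpha channel.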
*/ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->matte=MagickTrue; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) ResetMagickMemory(layer_info,0,(size_t) number_layers* sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t x, y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); layer_info[i].page.y=ReadBlobSignedLong(image); layer_info[i].page.x=ReadBlobSignedLong(image); y=ReadBlobSignedLong(image); x=ReadBlobSignedLong(image); layer_info[i].page.width=(size_t) (x-layer_info[i].page.x); layer_info[i].page.height=(size_t) (y-layer_info[i].page.y); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) layer_info[i].channels); for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type=(short) ReadBlobShort(image); layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].type, (double) layer_info[i].channel_info[j].size); } count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } (void) ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey); ReversePSDString(image,layer_info[i].blendkey,4); layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping=(unsigned char) ReadBlobByte(image); layer_info[i].flags=(unsigned char) ReadBlobByte(image); layer_info[i].visible=!(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey,(double) layer_info[i].opacity, layer_info[i].clipping ? "true" : "false",layer_info[i].flags, layer_info[i].visible ? 
"true" : "false"); (void) ReadBlobByte(image); /* filler */ size=ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer contains additional info"); length=ReadBlobLong(image); combined_length=length+4; if (length != 0) { /* Layer mask info. */ layer_info[i].mask.page.y=ReadBlobSignedLong(image); layer_info[i].mask.page.x=ReadBlobSignedLong(image); layer_info[i].mask.page.height=(size_t) (ReadBlobLong(image)- layer_info[i].mask.page.y); layer_info[i].mask.page.width=(size_t) (ReadBlobLong(image)- layer_info[i].mask.page.x); layer_info[i].mask.background=(unsigned char) ReadBlobByte( image); layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y=layer_info[i].mask.page.y- layer_info[i].page.y; layer_info[i].mask.page.x=layer_info[i].mask.page.x- layer_info[i].page.x; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double) layer_info[i].mask.page.x,(double) layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width, (double) layer_info[i].mask.page.height,(double) ((MagickOffsetType) length)-18); /* Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); } } length=ReadBlobLong(image); combined_length+=length+4; if (length != 0) { /* Layer blending ranges info. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer blending ranges: length=%.20g",(double) ((MagickOffsetType) length)); /* We read it, but don't use it... */ for (j=0; j < (ssize_t) length; j+=8) { size_t blend_source=ReadBlobLong(image); size_t blend_dest=ReadBlobLong(image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " source(%x), dest(%x)",(unsigned int) blend_source,(unsigned int) blend_dest); } } /* Layer name. */ length=(MagickSizeType) ReadBlobByte(image); combined_length+=length+1; if (length > 0) (void) ReadBlob(image,(size_t) length++,layer_info[i].name); layer_info[i].name[length]='\0'; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer name: %s",layer_info[i].name); if ((length % 4) != 0) { length=4-(length % 4); combined_length+=length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=(MagickSizeType) size-combined_length; if (length > 0) { unsigned char *info; layer_info[i].info=AcquireStringInfo((const size_t) length); info=GetStringInfoDatum(layer_info[i].info); (void) ReadBlob(image,(const size_t) length,info); } } } for (i=0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); continue; } /* Allocate layered image. 
*/ layer_info[i].image=CloneImage(image,layer_info[i].page.width, layer_info[i].page.height,MagickFalse,exception); if (layer_info[i].image == (Image *) NULL) { layer_info=DestroyLayerInfo(layer_info,number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of image for layer %.20g failed",(double) i); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void) SetImageProfile(layer_info[i].image,"psd:additional-info", layer_info[i].info); layer_info[i].info=DestroyStringInfo(layer_info[i].info); } } if (image_info->ping == MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=0; j < layer_info[i].channels; j++) { if (DiscardBlobBytes(image,(MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } continue; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for layer %.20g",(double) i); status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i], exception); if (status == MagickFalse) break; status=SetImageProgress(image,LoadImagesTag,i,(MagickSizeType) number_layers); if (status == MagickFalse) break; } } if (status != MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=i; j < number_layers - 1; j++) layer_info[j] = layer_info[j+1]; number_layers--; i--; } } if (number_layers > 0) { for (i=0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous=layer_info[i-1].image; if (i < (number_layers-1)) layer_info[i].image->next=layer_info[i+1].image; layer_info[i].image->page=layer_info[i].page; } image->next=layer_info[0].image; layer_info[0].image->previous=image; } layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); } else layer_info=DestroyLayerInfo(layer_info,number_layers); } return(status); } static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info, Image* image,const PSDInfo* psd_info,ExceptionInfo *exception) { MagickOffsetType *sizes; MagickBooleanType status; PSDCompressionType compression; register ssize_t i; compression=(PSDCompressionType) ReadBlobMSBShort(image); image->compression=ConvertPSDCompression(compression); if (compression != Raw && compression != RLE) { (void) ThrowMagickException(exception,GetMagickModule(), TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression); return(MagickFalse); } sizes=(MagickOffsetType *) NULL; if (compression == RLE) { sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } status=MagickTrue; for (i=0; i < (ssize_t) psd_info->channels; i++) { if (compression == RLE) status=ReadPSDChannelRLE(image,psd_info,i,sizes+(i*image->rows), exception); else status=ReadPSDChannelRaw(image,psd_info->channels,i,exception); if (status != MagickFalse) status=SetImageProgress(image,LoadImagesTag,i,psd_info->channels); if (status == MagickFalse) break; } if ((status != MagickFalse) && (image->colorspace == CMYKColorspace)) status=NegateImage(image,MagickFalse); if (status != MagickFalse) status=CorrectPSDAlphaBlend(image_info,image,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); return(status); } static Image 
*ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType has_merged_image, skip_layers; MagickOffsetType offset; MagickSizeType length; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; ssize_t count; unsigned char *data; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read image header. */ image->endian=MSBEndian; count=ReadBlob(image,4,(unsigned char *) psd_info.signature); psd_info.version=ReadBlobMSBShort(image); if ((count == 0) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) || ((psd_info.version != 1) && (psd_info.version != 2))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlob(image,6,psd_info.reserved); psd_info.channels=ReadBlobMSBShort(image); if (psd_info.channels > MaxPSDChannels) ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded"); psd_info.rows=ReadBlobMSBLong(image); psd_info.columns=ReadBlobMSBLong(image); if ((psd_info.version == 1) && ((psd_info.rows > 30000) || (psd_info.columns > 30000))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.depth=ReadBlobMSBShort(image); if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.mode=ReadBlobMSBShort(image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s", (double) psd_info.columns,(double) psd_info.rows,(double) psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType) psd_info.mode)); /* Initialize image. */ image->depth=psd_info.depth; image->columns=psd_info.columns; image->rows=psd_info.rows; status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } if (SetImageBackgroundColor(image) == MagickFalse) { InheritException(exception,&image->exception); image=DestroyImageList(image); return((Image *) NULL); } if (psd_info.mode == LabMode) SetImageColorspace(image,LabColorspace); if (psd_info.mode == CMYKMode) { SetImageColorspace(image,CMYKColorspace); image->matte=psd_info.channels > 4 ? MagickTrue : MagickFalse; } else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) || (psd_info.mode == DuotoneMode)) { status=AcquireImageColormap(image,psd_info.depth != 16 ? 256 : 65536); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image colormap allocated"); SetImageColorspace(image,GRAYColorspace); image->matte=psd_info.channels > 1 ? MagickTrue : MagickFalse; } else image->matte=psd_info.channels > 3 ? MagickTrue : MagickFalse; /* Read PSD raster colormap only present for indexed and duotone images. 
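    Duotone color data is undocumented, so it is read and discarded; an
    indexed colormap is stored as up to 65536 planar entries of red, then
    green, then blue bytes.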
*/ length=ReadBlobMSBLong(image); if (length != 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading colormap"); if (psd_info.mode == DuotoneMode) { /* Duotone image data; the format of this data is undocumented. */ data=(unsigned char *) AcquireQuantumMemory((size_t) length, sizeof(*data)); if (data == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); (void) ReadBlob(image,(size_t) length,data); data=(unsigned char *) RelinquishMagickMemory(data); } else { size_t number_colors; /* Read PSD raster colormap. */ number_colors=length/3; if (number_colors > 65536) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireImageColormap(image,number_colors) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].red=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].green=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].blue=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->matte=MagickFalse; } } if ((image->depth == 1) && (image->storage_class != PseudoClass)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); has_merged_image=MagickTrue; length=ReadBlobMSBLong(image); if (length != 0) { unsigned char *blocks; /* Image resources block. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading image resource blocks - %.20g bytes",(double) ((MagickOffsetType) length)); blocks=(unsigned char *) AcquireQuantumMemory((size_t) length, sizeof(*blocks)); if (blocks == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,(size_t) length,blocks); if ((count != (ssize_t) length) || (length < 4) || (LocaleNCompare((char *) blocks,"8BIM",4) != 0)) { blocks=(unsigned char *) RelinquishMagickMemory(blocks); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } ParseImageResourceBlocks(image,blocks,(size_t) length,&has_merged_image); blocks=(unsigned char *) RelinquishMagickMemory(blocks); } /* Layer and mask block. */ length=GetPSDSize(&psd_info,image); if (length == 8) { length=ReadBlobMSBLong(image); length=ReadBlobMSBLong(image); } offset=TellBlob(image); skip_layers=MagickFalse; if ((image_info->number_scenes == 1) && (image_info->scene == 0) && (has_merged_image != MagickFalse)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " read composite only"); skip_layers=MagickTrue; } if (length == 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has no layers"); } else { if (ReadPSDLayers(image,image_info,&psd_info,skip_layers,exception) != MagickTrue) { (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } /* Skip the rest of the layer and mask information. */ SeekBlob(image,offset+length,SEEK_SET); } /* If we are only "pinging" the image, then we're done - so return. */ if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* Read the precombined layer, present for PSD < 4 compatibility. 
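    If the resource blocks indicate that no merged image was stored, the
    reader falls back to the layer data and flattens the layers into a single
    composite.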
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading the precombined layer"); if (has_merged_image != MagickFalse || GetImageListLength(image) == 1) has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image, &psd_info,exception); if ((has_merged_image == MagickFalse) && (GetImageListLength(image) == 1) && (length != 0)) { SeekBlob(image,offset,SEEK_SET); status=ReadPSDLayers(image,image_info,&psd_info,MagickFalse,exception); if (status != MagickTrue) { (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } } if (has_merged_image == MagickFalse) { Image *merged; if (GetImageListLength(image) == 1) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); SetImageAlphaChannel(image,TransparentAlphaChannel); image->background_color.opacity=TransparentOpacity; merged=MergeImageLayers(image,FlattenLayer,exception); ReplaceImageInList(&image,merged); } (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPSDImage() adds properties for the PSD image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterPSDImage method is: % % size_t RegisterPSDImage(void) % */ ModuleExport size_t RegisterPSDImage(void) { MagickInfo *entry; entry=SetMagickInfo("PSB"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->seekable_stream=MagickTrue; entry->description=ConstantString("Adobe Large Document Format"); entry->module=ConstantString("PSD"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PSD"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->seekable_stream=MagickTrue; entry->description=ConstantString("Adobe Photoshop bitmap"); entry->module=ConstantString("PSD"); (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterPSDImage() removes format registrations made by the % PSD module from the list of supported formats. % % The format of the UnregisterPSDImage method is: % % UnregisterPSDImage(void) % */ ModuleExport void UnregisterPSDImage(void) { (void) UnregisterMagickInfo("PSB"); (void) UnregisterMagickInfo("PSD"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePSDImage() writes an image in the Adobe Photoshop encoded image format. % % The format of the WritePSDImage method is: % % MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. 
% % o image: The image. % */ static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image, const size_t offset) { if (psd_info->version == 1) return(WriteBlobMSBShort(image,(unsigned short) offset)); return(WriteBlobMSBLong(image,(unsigned short) offset)); } static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickSizeType offset) { MagickSizeType current_offset; ssize_t result; current_offset=TellBlob(image); SeekBlob(image,offset,SEEK_SET); if (psd_info->version == 1) result=WriteBlobMSBShort(image,(unsigned short) size); else result=(WriteBlobMSBLong(image,(unsigned short) size)); SeekBlob(image,current_offset,SEEK_SET); return(result); } static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size) { if (psd_info->version == 1) return(WriteBlobMSBLong(image,(unsigned int) size)); return(WriteBlobMSBLongLong(image,size)); } static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickSizeType offset) { MagickSizeType current_offset; ssize_t result; current_offset=TellBlob(image); SeekBlob(image,offset,SEEK_SET); if (psd_info->version == 1) result=WriteBlobMSBLong(image,(unsigned int) size); else result=WriteBlobMSBLongLong(image,size); SeekBlob(image,current_offset,SEEK_SET); return(result); } static size_t PSDPackbitsEncodeImage(Image *image,const size_t length, const unsigned char *pixels,unsigned char *compact_pixels) { int count; register ssize_t i, j; register unsigned char *q; unsigned char *packbits; /* Compress pixels with Packbits encoding. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pixels != (unsigned char *) NULL); assert(compact_pixels != (unsigned char *) NULL); packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits)); if (packbits == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); q=compact_pixels; for (i=(ssize_t) length; i != 0; ) { switch (i) { case 1: { i--; *q++=(unsigned char) 0; *q++=(*pixels); break; } case 2: { i-=2; *q++=(unsigned char) 1; *q++=(*pixels); *q++=pixels[1]; break; } case 3: { i-=3; if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { *q++=(unsigned char) ((256-3)+1); *q++=(*pixels); break; } *q++=(unsigned char) 2; *q++=(*pixels); *q++=pixels[1]; *q++=pixels[2]; break; } default: { if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { /* Packed run. */ count=3; while (((ssize_t) count < i) && (*pixels == *(pixels+count))) { count++; if (count >= 127) break; } i-=count; *q++=(unsigned char) ((256-count)+1); *q++=(*pixels); pixels+=count; break; } /* Literal run. 
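          The header byte stores (count-1) and is followed by count literal
          bytes; a literal run is capped at 127 bytes and ends as soon as three
          identical bytes are found, which can then start a packed run.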
*/ count=0; while ((*(pixels+count) != *(pixels+count+1)) || (*(pixels+count+1) != *(pixels+count+2))) { packbits[count+1]=pixels[count]; count++; if (((ssize_t) count >= (i-3)) || (count >= 127)) break; } i-=count; *packbits=(unsigned char) (count-1); for (j=0; j <= (ssize_t) count; j++) *q++=packbits[j]; pixels+=count; break; } } } *q++=(unsigned char) 128; /* EOD marker */ packbits=(unsigned char *) RelinquishMagickMemory(packbits); return((size_t) (q-compact_pixels)); } static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image, const Image *next_image,const ssize_t channels) { size_t length; ssize_t i, y; if (next_image->compression == RLECompression) { length=WriteBlobMSBShort(image,RLE); for (i=0; i < channels; i++) for (y=0; y < (ssize_t) next_image->rows; y++) length+=SetPSDOffset(psd_info,image,0); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (next_image->compression == ZipCompression) length=WriteBlobMSBShort(image,ZipWithoutPrediction); #endif else length=WriteBlobMSBShort(image,Raw); return(length); } static size_t WritePSDChannel(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, const QuantumType quantum_type, unsigned char *compact_pixels, MagickOffsetType size_offset,const MagickBooleanType separate) { int y; MagickBooleanType monochrome; QuantumInfo *quantum_info; register const PixelPacket *p; register ssize_t i; size_t count, length; unsigned char *pixels; #ifdef MAGICKCORE_ZLIB_DELEGATE #define CHUNK 16384 int flush, level; unsigned char *compressed_pixels; z_stream stream; compressed_pixels=(unsigned char *) NULL; flush=Z_NO_FLUSH; #endif count=0; if (separate != MagickFalse) { size_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,1); } if (next_image->depth > 8) next_image->depth=16; monochrome=IsMonochromeImage(image,&image->exception) && (image->depth == 1) ? 
MagickTrue : MagickFalse; quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) return(0); pixels=GetQuantumPixels(quantum_info); #ifdef MAGICKCORE_ZLIB_DELEGATE if (next_image->compression == ZipCompression) { compressed_pixels=AcquireQuantumMemory(CHUNK,sizeof(*compressed_pixels)); if (compressed_pixels == (unsigned char *) NULL) { quantum_info=DestroyQuantumInfo(quantum_info); return(0); } ResetMagickMemory(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; level=Z_DEFAULT_COMPRESSION; if ((image_info->quality > 0 && image_info->quality < 10)) level=(int) image_info->quality; if (deflateInit(&stream,level) != Z_OK) { quantum_info=DestroyQuantumInfo(quantum_info); return(0); } } #endif for (y=0; y < (ssize_t) next_image->rows; y++) { p=GetVirtualPixels(next_image,0,y,next_image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info, quantum_type,pixels,&image->exception); if (monochrome != MagickFalse) for (i=0; i < (ssize_t) length; i++) pixels[i]=(~pixels[i]); if (next_image->compression == RLECompression) { length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels); count+=WriteBlob(image,length,compact_pixels); size_offset+=WritePSDOffset(psd_info,image,length,size_offset); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (next_image->compression == ZipCompression) { stream.avail_in=(uInt) length; stream.next_in=(Bytef *) pixels; if (y == (ssize_t) next_image->rows-1) flush=Z_FINISH; do { stream.avail_out=(uInt) CHUNK; stream.next_out=(Bytef *) compressed_pixels; if (deflate(&stream,flush) == Z_STREAM_ERROR) break; length=(size_t) CHUNK-stream.avail_out; if (length > 0) count+=WriteBlob(image,length,compressed_pixels); } while (stream.avail_out == 0); } #endif else count+=WriteBlob(image,length,pixels); } #ifdef MAGICKCORE_ZLIB_DELEGATE if (next_image->compression == ZipCompression) { (void) deflateEnd(&stream); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); } #endif quantum_info=DestroyQuantumInfo(quantum_info); return(count); } static unsigned char *AcquireCompactPixels(Image *image) { size_t packet_size; unsigned char *compact_pixels; packet_size=image->depth > 8UL ? 2UL : 1UL; compact_pixels=(unsigned char *) AcquireQuantumMemory((9* image->columns)+1,packet_size*sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); } return(compact_pixels); } static MagickBooleanType WritePSDChannels(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, MagickOffsetType size_offset,const MagickBooleanType separate) { Image *mask; MagickOffsetType rows_offset; size_t channels, count, length, offset_length; unsigned char *compact_pixels; count=0; offset_length=0; rows_offset=0; compact_pixels=(unsigned char *) NULL; if (next_image->compression == RLECompression) { compact_pixels=AcquireCompactPixels(image); if (compact_pixels == (unsigned char *) NULL) return(0); } channels=1; if (separate == MagickFalse) { if (next_image->storage_class != PseudoClass) { if (IsGrayImage(next_image,&next_image->exception) == MagickFalse) channels=next_image->colorspace == CMYKColorspace ? 
4 : 3; if (next_image->matte != MagickFalse) channels++; } rows_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,channels); offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4)); } size_offset+=2; if (next_image->storage_class == PseudoClass) { length=WritePSDChannel(psd_info,image_info,image,next_image, IndexQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (IsGrayImage(next_image,&next_image->exception) != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, GrayQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (next_image->colorspace == CMYKColorspace) (void) NegateImage(next_image,MagickFalse); length=WritePSDChannel(psd_info,image_info,image,next_image, RedQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, GreenQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, BlueQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; if (next_image->colorspace == CMYKColorspace) { length=WritePSDChannel(psd_info,image_info,image,next_image, BlackQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } if (next_image->matte != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, AlphaQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void) NegateImage(next_image,MagickFalse); if (separate != MagickFalse) { const char *property; property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, &image->exception); if (mask != (Image *) NULL) { if (mask->compression == RLECompression) { compact_pixels=AcquireCompactPixels(mask); if (compact_pixels == (unsigned char *) NULL) return(0); } length=WritePSDChannel(psd_info,image_info,image,mask, RedQuantum,compact_pixels,rows_offset,MagickTrue); (void) WritePSDSize(psd_info,image,length,size_offset); count+=length; compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); } } } return(count); } static size_t WritePascalString(Image *image,const char *value,size_t padding) { size_t count, length; register ssize_t i; /* Max length is 255. */ count=0; length=(strlen(value) > 255UL ) ? 
255UL : strlen(value); if (length == 0) count+=WriteBlobByte(image,0); else { count+=WriteBlobByte(image,(unsigned char) length); count+=WriteBlob(image,length,(const unsigned char *) value); } length++; if ((length % padding) == 0) return(count); for (i=0; i < (ssize_t) (padding-(length % padding)); i++) count+=WriteBlobByte(image,0); return(count); } static void WriteResolutionResourceBlock(Image *image) { double x_resolution, y_resolution; unsigned short units; if (image->units == PixelsPerCentimeterResolution) { x_resolution=2.54*65536.0*image->x_resolution+0.5; y_resolution=2.54*65536.0*image->y_resolution+0.5; units=2; } else { x_resolution=65536.0*image->x_resolution+0.5; y_resolution=65536.0*image->y_resolution+0.5; units=1; } (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x03ED); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,16); /* resource size */ (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */ (void) WriteBlobMSBShort(image,units); /* width unit */ (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */ (void) WriteBlobMSBShort(image,units); /* height unit */ } static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image, const signed short channel) { size_t count; count=WriteBlobMSBSignedShort(image,channel); count+=SetPSDSize(psd_info,image,0); return(count); } static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { register unsigned char *q; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); if (id == 0x0000040f) { ssize_t quantum; quantum=PSDQuantum(count)+12; if ((quantum >= 12) && (quantum < length)) { if ((q+quantum < (datum+length-16))) (void) CopyMagickMemory(q,q+quantum,length-quantum-(q-datum)); SetStringInfoLength(bim_profile,length-quantum); } break; } p+=count; if ((count & 0x01) != 0) p++; } } static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { register unsigned char *q; ssize_t cnt; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) return; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); cnt=PSDQuantum(count); if (cnt < 0) return; if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12))) { (void) CopyMagickMemory(q,q+cnt+12,length-(cnt+12)-(q-datum)); SetStringInfoLength(bim_profile,length-(cnt+12)); break; } p+=count; if ((count & 0x01) != 0) p++; } } static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info, Image *image) { #define PSDKeySize 5 #define PSDAllowedLength 
36 char key[PSDKeySize]; /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */ const char allowed[PSDAllowedLength][PSDKeySize] = { "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk", "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr", "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl", "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA" }, *option; const StringInfo *info; MagickBooleanType found; register size_t i; size_t remaining_length, length; StringInfo *profile; unsigned char *p; unsigned int size; info=GetImageProfile(image,"psd:additional-info"); if (info == (const StringInfo *) NULL) return((const StringInfo *) NULL); option=GetImageOption(image_info,"psd:additional-info"); if (LocaleCompare(option,"all") == 0) return(info); if (LocaleCompare(option,"selective") != 0) { profile=RemoveImageProfile(image,"psd:additional-info"); return(DestroyStringInfo(profile)); } length=GetStringInfoLength(info); p=GetStringInfoDatum(info); remaining_length=length; length=0; while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(*p++); key[1]=(*p++); key[2]=(*p++); key[3]=(*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) return((const StringInfo *) NULL); found=MagickFalse; for (i=0; i < PSDAllowedLength; i++) { if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0) continue; found=MagickTrue; break; } remaining_length-=(size_t) size; if (found == MagickFalse) { if (remaining_length > 0) p=(unsigned char *) CopyMagickMemory(p-12,p+size,remaining_length); continue; } length+=(size_t) size+12; p+=size; } profile=RemoveImageProfile(image,"psd:additional-info"); if (length == 0) return(DestroyStringInfo(profile)); SetStringInfoLength(profile,(const size_t) length); SetImageProfile(image,"psd:additional-info",info); return(profile); } static MagickBooleanType WritePSDImage(const ImageInfo *image_info, Image *image) { char layer_name[MaxTextExtent]; const char *property; const StringInfo *icc_profile, *info; Image *base_image, *next_image; MagickBooleanType status; MagickOffsetType *layer_size_offsets, size_offset; PSDInfo psd_info; register ssize_t i; size_t layer_count, layer_index, length, name_length, num_channels, packet_size, rounded_size, size; StringInfo *bim_profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); packet_size=(size_t) (image->depth > 8 ? 6 : 3); if (image->matte != MagickFalse) packet_size+=image->depth > 8 ? 2 : 1; psd_info.version=1; if ((LocaleCompare(image_info->magick,"PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version=2; (void) WriteBlob(image,4,(const unsigned char *) "8BPS"); (void) WriteBlobMSBShort(image,psd_info.version); /* version */ for (i=1; i <= 6; i++) (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */ if (SetImageGray(image,&image->exception) != MagickFalse) num_channels=(image->matte != MagickFalse ? 
2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorMatteType) && (image->storage_class == PseudoClass)) num_channels=(image->matte != MagickFalse ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void) SetImageStorageClass(image,DirectClass); if (image->colorspace != CMYKColorspace) num_channels=(image->matte != MagickFalse ? 4UL : 3UL); else num_channels=(image->matte != MagickFalse ? 5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsGrayImage(image,&image->exception) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. */ monochrome=IsMonochromeImage(image,&image->exception) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace); (void) WriteBlobMSBShort(image,CMYKMode); } } if ((IsGrayImage(image,&image->exception) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. */ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar( image->colormap[i].green)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. 
*/ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((MagickOffsetType) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); } base_image=GetNextImageInList(image); if (base_image == (Image *)NULL) base_image=image; size=0; size_offset=TellBlob(image); SetPSDSize(&psd_info,image,0); SetPSDSize(&psd_info,image,0); layer_count=0; for (next_image=base_image; next_image != NULL; ) { layer_count++; next_image=GetNextImageInList(next_image); } if (image->matte != MagickFalse) size+=WriteBlobMSBShort(image,-(unsigned short) layer_count); else size+=WriteBlobMSBShort(image,(unsigned short) layer_count); layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count,sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); layer_index=0; for (next_image=base_image; next_image != NULL; ) { Image *mask; unsigned char default_color; unsigned short channels, total_channels; mask=(Image *) NULL; property=GetImageArtifact(next_image,"psd:opacity-mask"); default_color=0; if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, &image->exception); default_color=strlen(property) == 9 ? 255 : 0; } size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.y); size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.x); size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.y+ next_image->rows)); size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.x+ next_image->columns)); channels=1U; if ((next_image->storage_class != PseudoClass) && (IsGrayImage(next_image,&next_image->exception) == MagickFalse)) channels=next_image->colorspace == CMYKColorspace ? 
4U : 3U; total_channels=channels; if (next_image->matte != MagickFalse) total_channels++; if (mask != (Image *) NULL) total_channels++; size+=WriteBlobMSBShort(image,total_channels); layer_size_offsets[layer_index++]=TellBlob(image); for (i=0; i < (ssize_t) channels; i++) size+=WriteChannelSize(&psd_info,image,(signed short) i); if (next_image->matte != MagickFalse) size+=WriteChannelSize(&psd_info,image,-1); if (mask != (Image *) NULL) size+=WriteChannelSize(&psd_info,image,-2); size+=WriteBlob(image,4,(const unsigned char *) "8BIM"); size+=WriteBlob(image,4,(const unsigned char *) CompositeOperatorToPSDBlendMode(next_image->compose)); property=GetImageArtifact(next_image,"psd:layer.opacity"); if (property != (const char *) NULL) { Quantum opacity; opacity=(Quantum) StringToInteger(property); size+=WriteBlobByte(image,ScaleQuantumToChar(opacity)); (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue, &image->exception); } else size+=WriteBlobByte(image,255); size+=WriteBlobByte(image,0); size+=WriteBlobByte(image,next_image->compose==NoCompositeOp ? 1 << 0x02 : 1); /* layer properties - visible, etc. */ size+=WriteBlobByte(image,0); info=GetAdditionalInformation(image_info,next_image); property=(const char *) GetImageProperty(next_image,"label"); if (property == (const char *) NULL) { (void) FormatLocaleString(layer_name,MaxTextExtent,"L%.20g", (double) layer_index); property=layer_name; } name_length=strlen(property)+1; if ((name_length % 4) != 0) name_length+=(4-(name_length % 4)); if (info != (const StringInfo *) NULL) name_length+=GetStringInfoLength(info); name_length+=8; if (mask != (Image *) NULL) name_length+=20; size+=WriteBlobMSBLong(image,(unsigned int) name_length); if (mask == (Image *) NULL) size+=WriteBlobMSBLong(image,0); else { if (mask->compose != NoCompositeOp) (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum( default_color),MagickTrue,&image->exception); mask->page.y+=image->page.y; mask->page.x+=image->page.x; size+=WriteBlobMSBLong(image,20); size+=WriteBlobMSBSignedLong(image,mask->page.y); size+=WriteBlobMSBSignedLong(image,mask->page.x); size+=WriteBlobMSBLong(image,mask->rows+mask->page.y); size+=WriteBlobMSBLong(image,mask->columns+mask->page.x); size+=WriteBlobByte(image,default_color); size+=WriteBlobByte(image,mask->compose == NoCompositeOp ? 2 : 0); size+=WriteBlobMSBShort(image,0); } size+=WriteBlobMSBLong(image,0); size+=WritePascalString(image,property,4); if (info != (const StringInfo *) NULL) size+=WriteBlob(image,GetStringInfoLength(info), GetStringInfoDatum(info)); next_image=GetNextImageInList(next_image); } /* Now the image data! */ next_image=base_image; layer_index=0; while (next_image != NULL) { length=WritePSDChannels(&psd_info,image_info,image,next_image, layer_size_offsets[layer_index++],MagickTrue); if (length == 0) { status=MagickFalse; break; } size+=length; next_image=GetNextImageInList(next_image); } (void) WriteBlobMSBLong(image,0); /* user mask data */ /* Remove the opacity mask from the registry */ next_image=base_image; while (next_image != (Image *) NULL) { property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) DeleteImageRegistry(property); next_image=GetNextImageInList(next_image); } /* Write the total size */ size_offset+=WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 
8 : 16),size_offset); if ((size/2) != ((size+1)/2)) rounded_size=size+1; else rounded_size=size; (void) WritePSDSize(&psd_info,image,rounded_size,size_offset); layer_size_offsets=RelinquishMagickMemory(layer_size_offsets); /* Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression=image->compression; if (image->compression == ZipCompression) image->compression=RLECompression; if (WritePSDChannels(&psd_info,image_info,image,image,0, MagickFalse) == 0) status=MagickFalse; image->compression=compression; } (void) CloseBlob(image); return(status); }
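/*
 * Editor's illustrative sketch, not part of the coders/psd.c code above:
 * WritePSDImage pads every layer name the same way WritePascalString()
 * emits it -- one length byte plus the characters, rounded up to the
 * requested alignment (4 bytes for layer records).  The helper below shows
 * only that arithmetic; padded_pascal_length() is a hypothetical name, not
 * an ImageMagick API.
 */
#include <stddef.h>
#include <string.h>

static size_t padded_pascal_length(const char *value,size_t padding)
{
  size_t
    length;

  length=strlen(value);
  if (length > 255)                /* the length is stored in a single byte */
    length=255;
  length++;                        /* account for the length byte itself */
  if ((length % padding) != 0)
    length+=padding-(length % padding);
  return(length);
}
/* e.g. padded_pascal_length("L1",4) == 4, padded_pascal_length("Layer 1",4) == 8 */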
./CrossVul/dataset_final_sorted/CWE-400/c/good_4780_0
crossvul-cpp_data_good_1323_1
// SPDX-License-Identifier: GPL-2.0 /* * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH) * * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * * Interactivity improvements by Mike Galbraith * (C) 2007 Mike Galbraith <efault@gmx.de> * * Various enhancements by Dmitry Adamushko. * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com> * * Group scheduling enhancements by Srivatsa Vaddagiri * Copyright IBM Corporation, 2007 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com> * * Scaled math optimizations by Thomas Gleixner * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de> * * Adaptive scheduling granularity, math enhancements by Peter Zijlstra * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra */ #include "sched.h" #include <trace/events/sched.h> /* * Targeted preemption latency for CPU-bound tasks: * * NOTE: this latency value is not the same as the concept of * 'timeslice length' - timeslices in CFS are of variable length * and have no persistent notion like in traditional, time-slice * based scheduling concepts. * * (to see the precise effective timeslice length of your workload, * run vmstat and monitor the context-switches (cs) field) * * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds) */ unsigned int sysctl_sched_latency = 6000000ULL; static unsigned int normalized_sysctl_sched_latency = 6000000ULL; /* * The initial- and re-scaling of tunables is configurable * * Options are: * * SCHED_TUNABLESCALING_NONE - unscaled, always *1 * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus) * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus * * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)) */ enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG; /* * Minimal preemption granularity for CPU-bound tasks: * * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) */ unsigned int sysctl_sched_min_granularity = 750000ULL; static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL; /* * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity */ static unsigned int sched_nr_latency = 8; /* * After fork, child runs first. If set to 0 (default) then * parent will (try to) run first. */ unsigned int sysctl_sched_child_runs_first __read_mostly; /* * SCHED_OTHER wake-up granularity. * * This option delays the preemption effects of decoupled workloads * and reduces their over-scheduling. Synchronous workloads will still * have immediate wakeup/sleep latencies. * * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) */ unsigned int sysctl_sched_wakeup_granularity = 1000000UL; static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; const_debug unsigned int sysctl_sched_migration_cost = 500000UL; #ifdef CONFIG_SMP /* * For asym packing, by default the lower numbered CPU has higher priority. */ int __weak arch_asym_cpu_priority(int cpu) { return -cpu; } /* * The margin used when comparing utilization with CPU capacity. * * (default: ~20%) */ #define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024) #endif #ifdef CONFIG_CFS_BANDWIDTH /* * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool * each time a cfs_rq requests quota. * * Note: in the case that the slice exceeds the runtime remaining (either due * to consumption or the quota being specified to be smaller than the slice) * we will always only issue the remaining available time. 
* * (default: 5 msec, units: microseconds) */ unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL; #endif static inline void update_load_add(struct load_weight *lw, unsigned long inc) { lw->weight += inc; lw->inv_weight = 0; } static inline void update_load_sub(struct load_weight *lw, unsigned long dec) { lw->weight -= dec; lw->inv_weight = 0; } static inline void update_load_set(struct load_weight *lw, unsigned long w) { lw->weight = w; lw->inv_weight = 0; } /* * Increase the granularity value when there are more CPUs, * because with more CPUs the 'effective latency' as visible * to users decreases. But the relationship is not linear, * so pick a second-best guess by going with the log2 of the * number of CPUs. * * This idea comes from the SD scheduler of Con Kolivas: */ static unsigned int get_update_sysctl_factor(void) { unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8); unsigned int factor; switch (sysctl_sched_tunable_scaling) { case SCHED_TUNABLESCALING_NONE: factor = 1; break; case SCHED_TUNABLESCALING_LINEAR: factor = cpus; break; case SCHED_TUNABLESCALING_LOG: default: factor = 1 + ilog2(cpus); break; } return factor; } static void update_sysctl(void) { unsigned int factor = get_update_sysctl_factor(); #define SET_SYSCTL(name) \ (sysctl_##name = (factor) * normalized_sysctl_##name) SET_SYSCTL(sched_min_granularity); SET_SYSCTL(sched_latency); SET_SYSCTL(sched_wakeup_granularity); #undef SET_SYSCTL } void sched_init_granularity(void) { update_sysctl(); } #define WMULT_CONST (~0U) #define WMULT_SHIFT 32 static void __update_inv_weight(struct load_weight *lw) { unsigned long w; if (likely(lw->inv_weight)) return; w = scale_load_down(lw->weight); if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) lw->inv_weight = 1; else if (unlikely(!w)) lw->inv_weight = WMULT_CONST; else lw->inv_weight = WMULT_CONST / w; } /* * delta_exec * weight / lw.weight * OR * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT * * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case * we're guaranteed shift stays positive because inv_weight is guaranteed to * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22. * * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus * weight/lw.weight <= 1, and therefore our shift will also be positive. 
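 *
 * A small worked example with illustrative numbers (not taken from the
 * kernel sources): for delta_exec = 1000000, weight = 1024 and
 * lw->weight = 2048, the fixed-point math below reduces to roughly
 * 1000000 * 1024 / 2048 = 500000, i.e. delta_exec scaled by
 * weight / lw->weight.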
*/ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw) { u64 fact = scale_load_down(weight); int shift = WMULT_SHIFT; __update_inv_weight(lw); if (unlikely(fact >> 32)) { while (fact >> 32) { fact >>= 1; shift--; } } /* hint to use a 32x32->64 mul */ fact = (u64)(u32)fact * lw->inv_weight; while (fact >> 32) { fact >>= 1; shift--; } return mul_u64_u32_shr(delta_exec, fact, shift); } const struct sched_class fair_sched_class; /************************************************************** * CFS operations on generic schedulable entities: */ #ifdef CONFIG_FAIR_GROUP_SCHED static inline struct task_struct *task_of(struct sched_entity *se) { SCHED_WARN_ON(!entity_is_task(se)); return container_of(se, struct task_struct, se); } /* Walk up scheduling entities hierarchy */ #define for_each_sched_entity(se) \ for (; se; se = se->parent) static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) { return p->se.cfs_rq; } /* runqueue on which this entity is (to be) queued */ static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) { return se->cfs_rq; } /* runqueue "owned" by this group */ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) { return grp->my_q; } static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len) { if (!path) return; if (cfs_rq && task_group_is_autogroup(cfs_rq->tg)) autogroup_path(cfs_rq->tg, path, len); else if (cfs_rq && cfs_rq->tg->css.cgroup) cgroup_path(cfs_rq->tg->css.cgroup, path, len); else strlcpy(path, "(null)", len); } static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) { struct rq *rq = rq_of(cfs_rq); int cpu = cpu_of(rq); if (cfs_rq->on_list) return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list; cfs_rq->on_list = 1; /* * Ensure we either appear before our parent (if already * enqueued) or force our parent to appear after us when it is * enqueued. The fact that we always enqueue bottom-up * reduces this to two cases and a special case for the root * cfs_rq. Furthermore, it also means that we will always reset * tmp_alone_branch either when the branch is connected * to a tree or when we reach the top of the tree */ if (cfs_rq->tg->parent && cfs_rq->tg->parent->cfs_rq[cpu]->on_list) { /* * If parent is already on the list, we add the child * just before. Thanks to circular linked property of * the list, this means to put the child at the tail * of the list that starts by parent. */ list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list)); /* * The branch is now connected to its tree so we can * reset tmp_alone_branch to the beginning of the * list. */ rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; return true; } if (!cfs_rq->tg->parent) { /* * cfs rq without parent should be put * at the tail of the list. */ list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list); /* * We have reach the top of a tree so we can reset * tmp_alone_branch to the beginning of the list. */ rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; return true; } /* * The parent has not already been added so we want to * make sure that it will be put after us. * tmp_alone_branch points to the begin of the branch * where we will add parent. 
*/ list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch); /* * update tmp_alone_branch to points to the new begin * of the branch */ rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list; return false; } static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) { if (cfs_rq->on_list) { struct rq *rq = rq_of(cfs_rq); /* * With cfs_rq being unthrottled/throttled during an enqueue, * it can happen the tmp_alone_branch points the a leaf that * we finally want to del. In this case, tmp_alone_branch moves * to the prev element but it will point to rq->leaf_cfs_rq_list * at the end of the enqueue. */ if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list) rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev; list_del_rcu(&cfs_rq->leaf_cfs_rq_list); cfs_rq->on_list = 0; } } static inline void assert_list_leaf_cfs_rq(struct rq *rq) { SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list); } /* Iterate thr' all leaf cfs_rq's on a runqueue */ #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \ leaf_cfs_rq_list) /* Do the two (enqueued) entities belong to the same group ? */ static inline struct cfs_rq * is_same_group(struct sched_entity *se, struct sched_entity *pse) { if (se->cfs_rq == pse->cfs_rq) return se->cfs_rq; return NULL; } static inline struct sched_entity *parent_entity(struct sched_entity *se) { return se->parent; } static void find_matching_se(struct sched_entity **se, struct sched_entity **pse) { int se_depth, pse_depth; /* * preemption test can be made between sibling entities who are in the * same cfs_rq i.e who have a common parent. Walk up the hierarchy of * both tasks until we find their ancestors who are siblings of common * parent. */ /* First walk up until both entities are at same depth */ se_depth = (*se)->depth; pse_depth = (*pse)->depth; while (se_depth > pse_depth) { se_depth--; *se = parent_entity(*se); } while (pse_depth > se_depth) { pse_depth--; *pse = parent_entity(*pse); } while (!is_same_group(*se, *pse)) { *se = parent_entity(*se); *pse = parent_entity(*pse); } } #else /* !CONFIG_FAIR_GROUP_SCHED */ static inline struct task_struct *task_of(struct sched_entity *se) { return container_of(se, struct task_struct, se); } #define for_each_sched_entity(se) \ for (; se; se = NULL) static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) { return &task_rq(p)->cfs; } static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) { struct task_struct *p = task_of(se); struct rq *rq = task_rq(p); return &rq->cfs; } /* runqueue "owned" by this group */ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) { return NULL; } static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len) { if (path) strlcpy(path, "(null)", len); } static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) { return true; } static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) { } static inline void assert_list_leaf_cfs_rq(struct rq *rq) { } #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos) static inline struct sched_entity *parent_entity(struct sched_entity *se) { return NULL; } static inline void find_matching_se(struct sched_entity **se, struct sched_entity **pse) { } #endif /* CONFIG_FAIR_GROUP_SCHED */ static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec); /************************************************************** * Scheduling class tree data structure manipulation 
methods: */ static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime) { s64 delta = (s64)(vruntime - max_vruntime); if (delta > 0) max_vruntime = vruntime; return max_vruntime; } static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime) { s64 delta = (s64)(vruntime - min_vruntime); if (delta < 0) min_vruntime = vruntime; return min_vruntime; } static inline int entity_before(struct sched_entity *a, struct sched_entity *b) { return (s64)(a->vruntime - b->vruntime) < 0; } static void update_min_vruntime(struct cfs_rq *cfs_rq) { struct sched_entity *curr = cfs_rq->curr; struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline); u64 vruntime = cfs_rq->min_vruntime; if (curr) { if (curr->on_rq) vruntime = curr->vruntime; else curr = NULL; } if (leftmost) { /* non-empty tree */ struct sched_entity *se; se = rb_entry(leftmost, struct sched_entity, run_node); if (!curr) vruntime = se->vruntime; else vruntime = min_vruntime(vruntime, se->vruntime); } /* ensure we never gain time by being placed backwards. */ cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime); #ifndef CONFIG_64BIT smp_wmb(); cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; #endif } /* * Enqueue an entity into the rb-tree: */ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) { struct rb_node **link = &cfs_rq->tasks_timeline.rb_root.rb_node; struct rb_node *parent = NULL; struct sched_entity *entry; bool leftmost = true; /* * Find the right place in the rbtree: */ while (*link) { parent = *link; entry = rb_entry(parent, struct sched_entity, run_node); /* * We dont care about collisions. Nodes with * the same key stay together. */ if (entity_before(se, entry)) { link = &parent->rb_left; } else { link = &parent->rb_right; leftmost = false; } } rb_link_node(&se->run_node, parent, link); rb_insert_color_cached(&se->run_node, &cfs_rq->tasks_timeline, leftmost); } static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) { rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline); } struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) { struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline); if (!left) return NULL; return rb_entry(left, struct sched_entity, run_node); } static struct sched_entity *__pick_next_entity(struct sched_entity *se) { struct rb_node *next = rb_next(&se->run_node); if (!next) return NULL; return rb_entry(next, struct sched_entity, run_node); } #ifdef CONFIG_SCHED_DEBUG struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) { struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root); if (!last) return NULL; return rb_entry(last, struct sched_entity, run_node); } /************************************************************** * Scheduling class statistics methods: */ int sched_proc_update_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); unsigned int factor = get_update_sysctl_factor(); if (ret || !write) return ret; sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency, sysctl_sched_min_granularity); #define WRT_SYSCTL(name) \ (normalized_sysctl_##name = sysctl_##name / (factor)) WRT_SYSCTL(sched_min_granularity); WRT_SYSCTL(sched_latency); WRT_SYSCTL(sched_wakeup_granularity); #undef WRT_SYSCTL return 0; } #endif /* * delta /= w */ static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) { if (unlikely(se->load.weight != NICE_0_LOAD)) delta = __calc_delta(delta, NICE_0_LOAD, 
&se->load); return delta; } /* * The idea is to set a period in which each task runs once. * * When there are too many tasks (sched_nr_latency) we have to stretch * this period because otherwise the slices get too small. * * p = (nr <= nl) ? l : l*nr/nl */ static u64 __sched_period(unsigned long nr_running) { if (unlikely(nr_running > sched_nr_latency)) return nr_running * sysctl_sched_min_granularity; else return sysctl_sched_latency; } /* * We calculate the wall-time slice from the period by taking a part * proportional to the weight. * * s = p*P[w/rw] */ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) { u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq); for_each_sched_entity(se) { struct load_weight *load; struct load_weight lw; cfs_rq = cfs_rq_of(se); load = &cfs_rq->load; if (unlikely(!se->on_rq)) { lw = cfs_rq->load; update_load_add(&lw, se->load.weight); load = &lw; } slice = __calc_delta(slice, se->load.weight, load); } return slice; } /* * We calculate the vruntime slice of a to-be-inserted task. * * vs = s/w */ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) { return calc_delta_fair(sched_slice(cfs_rq, se), se); } #include "pelt.h" #ifdef CONFIG_SMP static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu); static unsigned long task_h_load(struct task_struct *p); static unsigned long capacity_of(int cpu); /* Give new sched_entity start runnable values to heavy its load in infant time */ void init_entity_runnable_average(struct sched_entity *se) { struct sched_avg *sa = &se->avg; memset(sa, 0, sizeof(*sa)); /* * Tasks are initialized with full load to be seen as heavy tasks until * they get a chance to stabilize to their real load level. * Group entities are initialized with zero load to reflect the fact that * nothing has been attached to the task group yet. */ if (entity_is_task(se)) sa->runnable_load_avg = sa->load_avg = scale_load_down(se->load.weight); se->runnable_weight = se->load.weight; /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */ } static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq); static void attach_entity_cfs_rq(struct sched_entity *se); /* * With new tasks being created, their initial util_avgs are extrapolated * based on the cfs_rq's current util_avg: * * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight * * However, in many cases, the above util_avg does not give a desired * value. Moreover, the sum of the util_avgs may be divergent, such * as when the series is a harmonic series. * * To solve this problem, we also cap the util_avg of successive tasks to * only 1/2 of the left utilization budget: * * util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n * * where n denotes the nth task and cpu_scale the CPU capacity. * * For example, for a CPU with 1024 of capacity, a simplest series from * the beginning would be like: * * task util_avg: 512, 256, 128, 64, 32, 16, 8, ... * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ... * * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap) * if util_avg > util_avg_cap. 
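 *
 * A further illustrative data point (numbers not from the kernel sources):
 * with cpu_scale = 1024 and cfs_rq->avg.util_avg = 512, the code below
 * computes util_avg_cap = (1024 - 512) / 2 = 256, so the extrapolated
 * util_avg of the next newly forked task cannot exceed 256.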
*/ void post_init_entity_util_avg(struct task_struct *p) { struct sched_entity *se = &p->se; struct cfs_rq *cfs_rq = cfs_rq_of(se); struct sched_avg *sa = &se->avg; long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq))); long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2; if (cap > 0) { if (cfs_rq->avg.util_avg != 0) { sa->util_avg = cfs_rq->avg.util_avg * se->load.weight; sa->util_avg /= (cfs_rq->avg.load_avg + 1); if (sa->util_avg > cap) sa->util_avg = cap; } else { sa->util_avg = cap; } } if (p->sched_class != &fair_sched_class) { /* * For !fair tasks do: * update_cfs_rq_load_avg(now, cfs_rq); attach_entity_load_avg(cfs_rq, se, 0); switched_from_fair(rq, p); * * such that the next switched_to_fair() has the * expected state. */ se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq); return; } attach_entity_cfs_rq(se); } #else /* !CONFIG_SMP */ void init_entity_runnable_average(struct sched_entity *se) { } void post_init_entity_util_avg(struct task_struct *p) { } static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) { } #endif /* CONFIG_SMP */ /* * Update the current task's runtime statistics. */ static void update_curr(struct cfs_rq *cfs_rq) { struct sched_entity *curr = cfs_rq->curr; u64 now = rq_clock_task(rq_of(cfs_rq)); u64 delta_exec; if (unlikely(!curr)) return; delta_exec = now - curr->exec_start; if (unlikely((s64)delta_exec <= 0)) return; curr->exec_start = now; schedstat_set(curr->statistics.exec_max, max(delta_exec, curr->statistics.exec_max)); curr->sum_exec_runtime += delta_exec; schedstat_add(cfs_rq->exec_clock, delta_exec); curr->vruntime += calc_delta_fair(delta_exec, curr); update_min_vruntime(cfs_rq); if (entity_is_task(curr)) { struct task_struct *curtask = task_of(curr); trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime); cgroup_account_cputime(curtask, delta_exec); account_group_exec_runtime(curtask, delta_exec); } account_cfs_rq_runtime(cfs_rq, delta_exec); } static void update_curr_fair(struct rq *rq) { update_curr(cfs_rq_of(&rq->curr->se)); } static inline void update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) { u64 wait_start, prev_wait_start; if (!schedstat_enabled()) return; wait_start = rq_clock(rq_of(cfs_rq)); prev_wait_start = schedstat_val(se->statistics.wait_start); if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) && likely(wait_start > prev_wait_start)) wait_start -= prev_wait_start; __schedstat_set(se->statistics.wait_start, wait_start); } static inline void update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) { struct task_struct *p; u64 delta; if (!schedstat_enabled()) return; delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start); if (entity_is_task(se)) { p = task_of(se); if (task_on_rq_migrating(p)) { /* * Preserve migrating task's wait time so wait_start * time stamp can be adjusted to accumulate wait time * prior to migration. 
*/ __schedstat_set(se->statistics.wait_start, delta); return; } trace_sched_stat_wait(p, delta); } __schedstat_set(se->statistics.wait_max, max(schedstat_val(se->statistics.wait_max), delta)); __schedstat_inc(se->statistics.wait_count); __schedstat_add(se->statistics.wait_sum, delta); __schedstat_set(se->statistics.wait_start, 0); } static inline void update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) { struct task_struct *tsk = NULL; u64 sleep_start, block_start; if (!schedstat_enabled()) return; sleep_start = schedstat_val(se->statistics.sleep_start); block_start = schedstat_val(se->statistics.block_start); if (entity_is_task(se)) tsk = task_of(se); if (sleep_start) { u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start; if ((s64)delta < 0) delta = 0; if (unlikely(delta > schedstat_val(se->statistics.sleep_max))) __schedstat_set(se->statistics.sleep_max, delta); __schedstat_set(se->statistics.sleep_start, 0); __schedstat_add(se->statistics.sum_sleep_runtime, delta); if (tsk) { account_scheduler_latency(tsk, delta >> 10, 1); trace_sched_stat_sleep(tsk, delta); } } if (block_start) { u64 delta = rq_clock(rq_of(cfs_rq)) - block_start; if ((s64)delta < 0) delta = 0; if (unlikely(delta > schedstat_val(se->statistics.block_max))) __schedstat_set(se->statistics.block_max, delta); __schedstat_set(se->statistics.block_start, 0); __schedstat_add(se->statistics.sum_sleep_runtime, delta); if (tsk) { if (tsk->in_iowait) { __schedstat_add(se->statistics.iowait_sum, delta); __schedstat_inc(se->statistics.iowait_count); trace_sched_stat_iowait(tsk, delta); } trace_sched_stat_blocked(tsk, delta); /* * Blocking time is in units of nanosecs, so shift by * 20 to get a milliseconds-range estimation of the * amount of time that the task spent sleeping: */ if (unlikely(prof_on == SLEEP_PROFILING)) { profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk), delta >> 20); } account_scheduler_latency(tsk, delta >> 10, 0); } } } /* * Task is being enqueued - update stats: */ static inline void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { if (!schedstat_enabled()) return; /* * Are we enqueueing a waiting task? (for current tasks * a dequeue/enqueue event is a NOP) */ if (se != cfs_rq->curr) update_stats_wait_start(cfs_rq, se); if (flags & ENQUEUE_WAKEUP) update_stats_enqueue_sleeper(cfs_rq, se); } static inline void update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { if (!schedstat_enabled()) return; /* * Mark the end of the wait period if dequeueing a * waiting task: */ if (se != cfs_rq->curr) update_stats_wait_end(cfs_rq, se); if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) { struct task_struct *tsk = task_of(se); if (tsk->state & TASK_INTERRUPTIBLE) __schedstat_set(se->statistics.sleep_start, rq_clock(rq_of(cfs_rq))); if (tsk->state & TASK_UNINTERRUPTIBLE) __schedstat_set(se->statistics.block_start, rq_clock(rq_of(cfs_rq))); } } /* * We are picking a new current task - update its stats: */ static inline void update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) { /* * We are starting a new run period: */ se->exec_start = rq_clock_task(rq_of(cfs_rq)); } /************************************************** * Scheduling class queueing methods: */ #ifdef CONFIG_NUMA_BALANCING /* * Approximate time to scan a full NUMA task in ms. The task scan period is * calculated based on the tasks virtual memory size and * numa_balancing_scan_size. 
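 *
 * As a rough illustrative example, assuming 4KiB pages: with the default
 * numa_balancing_scan_size of 256MB, one scan window covers 65536 pages,
 * so a task with (say) a 1GB RSS is covered in about four windows
 * (see task_nr_scan_windows() below).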
*/ unsigned int sysctl_numa_balancing_scan_period_min = 1000; unsigned int sysctl_numa_balancing_scan_period_max = 60000; /* Portion of address space to scan in MB */ unsigned int sysctl_numa_balancing_scan_size = 256; /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */ unsigned int sysctl_numa_balancing_scan_delay = 1000; struct numa_group { refcount_t refcount; spinlock_t lock; /* nr_tasks, tasks */ int nr_tasks; pid_t gid; int active_nodes; struct rcu_head rcu; unsigned long total_faults; unsigned long max_faults_cpu; /* * Faults_cpu is used to decide whether memory should move * towards the CPU. As a consequence, these stats are weighted * more by CPU use than by memory faults. */ unsigned long *faults_cpu; unsigned long faults[0]; }; /* * For functions that can be called in multiple contexts that permit reading * ->numa_group (see struct task_struct for locking rules). */ static struct numa_group *deref_task_numa_group(struct task_struct *p) { return rcu_dereference_check(p->numa_group, p == current || (lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu))); } static struct numa_group *deref_curr_numa_group(struct task_struct *p) { return rcu_dereference_protected(p->numa_group, p == current); } static inline unsigned long group_faults_priv(struct numa_group *ng); static inline unsigned long group_faults_shared(struct numa_group *ng); static unsigned int task_nr_scan_windows(struct task_struct *p) { unsigned long rss = 0; unsigned long nr_scan_pages; /* * Calculations based on RSS as non-present and empty pages are skipped * by the PTE scanner and NUMA hinting faults should be trapped based * on resident pages */ nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT); rss = get_mm_rss(p->mm); if (!rss) rss = nr_scan_pages; rss = round_up(rss, nr_scan_pages); return rss / nr_scan_pages; } /* For sanitys sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */ #define MAX_SCAN_WINDOW 2560 static unsigned int task_scan_min(struct task_struct *p) { unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size); unsigned int scan, floor; unsigned int windows = 1; if (scan_size < MAX_SCAN_WINDOW) windows = MAX_SCAN_WINDOW / scan_size; floor = 1000 / windows; scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p); return max_t(unsigned int, floor, scan); } static unsigned int task_scan_start(struct task_struct *p) { unsigned long smin = task_scan_min(p); unsigned long period = smin; struct numa_group *ng; /* Scale the maximum scan period with the amount of shared memory. */ rcu_read_lock(); ng = rcu_dereference(p->numa_group); if (ng) { unsigned long shared = group_faults_shared(ng); unsigned long private = group_faults_priv(ng); period *= refcount_read(&ng->refcount); period *= shared + 1; period /= private + shared + 1; } rcu_read_unlock(); return max(smin, period); } static unsigned int task_scan_max(struct task_struct *p) { unsigned long smin = task_scan_min(p); unsigned long smax; struct numa_group *ng; /* Watch for min being lower than max due to floor calculations */ smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p); /* Scale the maximum scan period with the amount of shared memory. 
*/ ng = deref_curr_numa_group(p); if (ng) { unsigned long shared = group_faults_shared(ng); unsigned long private = group_faults_priv(ng); unsigned long period = smax; period *= refcount_read(&ng->refcount); period *= shared + 1; period /= private + shared + 1; smax = max(smax, period); } return max(smin, smax); } static void account_numa_enqueue(struct rq *rq, struct task_struct *p) { rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE); rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p)); } static void account_numa_dequeue(struct rq *rq, struct task_struct *p) { rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE); rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p)); } /* Shared or private faults. */ #define NR_NUMA_HINT_FAULT_TYPES 2 /* Memory and CPU locality */ #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2) /* Averaged statistics, and temporary buffers. */ #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2) pid_t task_numa_group_id(struct task_struct *p) { struct numa_group *ng; pid_t gid = 0; rcu_read_lock(); ng = rcu_dereference(p->numa_group); if (ng) gid = ng->gid; rcu_read_unlock(); return gid; } /* * The averaged statistics, shared & private, memory & CPU, * occupy the first half of the array. The second half of the * array is for current counters, which are averaged into the * first set by task_numa_placement. */ static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv) { return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv; } static inline unsigned long task_faults(struct task_struct *p, int nid) { if (!p->numa_faults) return 0; return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] + p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)]; } static inline unsigned long group_faults(struct task_struct *p, int nid) { struct numa_group *ng = deref_task_numa_group(p); if (!ng) return 0; return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] + ng->faults[task_faults_idx(NUMA_MEM, nid, 1)]; } static inline unsigned long group_faults_cpu(struct numa_group *group, int nid) { return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] + group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)]; } static inline unsigned long group_faults_priv(struct numa_group *ng) { unsigned long faults = 0; int node; for_each_online_node(node) { faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)]; } return faults; } static inline unsigned long group_faults_shared(struct numa_group *ng) { unsigned long faults = 0; int node; for_each_online_node(node) { faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)]; } return faults; } /* * A node triggering more than 1/3 as many NUMA faults as the maximum is * considered part of a numa group's pseudo-interleaving set. Migrations * between these nodes are slowed down, to allow things to settle down. */ #define ACTIVE_NODE_FRACTION 3 static bool numa_is_active_node(int nid, struct numa_group *ng) { return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu; } /* Handle placement on systems where not all nodes are directly connected. */ static unsigned long score_nearby_nodes(struct task_struct *p, int nid, int maxdist, bool task) { unsigned long score = 0; int node; /* * All nodes are directly connected, and the same distance * from each other. No need for fancy placement algorithms. 
*/ if (sched_numa_topology_type == NUMA_DIRECT) return 0; /* * This code is called for each node, introducing N^2 complexity, * which should be ok given the number of nodes rarely exceeds 8. */ for_each_online_node(node) { unsigned long faults; int dist = node_distance(nid, node); /* * The furthest away nodes in the system are not interesting * for placement; nid was already counted. */ if (dist == sched_max_numa_distance || node == nid) continue; /* * On systems with a backplane NUMA topology, compare groups * of nodes, and move tasks towards the group with the most * memory accesses. When comparing two nodes at distance * "hoplimit", only nodes closer by than "hoplimit" are part * of each group. Skip other nodes. */ if (sched_numa_topology_type == NUMA_BACKPLANE && dist >= maxdist) continue; /* Add up the faults from nearby nodes. */ if (task) faults = task_faults(p, node); else faults = group_faults(p, node); /* * On systems with a glueless mesh NUMA topology, there are * no fixed "groups of nodes". Instead, nodes that are not * directly connected bounce traffic through intermediate * nodes; a numa_group can occupy any set of nodes. * The further away a node is, the less the faults count. * This seems to result in good task placement. */ if (sched_numa_topology_type == NUMA_GLUELESS_MESH) { faults *= (sched_max_numa_distance - dist); faults /= (sched_max_numa_distance - LOCAL_DISTANCE); } score += faults; } return score; } /* * These return the fraction of accesses done by a particular task, or * task group, on a particular numa node. The group weight is given a * larger multiplier, in order to group tasks together that are almost * evenly spread out between numa nodes. */ static inline unsigned long task_weight(struct task_struct *p, int nid, int dist) { unsigned long faults, total_faults; if (!p->numa_faults) return 0; total_faults = p->total_numa_faults; if (!total_faults) return 0; faults = task_faults(p, nid); faults += score_nearby_nodes(p, nid, dist, true); return 1000 * faults / total_faults; } static inline unsigned long group_weight(struct task_struct *p, int nid, int dist) { struct numa_group *ng = deref_task_numa_group(p); unsigned long faults, total_faults; if (!ng) return 0; total_faults = ng->total_faults; if (!total_faults) return 0; faults = group_faults(p, nid); faults += score_nearby_nodes(p, nid, dist, false); return 1000 * faults / total_faults; } bool should_numa_migrate_memory(struct task_struct *p, struct page * page, int src_nid, int dst_cpu) { struct numa_group *ng = deref_curr_numa_group(p); int dst_nid = cpu_to_node(dst_cpu); int last_cpupid, this_cpupid; this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid); last_cpupid = page_cpupid_xchg_last(page, this_cpupid); /* * Allow first faults or private faults to migrate immediately early in * the lifetime of a task. The magic number 4 is based on waiting for * two full passes of the "multi-stage node selection" test that is * executed below. */ if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) && (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid))) return true; /* * Multi-stage node selection is used in conjunction with a periodic * migration fault to build a temporal task<->page relation. By using * a two-stage filter we remove short/unlikely relations. * * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate * a task's usage of a particular page (n_p) per total usage of this * page (n_t) (in a given time-span) to a probability. 
* * Our periodic faults will sample this probability and getting the * same result twice in a row, given these samples are fully * independent, is then given by P(n)^2, provided our sample period * is sufficiently short compared to the usage pattern. * * This quadric squishes small probabilities, making it less likely we * act on an unlikely task<->page relation. */ if (!cpupid_pid_unset(last_cpupid) && cpupid_to_nid(last_cpupid) != dst_nid) return false; /* Always allow migrate on private faults */ if (cpupid_match_pid(p, last_cpupid)) return true; /* A shared fault, but p->numa_group has not been set up yet. */ if (!ng) return true; /* * Destination node is much more heavily used than the source * node? Allow migration. */ if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) * ACTIVE_NODE_FRACTION) return true; /* * Distribute memory according to CPU & memory use on each node, * with 3/4 hysteresis to avoid unnecessary memory migrations: * * faults_cpu(dst) 3 faults_cpu(src) * --------------- * - > --------------- * faults_mem(dst) 4 faults_mem(src) */ return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 > group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4; } static unsigned long cpu_runnable_load(struct rq *rq); /* Cached statistics for all CPUs within a node */ struct numa_stats { unsigned long load; /* Total compute capacity of CPUs on a node */ unsigned long compute_capacity; }; /* * XXX borrowed from update_sg_lb_stats */ static void update_numa_stats(struct numa_stats *ns, int nid) { int cpu; memset(ns, 0, sizeof(*ns)); for_each_cpu(cpu, cpumask_of_node(nid)) { struct rq *rq = cpu_rq(cpu); ns->load += cpu_runnable_load(rq); ns->compute_capacity += capacity_of(cpu); } } struct task_numa_env { struct task_struct *p; int src_cpu, src_nid; int dst_cpu, dst_nid; struct numa_stats src_stats, dst_stats; int imbalance_pct; int dist; struct task_struct *best_task; long best_imp; int best_cpu; }; static void task_numa_assign(struct task_numa_env *env, struct task_struct *p, long imp) { struct rq *rq = cpu_rq(env->dst_cpu); /* Bail out if run-queue part of active NUMA balance. */ if (xchg(&rq->numa_migrate_on, 1)) return; /* * Clear previous best_cpu/rq numa-migrate flag, since task now * found a better CPU to move/swap. */ if (env->best_cpu != -1) { rq = cpu_rq(env->best_cpu); WRITE_ONCE(rq->numa_migrate_on, 0); } if (env->best_task) put_task_struct(env->best_task); if (p) get_task_struct(p); env->best_task = p; env->best_imp = imp; env->best_cpu = env->dst_cpu; } static bool load_too_imbalanced(long src_load, long dst_load, struct task_numa_env *env) { long imb, old_imb; long orig_src_load, orig_dst_load; long src_capacity, dst_capacity; /* * The load is corrected for the CPU capacity available on each node. * * src_load dst_load * ------------ vs --------- * src_capacity dst_capacity */ src_capacity = env->src_stats.compute_capacity; dst_capacity = env->dst_stats.compute_capacity; imb = abs(dst_load * src_capacity - src_load * dst_capacity); orig_src_load = env->src_stats.load; orig_dst_load = env->dst_stats.load; old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity); /* Would this change make things worse? */ return (imb > old_imb); } /* * Maximum NUMA importance can be 1998 (2*999); * SMALLIMP @ 30 would be close to 1998/64. * Used to deter task migration. 
*/ #define SMALLIMP 30 /* * This checks if the overall compute and NUMA accesses of the system would * be improved if the source tasks was migrated to the target dst_cpu taking * into account that it might be best if task running on the dst_cpu should * be exchanged with the source task */ static void task_numa_compare(struct task_numa_env *env, long taskimp, long groupimp, bool maymove) { struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p); struct rq *dst_rq = cpu_rq(env->dst_cpu); long imp = p_ng ? groupimp : taskimp; struct task_struct *cur; long src_load, dst_load; int dist = env->dist; long moveimp = imp; long load; if (READ_ONCE(dst_rq->numa_migrate_on)) return; rcu_read_lock(); cur = task_rcu_dereference(&dst_rq->curr); if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur))) cur = NULL; /* * Because we have preemption enabled we can get migrated around and * end try selecting ourselves (current == env->p) as a swap candidate. */ if (cur == env->p) goto unlock; if (!cur) { if (maymove && moveimp >= env->best_imp) goto assign; else goto unlock; } /* * "imp" is the fault differential for the source task between the * source and destination node. Calculate the total differential for * the source task and potential destination task. The more negative * the value is, the more remote accesses that would be expected to * be incurred if the tasks were swapped. */ /* Skip this swap candidate if cannot move to the source cpu */ if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr)) goto unlock; /* * If dst and source tasks are in the same NUMA group, or not * in any group then look only at task weights. */ cur_ng = rcu_dereference(cur->numa_group); if (cur_ng == p_ng) { imp = taskimp + task_weight(cur, env->src_nid, dist) - task_weight(cur, env->dst_nid, dist); /* * Add some hysteresis to prevent swapping the * tasks within a group over tiny differences. */ if (cur_ng) imp -= imp / 16; } else { /* * Compare the group weights. If a task is all by itself * (not part of a group), use the task weight instead. */ if (cur_ng && p_ng) imp += group_weight(cur, env->src_nid, dist) - group_weight(cur, env->dst_nid, dist); else imp += task_weight(cur, env->src_nid, dist) - task_weight(cur, env->dst_nid, dist); } if (maymove && moveimp > imp && moveimp > env->best_imp) { imp = moveimp; cur = NULL; goto assign; } /* * If the NUMA importance is less than SMALLIMP, * task migration might only result in ping pong * of tasks and also hurt performance due to cache * misses. */ if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2) goto unlock; /* * In the overloaded case, try and keep the load balanced. */ load = task_h_load(env->p) - task_h_load(cur); if (!load) goto assign; dst_load = env->dst_stats.load + load; src_load = env->src_stats.load - load; if (load_too_imbalanced(src_load, dst_load, env)) goto unlock; assign: /* * One idle CPU per node is evaluated for a task numa move. * Call select_idle_sibling to maybe find a better one. */ if (!cur) { /* * select_idle_siblings() uses an per-CPU cpumask that * can be used from IRQ context. 
*/ local_irq_disable(); env->dst_cpu = select_idle_sibling(env->p, env->src_cpu, env->dst_cpu); local_irq_enable(); } task_numa_assign(env, cur, imp); unlock: rcu_read_unlock(); } static void task_numa_find_cpu(struct task_numa_env *env, long taskimp, long groupimp) { long src_load, dst_load, load; bool maymove = false; int cpu; load = task_h_load(env->p); dst_load = env->dst_stats.load + load; src_load = env->src_stats.load - load; /* * If the improvement from just moving env->p direction is better * than swapping tasks around, check if a move is possible. */ maymove = !load_too_imbalanced(src_load, dst_load, env); for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { /* Skip this CPU if the source task cannot migrate */ if (!cpumask_test_cpu(cpu, env->p->cpus_ptr)) continue; env->dst_cpu = cpu; task_numa_compare(env, taskimp, groupimp, maymove); } } static int task_numa_migrate(struct task_struct *p) { struct task_numa_env env = { .p = p, .src_cpu = task_cpu(p), .src_nid = task_node(p), .imbalance_pct = 112, .best_task = NULL, .best_imp = 0, .best_cpu = -1, }; unsigned long taskweight, groupweight; struct sched_domain *sd; long taskimp, groupimp; struct numa_group *ng; struct rq *best_rq; int nid, ret, dist; /* * Pick the lowest SD_NUMA domain, as that would have the smallest * imbalance and would be the first to start moving tasks about. * * And we want to avoid any moving of tasks about, as that would create * random movement of tasks -- counter the numa conditions we're trying * to satisfy here. */ rcu_read_lock(); sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu)); if (sd) env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2; rcu_read_unlock(); /* * Cpusets can break the scheduler domain tree into smaller * balance domains, some of which do not cross NUMA boundaries. * Tasks that are "trapped" in such domains cannot be migrated * elsewhere, so there is no point in (re)trying. */ if (unlikely(!sd)) { sched_setnuma(p, task_node(p)); return -EINVAL; } env.dst_nid = p->numa_preferred_nid; dist = env.dist = node_distance(env.src_nid, env.dst_nid); taskweight = task_weight(p, env.src_nid, dist); groupweight = group_weight(p, env.src_nid, dist); update_numa_stats(&env.src_stats, env.src_nid); taskimp = task_weight(p, env.dst_nid, dist) - taskweight; groupimp = group_weight(p, env.dst_nid, dist) - groupweight; update_numa_stats(&env.dst_stats, env.dst_nid); /* Try to find a spot on the preferred nid. */ task_numa_find_cpu(&env, taskimp, groupimp); /* * Look at other nodes in these cases: * - there is no space available on the preferred_nid * - the task is part of a numa_group that is interleaved across * multiple NUMA nodes; in order to better consolidate the group, * we need to check other locations. 
*/ ng = deref_curr_numa_group(p); if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) { for_each_online_node(nid) { if (nid == env.src_nid || nid == p->numa_preferred_nid) continue; dist = node_distance(env.src_nid, env.dst_nid); if (sched_numa_topology_type == NUMA_BACKPLANE && dist != env.dist) { taskweight = task_weight(p, env.src_nid, dist); groupweight = group_weight(p, env.src_nid, dist); } /* Only consider nodes where both task and groups benefit */ taskimp = task_weight(p, nid, dist) - taskweight; groupimp = group_weight(p, nid, dist) - groupweight; if (taskimp < 0 && groupimp < 0) continue; env.dist = dist; env.dst_nid = nid; update_numa_stats(&env.dst_stats, env.dst_nid); task_numa_find_cpu(&env, taskimp, groupimp); } } /* * If the task is part of a workload that spans multiple NUMA nodes, * and is migrating into one of the workload's active nodes, remember * this node as the task's preferred numa node, so the workload can * settle down. * A task that migrated to a second choice node will be better off * trying for a better one later. Do not set the preferred node here. */ if (ng) { if (env.best_cpu == -1) nid = env.src_nid; else nid = cpu_to_node(env.best_cpu); if (nid != p->numa_preferred_nid) sched_setnuma(p, nid); } /* No better CPU than the current one was found. */ if (env.best_cpu == -1) return -EAGAIN; best_rq = cpu_rq(env.best_cpu); if (env.best_task == NULL) { ret = migrate_task_to(p, env.best_cpu); WRITE_ONCE(best_rq->numa_migrate_on, 0); if (ret != 0) trace_sched_stick_numa(p, env.src_cpu, env.best_cpu); return ret; } ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu); WRITE_ONCE(best_rq->numa_migrate_on, 0); if (ret != 0) trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task)); put_task_struct(env.best_task); return ret; } /* Attempt to migrate a task to a CPU on the preferred node. */ static void numa_migrate_preferred(struct task_struct *p) { unsigned long interval = HZ; /* This task has no NUMA fault statistics yet */ if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults)) return; /* Periodically retry migrating the task to the preferred node */ interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16); p->numa_migrate_retry = jiffies + interval; /* Success if task is already running on preferred CPU */ if (task_node(p) == p->numa_preferred_nid) return; /* Otherwise, try migrate to a CPU on the preferred node */ task_numa_migrate(p); } /* * Find out how many nodes on the workload is actively running on. Do this by * tracking the nodes from which NUMA hinting faults are triggered. This can * be different from the set of nodes where the workload's memory is currently * located. */ static void numa_group_count_active_nodes(struct numa_group *numa_group) { unsigned long faults, max_faults = 0; int nid, active_nodes = 0; for_each_online_node(nid) { faults = group_faults_cpu(numa_group, nid); if (faults > max_faults) max_faults = faults; } for_each_online_node(nid) { faults = group_faults_cpu(numa_group, nid); if (faults * ACTIVE_NODE_FRACTION > max_faults) active_nodes++; } numa_group->max_faults_cpu = max_faults; numa_group->active_nodes = active_nodes; } /* * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS * increments. The more local the fault statistics are, the higher the scan * period will be for the next scan window. If local/(local+remote) ratio is * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS) * the scan period will decrease. Aim for 70% local accesses. 
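 *
 * For example, with NUMA_PERIOD_SLOTS = 10 and NUMA_PERIOD_THRESHOLD = 7
 * (the values defined just below), a task whose hinting faults are 90%
 * local sits at ratio 9/10, above the threshold, so its scan period grows
 * and scanning slows down; a task at only 40% local sits at 4/10, below
 * the threshold, so its period shrinks and scanning speeds up. The
 * crossover at 7/10 is what gives the "aim for 70% local" behaviour.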
*/ #define NUMA_PERIOD_SLOTS 10 #define NUMA_PERIOD_THRESHOLD 7 /* * Increase the scan period (slow down scanning) if the majority of * our memory is already on our local node, or if the majority of * the page accesses are shared with other processes. * Otherwise, decrease the scan period. */ static void update_task_scan_period(struct task_struct *p, unsigned long shared, unsigned long private) { unsigned int period_slot; int lr_ratio, ps_ratio; int diff; unsigned long remote = p->numa_faults_locality[0]; unsigned long local = p->numa_faults_locality[1]; /* * If there were no record hinting faults then either the task is * completely idle or all activity is areas that are not of interest * to automatic numa balancing. Related to that, if there were failed * migration then it implies we are migrating too quickly or the local * node is overloaded. In either case, scan slower */ if (local + shared == 0 || p->numa_faults_locality[2]) { p->numa_scan_period = min(p->numa_scan_period_max, p->numa_scan_period << 1); p->mm->numa_next_scan = jiffies + msecs_to_jiffies(p->numa_scan_period); return; } /* * Prepare to scale scan period relative to the current period. * == NUMA_PERIOD_THRESHOLD scan period stays the same * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster) * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower) */ period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS); lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote); ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared); if (ps_ratio >= NUMA_PERIOD_THRESHOLD) { /* * Most memory accesses are local. There is no need to * do fast NUMA scanning, since memory is already local. */ int slot = ps_ratio - NUMA_PERIOD_THRESHOLD; if (!slot) slot = 1; diff = slot * period_slot; } else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) { /* * Most memory accesses are shared with other tasks. * There is no point in continuing fast NUMA scanning, * since other tasks may just move the memory elsewhere. */ int slot = lr_ratio - NUMA_PERIOD_THRESHOLD; if (!slot) slot = 1; diff = slot * period_slot; } else { /* * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS, * yet they are not on the local NUMA node. Speed up * NUMA scanning to get the memory moved over. */ int ratio = max(lr_ratio, ps_ratio); diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot; } p->numa_scan_period = clamp(p->numa_scan_period + diff, task_scan_min(p), task_scan_max(p)); memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); } /* * Get the fraction of time the task has been running since the last * NUMA placement cycle. The scheduler keeps similar statistics, but * decays those on a 32ms period, which is orders of magnitude off * from the dozens-of-seconds NUMA balancing period. Use the scheduler * stats only if the task is so new there are no NUMA statistics yet. */ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period) { u64 runtime, delta, now; /* Use the start of this time slice to avoid calculations. */ now = p->se.exec_start; runtime = p->se.sum_exec_runtime; if (p->last_task_numa_placement) { delta = runtime - p->last_sum_exec_runtime; *period = now - p->last_task_numa_placement; /* Avoid time going backwards, prevent potential divide error: */ if (unlikely((s64)*period < 0)) *period = 0; } else { delta = p->se.avg.load_sum; *period = LOAD_AVG_MAX; } p->last_sum_exec_runtime = runtime; p->last_task_numa_placement = now; return delta; } /* * Determine the preferred nid for a task in a numa_group. 
This needs to * be done in a way that produces consistent results with group_weight, * otherwise workloads might not converge. */ static int preferred_group_nid(struct task_struct *p, int nid) { nodemask_t nodes; int dist; /* Direct connections between all NUMA nodes. */ if (sched_numa_topology_type == NUMA_DIRECT) return nid; /* * On a system with glueless mesh NUMA topology, group_weight * scores nodes according to the number of NUMA hinting faults on * both the node itself, and on nearby nodes. */ if (sched_numa_topology_type == NUMA_GLUELESS_MESH) { unsigned long score, max_score = 0; int node, max_node = nid; dist = sched_max_numa_distance; for_each_online_node(node) { score = group_weight(p, node, dist); if (score > max_score) { max_score = score; max_node = node; } } return max_node; } /* * Finding the preferred nid in a system with NUMA backplane * interconnect topology is more involved. The goal is to locate * tasks from numa_groups near each other in the system, and * untangle workloads from different sides of the system. This requires * searching down the hierarchy of node groups, recursively searching * inside the highest scoring group of nodes. The nodemask tricks * keep the complexity of the search down. */ nodes = node_online_map; for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) { unsigned long max_faults = 0; nodemask_t max_group = NODE_MASK_NONE; int a, b; /* Are there nodes at this distance from each other? */ if (!find_numa_distance(dist)) continue; for_each_node_mask(a, nodes) { unsigned long faults = 0; nodemask_t this_group; nodes_clear(this_group); /* Sum group's NUMA faults; includes a==b case. */ for_each_node_mask(b, nodes) { if (node_distance(a, b) < dist) { faults += group_faults(p, b); node_set(b, this_group); node_clear(b, nodes); } } /* Remember the top group. */ if (faults > max_faults) { max_faults = faults; max_group = this_group; /* * subtle: at the smallest distance there is * just one node left in each "group", the * winner is the preferred nid. */ nid = a; } } /* Next round, evaluate the nodes within max_group. */ if (!max_faults) break; nodes = max_group; } return nid; } static void task_numa_placement(struct task_struct *p) { int seq, nid, max_nid = NUMA_NO_NODE; unsigned long max_faults = 0; unsigned long fault_types[2] = { 0, 0 }; unsigned long total_faults; u64 runtime, period; spinlock_t *group_lock = NULL; struct numa_group *ng; /* * The p->mm->numa_scan_seq field gets updated without * exclusive access. 
Use READ_ONCE() here to ensure * that the field is read in a single access: */ seq = READ_ONCE(p->mm->numa_scan_seq); if (p->numa_scan_seq == seq) return; p->numa_scan_seq = seq; p->numa_scan_period_max = task_scan_max(p); total_faults = p->numa_faults_locality[0] + p->numa_faults_locality[1]; runtime = numa_get_avg_runtime(p, &period); /* If the task is part of a group prevent parallel updates to group stats */ ng = deref_curr_numa_group(p); if (ng) { group_lock = &ng->lock; spin_lock_irq(group_lock); } /* Find the node with the highest number of faults */ for_each_online_node(nid) { /* Keep track of the offsets in numa_faults array */ int mem_idx, membuf_idx, cpu_idx, cpubuf_idx; unsigned long faults = 0, group_faults = 0; int priv; for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) { long diff, f_diff, f_weight; mem_idx = task_faults_idx(NUMA_MEM, nid, priv); membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv); cpu_idx = task_faults_idx(NUMA_CPU, nid, priv); cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv); /* Decay existing window, copy faults since last scan */ diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2; fault_types[priv] += p->numa_faults[membuf_idx]; p->numa_faults[membuf_idx] = 0; /* * Normalize the faults_from, so all tasks in a group * count according to CPU use, instead of by the raw * number of faults. Tasks with little runtime have * little over-all impact on throughput, and thus their * faults are less important. */ f_weight = div64_u64(runtime << 16, period + 1); f_weight = (f_weight * p->numa_faults[cpubuf_idx]) / (total_faults + 1); f_diff = f_weight - p->numa_faults[cpu_idx] / 2; p->numa_faults[cpubuf_idx] = 0; p->numa_faults[mem_idx] += diff; p->numa_faults[cpu_idx] += f_diff; faults += p->numa_faults[mem_idx]; p->total_numa_faults += diff; if (ng) { /* * safe because we can only change our own group * * mem_idx represents the offset for a given * nid and priv in a specific region because it * is at the beginning of the numa_faults array. 
*/ ng->faults[mem_idx] += diff; ng->faults_cpu[mem_idx] += f_diff; ng->total_faults += diff; group_faults += ng->faults[mem_idx]; } } if (!ng) { if (faults > max_faults) { max_faults = faults; max_nid = nid; } } else if (group_faults > max_faults) { max_faults = group_faults; max_nid = nid; } } if (ng) { numa_group_count_active_nodes(ng); spin_unlock_irq(group_lock); max_nid = preferred_group_nid(p, max_nid); } if (max_faults) { /* Set the new preferred node */ if (max_nid != p->numa_preferred_nid) sched_setnuma(p, max_nid); } update_task_scan_period(p, fault_types[0], fault_types[1]); } static inline int get_numa_group(struct numa_group *grp) { return refcount_inc_not_zero(&grp->refcount); } static inline void put_numa_group(struct numa_group *grp) { if (refcount_dec_and_test(&grp->refcount)) kfree_rcu(grp, rcu); } static void task_numa_group(struct task_struct *p, int cpupid, int flags, int *priv) { struct numa_group *grp, *my_grp; struct task_struct *tsk; bool join = false; int cpu = cpupid_to_cpu(cpupid); int i; if (unlikely(!deref_curr_numa_group(p))) { unsigned int size = sizeof(struct numa_group) + 4*nr_node_ids*sizeof(unsigned long); grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); if (!grp) return; refcount_set(&grp->refcount, 1); grp->active_nodes = 1; grp->max_faults_cpu = 0; spin_lock_init(&grp->lock); grp->gid = p->pid; /* Second half of the array tracks nids where faults happen */ grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES * nr_node_ids; for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) grp->faults[i] = p->numa_faults[i]; grp->total_faults = p->total_numa_faults; grp->nr_tasks++; rcu_assign_pointer(p->numa_group, grp); } rcu_read_lock(); tsk = READ_ONCE(cpu_rq(cpu)->curr); if (!cpupid_match_pid(tsk, cpupid)) goto no_join; grp = rcu_dereference(tsk->numa_group); if (!grp) goto no_join; my_grp = deref_curr_numa_group(p); if (grp == my_grp) goto no_join; /* * Only join the other group if its bigger; if we're the bigger group, * the other task will join us. */ if (my_grp->nr_tasks > grp->nr_tasks) goto no_join; /* * Tie-break on the grp address. */ if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp) goto no_join; /* Always join threads in the same process. */ if (tsk->mm == current->mm) join = true; /* Simple filter to avoid false positives due to PID collisions */ if (flags & TNF_SHARED) join = true; /* Update priv based on whether false sharing was detected */ *priv = !join; if (join && !get_numa_group(grp)) goto no_join; rcu_read_unlock(); if (!join) return; BUG_ON(irqs_disabled()); double_lock_irq(&my_grp->lock, &grp->lock); for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) { my_grp->faults[i] -= p->numa_faults[i]; grp->faults[i] += p->numa_faults[i]; } my_grp->total_faults -= p->total_numa_faults; grp->total_faults += p->total_numa_faults; my_grp->nr_tasks--; grp->nr_tasks++; spin_unlock(&my_grp->lock); spin_unlock_irq(&grp->lock); rcu_assign_pointer(p->numa_group, grp); put_numa_group(my_grp); return; no_join: rcu_read_unlock(); return; } /* * Get rid of NUMA staticstics associated with a task (either current or dead). * If @final is set, the task is dead and has reached refcount zero, so we can * safely free all relevant data structures. Otherwise, there might be * concurrent reads from places like load balancing and procfs, and we should * reset the data back to default state without freeing ->numa_faults. 
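 *
 * (A reader that already fetched p->numa_faults in the !final case thus
 * keeps dereferencing a valid, merely zeroed buffer instead of freed
 * memory; only the final put, once no readers can remain, releases it.)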
*/ void task_numa_free(struct task_struct *p, bool final) { /* safe: p either is current or is being freed by current */ struct numa_group *grp = rcu_dereference_raw(p->numa_group); unsigned long *numa_faults = p->numa_faults; unsigned long flags; int i; if (!numa_faults) return; if (grp) { spin_lock_irqsave(&grp->lock, flags); for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) grp->faults[i] -= p->numa_faults[i]; grp->total_faults -= p->total_numa_faults; grp->nr_tasks--; spin_unlock_irqrestore(&grp->lock, flags); RCU_INIT_POINTER(p->numa_group, NULL); put_numa_group(grp); } if (final) { p->numa_faults = NULL; kfree(numa_faults); } else { p->total_numa_faults = 0; for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) numa_faults[i] = 0; } } /* * Got a PROT_NONE fault for a page on @node. */ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags) { struct task_struct *p = current; bool migrated = flags & TNF_MIGRATED; int cpu_node = task_node(current); int local = !!(flags & TNF_FAULT_LOCAL); struct numa_group *ng; int priv; if (!static_branch_likely(&sched_numa_balancing)) return; /* for example, ksmd faulting in a user's mm */ if (!p->mm) return; /* Allocate buffer to track faults on a per-node basis */ if (unlikely(!p->numa_faults)) { int size = sizeof(*p->numa_faults) * NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids; p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN); if (!p->numa_faults) return; p->total_numa_faults = 0; memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); } /* * First accesses are treated as private, otherwise consider accesses * to be private if the accessing pid has not changed */ if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) { priv = 1; } else { priv = cpupid_match_pid(p, last_cpupid); if (!priv && !(flags & TNF_NO_GROUP)) task_numa_group(p, last_cpupid, flags, &priv); } /* * If a workload spans multiple NUMA nodes, a shared fault that * occurs wholly within the set of nodes that the workload is * actively using should be counted as local. This allows the * scan rate to slow down when a workload has settled down. */ ng = deref_curr_numa_group(p); if (!priv && !local && ng && ng->active_nodes > 1 && numa_is_active_node(cpu_node, ng) && numa_is_active_node(mem_node, ng)) local = 1; /* * Retry to migrate task to preferred node periodically, in case it * previously failed, or the scheduler moved us. */ if (time_after(jiffies, p->numa_migrate_retry)) { task_numa_placement(p); numa_migrate_preferred(p); } if (migrated) p->numa_pages_migrated += pages; if (flags & TNF_MIGRATE_FAIL) p->numa_faults_locality[2] += pages; p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; p->numa_faults_locality[local] += pages; } static void reset_ptenuma_scan(struct task_struct *p) { /* * We only did a read acquisition of the mmap sem, so * p->mm->numa_scan_seq is written to without exclusive access * and the update is not guaranteed to be atomic. That's not * much of an issue though, since this is just used for * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not * expensive, to avoid any form of compiler optimizations: */ WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1); p->mm->numa_scan_offset = 0; } /* * The expensive part of numa migration is done from task_work context. * Triggered from task_tick_numa(). 
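 *
 * As an aside, the deferral uses the generic task_work pattern: the tick
 * queues a callback_head that runs when the task next returns to user
 * space, and the callback re-arms itself by pointing work->next back at
 * the work item. A minimal, purely illustrative sketch of that pattern
 * follows (hypothetical my_* names, never compiled):
 */
#if 0
struct my_state {
	struct callback_head work;	/* work.next == &work means "not queued" */
};

static void my_deferred_work(struct callback_head *work)
{
	/* Process context, on this task's way back to user space. */
	work->next = work;		/* re-arm so the tick may queue us again */
	/* ... the expensive part would go here ... */
}

static void my_state_init(struct my_state *st)
{
	init_task_work(&st->work, my_deferred_work);
	st->work.next = &st->work;	/* start out "not queued" */
}

/* Called from the scheduler tick; must stay cheap. */
static void my_tick(struct task_struct *curr, struct my_state *st)
{
	struct callback_head *work = &st->work;

	if (work->next != work)		/* already queued; avoid a double add */
		return;
	task_work_add(curr, work, true);
}
#endif
/* (end of illustrative aside)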
*/ static void task_numa_work(struct callback_head *work) { unsigned long migrate, next_scan, now = jiffies; struct task_struct *p = current; struct mm_struct *mm = p->mm; u64 runtime = p->se.sum_exec_runtime; struct vm_area_struct *vma; unsigned long start, end; unsigned long nr_pte_updates = 0; long pages, virtpages; SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work)); work->next = work; /* * Who cares about NUMA placement when they're dying. * * NOTE: make sure not to dereference p->mm before this check, * exit_task_work() happens _after_ exit_mm() so we could be called * without p->mm even though we still had it when we enqueued this * work. */ if (p->flags & PF_EXITING) return; if (!mm->numa_next_scan) { mm->numa_next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); } /* * Enforce maximal scan/migration frequency.. */ migrate = mm->numa_next_scan; if (time_before(now, migrate)) return; if (p->numa_scan_period == 0) { p->numa_scan_period_max = task_scan_max(p); p->numa_scan_period = task_scan_start(p); } next_scan = now + msecs_to_jiffies(p->numa_scan_period); if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate) return; /* * Delay this task enough that another task of this mm will likely win * the next time around. */ p->node_stamp += 2 * TICK_NSEC; start = mm->numa_scan_offset; pages = sysctl_numa_balancing_scan_size; pages <<= 20 - PAGE_SHIFT; /* MB in pages */ virtpages = pages * 8; /* Scan up to this much virtual space */ if (!pages) return; if (!down_read_trylock(&mm->mmap_sem)) return; vma = find_vma(mm, start); if (!vma) { reset_ptenuma_scan(p); start = 0; vma = mm->mmap; } for (; vma; vma = vma->vm_next) { if (!vma_migratable(vma) || !vma_policy_mof(vma) || is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) { continue; } /* * Shared library pages mapped by multiple processes are not * migrated as it is expected they are cache replicated. Avoid * hinting faults in read-only file-backed mappings or the vdso * as migrating the pages will be of marginal benefit. */ if (!vma->vm_mm || (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) continue; /* * Skip inaccessible VMAs to avoid any confusion between * PROT_NONE and NUMA hinting ptes */ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) continue; do { start = max(start, vma->vm_start); end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); end = min(end, vma->vm_end); nr_pte_updates = change_prot_numa(vma, start, end); /* * Try to scan sysctl_numa_balancing_size worth of * hpages that have at least one present PTE that * is not already pte-numa. If the VMA contains * areas that are unused or already full of prot_numa * PTEs, scan up to virtpages, to skip through those * areas faster. */ if (nr_pte_updates) pages -= (end - start) >> PAGE_SHIFT; virtpages -= (end - start) >> PAGE_SHIFT; start = end; if (pages <= 0 || virtpages <= 0) goto out; cond_resched(); } while (end != vma->vm_end); } out: /* * It is possible to reach the end of the VMA list but the last few * VMAs are not guaranteed to the vma_migratable. If they are not, we * would find the !migratable VMA on the next scan but not reset the * scanner to the start so check it now. */ if (vma) mm->numa_scan_offset = start; else reset_ptenuma_scan(p); up_read(&mm->mmap_sem); /* * Make sure tasks use at least 32x as much time to run other code * than they used here, to limit NUMA PTE scanning overhead to 3% max. 
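	 * (Advancing p->node_stamp by 32 times the runtime consumed here means
	 * scanning can take at most 1 part in 33, i.e. roughly 3%, of the
	 * task's CPU time.)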
* Usually update_task_scan_period slows down scanning enough; on an * overloaded system we need to limit overhead on a per task basis. */ if (unlikely(p->se.sum_exec_runtime != runtime)) { u64 diff = p->se.sum_exec_runtime - runtime; p->node_stamp += 32 * diff; } } void init_numa_balancing(unsigned long clone_flags, struct task_struct *p) { int mm_users = 0; struct mm_struct *mm = p->mm; if (mm) { mm_users = atomic_read(&mm->mm_users); if (mm_users == 1) { mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); mm->numa_scan_seq = 0; } } p->node_stamp = 0; p->numa_scan_seq = mm ? mm->numa_scan_seq : 0; p->numa_scan_period = sysctl_numa_balancing_scan_delay; /* Protect against double add, see task_tick_numa and task_numa_work */ p->numa_work.next = &p->numa_work; p->numa_faults = NULL; RCU_INIT_POINTER(p->numa_group, NULL); p->last_task_numa_placement = 0; p->last_sum_exec_runtime = 0; init_task_work(&p->numa_work, task_numa_work); /* New address space, reset the preferred nid */ if (!(clone_flags & CLONE_VM)) { p->numa_preferred_nid = NUMA_NO_NODE; return; } /* * New thread, keep existing numa_preferred_nid which should be copied * already by arch_dup_task_struct but stagger when scans start. */ if (mm) { unsigned int delay; delay = min_t(unsigned int, task_scan_max(current), current->numa_scan_period * mm_users * NSEC_PER_MSEC); delay += 2 * TICK_NSEC; p->node_stamp = delay; } } /* * Drive the periodic memory faults.. */ static void task_tick_numa(struct rq *rq, struct task_struct *curr) { struct callback_head *work = &curr->numa_work; u64 period, now; /* * We don't care about NUMA placement if we don't have memory. */ if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work) return; /* * Using runtime rather than walltime has the dual advantage that * we (mostly) drive the selection from busy threads and that the * task needs to have done some actual work before we bother with * NUMA placement. */ now = curr->se.sum_exec_runtime; period = (u64)curr->numa_scan_period * NSEC_PER_MSEC; if (now > curr->node_stamp + period) { if (!curr->node_stamp) curr->numa_scan_period = task_scan_start(curr); curr->node_stamp += period; if (!time_before(jiffies, curr->mm->numa_next_scan)) task_work_add(curr, work, true); } } static void update_scan_period(struct task_struct *p, int new_cpu) { int src_nid = cpu_to_node(task_cpu(p)); int dst_nid = cpu_to_node(new_cpu); if (!static_branch_likely(&sched_numa_balancing)) return; if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING)) return; if (src_nid == dst_nid) return; /* * Allow resets if faults have been trapped before one scan * has completed. This is most likely due to a new task that * is pulled cross-node due to wakeups or load balancing. */ if (p->numa_scan_seq) { /* * Avoid scan adjustments if moving to the preferred * node or if the task was not previously running on * the preferred node. 
*/ if (dst_nid == p->numa_preferred_nid || (p->numa_preferred_nid != NUMA_NO_NODE && src_nid != p->numa_preferred_nid)) return; } p->numa_scan_period = task_scan_start(p); } #else static void task_tick_numa(struct rq *rq, struct task_struct *curr) { } static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p) { } static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) { } static inline void update_scan_period(struct task_struct *p, int new_cpu) { } #endif /* CONFIG_NUMA_BALANCING */ static void account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) { update_load_add(&cfs_rq->load, se->load.weight); #ifdef CONFIG_SMP if (entity_is_task(se)) { struct rq *rq = rq_of(cfs_rq); account_numa_enqueue(rq, task_of(se)); list_add(&se->group_node, &rq->cfs_tasks); } #endif cfs_rq->nr_running++; } static void account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) { update_load_sub(&cfs_rq->load, se->load.weight); #ifdef CONFIG_SMP if (entity_is_task(se)) { account_numa_dequeue(rq_of(cfs_rq), task_of(se)); list_del_init(&se->group_node); } #endif cfs_rq->nr_running--; } /* * Signed add and clamp on underflow. * * Explicitly do a load-store to ensure the intermediate value never hits * memory. This allows lockless observations without ever seeing the negative * values. */ #define add_positive(_ptr, _val) do { \ typeof(_ptr) ptr = (_ptr); \ typeof(_val) val = (_val); \ typeof(*ptr) res, var = READ_ONCE(*ptr); \ \ res = var + val; \ \ if (val < 0 && res > var) \ res = 0; \ \ WRITE_ONCE(*ptr, res); \ } while (0) /* * Unsigned subtract and clamp on underflow. * * Explicitly do a load-store to ensure the intermediate value never hits * memory. This allows lockless observations without ever seeing the negative * values. */ #define sub_positive(_ptr, _val) do { \ typeof(_ptr) ptr = (_ptr); \ typeof(*ptr) val = (_val); \ typeof(*ptr) res, var = READ_ONCE(*ptr); \ res = var - val; \ if (res > var) \ res = 0; \ WRITE_ONCE(*ptr, res); \ } while (0) /* * Remove and clamp on negative, from a local variable. * * A variant of sub_positive(), which does not use explicit load-store * and is thus optimized for local variable updates. 
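 *
 * A tiny illustration of how the three helpers clamp, with made-up values
 * (never compiled; lsub_positive() is the macro defined right after this
 * aside):
 */
#if 0
static void clamp_examples(void)
{
	unsigned long avg = 100;
	unsigned long load = 50;
	unsigned long budget = 30;
	long delta = -80;

	sub_positive(&avg, 120);	/* 100 - 120 would wrap; avg ends up 0   */
	add_positive(&load, delta);	/* 50 + (-80) would wrap; load ends up 0 */
	lsub_positive(&budget, 70);	/* local-variable variant; budget is 0   */
}
#endif
/* (end of illustrative aside)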
*/ #define lsub_positive(_ptr, _val) do { \ typeof(_ptr) ptr = (_ptr); \ *ptr -= min_t(typeof(*ptr), *ptr, _val); \ } while (0) #ifdef CONFIG_SMP static inline void enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { cfs_rq->runnable_weight += se->runnable_weight; cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg; cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum; } static inline void dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { cfs_rq->runnable_weight -= se->runnable_weight; sub_positive(&cfs_rq->avg.runnable_load_avg, se->avg.runnable_load_avg); sub_positive(&cfs_rq->avg.runnable_load_sum, se_runnable(se) * se->avg.runnable_load_sum); } static inline void enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { cfs_rq->avg.load_avg += se->avg.load_avg; cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; } static inline void dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); } #else static inline void enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } static inline void dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } static inline void enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } static inline void dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } #endif static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, unsigned long weight, unsigned long runnable) { if (se->on_rq) { /* commit outstanding execution time */ if (cfs_rq->curr == se) update_curr(cfs_rq); account_entity_dequeue(cfs_rq, se); dequeue_runnable_load_avg(cfs_rq, se); } dequeue_load_avg(cfs_rq, se); se->runnable_weight = runnable; update_load_set(&se->load, weight); #ifdef CONFIG_SMP do { u32 divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib; se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); se->avg.runnable_load_avg = div_u64(se_runnable(se) * se->avg.runnable_load_sum, divider); } while (0); #endif enqueue_load_avg(cfs_rq, se); if (se->on_rq) { account_entity_enqueue(cfs_rq, se); enqueue_runnable_load_avg(cfs_rq, se); } } void reweight_task(struct task_struct *p, int prio) { struct sched_entity *se = &p->se; struct cfs_rq *cfs_rq = cfs_rq_of(se); struct load_weight *load = &se->load; unsigned long weight = scale_load(sched_prio_to_weight[prio]); reweight_entity(cfs_rq, se, weight, weight); load->inv_weight = sched_prio_to_wmult[prio]; } #ifdef CONFIG_FAIR_GROUP_SCHED #ifdef CONFIG_SMP /* * All this does is approximate the hierarchical proportion which includes that * global sum we all love to hate. * * That is, the weight of a group entity, is the proportional share of the * group weight based on the group runqueue weights. That is: * * tg->weight * grq->load.weight * ge->load.weight = ----------------------------- (1) * \Sum grq->load.weight * * Now, because computing that sum is prohibitively expensive to compute (been * there, done that) we approximate it with this average stuff. The average * moves slower and therefore the approximation is cheaper and more stable. 
* * So instead of the above, we substitute: * * grq->load.weight -> grq->avg.load_avg (2) * * which yields the following: * * tg->weight * grq->avg.load_avg * ge->load.weight = ------------------------------ (3) * tg->load_avg * * Where: tg->load_avg ~= \Sum grq->avg.load_avg * * That is shares_avg, and it is right (given the approximation (2)). * * The problem with it is that because the average is slow -- it was designed * to be exactly that of course -- this leads to transients in boundary * conditions. In specific, the case where the group was idle and we start the * one task. It takes time for our CPU's grq->avg.load_avg to build up, * yielding bad latency etc.. * * Now, in that special case (1) reduces to: * * tg->weight * grq->load.weight * ge->load.weight = ----------------------------- = tg->weight (4) * grp->load.weight * * That is, the sum collapses because all other CPUs are idle; the UP scenario. * * So what we do is modify our approximation (3) to approach (4) in the (near) * UP case, like: * * ge->load.weight = * * tg->weight * grq->load.weight * --------------------------------------------------- (5) * tg->load_avg - grq->avg.load_avg + grq->load.weight * * But because grq->load.weight can drop to 0, resulting in a divide by zero, * we need to use grq->avg.load_avg as its lower bound, which then gives: * * * tg->weight * grq->load.weight * ge->load.weight = ----------------------------- (6) * tg_load_avg' * * Where: * * tg_load_avg' = tg->load_avg - grq->avg.load_avg + * max(grq->load.weight, grq->avg.load_avg) * * And that is shares_weight and is icky. In the (near) UP case it approaches * (4) while in the normal case it approaches (3). It consistently * overestimates the ge->load.weight and therefore: * * \Sum ge->load.weight >= tg->weight * * hence icky! */ static long calc_group_shares(struct cfs_rq *cfs_rq) { long tg_weight, tg_shares, load, shares; struct task_group *tg = cfs_rq->tg; tg_shares = READ_ONCE(tg->shares); load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg); tg_weight = atomic_long_read(&tg->load_avg); /* Ensure tg_weight >= load */ tg_weight -= cfs_rq->tg_load_avg_contrib; tg_weight += load; shares = (tg_shares * load); if (tg_weight) shares /= tg_weight; /* * MIN_SHARES has to be unscaled here to support per-CPU partitioning * of a group with small tg->shares value. It is a floor value which is * assigned as a minimum load.weight to the sched_entity representing * the group on a CPU. * * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024 * on an 8-core system with 8 tasks each runnable on one CPU shares has * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In * case no task is runnable on a CPU MIN_SHARES=2 should be returned * instead of 0. */ return clamp_t(long, shares, MIN_SHARES, tg_shares); } /* * This calculates the effective runnable weight for a group entity based on * the group entity weight calculated above. * * Because of the above approximation (2), our group entity weight is * an load_avg based ratio (3). This means that it includes blocked load and * does not represent the runnable weight. * * Approximate the group entity's runnable weight per ratio from the group * runqueue: * * grq->avg.runnable_load_avg * ge->runnable_weight = ge->load.weight * -------------------------- (7) * grq->avg.load_avg * * However, analogous to above, since the avg numbers are slow, this leads to * transients in the from-idle case. 
Instead we use: * * ge->runnable_weight = ge->load.weight * * * max(grq->avg.runnable_load_avg, grq->runnable_weight) * ----------------------------------------------------- (8) * max(grq->avg.load_avg, grq->load.weight) * * Where these max() serve both to use the 'instant' values to fix the slow * from-idle and avoid the /0 on to-idle, similar to (6). */ static long calc_group_runnable(struct cfs_rq *cfs_rq, long shares) { long runnable, load_avg; load_avg = max(cfs_rq->avg.load_avg, scale_load_down(cfs_rq->load.weight)); runnable = max(cfs_rq->avg.runnable_load_avg, scale_load_down(cfs_rq->runnable_weight)); runnable *= shares; if (load_avg) runnable /= load_avg; return clamp_t(long, runnable, MIN_SHARES, shares); } #endif /* CONFIG_SMP */ static inline int throttled_hierarchy(struct cfs_rq *cfs_rq); /* * Recomputes the group entity based on the current state of its group * runqueue. */ static void update_cfs_group(struct sched_entity *se) { struct cfs_rq *gcfs_rq = group_cfs_rq(se); long shares, runnable; if (!gcfs_rq) return; if (throttled_hierarchy(gcfs_rq)) return; #ifndef CONFIG_SMP runnable = shares = READ_ONCE(gcfs_rq->tg->shares); if (likely(se->load.weight == shares)) return; #else shares = calc_group_shares(gcfs_rq); runnable = calc_group_runnable(gcfs_rq, shares); #endif reweight_entity(cfs_rq_of(se), se, shares, runnable); } #else /* CONFIG_FAIR_GROUP_SCHED */ static inline void update_cfs_group(struct sched_entity *se) { } #endif /* CONFIG_FAIR_GROUP_SCHED */ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags) { struct rq *rq = rq_of(cfs_rq); if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) { /* * There are a few boundary cases this might miss but it should * get called often enough that that should (hopefully) not be * a real problem. * * It will not get called when we go idle, because the idle * thread is a different class (!fair), nor will the utilization * number include things like RT tasks. * * As is, the util number is not freq-invariant (we'd have to * implement arch_scale_freq_capacity() for that). * * See cpu_util(). */ cpufreq_update_util(rq, flags); } } #ifdef CONFIG_SMP #ifdef CONFIG_FAIR_GROUP_SCHED /** * update_tg_load_avg - update the tg's load avg * @cfs_rq: the cfs_rq whose avg changed * @force: update regardless of how small the difference * * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load. * However, because tg->load_avg is a global value there are performance * considerations. * * In order to avoid having to look at the other cfs_rq's, we use a * differential update where we store the last value we propagated. This in * turn allows skipping updates if the differential is 'small'. * * Updating tg's load_avg is necessary before update_cfs_share(). */ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) { long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; /* * No need to update load_avg for root_task_group as it is not used. */ if (cfs_rq->tg == &root_task_group) return; if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) { atomic_long_add(delta, &cfs_rq->tg->load_avg); cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; } } /* * Called within set_task_rq() right before setting a task's CPU. The * caller only guarantees p->pi_lock is held; no other assumptions, * including the state of rq->lock, should be made. 
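 *
 * One more detail worth calling out: on !CONFIG_64BIT kernels the function
 * below (and cfs_rq_last_update_time() further down) cannot read the 64-bit
 * last_update_time atomically, so the code pairs a second "copy" field with
 * memory barriers and retries until both reads agree. A minimal standalone
 * sketch of that scheme (hypothetical names, never compiled):
 */
#if 0
struct split_u64 {
	u64 value;		/* written first			*/
	u64 value_copy;		/* written second, after smp_wmb()	*/
};

static void split_u64_write(struct split_u64 *s, u64 new)
{
	s->value = new;
	smp_wmb();		/* order the value before its copy */
	s->value_copy = new;
}

static u64 split_u64_read(struct split_u64 *s)
{
	u64 copy, val;

	do {
		copy = s->value_copy;
		smp_rmb();	/* pairs with the smp_wmb() in the writer */
		val = s->value;
	} while (val != copy);	/* a mismatch means a writer raced us; retry */

	return val;
}
#endif
/* (end of illustrative aside)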
*/ void set_task_rq_fair(struct sched_entity *se, struct cfs_rq *prev, struct cfs_rq *next) { u64 p_last_update_time; u64 n_last_update_time; if (!sched_feat(ATTACH_AGE_LOAD)) return; /* * We are supposed to update the task to "current" time, then its up to * date and ready to go to new CPU/cfs_rq. But we have difficulty in * getting what current time is, so simply throw away the out-of-date * time. This will result in the wakee task is less decayed, but giving * the wakee more load sounds not bad. */ if (!(se->avg.last_update_time && prev)) return; #ifndef CONFIG_64BIT { u64 p_last_update_time_copy; u64 n_last_update_time_copy; do { p_last_update_time_copy = prev->load_last_update_time_copy; n_last_update_time_copy = next->load_last_update_time_copy; smp_rmb(); p_last_update_time = prev->avg.last_update_time; n_last_update_time = next->avg.last_update_time; } while (p_last_update_time != p_last_update_time_copy || n_last_update_time != n_last_update_time_copy); } #else p_last_update_time = prev->avg.last_update_time; n_last_update_time = next->avg.last_update_time; #endif __update_load_avg_blocked_se(p_last_update_time, se); se->avg.last_update_time = n_last_update_time; } /* * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to * propagate its contribution. The key to this propagation is the invariant * that for each group: * * ge->avg == grq->avg (1) * * _IFF_ we look at the pure running and runnable sums. Because they * represent the very same entity, just at different points in the hierarchy. * * Per the above update_tg_cfs_util() is trivial and simply copies the running * sum over (but still wrong, because the group entity and group rq do not have * their PELT windows aligned). * * However, update_tg_cfs_runnable() is more complex. So we have: * * ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2) * * And since, like util, the runnable part should be directly transferable, * the following would _appear_ to be the straight forward approach: * * grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3) * * And per (1) we have: * * ge->avg.runnable_avg == grq->avg.runnable_avg * * Which gives: * * ge->load.weight * grq->avg.load_avg * ge->avg.load_avg = ----------------------------------- (4) * grq->load.weight * * Except that is wrong! * * Because while for entities historical weight is not important and we * really only care about our future and therefore can consider a pure * runnable sum, runqueues can NOT do this. * * We specifically want runqueues to have a load_avg that includes * historical weights. Those represent the blocked load, the load we expect * to (shortly) return to us. This only works by keeping the weights as * integral part of the sum. We therefore cannot decompose as per (3). * * Another reason this doesn't work is that runnable isn't a 0-sum entity. * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the * rq itself is runnable anywhere between 2/3 and 1 depending on how the * runnable section of these tasks overlap (or not). If they were to perfectly * align the rq as a whole would be runnable 2/3 of the time. If however we * always have at least 1 runnable task, the rq as a whole is always runnable. * * So we'll have to approximate.. :/ * * Given the constraint: * * ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX * * We can construct a rule that adds runnable to a rq by assuming minimal * overlap. 
* * On removal, we'll assume each task is equally runnable; which yields: * * grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight * * XXX: only do this for the part of runnable > running ? * */ static inline void update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) { long delta = gcfs_rq->avg.util_avg - se->avg.util_avg; /* Nothing to update */ if (!delta) return; /* * The relation between sum and avg is: * * LOAD_AVG_MAX - 1024 + sa->period_contrib * * however, the PELT windows are not aligned between grq and gse. */ /* Set new sched_entity's utilization */ se->avg.util_avg = gcfs_rq->avg.util_avg; se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX; /* Update parent cfs_rq utilization */ add_positive(&cfs_rq->avg.util_avg, delta); cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX; } static inline void update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) { long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum; unsigned long runnable_load_avg, load_avg; u64 runnable_load_sum, load_sum = 0; s64 delta_sum; if (!runnable_sum) return; gcfs_rq->prop_runnable_sum = 0; if (runnable_sum >= 0) { /* * Add runnable; clip at LOAD_AVG_MAX. Reflects that until * the CPU is saturated running == runnable. */ runnable_sum += se->avg.load_sum; runnable_sum = min(runnable_sum, (long)LOAD_AVG_MAX); } else { /* * Estimate the new unweighted runnable_sum of the gcfs_rq by * assuming all tasks are equally runnable. */ if (scale_load_down(gcfs_rq->load.weight)) { load_sum = div_s64(gcfs_rq->avg.load_sum, scale_load_down(gcfs_rq->load.weight)); } /* But make sure to not inflate se's runnable */ runnable_sum = min(se->avg.load_sum, load_sum); } /* * runnable_sum can't be lower than running_sum * Rescale running sum to be in the same range as runnable sum * running_sum is in [0 : LOAD_AVG_MAX << SCHED_CAPACITY_SHIFT] * runnable_sum is in [0 : LOAD_AVG_MAX] */ running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT; runnable_sum = max(runnable_sum, running_sum); load_sum = (s64)se_weight(se) * runnable_sum; load_avg = div_s64(load_sum, LOAD_AVG_MAX); delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum; delta_avg = load_avg - se->avg.load_avg; se->avg.load_sum = runnable_sum; se->avg.load_avg = load_avg; add_positive(&cfs_rq->avg.load_avg, delta_avg); add_positive(&cfs_rq->avg.load_sum, delta_sum); runnable_load_sum = (s64)se_runnable(se) * runnable_sum; runnable_load_avg = div_s64(runnable_load_sum, LOAD_AVG_MAX); delta_sum = runnable_load_sum - se_weight(se) * se->avg.runnable_load_sum; delta_avg = runnable_load_avg - se->avg.runnable_load_avg; se->avg.runnable_load_sum = runnable_sum; se->avg.runnable_load_avg = runnable_load_avg; if (se->on_rq) { add_positive(&cfs_rq->avg.runnable_load_avg, delta_avg); add_positive(&cfs_rq->avg.runnable_load_sum, delta_sum); } } static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) { cfs_rq->propagate = 1; cfs_rq->prop_runnable_sum += runnable_sum; } /* Update task and its cfs_rq load average */ static inline int propagate_entity_load_avg(struct sched_entity *se) { struct cfs_rq *cfs_rq, *gcfs_rq; if (entity_is_task(se)) return 0; gcfs_rq = group_cfs_rq(se); if (!gcfs_rq->propagate) return 0; gcfs_rq->propagate = 0; cfs_rq = cfs_rq_of(se); add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum); update_tg_cfs_util(cfs_rq, se, gcfs_rq); update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); trace_pelt_cfs_tp(cfs_rq); trace_pelt_se_tp(se); 
return 1; } /* * Check if we need to update the load and the utilization of a blocked * group_entity: */ static inline bool skip_blocked_update(struct sched_entity *se) { struct cfs_rq *gcfs_rq = group_cfs_rq(se); /* * If sched_entity still have not zero load or utilization, we have to * decay it: */ if (se->avg.load_avg || se->avg.util_avg) return false; /* * If there is a pending propagation, we have to update the load and * the utilization of the sched_entity: */ if (gcfs_rq->propagate) return false; /* * Otherwise, the load and the utilization of the sched_entity is * already zero and there is no pending propagation, so it will be a * waste of time to try to decay it: */ return true; } #else /* CONFIG_FAIR_GROUP_SCHED */ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {} static inline int propagate_entity_load_avg(struct sched_entity *se) { return 0; } static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {} #endif /* CONFIG_FAIR_GROUP_SCHED */ /** * update_cfs_rq_load_avg - update the cfs_rq's load/util averages * @now: current time, as per cfs_rq_clock_pelt() * @cfs_rq: cfs_rq to update * * The cfs_rq avg is the direct sum of all its entities (blocked and runnable) * avg. The immediate corollary is that all (fair) tasks must be attached, see * post_init_entity_util_avg(). * * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example. * * Returns true if the load decayed or we removed load. * * Since both these conditions indicate a changed cfs_rq->avg.load we should * call update_tg_load_avg() when this function returns true. */ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) { unsigned long removed_load = 0, removed_util = 0, removed_runnable_sum = 0; struct sched_avg *sa = &cfs_rq->avg; int decayed = 0; if (cfs_rq->removed.nr) { unsigned long r; u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib; raw_spin_lock(&cfs_rq->removed.lock); swap(cfs_rq->removed.util_avg, removed_util); swap(cfs_rq->removed.load_avg, removed_load); swap(cfs_rq->removed.runnable_sum, removed_runnable_sum); cfs_rq->removed.nr = 0; raw_spin_unlock(&cfs_rq->removed.lock); r = removed_load; sub_positive(&sa->load_avg, r); sub_positive(&sa->load_sum, r * divider); r = removed_util; sub_positive(&sa->util_avg, r); sub_positive(&sa->util_sum, r * divider); add_tg_cfs_propagate(cfs_rq, -(long)removed_runnable_sum); decayed = 1; } decayed |= __update_load_avg_cfs_rq(now, cfs_rq); #ifndef CONFIG_64BIT smp_wmb(); cfs_rq->load_last_update_time_copy = sa->last_update_time; #endif if (decayed) cfs_rq_util_change(cfs_rq, 0); return decayed; } /** * attach_entity_load_avg - attach this entity to its cfs_rq load avg * @cfs_rq: cfs_rq to attach to * @se: sched_entity to attach * @flags: migration hints * * Must call update_cfs_rq_load_avg() before this, since we rely on * cfs_rq->avg.last_update_time being current. */ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib; /* * When we attach the @se to the @cfs_rq, we must align the decay * window because without that, really weird and wonderful things can * happen. * * XXX illustrate */ se->avg.last_update_time = cfs_rq->avg.last_update_time; se->avg.period_contrib = cfs_rq->avg.period_contrib; /* * Hell(o) Nasty stuff.. we need to recompute _sum based on the new * period_contrib. 
This isn't strictly correct, but since we're * entirely outside of the PELT hierarchy, nobody cares if we truncate * _sum a little. */ se->avg.util_sum = se->avg.util_avg * divider; se->avg.load_sum = divider; if (se_weight(se)) { se->avg.load_sum = div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se)); } se->avg.runnable_load_sum = se->avg.load_sum; enqueue_load_avg(cfs_rq, se); cfs_rq->avg.util_avg += se->avg.util_avg; cfs_rq->avg.util_sum += se->avg.util_sum; add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); cfs_rq_util_change(cfs_rq, flags); trace_pelt_cfs_tp(cfs_rq); } /** * detach_entity_load_avg - detach this entity from its cfs_rq load avg * @cfs_rq: cfs_rq to detach from * @se: sched_entity to detach * * Must call update_cfs_rq_load_avg() before this, since we rely on * cfs_rq->avg.last_update_time being current. */ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { dequeue_load_avg(cfs_rq, se); sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); cfs_rq_util_change(cfs_rq, 0); trace_pelt_cfs_tp(cfs_rq); } /* * Optional action to be done while updating the load average */ #define UPDATE_TG 0x1 #define SKIP_AGE_LOAD 0x2 #define DO_ATTACH 0x4 /* Update task and its cfs_rq load average */ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { u64 now = cfs_rq_clock_pelt(cfs_rq); int decayed; /* * Track task load average for carrying it to new CPU after migrated, and * track group sched_entity load average for task_h_load calc in migration */ if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) __update_load_avg_se(now, cfs_rq, se); decayed = update_cfs_rq_load_avg(now, cfs_rq); decayed |= propagate_entity_load_avg(se); if (!se->avg.last_update_time && (flags & DO_ATTACH)) { /* * DO_ATTACH means we're here from enqueue_entity(). * !last_update_time means we've passed through * migrate_task_rq_fair() indicating we migrated. * * IOW we're enqueueing a task on a new CPU. */ attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION); update_tg_load_avg(cfs_rq, 0); } else if (decayed && (flags & UPDATE_TG)) update_tg_load_avg(cfs_rq, 0); } #ifndef CONFIG_64BIT static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) { u64 last_update_time_copy; u64 last_update_time; do { last_update_time_copy = cfs_rq->load_last_update_time_copy; smp_rmb(); last_update_time = cfs_rq->avg.last_update_time; } while (last_update_time != last_update_time_copy); return last_update_time; } #else static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) { return cfs_rq->avg.last_update_time; } #endif /* * Synchronize entity load avg of dequeued entity without locking * the previous rq. */ static void sync_entity_load_avg(struct sched_entity *se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); u64 last_update_time; last_update_time = cfs_rq_last_update_time(cfs_rq); __update_load_avg_blocked_se(last_update_time, se); } /* * Task first catches up with cfs_rq, and then subtract * itself from the cfs_rq (task must be off the queue now). */ static void remove_entity_load_avg(struct sched_entity *se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); unsigned long flags; /* * tasks cannot exit without having gone through wake_up_new_task() -> * post_init_entity_util_avg() which will have added things to the * cfs_rq, so we can remove unconditionally. 
*/ sync_entity_load_avg(se); raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags); ++cfs_rq->removed.nr; cfs_rq->removed.util_avg += se->avg.util_avg; cfs_rq->removed.load_avg += se->avg.load_avg; cfs_rq->removed.runnable_sum += se->avg.load_sum; /* == runnable_sum */ raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags); } static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq) { return cfs_rq->avg.runnable_load_avg; } static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq) { return cfs_rq->avg.load_avg; } static int idle_balance(struct rq *this_rq, struct rq_flags *rf); static inline unsigned long task_util(struct task_struct *p) { return READ_ONCE(p->se.avg.util_avg); } static inline unsigned long _task_util_est(struct task_struct *p) { struct util_est ue = READ_ONCE(p->se.avg.util_est); return (max(ue.ewma, ue.enqueued) | UTIL_AVG_UNCHANGED); } static inline unsigned long task_util_est(struct task_struct *p) { return max(task_util(p), _task_util_est(p)); } static inline void util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) { unsigned int enqueued; if (!sched_feat(UTIL_EST)) return; /* Update root cfs_rq's estimated utilization */ enqueued = cfs_rq->avg.util_est.enqueued; enqueued += _task_util_est(p); WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); } /* * Check if a (signed) value is within a specified (unsigned) margin, * based on the observation that: * * abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1) * * NOTE: this only works when value + maring < INT_MAX. */ static inline bool within_margin(int value, int margin) { return ((unsigned int)(value + margin - 1) < (2 * margin - 1)); } static void util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) { long last_ewma_diff; struct util_est ue; int cpu; if (!sched_feat(UTIL_EST)) return; /* Update root cfs_rq's estimated utilization */ ue.enqueued = cfs_rq->avg.util_est.enqueued; ue.enqueued -= min_t(unsigned int, ue.enqueued, _task_util_est(p)); WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued); /* * Skip update of task's estimated utilization when the task has not * yet completed an activation, e.g. being migrated. */ if (!task_sleep) return; /* * If the PELT values haven't changed since enqueue time, * skip the util_est update. */ ue = p->se.avg.util_est; if (ue.enqueued & UTIL_AVG_UNCHANGED) return; /* * Skip update of task's estimated utilization when its EWMA is * already ~1% close to its last activation value. */ ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED); last_ewma_diff = ue.enqueued - ue.ewma; if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100))) return; /* * To avoid overestimation of actual task utilization, skip updates if * we cannot grant there is idle time in this CPU. */ cpu = cpu_of(rq_of(cfs_rq)); if (task_util(p) > capacity_orig_of(cpu)) return; /* * Update Task's estimated utilization * * When *p completes an activation we can consolidate another sample * of the task size. 
This is done by storing the current PELT value * as ue.enqueued and by using this value to update the Exponential * Weighted Moving Average (EWMA): * * ewma(t) = w * task_util(p) + (1-w) * ewma(t-1) * = w * task_util(p) + ewma(t-1) - w * ewma(t-1) * = w * (task_util(p) - ewma(t-1)) + ewma(t-1) * = w * ( last_ewma_diff ) + ewma(t-1) * = w * (last_ewma_diff + ewma(t-1) / w) * * Where 'w' is the weight of new samples, which is configured to be * 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT) */ ue.ewma <<= UTIL_EST_WEIGHT_SHIFT; ue.ewma += last_ewma_diff; ue.ewma >>= UTIL_EST_WEIGHT_SHIFT; WRITE_ONCE(p->se.avg.util_est, ue); } static inline int task_fits_capacity(struct task_struct *p, long capacity) { return fits_capacity(task_util_est(p), capacity); } static inline void update_misfit_status(struct task_struct *p, struct rq *rq) { if (!static_branch_unlikely(&sched_asym_cpucapacity)) return; if (!p) { rq->misfit_task_load = 0; return; } if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) { rq->misfit_task_load = 0; return; } rq->misfit_task_load = task_h_load(p); } #else /* CONFIG_SMP */ #define UPDATE_TG 0x0 #define SKIP_AGE_LOAD 0x0 #define DO_ATTACH 0x0 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) { cfs_rq_util_change(cfs_rq, 0); } static inline void remove_entity_load_avg(struct sched_entity *se) {} static inline void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {} static inline void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} static inline int idle_balance(struct rq *rq, struct rq_flags *rf) { return 0; } static inline void util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {} static inline void util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) {} static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {} #endif /* CONFIG_SMP */ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) { #ifdef CONFIG_SCHED_DEBUG s64 d = se->vruntime - cfs_rq->min_vruntime; if (d < 0) d = -d; if (d > 3*sysctl_sched_latency) schedstat_inc(cfs_rq->nr_spread_over); #endif } static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) { u64 vruntime = cfs_rq->min_vruntime; /* * The 'current' period is already promised to the current tasks, * however the extra weight of the new task will slow them down a * little, place the new task so that it fits in the slot that * stays open at the end. */ if (initial && sched_feat(START_DEBIT)) vruntime += sched_vslice(cfs_rq, se); /* sleeps up to a single latency don't count. */ if (!initial) { unsigned long thresh = sysctl_sched_latency; /* * Halve their sleep time's effect, to allow * for a gentler effect of sleepers: */ if (sched_feat(GENTLE_FAIR_SLEEPERS)) thresh >>= 1; vruntime -= thresh; } /* ensure we never gain time by being placed backwards. 
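	 * (A task that slept only briefly will typically still have a vruntime
	 * ahead of the discounted placement computed above; max_vruntime()
	 * keeps the larger of the two, so a short sleep is not rewarded.)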
*/ se->vruntime = max_vruntime(se->vruntime, vruntime); } static void check_enqueue_throttle(struct cfs_rq *cfs_rq); static inline void check_schedstat_required(void) { #ifdef CONFIG_SCHEDSTATS if (schedstat_enabled()) return; /* Force schedstat enabled if a dependent tracepoint is active */ if (trace_sched_stat_wait_enabled() || trace_sched_stat_sleep_enabled() || trace_sched_stat_iowait_enabled() || trace_sched_stat_blocked_enabled() || trace_sched_stat_runtime_enabled()) { printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, " "stat_blocked and stat_runtime require the " "kernel parameter schedstats=enable or " "kernel.sched_schedstats=1\n"); } #endif } /* * MIGRATION * * dequeue * update_curr() * update_min_vruntime() * vruntime -= min_vruntime * * enqueue * update_curr() * update_min_vruntime() * vruntime += min_vruntime * * this way the vruntime transition between RQs is done when both * min_vruntime are up-to-date. * * WAKEUP (remote) * * ->migrate_task_rq_fair() (p->state == TASK_WAKING) * vruntime -= min_vruntime * * enqueue * update_curr() * update_min_vruntime() * vruntime += min_vruntime * * this way we don't have the most up-to-date min_vruntime on the originating * CPU and an up-to-date min_vruntime on the destination CPU. */ static void enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED); bool curr = cfs_rq->curr == se; /* * If we're the current task, we must renormalise before calling * update_curr(). */ if (renorm && curr) se->vruntime += cfs_rq->min_vruntime; update_curr(cfs_rq); /* * Otherwise, renormalise after, such that we're placed at the current * moment in time, instead of some random moment in the past. Being * placed in the past could significantly boost this task to the * fairness detriment of existing tasks. */ if (renorm && !curr) se->vruntime += cfs_rq->min_vruntime; /* * When enqueuing a sched_entity, we must: * - Update loads to have both entity and cfs_rq synced with now. 
* - Add its load to cfs_rq->runnable_avg * - For group_entity, update its weight to reflect the new share of * its group cfs_rq * - Add its new weight to cfs_rq->load.weight */ update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); update_cfs_group(se); enqueue_runnable_load_avg(cfs_rq, se); account_entity_enqueue(cfs_rq, se); if (flags & ENQUEUE_WAKEUP) place_entity(cfs_rq, se, 0); check_schedstat_required(); update_stats_enqueue(cfs_rq, se, flags); check_spread(cfs_rq, se); if (!curr) __enqueue_entity(cfs_rq, se); se->on_rq = 1; if (cfs_rq->nr_running == 1) { list_add_leaf_cfs_rq(cfs_rq); check_enqueue_throttle(cfs_rq); } } static void __clear_buddies_last(struct sched_entity *se) { for_each_sched_entity(se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); if (cfs_rq->last != se) break; cfs_rq->last = NULL; } } static void __clear_buddies_next(struct sched_entity *se) { for_each_sched_entity(se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); if (cfs_rq->next != se) break; cfs_rq->next = NULL; } } static void __clear_buddies_skip(struct sched_entity *se) { for_each_sched_entity(se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); if (cfs_rq->skip != se) break; cfs_rq->skip = NULL; } } static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) { if (cfs_rq->last == se) __clear_buddies_last(se); if (cfs_rq->next == se) __clear_buddies_next(se); if (cfs_rq->skip == se) __clear_buddies_skip(se); } static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); static void dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { /* * Update run-time statistics of the 'current'. */ update_curr(cfs_rq); /* * When dequeuing a sched_entity, we must: * - Update loads to have both entity and cfs_rq synced with now. * - Subtract its load from the cfs_rq->runnable_avg. * - Subtract its previous weight from cfs_rq->load.weight. * - For group entity, update its weight to reflect the new share * of its group cfs_rq. */ update_load_avg(cfs_rq, se, UPDATE_TG); dequeue_runnable_load_avg(cfs_rq, se); update_stats_dequeue(cfs_rq, se, flags); clear_buddies(cfs_rq, se); if (se != cfs_rq->curr) __dequeue_entity(cfs_rq, se); se->on_rq = 0; account_entity_dequeue(cfs_rq, se); /* * Normalize after update_curr(); which will also have moved * min_vruntime if @se is the one holding it back. But before doing * update_min_vruntime() again, which will discount @se's position and * can move min_vruntime forward still more. */ if (!(flags & DEQUEUE_SLEEP)) se->vruntime -= cfs_rq->min_vruntime; /* return excess runtime on last dequeue */ return_cfs_rq_runtime(cfs_rq); update_cfs_group(se); /* * Now advance min_vruntime if @se was the entity holding it back, * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be * put back on, and if we advance min_vruntime, we'll be placed back * further than we started -- ie. we'll be penalized. */ if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE) update_min_vruntime(cfs_rq); } /* * Preempt the current task with a newly woken task if needed: */ static void check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) { unsigned long ideal_runtime, delta_exec; struct sched_entity *se; s64 delta; ideal_runtime = sched_slice(cfs_rq, curr); delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; if (delta_exec > ideal_runtime) { resched_curr(rq_of(cfs_rq)); /* * The current task ran long enough, ensure it doesn't get * re-elected due to buddy favours. 
*/ clear_buddies(cfs_rq, curr); return; } /* * Ensure that a task that missed wakeup preemption by a * narrow margin doesn't have to wait for a full slice. * This also mitigates buddy induced latencies under load. */ if (delta_exec < sysctl_sched_min_granularity) return; se = __pick_first_entity(cfs_rq); delta = curr->vruntime - se->vruntime; if (delta < 0) return; if (delta > ideal_runtime) resched_curr(rq_of(cfs_rq)); } static void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) { /* 'current' is not kept within the tree. */ if (se->on_rq) { /* * Any task has to be enqueued before it get to execute on * a CPU. So account for the time it spent waiting on the * runqueue. */ update_stats_wait_end(cfs_rq, se); __dequeue_entity(cfs_rq, se); update_load_avg(cfs_rq, se, UPDATE_TG); } update_stats_curr_start(cfs_rq, se); cfs_rq->curr = se; /* * Track our maximum slice length, if the CPU's load is at * least twice that of our own weight (i.e. dont track it * when there are only lesser-weight tasks around): */ if (schedstat_enabled() && rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) { schedstat_set(se->statistics.slice_max, max((u64)schedstat_val(se->statistics.slice_max), se->sum_exec_runtime - se->prev_sum_exec_runtime)); } se->prev_sum_exec_runtime = se->sum_exec_runtime; } static int wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se); /* * Pick the next process, keeping these things in mind, in this order: * 1) keep things fair between processes/task groups * 2) pick the "next" process, since someone really wants that to run * 3) pick the "last" process, for cache locality * 4) do not run the "skip" process, if something else is available */ static struct sched_entity * pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) { struct sched_entity *left = __pick_first_entity(cfs_rq); struct sched_entity *se; /* * If curr is set we have to see if its left of the leftmost entity * still in the tree, provided there was anything in the tree at all. */ if (!left || (curr && entity_before(curr, left))) left = curr; se = left; /* ideally we run the leftmost entity */ /* * Avoid running the skip buddy, if running something else can * be done without getting too unfair. */ if (cfs_rq->skip == se) { struct sched_entity *second; if (se == curr) { second = __pick_first_entity(cfs_rq); } else { second = __pick_next_entity(se); if (!second || (curr && entity_before(curr, second))) second = curr; } if (second && wakeup_preempt_entity(second, left) < 1) se = second; } /* * Prefer last buddy, try to return the CPU to a preempted task. */ if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) se = cfs_rq->last; /* * Someone really wants this to run. If it's not unfair, run it. */ if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) se = cfs_rq->next; clear_buddies(cfs_rq, se); return se; } static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq); static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) { /* * If still on the runqueue then deactivate_task() * was not called and update_curr() has to be done: */ if (prev->on_rq) update_curr(cfs_rq); /* throttle cfs_rqs exceeding runtime */ check_cfs_rq_runtime(cfs_rq); check_spread(cfs_rq, prev); if (prev->on_rq) { update_stats_wait_start(cfs_rq, prev); /* Put 'current' back into the tree. 
*/ __enqueue_entity(cfs_rq, prev); /* in !on_rq case, update occurred at dequeue */ update_load_avg(cfs_rq, prev, 0); } cfs_rq->curr = NULL; } static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) { /* * Update run-time statistics of the 'current'. */ update_curr(cfs_rq); /* * Ensure that runnable average is periodically updated. */ update_load_avg(cfs_rq, curr, UPDATE_TG); update_cfs_group(curr); #ifdef CONFIG_SCHED_HRTICK /* * queued ticks are scheduled to match the slice, so don't bother * validating it and just reschedule. */ if (queued) { resched_curr(rq_of(cfs_rq)); return; } /* * don't let the period tick interfere with the hrtick preemption */ if (!sched_feat(DOUBLE_TICK) && hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) return; #endif if (cfs_rq->nr_running > 1) check_preempt_tick(cfs_rq, curr); } /************************************************** * CFS bandwidth control machinery */ #ifdef CONFIG_CFS_BANDWIDTH #ifdef CONFIG_JUMP_LABEL static struct static_key __cfs_bandwidth_used; static inline bool cfs_bandwidth_used(void) { return static_key_false(&__cfs_bandwidth_used); } void cfs_bandwidth_usage_inc(void) { static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used); } void cfs_bandwidth_usage_dec(void) { static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used); } #else /* CONFIG_JUMP_LABEL */ static bool cfs_bandwidth_used(void) { return true; } void cfs_bandwidth_usage_inc(void) {} void cfs_bandwidth_usage_dec(void) {} #endif /* CONFIG_JUMP_LABEL */ /* * default period for cfs group bandwidth. * default: 0.1s, units: nanoseconds */ static inline u64 default_cfs_period(void) { return 100000000ULL; } static inline u64 sched_cfs_bandwidth_slice(void) { return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC; } /* * Replenish runtime according to assigned quota and update expiration time. * We use sched_clock_cpu directly instead of rq->clock to avoid adding * additional synchronization around rq->lock. 
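* As a worked example (the values are illustrative only): with cfs_period_us = 100000 and cfs_quota_us = 50000, each period refill below resets cfs_b->runtime to 50ms of runtime, i.e. the group may consume at most half a CPU per period.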
* * requires cfs_b->lock */ void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) { u64 now; if (cfs_b->quota == RUNTIME_INF) return; now = sched_clock_cpu(smp_processor_id()); cfs_b->runtime = cfs_b->quota; } static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) { return &tg->cfs_bandwidth; } /* rq->task_clock normalized against any time this cfs_rq has spent throttled */ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) { if (unlikely(cfs_rq->throttle_count)) return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time; return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time; } /* returns 0 on failure to allocate runtime */ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) { struct task_group *tg = cfs_rq->tg; struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); u64 amount = 0, min_amount; /* note: this is a positive sum as runtime_remaining <= 0 */ min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; raw_spin_lock(&cfs_b->lock); if (cfs_b->quota == RUNTIME_INF) amount = min_amount; else { start_cfs_bandwidth(cfs_b); if (cfs_b->runtime > 0) { amount = min(cfs_b->runtime, min_amount); cfs_b->runtime -= amount; cfs_b->idle = 0; } } raw_spin_unlock(&cfs_b->lock); cfs_rq->runtime_remaining += amount; return cfs_rq->runtime_remaining > 0; } static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) { /* dock delta_exec before expiring quota (as it could span periods) */ cfs_rq->runtime_remaining -= delta_exec; if (likely(cfs_rq->runtime_remaining > 0)) return; /* * if we're unable to extend our runtime we resched so that the active * hierarchy can be throttled */ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) resched_curr(rq_of(cfs_rq)); } static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) { if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) return; __account_cfs_rq_runtime(cfs_rq, delta_exec); } static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) { return cfs_bandwidth_used() && cfs_rq->throttled; } /* check whether cfs_rq, or any parent, is throttled */ static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) { return cfs_bandwidth_used() && cfs_rq->throttle_count; } /* * Ensure that neither of the group entities corresponding to src_cpu or * dest_cpu are members of a throttled hierarchy when performing group * load-balance operations. 
*/ static inline int throttled_lb_pair(struct task_group *tg, int src_cpu, int dest_cpu) { struct cfs_rq *src_cfs_rq, *dest_cfs_rq; src_cfs_rq = tg->cfs_rq[src_cpu]; dest_cfs_rq = tg->cfs_rq[dest_cpu]; return throttled_hierarchy(src_cfs_rq) || throttled_hierarchy(dest_cfs_rq); } static int tg_unthrottle_up(struct task_group *tg, void *data) { struct rq *rq = data; struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; cfs_rq->throttle_count--; if (!cfs_rq->throttle_count) { /* adjust cfs_rq_clock_task() */ cfs_rq->throttled_clock_task_time += rq_clock_task(rq) - cfs_rq->throttled_clock_task; /* Add cfs_rq with already running entity in the list */ if (cfs_rq->nr_running >= 1) list_add_leaf_cfs_rq(cfs_rq); } return 0; } static int tg_throttle_down(struct task_group *tg, void *data) { struct rq *rq = data; struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; /* group is entering throttled state, stop time */ if (!cfs_rq->throttle_count) { cfs_rq->throttled_clock_task = rq_clock_task(rq); list_del_leaf_cfs_rq(cfs_rq); } cfs_rq->throttle_count++; return 0; } static void throttle_cfs_rq(struct cfs_rq *cfs_rq) { struct rq *rq = rq_of(cfs_rq); struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); struct sched_entity *se; long task_delta, idle_task_delta, dequeue = 1; bool empty; se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; /* freeze hierarchy runnable averages while throttled */ rcu_read_lock(); walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); rcu_read_unlock(); task_delta = cfs_rq->h_nr_running; idle_task_delta = cfs_rq->idle_h_nr_running; for_each_sched_entity(se) { struct cfs_rq *qcfs_rq = cfs_rq_of(se); /* throttled entity or throttle-on-deactivate */ if (!se->on_rq) break; if (dequeue) dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); qcfs_rq->h_nr_running -= task_delta; qcfs_rq->idle_h_nr_running -= idle_task_delta; if (qcfs_rq->load.weight) dequeue = 0; } if (!se) sub_nr_running(rq, task_delta); cfs_rq->throttled = 1; cfs_rq->throttled_clock = rq_clock(rq); raw_spin_lock(&cfs_b->lock); empty = list_empty(&cfs_b->throttled_cfs_rq); /* * Add to the _head_ of the list, so that an already-started * distribute_cfs_runtime will not see us. If distribute_cfs_runtime is * not running, add to the tail so that later runqueues don't get starved. */ if (cfs_b->distribute_running) list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); else list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); /* * If we're the first throttled task, make sure the bandwidth * timer is running.
*/ if (empty) start_cfs_bandwidth(cfs_b); raw_spin_unlock(&cfs_b->lock); } void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) { struct rq *rq = rq_of(cfs_rq); struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); struct sched_entity *se; int enqueue = 1; long task_delta, idle_task_delta; se = cfs_rq->tg->se[cpu_of(rq)]; cfs_rq->throttled = 0; update_rq_clock(rq); raw_spin_lock(&cfs_b->lock); cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; list_del_rcu(&cfs_rq->throttled_list); raw_spin_unlock(&cfs_b->lock); /* update hierarchical throttle state */ walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); if (!cfs_rq->load.weight) return; task_delta = cfs_rq->h_nr_running; idle_task_delta = cfs_rq->idle_h_nr_running; for_each_sched_entity(se) { if (se->on_rq) enqueue = 0; cfs_rq = cfs_rq_of(se); if (enqueue) enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); cfs_rq->h_nr_running += task_delta; cfs_rq->idle_h_nr_running += idle_task_delta; if (cfs_rq_throttled(cfs_rq)) break; } assert_list_leaf_cfs_rq(rq); if (!se) add_nr_running(rq, task_delta); /* Determine whether we need to wake up potentially idle CPU: */ if (rq->curr == rq->idle && rq->cfs.nr_running) resched_curr(rq); } static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining) { struct cfs_rq *cfs_rq; u64 runtime; u64 starting_runtime = remaining; rcu_read_lock(); list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, throttled_list) { struct rq *rq = rq_of(cfs_rq); struct rq_flags rf; rq_lock_irqsave(rq, &rf); if (!cfs_rq_throttled(cfs_rq)) goto next; runtime = -cfs_rq->runtime_remaining + 1; if (runtime > remaining) runtime = remaining; remaining -= runtime; cfs_rq->runtime_remaining += runtime; /* we check whether we're throttled above */ if (cfs_rq->runtime_remaining > 0) unthrottle_cfs_rq(cfs_rq); next: rq_unlock_irqrestore(rq, &rf); if (!remaining) break; } rcu_read_unlock(); return starting_runtime - remaining; } /* * Responsible for refilling a task_group's bandwidth and unthrottling its * cfs_rqs as appropriate. If there has been no activity within the last * period the timer is deactivated until scheduling resumes; cfs_b->idle is * used to track this state. */ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags) { u64 runtime; int throttled; /* no need to continue the timer with no bandwidth constraint */ if (cfs_b->quota == RUNTIME_INF) goto out_deactivate; throttled = !list_empty(&cfs_b->throttled_cfs_rq); cfs_b->nr_periods += overrun; /* * idle depends on !throttled (for the case of a large deficit), and if * we're going inactive then everything else can be deferred */ if (cfs_b->idle && !throttled) goto out_deactivate; __refill_cfs_bandwidth_runtime(cfs_b); if (!throttled) { /* mark as potentially idle for the upcoming period */ cfs_b->idle = 1; return 0; } /* account preceding periods in which throttling occurred */ cfs_b->nr_throttled += overrun; /* * This check is repeated as we are holding onto the new bandwidth while * we unthrottle. This can potentially race with an unthrottled group * trying to acquire new bandwidth from the global pool. This can result * in us over-using our runtime if it is all used during this loop, but * only by limited amounts in that extreme case. 
*/ while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) { runtime = cfs_b->runtime; cfs_b->distribute_running = 1; raw_spin_unlock_irqrestore(&cfs_b->lock, flags); /* we can't nest cfs_b->lock while distributing bandwidth */ runtime = distribute_cfs_runtime(cfs_b, runtime); raw_spin_lock_irqsave(&cfs_b->lock, flags); cfs_b->distribute_running = 0; throttled = !list_empty(&cfs_b->throttled_cfs_rq); lsub_positive(&cfs_b->runtime, runtime); } /* * While we are ensured activity in the period following an * unthrottle, this also covers the case in which the new bandwidth is * insufficient to cover the existing bandwidth deficit. (Forcing the * timer to remain active while there are any throttled entities.) */ cfs_b->idle = 0; return 0; out_deactivate: return 1; } /* a cfs_rq won't donate quota below this amount */ static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC; /* minimum remaining period time to redistribute slack quota */ static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC; /* how long we wait to gather additional slack before distributing */ static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC; /* * Are we near the end of the current quota period? * * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the * hrtimer base being cleared by hrtimer_start. In the case of * migrate_hrtimers, base is never cleared, so we are fine. */ static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire) { struct hrtimer *refresh_timer = &cfs_b->period_timer; u64 remaining; /* if the call-back is running a quota refresh is already occurring */ if (hrtimer_callback_running(refresh_timer)) return 1; /* is a quota refresh about to occur? */ remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer)); if (remaining < min_expire) return 1; return 0; } static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b) { u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration; /* if there's a quota refresh soon don't bother with slack */ if (runtime_refresh_within(cfs_b, min_left)) return; /* don't push forwards an existing deferred unthrottle */ if (cfs_b->slack_started) return; cfs_b->slack_started = true; hrtimer_start(&cfs_b->slack_timer, ns_to_ktime(cfs_bandwidth_slack_period), HRTIMER_MODE_REL); } /* we know any runtime found here is valid as update_curr() precedes return */ static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) { struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; if (slack_runtime <= 0) return; raw_spin_lock(&cfs_b->lock); if (cfs_b->quota != RUNTIME_INF) { cfs_b->runtime += slack_runtime; /* we are under rq->lock, defer unthrottling using a timer */ if (cfs_b->runtime > sched_cfs_bandwidth_slice() && !list_empty(&cfs_b->throttled_cfs_rq)) start_cfs_slack_bandwidth(cfs_b); } raw_spin_unlock(&cfs_b->lock); /* even if it's not valid for return we don't want to try again */ cfs_rq->runtime_remaining -= slack_runtime; } static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) { if (!cfs_bandwidth_used()) return; if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) return; __return_cfs_rq_runtime(cfs_rq); } /* * This is done with a timer (instead of inline with bandwidth return) since * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs. 
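* The timer is armed for cfs_bandwidth_slack_period (5ms) after the first slack return and is deliberately not pushed forward by later returns, giving other cfs_rqs a chance to contribute unused runtime before it is redistributed.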
*/ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) { u64 runtime = 0, slice = sched_cfs_bandwidth_slice(); unsigned long flags; /* confirm we're still not at a refresh boundary */ raw_spin_lock_irqsave(&cfs_b->lock, flags); cfs_b->slack_started = false; if (cfs_b->distribute_running) { raw_spin_unlock_irqrestore(&cfs_b->lock, flags); return; } if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) { raw_spin_unlock_irqrestore(&cfs_b->lock, flags); return; } if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) runtime = cfs_b->runtime; if (runtime) cfs_b->distribute_running = 1; raw_spin_unlock_irqrestore(&cfs_b->lock, flags); if (!runtime) return; runtime = distribute_cfs_runtime(cfs_b, runtime); raw_spin_lock_irqsave(&cfs_b->lock, flags); lsub_positive(&cfs_b->runtime, runtime); cfs_b->distribute_running = 0; raw_spin_unlock_irqrestore(&cfs_b->lock, flags); } /* * When a group wakes up we want to make sure that its quota is not already * expired/exceeded, otherwise it may be allowed to steal additional ticks of * runtime as update_curr() throttling cannot trigger until it's on-rq. */ static void check_enqueue_throttle(struct cfs_rq *cfs_rq) { if (!cfs_bandwidth_used()) return; /* an active group must be handled by the update_curr()->put() path */ if (!cfs_rq->runtime_enabled || cfs_rq->curr) return; /* ensure the group is not already throttled */ if (cfs_rq_throttled(cfs_rq)) return; /* update runtime allocation */ account_cfs_rq_runtime(cfs_rq, 0); if (cfs_rq->runtime_remaining <= 0) throttle_cfs_rq(cfs_rq); } static void sync_throttle(struct task_group *tg, int cpu) { struct cfs_rq *pcfs_rq, *cfs_rq; if (!cfs_bandwidth_used()) return; if (!tg->parent) return; cfs_rq = tg->cfs_rq[cpu]; pcfs_rq = tg->parent->cfs_rq[cpu]; cfs_rq->throttle_count = pcfs_rq->throttle_count; cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); } /* conditionally throttle active cfs_rq's from put_prev_entity() */ static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { if (!cfs_bandwidth_used()) return false; if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) return false; /* * it's possible for a throttled entity to be forced into a running * state (e.g. set_curr_task), in this case we're finished.
*/ if (cfs_rq_throttled(cfs_rq)) return true; throttle_cfs_rq(cfs_rq); return true; } static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) { struct cfs_bandwidth *cfs_b = container_of(timer, struct cfs_bandwidth, slack_timer); do_sched_cfs_slack_timer(cfs_b); return HRTIMER_NORESTART; } extern const u64 max_cfs_quota_period; static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) { struct cfs_bandwidth *cfs_b = container_of(timer, struct cfs_bandwidth, period_timer); unsigned long flags; int overrun; int idle = 0; int count = 0; raw_spin_lock_irqsave(&cfs_b->lock, flags); for (;;) { overrun = hrtimer_forward_now(timer, cfs_b->period); if (!overrun) break; if (++count > 3) { u64 new, old = ktime_to_ns(cfs_b->period); new = (old * 147) / 128; /* ~115% */ new = min(new, max_cfs_quota_period); cfs_b->period = ns_to_ktime(new); /* since max is 1s, this is limited to 1e9^2, which fits in u64 */ cfs_b->quota *= new; cfs_b->quota = div64_u64(cfs_b->quota, old); pr_warn_ratelimited( "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n", smp_processor_id(), div_u64(new, NSEC_PER_USEC), div_u64(cfs_b->quota, NSEC_PER_USEC)); /* reset count so we don't come right back in here */ count = 0; } idle = do_sched_cfs_period_timer(cfs_b, overrun, flags); } if (idle) cfs_b->period_active = 0; raw_spin_unlock_irqrestore(&cfs_b->lock, flags); return idle ? HRTIMER_NORESTART : HRTIMER_RESTART; } void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) { raw_spin_lock_init(&cfs_b->lock); cfs_b->runtime = 0; cfs_b->quota = RUNTIME_INF; cfs_b->period = ns_to_ktime(default_cfs_period()); INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); cfs_b->period_timer.function = sched_cfs_period_timer; hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); cfs_b->slack_timer.function = sched_cfs_slack_timer; cfs_b->distribute_running = 0; cfs_b->slack_started = false; } static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) { cfs_rq->runtime_enabled = 0; INIT_LIST_HEAD(&cfs_rq->throttled_list); } void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) { u64 overrun; lockdep_assert_held(&cfs_b->lock); if (cfs_b->period_active) return; cfs_b->period_active = 1; overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); } static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) { /* init_cfs_bandwidth() was not called */ if (!cfs_b->throttled_cfs_rq.next) return; hrtimer_cancel(&cfs_b->period_timer); hrtimer_cancel(&cfs_b->slack_timer); } /* * Both these CPU hotplug callbacks race against unregister_fair_sched_group() * * The race is harmless, since modifying bandwidth settings of unhooked group * bits doesn't do much. 
*/ /* cpu online callback */ static void __maybe_unused update_runtime_enabled(struct rq *rq) { struct task_group *tg; lockdep_assert_held(&rq->lock); rcu_read_lock(); list_for_each_entry_rcu(tg, &task_groups, list) { struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; raw_spin_lock(&cfs_b->lock); cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF; raw_spin_unlock(&cfs_b->lock); } rcu_read_unlock(); } /* cpu offline callback */ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) { struct task_group *tg; lockdep_assert_held(&rq->lock); rcu_read_lock(); list_for_each_entry_rcu(tg, &task_groups, list) { struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; if (!cfs_rq->runtime_enabled) continue; /* * clock_task is not advancing so we just need to make sure * there's some valid quota amount */ cfs_rq->runtime_remaining = 1; /* * Offline rq is schedulable till CPU is completely disabled * in take_cpu_down(), so we prevent new cfs throttling here. */ cfs_rq->runtime_enabled = 0; if (cfs_rq_throttled(cfs_rq)) unthrottle_cfs_rq(cfs_rq); } rcu_read_unlock(); } #else /* CONFIG_CFS_BANDWIDTH */ static inline bool cfs_bandwidth_used(void) { return false; } static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) { return rq_clock_task(rq_of(cfs_rq)); } static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; } static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} static inline void sync_throttle(struct task_group *tg, int cpu) {} static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) { return 0; } static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) { return 0; } static inline int throttled_lb_pair(struct task_group *tg, int src_cpu, int dest_cpu) { return 0; } void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} #ifdef CONFIG_FAIR_GROUP_SCHED static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} #endif static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) { return NULL; } static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} static inline void update_runtime_enabled(struct rq *rq) {} static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {} #endif /* CONFIG_CFS_BANDWIDTH */ /************************************************** * CFS operations on tasks: */ #ifdef CONFIG_SCHED_HRTICK static void hrtick_start_fair(struct rq *rq, struct task_struct *p) { struct sched_entity *se = &p->se; struct cfs_rq *cfs_rq = cfs_rq_of(se); SCHED_WARN_ON(task_rq(p) != rq); if (rq->cfs.h_nr_running > 1) { u64 slice = sched_slice(cfs_rq, se); u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; s64 delta = slice - ran; if (delta < 0) { if (rq->curr == p) resched_curr(rq); return; } hrtick_start(rq, delta); } } /* * called from enqueue/dequeue and updates the hrtick when the * current task is from our class and nr_running is low enough * to matter.
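* ("low enough" here means fewer than sched_nr_latency runnable entities; that threshold defaults to 8, assuming the usual 6ms latency / 0.75ms minimum granularity tuning).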
*/ static void hrtick_update(struct rq *rq) { struct task_struct *curr = rq->curr; if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class) return; if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) hrtick_start_fair(rq, curr); } #else /* !CONFIG_SCHED_HRTICK */ static inline void hrtick_start_fair(struct rq *rq, struct task_struct *p) { } static inline void hrtick_update(struct rq *rq) { } #endif #ifdef CONFIG_SMP static inline unsigned long cpu_util(int cpu); static inline bool cpu_overutilized(int cpu) { return !fits_capacity(cpu_util(cpu), capacity_of(cpu)); } static inline void update_overutilized_status(struct rq *rq) { if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) { WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED); trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED); } } #else static inline void update_overutilized_status(struct rq *rq) { } #endif /* * The enqueue_task method is called before nr_running is * increased. Here we update the fair scheduling stats and * then put the task into the rbtree: */ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) { struct cfs_rq *cfs_rq; struct sched_entity *se = &p->se; int idle_h_nr_running = task_has_idle_policy(p); /* * The code below (indirectly) updates schedutil which looks at * the cfs_rq utilization to select a frequency. * Let's add the task's estimated utilization to the cfs_rq's * estimated utilization, before we update schedutil. */ util_est_enqueue(&rq->cfs, p); /* * If in_iowait is set, the code below may not trigger any cpufreq * utilization updates, so do it here explicitly with the IOWAIT flag * passed. */ if (p->in_iowait) cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT); for_each_sched_entity(se) { if (se->on_rq) break; cfs_rq = cfs_rq_of(se); enqueue_entity(cfs_rq, se, flags); /* * end evaluation on encountering a throttled cfs_rq * * note: in the case of encountering a throttled cfs_rq we will * post the final h_nr_running increment below. */ if (cfs_rq_throttled(cfs_rq)) break; cfs_rq->h_nr_running++; cfs_rq->idle_h_nr_running += idle_h_nr_running; flags = ENQUEUE_WAKEUP; } for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); cfs_rq->h_nr_running++; cfs_rq->idle_h_nr_running += idle_h_nr_running; if (cfs_rq_throttled(cfs_rq)) break; update_load_avg(cfs_rq, se, UPDATE_TG); update_cfs_group(se); } if (!se) { add_nr_running(rq, 1); /* * Since new tasks are assigned an initial util_avg equal to * half of the spare capacity of their CPU, tiny tasks have the * ability to cross the overutilized threshold, which will * result in the load balancer ruining all the task placement * done by EAS. As a way to mitigate that effect, do not account * for the first enqueue operation of new tasks during the * overutilized flag detection. * * A better way of solving this problem would be to wait for * the PELT signals of tasks to converge before taking them * into account, but that is not straightforward to implement, * and the following generally works well enough in practice. */ if (flags & ENQUEUE_WAKEUP) update_overutilized_status(rq); } if (cfs_bandwidth_used()) { /* * When bandwidth control is enabled; the cfs_rq_throttled() * breaks in the above iteration can result in incomplete * leaf list maintenance, resulting in triggering the assertion * below. 
*/ for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); if (list_add_leaf_cfs_rq(cfs_rq)) break; } } assert_list_leaf_cfs_rq(rq); hrtick_update(rq); } static void set_next_buddy(struct sched_entity *se); /* * The dequeue_task method is called before nr_running is * decreased. We remove the task from the rbtree and * update the fair scheduling stats: */ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) { struct cfs_rq *cfs_rq; struct sched_entity *se = &p->se; int task_sleep = flags & DEQUEUE_SLEEP; int idle_h_nr_running = task_has_idle_policy(p); for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); dequeue_entity(cfs_rq, se, flags); /* * end evaluation on encountering a throttled cfs_rq * * note: in the case of encountering a throttled cfs_rq we will * post the final h_nr_running decrement below. */ if (cfs_rq_throttled(cfs_rq)) break; cfs_rq->h_nr_running--; cfs_rq->idle_h_nr_running -= idle_h_nr_running; /* Don't dequeue parent if it has other entities besides us */ if (cfs_rq->load.weight) { /* Avoid re-evaluating load for this entity: */ se = parent_entity(se); /* * Bias pick_next to pick a task from this cfs_rq, as * p is sleeping when it is within its sched_slice. */ if (task_sleep && se && !throttled_hierarchy(cfs_rq)) set_next_buddy(se); break; } flags |= DEQUEUE_SLEEP; } for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); cfs_rq->h_nr_running--; cfs_rq->idle_h_nr_running -= idle_h_nr_running; if (cfs_rq_throttled(cfs_rq)) break; update_load_avg(cfs_rq, se, UPDATE_TG); update_cfs_group(se); } if (!se) sub_nr_running(rq, 1); util_est_dequeue(&rq->cfs, p, task_sleep); hrtick_update(rq); } #ifdef CONFIG_SMP /* Working cpumask for: load_balance, load_balance_newidle. */ DEFINE_PER_CPU(cpumask_var_t, load_balance_mask); DEFINE_PER_CPU(cpumask_var_t, select_idle_mask); #ifdef CONFIG_NO_HZ_COMMON static struct { cpumask_var_t idle_cpus_mask; atomic_t nr_cpus; int has_blocked; /* Idle CPUs have blocked load */ unsigned long next_balance; /* in jiffy units */ unsigned long next_blocked; /* Next update of blocked load in jiffies */ } nohz ____cacheline_aligned; #endif /* CONFIG_NO_HZ_COMMON */ /* CPU only has SCHED_IDLE tasks enqueued */ static int sched_idle_cpu(int cpu) { struct rq *rq = cpu_rq(cpu); return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running && rq->nr_running); } static unsigned long cpu_runnable_load(struct rq *rq) { return cfs_rq_runnable_load_avg(&rq->cfs); } static unsigned long capacity_of(int cpu) { return cpu_rq(cpu)->cpu_capacity; } static unsigned long cpu_avg_load_per_task(int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running); unsigned long load_avg = cpu_runnable_load(rq); if (nr_running) return load_avg / nr_running; return 0; } static void record_wakee(struct task_struct *p) { /* * Only decay a single time; tasks that have less than one wakeup per * jiffy will not have built up many flips. */ if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) { current->wakee_flips >>= 1; current->wakee_flip_decay_ts = jiffies; } if (current->last_wakee != p) { current->last_wakee = p; current->wakee_flips++; } } /* * Detect M:N waker/wakee relationships via a switching-frequency heuristic. * * A waker of many should wake a different task than the one last awakened * at a frequency roughly N times higher than one of its wakees.
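* As a concrete illustration (assumed numbers): on a machine whose LLC spans 8 CPUs, a dispatcher that has flipped wakees 64 times while one of its workers has flipped 8 times meets both criteria described below, so the pair is spread rather than stacked on shared cache.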
* * In order to determine whether we should let the load spread vs consolidating * to shared cache, we look for a minimum 'flip' frequency of llc_size in one * partner, and a factor of llc_size higher frequency in the other. * * With both conditions met, we can be relatively sure that the relationship is * non-monogamous, with partner count exceeding socket size. * * Waker/wakee being client/server, worker/dispatcher, interrupt source or * whatever is irrelevant; the spread criterion is simply that the apparent * partner count exceeds the socket size. */ static int wake_wide(struct task_struct *p) { unsigned int master = current->wakee_flips; unsigned int slave = p->wakee_flips; int factor = this_cpu_read(sd_llc_size); if (master < slave) swap(master, slave); if (slave < factor || master < slave * factor) return 0; return 1; } /* * The purpose of wake_affine() is to quickly determine on which CPU we can run * soonest. For the purpose of speed we only consider the waking and previous * CPU. * * wake_affine_idle() - only considers 'now', it checks if the waking CPU is * cache-affine and is (or will be) idle. * * wake_affine_weight() - considers the weight to reflect the average * scheduling latency of the CPUs. This seems to work * for the overloaded case. */ static int wake_affine_idle(int this_cpu, int prev_cpu, int sync) { /* * If this_cpu is idle, it implies the wakeup is from interrupt * context. Only allow the move if cache is shared. Otherwise an * interrupt intensive workload could force all tasks onto one * node depending on the IO topology or IRQ affinity settings. * * If the prev_cpu is idle and cache affine then avoid a migration. * There is no guarantee that the cache hot data from an interrupt * is more important than cache hot data on the prev_cpu and from * a cpufreq perspective, it's better to have higher utilisation * on one CPU. */ if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu)) return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu; if (sync && cpu_rq(this_cpu)->nr_running == 1) return this_cpu; return nr_cpumask_bits; } static int wake_affine_weight(struct sched_domain *sd, struct task_struct *p, int this_cpu, int prev_cpu, int sync) { s64 this_eff_load, prev_eff_load; unsigned long task_load; this_eff_load = cpu_runnable_load(cpu_rq(this_cpu)); if (sync) { unsigned long current_load = task_h_load(current); if (current_load > this_eff_load) return this_cpu; this_eff_load -= current_load; } task_load = task_h_load(p); this_eff_load += task_load; if (sched_feat(WA_BIAS)) this_eff_load *= 100; this_eff_load *= capacity_of(prev_cpu); prev_eff_load = cpu_runnable_load(cpu_rq(prev_cpu)); prev_eff_load -= task_load; if (sched_feat(WA_BIAS)) prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2; prev_eff_load *= capacity_of(this_cpu); /* * If sync, adjust the weight of prev_eff_load such that if * prev_eff == this_eff that select_idle_sibling() will consider * stacking the wakee on top of the waker if no other CPU is * idle. */ if (sync) prev_eff_load += 1; return this_eff_load < prev_eff_load ?
this_cpu : nr_cpumask_bits; } static int wake_affine(struct sched_domain *sd, struct task_struct *p, int this_cpu, int prev_cpu, int sync) { int target = nr_cpumask_bits; if (sched_feat(WA_IDLE)) target = wake_affine_idle(this_cpu, prev_cpu, sync); if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits) target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync); schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts); if (target == nr_cpumask_bits) return prev_cpu; schedstat_inc(sd->ttwu_move_affine); schedstat_inc(p->se.statistics.nr_wakeups_affine); return target; } static unsigned long cpu_util_without(int cpu, struct task_struct *p); static unsigned long capacity_spare_without(int cpu, struct task_struct *p) { return max_t(long, capacity_of(cpu) - cpu_util_without(cpu, p), 0); } /* * find_idlest_group finds and returns the least busy CPU group within the * domain. * * Assumes p is allowed on at least one CPU in sd. */ static struct sched_group * find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu, int sd_flag) { struct sched_group *idlest = NULL, *group = sd->groups; struct sched_group *most_spare_sg = NULL; unsigned long min_runnable_load = ULONG_MAX; unsigned long this_runnable_load = ULONG_MAX; unsigned long min_avg_load = ULONG_MAX, this_avg_load = ULONG_MAX; unsigned long most_spare = 0, this_spare = 0; int imbalance_scale = 100 + (sd->imbalance_pct-100)/2; unsigned long imbalance = scale_load_down(NICE_0_LOAD) * (sd->imbalance_pct-100) / 100; do { unsigned long load, avg_load, runnable_load; unsigned long spare_cap, max_spare_cap; int local_group; int i; /* Skip over this group if it has no CPUs allowed */ if (!cpumask_intersects(sched_group_span(group), p->cpus_ptr)) continue; local_group = cpumask_test_cpu(this_cpu, sched_group_span(group)); /* * Tally up the load of all CPUs in the group and find * the group containing the CPU with most spare capacity. */ avg_load = 0; runnable_load = 0; max_spare_cap = 0; for_each_cpu(i, sched_group_span(group)) { load = cpu_runnable_load(cpu_rq(i)); runnable_load += load; avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs); spare_cap = capacity_spare_without(i, p); if (spare_cap > max_spare_cap) max_spare_cap = spare_cap; } /* Adjust by relative CPU capacity of the group */ avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity; runnable_load = (runnable_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity; if (local_group) { this_runnable_load = runnable_load; this_avg_load = avg_load; this_spare = max_spare_cap; } else { if (min_runnable_load > (runnable_load + imbalance)) { /* * The runnable load is significantly smaller * so we can pick this new CPU: */ min_runnable_load = runnable_load; min_avg_load = avg_load; idlest = group; } else if ((runnable_load < (min_runnable_load + imbalance)) && (100*min_avg_load > imbalance_scale*avg_load)) { /* * The runnable loads are close so take the * blocked load into account through avg_load: */ min_avg_load = avg_load; idlest = group; } if (most_spare < max_spare_cap) { most_spare = max_spare_cap; most_spare_sg = group; } } } while (group = group->next, group != sd->groups); /* * The cross-over point between using spare capacity or least load * is too conservative for high utilization tasks on partially * utilized systems if we require spare_capacity > task_util(p), * so we allow for some task stuffing by using * spare_capacity > task_util(p)/2. 
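* For instance (assumed numbers): a task with a utilization of 300 can still be placed by spare capacity on a group whose best CPU has only 200 units spare, since 200 > 300/2.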
* * Spare capacity can't be used for fork because the utilization has * not been set yet, we must first select a rq to compute the initial * utilization. */ if (sd_flag & SD_BALANCE_FORK) goto skip_spare; if (this_spare > task_util(p) / 2 && imbalance_scale*this_spare > 100*most_spare) return NULL; if (most_spare > task_util(p) / 2) return most_spare_sg; skip_spare: if (!idlest) return NULL; /* * When comparing groups across NUMA domains, it's possible for the * local domain to be very lightly loaded relative to the remote * domains but "imbalance" skews the comparison making remote CPUs * look much more favourable. When considering cross-domain, add * imbalance to the runnable load on the remote node and consider * staying local. */ if ((sd->flags & SD_NUMA) && min_runnable_load + imbalance >= this_runnable_load) return NULL; if (min_runnable_load > (this_runnable_load + imbalance)) return NULL; if ((this_runnable_load < (min_runnable_load + imbalance)) && (100*this_avg_load < imbalance_scale*min_avg_load)) return NULL; return idlest; } /* * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group. */ static int find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) { unsigned long load, min_load = ULONG_MAX; unsigned int min_exit_latency = UINT_MAX; u64 latest_idle_timestamp = 0; int least_loaded_cpu = this_cpu; int shallowest_idle_cpu = -1, si_cpu = -1; int i; /* Check if we have any choice: */ if (group->group_weight == 1) return cpumask_first(sched_group_span(group)); /* Traverse only the allowed CPUs */ for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) { if (available_idle_cpu(i)) { struct rq *rq = cpu_rq(i); struct cpuidle_state *idle = idle_get_state(rq); if (idle && idle->exit_latency < min_exit_latency) { /* * We give priority to a CPU whose idle state * has the smallest exit latency irrespective * of any idle timestamp. */ min_exit_latency = idle->exit_latency; latest_idle_timestamp = rq->idle_stamp; shallowest_idle_cpu = i; } else if ((!idle || idle->exit_latency == min_exit_latency) && rq->idle_stamp > latest_idle_timestamp) { /* * If equal or no active idle state, then * the most recently idled CPU might have * a warmer cache. */ latest_idle_timestamp = rq->idle_stamp; shallowest_idle_cpu = i; } } else if (shallowest_idle_cpu == -1 && si_cpu == -1) { if (sched_idle_cpu(i)) { si_cpu = i; continue; } load = cpu_runnable_load(cpu_rq(i)); if (load < min_load) { min_load = load; least_loaded_cpu = i; } } } if (shallowest_idle_cpu != -1) return shallowest_idle_cpu; if (si_cpu != -1) return si_cpu; return least_loaded_cpu; } static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, int cpu, int prev_cpu, int sd_flag) { int new_cpu = cpu; if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr)) return prev_cpu; /* * We need task's util for capacity_spare_without, sync it up to * prev_cpu's last_update_time. 
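* (The sync is skipped for fork balancing below, since a forkee has no utilization history to synchronize yet.)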
*/ if (!(sd_flag & SD_BALANCE_FORK)) sync_entity_load_avg(&p->se); while (sd) { struct sched_group *group; struct sched_domain *tmp; int weight; if (!(sd->flags & sd_flag)) { sd = sd->child; continue; } group = find_idlest_group(sd, p, cpu, sd_flag); if (!group) { sd = sd->child; continue; } new_cpu = find_idlest_group_cpu(group, p, cpu); if (new_cpu == cpu) { /* Now try balancing at a lower domain level of 'cpu': */ sd = sd->child; continue; } /* Now try balancing at a lower domain level of 'new_cpu': */ cpu = new_cpu; weight = sd->span_weight; sd = NULL; for_each_domain(cpu, tmp) { if (weight <= tmp->span_weight) break; if (tmp->flags & sd_flag) sd = tmp; } } return new_cpu; } #ifdef CONFIG_SCHED_SMT DEFINE_STATIC_KEY_FALSE(sched_smt_present); EXPORT_SYMBOL_GPL(sched_smt_present); static inline void set_idle_cores(int cpu, int val) { struct sched_domain_shared *sds; sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); if (sds) WRITE_ONCE(sds->has_idle_cores, val); } static inline bool test_idle_cores(int cpu, bool def) { struct sched_domain_shared *sds; sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); if (sds) return READ_ONCE(sds->has_idle_cores); return def; } /* * Scans the local SMT mask to see if the entire core is idle, and records this * information in sd_llc_shared->has_idle_cores. * * Since SMT siblings share all cache levels, inspecting this limited remote * state should be fairly cheap. */ void __update_idle_core(struct rq *rq) { int core = cpu_of(rq); int cpu; rcu_read_lock(); if (test_idle_cores(core, true)) goto unlock; for_each_cpu(cpu, cpu_smt_mask(core)) { if (cpu == core) continue; if (!available_idle_cpu(cpu)) goto unlock; } set_idle_cores(core, 1); unlock: rcu_read_unlock(); } /* * Scan the entire LLC domain for idle cores; this dynamically switches off if * there are no idle cores left in the system; tracked through * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above. */ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target) { struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask); int core, cpu; if (!static_branch_likely(&sched_smt_present)) return -1; if (!test_idle_cores(target, false)) return -1; cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); for_each_cpu_wrap(core, cpus, target) { bool idle = true; for_each_cpu(cpu, cpu_smt_mask(core)) { __cpumask_clear_cpu(cpu, cpus); if (!available_idle_cpu(cpu)) idle = false; } if (idle) return core; } /* * Failed to find an idle core; stop looking for one. */ set_idle_cores(target, 0); return -1; } /* * Scan the local SMT mask for idle CPUs. */ static int select_idle_smt(struct task_struct *p, int target) { int cpu, si_cpu = -1; if (!static_branch_likely(&sched_smt_present)) return -1; for_each_cpu(cpu, cpu_smt_mask(target)) { if (!cpumask_test_cpu(cpu, p->cpus_ptr)) continue; if (available_idle_cpu(cpu)) return cpu; if (si_cpu == -1 && sched_idle_cpu(cpu)) si_cpu = cpu; } return si_cpu; } #else /* CONFIG_SCHED_SMT */ static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target) { return -1; } static inline int select_idle_smt(struct task_struct *p, int target) { return -1; } #endif /* CONFIG_SCHED_SMT */ /* * Scan the LLC domain for idle CPUs; this is dynamically regulated by * comparing the average scan cost (tracked in sd->avg_scan_cost) against the * average idle time for this rq (as found in rq->avg_idle). 
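* Rough worked numbers (illustrative, not measured): with rq->avg_idle at 512us the fuzzed avg_idle below is ~1us; if the average scan cost is ~250ns per CPU, SIS_PROP allows roughly span_weight * 4 CPUs to be scanned before giving up.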
*/ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target) { struct sched_domain *this_sd; u64 avg_cost, avg_idle; u64 time, cost; s64 delta; int this = smp_processor_id(); int cpu, nr = INT_MAX, si_cpu = -1; this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc)); if (!this_sd) return -1; /* * Due to large variance we need a large fuzz factor; hackbench in * particularly is sensitive here. */ avg_idle = this_rq()->avg_idle / 512; avg_cost = this_sd->avg_scan_cost + 1; if (sched_feat(SIS_AVG_CPU) && avg_idle < avg_cost) return -1; if (sched_feat(SIS_PROP)) { u64 span_avg = sd->span_weight * avg_idle; if (span_avg > 4*avg_cost) nr = div_u64(span_avg, avg_cost); else nr = 4; } time = cpu_clock(this); for_each_cpu_wrap(cpu, sched_domain_span(sd), target) { if (!--nr) return si_cpu; if (!cpumask_test_cpu(cpu, p->cpus_ptr)) continue; if (available_idle_cpu(cpu)) break; if (si_cpu == -1 && sched_idle_cpu(cpu)) si_cpu = cpu; } time = cpu_clock(this) - time; cost = this_sd->avg_scan_cost; delta = (s64)(time - cost) / 8; this_sd->avg_scan_cost += delta; return cpu; } /* * Try and locate an idle core/thread in the LLC cache domain. */ static int select_idle_sibling(struct task_struct *p, int prev, int target) { struct sched_domain *sd; int i, recent_used_cpu; if (available_idle_cpu(target) || sched_idle_cpu(target)) return target; /* * If the previous CPU is cache affine and idle, don't be stupid: */ if (prev != target && cpus_share_cache(prev, target) && (available_idle_cpu(prev) || sched_idle_cpu(prev))) return prev; /* Check a recently used CPU as a potential idle candidate: */ recent_used_cpu = p->recent_used_cpu; if (recent_used_cpu != prev && recent_used_cpu != target && cpus_share_cache(recent_used_cpu, target) && (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) && cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr)) { /* * Replace recent_used_cpu with prev as it is a potential * candidate for the next wake: */ p->recent_used_cpu = prev; return recent_used_cpu; } sd = rcu_dereference(per_cpu(sd_llc, target)); if (!sd) return target; i = select_idle_core(p, sd, target); if ((unsigned)i < nr_cpumask_bits) return i; i = select_idle_cpu(p, sd, target); if ((unsigned)i < nr_cpumask_bits) return i; i = select_idle_smt(p, target); if ((unsigned)i < nr_cpumask_bits) return i; return target; } /** * Amount of capacity of a CPU that is (estimated to be) used by CFS tasks * @cpu: the CPU to get the utilization of * * The unit of the return value must be the one of capacity so we can compare * the utilization with the capacity of the CPU that is available for CFS task * (ie cpu_capacity). * * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the * recent utilization of currently non-runnable tasks on a CPU. It represents * the amount of utilization of a CPU in the range [0..capacity_orig] where * capacity_orig is the cpu_capacity available at the highest frequency * (arch_scale_freq_capacity()). * The utilization of a CPU converges towards a sum equal to or less than the * current capacity (capacity_curr <= capacity_orig) of the CPU because it is * the running time on this CPU scaled by capacity_curr. * * The estimated utilization of a CPU is defined to be the maximum between its * cfs_rq.avg.util_avg and the sum of the estimated utilization of the tasks * currently RUNNABLE on that CPU. * This allows to properly represent the expected utilization of a CPU which * has just got a big task running since a long sleep period. 
At the same time * however it preserves the benefits of the "blocked utilization" in * describing the potential for other tasks waking up on the same CPU. * * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even * higher than capacity_orig because of unfortunate rounding in * cfs.avg.util_avg or just after migrating tasks and new task wakeups until * the average stabilizes with the new running time. We need to check that the * utilization stays within the range of [0..capacity_orig] and cap it if * necessary. Without utilization capping, a group could be seen as overloaded * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of * available capacity. We allow utilization to overshoot capacity_curr (but not * capacity_orig) as it is useful for predicting the capacity required after * task migrations (scheduler-driven DVFS). * * Return: the (estimated) utilization for the specified CPU */ static inline unsigned long cpu_util(int cpu) { struct cfs_rq *cfs_rq; unsigned int util; cfs_rq = &cpu_rq(cpu)->cfs; util = READ_ONCE(cfs_rq->avg.util_avg); if (sched_feat(UTIL_EST)) util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued)); return min_t(unsigned long, util, capacity_orig_of(cpu)); } /* * cpu_util_without: compute cpu utilization without any contributions from *p * @cpu: the CPU whose utilization is requested * @p: the task whose utilization should be discounted * * The utilization of a CPU is defined by the utilization of tasks currently * enqueued on that CPU as well as tasks which are currently sleeping after an * execution on that CPU. * * This method returns the utilization of the specified CPU by discounting the * utilization of the specified task, whenever the task is currently * contributing to the CPU utilization. */ static unsigned long cpu_util_without(int cpu, struct task_struct *p) { struct cfs_rq *cfs_rq; unsigned int util; /* Task has no contribution or is new */ if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) return cpu_util(cpu); cfs_rq = &cpu_rq(cpu)->cfs; util = READ_ONCE(cfs_rq->avg.util_avg); /* Discount task's util from CPU's util */ lsub_positive(&util, task_util(p)); /* * Covered cases: * * a) if *p is the only task sleeping on this CPU, then: * cpu_util (== task_util) > util_est (== 0) * and thus we return: * cpu_util_without = (cpu_util - task_util) = 0 * * b) if other tasks are SLEEPING on this CPU, which is now exiting * IDLE, then: * cpu_util >= task_util * cpu_util > util_est (== 0) * and thus we discount *p's blocked utilization to return: * cpu_util_without = (cpu_util - task_util) >= 0 * * c) if other tasks are RUNNABLE on that CPU and * util_est > cpu_util * then we use util_est since it returns a more restrictive * estimation of the spare capacity on that CPU, by just * considering the expected utilization of tasks already * runnable on that CPU. * * Cases a) and b) are covered by the above code, while case c) is * covered by the following code when estimated utilization is * enabled.
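* (Illustrative numbers: if the discounted util_avg is 100 but the estimated utilization still enqueued on the CPU sums to 250, the function reports 250.)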
*/ if (sched_feat(UTIL_EST)) { unsigned int estimated = READ_ONCE(cfs_rq->avg.util_est.enqueued); /* * Despite the following checks we still have a small window * for a possible race, when an execl's select_task_rq_fair() * races with LB's detach_task(): * * detach_task() * p->on_rq = TASK_ON_RQ_MIGRATING; * ---------------------------------- A * deactivate_task() \ * dequeue_task() + RaceTime * util_est_dequeue() / * ---------------------------------- B * * The additional check on "current == p" it's required to * properly fix the execl regression and it helps in further * reducing the chances for the above race. */ if (unlikely(task_on_rq_queued(p) || current == p)) lsub_positive(&estimated, _task_util_est(p)); util = max(util, estimated); } /* * Utilization (estimated) can exceed the CPU capacity, thus let's * clamp to the maximum CPU capacity to ensure consistency with * the cpu_util call. */ return min_t(unsigned long, util, capacity_orig_of(cpu)); } /* * Disable WAKE_AFFINE in the case where task @p doesn't fit in the * capacity of either the waking CPU @cpu or the previous CPU @prev_cpu. * * In that case WAKE_AFFINE doesn't make sense and we'll let * BALANCE_WAKE sort things out. */ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu) { long min_cap, max_cap; if (!static_branch_unlikely(&sched_asym_cpucapacity)) return 0; min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu)); max_cap = cpu_rq(cpu)->rd->max_cpu_capacity; /* Minimum capacity is close to max, no need to abort wake_affine */ if (max_cap - min_cap < max_cap >> 3) return 0; /* Bring task utilization in sync with prev_cpu */ sync_entity_load_avg(&p->se); return !task_fits_capacity(p, min_cap); } /* * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued) * to @dst_cpu. */ static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu) { struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg); /* * If @p migrates from @cpu to another, remove its contribution. Or, * if @p migrates from another CPU to @cpu, add its contribution. In * the other cases, @cpu is not impacted by the migration, so the * util_avg should already be correct. */ if (task_cpu(p) == cpu && dst_cpu != cpu) sub_positive(&util, task_util(p)); else if (task_cpu(p) != cpu && dst_cpu == cpu) util += task_util(p); if (sched_feat(UTIL_EST)) { util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued); /* * During wake-up, the task isn't enqueued yet and doesn't * appear in the cfs_rq->avg.util_est.enqueued of any rq, * so just add it (if needed) to "simulate" what will be * cpu_util() after the task has been enqueued. */ if (dst_cpu == cpu) util_est += _task_util_est(p); util = max(util, util_est); } return min(util, capacity_orig_of(cpu)); } /* * compute_energy(): Estimates the energy that would be consumed if @p was * migrated to @dst_cpu. compute_energy() predicts what will be the utilization * landscape of the * CPUs after the task migration, and uses the Energy Model * to compute what would be the energy if we decided to actually migrate that * task. */ static long compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd) { unsigned int max_util, util_cfs, cpu_util, cpu_cap; unsigned long sum_util, energy = 0; struct task_struct *tsk; int cpu; for (; pd; pd = pd->next) { struct cpumask *pd_mask = perf_domain_span(pd); /* * The energy model mandates all the CPUs of a performance * domain have the same capacity. 
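 * Illustrative walk-through (numbers invented for this sketch): in a
 * two-CPU domain with cpu_cap = 1024 and per-CPU busy utilization of
 * 300 and 200, sum_util = 500; if placing @p raises the busiest CPU
 * to 400, max_util = 400 selects the OPP and em_pd_energy() roughly
 * scales that OPP's cost by sum_util / cpu_cap.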
*/ cpu_cap = arch_scale_cpu_capacity(cpumask_first(pd_mask)); max_util = sum_util = 0; /* * The capacity state of CPUs of the current rd can be driven by * CPUs of another rd if they belong to the same performance * domain. So, account for the utilization of these CPUs too * by masking pd with cpu_online_mask instead of the rd span. * * If an entire performance domain is outside of the current rd, * it will not appear in its pd list and will not be accounted * by compute_energy(). */ for_each_cpu_and(cpu, pd_mask, cpu_online_mask) { util_cfs = cpu_util_next(cpu, p, dst_cpu); /* * Busy time computation: utilization clamping is not * required since the ratio (sum_util / cpu_capacity) * is already enough to scale the EM reported power * consumption at the (eventually clamped) cpu_capacity. */ sum_util += schedutil_cpu_util(cpu, util_cfs, cpu_cap, ENERGY_UTIL, NULL); /* * Performance domain frequency: utilization clamping * must be considered since it affects the selection * of the performance domain frequency. * NOTE: in case RT tasks are running, by default the * FREQUENCY_UTIL's utilization can be max OPP. */ tsk = cpu == dst_cpu ? p : NULL; cpu_util = schedutil_cpu_util(cpu, util_cfs, cpu_cap, FREQUENCY_UTIL, tsk); max_util = max(max_util, cpu_util); } energy += em_pd_energy(pd->em_pd, max_util, sum_util); } return energy; } /* * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the * waking task. find_energy_efficient_cpu() looks for the CPU with maximum * spare capacity in each performance domain and uses it as a potential * candidate to execute the task. Then, it uses the Energy Model to figure * out which of the CPU candidates is the most energy-efficient. * * The rationale for this heuristic is as follows. In a performance domain, * all the most energy efficient CPU candidates (according to the Energy * Model) are those for which we'll request a low frequency. When there are * several CPUs for which the frequency request will be the same, we don't * have enough data to break the tie between them, because the Energy Model * only includes active power costs. With this model, if we assume that * frequency requests follow utilization (e.g. using schedutil), the CPU with * the maximum spare capacity in a performance domain is guaranteed to be among * the best candidates of the performance domain. * * In practice, it could be preferable from an energy standpoint to pack * small tasks on a CPU in order to let other CPUs go in deeper idle states, * but that could also hurt our chances to go cluster idle, and we have no * ways to tell with the current Energy Model if this is actually a good * idea or not. So, find_energy_efficient_cpu() basically favors * cluster-packing, and spreading inside a cluster. That should at least be * a good thing for latency, and this is consistent with the idea that most * of the energy savings of EAS come from the asymmetry of the system, and * not so much from breaking the tie between identical CPUs. That's also the * reason why EAS is enabled in the topology code only for systems where * SD_ASYM_CPUCAPACITY is set. * * NOTE: Forkees are not accepted in the energy-aware wake-up path because * they don't have any useful utilization data yet and it's not possible to * forecast their impact on energy consumption. Consequently, they will be * placed by find_idlest_cpu() on the least loaded CPU, which might turn out * to be energy-inefficient in some use-cases. 
The alternative would be to * bias new tasks towards specific types of CPUs first, or to try to infer * their util_avg from the parent task, but those heuristics could hurt * other use-cases too. So, until someone finds a better way to solve this, * let's keep things simple by re-using the existing slow path. */ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) { unsigned long prev_energy = ULONG_MAX, best_energy = ULONG_MAX; struct root_domain *rd = cpu_rq(smp_processor_id())->rd; int cpu, best_energy_cpu = prev_cpu; struct perf_domain *head, *pd; unsigned long cpu_cap, util; struct sched_domain *sd; rcu_read_lock(); pd = rcu_dereference(rd->pd); if (!pd || READ_ONCE(rd->overutilized)) goto fail; head = pd; /* * Energy-aware wake-up happens on the lowest sched_domain starting * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu. */ sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity)); while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) sd = sd->parent; if (!sd) goto fail; sync_entity_load_avg(&p->se); if (!task_util_est(p)) goto unlock; for (; pd; pd = pd->next) { unsigned long cur_energy, spare_cap, max_spare_cap = 0; int max_spare_cap_cpu = -1; for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) { if (!cpumask_test_cpu(cpu, p->cpus_ptr)) continue; /* Skip CPUs that will be overutilized. */ util = cpu_util_next(cpu, p, cpu); cpu_cap = capacity_of(cpu); if (!fits_capacity(util, cpu_cap)) continue; /* Always use prev_cpu as a candidate. */ if (cpu == prev_cpu) { prev_energy = compute_energy(p, prev_cpu, head); best_energy = min(best_energy, prev_energy); continue; } /* * Find the CPU with the maximum spare capacity in * the performance domain */ spare_cap = cpu_cap - util; if (spare_cap > max_spare_cap) { max_spare_cap = spare_cap; max_spare_cap_cpu = cpu; } } /* Evaluate the energy impact of using this CPU. */ if (max_spare_cap_cpu >= 0) { cur_energy = compute_energy(p, max_spare_cap_cpu, head); if (cur_energy < best_energy) { best_energy = cur_energy; best_energy_cpu = max_spare_cap_cpu; } } } unlock: rcu_read_unlock(); /* * Pick the best CPU if prev_cpu cannot be used, or if it saves at * least 6% of the energy used by prev_cpu. */ if (prev_energy == ULONG_MAX) return best_energy_cpu; if ((prev_energy - best_energy) > (prev_energy >> 4)) return best_energy_cpu; return prev_cpu; fail: rcu_read_unlock(); return -1; } /* * select_task_rq_fair: Select target runqueue for the waking task in domains * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE, * SD_BALANCE_FORK, or SD_BALANCE_EXEC. * * Balances load by selecting the idlest CPU in the idlest group, or under * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set. * * Returns the target CPU number. * * preempt must be disabled. 
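 * For example, a WF_SYNC wakeup (say, a pipe writer waking its reader)
 * issued by a waker that is not exiting sets 'sync' below, which lets
 * wake_affine() treat the waker as about to sleep and discount its load
 * when comparing the waking CPU with prev_cpu.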
*/ static int select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags) { struct sched_domain *tmp, *sd = NULL; int cpu = smp_processor_id(); int new_cpu = prev_cpu; int want_affine = 0; int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING); if (sd_flag & SD_BALANCE_WAKE) { record_wakee(p); if (sched_energy_enabled()) { new_cpu = find_energy_efficient_cpu(p, prev_cpu); if (new_cpu >= 0) return new_cpu; new_cpu = prev_cpu; } want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) && cpumask_test_cpu(cpu, p->cpus_ptr); } rcu_read_lock(); for_each_domain(cpu, tmp) { if (!(tmp->flags & SD_LOAD_BALANCE)) break; /* * If both 'cpu' and 'prev_cpu' are part of this domain, * cpu is a valid SD_WAKE_AFFINE target. */ if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { if (cpu != prev_cpu) new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync); sd = NULL; /* Prefer wake_affine over balance flags */ break; } if (tmp->flags & sd_flag) sd = tmp; else if (!want_affine) break; } if (unlikely(sd)) { /* Slow path */ new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag); } else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */ /* Fast path */ new_cpu = select_idle_sibling(p, prev_cpu, new_cpu); if (want_affine) current->recent_used_cpu = cpu; } rcu_read_unlock(); return new_cpu; } static void detach_entity_cfs_rq(struct sched_entity *se); /* * Called immediately before a task is migrated to a new CPU; task_cpu(p) and * cfs_rq_of(p) references at time of call are still valid and identify the * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held. */ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu) { /* * As blocked tasks retain absolute vruntime the migration needs to * deal with this by subtracting the old and adding the new * min_vruntime -- the latter is done by enqueue_entity() when placing * the task on the new runqueue. */ if (p->state == TASK_WAKING) { struct sched_entity *se = &p->se; struct cfs_rq *cfs_rq = cfs_rq_of(se); u64 min_vruntime; #ifndef CONFIG_64BIT u64 min_vruntime_copy; do { min_vruntime_copy = cfs_rq->min_vruntime_copy; smp_rmb(); min_vruntime = cfs_rq->min_vruntime; } while (min_vruntime != min_vruntime_copy); #else min_vruntime = cfs_rq->min_vruntime; #endif se->vruntime -= min_vruntime; } if (p->on_rq == TASK_ON_RQ_MIGRATING) { /* * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old' * rq->lock and can modify state directly. */ lockdep_assert_held(&task_rq(p)->lock); detach_entity_cfs_rq(&p->se); } else { /* * We are supposed to update the task to "current" time, then * its up to date and ready to go to new CPU/cfs_rq. But we * have difficulty in getting what current time is, so simply * throw away the out-of-date time. This will result in the * wakee task is less decayed, but giving the wakee more load * sounds not bad. */ remove_entity_load_avg(&p->se); } /* Tell new CPU we are migrated */ p->se.avg.last_update_time = 0; /* We have migrated, no longer consider this task hot */ p->se.exec_start = 0; update_scan_period(p, new_cpu); } static void task_dead_fair(struct task_struct *p) { remove_entity_load_avg(&p->se); } #endif /* CONFIG_SMP */ static unsigned long wakeup_gran(struct sched_entity *se) { unsigned long gran = sysctl_sched_wakeup_granularity; /* * Since its curr running now, convert the gran from real-time * to virtual-time in his units. * * By using 'se' instead of 'curr' we penalize light tasks, so * they get preempted easier. 
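 * (Illustrative numbers, not from the original comment: with a 1ms
 * wakeup granularity and a nice +5 wakee of weight 335 vs NICE_0_LOAD
 * 1024, calc_delta_fair() stretches the gran to roughly 3ms of virtual
 * time, so the wakee only preempts once its vruntime trails curr's by
 * more than that.)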
That is, if 'se' < 'curr' then * the resulting gran will be larger, therefore penalizing the * lighter, if otoh 'se' > 'curr' then the resulting gran will * be smaller, again penalizing the lighter task. * * This is especially important for buddies when the leftmost * task is higher priority than the buddy. */ return calc_delta_fair(gran, se); } /* * Should 'se' preempt 'curr'. * * |s1 * |s2 * |s3 * g * |<--->|c * * w(c, s1) = -1 * w(c, s2) = 0 * w(c, s3) = 1 * */ static int wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) { s64 gran, vdiff = curr->vruntime - se->vruntime; if (vdiff <= 0) return -1; gran = wakeup_gran(se); if (vdiff > gran) return 1; return 0; } static void set_last_buddy(struct sched_entity *se) { if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se)))) return; for_each_sched_entity(se) { if (SCHED_WARN_ON(!se->on_rq)) return; cfs_rq_of(se)->last = se; } } static void set_next_buddy(struct sched_entity *se) { if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se)))) return; for_each_sched_entity(se) { if (SCHED_WARN_ON(!se->on_rq)) return; cfs_rq_of(se)->next = se; } } static void set_skip_buddy(struct sched_entity *se) { for_each_sched_entity(se) cfs_rq_of(se)->skip = se; } /* * Preempt the current task with a newly woken task if needed: */ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) { struct task_struct *curr = rq->curr; struct sched_entity *se = &curr->se, *pse = &p->se; struct cfs_rq *cfs_rq = task_cfs_rq(curr); int scale = cfs_rq->nr_running >= sched_nr_latency; int next_buddy_marked = 0; if (unlikely(se == pse)) return; /* * This is possible from callers such as attach_tasks(), in which we * unconditionally check_prempt_curr() after an enqueue (which may have * lead to a throttle). This both saves work and prevents false * next-buddy nomination below. */ if (unlikely(throttled_hierarchy(cfs_rq_of(pse)))) return; if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) { set_next_buddy(pse); next_buddy_marked = 1; } /* * We can come here with TIF_NEED_RESCHED already set from new task * wake up path. * * Note: this also catches the edge-case of curr being in a throttled * group (e.g. via set_curr_task), since update_curr() (in the * enqueue of curr) will have resulted in resched being set. This * prevents us from potentially nominating it as a false LAST_BUDDY * below. */ if (test_tsk_need_resched(curr)) return; /* Idle tasks are by definition preempted by non-idle tasks. */ if (unlikely(task_has_idle_policy(curr)) && likely(!task_has_idle_policy(p))) goto preempt; /* * Batch and idle tasks do not preempt non-idle tasks (their preemption * is driven by the tick): */ if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION)) return; find_matching_se(&se, &pse); update_curr(cfs_rq_of(se)); BUG_ON(!pse); if (wakeup_preempt_entity(se, pse) == 1) { /* * Bias pick_next to pick the sched entity that is * triggering this preemption. */ if (!next_buddy_marked) set_next_buddy(pse); goto preempt; } return; preempt: resched_curr(rq); /* * Only set the backward buddy when the current task is still * on the rq. This can happen when a wakeup gets interleaved * with schedule on the ->pre_schedule() or idle_balance() * point, either of which can * drop the rq lock. * * Also, during early boot the idle thread is in the fair class, * for obvious reasons its a bad idea to schedule back to it. 
*/ if (unlikely(!se->on_rq || curr == rq->idle)) return; if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) set_last_buddy(se); } static struct task_struct * pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) { struct cfs_rq *cfs_rq = &rq->cfs; struct sched_entity *se; struct task_struct *p; int new_tasks; again: if (!cfs_rq->nr_running) goto idle; #ifdef CONFIG_FAIR_GROUP_SCHED if (prev->sched_class != &fair_sched_class) goto simple; /* * Because of the set_next_buddy() in dequeue_task_fair() it is rather * likely that a next task is from the same cgroup as the current. * * Therefore attempt to avoid putting and setting the entire cgroup * hierarchy, only change the part that actually changes. */ do { struct sched_entity *curr = cfs_rq->curr; /* * Since we got here without doing put_prev_entity() we also * have to consider cfs_rq->curr. If it is still a runnable * entity, update_curr() will update its vruntime, otherwise * forget we've ever seen it. */ if (curr) { if (curr->on_rq) update_curr(cfs_rq); else curr = NULL; /* * This call to check_cfs_rq_runtime() will do the * throttle and dequeue its entity in the parent(s). * Therefore the nr_running test will indeed * be correct. */ if (unlikely(check_cfs_rq_runtime(cfs_rq))) { cfs_rq = &rq->cfs; if (!cfs_rq->nr_running) goto idle; goto simple; } } se = pick_next_entity(cfs_rq, curr); cfs_rq = group_cfs_rq(se); } while (cfs_rq); p = task_of(se); /* * Since we haven't yet done put_prev_entity and if the selected task * is a different task than we started out with, try and touch the * least amount of cfs_rqs. */ if (prev != p) { struct sched_entity *pse = &prev->se; while (!(cfs_rq = is_same_group(se, pse))) { int se_depth = se->depth; int pse_depth = pse->depth; if (se_depth <= pse_depth) { put_prev_entity(cfs_rq_of(pse), pse); pse = parent_entity(pse); } if (se_depth >= pse_depth) { set_next_entity(cfs_rq_of(se), se); se = parent_entity(se); } } put_prev_entity(cfs_rq, pse); set_next_entity(cfs_rq, se); } goto done; simple: #endif put_prev_task(rq, prev); do { se = pick_next_entity(cfs_rq, NULL); set_next_entity(cfs_rq, se); cfs_rq = group_cfs_rq(se); } while (cfs_rq); p = task_of(se); done: __maybe_unused; #ifdef CONFIG_SMP /* * Move the next running task to the front of * the list, so our cfs_tasks list becomes MRU * one. */ list_move(&p->se.group_node, &rq->cfs_tasks); #endif if (hrtick_enabled(rq)) hrtick_start_fair(rq, p); update_misfit_status(p, rq); return p; idle: update_misfit_status(NULL, rq); new_tasks = idle_balance(rq, rf); /* * Because idle_balance() releases (and re-acquires) rq->lock, it is * possible for any higher priority task to appear. In that case we * must re-start the pick_next_entity() loop. */ if (new_tasks < 0) return RETRY_TASK; if (new_tasks > 0) goto again; /* * rq is about to be idle, check if we need to update the * lost_idle_time of clock_pelt */ update_idle_rq_clock_pelt(rq); return NULL; } /* * Account for a descheduled task: */ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) { struct sched_entity *se = &prev->se; struct cfs_rq *cfs_rq; for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); put_prev_entity(cfs_rq, se); } } /* * sched_yield() is very simple * * The magic of dealing with the ->skip buddy is in pick_next_entity. */ static void yield_task_fair(struct rq *rq) { struct task_struct *curr = rq->curr; struct cfs_rq *cfs_rq = task_cfs_rq(curr); struct sched_entity *se = &curr->se; /* * Are we the only task in the tree? 
*/ if (unlikely(rq->nr_running == 1)) return; clear_buddies(cfs_rq, se); if (curr->policy != SCHED_BATCH) { update_rq_clock(rq); /* * Update run-time statistics of the 'current'. */ update_curr(cfs_rq); /* * Tell update_rq_clock() that we've just updated, * so we don't do microscopic update in schedule() * and double the fastpath cost. */ rq_clock_skip_update(rq); } set_skip_buddy(se); } static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt) { struct sched_entity *se = &p->se; /* throttled hierarchies are not runnable */ if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) return false; /* Tell the scheduler that we'd really like pse to run next. */ set_next_buddy(se); yield_task_fair(rq); return true; } #ifdef CONFIG_SMP /************************************************** * Fair scheduling class load-balancing methods. * * BASICS * * The purpose of load-balancing is to achieve the same basic fairness the * per-CPU scheduler provides, namely provide a proportional amount of compute * time to each task. This is expressed in the following equation: * * W_i,n/P_i == W_j,n/P_j for all i,j (1) * * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight * W_i,0 is defined as: * * W_i,0 = \Sum_j w_i,j (2) * * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight * is derived from the nice value as per sched_prio_to_weight[]. * * The weight average is an exponential decay average of the instantaneous * weight: * * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3) * * C_i is the compute capacity of CPU i, typically it is the * fraction of 'recent' time available for SCHED_OTHER task execution. But it * can also include other factors [XXX]. * * To achieve this balance we define a measure of imbalance which follows * directly from (1): * * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4) * * We them move tasks around to minimize the imbalance. In the continuous * function space it is obvious this converges, in the discrete case we get * a few fun cases generally called infeasible weight scenarios. * * [XXX expand on: * - infeasible weights; * - local vs global optima in the discrete case. ] * * * SCHED DOMAINS * * In order to solve the imbalance equation (4), and avoid the obvious O(n^2) * for all i,j solution, we create a tree of CPUs that follows the hardware * topology where each level pairs two lower groups (or better). This results * in O(log n) layers. Furthermore we reduce the number of CPUs going up the * tree to only the first of the previous level and we decrease the frequency * of load-balance at each level inv. proportional to the number of CPUs in * the groups. * * This yields: * * log_2 n 1 n * \Sum { --- * --- * 2^i } = O(n) (5) * i = 0 2^i 2^i * `- size of each group * | | `- number of CPUs doing load-balance * | `- freq * `- sum over all levels * * Coupled with a limit on how many tasks we can migrate every balance pass, * this makes (5) the runtime complexity of the balancer. * * An important property here is that each CPU is still (indirectly) connected * to every other CPU in at most O(log n) steps: * * The adjacency matrix of the resulting graph is given by: * * log_2 n * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6) * k = 0 * * And you'll find that: * * A^(log_2 n)_i,j != 0 for all i,j (7) * * Showing there's indeed a path between every CPU in at most O(log n) steps. 
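 * (Worked example with n = 8, purely illustrative: CPUs 1 and 6 are not
 * adjacent at k = 0, but 1 links to 0 at k = 0, 0 links to 4 at k = 2,
 * and 4 links to 6 at k = 1, i.e. a path of three steps, matching the
 * log_2 8 = 3 bound above.)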
* The task movement gives a factor of O(m), giving a convergence complexity * of: * * O(nm log n), n := nr_cpus, m := nr_tasks (8) * * * WORK CONSERVING * * In order to avoid CPUs going idle while there's still work to do, new idle * balancing is more aggressive and has the newly idle CPU iterate up the domain * tree itself instead of relying on other CPUs to bring it work. * * This adds some complexity to both (5) and (8) but it reduces the total idle * time. * * [XXX more?] * * * CGROUPS * * Cgroups make a horror show out of (2), instead of a simple sum we get: * * s_k,i * W_i,0 = \Sum_j \Prod_k w_k * ----- (9) * S_k * * Where * * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10) * * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i. * * The big problem is S_k, its a global sum needed to compute a local (W_i) * property. * * [XXX write more on how we solve this.. _after_ merging pjt's patches that * rewrite all of this once again.] */ static unsigned long __read_mostly max_load_balance_interval = HZ/10; enum fbq_type { regular, remote, all }; enum group_type { group_other = 0, group_misfit_task, group_imbalanced, group_overloaded, }; #define LBF_ALL_PINNED 0x01 #define LBF_NEED_BREAK 0x02 #define LBF_DST_PINNED 0x04 #define LBF_SOME_PINNED 0x08 #define LBF_NOHZ_STATS 0x10 #define LBF_NOHZ_AGAIN 0x20 struct lb_env { struct sched_domain *sd; struct rq *src_rq; int src_cpu; int dst_cpu; struct rq *dst_rq; struct cpumask *dst_grpmask; int new_dst_cpu; enum cpu_idle_type idle; long imbalance; /* The set of CPUs under consideration for load-balancing */ struct cpumask *cpus; unsigned int flags; unsigned int loop; unsigned int loop_break; unsigned int loop_max; enum fbq_type fbq_type; enum group_type src_grp_type; struct list_head tasks; }; /* * Is this task likely cache-hot: */ static int task_hot(struct task_struct *p, struct lb_env *env) { s64 delta; lockdep_assert_held(&env->src_rq->lock); if (p->sched_class != &fair_sched_class) return 0; if (unlikely(task_has_idle_policy(p))) return 0; /* * Buddy candidates are cache hot: */ if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running && (&p->se == cfs_rq_of(&p->se)->next || &p->se == cfs_rq_of(&p->se)->last)) return 1; if (sysctl_sched_migration_cost == -1) return 1; if (sysctl_sched_migration_cost == 0) return 0; delta = rq_clock_task(env->src_rq) - p->se.exec_start; return delta < (s64)sysctl_sched_migration_cost; } #ifdef CONFIG_NUMA_BALANCING /* * Returns 1, if task migration degrades locality * Returns 0, if task migration improves locality i.e migration preferred. * Returns -1, if task migration is not affected by locality. */ static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) { struct numa_group *numa_group = rcu_dereference(p->numa_group); unsigned long src_weight, dst_weight; int src_nid, dst_nid, dist; if (!static_branch_likely(&sched_numa_balancing)) return -1; if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) return -1; src_nid = cpu_to_node(env->src_cpu); dst_nid = cpu_to_node(env->dst_cpu); if (src_nid == dst_nid) return -1; /* Migrating away from the preferred node is always bad. */ if (src_nid == p->numa_preferred_nid) { if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) return 1; else return -1; } /* Encourage migration to the preferred node. */ if (dst_nid == p->numa_preferred_nid) return 0; /* Leaving a core idle is often worse than degrading locality. 
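 * (Clarifying note, not in the original comment: the CPU_IDLE check
 * below returns -1, "not affected by locality", so for a task whose
 * preferred node is neither the source nor the destination an idle
 * puller ignores the NUMA fault weights and only task_hot() is left to
 * veto the move.)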
*/ if (env->idle == CPU_IDLE) return -1; dist = node_distance(src_nid, dst_nid); if (numa_group) { src_weight = group_weight(p, src_nid, dist); dst_weight = group_weight(p, dst_nid, dist); } else { src_weight = task_weight(p, src_nid, dist); dst_weight = task_weight(p, dst_nid, dist); } return dst_weight < src_weight; } #else static inline int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) { return -1; } #endif /* * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? */ static int can_migrate_task(struct task_struct *p, struct lb_env *env) { int tsk_cache_hot; lockdep_assert_held(&env->src_rq->lock); /* * We do not migrate tasks that are: * 1) throttled_lb_pair, or * 2) cannot be migrated to this CPU due to cpus_ptr, or * 3) running (obviously), or * 4) are cache-hot on their current CPU. */ if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) return 0; if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) { int cpu; schedstat_inc(p->se.statistics.nr_failed_migrations_affine); env->flags |= LBF_SOME_PINNED; /* * Remember if this task can be migrated to any other CPU in * our sched_group. We may want to revisit it if we couldn't * meet load balance goals by pulling other tasks on src_cpu. * * Avoid computing new_dst_cpu for NEWLY_IDLE or if we have * already computed one in current iteration. */ if (env->idle == CPU_NEWLY_IDLE || (env->flags & LBF_DST_PINNED)) return 0; /* Prevent to re-select dst_cpu via env's CPUs: */ for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { if (cpumask_test_cpu(cpu, p->cpus_ptr)) { env->flags |= LBF_DST_PINNED; env->new_dst_cpu = cpu; break; } } return 0; } /* Record that we found atleast one task that could run on dst_cpu */ env->flags &= ~LBF_ALL_PINNED; if (task_running(env->src_rq, p)) { schedstat_inc(p->se.statistics.nr_failed_migrations_running); return 0; } /* * Aggressive migration if: * 1) destination numa is preferred * 2) task is cache cold, or * 3) too many balance attempts have failed. */ tsk_cache_hot = migrate_degrades_locality(p, env); if (tsk_cache_hot == -1) tsk_cache_hot = task_hot(p, env); if (tsk_cache_hot <= 0 || env->sd->nr_balance_failed > env->sd->cache_nice_tries) { if (tsk_cache_hot == 1) { schedstat_inc(env->sd->lb_hot_gained[env->idle]); schedstat_inc(p->se.statistics.nr_forced_migrations); } return 1; } schedstat_inc(p->se.statistics.nr_failed_migrations_hot); return 0; } /* * detach_task() -- detach the task for the migration specified in env */ static void detach_task(struct task_struct *p, struct lb_env *env) { lockdep_assert_held(&env->src_rq->lock); deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); set_task_cpu(p, env->dst_cpu); } /* * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as * part of active balancing operations within "domain". * * Returns a task if successful and NULL otherwise. */ static struct task_struct *detach_one_task(struct lb_env *env) { struct task_struct *p; lockdep_assert_held(&env->src_rq->lock); list_for_each_entry_reverse(p, &env->src_rq->cfs_tasks, se.group_node) { if (!can_migrate_task(p, env)) continue; detach_task(p, env); /* * Right now, this is only the second place where * lb_gained[env->idle] is updated (other is detach_tasks) * so we can safely collect stats here rather than * inside detach_tasks(). 
*/ schedstat_inc(env->sd->lb_gained[env->idle]); return p; } return NULL; } static const unsigned int sched_nr_migrate_break = 32; /* * detach_tasks() -- tries to detach up to imbalance runnable load from * busiest_rq, as part of a balancing operation within domain "sd". * * Returns number of detached tasks if successful and 0 otherwise. */ static int detach_tasks(struct lb_env *env) { struct list_head *tasks = &env->src_rq->cfs_tasks; struct task_struct *p; unsigned long load; int detached = 0; lockdep_assert_held(&env->src_rq->lock); if (env->imbalance <= 0) return 0; while (!list_empty(tasks)) { /* * We don't want to steal all, otherwise we may be treated likewise, * which could at worst lead to a livelock crash. */ if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1) break; p = list_last_entry(tasks, struct task_struct, se.group_node); env->loop++; /* We've more or less seen every task there is, call it quits */ if (env->loop > env->loop_max) break; /* take a breather every nr_migrate tasks */ if (env->loop > env->loop_break) { env->loop_break += sched_nr_migrate_break; env->flags |= LBF_NEED_BREAK; break; } if (!can_migrate_task(p, env)) goto next; load = task_h_load(p); if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed) goto next; if ((load / 2) > env->imbalance) goto next; detach_task(p, env); list_add(&p->se.group_node, &env->tasks); detached++; env->imbalance -= load; #ifdef CONFIG_PREEMPT /* * NEWIDLE balancing is a source of latency, so preemptible * kernels will stop after the first task is detached to minimize * the critical section. */ if (env->idle == CPU_NEWLY_IDLE) break; #endif /* * We only want to steal up to the prescribed amount of * runnable load. */ if (env->imbalance <= 0) break; continue; next: list_move(&p->se.group_node, tasks); } /* * Right now, this is one of only two places we collect this stat * so we can safely collect detach_one_task() stats here rather * than inside detach_one_task(). */ schedstat_add(env->sd->lb_gained[env->idle], detached); return detached; } /* * attach_task() -- attach the task detached by detach_task() to its new rq. */ static void attach_task(struct rq *rq, struct task_struct *p) { lockdep_assert_held(&rq->lock); BUG_ON(task_rq(p) != rq); activate_task(rq, p, ENQUEUE_NOCLOCK); check_preempt_curr(rq, p, 0); } /* * attach_one_task() -- attaches the task returned from detach_one_task() to * its new rq. */ static void attach_one_task(struct rq *rq, struct task_struct *p) { struct rq_flags rf; rq_lock(rq, &rf); update_rq_clock(rq); attach_task(rq, p); rq_unlock(rq, &rf); } /* * attach_tasks() -- attaches all tasks detached by detach_tasks() to their * new rq. 
*/ static void attach_tasks(struct lb_env *env) { struct list_head *tasks = &env->tasks; struct task_struct *p; struct rq_flags rf; rq_lock(env->dst_rq, &rf); update_rq_clock(env->dst_rq); while (!list_empty(tasks)) { p = list_first_entry(tasks, struct task_struct, se.group_node); list_del_init(&p->se.group_node); attach_task(env->dst_rq, p); } rq_unlock(env->dst_rq, &rf); } #ifdef CONFIG_NO_HZ_COMMON static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { if (cfs_rq->avg.load_avg) return true; if (cfs_rq->avg.util_avg) return true; return false; } static inline bool others_have_blocked(struct rq *rq) { if (READ_ONCE(rq->avg_rt.util_avg)) return true; if (READ_ONCE(rq->avg_dl.util_avg)) return true; #ifdef CONFIG_HAVE_SCHED_AVG_IRQ if (READ_ONCE(rq->avg_irq.util_avg)) return true; #endif return false; } static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) { rq->last_blocked_load_update_tick = jiffies; if (!has_blocked) rq->has_blocked_load = 0; } #else static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; } static inline bool others_have_blocked(struct rq *rq) { return false; } static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {} #endif #ifdef CONFIG_FAIR_GROUP_SCHED static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) { if (cfs_rq->load.weight) return false; if (cfs_rq->avg.load_sum) return false; if (cfs_rq->avg.util_sum) return false; if (cfs_rq->avg.runnable_load_sum) return false; return true; } static void update_blocked_averages(int cpu) { struct rq *rq = cpu_rq(cpu); struct cfs_rq *cfs_rq, *pos; const struct sched_class *curr_class; struct rq_flags rf; bool done = true; rq_lock_irqsave(rq, &rf); update_rq_clock(rq); /* * Iterates the task_group tree in a bottom up fashion, see * list_add_leaf_cfs_rq() for details. */ for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) { struct sched_entity *se; if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) update_tg_load_avg(cfs_rq, 0); /* Propagate pending load changes to the parent, if any: */ se = cfs_rq->tg->se[cpu]; if (se && !skip_blocked_update(se)) update_load_avg(cfs_rq_of(se), se, 0); /* * There can be a lot of idle CPU cgroups. Don't let fully * decayed cfs_rqs linger on the list. */ if (cfs_rq_is_decayed(cfs_rq)) list_del_leaf_cfs_rq(cfs_rq); /* Don't need periodic decay once load/util_avg are null */ if (cfs_rq_has_blocked(cfs_rq)) done = false; } curr_class = rq->curr->sched_class; update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class); update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class); update_irq_load_avg(rq, 0); /* Don't need periodic decay once load/util_avg are null */ if (others_have_blocked(rq)) done = false; update_blocked_load_status(rq, !done); rq_unlock_irqrestore(rq, &rf); } /* * Compute the hierarchical load factor for cfs_rq and all its ascendants. * This needs to be done in a top-down fashion because the load of a child * group is a fraction of its parents load. 
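 * Worked example (invented numbers): a group se contributing load_avg =
 * 512 to a root cfs_rq whose cfs_rq_load_avg() = 1024 gets h_load =
 * 1024 * 512 / (1024 + 1) ~= 511; a task with load_avg = 256 on that
 * group's cfs_rq (itself at load_avg = 512) then yields task_h_load()
 * ~= 256 * 511 / (512 + 1) ~= 255.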
*/ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) { struct rq *rq = rq_of(cfs_rq); struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; unsigned long now = jiffies; unsigned long load; if (cfs_rq->last_h_load_update == now) return; WRITE_ONCE(cfs_rq->h_load_next, NULL); for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); WRITE_ONCE(cfs_rq->h_load_next, se); if (cfs_rq->last_h_load_update == now) break; } if (!se) { cfs_rq->h_load = cfs_rq_load_avg(cfs_rq); cfs_rq->last_h_load_update = now; } while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) { load = cfs_rq->h_load; load = div64_ul(load * se->avg.load_avg, cfs_rq_load_avg(cfs_rq) + 1); cfs_rq = group_cfs_rq(se); cfs_rq->h_load = load; cfs_rq->last_h_load_update = now; } } static unsigned long task_h_load(struct task_struct *p) { struct cfs_rq *cfs_rq = task_cfs_rq(p); update_cfs_rq_h_load(cfs_rq); return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, cfs_rq_load_avg(cfs_rq) + 1); } #else static inline void update_blocked_averages(int cpu) { struct rq *rq = cpu_rq(cpu); struct cfs_rq *cfs_rq = &rq->cfs; const struct sched_class *curr_class; struct rq_flags rf; rq_lock_irqsave(rq, &rf); update_rq_clock(rq); update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq); curr_class = rq->curr->sched_class; update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class); update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class); update_irq_load_avg(rq, 0); update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq)); rq_unlock_irqrestore(rq, &rf); } static unsigned long task_h_load(struct task_struct *p) { return p->se.avg.load_avg; } #endif /********** Helpers for find_busiest_group ************************/ /* * sg_lb_stats - stats of a sched_group required for load_balancing */ struct sg_lb_stats { unsigned long avg_load; /*Avg load across the CPUs of the group */ unsigned long group_load; /* Total load over the CPUs of the group */ unsigned long load_per_task; unsigned long group_capacity; unsigned long group_util; /* Total utilization of the group */ unsigned int sum_nr_running; /* Nr tasks running in the group */ unsigned int idle_cpus; unsigned int group_weight; enum group_type group_type; int group_no_capacity; unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */ #ifdef CONFIG_NUMA_BALANCING unsigned int nr_numa_running; unsigned int nr_preferred_running; #endif }; /* * sd_lb_stats - Structure to store the statistics of a sched_domain * during load balancing. */ struct sd_lb_stats { struct sched_group *busiest; /* Busiest group in this sd */ struct sched_group *local; /* Local group in this sd */ unsigned long total_running; unsigned long total_load; /* Total load of all groups in sd */ unsigned long total_capacity; /* Total capacity of all groups in sd */ unsigned long avg_load; /* Average load across all groups in sd */ struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */ struct sg_lb_stats local_stat; /* Statistics of the local group */ }; static inline void init_sd_lb_stats(struct sd_lb_stats *sds) { /* * Skimp on the clearing to avoid duplicate work. We can avoid clearing * local_stat because update_sg_lb_stats() does a full clear/assignment. * We must however clear busiest_stat::avg_load because * update_sd_pick_busiest() reads this before assignment. 
*/ *sds = (struct sd_lb_stats){ .busiest = NULL, .local = NULL, .total_running = 0UL, .total_load = 0UL, .total_capacity = 0UL, .busiest_stat = { .avg_load = 0UL, .sum_nr_running = 0, .group_type = group_other, }, }; } static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long max = arch_scale_cpu_capacity(cpu); unsigned long used, free; unsigned long irq; irq = cpu_util_irq(rq); if (unlikely(irq >= max)) return 1; used = READ_ONCE(rq->avg_rt.util_avg); used += READ_ONCE(rq->avg_dl.util_avg); if (unlikely(used >= max)) return 1; free = max - used; return scale_irq_capacity(free, irq, max); } static void update_cpu_capacity(struct sched_domain *sd, int cpu) { unsigned long capacity = scale_rt_capacity(sd, cpu); struct sched_group *sdg = sd->groups; cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu); if (!capacity) capacity = 1; cpu_rq(cpu)->cpu_capacity = capacity; sdg->sgc->capacity = capacity; sdg->sgc->min_capacity = capacity; sdg->sgc->max_capacity = capacity; } void update_group_capacity(struct sched_domain *sd, int cpu) { struct sched_domain *child = sd->child; struct sched_group *group, *sdg = sd->groups; unsigned long capacity, min_capacity, max_capacity; unsigned long interval; interval = msecs_to_jiffies(sd->balance_interval); interval = clamp(interval, 1UL, max_load_balance_interval); sdg->sgc->next_update = jiffies + interval; if (!child) { update_cpu_capacity(sd, cpu); return; } capacity = 0; min_capacity = ULONG_MAX; max_capacity = 0; if (child->flags & SD_OVERLAP) { /* * SD_OVERLAP domains cannot assume that child groups * span the current group. */ for_each_cpu(cpu, sched_group_span(sdg)) { struct sched_group_capacity *sgc; struct rq *rq = cpu_rq(cpu); /* * build_sched_domains() -> init_sched_groups_capacity() * gets here before we've attached the domains to the * runqueues. * * Use capacity_of(), which is set irrespective of domains * in update_cpu_capacity(). * * This avoids capacity from being 0 and * causing divide-by-zero issues on boot. */ if (unlikely(!rq->sd)) { capacity += capacity_of(cpu); } else { sgc = rq->sd->groups->sgc; capacity += sgc->capacity; } min_capacity = min(capacity, min_capacity); max_capacity = max(capacity, max_capacity); } } else { /* * !SD_OVERLAP domains can assume that child groups * span the current group. */ group = child->groups; do { struct sched_group_capacity *sgc = group->sgc; capacity += sgc->capacity; min_capacity = min(sgc->min_capacity, min_capacity); max_capacity = max(sgc->max_capacity, max_capacity); group = group->next; } while (group != child->groups); } sdg->sgc->capacity = capacity; sdg->sgc->min_capacity = min_capacity; sdg->sgc->max_capacity = max_capacity; } /* * Check whether the capacity of the rq has been noticeably reduced by side * activity. The imbalance_pct is used for the threshold. * Return true is the capacity is reduced */ static inline int check_cpu_capacity(struct rq *rq, struct sched_domain *sd) { return ((rq->cpu_capacity * sd->imbalance_pct) < (rq->cpu_capacity_orig * 100)); } /* * Check whether a rq has a misfit task and if it looks like we can actually * help that task: we can migrate the task to a CPU of higher capacity, or * the task's current CPU is heavily pressured. 
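 * "Heavily pressured" here is the check_cpu_capacity() test above: with
 * an imbalance_pct of, say, 117 (illustrative, the value is per
 * sched_domain), a CPU qualifies once RT/DL/IRQ activity has scaled
 * cpu_capacity below roughly 100/117 ~= 85% of cpu_capacity_orig.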
*/ static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd) { return rq->misfit_task_load && (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity || check_cpu_capacity(rq, sd)); } /* * Group imbalance indicates (and tries to solve) the problem where balancing * groups is inadequate due to ->cpus_ptr constraints. * * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a * cpumask covering 1 CPU of the first group and 3 CPUs of the second group. * Something like: * * { 0 1 2 3 } { 4 5 6 7 } * * * * * * * If we were to balance group-wise we'd place two tasks in the first group and * two tasks in the second group. Clearly this is undesired as it will overload * cpu 3 and leave one of the CPUs in the second group unused. * * The current solution to this issue is detecting the skew in the first group * by noticing the lower domain failed to reach balance and had difficulty * moving tasks due to affinity constraints. * * When this is so detected; this group becomes a candidate for busiest; see * update_sd_pick_busiest(). And calculate_imbalance() and * find_busiest_group() avoid some of the usual balance conditions to allow it * to create an effective group imbalance. * * This is a somewhat tricky proposition since the next run might not find the * group imbalance and decide the groups need to be balanced again. A most * subtle and fragile situation. */ static inline int sg_imbalanced(struct sched_group *group) { return group->sgc->imbalance; } /* * group_has_capacity returns true if the group has spare capacity that could * be used by some tasks. * We consider that a group has spare capacity if the * number of task is * smaller than the number of CPUs or if the utilization is lower than the * available capacity for CFS tasks. * For the latter, we use a threshold to stabilize the state, to take into * account the variance of the tasks' load and to return true if the available * capacity in meaningful for the load balancer. * As an example, an available capacity of 1% can appear but it doesn't make * any benefit for the load balance. */ static inline bool group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs) { if (sgs->sum_nr_running < sgs->group_weight) return true; if ((sgs->group_capacity * 100) > (sgs->group_util * env->sd->imbalance_pct)) return true; return false; } /* * group_is_overloaded returns true if the group has more tasks than it can * handle. * group_is_overloaded is not equals to !group_has_capacity because a group * with the exact right number of tasks, has no more spare capacity but is not * overloaded so both group_has_capacity and group_is_overloaded return * false. */ static inline bool group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs) { if (sgs->sum_nr_running <= sgs->group_weight) return false; if ((sgs->group_capacity * 100) < (sgs->group_util * env->sd->imbalance_pct)) return true; return false; } /* * group_smaller_min_cpu_capacity: Returns true if sched_group sg has smaller * per-CPU capacity than sched_group ref. */ static inline bool group_smaller_min_cpu_capacity(struct sched_group *sg, struct sched_group *ref) { return fits_capacity(sg->sgc->min_capacity, ref->sgc->min_capacity); } /* * group_smaller_max_cpu_capacity: Returns true if sched_group sg has smaller * per-CPU capacity_orig than sched_group ref. 
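 * For example (illustrative, assuming the ~20% headroom applied by
 * fits_capacity()): a LITTLE group with max_capacity = 512 is "smaller"
 * than a big group with max_capacity = 1024, whereas groups at 990 and
 * 1024 are not treated as meaningfully different.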
*/ static inline bool group_smaller_max_cpu_capacity(struct sched_group *sg, struct sched_group *ref) { return fits_capacity(sg->sgc->max_capacity, ref->sgc->max_capacity); } static inline enum group_type group_classify(struct sched_group *group, struct sg_lb_stats *sgs) { if (sgs->group_no_capacity) return group_overloaded; if (sg_imbalanced(group)) return group_imbalanced; if (sgs->group_misfit_task_load) return group_misfit_task; return group_other; } static bool update_nohz_stats(struct rq *rq, bool force) { #ifdef CONFIG_NO_HZ_COMMON unsigned int cpu = rq->cpu; if (!rq->has_blocked_load) return false; if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask)) return false; if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick)) return true; update_blocked_averages(cpu); return rq->has_blocked_load; #else return false; #endif } /** * update_sg_lb_stats - Update sched_group's statistics for load balancing. * @env: The load balancing environment. * @group: sched_group whose statistics are to be updated. * @sgs: variable to hold the statistics for this group. * @sg_status: Holds flag indicating the status of the sched_group */ static inline void update_sg_lb_stats(struct lb_env *env, struct sched_group *group, struct sg_lb_stats *sgs, int *sg_status) { int i, nr_running; memset(sgs, 0, sizeof(*sgs)); for_each_cpu_and(i, sched_group_span(group), env->cpus) { struct rq *rq = cpu_rq(i); if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false)) env->flags |= LBF_NOHZ_AGAIN; sgs->group_load += cpu_runnable_load(rq); sgs->group_util += cpu_util(i); sgs->sum_nr_running += rq->cfs.h_nr_running; nr_running = rq->nr_running; if (nr_running > 1) *sg_status |= SG_OVERLOAD; if (cpu_overutilized(i)) *sg_status |= SG_OVERUTILIZED; #ifdef CONFIG_NUMA_BALANCING sgs->nr_numa_running += rq->nr_numa_running; sgs->nr_preferred_running += rq->nr_preferred_running; #endif /* * No need to call idle_cpu() if nr_running is not 0 */ if (!nr_running && idle_cpu(i)) sgs->idle_cpus++; if (env->sd->flags & SD_ASYM_CPUCAPACITY && sgs->group_misfit_task_load < rq->misfit_task_load) { sgs->group_misfit_task_load = rq->misfit_task_load; *sg_status |= SG_OVERLOAD; } } /* Adjust by relative CPU capacity of the group */ sgs->group_capacity = group->sgc->capacity; sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity; if (sgs->sum_nr_running) sgs->load_per_task = sgs->group_load / sgs->sum_nr_running; sgs->group_weight = group->group_weight; sgs->group_no_capacity = group_is_overloaded(env, sgs); sgs->group_type = group_classify(group, sgs); } /** * update_sd_pick_busiest - return 1 on busiest group * @env: The load balancing environment. * @sds: sched_domain statistics * @sg: sched_group candidate to be checked for being the busiest * @sgs: sched_group statistics * * Determine if @sg is a busier group than the previously selected * busiest group. * * Return: %true if @sg is a busier group than the previously selected * busiest group. %false otherwise. */ static bool update_sd_pick_busiest(struct lb_env *env, struct sd_lb_stats *sds, struct sched_group *sg, struct sg_lb_stats *sgs) { struct sg_lb_stats *busiest = &sds->busiest_stat; /* * Don't try to pull misfit tasks we can't help. * We can use max_capacity here as reduction in capacity on some * CPUs in the group should either be possible to resolve * internally or be covered by avg_load imbalance (eventually). 
*/ if (sgs->group_type == group_misfit_task && (!group_smaller_max_cpu_capacity(sg, sds->local) || !group_has_capacity(env, &sds->local_stat))) return false; if (sgs->group_type > busiest->group_type) return true; if (sgs->group_type < busiest->group_type) return false; if (sgs->avg_load <= busiest->avg_load) return false; if (!(env->sd->flags & SD_ASYM_CPUCAPACITY)) goto asym_packing; /* * Candidate sg has no more than one task per CPU and * has higher per-CPU capacity. Migrating tasks to less * capable CPUs may harm throughput. Maximize throughput, * power/energy consequences are not considered. */ if (sgs->sum_nr_running <= sgs->group_weight && group_smaller_min_cpu_capacity(sds->local, sg)) return false; /* * If we have more than one misfit sg go with the biggest misfit. */ if (sgs->group_type == group_misfit_task && sgs->group_misfit_task_load < busiest->group_misfit_task_load) return false; asym_packing: /* This is the busiest node in its class. */ if (!(env->sd->flags & SD_ASYM_PACKING)) return true; /* No ASYM_PACKING if target CPU is already busy */ if (env->idle == CPU_NOT_IDLE) return true; /* * ASYM_PACKING needs to move all the work to the highest * prority CPUs in the group, therefore mark all groups * of lower priority than ourself as busy. */ if (sgs->sum_nr_running && sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) { if (!sds->busiest) return true; /* Prefer to move from lowest priority CPU's work */ if (sched_asym_prefer(sds->busiest->asym_prefer_cpu, sg->asym_prefer_cpu)) return true; } return false; } #ifdef CONFIG_NUMA_BALANCING static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) { if (sgs->sum_nr_running > sgs->nr_numa_running) return regular; if (sgs->sum_nr_running > sgs->nr_preferred_running) return remote; return all; } static inline enum fbq_type fbq_classify_rq(struct rq *rq) { if (rq->nr_running > rq->nr_numa_running) return regular; if (rq->nr_running > rq->nr_preferred_running) return remote; return all; } #else static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) { return all; } static inline enum fbq_type fbq_classify_rq(struct rq *rq) { return regular; } #endif /* CONFIG_NUMA_BALANCING */ /** * update_sd_lb_stats - Update sched_domain's statistics for load balancing. * @env: The load balancing environment. * @sds: variable to hold the statistics for this sched_domain. */ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) { struct sched_domain *child = env->sd->child; struct sched_group *sg = env->sd->groups; struct sg_lb_stats *local = &sds->local_stat; struct sg_lb_stats tmp_sgs; bool prefer_sibling = child && child->flags & SD_PREFER_SIBLING; int sg_status = 0; #ifdef CONFIG_NO_HZ_COMMON if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked)) env->flags |= LBF_NOHZ_STATS; #endif do { struct sg_lb_stats *sgs = &tmp_sgs; int local_group; local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg)); if (local_group) { sds->local = sg; sgs = local; if (env->idle != CPU_NEWLY_IDLE || time_after_eq(jiffies, sg->sgc->next_update)) update_group_capacity(env->sd, env->dst_cpu); } update_sg_lb_stats(env, sg, sgs, &sg_status); if (local_group) goto next_group; /* * In case the child domain prefers tasks go to siblings * first, lower the sg capacity so that we'll try * and move all the excess tasks away. We lower the capacity * of a group only if the local group has the capacity to fit * these excess tasks. 
The extra check prevents the case where * you always pull from the heaviest group when it is already * under-utilized (possible with a large weight task outweighs * the tasks on the system). */ if (prefer_sibling && sds->local && group_has_capacity(env, local) && (sgs->sum_nr_running > local->sum_nr_running + 1)) { sgs->group_no_capacity = 1; sgs->group_type = group_classify(sg, sgs); } if (update_sd_pick_busiest(env, sds, sg, sgs)) { sds->busiest = sg; sds->busiest_stat = *sgs; } next_group: /* Now, start updating sd_lb_stats */ sds->total_running += sgs->sum_nr_running; sds->total_load += sgs->group_load; sds->total_capacity += sgs->group_capacity; sg = sg->next; } while (sg != env->sd->groups); #ifdef CONFIG_NO_HZ_COMMON if ((env->flags & LBF_NOHZ_AGAIN) && cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd))) { WRITE_ONCE(nohz.next_blocked, jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD)); } #endif if (env->sd->flags & SD_NUMA) env->fbq_type = fbq_classify_group(&sds->busiest_stat); if (!env->sd->parent) { struct root_domain *rd = env->dst_rq->rd; /* update overload indicator if we are at root domain */ WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD); /* Update over-utilization (tipping point, U >= 0) indicator */ WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED); trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED); } else if (sg_status & SG_OVERUTILIZED) { struct root_domain *rd = env->dst_rq->rd; WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED); trace_sched_overutilized_tp(rd, SG_OVERUTILIZED); } } /** * check_asym_packing - Check to see if the group is packed into the * sched domain. * * This is primarily intended to used at the sibling level. Some * cores like POWER7 prefer to use lower numbered SMT threads. In the * case of POWER7, it can move to lower SMT modes only when higher * threads are idle. When in lower SMT modes, the threads will * perform better since they share less core resources. Hence when we * have idle threads, we want them to be the higher ones. * * This packing function is run on idle threads. It checks to see if * the busiest CPU in this domain (core in the P7 case) has a higher * CPU number than the packing function is being run on. Here we are * assuming lower CPU number will be equivalent to lower a SMT thread * number. * * Return: 1 when packing is required and a task should be moved to * this CPU. The amount of the imbalance is returned in env->imbalance. * * @env: The load balancing environment. * @sds: Statistics of the sched_domain which is to be packed */ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) { int busiest_cpu; if (!(env->sd->flags & SD_ASYM_PACKING)) return 0; if (env->idle == CPU_NOT_IDLE) return 0; if (!sds->busiest) return 0; busiest_cpu = sds->busiest->asym_prefer_cpu; if (sched_asym_prefer(busiest_cpu, env->dst_cpu)) return 0; env->imbalance = sds->busiest_stat.group_load; return 1; } /** * fix_small_imbalance - Calculate the minor imbalance that exists * amongst the groups of a sched_domain, during * load balancing. * @env: The load balancing environment. * @sds: Statistics of the sched_domain whose imbalance is to be calculated. 
*/ static inline void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) { unsigned long tmp, capa_now = 0, capa_move = 0; unsigned int imbn = 2; unsigned long scaled_busy_load_per_task; struct sg_lb_stats *local, *busiest; local = &sds->local_stat; busiest = &sds->busiest_stat; if (!local->sum_nr_running) local->load_per_task = cpu_avg_load_per_task(env->dst_cpu); else if (busiest->load_per_task > local->load_per_task) imbn = 1; scaled_busy_load_per_task = (busiest->load_per_task * SCHED_CAPACITY_SCALE) / busiest->group_capacity; if (busiest->avg_load + scaled_busy_load_per_task >= local->avg_load + (scaled_busy_load_per_task * imbn)) { env->imbalance = busiest->load_per_task; return; } /* * OK, we don't have enough imbalance to justify moving tasks, * however we may be able to increase total CPU capacity used by * moving them. */ capa_now += busiest->group_capacity * min(busiest->load_per_task, busiest->avg_load); capa_now += local->group_capacity * min(local->load_per_task, local->avg_load); capa_now /= SCHED_CAPACITY_SCALE; /* Amount of load we'd subtract */ if (busiest->avg_load > scaled_busy_load_per_task) { capa_move += busiest->group_capacity * min(busiest->load_per_task, busiest->avg_load - scaled_busy_load_per_task); } /* Amount of load we'd add */ if (busiest->avg_load * busiest->group_capacity < busiest->load_per_task * SCHED_CAPACITY_SCALE) { tmp = (busiest->avg_load * busiest->group_capacity) / local->group_capacity; } else { tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) / local->group_capacity; } capa_move += local->group_capacity * min(local->load_per_task, local->avg_load + tmp); capa_move /= SCHED_CAPACITY_SCALE; /* Move if we gain throughput */ if (capa_move > capa_now) env->imbalance = busiest->load_per_task; } /** * calculate_imbalance - Calculate the amount of imbalance present within the * groups of a given sched_domain during load balance. * @env: load balance environment * @sds: statistics of the sched_domain whose imbalance is to be calculated. */ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds) { unsigned long max_pull, load_above_capacity = ~0UL; struct sg_lb_stats *local, *busiest; local = &sds->local_stat; busiest = &sds->busiest_stat; if (busiest->group_type == group_imbalanced) { /* * In the group_imb case we cannot rely on group-wide averages * to ensure CPU-load equilibrium, look at wider averages. XXX */ busiest->load_per_task = min(busiest->load_per_task, sds->avg_load); } /* * Avg load of busiest sg can be less and avg load of local sg can * be greater than avg load across all sgs of sd because avg load * factors in sg capacity and sgs with smaller group_type are * skipped when updating the busiest sg: */ if (busiest->group_type != group_misfit_task && (busiest->avg_load <= sds->avg_load || local->avg_load >= sds->avg_load)) { env->imbalance = 0; return fix_small_imbalance(env, sds); } /* * If there aren't any idle CPUs, avoid creating some. 
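 * Worked example (invented numbers, SCHED_CAPACITY_SCALE = 1024, both
 * groups overloaded, both group capacities 1024): a busiest group
 * running 3 tasks gives load_above_capacity = (3072 - 1024) * 1024 /
 * 1024 = 2048; with busiest->avg_load = 3072, sds->avg_load = 1792 and
 * local->avg_load = 1024, max_pull = min(1280, 2048) = 1280 and the
 * imbalance computed below reduces to min(1280, 768) = 768.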
*/ if (busiest->group_type == group_overloaded && local->group_type == group_overloaded) { load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE; if (load_above_capacity > busiest->group_capacity) { load_above_capacity -= busiest->group_capacity; load_above_capacity *= scale_load_down(NICE_0_LOAD); load_above_capacity /= busiest->group_capacity; } else load_above_capacity = ~0UL; } /* * We're trying to get all the CPUs to the average_load, so we don't * want to push ourselves above the average load, nor do we wish to * reduce the max loaded CPU below the average load. At the same time, * we also don't want to reduce the group load below the group * capacity. Thus we look for the minimum possible imbalance. */ max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity); /* How much load to actually move to equalise the imbalance */ env->imbalance = min( max_pull * busiest->group_capacity, (sds->avg_load - local->avg_load) * local->group_capacity ) / SCHED_CAPACITY_SCALE; /* Boost imbalance to allow misfit task to be balanced. */ if (busiest->group_type == group_misfit_task) { env->imbalance = max_t(long, env->imbalance, busiest->group_misfit_task_load); } /* * if *imbalance is less than the average load per runnable task * there is no guarantee that any tasks will be moved so we'll have * a think about bumping its value to force at least one task to be * moved */ if (env->imbalance < busiest->load_per_task) return fix_small_imbalance(env, sds); } /******* find_busiest_group() helpers end here *********************/ /** * find_busiest_group - Returns the busiest group within the sched_domain * if there is an imbalance. * * Also calculates the amount of runnable load which should be moved * to restore balance. * * @env: The load balancing environment. * * Return: - The busiest group if imbalance exists. */ static struct sched_group *find_busiest_group(struct lb_env *env) { struct sg_lb_stats *local, *busiest; struct sd_lb_stats sds; init_sd_lb_stats(&sds); /* * Compute the various statistics relavent for load balancing at * this level. */ update_sd_lb_stats(env, &sds); if (sched_energy_enabled()) { struct root_domain *rd = env->dst_rq->rd; if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized)) goto out_balanced; } local = &sds.local_stat; busiest = &sds.busiest_stat; /* ASYM feature bypasses nice load balance check */ if (check_asym_packing(env, &sds)) return sds.busiest; /* There is no busy sibling group to pull tasks from */ if (!sds.busiest || busiest->sum_nr_running == 0) goto out_balanced; /* XXX broken for overlapping NUMA groups */ sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load) / sds.total_capacity; /* * If the busiest group is imbalanced the below checks don't * work because they assume all things are equal, which typically * isn't true due to cpus_ptr constraints and the like. */ if (busiest->group_type == group_imbalanced) goto force_balance; /* * When dst_cpu is idle, prevent SMP nice and/or asymmetric group * capacities from resulting in underutilization due to avg_load. */ if (env->idle != CPU_NOT_IDLE && group_has_capacity(env, local) && busiest->group_no_capacity) goto force_balance; /* Misfit tasks should be dealt with regardless of the avg load */ if (busiest->group_type == group_misfit_task) goto force_balance; /* * If the local group is busier than the selected busiest group * don't try and pull any tasks. 
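 * Pulling in that case would only shift load towards an already busier
 * group rather than reduce the overall imbalance.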
*/ if (local->avg_load >= busiest->avg_load) goto out_balanced; /* * Don't pull any tasks if this group is already above the domain * average load. */ if (local->avg_load >= sds.avg_load) goto out_balanced; if (env->idle == CPU_IDLE) { /* * This CPU is idle. If the busiest group is not overloaded * and there is no imbalance between this and busiest group * wrt idle CPUs, it is balanced. The imbalance becomes * significant if the diff is greater than 1 otherwise we * might end up to just move the imbalance on another group */ if ((busiest->group_type != group_overloaded) && (local->idle_cpus <= (busiest->idle_cpus + 1))) goto out_balanced; } else { /* * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use * imbalance_pct to be conservative. */ if (100 * busiest->avg_load <= env->sd->imbalance_pct * local->avg_load) goto out_balanced; } force_balance: /* Looks like there is an imbalance. Compute it */ env->src_grp_type = busiest->group_type; calculate_imbalance(env, &sds); return env->imbalance ? sds.busiest : NULL; out_balanced: env->imbalance = 0; return NULL; } /* * find_busiest_queue - find the busiest runqueue among the CPUs in the group. */ static struct rq *find_busiest_queue(struct lb_env *env, struct sched_group *group) { struct rq *busiest = NULL, *rq; unsigned long busiest_load = 0, busiest_capacity = 1; int i; for_each_cpu_and(i, sched_group_span(group), env->cpus) { unsigned long capacity, load; enum fbq_type rt; rq = cpu_rq(i); rt = fbq_classify_rq(rq); /* * We classify groups/runqueues into three groups: * - regular: there are !numa tasks * - remote: there are numa tasks that run on the 'wrong' node * - all: there is no distinction * * In order to avoid migrating ideally placed numa tasks, * ignore those when there's better options. * * If we ignore the actual busiest queue to migrate another * task, the next balance pass can still reduce the busiest * queue by moving tasks around inside the node. * * If we cannot move enough load due to this classification * the next pass will adjust the group classification and * allow migration of more tasks. * * Both cases only affect the total convergence complexity. */ if (rt > env->fbq_type) continue; /* * For ASYM_CPUCAPACITY domains with misfit tasks we simply * seek the "biggest" misfit task. */ if (env->src_grp_type == group_misfit_task) { if (rq->misfit_task_load > busiest_load) { busiest_load = rq->misfit_task_load; busiest = rq; } continue; } capacity = capacity_of(i); /* * For ASYM_CPUCAPACITY domains, don't pick a CPU that could * eventually lead to active_balancing high->low capacity. * Higher per-CPU capacity is considered better than balancing * average load. */ if (env->sd->flags & SD_ASYM_CPUCAPACITY && capacity_of(env->dst_cpu) < capacity && rq->nr_running == 1) continue; load = cpu_runnable_load(rq); /* * When comparing with imbalance, use cpu_runnable_load() * which is not scaled with the CPU capacity. */ if (rq->nr_running == 1 && load > env->imbalance && !check_cpu_capacity(rq, env->sd)) continue; /* * For the load comparisons with the other CPU's, consider * the cpu_runnable_load() scaled with the CPU capacity, so * that the load can be moved away from the CPU that is * potentially running at a lower capacity. * * Thus we're looking for max(load_i / capacity_i), crosswise * multiplication to rid ourselves of the division works out * to: load_i * capacity_j > load_j * capacity_i; where j is * our previous maximum. 
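 * For example, with hypothetical values load_i = 800, capacity_i = 512
 * against a previous maximum of load_j = 900, capacity_j = 1024:
 * 800 * 1024 = 819200 > 900 * 512 = 460800, so the smaller but relatively
 * busier CPU becomes the new busiest candidate.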
*/ if (load * busiest_capacity > busiest_load * capacity) { busiest_load = load; busiest_capacity = capacity; busiest = rq; } } return busiest; } /* * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but * so long as it is large enough. */ #define MAX_PINNED_INTERVAL 512 static inline bool asym_active_balance(struct lb_env *env) { /* * ASYM_PACKING needs to force migrate tasks from busy but * lower priority CPUs in order to pack all tasks in the * highest priority CPUs. */ return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) && sched_asym_prefer(env->dst_cpu, env->src_cpu); } static inline bool voluntary_active_balance(struct lb_env *env) { struct sched_domain *sd = env->sd; if (asym_active_balance(env)) return 1; /* * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task. * It's worth migrating the task if the src_cpu's capacity is reduced * because of other sched_class or IRQs if more capacity stays * available on dst_cpu. */ if ((env->idle != CPU_NOT_IDLE) && (env->src_rq->cfs.h_nr_running == 1)) { if ((check_cpu_capacity(env->src_rq, sd)) && (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100)) return 1; } if (env->src_grp_type == group_misfit_task) return 1; return 0; } static int need_active_balance(struct lb_env *env) { struct sched_domain *sd = env->sd; if (voluntary_active_balance(env)) return 1; return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2); } static int active_load_balance_cpu_stop(void *data); static int should_we_balance(struct lb_env *env) { struct sched_group *sg = env->sd->groups; int cpu, balance_cpu = -1; /* * Ensure the balancing environment is consistent; can happen * when the softirq triggers 'during' hotplug. */ if (!cpumask_test_cpu(env->dst_cpu, env->cpus)) return 0; /* * In the newly idle case, we will allow all the CPUs * to do the newly idle load balance. */ if (env->idle == CPU_NEWLY_IDLE) return 1; /* Try to find first idle CPU */ for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) { if (!idle_cpu(cpu)) continue; balance_cpu = cpu; break; } if (balance_cpu == -1) balance_cpu = group_balance_cpu(sg); /* * First idle CPU or the first CPU(busiest) in this sched group * is eligible for doing load balancing at this and above domains. */ return balance_cpu == env->dst_cpu; } /* * Check this_cpu to ensure it is balanced within domain. Attempt to move * tasks if there is an imbalance. 
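 * Returns the number of tasks moved (ld_moved); zero means nothing could
 * be pulled at this domain level.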
*/ static int load_balance(int this_cpu, struct rq *this_rq, struct sched_domain *sd, enum cpu_idle_type idle, int *continue_balancing) { int ld_moved, cur_ld_moved, active_balance = 0; struct sched_domain *sd_parent = sd->parent; struct sched_group *group; struct rq *busiest; struct rq_flags rf; struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask); struct lb_env env = { .sd = sd, .dst_cpu = this_cpu, .dst_rq = this_rq, .dst_grpmask = sched_group_span(sd->groups), .idle = idle, .loop_break = sched_nr_migrate_break, .cpus = cpus, .fbq_type = all, .tasks = LIST_HEAD_INIT(env.tasks), }; cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask); schedstat_inc(sd->lb_count[idle]); redo: if (!should_we_balance(&env)) { *continue_balancing = 0; goto out_balanced; } group = find_busiest_group(&env); if (!group) { schedstat_inc(sd->lb_nobusyg[idle]); goto out_balanced; } busiest = find_busiest_queue(&env, group); if (!busiest) { schedstat_inc(sd->lb_nobusyq[idle]); goto out_balanced; } BUG_ON(busiest == env.dst_rq); schedstat_add(sd->lb_imbalance[idle], env.imbalance); env.src_cpu = busiest->cpu; env.src_rq = busiest; ld_moved = 0; if (busiest->nr_running > 1) { /* * Attempt to move tasks. If find_busiest_group has found * an imbalance but busiest->nr_running <= 1, the group is * still unbalanced. ld_moved simply stays zero, so it is * correctly treated as an imbalance. */ env.flags |= LBF_ALL_PINNED; env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); more_balance: rq_lock_irqsave(busiest, &rf); update_rq_clock(busiest); /* * cur_ld_moved - load moved in current iteration * ld_moved - cumulative load moved across iterations */ cur_ld_moved = detach_tasks(&env); /* * We've detached some tasks from busiest_rq. Every * task is masked "TASK_ON_RQ_MIGRATING", so we can safely * unlock busiest->lock, and we are able to be sure * that nobody can manipulate the tasks in parallel. * See task_rq_lock() family for the details. */ rq_unlock(busiest, &rf); if (cur_ld_moved) { attach_tasks(&env); ld_moved += cur_ld_moved; } local_irq_restore(rf.flags); if (env.flags & LBF_NEED_BREAK) { env.flags &= ~LBF_NEED_BREAK; goto more_balance; } /* * Revisit (affine) tasks on src_cpu that couldn't be moved to * us and move them to an alternate dst_cpu in our sched_group * where they can run. The upper limit on how many times we * iterate on same src_cpu is dependent on number of CPUs in our * sched_group. * * This changes load balance semantics a bit on who can move * load to a given_cpu. In addition to the given_cpu itself * (or a ilb_cpu acting on its behalf where given_cpu is * nohz-idle), we now have balance_cpu in a position to move * load to given_cpu. In rare situations, this may cause * conflicts (balance_cpu and given_cpu/ilb_cpu deciding * _independently_ and at _same_ time to move some load to * given_cpu) causing exceess load to be moved to given_cpu. * This however should not happen so much in practice and * moreover subsequent load balance cycles should correct the * excess load moved. */ if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) { /* Prevent to re-select dst_cpu via env's CPUs */ __cpumask_clear_cpu(env.dst_cpu, env.cpus); env.dst_rq = cpu_rq(env.new_dst_cpu); env.dst_cpu = env.new_dst_cpu; env.flags &= ~LBF_DST_PINNED; env.loop = 0; env.loop_break = sched_nr_migrate_break; /* * Go back to "more_balance" rather than "redo" since we * need to continue with same src_cpu. */ goto more_balance; } /* * We failed to reach balance because of affinity. 
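 * If so, mark the parent domain's group (sgc->imbalance) so the next
 * higher level can try to resolve it instead.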
*/ if (sd_parent) { int *group_imbalance = &sd_parent->groups->sgc->imbalance; if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) *group_imbalance = 1; } /* All tasks on this runqueue were pinned by CPU affinity */ if (unlikely(env.flags & LBF_ALL_PINNED)) { __cpumask_clear_cpu(cpu_of(busiest), cpus); /* * Attempting to continue load balancing at the current * sched_domain level only makes sense if there are * active CPUs remaining as possible busiest CPUs to * pull load from which are not contained within the * destination group that is receiving any migrated * load. */ if (!cpumask_subset(cpus, env.dst_grpmask)) { env.loop = 0; env.loop_break = sched_nr_migrate_break; goto redo; } goto out_all_pinned; } } if (!ld_moved) { schedstat_inc(sd->lb_failed[idle]); /* * Increment the failure counter only on periodic balance. * We do not want newidle balance, which can be very * frequent, pollute the failure counter causing * excessive cache_hot migrations and active balances. */ if (idle != CPU_NEWLY_IDLE) sd->nr_balance_failed++; if (need_active_balance(&env)) { unsigned long flags; raw_spin_lock_irqsave(&busiest->lock, flags); /* * Don't kick the active_load_balance_cpu_stop, * if the curr task on busiest CPU can't be * moved to this_cpu: */ if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) { raw_spin_unlock_irqrestore(&busiest->lock, flags); env.flags |= LBF_ALL_PINNED; goto out_one_pinned; } /* * ->active_balance synchronizes accesses to * ->active_balance_work. Once set, it's cleared * only after active load balance is finished. */ if (!busiest->active_balance) { busiest->active_balance = 1; busiest->push_cpu = this_cpu; active_balance = 1; } raw_spin_unlock_irqrestore(&busiest->lock, flags); if (active_balance) { stop_one_cpu_nowait(cpu_of(busiest), active_load_balance_cpu_stop, busiest, &busiest->active_balance_work); } /* We've kicked active balancing, force task migration. */ sd->nr_balance_failed = sd->cache_nice_tries+1; } } else sd->nr_balance_failed = 0; if (likely(!active_balance) || voluntary_active_balance(&env)) { /* We were unbalanced, so reset the balancing interval */ sd->balance_interval = sd->min_interval; } else { /* * If we've begun active balancing, start to back off. This * case may not be covered by the all_pinned logic if there * is only 1 task on the busy runqueue (because we don't call * detach_tasks). */ if (sd->balance_interval < sd->max_interval) sd->balance_interval *= 2; } goto out; out_balanced: /* * We reach balance although we may have faced some affinity * constraints. Clear the imbalance flag only if other tasks got * a chance to move and fix the imbalance. */ if (sd_parent && !(env.flags & LBF_ALL_PINNED)) { int *group_imbalance = &sd_parent->groups->sgc->imbalance; if (*group_imbalance) *group_imbalance = 0; } out_all_pinned: /* * We reach balance because all tasks are pinned at this level so * we can't migrate them. Let the imbalance flag set so parent level * can try to migrate them. */ schedstat_inc(sd->lb_balanced[idle]); sd->nr_balance_failed = 0; out_one_pinned: ld_moved = 0; /* * idle_balance() disregards balance intervals, so we could repeatedly * reach this code, which would lead to balance_interval skyrocketting * in a short amount of time. Skip the balance_interval increase logic * to avoid that. 
*/ if (env.idle == CPU_NEWLY_IDLE) goto out; /* tune up the balancing interval */ if ((env.flags & LBF_ALL_PINNED && sd->balance_interval < MAX_PINNED_INTERVAL) || sd->balance_interval < sd->max_interval) sd->balance_interval *= 2; out: return ld_moved; } static inline unsigned long get_sd_balance_interval(struct sched_domain *sd, int cpu_busy) { unsigned long interval = sd->balance_interval; if (cpu_busy) interval *= sd->busy_factor; /* scale ms to jiffies */ interval = msecs_to_jiffies(interval); interval = clamp(interval, 1UL, max_load_balance_interval); return interval; } static inline void update_next_balance(struct sched_domain *sd, unsigned long *next_balance) { unsigned long interval, next; /* used by idle balance, so cpu_busy = 0 */ interval = get_sd_balance_interval(sd, 0); next = sd->last_balance + interval; if (time_after(*next_balance, next)) *next_balance = next; } /* * active_load_balance_cpu_stop is run by the CPU stopper. It pushes * running tasks off the busiest CPU onto idle CPUs. It requires at * least 1 task to be running on each physical CPU where possible, and * avoids physical / logical imbalances. */ static int active_load_balance_cpu_stop(void *data) { struct rq *busiest_rq = data; int busiest_cpu = cpu_of(busiest_rq); int target_cpu = busiest_rq->push_cpu; struct rq *target_rq = cpu_rq(target_cpu); struct sched_domain *sd; struct task_struct *p = NULL; struct rq_flags rf; rq_lock_irq(busiest_rq, &rf); /* * Between queueing the stop-work and running it is a hole in which * CPUs can become inactive. We should not move tasks from or to * inactive CPUs. */ if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu)) goto out_unlock; /* Make sure the requested CPU hasn't gone down in the meantime: */ if (unlikely(busiest_cpu != smp_processor_id() || !busiest_rq->active_balance)) goto out_unlock; /* Is there any task to move? */ if (busiest_rq->nr_running <= 1) goto out_unlock; /* * This condition is "impossible", if it occurs * we need to fix it. Originally reported by * Bjorn Helgaas on a 128-CPU setup. */ BUG_ON(busiest_rq == target_rq); /* Search for an sd spanning us and the target CPU. */ rcu_read_lock(); for_each_domain(target_cpu, sd) { if ((sd->flags & SD_LOAD_BALANCE) && cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) break; } if (likely(sd)) { struct lb_env env = { .sd = sd, .dst_cpu = target_cpu, .dst_rq = target_rq, .src_cpu = busiest_rq->cpu, .src_rq = busiest_rq, .idle = CPU_IDLE, /* * can_migrate_task() doesn't need to compute new_dst_cpu * for active balancing. Since we have CPU_IDLE, but no * @dst_grpmask we need to make that test go away with lying * about DST_PINNED. */ .flags = LBF_DST_PINNED, }; schedstat_inc(sd->alb_count); update_rq_clock(busiest_rq); p = detach_one_task(&env); if (p) { schedstat_inc(sd->alb_pushed); /* Active balancing done, reset the failure counter. */ sd->nr_balance_failed = 0; } else { schedstat_inc(sd->alb_failed); } } rcu_read_unlock(); out_unlock: busiest_rq->active_balance = 0; rq_unlock(busiest_rq, &rf); if (p) attach_one_task(target_rq, p); local_irq_enable(); return 0; } static DEFINE_SPINLOCK(balancing); /* * Scale the max load_balance interval with the number of CPUs in the system. * This trades load-balance latency on larger machines for less cross talk. */ void update_max_interval(void) { max_load_balance_interval = HZ*num_online_cpus()/10; } /* * It checks each scheduling domain to see if it is due to be balanced, * and initiates a balancing operation if so. 
* * Balancing parameters are set up in init_sched_domains. */ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle) { int continue_balancing = 1; int cpu = rq->cpu; unsigned long interval; struct sched_domain *sd; /* Earliest time when we have to do rebalance again */ unsigned long next_balance = jiffies + 60*HZ; int update_next_balance = 0; int need_serialize, need_decay = 0; u64 max_cost = 0; rcu_read_lock(); for_each_domain(cpu, sd) { /* * Decay the newidle max times here because this is a regular * visit to all the domains. Decay ~1% per second. */ if (time_after(jiffies, sd->next_decay_max_lb_cost)) { sd->max_newidle_lb_cost = (sd->max_newidle_lb_cost * 253) / 256; sd->next_decay_max_lb_cost = jiffies + HZ; need_decay = 1; } max_cost += sd->max_newidle_lb_cost; if (!(sd->flags & SD_LOAD_BALANCE)) continue; /* * Stop the load balance at this level. There is another * CPU in our sched group which is doing load balancing more * actively. */ if (!continue_balancing) { if (need_decay) continue; break; } interval = get_sd_balance_interval(sd, idle != CPU_IDLE); need_serialize = sd->flags & SD_SERIALIZE; if (need_serialize) { if (!spin_trylock(&balancing)) goto out; } if (time_after_eq(jiffies, sd->last_balance + interval)) { if (load_balance(cpu, rq, sd, idle, &continue_balancing)) { /* * The LBF_DST_PINNED logic could have changed * env->dst_cpu, so we can't know our idle * state even if we migrated tasks. Update it. */ idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE; } sd->last_balance = jiffies; interval = get_sd_balance_interval(sd, idle != CPU_IDLE); } if (need_serialize) spin_unlock(&balancing); out: if (time_after(next_balance, sd->last_balance + interval)) { next_balance = sd->last_balance + interval; update_next_balance = 1; } } if (need_decay) { /* * Ensure the rq-wide value also decays but keep it at a * reasonable floor to avoid funnies with rq->avg_idle. */ rq->max_idle_balance_cost = max((u64)sysctl_sched_migration_cost, max_cost); } rcu_read_unlock(); /* * next_balance will be updated only when there is a need. * When the cpu is attached to null domain for ex, it will not be * updated. */ if (likely(update_next_balance)) { rq->next_balance = next_balance; #ifdef CONFIG_NO_HZ_COMMON /* * If this CPU has been elected to perform the nohz idle * balance. Other idle CPUs have already rebalanced with * nohz_idle_balance() and nohz.next_balance has been * updated accordingly. This CPU is now running the idle load * balance for itself and we need to update the * nohz.next_balance accordingly. */ if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance)) nohz.next_balance = rq->next_balance; #endif } } static inline int on_null_domain(struct rq *rq) { return unlikely(!rcu_dereference_sched(rq->sd)); } #ifdef CONFIG_NO_HZ_COMMON /* * idle load balancing details * - When one of the busy CPUs notice that there may be an idle rebalancing * needed, they will kick the idle load balancer, which then does idle * load balancing for all the idle CPUs. * - HK_FLAG_MISC CPUs are used for this task, because HK_FLAG_SCHED not set * anywhere yet. */ static inline int find_new_ilb(void) { int ilb; for_each_cpu_and(ilb, nohz.idle_cpus_mask, housekeeping_cpumask(HK_FLAG_MISC)) { if (idle_cpu(ilb)) return ilb; } return nr_cpu_ids; } /* * Kick a CPU to do the nohz balancing, if it is time for it. We pick any * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one). 
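 * If the chosen CPU already has a kick pending (NOHZ_KICK_MASK was already
 * set in its nohz flags), no new reschedule IPI is sent.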
*/ static void kick_ilb(unsigned int flags) { int ilb_cpu; nohz.next_balance++; ilb_cpu = find_new_ilb(); if (ilb_cpu >= nr_cpu_ids) return; flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu)); if (flags & NOHZ_KICK_MASK) return; /* * Use smp_send_reschedule() instead of resched_cpu(). * This way we generate a sched IPI on the target CPU which * is idle. And the softirq performing nohz idle load balance * will be run before returning from the IPI. */ smp_send_reschedule(ilb_cpu); } /* * Current decision point for kicking the idle load balancer in the presence * of idle CPUs in the system. */ static void nohz_balancer_kick(struct rq *rq) { unsigned long now = jiffies; struct sched_domain_shared *sds; struct sched_domain *sd; int nr_busy, i, cpu = rq->cpu; unsigned int flags = 0; if (unlikely(rq->idle_balance)) return; /* * We may be recently in ticked or tickless idle mode. At the first * busy tick after returning from idle, we will update the busy stats. */ nohz_balance_exit_idle(rq); /* * None are in tickless mode and hence no need for NOHZ idle load * balancing. */ if (likely(!atomic_read(&nohz.nr_cpus))) return; if (READ_ONCE(nohz.has_blocked) && time_after(now, READ_ONCE(nohz.next_blocked))) flags = NOHZ_STATS_KICK; if (time_before(now, nohz.next_balance)) goto out; if (rq->nr_running >= 2) { flags = NOHZ_KICK_MASK; goto out; } rcu_read_lock(); sd = rcu_dereference(rq->sd); if (sd) { /* * If there's a CFS task and the current CPU has reduced * capacity; kick the ILB to see if there's a better CPU to run * on. */ if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) { flags = NOHZ_KICK_MASK; goto unlock; } } sd = rcu_dereference(per_cpu(sd_asym_packing, cpu)); if (sd) { /* * When ASYM_PACKING; see if there's a more preferred CPU * currently idle; in which case, kick the ILB to move tasks * around. */ for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) { if (sched_asym_prefer(i, cpu)) { flags = NOHZ_KICK_MASK; goto unlock; } } } sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu)); if (sd) { /* * When ASYM_CPUCAPACITY; see if there's a higher capacity CPU * to run the misfit task on. */ if (check_misfit_status(rq, sd)) { flags = NOHZ_KICK_MASK; goto unlock; } /* * For asymmetric systems, we do not want to nicely balance * cache use, instead we want to embrace asymmetry and only * ensure tasks have enough CPU capacity. * * Skip the LLC logic because it's not relevant in that case. */ goto unlock; } sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); if (sds) { /* * If there is an imbalance between LLC domains (IOW we could * increase the overall cache use), we need some less-loaded LLC * domain to pull some load. Likewise, we may need to spread * load within the current LLC domain (e.g. packed SMT cores but * other CPUs are idle). We can't really know from here how busy * the others are - so just get a nohz balance going if it looks * like this LLC domain has tasks we could move. 
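 * nr_busy_cpus > 1 in the shared LLC state is used as that rough signal.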
*/ nr_busy = atomic_read(&sds->nr_busy_cpus); if (nr_busy > 1) { flags = NOHZ_KICK_MASK; goto unlock; } } unlock: rcu_read_unlock(); out: if (flags) kick_ilb(flags); } static void set_cpu_sd_state_busy(int cpu) { struct sched_domain *sd; rcu_read_lock(); sd = rcu_dereference(per_cpu(sd_llc, cpu)); if (!sd || !sd->nohz_idle) goto unlock; sd->nohz_idle = 0; atomic_inc(&sd->shared->nr_busy_cpus); unlock: rcu_read_unlock(); } void nohz_balance_exit_idle(struct rq *rq) { SCHED_WARN_ON(rq != this_rq()); if (likely(!rq->nohz_tick_stopped)) return; rq->nohz_tick_stopped = 0; cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask); atomic_dec(&nohz.nr_cpus); set_cpu_sd_state_busy(rq->cpu); } static void set_cpu_sd_state_idle(int cpu) { struct sched_domain *sd; rcu_read_lock(); sd = rcu_dereference(per_cpu(sd_llc, cpu)); if (!sd || sd->nohz_idle) goto unlock; sd->nohz_idle = 1; atomic_dec(&sd->shared->nr_busy_cpus); unlock: rcu_read_unlock(); } /* * This routine will record that the CPU is going idle with tick stopped. * This info will be used in performing idle load balancing in the future. */ void nohz_balance_enter_idle(int cpu) { struct rq *rq = cpu_rq(cpu); SCHED_WARN_ON(cpu != smp_processor_id()); /* If this CPU is going down, then nothing needs to be done: */ if (!cpu_active(cpu)) return; /* Spare idle load balancing on CPUs that don't want to be disturbed: */ if (!housekeeping_cpu(cpu, HK_FLAG_SCHED)) return; /* * Can be set safely without rq->lock held * If a clear happens, it will have evaluated last additions because * rq->lock is held during the check and the clear */ rq->has_blocked_load = 1; /* * The tick is still stopped but load could have been added in the * meantime. We set the nohz.has_blocked flag to trig a check of the * *_avg. The CPU is already part of nohz.idle_cpus_mask so the clear * of nohz.has_blocked can only happen after checking the new load */ if (rq->nohz_tick_stopped) goto out; /* If we're a completely isolated CPU, we don't play: */ if (on_null_domain(rq)) return; rq->nohz_tick_stopped = 1; cpumask_set_cpu(cpu, nohz.idle_cpus_mask); atomic_inc(&nohz.nr_cpus); /* * Ensures that if nohz_idle_balance() fails to observe our * @idle_cpus_mask store, it must observe the @has_blocked * store. */ smp_mb__after_atomic(); set_cpu_sd_state_idle(cpu); out: /* * Each time a cpu enter idle, we assume that it has blocked load and * enable the periodic update of the load of idle cpus */ WRITE_ONCE(nohz.has_blocked, 1); } /* * Internal function that runs load balance for all idle cpus. The load balance * can be a simple update of blocked load or a complete load balance with * tasks movement depending of flags. * The function returns false if the loop has stopped before running * through all idle CPUs. */ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags, enum cpu_idle_type idle) { /* Earliest time when we have to do rebalance again */ unsigned long now = jiffies; unsigned long next_balance = now + 60*HZ; bool has_blocked_load = false; int update_next_balance = 0; int this_cpu = this_rq->cpu; int balance_cpu; int ret = false; struct rq *rq; SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK); /* * We assume there will be no idle load after this update and clear * the has_blocked flag. If a cpu enters idle in the mean time, it will * set the has_blocked flag and trig another update of idle load. * Because a cpu that becomes idle, is added to idle_cpus_mask before * setting the flag, we are sure to not clear the state and not * check the load of an idle cpu. 
*/ WRITE_ONCE(nohz.has_blocked, 0); /* * Ensures that if we miss the CPU, we must see the has_blocked * store from nohz_balance_enter_idle(). */ smp_mb(); for_each_cpu(balance_cpu, nohz.idle_cpus_mask) { if (balance_cpu == this_cpu || !idle_cpu(balance_cpu)) continue; /* * If this CPU gets work to do, stop the load balancing * work being done for other CPUs. Next load * balancing owner will pick it up. */ if (need_resched()) { has_blocked_load = true; goto abort; } rq = cpu_rq(balance_cpu); has_blocked_load |= update_nohz_stats(rq, true); /* * If time for next balance is due, * do the balance. */ if (time_after_eq(jiffies, rq->next_balance)) { struct rq_flags rf; rq_lock_irqsave(rq, &rf); update_rq_clock(rq); rq_unlock_irqrestore(rq, &rf); if (flags & NOHZ_BALANCE_KICK) rebalance_domains(rq, CPU_IDLE); } if (time_after(next_balance, rq->next_balance)) { next_balance = rq->next_balance; update_next_balance = 1; } } /* Newly idle CPU doesn't need an update */ if (idle != CPU_NEWLY_IDLE) { update_blocked_averages(this_cpu); has_blocked_load |= this_rq->has_blocked_load; } if (flags & NOHZ_BALANCE_KICK) rebalance_domains(this_rq, CPU_IDLE); WRITE_ONCE(nohz.next_blocked, now + msecs_to_jiffies(LOAD_AVG_PERIOD)); /* The full idle balance loop has been done */ ret = true; abort: /* There is still blocked load, enable periodic update */ if (has_blocked_load) WRITE_ONCE(nohz.has_blocked, 1); /* * next_balance will be updated only when there is a need. * When the CPU is attached to null domain for ex, it will not be * updated. */ if (likely(update_next_balance)) nohz.next_balance = next_balance; return ret; } /* * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the * rebalancing for all the cpus for whom scheduler ticks are stopped. */ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { int this_cpu = this_rq->cpu; unsigned int flags; if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_KICK_MASK)) return false; if (idle != CPU_IDLE) { atomic_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu)); return false; } /* could be _relaxed() */ flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu)); if (!(flags & NOHZ_KICK_MASK)) return false; _nohz_idle_balance(this_rq, flags, idle); return true; } static void nohz_newidle_balance(struct rq *this_rq) { int this_cpu = this_rq->cpu; /* * This CPU doesn't want to be disturbed by scheduler * housekeeping */ if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED)) return; /* Will wake up very soon. No time for doing anything else*/ if (this_rq->avg_idle < sysctl_sched_migration_cost) return; /* Don't need to update blocked load of idle CPUs*/ if (!READ_ONCE(nohz.has_blocked) || time_before(jiffies, READ_ONCE(nohz.next_blocked))) return; raw_spin_unlock(&this_rq->lock); /* * This CPU is going to be idle and blocked load of idle CPUs * need to be updated. Run the ilb locally as it is a good * candidate for ilb instead of waking up another idle CPU. * Kick an normal ilb if we failed to do the update. */ if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE)) kick_ilb(NOHZ_STATS_KICK); raw_spin_lock(&this_rq->lock); } #else /* !CONFIG_NO_HZ_COMMON */ static inline void nohz_balancer_kick(struct rq *rq) { } static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { return false; } static inline void nohz_newidle_balance(struct rq *this_rq) { } #endif /* CONFIG_NO_HZ_COMMON */ /* * idle_balance is called by schedule() if this_cpu is about to become * idle. Attempts to pull tasks from other CPUs. 
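 * Returns the number of tasks pulled, or -1 when a task from a higher
 * priority class became runnable and should be picked instead.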
*/ static int idle_balance(struct rq *this_rq, struct rq_flags *rf) { unsigned long next_balance = jiffies + HZ; int this_cpu = this_rq->cpu; struct sched_domain *sd; int pulled_task = 0; u64 curr_cost = 0; /* * We must set idle_stamp _before_ calling idle_balance(), such that we * measure the duration of idle_balance() as idle time. */ this_rq->idle_stamp = rq_clock(this_rq); /* * Do not pull tasks towards !active CPUs... */ if (!cpu_active(this_cpu)) return 0; /* * This is OK, because current is on_cpu, which avoids it being picked * for load-balance and preemption/IRQs are still disabled avoiding * further scheduler activity on it and we're being very careful to * re-start the picking loop. */ rq_unpin_lock(this_rq, rf); if (this_rq->avg_idle < sysctl_sched_migration_cost || !READ_ONCE(this_rq->rd->overload)) { rcu_read_lock(); sd = rcu_dereference_check_sched_domain(this_rq->sd); if (sd) update_next_balance(sd, &next_balance); rcu_read_unlock(); nohz_newidle_balance(this_rq); goto out; } raw_spin_unlock(&this_rq->lock); update_blocked_averages(this_cpu); rcu_read_lock(); for_each_domain(this_cpu, sd) { int continue_balancing = 1; u64 t0, domain_cost; if (!(sd->flags & SD_LOAD_BALANCE)) continue; if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) { update_next_balance(sd, &next_balance); break; } if (sd->flags & SD_BALANCE_NEWIDLE) { t0 = sched_clock_cpu(this_cpu); pulled_task = load_balance(this_cpu, this_rq, sd, CPU_NEWLY_IDLE, &continue_balancing); domain_cost = sched_clock_cpu(this_cpu) - t0; if (domain_cost > sd->max_newidle_lb_cost) sd->max_newidle_lb_cost = domain_cost; curr_cost += domain_cost; } update_next_balance(sd, &next_balance); /* * Stop searching for tasks to pull if there are * now runnable tasks on this rq. */ if (pulled_task || this_rq->nr_running > 0) break; } rcu_read_unlock(); raw_spin_lock(&this_rq->lock); if (curr_cost > this_rq->max_idle_balance_cost) this_rq->max_idle_balance_cost = curr_cost; out: /* * While browsing the domains, we released the rq lock, a task could * have been enqueued in the meantime. Since we're not going idle, * pretend we pulled a task. */ if (this_rq->cfs.h_nr_running && !pulled_task) pulled_task = 1; /* Move the next balance forward */ if (time_after(this_rq->next_balance, next_balance)) this_rq->next_balance = next_balance; /* Is there a task of a high priority class? */ if (this_rq->nr_running != this_rq->cfs.h_nr_running) pulled_task = -1; if (pulled_task) this_rq->idle_stamp = 0; rq_repin_lock(this_rq, rf); return pulled_task; } /* * run_rebalance_domains is triggered when needed from the scheduler tick. * Also triggered for nohz idle balancing (with nohz_balancing_kick set). */ static __latent_entropy void run_rebalance_domains(struct softirq_action *h) { struct rq *this_rq = this_rq(); enum cpu_idle_type idle = this_rq->idle_balance ? CPU_IDLE : CPU_NOT_IDLE; /* * If this CPU has a pending nohz_balance_kick, then do the * balancing on behalf of the other idle CPUs whose ticks are * stopped. Do nohz_idle_balance *before* rebalance_domains to * give the idle CPUs a chance to load balance. Else we may * load balance only within the local sched_domain hierarchy * and abort nohz_idle_balance altogether if we pull some load. */ if (nohz_idle_balance(this_rq, idle)) return; /* normal load balance */ update_blocked_averages(this_rq->cpu); rebalance_domains(this_rq, idle); } /* * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. 
*/ void trigger_load_balance(struct rq *rq) { /* Don't need to rebalance while attached to NULL domain */ if (unlikely(on_null_domain(rq))) return; if (time_after_eq(jiffies, rq->next_balance)) raise_softirq(SCHED_SOFTIRQ); nohz_balancer_kick(rq); } static void rq_online_fair(struct rq *rq) { update_sysctl(); update_runtime_enabled(rq); } static void rq_offline_fair(struct rq *rq) { update_sysctl(); /* Ensure any throttled groups are reachable by pick_next_task */ unthrottle_offline_cfs_rqs(rq); } #endif /* CONFIG_SMP */ /* * scheduler tick hitting a task of our scheduling class. * * NOTE: This function can be called remotely by the tick offload that * goes along full dynticks. Therefore no local assumption can be made * and everything must be accessed through the @rq and @curr passed in * parameters. */ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) { struct cfs_rq *cfs_rq; struct sched_entity *se = &curr->se; for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); entity_tick(cfs_rq, se, queued); } if (static_branch_unlikely(&sched_numa_balancing)) task_tick_numa(rq, curr); update_misfit_status(curr, rq); update_overutilized_status(task_rq(curr)); } /* * called on fork with the child task as argument from the parent's context * - child not yet on the tasklist * - preemption disabled */ static void task_fork_fair(struct task_struct *p) { struct cfs_rq *cfs_rq; struct sched_entity *se = &p->se, *curr; struct rq *rq = this_rq(); struct rq_flags rf; rq_lock(rq, &rf); update_rq_clock(rq); cfs_rq = task_cfs_rq(current); curr = cfs_rq->curr; if (curr) { update_curr(cfs_rq); se->vruntime = curr->vruntime; } place_entity(cfs_rq, se, 1); if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { /* * Upon rescheduling, sched_class::put_prev_task() will place * 'current' within the tree based on its new key value. */ swap(curr->vruntime, se->vruntime); resched_curr(rq); } se->vruntime -= cfs_rq->min_vruntime; rq_unlock(rq, &rf); } /* * Priority of the task has changed. Check to see if we preempt * the current task. */ static void prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) { if (!task_on_rq_queued(p)) return; /* * Reschedule if we are currently running on this runqueue and * our priority decreased, or if we are not currently running on * this runqueue and our priority is higher than the current's */ if (rq->curr == p) { if (p->prio > oldprio) resched_curr(rq); } else check_preempt_curr(rq, p, 0); } static inline bool vruntime_normalized(struct task_struct *p) { struct sched_entity *se = &p->se; /* * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases, * the dequeue_entity(.flags=0) will already have normalized the * vruntime. */ if (p->on_rq) return true; /* * When !on_rq, vruntime of the task has usually NOT been normalized. * But there are some cases where it has already been normalized: * * - A forked child which is waiting for being woken up by * wake_up_new_task(). * - A task which has been woken up by try_to_wake_up() and * waiting for actually being woken up by sched_ttwu_pending(). 
*/ if (!se->sum_exec_runtime || (p->state == TASK_WAKING && p->sched_remote_wakeup)) return true; return false; } #ifdef CONFIG_FAIR_GROUP_SCHED /* * Propagate the changes of the sched_entity across the tg tree to make it * visible to the root */ static void propagate_entity_cfs_rq(struct sched_entity *se) { struct cfs_rq *cfs_rq; /* Start to propagate at parent */ se = se->parent; for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); if (cfs_rq_throttled(cfs_rq)) break; update_load_avg(cfs_rq, se, UPDATE_TG); } } #else static void propagate_entity_cfs_rq(struct sched_entity *se) { } #endif static void detach_entity_cfs_rq(struct sched_entity *se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); /* Catch up with the cfs_rq and remove our load when we leave */ update_load_avg(cfs_rq, se, 0); detach_entity_load_avg(cfs_rq, se); update_tg_load_avg(cfs_rq, false); propagate_entity_cfs_rq(se); } static void attach_entity_cfs_rq(struct sched_entity *se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); #ifdef CONFIG_FAIR_GROUP_SCHED /* * Since the real-depth could have been changed (only FAIR * class maintain depth value), reset depth properly. */ se->depth = se->parent ? se->parent->depth + 1 : 0; #endif /* Synchronize entity with its cfs_rq */ update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); attach_entity_load_avg(cfs_rq, se, 0); update_tg_load_avg(cfs_rq, false); propagate_entity_cfs_rq(se); } static void detach_task_cfs_rq(struct task_struct *p) { struct sched_entity *se = &p->se; struct cfs_rq *cfs_rq = cfs_rq_of(se); if (!vruntime_normalized(p)) { /* * Fix up our vruntime so that the current sleep doesn't * cause 'unlimited' sleep bonus. */ place_entity(cfs_rq, se, 0); se->vruntime -= cfs_rq->min_vruntime; } detach_entity_cfs_rq(se); } static void attach_task_cfs_rq(struct task_struct *p) { struct sched_entity *se = &p->se; struct cfs_rq *cfs_rq = cfs_rq_of(se); attach_entity_cfs_rq(se); if (!vruntime_normalized(p)) se->vruntime += cfs_rq->min_vruntime; } static void switched_from_fair(struct rq *rq, struct task_struct *p) { detach_task_cfs_rq(p); } static void switched_to_fair(struct rq *rq, struct task_struct *p) { attach_task_cfs_rq(p); if (task_on_rq_queued(p)) { /* * We were most likely switched from sched_rt, so * kick off the schedule if running, otherwise just see * if we can still preempt the current task. */ if (rq->curr == p) resched_curr(rq); else check_preempt_curr(rq, p, 0); } } /* Account for a task changing its policy or group. * * This routine is mostly called to set cfs_rq->curr field when a task * migrates between groups/classes. */ static void set_curr_task_fair(struct rq *rq) { struct sched_entity *se = &rq->curr->se; for_each_sched_entity(se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); set_next_entity(cfs_rq, se); /* ensure bandwidth has been allocated on our new cfs_rq */ account_cfs_rq_runtime(cfs_rq, 0); } } void init_cfs_rq(struct cfs_rq *cfs_rq) { cfs_rq->tasks_timeline = RB_ROOT_CACHED; cfs_rq->min_vruntime = (u64)(-(1LL << 20)); #ifndef CONFIG_64BIT cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; #endif #ifdef CONFIG_SMP raw_spin_lock_init(&cfs_rq->removed.lock); #endif } #ifdef CONFIG_FAIR_GROUP_SCHED static void task_set_group_fair(struct task_struct *p) { struct sched_entity *se = &p->se; set_task_rq(p, task_cpu(p)); se->depth = se->parent ? 
se->parent->depth + 1 : 0; } static void task_move_group_fair(struct task_struct *p) { detach_task_cfs_rq(p); set_task_rq(p, task_cpu(p)); #ifdef CONFIG_SMP /* Tell se's cfs_rq has been changed -- migrated */ p->se.avg.last_update_time = 0; #endif attach_task_cfs_rq(p); } static void task_change_group_fair(struct task_struct *p, int type) { switch (type) { case TASK_SET_GROUP: task_set_group_fair(p); break; case TASK_MOVE_GROUP: task_move_group_fair(p); break; } } void free_fair_sched_group(struct task_group *tg) { int i; destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); for_each_possible_cpu(i) { if (tg->cfs_rq) kfree(tg->cfs_rq[i]); if (tg->se) kfree(tg->se[i]); } kfree(tg->cfs_rq); kfree(tg->se); } int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) { struct sched_entity *se; struct cfs_rq *cfs_rq; int i; tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL); if (!tg->cfs_rq) goto err; tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL); if (!tg->se) goto err; tg->shares = NICE_0_LOAD; init_cfs_bandwidth(tg_cfs_bandwidth(tg)); for_each_possible_cpu(i) { cfs_rq = kzalloc_node(sizeof(struct cfs_rq), GFP_KERNEL, cpu_to_node(i)); if (!cfs_rq) goto err; se = kzalloc_node(sizeof(struct sched_entity), GFP_KERNEL, cpu_to_node(i)); if (!se) goto err_free_rq; init_cfs_rq(cfs_rq); init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); init_entity_runnable_average(se); } return 1; err_free_rq: kfree(cfs_rq); err: return 0; } void online_fair_sched_group(struct task_group *tg) { struct sched_entity *se; struct rq *rq; int i; for_each_possible_cpu(i) { rq = cpu_rq(i); se = tg->se[i]; raw_spin_lock_irq(&rq->lock); update_rq_clock(rq); attach_entity_cfs_rq(se); sync_throttle(tg, i); raw_spin_unlock_irq(&rq->lock); } } void unregister_fair_sched_group(struct task_group *tg) { unsigned long flags; struct rq *rq; int cpu; for_each_possible_cpu(cpu) { if (tg->se[cpu]) remove_entity_load_avg(tg->se[cpu]); /* * Only empty task groups can be destroyed; so we can speculatively * check on_list without danger of it being re-added. */ if (!tg->cfs_rq[cpu]->on_list) continue; rq = cpu_rq(cpu); raw_spin_lock_irqsave(&rq->lock, flags); list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); raw_spin_unlock_irqrestore(&rq->lock, flags); } } void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu, struct sched_entity *parent) { struct rq *rq = cpu_rq(cpu); cfs_rq->tg = tg; cfs_rq->rq = rq; init_cfs_rq_runtime(cfs_rq); tg->cfs_rq[cpu] = cfs_rq; tg->se[cpu] = se; /* se could be NULL for root_task_group */ if (!se) return; if (!parent) { se->cfs_rq = &rq->cfs; se->depth = 0; } else { se->cfs_rq = parent->my_q; se->depth = parent->depth + 1; } se->my_q = cfs_rq; /* guarantee group entities always have weight */ update_load_set(&se->load, NICE_0_LOAD); se->parent = parent; } static DEFINE_MUTEX(shares_mutex); int sched_group_set_shares(struct task_group *tg, unsigned long shares) { int i; /* * We can't change the weight of the root cgroup. 
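 * (The root task group owns no sched entities, hence the tg->se[0] check
 * below.)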
*/ if (!tg->se[0]) return -EINVAL; shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES)); mutex_lock(&shares_mutex); if (tg->shares == shares) goto done; tg->shares = shares; for_each_possible_cpu(i) { struct rq *rq = cpu_rq(i); struct sched_entity *se = tg->se[i]; struct rq_flags rf; /* Propagate contribution to hierarchy */ rq_lock_irqsave(rq, &rf); update_rq_clock(rq); for_each_sched_entity(se) { update_load_avg(cfs_rq_of(se), se, UPDATE_TG); update_cfs_group(se); } rq_unlock_irqrestore(rq, &rf); } done: mutex_unlock(&shares_mutex); return 0; } #else /* CONFIG_FAIR_GROUP_SCHED */ void free_fair_sched_group(struct task_group *tg) { } int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) { return 1; } void online_fair_sched_group(struct task_group *tg) { } void unregister_fair_sched_group(struct task_group *tg) { } #endif /* CONFIG_FAIR_GROUP_SCHED */ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task) { struct sched_entity *se = &task->se; unsigned int rr_interval = 0; /* * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise * idle runqueue: */ if (rq->cfs.load.weight) rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se)); return rr_interval; } /* * All the scheduling class methods: */ const struct sched_class fair_sched_class = { .next = &idle_sched_class, .enqueue_task = enqueue_task_fair, .dequeue_task = dequeue_task_fair, .yield_task = yield_task_fair, .yield_to_task = yield_to_task_fair, .check_preempt_curr = check_preempt_wakeup, .pick_next_task = pick_next_task_fair, .put_prev_task = put_prev_task_fair, #ifdef CONFIG_SMP .select_task_rq = select_task_rq_fair, .migrate_task_rq = migrate_task_rq_fair, .rq_online = rq_online_fair, .rq_offline = rq_offline_fair, .task_dead = task_dead_fair, .set_cpus_allowed = set_cpus_allowed_common, #endif .set_curr_task = set_curr_task_fair, .task_tick = task_tick_fair, .task_fork = task_fork_fair, .prio_changed = prio_changed_fair, .switched_from = switched_from_fair, .switched_to = switched_to_fair, .get_rr_interval = get_rr_interval_fair, .update_curr = update_curr_fair, #ifdef CONFIG_FAIR_GROUP_SCHED .task_change_group = task_change_group_fair, #endif #ifdef CONFIG_UCLAMP_TASK .uclamp_enabled = 1, #endif }; #ifdef CONFIG_SCHED_DEBUG void print_cfs_stats(struct seq_file *m, int cpu) { struct cfs_rq *cfs_rq, *pos; rcu_read_lock(); for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) print_cfs_rq(m, cpu, cfs_rq); rcu_read_unlock(); } #ifdef CONFIG_NUMA_BALANCING void show_numa_stats(struct task_struct *p, struct seq_file *m) { int node; unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0; struct numa_group *ng; rcu_read_lock(); ng = rcu_dereference(p->numa_group); for_each_online_node(node) { if (p->numa_faults) { tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)]; tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)]; } if (ng) { gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)], gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)]; } print_numa_stats(m, node, tsf, tpf, gsf, gpf); } rcu_read_unlock(); } #endif /* CONFIG_NUMA_BALANCING */ #endif /* CONFIG_SCHED_DEBUG */ __init void init_sched_fair_class(void) { #ifdef CONFIG_SMP open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); #ifdef CONFIG_NO_HZ_COMMON nohz.next_balance = jiffies; nohz.next_blocked = jiffies; zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); #endif #endif /* SMP */ } /* * Helper functions to facilitate extracting info from tracepoints. 
*/ const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq) { #ifdef CONFIG_SMP return cfs_rq ? &cfs_rq->avg : NULL; #else return NULL; #endif } EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_avg); char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len) { if (!cfs_rq) { if (str) strlcpy(str, "(null)", len); else return NULL; } cfs_rq_tg_path(cfs_rq, str, len); return str; } EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_path); int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq) { return cfs_rq ? cpu_of(rq_of(cfs_rq)) : -1; } EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_cpu); const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq) { #ifdef CONFIG_SMP return rq ? &rq->avg_rt : NULL; #else return NULL; #endif } EXPORT_SYMBOL_GPL(sched_trace_rq_avg_rt); const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq) { #ifdef CONFIG_SMP return rq ? &rq->avg_dl : NULL; #else return NULL; #endif } EXPORT_SYMBOL_GPL(sched_trace_rq_avg_dl); const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq) { #if defined(CONFIG_SMP) && defined(CONFIG_HAVE_SCHED_AVG_IRQ) return rq ? &rq->avg_irq : NULL; #else return NULL; #endif } EXPORT_SYMBOL_GPL(sched_trace_rq_avg_irq); int sched_trace_rq_cpu(struct rq *rq) { return rq ? cpu_of(rq) : -1; } EXPORT_SYMBOL_GPL(sched_trace_rq_cpu); const struct cpumask *sched_trace_rd_span(struct root_domain *rd) { #ifdef CONFIG_SMP return rd ? rd->span : NULL; #else return NULL; #endif } EXPORT_SYMBOL_GPL(sched_trace_rd_span);
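/*
 * Editor's illustrative sketch (not part of the kernel sources above): a
 * minimal user-space model of how get_sd_balance_interval() and
 * update_max_interval() scale the rebalance period.  EX_HZ, the domain
 * interval, busy_factor and the CPU count below are hypothetical values
 * chosen only for this example.
 */
#include <stdio.h>

#define EX_HZ 250UL	/* hypothetical CONFIG_HZ */

static unsigned long ex_msecs_to_jiffies(unsigned long ms)
{
	return ms * EX_HZ / 1000;	/* simplified conversion for the sketch */
}

static unsigned long ex_clamp(unsigned long v, unsigned long lo, unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned long sd_interval_ms = 8;	/* hypothetical sd->balance_interval */
	unsigned long busy_factor = 32;		/* hypothetical sd->busy_factor */
	unsigned long online_cpus = 16;		/* hypothetical num_online_cpus() */
	/* update_max_interval(): the ceiling grows with the number of CPUs */
	unsigned long max_interval = EX_HZ * online_cpus / 10;
	unsigned long interval;

	/* busy CPU: scale by busy_factor, convert to jiffies, then clamp */
	interval = ex_clamp(ex_msecs_to_jiffies(sd_interval_ms * busy_factor),
			    1UL, max_interval);
	printf("busy interval: %lu jiffies (ceiling %lu)\n", interval, max_interval);
	return 0;
}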
/* * Copyright 2012-15 Advanced Micro Devices, Inc.cls * * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/slab.h> #include "dm_services.h" #include "stream_encoder.h" #include "resource.h" #include "include/irq_service_interface.h" #include "dce120_resource.h" #include "dce112/dce112_resource.h" #include "dce110/dce110_resource.h" #include "../virtual/virtual_stream_encoder.h" #include "dce120_timing_generator.h" #include "irq/dce120/irq_service_dce120.h" #include "dce/dce_opp.h" #include "dce/dce_clock_source.h" #include "dce/dce_ipp.h" #include "dce/dce_mem_input.h" #include "dce110/dce110_hw_sequencer.h" #include "dce120/dce120_hw_sequencer.h" #include "dce/dce_transform.h" #include "clk_mgr.h" #include "dce/dce_audio.h" #include "dce/dce_link_encoder.h" #include "dce/dce_stream_encoder.h" #include "dce/dce_hwseq.h" #include "dce/dce_abm.h" #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" #include "dce/dce_12_0_offset.h" #include "dce/dce_12_0_sh_mask.h" #include "soc15_hw_ip.h" #include "vega10_ip_offset.h" #include "nbio/nbio_6_1_offset.h" #include "mmhub/mmhub_9_4_0_offset.h" #include "mmhub/mmhub_9_4_0_sh_mask.h" #include "reg_helper.h" #include "dce100/dce100_resource.h" #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #endif enum dce120_clk_src_array_id { DCE120_CLK_SRC_PLL0, DCE120_CLK_SRC_PLL1, DCE120_CLK_SRC_PLL2, DCE120_CLK_SRC_PLL3, DCE120_CLK_SRC_PLL4, DCE120_CLK_SRC_PLL5, DCE120_CLK_SRC_TOTAL }; static const struct dce110_timing_generator_offsets dce120_tg_offsets[] = { { .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), }, { .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), }, { .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), }, { .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), }, { .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), }, { .crtc = (mmCRTC5_CRTC_CONTROL - 
mmCRTC0_CRTC_CONTROL), } }; /* begin ********************* * macros to expend register list macro defined in HW object header file */ #define BASE_INNER(seg) \ DCE_BASE__INST0_SEG ## seg #define NBIO_BASE_INNER(seg) \ NBIF_BASE__INST0_SEG ## seg #define NBIO_BASE(seg) \ NBIO_BASE_INNER(seg) /* compile time expand base address. */ #define BASE(seg) \ BASE_INNER(seg) #define SR(reg_name)\ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name #define SRI(reg_name, block, id)\ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name /* MMHUB */ #define MMHUB_BASE_INNER(seg) \ MMHUB_BASE__INST0_SEG ## seg #define MMHUB_BASE(seg) \ MMHUB_BASE_INNER(seg) #define MMHUB_SR(reg_name)\ .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name /* macros to expend register list macro defined in HW object header file * end *********************/ static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCE110_COMMON_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { DMCU_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_dmcu_mask dmcu_mask = { DMCU_MASK_SH_LIST_DCE110(_MASK) }; static const struct dce_abm_registers abm_regs = { ABM_DCE110_COMMON_REG_LIST() }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCE110(_MASK) }; #define ipp_regs(id)\ [id] = {\ IPP_DCE110_REG_LIST_DCE_BASE(id)\ } static const struct dce_ipp_registers ipp_regs[] = { ipp_regs(0), ipp_regs(1), ipp_regs(2), ipp_regs(3), ipp_regs(4), ipp_regs(5) }; static const struct dce_ipp_shift ipp_shift = { IPP_DCE120_MASK_SH_LIST_SOC_BASE(__SHIFT) }; static const struct dce_ipp_mask ipp_mask = { IPP_DCE120_MASK_SH_LIST_SOC_BASE(_MASK) }; #define transform_regs(id)\ [id] = {\ XFM_COMMON_REG_LIST_DCE110(id)\ } static const struct dce_transform_registers xfm_regs[] = { transform_regs(0), transform_regs(1), transform_regs(2), transform_regs(3), transform_regs(4), transform_regs(5) }; static const struct dce_transform_shift xfm_shift = { XFM_COMMON_MASK_SH_LIST_SOC_BASE(__SHIFT) }; static const struct dce_transform_mask xfm_mask = { XFM_COMMON_MASK_SH_LIST_SOC_BASE(_MASK) }; #define aux_regs(id)\ [id] = {\ AUX_REG_LIST(id)\ } static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3), aux_regs(4), aux_regs(5) }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3), hpd_regs(4), hpd_regs(5) }; #define link_regs(id)\ [id] = {\ LE_DCE120_REG_LIST(id), \ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ } static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(0), link_regs(1), link_regs(2), link_regs(3), link_regs(4), link_regs(5), link_regs(6), }; #define stream_enc_regs(id)\ [id] = {\ SE_COMMON_REG_LIST(id),\ .TMDS_CNTL = 0,\ } static const struct dce110_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), stream_enc_regs(3), stream_enc_regs(4), stream_enc_regs(5) }; static const struct dce_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCE120(__SHIFT) }; static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE120(_MASK) }; #define opp_regs(id)\ [id] = {\ OPP_DCE_120_REG_LIST(id),\ } static const struct dce_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), 
opp_regs(3), opp_regs(4), opp_regs(5) }; static const struct dce_opp_shift opp_shift = { OPP_COMMON_MASK_SH_LIST_DCE_120(__SHIFT) }; static const struct dce_opp_mask opp_mask = { OPP_COMMON_MASK_SH_LIST_DCE_120(_MASK) }; #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ .AUX_RESET_MASK = 0 \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4), aux_engine_regs(5) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), audio_regs(2), audio_regs(3), audio_regs(4), audio_regs(5) }; #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) static const struct dce_audio_shift audio_shift = { DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) }; #define clk_src_regs(index, id)\ [index] = {\ CS_COMMON_REG_LIST_DCE_112(id),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0, A), clk_src_regs(1, B), clk_src_regs(2, C), clk_src_regs(3, D), clk_src_regs(4, E), clk_src_regs(5, F) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCE_112(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCE_112(_MASK) }; struct output_pixel_processor *dce120_opp_create( struct dc_context *ctx, uint32_t inst) { struct dce110_opp *opp = kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); if (!opp) return NULL; dce110_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } struct dce_aux *dce120_aux_engine_create( struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst]); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5), i2c_inst_regs(6), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) }; struct dce_i2c_hw *dce120_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dce112_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } static const struct bios_registers bios_regs = { .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3 + NBIO_BASE(mmBIOS_SCRATCH_3_BASE_IDX), .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX) }; static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 7, .num_stream_encoder = 6, .num_pll = 6, .num_ddc = 6, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCE_RGB, .pixel_format_support = { .argb8888 = true, .nv12 = false, .fp16 = false }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 1, .fp16 = 1 }, 
.max_downscale_factor = { .argb8888 = 250, .nv12 = 1, .fp16 = 1 } }; static const struct dc_debug_options debug_defaults = { .disable_clock_gate = true, }; static struct clock_source *dce120_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(*clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dce112_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } static void dce120_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); *clk_src = NULL; } static bool dce120_hw_sequencer_create(struct dc *dc) { /* All registers used by dce11.2 match those in dce11 in offset and * structure */ dce120_hw_sequencer_construct(dc); /*TODO Move to separate file and Override what is needed */ return true; } static struct timing_generator *dce120_timing_generator_create( struct dc_context *ctx, uint32_t instance, const struct dce110_timing_generator_offsets *offsets) { struct dce110_timing_generator *tg110 = kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); if (!tg110) return NULL; dce120_timing_generator_construct(tg110, ctx, instance, offsets); return &tg110->base; } static void dce120_transform_destroy(struct transform **xfm) { kfree(TO_DCE_TRANSFORM(*xfm)); *xfm = NULL; } static void destruct(struct dce110_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.opps[i] != NULL) dce110_opp_destroy(&pool->base.opps[i]); if (pool->base.transforms[i] != NULL) dce120_transform_destroy(&pool->base.transforms[i]); if (pool->base.ipps[i] != NULL) dce_ipp_destroy(&pool->base.ipps[i]); if (pool->base.mis[i] != NULL) { kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); pool->base.mis[i] = NULL; } if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); } if (pool->base.timing_generators[i] != NULL) { kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i]) dce_aud_destroy(&pool->base.audios[i]); } for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) dce120_clock_source_destroy( &pool->base.clock_sources[i]); } if (pool->base.dp_clock_source != NULL) dce120_clock_source_destroy(&pool->base.dp_clock_source); if (pool->base.abm != NULL) dce_abm_destroy(&pool->base.abm); if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); } static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { uint32_t reg_val = dm_read_reg_soc15(ctx, mmCC_DC_MISC_STRAPS, 0); straps->audio_stream_number = get_reg_field_value(reg_val, CC_DC_MISC_STRAPS, AUDIO_STREAM_NUMBER); straps->hdmi_disable = get_reg_field_value(reg_val, CC_DC_MISC_STRAPS, HDMI_DISABLE); reg_val = dm_read_reg_soc15(ctx, 
mmDC_PINSTRAPS, 0); straps->dc_pinstraps_audio = get_reg_field_value(reg_val, DC_PINSTRAPS, DC_PINSTRAPS_AUDIO); } static struct audio *create_audio( struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 600000, .hdmi_ycbcr420_supported = true, .dp_ycbcr420_supported = false, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, .flags.bits.IS_TPS4_CAPABLE = true, }; static struct link_encoder *dce120_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); if (!enc110) return NULL; dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; } static struct input_pixel_processor *dce120_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); return NULL; } dce_ipp_construct(ipp, ctx, inst, &ipp_regs[inst], &ipp_shift, &ipp_mask); return &ipp->base; } static struct stream_encoder *dce120_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dce110_stream_encoder *enc110 = kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); if (!enc110) return NULL; dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc110->base; } #define SRII(reg_name, block, id)\ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCE120_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCE12_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCE12_MASK_SH_LIST(_MASK) }; /* HWSEQ regs for VG20 */ static const struct dce_hwseq_registers dce121_hwseq_reg = { HWSEQ_VG20_REG_LIST() }; static const struct dce_hwseq_shift dce121_hwseq_shift = { HWSEQ_VG20_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask dce121_hwseq_mask = { HWSEQ_VG20_MASK_SH_LIST(_MASK) }; static struct dce_hwseq *dce120_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; } return hws; } static struct dce_hwseq *dce121_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &dce121_hwseq_reg; hws->shifts = &dce121_hwseq_shift; hws->masks = &dce121_hwseq_mask; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, .create_stream_encoder = dce120_stream_encoder_create, .create_hwseq = dce120_hwseq_create, }; static const struct resource_create_funcs dce121_res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, .create_stream_encoder = dce120_stream_encoder_create, .create_hwseq = dce121_hwseq_create, }; #define mi_inst_regs(id) { MI_DCE12_REG_LIST(id) } static const struct 
dce_mem_input_registers mi_regs[] = { mi_inst_regs(0), mi_inst_regs(1), mi_inst_regs(2), mi_inst_regs(3), mi_inst_regs(4), mi_inst_regs(5), }; static const struct dce_mem_input_shift mi_shifts = { MI_DCE12_MASK_SH_LIST(__SHIFT) }; static const struct dce_mem_input_mask mi_masks = { MI_DCE12_MASK_SH_LIST(_MASK) }; static struct mem_input *dce120_mem_input_create( struct dc_context *ctx, uint32_t inst) { struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), GFP_KERNEL); if (!dce_mi) { BREAK_TO_DEBUGGER(); return NULL; } dce120_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); return &dce_mi->base; } static struct transform *dce120_transform_create( struct dc_context *ctx, uint32_t inst) { struct dce_transform *transform = kzalloc(sizeof(struct dce_transform), GFP_KERNEL); if (!transform) return NULL; dce_transform_construct(transform, ctx, inst, &xfm_regs[inst], &xfm_shift, &xfm_mask); transform->lb_memory_size = 0x1404; /*5124*/ return &transform->base; } static void dce120_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } static const struct resource_funcs dce120_res_pool_funcs = { .destroy = dce120_destroy_resource_pool, .link_enc_create = dce120_link_encoder_create, .validate_bandwidth = dce112_validate_bandwidth, .validate_plane = dce100_validate_plane, .add_stream_to_ctx = dce112_add_stream_to_ctx, .find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link }; static void bw_calcs_data_update_from_pplib(struct dc *dc) { struct dm_pp_clock_levels_with_latency eng_clks = {0}; struct dm_pp_clock_levels_with_latency mem_clks = {0}; struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0}; int i; unsigned int clk; unsigned int latency; /*original logic in dal3*/ int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ; /*do system clock*/ if (!dm_pp_get_clock_levels_by_type_with_latency( dc->ctx, DM_PP_CLOCK_TYPE_ENGINE_CLK, &eng_clks) || eng_clks.num_levels == 0) { eng_clks.num_levels = 8; clk = 300000; for (i = 0; i < eng_clks.num_levels; i++) { eng_clks.data[i].clocks_in_khz = clk; clk += 100000; } } /* convert all the clock fro kHz to fix point mHz TODO: wloop data */ dc->bw_vbios->high_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels-1].clocks_in_khz, 1000); dc->bw_vbios->mid1_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels/8].clocks_in_khz, 1000); dc->bw_vbios->mid2_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*2/8].clocks_in_khz, 1000); dc->bw_vbios->mid3_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz, 1000); dc->bw_vbios->mid4_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*4/8].clocks_in_khz, 1000); dc->bw_vbios->mid5_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*5/8].clocks_in_khz, 1000); dc->bw_vbios->mid6_sclk = bw_frc_to_fixed( eng_clks.data[eng_clks.num_levels*6/8].clocks_in_khz, 1000); dc->bw_vbios->low_sclk = bw_frc_to_fixed( eng_clks.data[0].clocks_in_khz, 1000); /*do memory clock*/ if (!dm_pp_get_clock_levels_by_type_with_latency( dc->ctx, DM_PP_CLOCK_TYPE_MEMORY_CLK, &mem_clks) || mem_clks.num_levels == 0) { mem_clks.num_levels = 3; clk = 250000; latency = 45; for (i = 0; i < eng_clks.num_levels; i++) { mem_clks.data[i].clocks_in_khz = clk; mem_clks.data[i].latency_in_us = latency; clk += 500000; latency -= 5; } } /* we don't need to call PPLIB for validation clock since they * also give 
us the highest sclk and highest mclk (UMA clock). * ALSO always convert UMA clock (from PPLIB) to YCLK (HW formula): * YCLK = UMACLK*m_memoryTypeMultiplier */ if (dc->bw_vbios->memory_type == bw_def_hbm) memory_type_multiplier = MEMORY_TYPE_HBM; dc->bw_vbios->low_yclk = bw_frc_to_fixed( mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier, 1000); /* Now notify PPLib/SMU about which Watermarks sets they should select * depending on DPM state they are in. And update BW MGR GFX Engine and * Memory clock member variables for Watermarks calculations for each * Watermark Set */ clk_ranges.num_wm_sets = 4; clk_ranges.wm_clk_ranges[0].wm_set_id = WM_SET_A; clk_ranges.wm_clk_ranges[0].wm_min_eng_clk_in_khz = eng_clks.data[0].clocks_in_khz; clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz = eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1; clk_ranges.wm_clk_ranges[0].wm_min_mem_clk_in_khz = mem_clks.data[0].clocks_in_khz; clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz = mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1; clk_ranges.wm_clk_ranges[1].wm_set_id = WM_SET_B; clk_ranges.wm_clk_ranges[1].wm_min_eng_clk_in_khz = eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz; /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */ clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000; clk_ranges.wm_clk_ranges[1].wm_min_mem_clk_in_khz = mem_clks.data[0].clocks_in_khz; clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz = mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1; clk_ranges.wm_clk_ranges[2].wm_set_id = WM_SET_C; clk_ranges.wm_clk_ranges[2].wm_min_eng_clk_in_khz = eng_clks.data[0].clocks_in_khz; clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz = eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1; clk_ranges.wm_clk_ranges[2].wm_min_mem_clk_in_khz = mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz; /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */ clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000; clk_ranges.wm_clk_ranges[3].wm_set_id = WM_SET_D; clk_ranges.wm_clk_ranges[3].wm_min_eng_clk_in_khz = eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz; /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */ clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000; clk_ranges.wm_clk_ranges[3].wm_min_mem_clk_in_khz = mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz; /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */ clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000; /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges); } static uint32_t read_pipe_fuses(struct dc_context *ctx) { uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0); /* VG20 support max 6 pipes */ value = value & 0x3f; return value; } static bool construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) { unsigned int i; int j; struct dc_context *ctx = dc->ctx; struct irq_service_init_data irq_init_data; static const struct resource_create_funcs *res_funcs; bool is_vg20 = ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev); uint32_t pipe_fuses; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap; pool->base.funcs = &dce120_res_pool_funcs; /* TODO: Fill more data from 
GreenlandAsicCapability.cpp */ pool->base.pipe_count = res_cap.num_timing_generator; pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; dc->caps.psp_setup_panel_mode = true; dc->debug = debug_defaults; /************************************************* * Create resources * *************************************************/ pool->base.clock_sources[DCE120_CLK_SRC_PLL0] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[DCE120_CLK_SRC_PLL1] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[DCE120_CLK_SRC_PLL2] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL2, &clk_src_regs[2], false); pool->base.clock_sources[DCE120_CLK_SRC_PLL3] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL3, &clk_src_regs[3], false); pool->base.clock_sources[DCE120_CLK_SRC_PLL4] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL4, &clk_src_regs[4], false); pool->base.clock_sources[DCE120_CLK_SRC_PLL5] = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL5, &clk_src_regs[5], false); pool->base.clk_src_count = DCE120_CLK_SRC_TOTAL; pool->base.dp_clock_source = dce120_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_ID_DP_DTO, &clk_src_regs[0], true); for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto clk_src_create_fail; } } pool->base.dmcu = dce_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; } irq_init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dce120_create(&irq_init_data); if (!pool->base.irqs) goto irqs_create_fail; /* VG20: Pipe harvesting enabled, retrieve valid pipe fuses */ if (is_vg20) pipe_fuses = read_pipe_fuses(ctx); /* index to valid pipe resource */ j = 0; for (i = 0; i < pool->base.pipe_count; i++) { if (is_vg20) { if ((pipe_fuses & (1 << i)) != 0) { dm_error("DC: skip invalid pipe %d!\n", i); continue; } } pool->base.timing_generators[j] = dce120_timing_generator_create( ctx, i, &dce120_tg_offsets[i]); if (pool->base.timing_generators[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto controller_create_fail; } pool->base.mis[j] = dce120_mem_input_create(ctx, i); if (pool->base.mis[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create memory input!\n"); goto controller_create_fail; } pool->base.ipps[j] = dce120_ipp_create(ctx, i); if (pool->base.ipps[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create input pixel processor!\n"); goto controller_create_fail; } pool->base.transforms[j] = dce120_transform_create(ctx, i); if (pool->base.transforms[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create transform!\n"); goto res_create_fail; } pool->base.opps[j] = dce120_opp_create( ctx, i); if (pool->base.opps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( 
"DC: failed to create output pixel processor!\n"); } /* check next valid pipe */ j++; } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce120_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto res_create_fail; } pool->base.hw_i2cs[i] = dce120_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create i2c engine!!\n"); goto res_create_fail; } pool->base.sw_i2cs[i] = NULL; } /* valid pipe num */ pool->base.pipe_count = j; pool->base.timing_generator_count = j; if (is_vg20) res_funcs = &dce121_res_create_funcs; else res_funcs = &res_create_funcs; if (!resource_construct(num_virtual_links, dc, &pool->base, res_funcs)) goto res_create_fail; /* Create hardware sequencer */ if (!dce120_hw_sequencer_create(dc)) goto controller_create_fail; dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id); bw_calcs_data_update_from_pplib(dc); return true; irqs_create_fail: controller_create_fail: clk_src_create_fail: res_create_fail: destruct(pool); return false; } struct resource_pool *dce120_create_resource_pool( uint8_t num_virtual_links, struct dc *dc) { struct dce110_resource_pool *pool = kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (construct(num_virtual_links, dc, pool)) return &pool->base; kfree(pool); BREAK_TO_DEBUGGER(); return NULL; }
./CrossVul/dataset_final_sorted/CWE-400/c/good_1273_3
crossvul-cpp_data_bad_707_0
/*- * Copyright (c) 2003-2007 Tim Kientzle * Copyright (c) 2009 Andreas Henriksson <andreas@fatal.se> * Copyright (c) 2009-2012 Michihiro NAKAJIMA * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "archive_platform.h" __FBSDID("$FreeBSD: head/lib/libarchive/archive_read_support_format_iso9660.c 201246 2009-12-30 05:30:35Z kientzle $"); #ifdef HAVE_ERRNO_H #include <errno.h> #endif /* #include <stdint.h> */ /* See archive_platform.h */ #include <stdio.h> #ifdef HAVE_STDLIB_H #include <stdlib.h> #endif #ifdef HAVE_STRING_H #include <string.h> #endif #include <time.h> #ifdef HAVE_ZLIB_H #include <zlib.h> #endif #include "archive.h" #include "archive_endian.h" #include "archive_entry.h" #include "archive_entry_locale.h" #include "archive_private.h" #include "archive_read_private.h" #include "archive_string.h" /* * An overview of ISO 9660 format: * * Each disk is laid out as follows: * * 32k reserved for private use * * Volume descriptor table. Each volume descriptor * is 2k and specifies basic format information. * The "Primary Volume Descriptor" (PVD) is defined by the * standard and should always be present; other volume * descriptors include various vendor-specific extensions. * * Files and directories. Each file/dir is specified by * an "extent" (starting sector and length in bytes). * Dirs are just files with directory records packed one * after another. The PVD contains a single dir entry * specifying the location of the root directory. Everything * else follows from there. * * This module works by first reading the volume descriptors, then * building a list of directory entries, sorted by starting * sector. At each step, I look for the earliest dir entry that * hasn't yet been read, seek forward to that location and read * that entry. If it's a dir, I slurp in the new dir entries and * add them to the heap; if it's a regular file, I return the * corresponding archive_entry and wait for the client to request * the file body. This strategy allows us to read most compliant * CDs with a single pass through the data, as required by libarchive. */ #define LOGICAL_BLOCK_SIZE 2048 #define SYSTEM_AREA_BLOCK 16 /* Structure of on-disk primary volume descriptor. 
*/ #define PVD_type_offset 0 #define PVD_type_size 1 #define PVD_id_offset (PVD_type_offset + PVD_type_size) #define PVD_id_size 5 #define PVD_version_offset (PVD_id_offset + PVD_id_size) #define PVD_version_size 1 #define PVD_reserved1_offset (PVD_version_offset + PVD_version_size) #define PVD_reserved1_size 1 #define PVD_system_id_offset (PVD_reserved1_offset + PVD_reserved1_size) #define PVD_system_id_size 32 #define PVD_volume_id_offset (PVD_system_id_offset + PVD_system_id_size) #define PVD_volume_id_size 32 #define PVD_reserved2_offset (PVD_volume_id_offset + PVD_volume_id_size) #define PVD_reserved2_size 8 #define PVD_volume_space_size_offset (PVD_reserved2_offset + PVD_reserved2_size) #define PVD_volume_space_size_size 8 #define PVD_reserved3_offset (PVD_volume_space_size_offset + PVD_volume_space_size_size) #define PVD_reserved3_size 32 #define PVD_volume_set_size_offset (PVD_reserved3_offset + PVD_reserved3_size) #define PVD_volume_set_size_size 4 #define PVD_volume_sequence_number_offset (PVD_volume_set_size_offset + PVD_volume_set_size_size) #define PVD_volume_sequence_number_size 4 #define PVD_logical_block_size_offset (PVD_volume_sequence_number_offset + PVD_volume_sequence_number_size) #define PVD_logical_block_size_size 4 #define PVD_path_table_size_offset (PVD_logical_block_size_offset + PVD_logical_block_size_size) #define PVD_path_table_size_size 8 #define PVD_type_1_path_table_offset (PVD_path_table_size_offset + PVD_path_table_size_size) #define PVD_type_1_path_table_size 4 #define PVD_opt_type_1_path_table_offset (PVD_type_1_path_table_offset + PVD_type_1_path_table_size) #define PVD_opt_type_1_path_table_size 4 #define PVD_type_m_path_table_offset (PVD_opt_type_1_path_table_offset + PVD_opt_type_1_path_table_size) #define PVD_type_m_path_table_size 4 #define PVD_opt_type_m_path_table_offset (PVD_type_m_path_table_offset + PVD_type_m_path_table_size) #define PVD_opt_type_m_path_table_size 4 #define PVD_root_directory_record_offset (PVD_opt_type_m_path_table_offset + PVD_opt_type_m_path_table_size) #define PVD_root_directory_record_size 34 #define PVD_volume_set_id_offset (PVD_root_directory_record_offset + PVD_root_directory_record_size) #define PVD_volume_set_id_size 128 #define PVD_publisher_id_offset (PVD_volume_set_id_offset + PVD_volume_set_id_size) #define PVD_publisher_id_size 128 #define PVD_preparer_id_offset (PVD_publisher_id_offset + PVD_publisher_id_size) #define PVD_preparer_id_size 128 #define PVD_application_id_offset (PVD_preparer_id_offset + PVD_preparer_id_size) #define PVD_application_id_size 128 #define PVD_copyright_file_id_offset (PVD_application_id_offset + PVD_application_id_size) #define PVD_copyright_file_id_size 37 #define PVD_abstract_file_id_offset (PVD_copyright_file_id_offset + PVD_copyright_file_id_size) #define PVD_abstract_file_id_size 37 #define PVD_bibliographic_file_id_offset (PVD_abstract_file_id_offset + PVD_abstract_file_id_size) #define PVD_bibliographic_file_id_size 37 #define PVD_creation_date_offset (PVD_bibliographic_file_id_offset + PVD_bibliographic_file_id_size) #define PVD_creation_date_size 17 #define PVD_modification_date_offset (PVD_creation_date_offset + PVD_creation_date_size) #define PVD_modification_date_size 17 #define PVD_expiration_date_offset (PVD_modification_date_offset + PVD_modification_date_size) #define PVD_expiration_date_size 17 #define PVD_effective_date_offset (PVD_expiration_date_offset + PVD_expiration_date_size) #define PVD_effective_date_size 17 #define PVD_file_structure_version_offset 
(PVD_effective_date_offset + PVD_effective_date_size) #define PVD_file_structure_version_size 1 #define PVD_reserved4_offset (PVD_file_structure_version_offset + PVD_file_structure_version_size) #define PVD_reserved4_size 1 #define PVD_application_data_offset (PVD_reserved4_offset + PVD_reserved4_size) #define PVD_application_data_size 512 #define PVD_reserved5_offset (PVD_application_data_offset + PVD_application_data_size) #define PVD_reserved5_size (2048 - PVD_reserved5_offset) /* TODO: It would make future maintenance easier to just hardcode the * above values. In particular, ECMA119 states the offsets as part of * the standard. That would eliminate the need for the following check.*/ #if PVD_reserved5_offset != 1395 #error PVD offset and size definitions are wrong. #endif /* Structure of optional on-disk supplementary volume descriptor. */ #define SVD_type_offset 0 #define SVD_type_size 1 #define SVD_id_offset (SVD_type_offset + SVD_type_size) #define SVD_id_size 5 #define SVD_version_offset (SVD_id_offset + SVD_id_size) #define SVD_version_size 1 /* ... */ #define SVD_reserved1_offset 72 #define SVD_reserved1_size 8 #define SVD_volume_space_size_offset 80 #define SVD_volume_space_size_size 8 #define SVD_escape_sequences_offset (SVD_volume_space_size_offset + SVD_volume_space_size_size) #define SVD_escape_sequences_size 32 /* ... */ #define SVD_logical_block_size_offset 128 #define SVD_logical_block_size_size 4 #define SVD_type_L_path_table_offset 140 #define SVD_type_M_path_table_offset 148 /* ... */ #define SVD_root_directory_record_offset 156 #define SVD_root_directory_record_size 34 #define SVD_file_structure_version_offset 881 #define SVD_reserved2_offset 882 #define SVD_reserved2_size 1 #define SVD_reserved3_offset 1395 #define SVD_reserved3_size 653 /* ... */ /* FIXME: validate correctness of last SVD entry offset. */ /* Structure of an on-disk directory record. */ /* Note: ISO9660 stores each multi-byte integer twice, once in * each byte order. The sizes here are the size of just one * of the two integers. (This is why the offset of a field isn't * the same as the offset+size of the previous field.) */ #define DR_length_offset 0 #define DR_length_size 1 #define DR_ext_attr_length_offset 1 #define DR_ext_attr_length_size 1 #define DR_extent_offset 2 #define DR_extent_size 4 #define DR_size_offset 10 #define DR_size_size 4 #define DR_date_offset 18 #define DR_date_size 7 #define DR_flags_offset 25 #define DR_flags_size 1 #define DR_file_unit_size_offset 26 #define DR_file_unit_size_size 1 #define DR_interleave_offset 27 #define DR_interleave_size 1 #define DR_volume_sequence_number_offset 28 #define DR_volume_sequence_number_size 2 #define DR_name_len_offset 32 #define DR_name_len_size 1 #define DR_name_offset 33 #ifdef HAVE_ZLIB_H static const unsigned char zisofs_magic[8] = { 0x37, 0xE4, 0x53, 0x96, 0xC9, 0xDB, 0xD6, 0x07 }; struct zisofs { /* Set 1 if this file compressed by paged zlib */ int pz; int pz_log2_bs; /* Log2 of block size */ uint64_t pz_uncompressed_size; int initialized; unsigned char *uncompressed_buffer; size_t uncompressed_buffer_size; uint32_t pz_offset; unsigned char header[16]; size_t header_avail; int header_passed; unsigned char *block_pointers; size_t block_pointers_alloc; size_t block_pointers_size; size_t block_pointers_avail; size_t block_off; uint32_t block_avail; z_stream stream; int stream_valid; }; #else struct zisofs { /* Set 1 if this file compressed by paged zlib */ int pz; }; #endif struct content { uint64_t offset;/* Offset on disk. 
*/ uint64_t size; /* File size in bytes. */ struct content *next; }; /* In-memory storage for a directory record. */ struct file_info { struct file_info *use_next; struct file_info *parent; struct file_info *next; struct file_info *re_next; int subdirs; uint64_t key; /* Heap Key. */ uint64_t offset; /* Offset on disk. */ uint64_t size; /* File size in bytes. */ uint32_t ce_offset; /* Offset of CE. */ uint32_t ce_size; /* Size of CE. */ char rr_moved; /* Flag to rr_moved. */ char rr_moved_has_re_only; char re; /* Having RRIP "RE" extension. */ char re_descendant; uint64_t cl_offset; /* Having RRIP "CL" extension. */ int birthtime_is_set; time_t birthtime; /* File created time. */ time_t mtime; /* File last modified time. */ time_t atime; /* File last accessed time. */ time_t ctime; /* File attribute change time. */ uint64_t rdev; /* Device number. */ mode_t mode; uid_t uid; gid_t gid; int64_t number; int nlinks; struct archive_string name; /* Pathname */ unsigned char *utf16be_name; size_t utf16be_bytes; char name_continues; /* Non-zero if name continues */ struct archive_string symlink; char symlink_continues; /* Non-zero if link continues */ /* Set 1 if this file compressed by paged zlib(zisofs) */ int pz; int pz_log2_bs; /* Log2 of block size */ uint64_t pz_uncompressed_size; /* Set 1 if this file is multi extent. */ int multi_extent; struct { struct content *first; struct content **last; } contents; struct { struct file_info *first; struct file_info **last; } rede_files; }; struct heap_queue { struct file_info **files; int allocated; int used; }; struct iso9660 { int magic; #define ISO9660_MAGIC 0x96609660 int opt_support_joliet; int opt_support_rockridge; struct archive_string pathname; char seenRockridge; /* Set true if RR extensions are used. */ char seenSUSP; /* Set true if SUSP is being used. */ char seenJoliet; unsigned char suspOffset; struct file_info *rr_moved; struct read_ce_queue { struct read_ce_req { uint64_t offset;/* Offset of CE on disk. */ struct file_info *file; } *reqs; int cnt; int allocated; } read_ce_req; int64_t previous_number; struct archive_string previous_pathname; struct file_info *use_files; struct heap_queue pending_files; struct { struct file_info *first; struct file_info **last; } cache_files; struct { struct file_info *first; struct file_info **last; } re_files; uint64_t current_position; ssize_t logical_block_size; uint64_t volume_size; /* Total size of volume in bytes. */ int32_t volume_block;/* Total size of volume in logical blocks. */ struct vd { int location; /* Location of Extent. */ uint32_t size; } primary, joliet; int64_t entry_sparse_offset; int64_t entry_bytes_remaining; size_t entry_bytes_unconsumed; struct zisofs entry_zisofs; struct content *entry_content; struct archive_string_conv *sconv_utf16be; /* * Buffers for a full pathname in UTF-16BE in Joliet extensions. */ #define UTF16_NAME_MAX 1024 unsigned char *utf16be_path; size_t utf16be_path_len; unsigned char *utf16be_previous_path; size_t utf16be_previous_path_len; /* Null buffer used in bidder to improve its performance. 
*/ unsigned char null[2048]; }; static int archive_read_format_iso9660_bid(struct archive_read *, int); static int archive_read_format_iso9660_options(struct archive_read *, const char *, const char *); static int archive_read_format_iso9660_cleanup(struct archive_read *); static int archive_read_format_iso9660_read_data(struct archive_read *, const void **, size_t *, int64_t *); static int archive_read_format_iso9660_read_data_skip(struct archive_read *); static int archive_read_format_iso9660_read_header(struct archive_read *, struct archive_entry *); static const char *build_pathname(struct archive_string *, struct file_info *, int); static int build_pathname_utf16be(unsigned char *, size_t, size_t *, struct file_info *); #if DEBUG static void dump_isodirrec(FILE *, const unsigned char *isodirrec); #endif static time_t time_from_tm(struct tm *); static time_t isodate17(const unsigned char *); static time_t isodate7(const unsigned char *); static int isBootRecord(struct iso9660 *, const unsigned char *); static int isVolumePartition(struct iso9660 *, const unsigned char *); static int isVDSetTerminator(struct iso9660 *, const unsigned char *); static int isJolietSVD(struct iso9660 *, const unsigned char *); static int isSVD(struct iso9660 *, const unsigned char *); static int isEVD(struct iso9660 *, const unsigned char *); static int isPVD(struct iso9660 *, const unsigned char *); static int next_cache_entry(struct archive_read *, struct iso9660 *, struct file_info **); static int next_entry_seek(struct archive_read *, struct iso9660 *, struct file_info **); static struct file_info * parse_file_info(struct archive_read *a, struct file_info *parent, const unsigned char *isodirrec, size_t reclen); static int parse_rockridge(struct archive_read *a, struct file_info *file, const unsigned char *start, const unsigned char *end); static int register_CE(struct archive_read *a, int32_t location, struct file_info *file); static int read_CE(struct archive_read *a, struct iso9660 *iso9660); static void parse_rockridge_NM1(struct file_info *, const unsigned char *, int); static void parse_rockridge_SL1(struct file_info *, const unsigned char *, int); static void parse_rockridge_TF1(struct file_info *, const unsigned char *, int); static void parse_rockridge_ZF1(struct file_info *, const unsigned char *, int); static void register_file(struct iso9660 *, struct file_info *); static void release_files(struct iso9660 *); static unsigned toi(const void *p, int n); static inline void re_add_entry(struct iso9660 *, struct file_info *); static inline struct file_info * re_get_entry(struct iso9660 *); static inline int rede_add_entry(struct file_info *); static inline struct file_info * rede_get_entry(struct file_info *); static inline void cache_add_entry(struct iso9660 *iso9660, struct file_info *file); static inline struct file_info *cache_get_entry(struct iso9660 *iso9660); static int heap_add_entry(struct archive_read *a, struct heap_queue *heap, struct file_info *file, uint64_t key); static struct file_info *heap_get_entry(struct heap_queue *heap); #define add_entry(arch, iso9660, file) \ heap_add_entry(arch, &((iso9660)->pending_files), file, file->offset) #define next_entry(iso9660) \ heap_get_entry(&((iso9660)->pending_files)) int archive_read_support_format_iso9660(struct archive *_a) { struct archive_read *a = (struct archive_read *)_a; struct iso9660 *iso9660; int r; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW, "archive_read_support_format_iso9660"); iso9660 = (struct iso9660 
*)calloc(1, sizeof(*iso9660)); if (iso9660 == NULL) { archive_set_error(&a->archive, ENOMEM, "Can't allocate iso9660 data"); return (ARCHIVE_FATAL); } iso9660->magic = ISO9660_MAGIC; iso9660->cache_files.first = NULL; iso9660->cache_files.last = &(iso9660->cache_files.first); iso9660->re_files.first = NULL; iso9660->re_files.last = &(iso9660->re_files.first); /* Enable to support Joliet extensions by default. */ iso9660->opt_support_joliet = 1; /* Enable to support Rock Ridge extensions by default. */ iso9660->opt_support_rockridge = 1; r = __archive_read_register_format(a, iso9660, "iso9660", archive_read_format_iso9660_bid, archive_read_format_iso9660_options, archive_read_format_iso9660_read_header, archive_read_format_iso9660_read_data, archive_read_format_iso9660_read_data_skip, NULL, archive_read_format_iso9660_cleanup, NULL, NULL); if (r != ARCHIVE_OK) { free(iso9660); return (r); } return (ARCHIVE_OK); } static int archive_read_format_iso9660_bid(struct archive_read *a, int best_bid) { struct iso9660 *iso9660; ssize_t bytes_read; const unsigned char *p; int seenTerminator; /* If there's already a better bid than we can ever make, don't bother testing. */ if (best_bid > 48) return (-1); iso9660 = (struct iso9660 *)(a->format->data); /* * Skip the first 32k (reserved area) and get the first * 8 sectors of the volume descriptor table. Of course, * if the I/O layer gives us more, we'll take it. */ #define RESERVED_AREA (SYSTEM_AREA_BLOCK * LOGICAL_BLOCK_SIZE) p = __archive_read_ahead(a, RESERVED_AREA + 8 * LOGICAL_BLOCK_SIZE, &bytes_read); if (p == NULL) return (-1); /* Skip the reserved area. */ bytes_read -= RESERVED_AREA; p += RESERVED_AREA; /* Check each volume descriptor. */ seenTerminator = 0; for (; bytes_read > LOGICAL_BLOCK_SIZE; bytes_read -= LOGICAL_BLOCK_SIZE, p += LOGICAL_BLOCK_SIZE) { /* Do not handle undefined Volume Descriptor Type. */ if (p[0] >= 4 && p[0] <= 254) return (0); /* Standard Identifier must be "CD001" */ if (memcmp(p + 1, "CD001", 5) != 0) return (0); if (isPVD(iso9660, p)) continue; if (!iso9660->joliet.location) { if (isJolietSVD(iso9660, p)) continue; } if (isBootRecord(iso9660, p)) continue; if (isEVD(iso9660, p)) continue; if (isSVD(iso9660, p)) continue; if (isVolumePartition(iso9660, p)) continue; if (isVDSetTerminator(iso9660, p)) { seenTerminator = 1; break; } return (0); } /* * ISO 9660 format must have Primary Volume Descriptor and * Volume Descriptor Set Terminator. */ if (seenTerminator && iso9660->primary.location > 16) return (48); /* We didn't find a valid PVD; return a bid of zero. */ return (0); } static int archive_read_format_iso9660_options(struct archive_read *a, const char *key, const char *val) { struct iso9660 *iso9660; iso9660 = (struct iso9660 *)(a->format->data); if (strcmp(key, "joliet") == 0) { if (val == NULL || strcmp(val, "off") == 0 || strcmp(val, "ignore") == 0 || strcmp(val, "disable") == 0 || strcmp(val, "0") == 0) iso9660->opt_support_joliet = 0; else iso9660->opt_support_joliet = 1; return (ARCHIVE_OK); } if (strcmp(key, "rockridge") == 0 || strcmp(key, "Rockridge") == 0) { iso9660->opt_support_rockridge = val != NULL; return (ARCHIVE_OK); } /* Note: The "warn" return is just to inform the options * supervisor that we didn't handle it. It will generate * a suitable error if no one used this option. 
*/ return (ARCHIVE_WARN); } static int isNull(struct iso9660 *iso9660, const unsigned char *h, unsigned offset, unsigned bytes) { while (bytes >= sizeof(iso9660->null)) { if (!memcmp(iso9660->null, h + offset, sizeof(iso9660->null))) return (0); offset += sizeof(iso9660->null); bytes -= sizeof(iso9660->null); } if (bytes) return memcmp(iso9660->null, h + offset, bytes) == 0; else return (1); } static int isBootRecord(struct iso9660 *iso9660, const unsigned char *h) { (void)iso9660; /* UNUSED */ /* Type of the Volume Descriptor Boot Record must be 0. */ if (h[0] != 0) return (0); /* Volume Descriptor Version must be 1. */ if (h[6] != 1) return (0); return (1); } static int isVolumePartition(struct iso9660 *iso9660, const unsigned char *h) { int32_t location; /* Type of the Volume Partition Descriptor must be 3. */ if (h[0] != 3) return (0); /* Volume Descriptor Version must be 1. */ if (h[6] != 1) return (0); /* Unused Field */ if (h[7] != 0) return (0); location = archive_le32dec(h + 72); if (location <= SYSTEM_AREA_BLOCK || location >= iso9660->volume_block) return (0); if ((uint32_t)location != archive_be32dec(h + 76)) return (0); return (1); } static int isVDSetTerminator(struct iso9660 *iso9660, const unsigned char *h) { (void)iso9660; /* UNUSED */ /* Type of the Volume Descriptor Set Terminator must be 255. */ if (h[0] != 255) return (0); /* Volume Descriptor Version must be 1. */ if (h[6] != 1) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, 7, 2048-7)) return (0); return (1); } static int isJolietSVD(struct iso9660 *iso9660, const unsigned char *h) { const unsigned char *p; ssize_t logical_block_size; int32_t volume_block; /* Check if current sector is a kind of Supplementary Volume * Descriptor. */ if (!isSVD(iso9660, h)) return (0); /* FIXME: do more validations according to joliet spec. */ /* check if this SVD contains joliet extension! */ p = h + SVD_escape_sequences_offset; /* N.B. Joliet spec says p[1] == '\\', but.... */ if (p[0] == '%' && p[1] == '/') { int level = 0; if (p[2] == '@') level = 1; else if (p[2] == 'C') level = 2; else if (p[2] == 'E') level = 3; else /* not joliet */ return (0); iso9660->seenJoliet = level; } else /* not joliet */ return (0); logical_block_size = archive_le16dec(h + SVD_logical_block_size_offset); volume_block = archive_le32dec(h + SVD_volume_space_size_offset); iso9660->logical_block_size = logical_block_size; iso9660->volume_block = volume_block; iso9660->volume_size = logical_block_size * (uint64_t)volume_block; /* Read Root Directory Record in Volume Descriptor. */ p = h + SVD_root_directory_record_offset; iso9660->joliet.location = archive_le32dec(p + DR_extent_offset); iso9660->joliet.size = archive_le32dec(p + DR_size_offset); return (48); } static int isSVD(struct iso9660 *iso9660, const unsigned char *h) { const unsigned char *p; ssize_t logical_block_size; int32_t volume_block; int32_t location; (void)iso9660; /* UNUSED */ /* Type 2 means it's a SVD. */ if (h[SVD_type_offset] != 2) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, SVD_reserved1_offset, SVD_reserved1_size)) return (0); if (!isNull(iso9660, h, SVD_reserved2_offset, SVD_reserved2_size)) return (0); if (!isNull(iso9660, h, SVD_reserved3_offset, SVD_reserved3_size)) return (0); /* File structure version must be 1 for ISO9660/ECMA119. 
*/ if (h[SVD_file_structure_version_offset] != 1) return (0); logical_block_size = archive_le16dec(h + SVD_logical_block_size_offset); if (logical_block_size <= 0) return (0); volume_block = archive_le32dec(h + SVD_volume_space_size_offset); if (volume_block <= SYSTEM_AREA_BLOCK+4) return (0); /* Location of Occurrence of Type L Path Table must be * available location, * >= SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */ location = archive_le32dec(h+SVD_type_L_path_table_offset); if (location < SYSTEM_AREA_BLOCK+2 || location >= volume_block) return (0); /* The Type M Path Table must be at a valid location (WinISO * and probably other programs omit this, so we allow zero) * * >= SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */ location = archive_be32dec(h+SVD_type_M_path_table_offset); if ((location > 0 && location < SYSTEM_AREA_BLOCK+2) || location >= volume_block) return (0); /* Read Root Directory Record in Volume Descriptor. */ p = h + SVD_root_directory_record_offset; if (p[DR_length_offset] != 34) return (0); return (48); } static int isEVD(struct iso9660 *iso9660, const unsigned char *h) { const unsigned char *p; ssize_t logical_block_size; int32_t volume_block; int32_t location; (void)iso9660; /* UNUSED */ /* Type of the Enhanced Volume Descriptor must be 2. */ if (h[PVD_type_offset] != 2) return (0); /* EVD version must be 2. */ if (h[PVD_version_offset] != 2) return (0); /* Reserved field must be 0. */ if (h[PVD_reserved1_offset] != 0) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, PVD_reserved2_offset, PVD_reserved2_size)) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, PVD_reserved3_offset, PVD_reserved3_size)) return (0); /* Logical block size must be > 0. */ /* I've looked at Ecma 119 and can't find any stronger * restriction on this field. */ logical_block_size = archive_le16dec(h + PVD_logical_block_size_offset); if (logical_block_size <= 0) return (0); volume_block = archive_le32dec(h + PVD_volume_space_size_offset); if (volume_block <= SYSTEM_AREA_BLOCK+4) return (0); /* File structure version must be 2 for ISO9660:1999. */ if (h[PVD_file_structure_version_offset] != 2) return (0); /* Location of Occurrence of Type L Path Table must be * available location, * >= SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */ location = archive_le32dec(h+PVD_type_1_path_table_offset); if (location < SYSTEM_AREA_BLOCK+2 || location >= volume_block) return (0); /* Location of Occurrence of Type M Path Table must be * available location, * >= SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */ location = archive_be32dec(h+PVD_type_m_path_table_offset); if ((location > 0 && location < SYSTEM_AREA_BLOCK+2) || location >= volume_block) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, PVD_reserved4_offset, PVD_reserved4_size)) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, PVD_reserved5_offset, PVD_reserved5_size)) return (0); /* Read Root Directory Record in Volume Descriptor. */ p = h + PVD_root_directory_record_offset; if (p[DR_length_offset] != 34) return (0); return (48); } static int isPVD(struct iso9660 *iso9660, const unsigned char *h) { const unsigned char *p; ssize_t logical_block_size; int32_t volume_block; int32_t location; int i; /* Type of the Primary Volume Descriptor must be 1. */ if (h[PVD_type_offset] != 1) return (0); /* PVD version must be 1. */ if (h[PVD_version_offset] != 1) return (0); /* Reserved field must be 0. 
*/ if (h[PVD_reserved1_offset] != 0) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, PVD_reserved2_offset, PVD_reserved2_size)) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, PVD_reserved3_offset, PVD_reserved3_size)) return (0); /* Logical block size must be > 0. */ /* I've looked at Ecma 119 and can't find any stronger * restriction on this field. */ logical_block_size = archive_le16dec(h + PVD_logical_block_size_offset); if (logical_block_size <= 0) return (0); volume_block = archive_le32dec(h + PVD_volume_space_size_offset); if (volume_block <= SYSTEM_AREA_BLOCK+4) return (0); /* File structure version must be 1 for ISO9660/ECMA119. */ if (h[PVD_file_structure_version_offset] != 1) return (0); /* Location of Occurrence of Type L Path Table must be * available location, * > SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */ location = archive_le32dec(h+PVD_type_1_path_table_offset); if (location < SYSTEM_AREA_BLOCK+2 || location >= volume_block) return (0); /* The Type M Path Table must also be at a valid location * (although ECMA 119 requires a Type M Path Table, WinISO and * probably other programs omit it, so we permit a zero here) * * >= SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */ location = archive_be32dec(h+PVD_type_m_path_table_offset); if ((location > 0 && location < SYSTEM_AREA_BLOCK+2) || location >= volume_block) return (0); /* Reserved field must be 0. */ /* But accept NetBSD/FreeBSD "makefs" images with 0x20 here. */ for (i = 0; i < PVD_reserved4_size; ++i) if (h[PVD_reserved4_offset + i] != 0 && h[PVD_reserved4_offset + i] != 0x20) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, PVD_reserved5_offset, PVD_reserved5_size)) return (0); /* XXX TODO: Check other values for sanity; reject more * malformed PVDs. XXX */ /* Read Root Directory Record in Volume Descriptor. 
*/ p = h + PVD_root_directory_record_offset; if (p[DR_length_offset] != 34) return (0); if (!iso9660->primary.location) { iso9660->logical_block_size = logical_block_size; iso9660->volume_block = volume_block; iso9660->volume_size = logical_block_size * (uint64_t)volume_block; iso9660->primary.location = archive_le32dec(p + DR_extent_offset); iso9660->primary.size = archive_le32dec(p + DR_size_offset); } return (48); } static int read_children(struct archive_read *a, struct file_info *parent) { struct iso9660 *iso9660; const unsigned char *b, *p; struct file_info *multi; size_t step, skip_size; iso9660 = (struct iso9660 *)(a->format->data); /* flush any remaining bytes from the last round to ensure * we're positioned */ if (iso9660->entry_bytes_unconsumed) { __archive_read_consume(a, iso9660->entry_bytes_unconsumed); iso9660->entry_bytes_unconsumed = 0; } if (iso9660->current_position > parent->offset) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Ignoring out-of-order directory (%s) %jd > %jd", parent->name.s, (intmax_t)iso9660->current_position, (intmax_t)parent->offset); return (ARCHIVE_WARN); } if (parent->offset + parent->size > iso9660->volume_size) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Directory is beyond end-of-media: %s", parent->name.s); return (ARCHIVE_WARN); } if (iso9660->current_position < parent->offset) { int64_t skipsize; skipsize = parent->offset - iso9660->current_position; skipsize = __archive_read_consume(a, skipsize); if (skipsize < 0) return ((int)skipsize); iso9660->current_position = parent->offset; } step = (size_t)(((parent->size + iso9660->logical_block_size -1) / iso9660->logical_block_size) * iso9660->logical_block_size); b = __archive_read_ahead(a, step, NULL); if (b == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to read full block when scanning " "ISO9660 directory list"); return (ARCHIVE_FATAL); } iso9660->current_position += step; multi = NULL; skip_size = step; while (step) { p = b; b += iso9660->logical_block_size; step -= iso9660->logical_block_size; for (; *p != 0 && p < b && p + *p <= b; p += *p) { struct file_info *child; /* N.B.: these special directory identifiers * are 8 bit "values" even on a * Joliet CD with UCS-2 (16bit) encoding. */ /* Skip '.' entry. */ if (*(p + DR_name_len_offset) == 1 && *(p + DR_name_offset) == '\0') continue; /* Skip '..' entry. */ if (*(p + DR_name_len_offset) == 1 && *(p + DR_name_offset) == '\001') continue; child = parse_file_info(a, parent, p, b - p); if (child == NULL) { __archive_read_consume(a, skip_size); return (ARCHIVE_FATAL); } if (child->cl_offset == 0 && (child->multi_extent || multi != NULL)) { struct content *con; if (multi == NULL) { multi = child; multi->contents.first = NULL; multi->contents.last = &(multi->contents.first); } con = malloc(sizeof(struct content)); if (con == NULL) { archive_set_error( &a->archive, ENOMEM, "No memory for multi extent"); __archive_read_consume(a, skip_size); return (ARCHIVE_FATAL); } con->offset = child->offset; con->size = child->size; con->next = NULL; *multi->contents.last = con; multi->contents.last = &(con->next); if (multi == child) { if (add_entry(a, iso9660, child) != ARCHIVE_OK) return (ARCHIVE_FATAL); } else { multi->size += child->size; if (!child->multi_extent) multi = NULL; } } else if (add_entry(a, iso9660, child) != ARCHIVE_OK) return (ARCHIVE_FATAL); } } __archive_read_consume(a, skip_size); /* Read data which recorded by RRIP "CE" extension. 
*/ if (read_CE(a, iso9660) != ARCHIVE_OK) return (ARCHIVE_FATAL); return (ARCHIVE_OK); } static int choose_volume(struct archive_read *a, struct iso9660 *iso9660) { struct file_info *file; int64_t skipsize; struct vd *vd; const void *block; char seenJoliet; vd = &(iso9660->primary); if (!iso9660->opt_support_joliet) iso9660->seenJoliet = 0; if (iso9660->seenJoliet && vd->location > iso9660->joliet.location) /* This condition is unlikely; by way of caution. */ vd = &(iso9660->joliet); skipsize = LOGICAL_BLOCK_SIZE * (int64_t)vd->location; skipsize = __archive_read_consume(a, skipsize); if (skipsize < 0) return ((int)skipsize); iso9660->current_position = skipsize; block = __archive_read_ahead(a, vd->size, NULL); if (block == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to read full block when scanning " "ISO9660 directory list"); return (ARCHIVE_FATAL); } /* * While reading Root Directory, flag seenJoliet must be zero to * avoid converting special name 0x00(Current Directory) and * next byte to UCS2. */ seenJoliet = iso9660->seenJoliet;/* Save flag. */ iso9660->seenJoliet = 0; file = parse_file_info(a, NULL, block, vd->size); if (file == NULL) return (ARCHIVE_FATAL); iso9660->seenJoliet = seenJoliet; /* * If the iso image has both RockRidge and Joliet, we preferentially * use RockRidge Extensions rather than Joliet ones. */ if (vd == &(iso9660->primary) && iso9660->seenRockridge && iso9660->seenJoliet) iso9660->seenJoliet = 0; if (vd == &(iso9660->primary) && !iso9660->seenRockridge && iso9660->seenJoliet) { /* Switch reading data from primary to joliet. */ vd = &(iso9660->joliet); skipsize = LOGICAL_BLOCK_SIZE * (int64_t)vd->location; skipsize -= iso9660->current_position; skipsize = __archive_read_consume(a, skipsize); if (skipsize < 0) return ((int)skipsize); iso9660->current_position += skipsize; block = __archive_read_ahead(a, vd->size, NULL); if (block == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to read full block when scanning " "ISO9660 directory list"); return (ARCHIVE_FATAL); } iso9660->seenJoliet = 0; file = parse_file_info(a, NULL, block, vd->size); if (file == NULL) return (ARCHIVE_FATAL); iso9660->seenJoliet = seenJoliet; } /* Store the root directory in the pending list. */ if (add_entry(a, iso9660, file) != ARCHIVE_OK) return (ARCHIVE_FATAL); if (iso9660->seenRockridge) { a->archive.archive_format = ARCHIVE_FORMAT_ISO9660_ROCKRIDGE; a->archive.archive_format_name = "ISO9660 with Rockridge extensions"; } return (ARCHIVE_OK); } static int archive_read_format_iso9660_read_header(struct archive_read *a, struct archive_entry *entry) { struct iso9660 *iso9660; struct file_info *file; int r, rd_r = ARCHIVE_OK; iso9660 = (struct iso9660 *)(a->format->data); if (!a->archive.archive_format) { a->archive.archive_format = ARCHIVE_FORMAT_ISO9660; a->archive.archive_format_name = "ISO9660"; } if (iso9660->current_position == 0) { r = choose_volume(a, iso9660); if (r != ARCHIVE_OK) return (r); } file = NULL;/* Eliminate a warning. */ /* Get the next entry that appears after the current offset. */ r = next_entry_seek(a, iso9660, &file); if (r != ARCHIVE_OK) return (r); if (iso9660->seenJoliet) { /* * Convert UTF-16BE of a filename to local locale MBS * and store the result into a filename field. 
*/ if (iso9660->sconv_utf16be == NULL) { iso9660->sconv_utf16be = archive_string_conversion_from_charset( &(a->archive), "UTF-16BE", 1); if (iso9660->sconv_utf16be == NULL) /* Couldn't allocate memory */ return (ARCHIVE_FATAL); } if (iso9660->utf16be_path == NULL) { iso9660->utf16be_path = malloc(UTF16_NAME_MAX); if (iso9660->utf16be_path == NULL) { archive_set_error(&a->archive, ENOMEM, "No memory"); return (ARCHIVE_FATAL); } } if (iso9660->utf16be_previous_path == NULL) { iso9660->utf16be_previous_path = malloc(UTF16_NAME_MAX); if (iso9660->utf16be_previous_path == NULL) { archive_set_error(&a->archive, ENOMEM, "No memory"); return (ARCHIVE_FATAL); } } iso9660->utf16be_path_len = 0; if (build_pathname_utf16be(iso9660->utf16be_path, UTF16_NAME_MAX, &(iso9660->utf16be_path_len), file) != 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Pathname is too long"); return (ARCHIVE_FATAL); } r = archive_entry_copy_pathname_l(entry, (const char *)iso9660->utf16be_path, iso9660->utf16be_path_len, iso9660->sconv_utf16be); if (r != 0) { if (errno == ENOMEM) { archive_set_error(&a->archive, ENOMEM, "No memory for Pathname"); return (ARCHIVE_FATAL); } archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Pathname cannot be converted " "from %s to current locale.", archive_string_conversion_charset_name( iso9660->sconv_utf16be)); rd_r = ARCHIVE_WARN; } } else { const char *path = build_pathname(&iso9660->pathname, file, 0); if (path == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Pathname is too long"); return (ARCHIVE_FATAL); } else { archive_string_empty(&iso9660->pathname); archive_entry_set_pathname(entry, path); } } iso9660->entry_bytes_remaining = file->size; /* Offset for sparse-file-aware clients. */ iso9660->entry_sparse_offset = 0; if (file->offset + file->size > iso9660->volume_size) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "File is beyond end-of-media: %s", archive_entry_pathname(entry)); iso9660->entry_bytes_remaining = 0; return (ARCHIVE_WARN); } /* Set up the entry structure with information about this entry. */ archive_entry_set_mode(entry, file->mode); archive_entry_set_uid(entry, file->uid); archive_entry_set_gid(entry, file->gid); archive_entry_set_nlink(entry, file->nlinks); if (file->birthtime_is_set) archive_entry_set_birthtime(entry, file->birthtime, 0); else archive_entry_unset_birthtime(entry); archive_entry_set_mtime(entry, file->mtime, 0); archive_entry_set_ctime(entry, file->ctime, 0); archive_entry_set_atime(entry, file->atime, 0); /* N.B.: Rock Ridge supports 64-bit device numbers. */ archive_entry_set_rdev(entry, (dev_t)file->rdev); archive_entry_set_size(entry, iso9660->entry_bytes_remaining); if (file->symlink.s != NULL) archive_entry_copy_symlink(entry, file->symlink.s); /* Note: If the input isn't seekable, we can't rewind to * return the same body again, so if the next entry refers to * the same data, we have to return it as a hardlink to the * original entry. 
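 * (Candidates are matched through file->number, the file serial number
 * taken from the extent location or, when present, from the Rock Ridge
 * "PX" inode field.)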
*/ if (file->number != -1 && file->number == iso9660->previous_number) { if (iso9660->seenJoliet) { r = archive_entry_copy_hardlink_l(entry, (const char *)iso9660->utf16be_previous_path, iso9660->utf16be_previous_path_len, iso9660->sconv_utf16be); if (r != 0) { if (errno == ENOMEM) { archive_set_error(&a->archive, ENOMEM, "No memory for Linkname"); return (ARCHIVE_FATAL); } archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Linkname cannot be converted " "from %s to current locale.", archive_string_conversion_charset_name( iso9660->sconv_utf16be)); rd_r = ARCHIVE_WARN; } } else archive_entry_set_hardlink(entry, iso9660->previous_pathname.s); archive_entry_unset_size(entry); iso9660->entry_bytes_remaining = 0; return (rd_r); } if ((file->mode & AE_IFMT) != AE_IFDIR && file->offset < iso9660->current_position) { int64_t r64; r64 = __archive_read_seek(a, file->offset, SEEK_SET); if (r64 != (int64_t)file->offset) { /* We can't seek backwards to extract it, so issue * a warning. Note that this can only happen if * this entry was added to the heap after we passed * this offset, that is, only if the directory * mentioning this entry is later than the body of * the entry. Such layouts are very unusual; most * ISO9660 writers lay out and record all directory * information first, then store all file bodies. */ archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Ignoring out-of-order file @%jx (%s) %jd < %jd", (intmax_t)file->number, iso9660->pathname.s, (intmax_t)file->offset, (intmax_t)iso9660->current_position); iso9660->entry_bytes_remaining = 0; return (ARCHIVE_WARN); } iso9660->current_position = (uint64_t)r64; } /* Initialize zisofs variables. */ iso9660->entry_zisofs.pz = file->pz; if (file->pz) { #ifdef HAVE_ZLIB_H struct zisofs *zisofs; zisofs = &iso9660->entry_zisofs; zisofs->initialized = 0; zisofs->pz_log2_bs = file->pz_log2_bs; zisofs->pz_uncompressed_size = file->pz_uncompressed_size; zisofs->pz_offset = 0; zisofs->header_avail = 0; zisofs->header_passed = 0; zisofs->block_pointers_avail = 0; #endif archive_entry_set_size(entry, file->pz_uncompressed_size); } iso9660->previous_number = file->number; if (iso9660->seenJoliet) { memcpy(iso9660->utf16be_previous_path, iso9660->utf16be_path, iso9660->utf16be_path_len); iso9660->utf16be_previous_path_len = iso9660->utf16be_path_len; } else archive_strcpy( &iso9660->previous_pathname, iso9660->pathname.s); /* Reset entry_bytes_remaining if the file is multi extent. */ iso9660->entry_content = file->contents.first; if (iso9660->entry_content != NULL) iso9660->entry_bytes_remaining = iso9660->entry_content->size; if (archive_entry_filetype(entry) == AE_IFDIR) { /* Overwrite nlinks by proper link number which is * calculated from number of sub directories. */ archive_entry_set_nlink(entry, 2 + file->subdirs); /* Directory data has been read completely. */ iso9660->entry_bytes_remaining = 0; } if (rd_r != ARCHIVE_OK) return (rd_r); return (ARCHIVE_OK); } static int archive_read_format_iso9660_read_data_skip(struct archive_read *a) { /* Because read_next_header always does an explicit skip * to the next entry, we don't need to do anything here. 
*/ (void)a; /* UNUSED */ return (ARCHIVE_OK); } #ifdef HAVE_ZLIB_H static int zisofs_read_data(struct archive_read *a, const void **buff, size_t *size, int64_t *offset) { struct iso9660 *iso9660; struct zisofs *zisofs; const unsigned char *p; size_t avail; ssize_t bytes_read; size_t uncompressed_size; int r; iso9660 = (struct iso9660 *)(a->format->data); zisofs = &iso9660->entry_zisofs; p = __archive_read_ahead(a, 1, &bytes_read); if (bytes_read <= 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated zisofs file body"); return (ARCHIVE_FATAL); } if (bytes_read > iso9660->entry_bytes_remaining) bytes_read = (ssize_t)iso9660->entry_bytes_remaining; avail = bytes_read; uncompressed_size = 0; if (!zisofs->initialized) { size_t ceil, xsize; /* Allocate block pointers buffer. */ ceil = (size_t)((zisofs->pz_uncompressed_size + (((int64_t)1) << zisofs->pz_log2_bs) - 1) >> zisofs->pz_log2_bs); xsize = (ceil + 1) * 4; if (zisofs->block_pointers_alloc < xsize) { size_t alloc; if (zisofs->block_pointers != NULL) free(zisofs->block_pointers); alloc = ((xsize >> 10) + 1) << 10; zisofs->block_pointers = malloc(alloc); if (zisofs->block_pointers == NULL) { archive_set_error(&a->archive, ENOMEM, "No memory for zisofs decompression"); return (ARCHIVE_FATAL); } zisofs->block_pointers_alloc = alloc; } zisofs->block_pointers_size = xsize; /* Allocate uncompressed data buffer. */ xsize = (size_t)1UL << zisofs->pz_log2_bs; if (zisofs->uncompressed_buffer_size < xsize) { if (zisofs->uncompressed_buffer != NULL) free(zisofs->uncompressed_buffer); zisofs->uncompressed_buffer = malloc(xsize); if (zisofs->uncompressed_buffer == NULL) { archive_set_error(&a->archive, ENOMEM, "No memory for zisofs decompression"); return (ARCHIVE_FATAL); } } zisofs->uncompressed_buffer_size = xsize; /* * Read the file header, and check the magic code of zisofs. */ if (zisofs->header_avail < sizeof(zisofs->header)) { xsize = sizeof(zisofs->header) - zisofs->header_avail; if (avail < xsize) xsize = avail; memcpy(zisofs->header + zisofs->header_avail, p, xsize); zisofs->header_avail += xsize; avail -= xsize; p += xsize; } if (!zisofs->header_passed && zisofs->header_avail == sizeof(zisofs->header)) { int err = 0; if (memcmp(zisofs->header, zisofs_magic, sizeof(zisofs_magic)) != 0) err = 1; if (archive_le32dec(zisofs->header + 8) != zisofs->pz_uncompressed_size) err = 1; if (zisofs->header[12] != 4) err = 1; if (zisofs->header[13] != zisofs->pz_log2_bs) err = 1; if (err) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Illegal zisofs file body"); return (ARCHIVE_FATAL); } zisofs->header_passed = 1; } /* * Read block pointers. */ if (zisofs->header_passed && zisofs->block_pointers_avail < zisofs->block_pointers_size) { xsize = zisofs->block_pointers_size - zisofs->block_pointers_avail; if (avail < xsize) xsize = avail; memcpy(zisofs->block_pointers + zisofs->block_pointers_avail, p, xsize); zisofs->block_pointers_avail += xsize; avail -= xsize; p += xsize; if (zisofs->block_pointers_avail == zisofs->block_pointers_size) { /* We've got all block pointers and initialize * related variables. */ zisofs->block_off = 0; zisofs->block_avail = 0; /* Complete a initialization */ zisofs->initialized = 1; } } if (!zisofs->initialized) goto next_data; /* We need more data. */ } /* * Get block offsets from block pointers. */ if (zisofs->block_avail == 0) { uint32_t bst, bed; if (zisofs->block_off + 4 >= zisofs->block_pointers_size) { /* There isn't a pair of offsets. 
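 * (Each compressed block is bounded by two consecutive 4-byte
 * little-endian pointers in the table: its start offset and its end
 * offset within the file.)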
*/ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Illegal zisofs block pointers"); return (ARCHIVE_FATAL); } bst = archive_le32dec( zisofs->block_pointers + zisofs->block_off); if (bst != zisofs->pz_offset + (bytes_read - avail)) { /* TODO: Should we seek offset of current file * by bst ? */ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Illegal zisofs block pointers(cannot seek)"); return (ARCHIVE_FATAL); } bed = archive_le32dec( zisofs->block_pointers + zisofs->block_off + 4); if (bed < bst) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Illegal zisofs block pointers"); return (ARCHIVE_FATAL); } zisofs->block_avail = bed - bst; zisofs->block_off += 4; /* Initialize compression library for new block. */ if (zisofs->stream_valid) r = inflateReset(&zisofs->stream); else r = inflateInit(&zisofs->stream); if (r != Z_OK) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Can't initialize zisofs decompression."); return (ARCHIVE_FATAL); } zisofs->stream_valid = 1; zisofs->stream.total_in = 0; zisofs->stream.total_out = 0; } /* * Make uncompressed data. */ if (zisofs->block_avail == 0) { memset(zisofs->uncompressed_buffer, 0, zisofs->uncompressed_buffer_size); uncompressed_size = zisofs->uncompressed_buffer_size; } else { zisofs->stream.next_in = (Bytef *)(uintptr_t)(const void *)p; if (avail > zisofs->block_avail) zisofs->stream.avail_in = zisofs->block_avail; else zisofs->stream.avail_in = (uInt)avail; zisofs->stream.next_out = zisofs->uncompressed_buffer; zisofs->stream.avail_out = (uInt)zisofs->uncompressed_buffer_size; r = inflate(&zisofs->stream, 0); switch (r) { case Z_OK: /* Decompressor made some progress.*/ case Z_STREAM_END: /* Found end of stream. */ break; default: archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "zisofs decompression failed (%d)", r); return (ARCHIVE_FATAL); } uncompressed_size = zisofs->uncompressed_buffer_size - zisofs->stream.avail_out; avail -= zisofs->stream.next_in - p; zisofs->block_avail -= (uint32_t)(zisofs->stream.next_in - p); } next_data: bytes_read -= avail; *buff = zisofs->uncompressed_buffer; *size = uncompressed_size; *offset = iso9660->entry_sparse_offset; iso9660->entry_sparse_offset += uncompressed_size; iso9660->entry_bytes_remaining -= bytes_read; iso9660->current_position += bytes_read; zisofs->pz_offset += (uint32_t)bytes_read; iso9660->entry_bytes_unconsumed += bytes_read; return (ARCHIVE_OK); } #else /* HAVE_ZLIB_H */ static int zisofs_read_data(struct archive_read *a, const void **buff, size_t *size, int64_t *offset) { (void)buff;/* UNUSED */ (void)size;/* UNUSED */ (void)offset;/* UNUSED */ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "zisofs is not supported on this platform."); return (ARCHIVE_FAILED); } #endif /* HAVE_ZLIB_H */ static int archive_read_format_iso9660_read_data(struct archive_read *a, const void **buff, size_t *size, int64_t *offset) { ssize_t bytes_read; struct iso9660 *iso9660; iso9660 = (struct iso9660 *)(a->format->data); if (iso9660->entry_bytes_unconsumed) { __archive_read_consume(a, iso9660->entry_bytes_unconsumed); iso9660->entry_bytes_unconsumed = 0; } if (iso9660->entry_bytes_remaining <= 0) { if (iso9660->entry_content != NULL) iso9660->entry_content = iso9660->entry_content->next; if (iso9660->entry_content == NULL) { *buff = NULL; *size = 0; *offset = iso9660->entry_sparse_offset; return (ARCHIVE_EOF); } /* Seek forward to the start of the entry. 
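 * (Multi-extent files record each extent at its own location, so skip
 * forward to the offset of the next content block before reading it.)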
*/ if (iso9660->current_position < iso9660->entry_content->offset) { int64_t step; step = iso9660->entry_content->offset - iso9660->current_position; step = __archive_read_consume(a, step); if (step < 0) return ((int)step); iso9660->current_position = iso9660->entry_content->offset; } if (iso9660->entry_content->offset < iso9660->current_position) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Ignoring out-of-order file (%s) %jd < %jd", iso9660->pathname.s, (intmax_t)iso9660->entry_content->offset, (intmax_t)iso9660->current_position); *buff = NULL; *size = 0; *offset = iso9660->entry_sparse_offset; return (ARCHIVE_WARN); } iso9660->entry_bytes_remaining = iso9660->entry_content->size; } if (iso9660->entry_zisofs.pz) return (zisofs_read_data(a, buff, size, offset)); *buff = __archive_read_ahead(a, 1, &bytes_read); if (bytes_read == 0) archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Truncated input file"); if (*buff == NULL) return (ARCHIVE_FATAL); if (bytes_read > iso9660->entry_bytes_remaining) bytes_read = (ssize_t)iso9660->entry_bytes_remaining; *size = bytes_read; *offset = iso9660->entry_sparse_offset; iso9660->entry_sparse_offset += bytes_read; iso9660->entry_bytes_remaining -= bytes_read; iso9660->entry_bytes_unconsumed = bytes_read; iso9660->current_position += bytes_read; return (ARCHIVE_OK); } static int archive_read_format_iso9660_cleanup(struct archive_read *a) { struct iso9660 *iso9660; int r = ARCHIVE_OK; iso9660 = (struct iso9660 *)(a->format->data); release_files(iso9660); free(iso9660->read_ce_req.reqs); archive_string_free(&iso9660->pathname); archive_string_free(&iso9660->previous_pathname); if (iso9660->pending_files.files) free(iso9660->pending_files.files); #ifdef HAVE_ZLIB_H free(iso9660->entry_zisofs.uncompressed_buffer); free(iso9660->entry_zisofs.block_pointers); if (iso9660->entry_zisofs.stream_valid) { if (inflateEnd(&iso9660->entry_zisofs.stream) != Z_OK) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to clean up zlib decompressor"); r = ARCHIVE_FATAL; } } #endif free(iso9660->utf16be_path); free(iso9660->utf16be_previous_path); free(iso9660); (a->format->data) = NULL; return (r); } /* * This routine parses a single ISO directory record, makes sense * of any extensions, and stores the result in memory. */ static struct file_info * parse_file_info(struct archive_read *a, struct file_info *parent, const unsigned char *isodirrec, size_t reclen) { struct iso9660 *iso9660; struct file_info *file, *filep; size_t name_len; const unsigned char *rr_start, *rr_end; const unsigned char *p; size_t dr_len; uint64_t fsize, offset; int32_t location; int flags; iso9660 = (struct iso9660 *)(a->format->data); if (reclen != 0) dr_len = (size_t)isodirrec[DR_length_offset]; /* * Sanity check that reclen is not zero and dr_len is greater than * reclen but at least 34 */ if (reclen == 0 || reclen < dr_len || dr_len < 34) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid length of directory record"); return (NULL); } name_len = (size_t)isodirrec[DR_name_len_offset]; location = archive_le32dec(isodirrec + DR_extent_offset); fsize = toi(isodirrec + DR_size_offset, DR_size_size); /* Sanity check that name_len doesn't exceed dr_len. */ if (dr_len - 33 < name_len || name_len == 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid length of file identifier"); return (NULL); } /* Sanity check that location doesn't exceed volume block. 
* Don't check lower limit of location; it's possibility * the location has negative value when file type is symbolic * link or file size is zero. As far as I know latest mkisofs * do that. */ if (location > 0 && (location + ((fsize + iso9660->logical_block_size -1) / iso9660->logical_block_size)) > (uint32_t)iso9660->volume_block) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid location of extent of file"); return (NULL); } /* Sanity check that location doesn't have a negative value * when the file is not empty. it's too large. */ if (fsize != 0 && location < 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid location of extent of file"); return (NULL); } /* Sanity check that this entry does not create a cycle. */ offset = iso9660->logical_block_size * (uint64_t)location; for (filep = parent; filep != NULL; filep = filep->parent) { if (filep->offset == offset) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Directory structure contains loop"); return (NULL); } } /* Create a new file entry and copy data from the ISO dir record. */ file = (struct file_info *)calloc(1, sizeof(*file)); if (file == NULL) { archive_set_error(&a->archive, ENOMEM, "No memory for file entry"); return (NULL); } file->parent = parent; file->offset = offset; file->size = fsize; file->mtime = isodate7(isodirrec + DR_date_offset); file->ctime = file->atime = file->mtime; file->rede_files.first = NULL; file->rede_files.last = &(file->rede_files.first); p = isodirrec + DR_name_offset; /* Rockridge extensions (if any) follow name. Compute this * before fidgeting the name_len below. */ rr_start = p + name_len + (name_len & 1 ? 0 : 1); rr_end = isodirrec + dr_len; if (iso9660->seenJoliet) { /* Joliet names are max 64 chars (128 bytes) according to spec, * but genisoimage/mkisofs allows recording longer Joliet * names which are 103 UCS2 characters(206 bytes) by their * option '-joliet-long'. */ if (name_len > 206) name_len = 206; name_len &= ~1; /* trim trailing first version and dot from filename. * * Remember we were in UTF-16BE land! * SEPARATOR 1 (.) and SEPARATOR 2 (;) are both * 16 bits big endian characters on Joliet. * * TODO: sanitize filename? * Joliet allows any UCS-2 char except: * *, /, :, ;, ? and \. */ /* Chop off trailing ';1' from files. */ if (name_len > 4 && p[name_len-4] == 0 && p[name_len-3] == ';' && p[name_len-2] == 0 && p[name_len-1] == '1') name_len -= 4; #if 0 /* XXX: this somehow manages to strip of single-character file extensions, like '.c'. */ /* Chop off trailing '.' from filenames. */ if (name_len > 2 && p[name_len-2] == 0 && p[name_len-1] == '.') name_len -= 2; #endif if ((file->utf16be_name = malloc(name_len)) == NULL) { archive_set_error(&a->archive, ENOMEM, "No memory for file name"); goto fail; } memcpy(file->utf16be_name, p, name_len); file->utf16be_bytes = name_len; } else { /* Chop off trailing ';1' from files. */ if (name_len > 2 && p[name_len - 2] == ';' && p[name_len - 1] == '1') name_len -= 2; /* Chop off trailing '.' from filenames. */ if (name_len > 1 && p[name_len - 1] == '.') --name_len; archive_strncpy(&file->name, (const char *)p, name_len); } flags = isodirrec[DR_flags_offset]; if (flags & 0x02) file->mode = AE_IFDIR | 0700; else file->mode = AE_IFREG | 0400; if (flags & 0x80) file->multi_extent = 1; else file->multi_extent = 0; /* * Use a location for the file number, which is treated as an inode * number to find out hardlink target. 
If Rockridge extensions is * being used, the file number will be overwritten by FILE SERIAL * NUMBER of RRIP "PX" extension. * Note: Old mkisofs did not record that FILE SERIAL NUMBER * in ISO images. * Note2: xorriso set 0 to the location of a symlink file. */ if (file->size == 0 && location >= 0) { /* If file->size is zero, its location points wrong place, * and so we should not use it for the file number. * When the location has negative value, it can be used * for the file number. */ file->number = -1; /* Do not appear before any directory entries. */ file->offset = -1; } else file->number = (int64_t)(uint32_t)location; /* Rockridge extensions overwrite information from above. */ if (iso9660->opt_support_rockridge) { if (parent == NULL && rr_end - rr_start >= 7) { p = rr_start; if (memcmp(p, "SP\x07\x01\xbe\xef", 6) == 0) { /* * SP extension stores the suspOffset * (Number of bytes to skip between * filename and SUSP records.) * It is mandatory by the SUSP standard * (IEEE 1281). * * It allows SUSP to coexist with * non-SUSP uses of the System * Use Area by placing non-SUSP data * before SUSP data. * * SP extension must be in the root * directory entry, disable all SUSP * processing if not found. */ iso9660->suspOffset = p[6]; iso9660->seenSUSP = 1; rr_start += 7; } } if (iso9660->seenSUSP) { int r; file->name_continues = 0; file->symlink_continues = 0; rr_start += iso9660->suspOffset; r = parse_rockridge(a, file, rr_start, rr_end); if (r != ARCHIVE_OK) goto fail; /* * A file size of symbolic link files in ISO images * made by makefs is not zero and its location is * the same as those of next regular file. That is * the same as hard like file and it causes unexpected * error. */ if (file->size > 0 && (file->mode & AE_IFMT) == AE_IFLNK) { file->size = 0; file->number = -1; file->offset = -1; } } else /* If there isn't SUSP, disable parsing * rock ridge extensions. */ iso9660->opt_support_rockridge = 0; } file->nlinks = 1;/* Reset nlink. we'll calculate it later. */ /* Tell file's parent how many children that parent has. */ if (parent != NULL && (flags & 0x02)) parent->subdirs++; if (iso9660->seenRockridge) { if (parent != NULL && parent->parent == NULL && (flags & 0x02) && iso9660->rr_moved == NULL && file->name.s && (strcmp(file->name.s, "rr_moved") == 0 || strcmp(file->name.s, ".rr_moved") == 0)) { iso9660->rr_moved = file; file->rr_moved = 1; file->rr_moved_has_re_only = 1; file->re = 0; parent->subdirs--; } else if (file->re) { /* * Sanity check: file's parent is rr_moved. */ if (parent == NULL || parent->rr_moved == 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid Rockridge RE"); goto fail; } /* * Sanity check: file does not have "CL" extension. */ if (file->cl_offset) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid Rockridge RE and CL"); goto fail; } /* * Sanity check: The file type must be a directory. */ if ((flags & 0x02) == 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid Rockridge RE"); goto fail; } } else if (parent != NULL && parent->rr_moved) file->rr_moved_has_re_only = 0; else if (parent != NULL && (flags & 0x02) && (parent->re || parent->re_descendant)) file->re_descendant = 1; if (file->cl_offset) { struct file_info *r; if (parent == NULL || parent->parent == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid Rockridge CL"); goto fail; } /* * Sanity check: The file type must be a regular file. 
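 * (A Rock Ridge "CL" child link stands in for a relocated directory;
 * the record that carries it must not itself be marked as a directory,
 * i.e. flag 0x02 must be clear.)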
*/ if ((flags & 0x02) != 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid Rockridge CL"); goto fail; } parent->subdirs++; /* Overwrite an offset and a number of this "CL" entry * to appear before other dirs. "+1" to those is to * make sure to appear after "RE" entry which this * "CL" entry should be connected with. */ file->offset = file->number = file->cl_offset + 1; /* * Sanity check: cl_offset does not point at its * the parents or itself. */ for (r = parent; r; r = r->parent) { if (r->offset == file->cl_offset) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid Rockridge CL"); goto fail; } } if (file->cl_offset == file->offset || parent->rr_moved) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid Rockridge CL"); goto fail; } } } #if DEBUG /* DEBUGGING: Warn about attributes I don't yet fully support. */ if ((flags & ~0x02) != 0) { fprintf(stderr, "\n ** Unrecognized flag: "); dump_isodirrec(stderr, isodirrec); fprintf(stderr, "\n"); } else if (toi(isodirrec + DR_volume_sequence_number_offset, 2) != 1) { fprintf(stderr, "\n ** Unrecognized sequence number: "); dump_isodirrec(stderr, isodirrec); fprintf(stderr, "\n"); } else if (*(isodirrec + DR_file_unit_size_offset) != 0) { fprintf(stderr, "\n ** Unexpected file unit size: "); dump_isodirrec(stderr, isodirrec); fprintf(stderr, "\n"); } else if (*(isodirrec + DR_interleave_offset) != 0) { fprintf(stderr, "\n ** Unexpected interleave: "); dump_isodirrec(stderr, isodirrec); fprintf(stderr, "\n"); } else if (*(isodirrec + DR_ext_attr_length_offset) != 0) { fprintf(stderr, "\n ** Unexpected extended attribute length: "); dump_isodirrec(stderr, isodirrec); fprintf(stderr, "\n"); } #endif register_file(iso9660, file); return (file); fail: archive_string_free(&file->name); free(file); return (NULL); } static int parse_rockridge(struct archive_read *a, struct file_info *file, const unsigned char *p, const unsigned char *end) { struct iso9660 *iso9660; iso9660 = (struct iso9660 *)(a->format->data); while (p + 4 <= end /* Enough space for another entry. */ && p[0] >= 'A' && p[0] <= 'Z' /* Sanity-check 1st char of name. */ && p[1] >= 'A' && p[1] <= 'Z' /* Sanity-check 2nd char of name. */ && p[2] >= 4 /* Sanity-check length. */ && p + p[2] <= end) { /* Sanity-check length. */ const unsigned char *data = p + 4; int data_length = p[2] - 4; int version = p[3]; switch(p[0]) { case 'C': if (p[1] == 'E') { if (version == 1 && data_length == 24) { /* * CE extension comprises: * 8 byte sector containing extension * 8 byte offset w/in above sector * 8 byte length of continuation */ int32_t location = archive_le32dec(data); file->ce_offset = archive_le32dec(data+8); file->ce_size = archive_le32dec(data+16); if (register_CE(a, location, file) != ARCHIVE_OK) return (ARCHIVE_FATAL); } } else if (p[1] == 'L') { if (version == 1 && data_length == 8) { file->cl_offset = (uint64_t) iso9660->logical_block_size * (uint64_t)archive_le32dec(data); iso9660->seenRockridge = 1; } } break; case 'N': if (p[1] == 'M') { if (version == 1) { parse_rockridge_NM1(file, data, data_length); iso9660->seenRockridge = 1; } } break; case 'P': /* * PD extension is padding; * contents are always ignored. * * PL extension won't appear; * contents are always ignored. 
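 * PN supplies the device number for block/character special files;
 * PX supplies the POSIX attributes (mode, nlinks, uid, gid, inode).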
*/ if (p[1] == 'N') { if (version == 1 && data_length == 16) { file->rdev = toi(data,4); file->rdev <<= 32; file->rdev |= toi(data + 8, 4); iso9660->seenRockridge = 1; } } else if (p[1] == 'X') { /* * PX extension comprises: * 8 bytes for mode, * 8 bytes for nlinks, * 8 bytes for uid, * 8 bytes for gid, * 8 bytes for inode. */ if (version == 1) { if (data_length >= 8) file->mode = toi(data, 4); if (data_length >= 16) file->nlinks = toi(data + 8, 4); if (data_length >= 24) file->uid = toi(data + 16, 4); if (data_length >= 32) file->gid = toi(data + 24, 4); if (data_length >= 40) file->number = toi(data + 32, 4); iso9660->seenRockridge = 1; } } break; case 'R': if (p[1] == 'E' && version == 1) { file->re = 1; iso9660->seenRockridge = 1; } else if (p[1] == 'R' && version == 1) { /* * RR extension comprises: * one byte flag value * This extension is obsolete, * so contents are always ignored. */ } break; case 'S': if (p[1] == 'L') { if (version == 1) { parse_rockridge_SL1(file, data, data_length); iso9660->seenRockridge = 1; } } else if (p[1] == 'T' && data_length == 0 && version == 1) { /* * ST extension marks end of this * block of SUSP entries. * * It allows SUSP to coexist with * non-SUSP uses of the System * Use Area by placing non-SUSP data * after SUSP data. */ iso9660->seenSUSP = 0; iso9660->seenRockridge = 0; return (ARCHIVE_OK); } break; case 'T': if (p[1] == 'F') { if (version == 1) { parse_rockridge_TF1(file, data, data_length); iso9660->seenRockridge = 1; } } break; case 'Z': if (p[1] == 'F') { if (version == 1) parse_rockridge_ZF1(file, data, data_length); } break; default: break; } p += p[2]; } return (ARCHIVE_OK); } static int register_CE(struct archive_read *a, int32_t location, struct file_info *file) { struct iso9660 *iso9660; struct read_ce_queue *heap; struct read_ce_req *p; uint64_t offset, parent_offset; int hole, parent; iso9660 = (struct iso9660 *)(a->format->data); offset = ((uint64_t)location) * (uint64_t)iso9660->logical_block_size; if (((file->mode & AE_IFMT) == AE_IFREG && offset >= file->offset) || offset < iso9660->current_position || (((uint64_t)file->ce_offset) + file->ce_size) > (uint64_t)iso9660->logical_block_size || offset + file->ce_offset + file->ce_size > iso9660->volume_size) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid parameter in SUSP \"CE\" extension"); return (ARCHIVE_FATAL); } /* Expand our CE list as necessary. */ heap = &(iso9660->read_ce_req); if (heap->cnt >= heap->allocated) { int new_size; if (heap->allocated < 16) new_size = 16; else new_size = heap->allocated * 2; /* Overflow might keep us from growing the list. */ if (new_size <= heap->allocated) { archive_set_error(&a->archive, ENOMEM, "Out of memory"); return (ARCHIVE_FATAL); } p = calloc(new_size, sizeof(p[0])); if (p == NULL) { archive_set_error(&a->archive, ENOMEM, "Out of memory"); return (ARCHIVE_FATAL); } if (heap->reqs != NULL) { memcpy(p, heap->reqs, heap->cnt * sizeof(*p)); free(heap->reqs); } heap->reqs = p; heap->allocated = new_size; } /* * Start with hole at end, walk it up tree to find insertion point. */ hole = heap->cnt++; while (hole > 0) { parent = (hole - 1)/2; parent_offset = heap->reqs[parent].offset; if (offset >= parent_offset) { heap->reqs[hole].offset = offset; heap->reqs[hole].file = file; return (ARCHIVE_OK); } /* Move parent into hole <==> move hole up tree. 
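 * (Standard binary min-heap insertion keyed on the continuation area's
 * byte offset, so read_CE() later consumes the areas in ascending
 * stream order.)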
*/ heap->reqs[hole] = heap->reqs[parent]; hole = parent; } heap->reqs[0].offset = offset; heap->reqs[0].file = file; return (ARCHIVE_OK); } static void next_CE(struct read_ce_queue *heap) { uint64_t a_offset, b_offset, c_offset; int a, b, c; struct read_ce_req tmp; if (heap->cnt < 1) return; /* * Move the last item in the heap to the root of the tree */ heap->reqs[0] = heap->reqs[--(heap->cnt)]; /* * Rebalance the heap. */ a = 0; /* Starting element and its offset */ a_offset = heap->reqs[a].offset; for (;;) { b = a + a + 1; /* First child */ if (b >= heap->cnt) return; b_offset = heap->reqs[b].offset; c = b + 1; /* Use second child if it is smaller. */ if (c < heap->cnt) { c_offset = heap->reqs[c].offset; if (c_offset < b_offset) { b = c; b_offset = c_offset; } } if (a_offset <= b_offset) return; tmp = heap->reqs[a]; heap->reqs[a] = heap->reqs[b]; heap->reqs[b] = tmp; a = b; } } static int read_CE(struct archive_read *a, struct iso9660 *iso9660) { struct read_ce_queue *heap; const unsigned char *b, *p, *end; struct file_info *file; size_t step; int r; /* Read data which RRIP "CE" extension points. */ heap = &(iso9660->read_ce_req); step = iso9660->logical_block_size; while (heap->cnt && heap->reqs[0].offset == iso9660->current_position) { b = __archive_read_ahead(a, step, NULL); if (b == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to read full block when scanning " "ISO9660 directory list"); return (ARCHIVE_FATAL); } do { file = heap->reqs[0].file; if (file->ce_offset + file->ce_size > step) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Malformed CE information"); return (ARCHIVE_FATAL); } p = b + file->ce_offset; end = p + file->ce_size; next_CE(heap); r = parse_rockridge(a, file, p, end); if (r != ARCHIVE_OK) return (ARCHIVE_FATAL); } while (heap->cnt && heap->reqs[0].offset == iso9660->current_position); /* NOTE: Do not move this consume's code to front of * do-while loop. Registration of nested CE extension * might cause error because of current position. */ __archive_read_consume(a, step); iso9660->current_position += step; } return (ARCHIVE_OK); } static void parse_rockridge_NM1(struct file_info *file, const unsigned char *data, int data_length) { if (!file->name_continues) archive_string_empty(&file->name); file->name_continues = 0; if (data_length < 1) return; /* * NM version 1 extension comprises: * 1 byte flag, value is one of: * = 0: remainder is name * = 1: remainder is name, next NM entry continues name * = 2: "." * = 4: ".." * = 32: Implementation specific * All other values are reserved. */ switch(data[0]) { case 0: if (data_length < 2) return; archive_strncat(&file->name, (const char *)data + 1, data_length - 1); break; case 1: if (data_length < 2) return; archive_strncat(&file->name, (const char *)data + 1, data_length - 1); file->name_continues = 1; break; case 2: archive_strcat(&file->name, "."); break; case 4: archive_strcat(&file->name, ".."); break; default: return; } } static void parse_rockridge_TF1(struct file_info *file, const unsigned char *data, int data_length) { char flag; /* * TF extension comprises: * one byte flag * create time (optional) * modify time (optional) * access time (optional) * attribute time (optional) * Time format and presence of fields * is controlled by flag bits. */ if (data_length < 1) return; flag = data[0]; ++data; --data_length; if (flag & 0x80) { /* Use 17-byte time format. */ if ((flag & 1) && data_length >= 17) { /* Create time. 
*/ file->birthtime_is_set = 1; file->birthtime = isodate17(data); data += 17; data_length -= 17; } if ((flag & 2) && data_length >= 17) { /* Modify time. */ file->mtime = isodate17(data); data += 17; data_length -= 17; } if ((flag & 4) && data_length >= 17) { /* Access time. */ file->atime = isodate17(data); data += 17; data_length -= 17; } if ((flag & 8) && data_length >= 17) { /* Attribute change time. */ file->ctime = isodate17(data); } } else { /* Use 7-byte time format. */ if ((flag & 1) && data_length >= 7) { /* Create time. */ file->birthtime_is_set = 1; file->birthtime = isodate7(data); data += 7; data_length -= 7; } if ((flag & 2) && data_length >= 7) { /* Modify time. */ file->mtime = isodate7(data); data += 7; data_length -= 7; } if ((flag & 4) && data_length >= 7) { /* Access time. */ file->atime = isodate7(data); data += 7; data_length -= 7; } if ((flag & 8) && data_length >= 7) { /* Attribute change time. */ file->ctime = isodate7(data); } } } static void parse_rockridge_SL1(struct file_info *file, const unsigned char *data, int data_length) { const char *separator = ""; if (!file->symlink_continues || file->symlink.length < 1) archive_string_empty(&file->symlink); file->symlink_continues = 0; /* * Defined flag values: * 0: This is the last SL record for this symbolic link * 1: this symbolic link field continues in next SL entry * All other values are reserved. */ if (data_length < 1) return; switch(*data) { case 0: break; case 1: file->symlink_continues = 1; break; default: return; } ++data; /* Skip flag byte. */ --data_length; /* * SL extension body stores "components". * Basically, this is a complicated way of storing * a POSIX path. It also interferes with using * symlinks for storing non-path data. <sigh> * * Each component is 2 bytes (flag and length) * possibly followed by name data. */ while (data_length >= 2) { unsigned char flag = *data++; unsigned char nlen = *data++; data_length -= 2; archive_strcat(&file->symlink, separator); separator = "/"; switch(flag) { case 0: /* Usual case, this is text. */ if (data_length < nlen) return; archive_strncat(&file->symlink, (const char *)data, nlen); break; case 0x01: /* Text continues in next component. */ if (data_length < nlen) return; archive_strncat(&file->symlink, (const char *)data, nlen); separator = ""; break; case 0x02: /* Current dir. */ archive_strcat(&file->symlink, "."); break; case 0x04: /* Parent dir. */ archive_strcat(&file->symlink, ".."); break; case 0x08: /* Root of filesystem. */ archive_strcat(&file->symlink, "/"); separator = ""; break; case 0x10: /* Undefined (historically "volume root" */ archive_string_empty(&file->symlink); archive_strcat(&file->symlink, "ROOT"); break; case 0x20: /* Undefined (historically "hostname") */ archive_strcat(&file->symlink, "hostname"); break; default: /* TODO: issue a warning ? 
*/ return; } data += nlen; data_length -= nlen; } } static void parse_rockridge_ZF1(struct file_info *file, const unsigned char *data, int data_length) { if (data[0] == 0x70 && data[1] == 0x7a && data_length == 12) { /* paged zlib */ file->pz = 1; file->pz_log2_bs = data[3]; file->pz_uncompressed_size = archive_le32dec(&data[4]); } } static void register_file(struct iso9660 *iso9660, struct file_info *file) { file->use_next = iso9660->use_files; iso9660->use_files = file; } static void release_files(struct iso9660 *iso9660) { struct content *con, *connext; struct file_info *file; file = iso9660->use_files; while (file != NULL) { struct file_info *next = file->use_next; archive_string_free(&file->name); archive_string_free(&file->symlink); free(file->utf16be_name); con = file->contents.first; while (con != NULL) { connext = con->next; free(con); con = connext; } free(file); file = next; } } static int next_entry_seek(struct archive_read *a, struct iso9660 *iso9660, struct file_info **pfile) { struct file_info *file; int r; r = next_cache_entry(a, iso9660, pfile); if (r != ARCHIVE_OK) return (r); file = *pfile; /* Don't waste time seeking for zero-length bodies. */ if (file->size == 0) file->offset = iso9660->current_position; /* flush any remaining bytes from the last round to ensure * we're positioned */ if (iso9660->entry_bytes_unconsumed) { __archive_read_consume(a, iso9660->entry_bytes_unconsumed); iso9660->entry_bytes_unconsumed = 0; } /* Seek forward to the start of the entry. */ if (iso9660->current_position < file->offset) { int64_t step; step = file->offset - iso9660->current_position; step = __archive_read_consume(a, step); if (step < 0) return ((int)step); iso9660->current_position = file->offset; } /* We found body of file; handle it now. */ return (ARCHIVE_OK); } static int next_cache_entry(struct archive_read *a, struct iso9660 *iso9660, struct file_info **pfile) { struct file_info *file; struct { struct file_info *first; struct file_info **last; } empty_files; int64_t number; int count; file = cache_get_entry(iso9660); if (file != NULL) { *pfile = file; return (ARCHIVE_OK); } for (;;) { struct file_info *re, *d; *pfile = file = next_entry(iso9660); if (file == NULL) { /* * If directory entries all which are descendant of * rr_moved are still remaining, expose their. */ if (iso9660->re_files.first != NULL && iso9660->rr_moved != NULL && iso9660->rr_moved->rr_moved_has_re_only) /* Expose "rr_moved" entry. */ cache_add_entry(iso9660, iso9660->rr_moved); while ((re = re_get_entry(iso9660)) != NULL) { /* Expose its descendant dirs. */ while ((d = rede_get_entry(re)) != NULL) cache_add_entry(iso9660, d); } if (iso9660->cache_files.first != NULL) return (next_cache_entry(a, iso9660, pfile)); return (ARCHIVE_EOF); } if (file->cl_offset) { struct file_info *first_re = NULL; int nexted_re = 0; /* * Find "RE" dir for the current file, which * has "CL" flag. */ while ((re = re_get_entry(iso9660)) != first_re) { if (first_re == NULL) first_re = re; if (re->offset == file->cl_offset) { re->parent->subdirs--; re->parent = file->parent; re->re = 0; if (re->parent->re_descendant) { nexted_re = 1; re->re_descendant = 1; if (rede_add_entry(re) < 0) goto fatal_rr; /* Move a list of descendants * to a new ancestor. 
*/ while ((d = rede_get_entry( re)) != NULL) if (rede_add_entry(d) < 0) goto fatal_rr; break; } /* Replace the current file * with "RE" dir */ *pfile = file = re; /* Expose its descendant */ while ((d = rede_get_entry( file)) != NULL) cache_add_entry( iso9660, d); break; } else re_add_entry(iso9660, re); } if (nexted_re) { /* * Do not expose this at this time * because we have not gotten its full-path * name yet. */ continue; } } else if ((file->mode & AE_IFMT) == AE_IFDIR) { int r; /* Read file entries in this dir. */ r = read_children(a, file); if (r != ARCHIVE_OK) return (r); /* * Handle a special dir of Rockridge extensions, * "rr_moved". */ if (file->rr_moved) { /* * If this has only the subdirectories which * have "RE" flags, do not expose at this time. */ if (file->rr_moved_has_re_only) continue; /* Otherwise expose "rr_moved" entry. */ } else if (file->re) { /* * Do not expose this at this time * because we have not gotten its full-path * name yet. */ re_add_entry(iso9660, file); continue; } else if (file->re_descendant) { /* * If the top level "RE" entry of this entry * is not exposed, we, accordingly, should not * expose this entry at this time because * we cannot make its proper full-path name. */ if (rede_add_entry(file) == 0) continue; /* Otherwise we can expose this entry because * it seems its top level "RE" has already been * exposed. */ } } break; } if ((file->mode & AE_IFMT) != AE_IFREG || file->number == -1) return (ARCHIVE_OK); count = 0; number = file->number; iso9660->cache_files.first = NULL; iso9660->cache_files.last = &(iso9660->cache_files.first); empty_files.first = NULL; empty_files.last = &empty_files.first; /* Collect files which has the same file serial number. * Peek pending_files so that file which number is different * is not put back. */ while (iso9660->pending_files.used > 0 && (iso9660->pending_files.files[0]->number == -1 || iso9660->pending_files.files[0]->number == number)) { if (file->number == -1) { /* This file has the same offset * but it's wrong offset which empty files * and symlink files have. * NOTE: This wrong offset was recorded by * old mkisofs utility. If ISO images is * created by latest mkisofs, this does not * happen. */ file->next = NULL; *empty_files.last = file; empty_files.last = &(file->next); } else { count++; cache_add_entry(iso9660, file); } file = next_entry(iso9660); } if (count == 0) { *pfile = file; return ((file == NULL)?ARCHIVE_EOF:ARCHIVE_OK); } if (file->number == -1) { file->next = NULL; *empty_files.last = file; empty_files.last = &(file->next); } else { count++; cache_add_entry(iso9660, file); } if (count > 1) { /* The count is the same as number of hardlink, * so much so that each nlinks of files in cache_file * is overwritten by value of the count. */ for (file = iso9660->cache_files.first; file != NULL; file = file->next) file->nlinks = count; } /* If there are empty files, that files are added * to the tail of the cache_files. 
*/ if (empty_files.first != NULL) { *iso9660->cache_files.last = empty_files.first; iso9660->cache_files.last = empty_files.last; } *pfile = cache_get_entry(iso9660); return ((*pfile == NULL)?ARCHIVE_EOF:ARCHIVE_OK); fatal_rr: archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to connect 'CL' pointer to 'RE' rr_moved pointer of " "Rockridge extensions: current position = %jd, CL offset = %jd", (intmax_t)iso9660->current_position, (intmax_t)file->cl_offset); return (ARCHIVE_FATAL); } static inline void re_add_entry(struct iso9660 *iso9660, struct file_info *file) { file->re_next = NULL; *iso9660->re_files.last = file; iso9660->re_files.last = &(file->re_next); } static inline struct file_info * re_get_entry(struct iso9660 *iso9660) { struct file_info *file; if ((file = iso9660->re_files.first) != NULL) { iso9660->re_files.first = file->re_next; if (iso9660->re_files.first == NULL) iso9660->re_files.last = &(iso9660->re_files.first); } return (file); } static inline int rede_add_entry(struct file_info *file) { struct file_info *re; /* * Find "RE" entry. */ re = file->parent; while (re != NULL && !re->re) re = re->parent; if (re == NULL) return (-1); file->re_next = NULL; *re->rede_files.last = file; re->rede_files.last = &(file->re_next); return (0); } static inline struct file_info * rede_get_entry(struct file_info *re) { struct file_info *file; if ((file = re->rede_files.first) != NULL) { re->rede_files.first = file->re_next; if (re->rede_files.first == NULL) re->rede_files.last = &(re->rede_files.first); } return (file); } static inline void cache_add_entry(struct iso9660 *iso9660, struct file_info *file) { file->next = NULL; *iso9660->cache_files.last = file; iso9660->cache_files.last = &(file->next); } static inline struct file_info * cache_get_entry(struct iso9660 *iso9660) { struct file_info *file; if ((file = iso9660->cache_files.first) != NULL) { iso9660->cache_files.first = file->next; if (iso9660->cache_files.first == NULL) iso9660->cache_files.last = &(iso9660->cache_files.first); } return (file); } static int heap_add_entry(struct archive_read *a, struct heap_queue *heap, struct file_info *file, uint64_t key) { uint64_t file_key, parent_key; int hole, parent; /* Expand our pending files list as necessary. */ if (heap->used >= heap->allocated) { struct file_info **new_pending_files; int new_size = heap->allocated * 2; if (heap->allocated < 1024) new_size = 1024; /* Overflow might keep us from growing the list. */ if (new_size <= heap->allocated) { archive_set_error(&a->archive, ENOMEM, "Out of memory"); return (ARCHIVE_FATAL); } new_pending_files = (struct file_info **) malloc(new_size * sizeof(new_pending_files[0])); if (new_pending_files == NULL) { archive_set_error(&a->archive, ENOMEM, "Out of memory"); return (ARCHIVE_FATAL); } if (heap->allocated) memcpy(new_pending_files, heap->files, heap->allocated * sizeof(new_pending_files[0])); if (heap->files != NULL) free(heap->files); heap->files = new_pending_files; heap->allocated = new_size; } file_key = file->key = key; /* * Start with hole at end, walk it up tree to find insertion point. */ hole = heap->used++; while (hole > 0) { parent = (hole - 1)/2; parent_key = heap->files[parent]->key; if (file_key >= parent_key) { heap->files[hole] = file; return (ARCHIVE_OK); } /* Move parent into hole <==> move hole up tree. 
*/ heap->files[hole] = heap->files[parent]; hole = parent; } heap->files[0] = file; return (ARCHIVE_OK); } static struct file_info * heap_get_entry(struct heap_queue *heap) { uint64_t a_key, b_key, c_key; int a, b, c; struct file_info *r, *tmp; if (heap->used < 1) return (NULL); /* * The first file in the list is the earliest; we'll return this. */ r = heap->files[0]; /* * Move the last item in the heap to the root of the tree */ heap->files[0] = heap->files[--(heap->used)]; /* * Rebalance the heap. */ a = 0; /* Starting element and its heap key */ a_key = heap->files[a]->key; for (;;) { b = a + a + 1; /* First child */ if (b >= heap->used) return (r); b_key = heap->files[b]->key; c = b + 1; /* Use second child if it is smaller. */ if (c < heap->used) { c_key = heap->files[c]->key; if (c_key < b_key) { b = c; b_key = c_key; } } if (a_key <= b_key) return (r); tmp = heap->files[a]; heap->files[a] = heap->files[b]; heap->files[b] = tmp; a = b; } } static unsigned int toi(const void *p, int n) { const unsigned char *v = (const unsigned char *)p; if (n > 1) return v[0] + 256 * toi(v + 1, n - 1); if (n == 1) return v[0]; return (0); } static time_t isodate7(const unsigned char *v) { struct tm tm; int offset; time_t t; memset(&tm, 0, sizeof(tm)); tm.tm_year = v[0]; tm.tm_mon = v[1] - 1; tm.tm_mday = v[2]; tm.tm_hour = v[3]; tm.tm_min = v[4]; tm.tm_sec = v[5]; /* v[6] is the signed timezone offset, in 1/4-hour increments. */ offset = ((const signed char *)v)[6]; if (offset > -48 && offset < 52) { tm.tm_hour -= offset / 4; tm.tm_min -= (offset % 4) * 15; } t = time_from_tm(&tm); if (t == (time_t)-1) return ((time_t)0); return (t); } static time_t isodate17(const unsigned char *v) { struct tm tm; int offset; time_t t; memset(&tm, 0, sizeof(tm)); tm.tm_year = (v[0] - '0') * 1000 + (v[1] - '0') * 100 + (v[2] - '0') * 10 + (v[3] - '0') - 1900; tm.tm_mon = (v[4] - '0') * 10 + (v[5] - '0'); tm.tm_mday = (v[6] - '0') * 10 + (v[7] - '0'); tm.tm_hour = (v[8] - '0') * 10 + (v[9] - '0'); tm.tm_min = (v[10] - '0') * 10 + (v[11] - '0'); tm.tm_sec = (v[12] - '0') * 10 + (v[13] - '0'); /* v[16] is the signed timezone offset, in 1/4-hour increments. */ offset = ((const signed char *)v)[16]; if (offset > -48 && offset < 52) { tm.tm_hour -= offset / 4; tm.tm_min -= (offset % 4) * 15; } t = time_from_tm(&tm); if (t == (time_t)-1) return ((time_t)0); return (t); } static time_t time_from_tm(struct tm *t) { #if HAVE_TIMEGM /* Use platform timegm() if available. */ return (timegm(t)); #elif HAVE__MKGMTIME64 return (_mkgmtime64(t)); #else /* Else use direct calculation using POSIX assumptions. */ /* First, fix up tm_yday based on the year/month/day. */ if (mktime(t) == (time_t)-1) return ((time_t)-1); /* Then we can compute timegm() from first principles. */ return (t->tm_sec + t->tm_min * 60 + t->tm_hour * 3600 + t->tm_yday * 86400 + (t->tm_year - 70) * 31536000 + ((t->tm_year - 69) / 4) * 86400 - ((t->tm_year - 1) / 100) * 86400 + ((t->tm_year + 299) / 400) * 86400); #endif } static const char * build_pathname(struct archive_string *as, struct file_info *file, int depth) { // Plain ISO9660 only allows 8 dir levels; if we get // to 1000, then something is very, very wrong. 
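	// The cap also bounds the recursion below on maliciously deep
	// directory trees.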
if (depth > 1000) { return NULL; } if (file->parent != NULL && archive_strlen(&file->parent->name) > 0) { if (build_pathname(as, file->parent, depth + 1) == NULL) { return NULL; } archive_strcat(as, "/"); } if (archive_strlen(&file->name) == 0) archive_strcat(as, "."); else archive_string_concat(as, &file->name); return (as->s); } static int build_pathname_utf16be(unsigned char *p, size_t max, size_t *len, struct file_info *file) { if (file->parent != NULL && file->parent->utf16be_bytes > 0) { if (build_pathname_utf16be(p, max, len, file->parent) != 0) return (-1); p[*len] = 0; p[*len + 1] = '/'; *len += 2; } if (file->utf16be_bytes == 0) { if (*len + 2 > max) return (-1);/* Path is too long! */ p[*len] = 0; p[*len + 1] = '.'; *len += 2; } else { if (*len + file->utf16be_bytes > max) return (-1);/* Path is too long! */ memcpy(p + *len, file->utf16be_name, file->utf16be_bytes); *len += file->utf16be_bytes; } return (0); } #if DEBUG static void dump_isodirrec(FILE *out, const unsigned char *isodirrec) { fprintf(out, " l %d,", toi(isodirrec + DR_length_offset, DR_length_size)); fprintf(out, " a %d,", toi(isodirrec + DR_ext_attr_length_offset, DR_ext_attr_length_size)); fprintf(out, " ext 0x%x,", toi(isodirrec + DR_extent_offset, DR_extent_size)); fprintf(out, " s %d,", toi(isodirrec + DR_size_offset, DR_extent_size)); fprintf(out, " f 0x%x,", toi(isodirrec + DR_flags_offset, DR_flags_size)); fprintf(out, " u %d,", toi(isodirrec + DR_file_unit_size_offset, DR_file_unit_size_size)); fprintf(out, " ilv %d,", toi(isodirrec + DR_interleave_offset, DR_interleave_size)); fprintf(out, " seq %d,", toi(isodirrec + DR_volume_sequence_number_offset, DR_volume_sequence_number_size)); fprintf(out, " nl %d:", toi(isodirrec + DR_name_len_offset, DR_name_len_size)); fprintf(out, " `%.*s'", toi(isodirrec + DR_name_len_offset, DR_name_len_size), isodirrec + DR_name_offset); } #endif
./CrossVul/dataset_final_sorted/CWE-400/c/bad_707_0
crossvul-cpp_data_good_4422_2
// SPDX-License-Identifier: GPL-2.0-only /* * Xen event channels * * Xen models interrupts with abstract event channels. Because each * domain gets 1024 event channels, but NR_IRQ is not that large, we * must dynamically map irqs<->event channels. The event channels * interface with the rest of the kernel by defining a xen interrupt * chip. When an event is received, it is mapped to an irq and sent * through the normal interrupt processing path. * * There are four kinds of events which can be mapped to an event * channel: * * 1. Inter-domain notifications. This includes all the virtual * device events, since they're driven by front-ends in another domain * (typically dom0). * 2. VIRQs, typically used for timers. These are per-cpu events. * 3. IPIs. * 4. PIRQs - Hardware interrupts. * * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 */ #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt #include <linux/linkage.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/moduleparam.h> #include <linux/string.h> #include <linux/memblock.h> #include <linux/slab.h> #include <linux/irqnr.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/cpuhotplug.h> #include <linux/atomic.h> #include <linux/ktime.h> #ifdef CONFIG_X86 #include <asm/desc.h> #include <asm/ptrace.h> #include <asm/idtentry.h> #include <asm/irq.h> #include <asm/io_apic.h> #include <asm/i8259.h> #include <asm/xen/pci.h> #endif #include <asm/sync_bitops.h> #include <asm/xen/hypercall.h> #include <asm/xen/hypervisor.h> #include <xen/page.h> #include <xen/xen.h> #include <xen/hvm.h> #include <xen/xen-ops.h> #include <xen/events.h> #include <xen/interface/xen.h> #include <xen/interface/event_channel.h> #include <xen/interface/hvm/hvm_op.h> #include <xen/interface/hvm/params.h> #include <xen/interface/physdev.h> #include <xen/interface/sched.h> #include <xen/interface/vcpu.h> #include <asm/hw_irq.h> #include "events_internal.h" #undef MODULE_PARAM_PREFIX #define MODULE_PARAM_PREFIX "xen." static uint __read_mostly event_loop_timeout = 2; module_param(event_loop_timeout, uint, 0644); static uint __read_mostly event_eoi_delay = 10; module_param(event_eoi_delay, uint, 0644); const struct evtchn_ops *evtchn_ops; /* * This lock protects updates to the following mapping and reference-count * arrays. The lock does not need to be acquired to read the mapping tables. */ static DEFINE_MUTEX(irq_mapping_update_lock); /* * Lock protecting event handling loop against removing event channels. * Adding of event channels is no issue as the associated IRQ becomes active * only after everything is setup (before request_[threaded_]irq() the handler * can't be entered for an event, as the event channel will be unmasked only * then). */ static DEFINE_RWLOCK(evtchn_rwlock); /* * Lock hierarchy: * * irq_mapping_update_lock * evtchn_rwlock * IRQ-desc lock * percpu eoi_list_lock */ static LIST_HEAD(xen_irq_list_head); /* IRQ <-> VIRQ mapping. */ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1}; /* IRQ <-> IPI mapping */ static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1}; int **evtchn_to_irq; #ifdef CONFIG_X86 static unsigned long *pirq_eoi_map; #endif static bool (*pirq_needs_eoi)(unsigned irq); #define EVTCHN_ROW(e) (e / (PAGE_SIZE/sizeof(**evtchn_to_irq))) #define EVTCHN_COL(e) (e % (PAGE_SIZE/sizeof(**evtchn_to_irq))) #define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq)) /* Xen will never allocate port zero for any purpose. 
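 * An event channel number of zero therefore serves as the "not bound"
 * marker throughout this file.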
*/ #define VALID_EVTCHN(chn) ((chn) != 0) static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY]; static struct irq_chip xen_dynamic_chip; static struct irq_chip xen_lateeoi_chip; static struct irq_chip xen_percpu_chip; static struct irq_chip xen_pirq_chip; static void enable_dynirq(struct irq_data *data); static void disable_dynirq(struct irq_data *data); static DEFINE_PER_CPU(unsigned int, irq_epoch); static void clear_evtchn_to_irq_row(unsigned row) { unsigned col; for (col = 0; col < EVTCHN_PER_ROW; col++) WRITE_ONCE(evtchn_to_irq[row][col], -1); } static void clear_evtchn_to_irq_all(void) { unsigned row; for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) { if (evtchn_to_irq[row] == NULL) continue; clear_evtchn_to_irq_row(row); } } static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq) { unsigned row; unsigned col; if (evtchn >= xen_evtchn_max_channels()) return -EINVAL; row = EVTCHN_ROW(evtchn); col = EVTCHN_COL(evtchn); if (evtchn_to_irq[row] == NULL) { /* Unallocated irq entries return -1 anyway */ if (irq == -1) return 0; evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL); if (evtchn_to_irq[row] == NULL) return -ENOMEM; clear_evtchn_to_irq_row(row); } WRITE_ONCE(evtchn_to_irq[row][col], irq); return 0; } int get_evtchn_to_irq(evtchn_port_t evtchn) { if (evtchn >= xen_evtchn_max_channels()) return -1; if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL) return -1; return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]); } /* Get info for IRQ */ struct irq_info *info_for_irq(unsigned irq) { if (irq < nr_legacy_irqs()) return legacy_info_ptrs[irq]; else return irq_get_chip_data(irq); } static void set_info_for_irq(unsigned int irq, struct irq_info *info) { if (irq < nr_legacy_irqs()) legacy_info_ptrs[irq] = info; else irq_set_chip_data(irq, info); } /* Constructors for packed IRQ information. 
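 * Each setup helper fills the common fields (type, irq, evtchn, cpu),
 * installs the evtchn -> irq mapping, and stores the type-specific
 * payload (IPI vector, VIRQ number or PIRQ details) in the irq_info
 * union.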
*/ static int xen_irq_info_common_setup(struct irq_info *info, unsigned irq, enum xen_irq_type type, evtchn_port_t evtchn, unsigned short cpu) { int ret; BUG_ON(info->type != IRQT_UNBOUND && info->type != type); info->type = type; info->irq = irq; info->evtchn = evtchn; info->cpu = cpu; ret = set_evtchn_to_irq(evtchn, irq); if (ret < 0) return ret; irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN); return xen_evtchn_port_setup(info); } static int xen_irq_info_evtchn_setup(unsigned irq, evtchn_port_t evtchn) { struct irq_info *info = info_for_irq(irq); return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0); } static int xen_irq_info_ipi_setup(unsigned cpu, unsigned irq, evtchn_port_t evtchn, enum ipi_vector ipi) { struct irq_info *info = info_for_irq(irq); info->u.ipi = ipi; per_cpu(ipi_to_irq, cpu)[ipi] = irq; return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0); } static int xen_irq_info_virq_setup(unsigned cpu, unsigned irq, evtchn_port_t evtchn, unsigned virq) { struct irq_info *info = info_for_irq(irq); info->u.virq = virq; per_cpu(virq_to_irq, cpu)[virq] = irq; return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0); } static int xen_irq_info_pirq_setup(unsigned irq, evtchn_port_t evtchn, unsigned pirq, unsigned gsi, uint16_t domid, unsigned char flags) { struct irq_info *info = info_for_irq(irq); info->u.pirq.pirq = pirq; info->u.pirq.gsi = gsi; info->u.pirq.domid = domid; info->u.pirq.flags = flags; return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0); } static void xen_irq_info_cleanup(struct irq_info *info) { set_evtchn_to_irq(info->evtchn, -1); info->evtchn = 0; } /* * Accessors for packed IRQ information. */ evtchn_port_t evtchn_from_irq(unsigned irq) { const struct irq_info *info = NULL; if (likely(irq < nr_irqs)) info = info_for_irq(irq); if (!info) return 0; return info->evtchn; } unsigned int irq_from_evtchn(evtchn_port_t evtchn) { return get_evtchn_to_irq(evtchn); } EXPORT_SYMBOL_GPL(irq_from_evtchn); int irq_from_virq(unsigned int cpu, unsigned int virq) { return per_cpu(virq_to_irq, cpu)[virq]; } static enum ipi_vector ipi_from_irq(unsigned irq) { struct irq_info *info = info_for_irq(irq); BUG_ON(info == NULL); BUG_ON(info->type != IRQT_IPI); return info->u.ipi; } static unsigned virq_from_irq(unsigned irq) { struct irq_info *info = info_for_irq(irq); BUG_ON(info == NULL); BUG_ON(info->type != IRQT_VIRQ); return info->u.virq; } static unsigned pirq_from_irq(unsigned irq) { struct irq_info *info = info_for_irq(irq); BUG_ON(info == NULL); BUG_ON(info->type != IRQT_PIRQ); return info->u.pirq.pirq; } static enum xen_irq_type type_from_irq(unsigned irq) { return info_for_irq(irq)->type; } unsigned cpu_from_irq(unsigned irq) { return info_for_irq(irq)->cpu; } unsigned int cpu_from_evtchn(evtchn_port_t evtchn) { int irq = get_evtchn_to_irq(evtchn); unsigned ret = 0; if (irq != -1) ret = cpu_from_irq(irq); return ret; } #ifdef CONFIG_X86 static bool pirq_check_eoi_map(unsigned irq) { return test_bit(pirq_from_irq(irq), pirq_eoi_map); } #endif static bool pirq_needs_eoi_flag(unsigned irq) { struct irq_info *info = info_for_irq(irq); BUG_ON(info->type != IRQT_PIRQ); return info->u.pirq.flags & PIRQ_NEEDS_EOI; } static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu) { int irq = get_evtchn_to_irq(evtchn); struct irq_info *info = info_for_irq(irq); BUG_ON(irq == -1); #ifdef CONFIG_SMP cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu)); #endif xen_evtchn_port_bind_to_cpu(info, cpu); info->cpu = cpu; } /** * 
notify_remote_via_irq - send event to remote end of event channel via irq * @irq: irq of event channel to send event to * * Unlike notify_remote_via_evtchn(), this is safe to use across * save/restore. Notifications on a broken connection are silently * dropped. */ void notify_remote_via_irq(int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); if (VALID_EVTCHN(evtchn)) notify_remote_via_evtchn(evtchn); } EXPORT_SYMBOL_GPL(notify_remote_via_irq); struct lateeoi_work { struct delayed_work delayed; spinlock_t eoi_list_lock; struct list_head eoi_list; }; static DEFINE_PER_CPU(struct lateeoi_work, lateeoi); static void lateeoi_list_del(struct irq_info *info) { struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu); unsigned long flags; spin_lock_irqsave(&eoi->eoi_list_lock, flags); list_del_init(&info->eoi_list); spin_unlock_irqrestore(&eoi->eoi_list_lock, flags); } static void lateeoi_list_add(struct irq_info *info) { struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu); struct irq_info *elem; u64 now = get_jiffies_64(); unsigned long delay; unsigned long flags; if (now < info->eoi_time) delay = info->eoi_time - now; else delay = 1; spin_lock_irqsave(&eoi->eoi_list_lock, flags); if (list_empty(&eoi->eoi_list)) { list_add(&info->eoi_list, &eoi->eoi_list); mod_delayed_work_on(info->eoi_cpu, system_wq, &eoi->delayed, delay); } else { list_for_each_entry_reverse(elem, &eoi->eoi_list, eoi_list) { if (elem->eoi_time <= info->eoi_time) break; } list_add(&info->eoi_list, &elem->eoi_list); } spin_unlock_irqrestore(&eoi->eoi_list_lock, flags); } static void xen_irq_lateeoi_locked(struct irq_info *info) { evtchn_port_t evtchn; unsigned int cpu; evtchn = info->evtchn; if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list)) return; cpu = info->eoi_cpu; if (info->eoi_time && info->irq_epoch == per_cpu(irq_epoch, cpu)) { lateeoi_list_add(info); return; } info->eoi_time = 0; unmask_evtchn(evtchn); } static void xen_irq_lateeoi_worker(struct work_struct *work) { struct lateeoi_work *eoi; struct irq_info *info; u64 now = get_jiffies_64(); unsigned long flags; eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed); read_lock_irqsave(&evtchn_rwlock, flags); while (true) { spin_lock(&eoi->eoi_list_lock); info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info, eoi_list); if (info == NULL || now < info->eoi_time) { spin_unlock(&eoi->eoi_list_lock); break; } list_del_init(&info->eoi_list); spin_unlock(&eoi->eoi_list_lock); info->eoi_time = 0; xen_irq_lateeoi_locked(info); } if (info) mod_delayed_work_on(info->eoi_cpu, system_wq, &eoi->delayed, info->eoi_time - now); read_unlock_irqrestore(&evtchn_rwlock, flags); } static void xen_cpu_init_eoi(unsigned int cpu) { struct lateeoi_work *eoi = &per_cpu(lateeoi, cpu); INIT_DELAYED_WORK(&eoi->delayed, xen_irq_lateeoi_worker); spin_lock_init(&eoi->eoi_list_lock); INIT_LIST_HEAD(&eoi->eoi_list); } void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags) { struct irq_info *info; unsigned long flags; read_lock_irqsave(&evtchn_rwlock, flags); info = info_for_irq(irq); if (info) xen_irq_lateeoi_locked(info); read_unlock_irqrestore(&evtchn_rwlock, flags); } EXPORT_SYMBOL_GPL(xen_irq_lateeoi); static void xen_irq_init(unsigned irq) { struct irq_info *info; #ifdef CONFIG_SMP /* By default all event channels notify CPU#0. 
*/ cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0)); #endif info = kzalloc(sizeof(*info), GFP_KERNEL); if (info == NULL) panic("Unable to allocate metadata for IRQ%d\n", irq); info->type = IRQT_UNBOUND; info->refcnt = -1; set_info_for_irq(irq, info); INIT_LIST_HEAD(&info->eoi_list); list_add_tail(&info->list, &xen_irq_list_head); } static int __must_check xen_allocate_irqs_dynamic(int nvec) { int i, irq = irq_alloc_descs(-1, 0, nvec, -1); if (irq >= 0) { for (i = 0; i < nvec; i++) xen_irq_init(irq + i); } return irq; } static inline int __must_check xen_allocate_irq_dynamic(void) { return xen_allocate_irqs_dynamic(1); } static int __must_check xen_allocate_irq_gsi(unsigned gsi) { int irq; /* * A PV guest has no concept of a GSI (since it has no ACPI * nor access to/knowledge of the physical APICs). Therefore * all IRQs are dynamically allocated from the entire IRQ * space. */ if (xen_pv_domain() && !xen_initial_domain()) return xen_allocate_irq_dynamic(); /* Legacy IRQ descriptors are already allocated by the arch. */ if (gsi < nr_legacy_irqs()) irq = gsi; else irq = irq_alloc_desc_at(gsi, -1); xen_irq_init(irq); return irq; } static void xen_free_irq(unsigned irq) { struct irq_info *info = info_for_irq(irq); unsigned long flags; if (WARN_ON(!info)) return; write_lock_irqsave(&evtchn_rwlock, flags); if (!list_empty(&info->eoi_list)) lateeoi_list_del(info); list_del(&info->list); set_info_for_irq(irq, NULL); WARN_ON(info->refcnt > 0); write_unlock_irqrestore(&evtchn_rwlock, flags); kfree(info); /* Legacy IRQ descriptors are managed by the arch. */ if (irq < nr_legacy_irqs()) return; irq_free_desc(irq); } static void xen_evtchn_close(evtchn_port_t port) { struct evtchn_close close; close.port = port; if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) BUG(); } static void pirq_query_unmask(int irq) { struct physdev_irq_status_query irq_status; struct irq_info *info = info_for_irq(irq); BUG_ON(info->type != IRQT_PIRQ); irq_status.irq = pirq_from_irq(irq); if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status)) irq_status.flags = 0; info->u.pirq.flags &= ~PIRQ_NEEDS_EOI; if (irq_status.flags & XENIRQSTAT_needs_eoi) info->u.pirq.flags |= PIRQ_NEEDS_EOI; } static void eoi_pirq(struct irq_data *data) { evtchn_port_t evtchn = evtchn_from_irq(data->irq); struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; int rc = 0; if (!VALID_EVTCHN(evtchn)) return; if (unlikely(irqd_is_setaffinity_pending(data)) && likely(!irqd_irq_disabled(data))) { int masked = test_and_set_mask(evtchn); clear_evtchn(evtchn); irq_move_masked_irq(data); if (!masked) unmask_evtchn(evtchn); } else clear_evtchn(evtchn); if (pirq_needs_eoi(data->irq)) { rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); WARN_ON(rc); } } static void mask_ack_pirq(struct irq_data *data) { disable_dynirq(data); eoi_pirq(data); } static unsigned int __startup_pirq(unsigned int irq) { struct evtchn_bind_pirq bind_pirq; struct irq_info *info = info_for_irq(irq); evtchn_port_t evtchn = evtchn_from_irq(irq); int rc; BUG_ON(info->type != IRQT_PIRQ); if (VALID_EVTCHN(evtchn)) goto out; bind_pirq.pirq = pirq_from_irq(irq); /* NB. We are happy to share unless we are probing. */ bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ? 
BIND_PIRQ__WILL_SHARE : 0; rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq); if (rc != 0) { pr_warn("Failed to obtain physical IRQ %d\n", irq); return 0; } evtchn = bind_pirq.port; pirq_query_unmask(irq); rc = set_evtchn_to_irq(evtchn, irq); if (rc) goto err; info->evtchn = evtchn; bind_evtchn_to_cpu(evtchn, 0); rc = xen_evtchn_port_setup(info); if (rc) goto err; out: unmask_evtchn(evtchn); eoi_pirq(irq_get_irq_data(irq)); return 0; err: pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc); xen_evtchn_close(evtchn); return 0; } static unsigned int startup_pirq(struct irq_data *data) { return __startup_pirq(data->irq); } static void shutdown_pirq(struct irq_data *data) { unsigned int irq = data->irq; struct irq_info *info = info_for_irq(irq); evtchn_port_t evtchn = evtchn_from_irq(irq); BUG_ON(info->type != IRQT_PIRQ); if (!VALID_EVTCHN(evtchn)) return; mask_evtchn(evtchn); xen_evtchn_close(evtchn); xen_irq_info_cleanup(info); } static void enable_pirq(struct irq_data *data) { enable_dynirq(data); } static void disable_pirq(struct irq_data *data) { disable_dynirq(data); } int xen_irq_from_gsi(unsigned gsi) { struct irq_info *info; list_for_each_entry(info, &xen_irq_list_head, list) { if (info->type != IRQT_PIRQ) continue; if (info->u.pirq.gsi == gsi) return info->irq; } return -1; } EXPORT_SYMBOL_GPL(xen_irq_from_gsi); static void __unbind_from_irq(unsigned int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); struct irq_info *info = info_for_irq(irq); if (info->refcnt > 0) { info->refcnt--; if (info->refcnt != 0) return; } if (VALID_EVTCHN(evtchn)) { unsigned int cpu = cpu_from_irq(irq); xen_evtchn_close(evtchn); switch (type_from_irq(irq)) { case IRQT_VIRQ: per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1; break; case IRQT_IPI: per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1; break; default: break; } xen_irq_info_cleanup(info); } xen_free_irq(irq); } /* * Do not make any assumptions regarding the relationship between the * IRQ number returned here and the Xen pirq argument. * * Note: We don't assign an event channel until the irq actually started * up. Return an existing irq if we've already got one for the gsi. * * Shareable implies level triggered, not shareable implies edge * triggered here. */ int xen_bind_pirq_gsi_to_irq(unsigned gsi, unsigned pirq, int shareable, char *name) { int irq = -1; struct physdev_irq irq_op; int ret; mutex_lock(&irq_mapping_update_lock); irq = xen_irq_from_gsi(gsi); if (irq != -1) { pr_info("%s: returning irq %d for gsi %u\n", __func__, irq, gsi); goto out; } irq = xen_allocate_irq_gsi(gsi); if (irq < 0) goto out; irq_op.irq = irq; irq_op.vector = 0; /* Only the privileged domain can do this. For non-priv, the pcifront * driver provides a PCI bus that does the call to do exactly * this in the priv domain. */ if (xen_initial_domain() && HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) { xen_free_irq(irq); irq = -ENOSPC; goto out; } ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF, shareable ? PIRQ_SHAREABLE : 0); if (ret < 0) { __unbind_from_irq(irq); irq = ret; goto out; } pirq_query_unmask(irq); /* We try to use the handler with the appropriate semantic for the * type of interrupt: if the interrupt is an edge triggered * interrupt we use handle_edge_irq. * * On the other hand if the interrupt is level triggered we use * handle_fasteoi_irq like the native code does for this kind of * interrupts. 
* * Depending on the Xen version, pirq_needs_eoi might return true * not only for level triggered interrupts but for edge triggered * interrupts too. In any case Xen always honors the eoi mechanism, * not injecting any more pirqs of the same kind if the first one * hasn't received an eoi yet. Therefore using the fasteoi handler * is the right choice either way. */ if (shareable) irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_fasteoi_irq, name); else irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq, name); out: mutex_unlock(&irq_mapping_update_lock); return irq; } #ifdef CONFIG_PCI_MSI int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc) { int rc; struct physdev_get_free_pirq op_get_free_pirq; op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI; rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq); WARN_ONCE(rc == -ENOSYS, "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n"); return rc ? -1 : op_get_free_pirq.pirq; } int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, int pirq, int nvec, const char *name, domid_t domid) { int i, irq, ret; mutex_lock(&irq_mapping_update_lock); irq = xen_allocate_irqs_dynamic(nvec); if (irq < 0) goto out; for (i = 0; i < nvec; i++) { irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name); ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid, i == 0 ? 0 : PIRQ_MSI_GROUP); if (ret < 0) goto error_irq; } ret = irq_set_msi_desc(irq, msidesc); if (ret < 0) goto error_irq; out: mutex_unlock(&irq_mapping_update_lock); return irq; error_irq: while (nvec--) __unbind_from_irq(irq + nvec); mutex_unlock(&irq_mapping_update_lock); return ret; } #endif int xen_destroy_irq(int irq) { struct physdev_unmap_pirq unmap_irq; struct irq_info *info = info_for_irq(irq); int rc = -ENOENT; mutex_lock(&irq_mapping_update_lock); /* * If trying to remove a vector in a MSI group different * than the first one skip the PIRQ unmap unless this vector * is the first one in the group. */ if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) { unmap_irq.pirq = info->u.pirq.pirq; unmap_irq.domid = info->u.pirq.domid; rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq); /* If another domain quits without making the pci_disable_msix * call, the Xen hypervisor takes care of freeing the PIRQs * (free_domain_pirqs). 
*/ if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF)) pr_info("domain %d does not have %d anymore\n", info->u.pirq.domid, info->u.pirq.pirq); else if (rc) { pr_warn("unmap irq failed %d\n", rc); goto out; } } xen_free_irq(irq); out: mutex_unlock(&irq_mapping_update_lock); return rc; } int xen_irq_from_pirq(unsigned pirq) { int irq; struct irq_info *info; mutex_lock(&irq_mapping_update_lock); list_for_each_entry(info, &xen_irq_list_head, list) { if (info->type != IRQT_PIRQ) continue; irq = info->irq; if (info->u.pirq.pirq == pirq) goto out; } irq = -1; out: mutex_unlock(&irq_mapping_update_lock); return irq; } int xen_pirq_from_irq(unsigned irq) { return pirq_from_irq(irq); } EXPORT_SYMBOL_GPL(xen_pirq_from_irq); static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip) { int irq; int ret; if (evtchn >= xen_evtchn_max_channels()) return -ENOMEM; mutex_lock(&irq_mapping_update_lock); irq = get_evtchn_to_irq(evtchn); if (irq == -1) { irq = xen_allocate_irq_dynamic(); if (irq < 0) goto out; irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "event"); ret = xen_irq_info_evtchn_setup(irq, evtchn); if (ret < 0) { __unbind_from_irq(irq); irq = ret; goto out; } /* New interdomain events are bound to VCPU 0. */ bind_evtchn_to_cpu(evtchn, 0); } else { struct irq_info *info = info_for_irq(irq); WARN_ON(info == NULL || info->type != IRQT_EVTCHN); } out: mutex_unlock(&irq_mapping_update_lock); return irq; } int bind_evtchn_to_irq(evtchn_port_t evtchn) { return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip); } EXPORT_SYMBOL_GPL(bind_evtchn_to_irq); int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn) { return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip); } EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi); static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) { struct evtchn_bind_ipi bind_ipi; evtchn_port_t evtchn; int ret, irq; mutex_lock(&irq_mapping_update_lock); irq = per_cpu(ipi_to_irq, cpu)[ipi]; if (irq == -1) { irq = xen_allocate_irq_dynamic(); if (irq < 0) goto out; irq_set_chip_and_handler_name(irq, &xen_percpu_chip, handle_percpu_irq, "ipi"); bind_ipi.vcpu = xen_vcpu_nr(cpu); if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi) != 0) BUG(); evtchn = bind_ipi.port; ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi); if (ret < 0) { __unbind_from_irq(irq); irq = ret; goto out; } bind_evtchn_to_cpu(evtchn, cpu); } else { struct irq_info *info = info_for_irq(irq); WARN_ON(info == NULL || info->type != IRQT_IPI); } out: mutex_unlock(&irq_mapping_update_lock); return irq; } static int bind_interdomain_evtchn_to_irq_chip(unsigned int remote_domain, evtchn_port_t remote_port, struct irq_chip *chip) { struct evtchn_bind_interdomain bind_interdomain; int err; bind_interdomain.remote_dom = remote_domain; bind_interdomain.remote_port = remote_port; err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &bind_interdomain); return err ? 
: bind_evtchn_to_irq_chip(bind_interdomain.local_port, chip); } int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, evtchn_port_t remote_port) { return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port, &xen_dynamic_chip); } EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq); int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain, evtchn_port_t remote_port) { return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port, &xen_lateeoi_chip); } EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi); static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn) { struct evtchn_status status; evtchn_port_t port; int rc = -ENOENT; memset(&status, 0, sizeof(status)); for (port = 0; port < xen_evtchn_max_channels(); port++) { status.dom = DOMID_SELF; status.port = port; rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status); if (rc < 0) continue; if (status.status != EVTCHNSTAT_virq) continue; if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) { *evtchn = port; break; } } return rc; } /** * xen_evtchn_nr_channels - number of usable event channel ports * * This may be less than the maximum supported by the current * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum * supported. */ unsigned xen_evtchn_nr_channels(void) { return evtchn_ops->nr_channels(); } EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels); int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu) { struct evtchn_bind_virq bind_virq; evtchn_port_t evtchn = 0; int irq, ret; mutex_lock(&irq_mapping_update_lock); irq = per_cpu(virq_to_irq, cpu)[virq]; if (irq == -1) { irq = xen_allocate_irq_dynamic(); if (irq < 0) goto out; if (percpu) irq_set_chip_and_handler_name(irq, &xen_percpu_chip, handle_percpu_irq, "virq"); else irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, handle_edge_irq, "virq"); bind_virq.virq = virq; bind_virq.vcpu = xen_vcpu_nr(cpu); ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq); if (ret == 0) evtchn = bind_virq.port; else { if (ret == -EEXIST) ret = find_virq(virq, cpu, &evtchn); BUG_ON(ret < 0); } ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq); if (ret < 0) { __unbind_from_irq(irq); irq = ret; goto out; } bind_evtchn_to_cpu(evtchn, cpu); } else { struct irq_info *info = info_for_irq(irq); WARN_ON(info == NULL || info->type != IRQT_VIRQ); } out: mutex_unlock(&irq_mapping_update_lock); return irq; } static void unbind_from_irq(unsigned int irq) { mutex_lock(&irq_mapping_update_lock); __unbind_from_irq(irq); mutex_unlock(&irq_mapping_update_lock); } static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id, struct irq_chip *chip) { int irq, retval; irq = bind_evtchn_to_irq_chip(evtchn, chip); if (irq < 0) return irq; retval = request_irq(irq, handler, irqflags, devname, dev_id); if (retval != 0) { unbind_from_irq(irq); return retval; } return irq; } int bind_evtchn_to_irqhandler(evtchn_port_t evtchn, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags, devname, dev_id, &xen_dynamic_chip); } EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler); int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags, devname, dev_id, &xen_lateeoi_chip); } 
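/*
 * Illustrative sketch (not part of the original driver): one plausible way a
 * backend driver could consume the lateeoi binding defined above.  The event
 * channel number, the "example" name and both helpers are hypothetical; a
 * real caller would obtain the port from its frontend (e.g. via xenstore).
 */
static irqreturn_t example_lateeoi_handler(int irq, void *dev_id)
{
	/* Process the event, then let Xen unmask the port again. */
	xen_irq_lateeoi(irq, 0);
	return IRQ_HANDLED;
}

static int __maybe_unused example_bind_lateeoi(evtchn_port_t evtchn,
					       void *dev_id)
{
	int irq;

	irq = bind_evtchn_to_irqhandler_lateeoi(evtchn, example_lateeoi_handler,
						0, "example", dev_id);
	if (irq < 0)
		return irq;

	/* ... and on teardown: */
	unbind_from_irqhandler(irq, dev_id);
	return 0;
}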
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler_lateeoi); static int bind_interdomain_evtchn_to_irqhandler_chip( unsigned int remote_domain, evtchn_port_t remote_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id, struct irq_chip *chip) { int irq, retval; irq = bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port, chip); if (irq < 0) return irq; retval = request_irq(irq, handler, irqflags, devname, dev_id); if (retval != 0) { unbind_from_irq(irq); return retval; } return irq; } int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, evtchn_port_t remote_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain, remote_port, handler, irqflags, devname, dev_id, &xen_dynamic_chip); } EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler); int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain, evtchn_port_t remote_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain, remote_port, handler, irqflags, devname, dev_id, &xen_lateeoi_chip); } EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler_lateeoi); int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { int irq, retval; irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU); if (irq < 0) return irq; retval = request_irq(irq, handler, irqflags, devname, dev_id); if (retval != 0) { unbind_from_irq(irq); return retval; } return irq; } EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler); int bind_ipi_to_irqhandler(enum ipi_vector ipi, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { int irq, retval; irq = bind_ipi_to_irq(ipi, cpu); if (irq < 0) return irq; irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME; retval = request_irq(irq, handler, irqflags, devname, dev_id); if (retval != 0) { unbind_from_irq(irq); return retval; } return irq; } void unbind_from_irqhandler(unsigned int irq, void *dev_id) { struct irq_info *info = info_for_irq(irq); if (WARN_ON(!info)) return; free_irq(irq, dev_id); unbind_from_irq(irq); } EXPORT_SYMBOL_GPL(unbind_from_irqhandler); /** * xen_set_irq_priority() - set an event channel priority. * @irq:irq bound to an event channel. * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN. 
*/ int xen_set_irq_priority(unsigned irq, unsigned priority) { struct evtchn_set_priority set_priority; set_priority.port = evtchn_from_irq(irq); set_priority.priority = priority; return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority, &set_priority); } EXPORT_SYMBOL_GPL(xen_set_irq_priority); int evtchn_make_refcounted(evtchn_port_t evtchn) { int irq = get_evtchn_to_irq(evtchn); struct irq_info *info; if (irq == -1) return -ENOENT; info = info_for_irq(irq); if (!info) return -ENOENT; WARN_ON(info->refcnt != -1); info->refcnt = 1; return 0; } EXPORT_SYMBOL_GPL(evtchn_make_refcounted); int evtchn_get(evtchn_port_t evtchn) { int irq; struct irq_info *info; int err = -ENOENT; if (evtchn >= xen_evtchn_max_channels()) return -EINVAL; mutex_lock(&irq_mapping_update_lock); irq = get_evtchn_to_irq(evtchn); if (irq == -1) goto done; info = info_for_irq(irq); if (!info) goto done; err = -EINVAL; if (info->refcnt <= 0) goto done; info->refcnt++; err = 0; done: mutex_unlock(&irq_mapping_update_lock); return err; } EXPORT_SYMBOL_GPL(evtchn_get); void evtchn_put(evtchn_port_t evtchn) { int irq = get_evtchn_to_irq(evtchn); if (WARN_ON(irq == -1)) return; unbind_from_irq(irq); } EXPORT_SYMBOL_GPL(evtchn_put); void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector) { int irq; #ifdef CONFIG_X86 if (unlikely(vector == XEN_NMI_VECTOR)) { int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, xen_vcpu_nr(cpu), NULL); if (rc < 0) printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc); return; } #endif irq = per_cpu(ipi_to_irq, cpu)[vector]; BUG_ON(irq < 0); notify_remote_via_irq(irq); } struct evtchn_loop_ctrl { ktime_t timeout; unsigned count; bool defer_eoi; }; void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl) { int irq; struct irq_info *info; irq = get_evtchn_to_irq(port); if (irq == -1) return; /* * Check for timeout every 256 events. * We are setting the timeout value only after the first 256 * events in order to not hurt the common case of few loop * iterations. The 256 is basically an arbitrary value. * * In case we are hitting the timeout we need to defer all further * EOIs in order to ensure to leave the event handling loop rather * sooner than later. */ if (!ctrl->defer_eoi && !(++ctrl->count & 0xff)) { ktime_t kt = ktime_get(); if (!ctrl->timeout) { kt = ktime_add_ms(kt, jiffies_to_msecs(event_loop_timeout)); ctrl->timeout = kt; } else if (kt > ctrl->timeout) { ctrl->defer_eoi = true; } } info = info_for_irq(irq); if (ctrl->defer_eoi) { info->eoi_cpu = smp_processor_id(); info->irq_epoch = __this_cpu_read(irq_epoch); info->eoi_time = get_jiffies_64() + event_eoi_delay; } generic_handle_irq(irq); } static void __xen_evtchn_do_upcall(void) { struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); int cpu = smp_processor_id(); struct evtchn_loop_ctrl ctrl = { 0 }; read_lock(&evtchn_rwlock); do { vcpu_info->evtchn_upcall_pending = 0; xen_evtchn_handle_events(cpu, &ctrl); BUG_ON(!irqs_disabled()); virt_rmb(); /* Hypervisor can set upcall pending. */ } while (vcpu_info->evtchn_upcall_pending); read_unlock(&evtchn_rwlock); /* * Increment irq_epoch only now to defer EOIs only for * xen_irq_lateeoi() invocations occurring from inside the loop * above. 
*/ __this_cpu_inc(irq_epoch); } void xen_evtchn_do_upcall(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); irq_enter(); __xen_evtchn_do_upcall(); irq_exit(); set_irq_regs(old_regs); } void xen_hvm_evtchn_do_upcall(void) { __xen_evtchn_do_upcall(); } EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall); /* Rebind a new event channel to an existing irq. */ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq) { struct irq_info *info = info_for_irq(irq); if (WARN_ON(!info)) return; /* Make sure the irq is masked, since the new event channel will also be masked. */ disable_irq(irq); mutex_lock(&irq_mapping_update_lock); /* After resume the irq<->evtchn mappings are all cleared out */ BUG_ON(get_evtchn_to_irq(evtchn) != -1); /* Expect irq to have been bound before, so there should be a proper type */ BUG_ON(info->type == IRQT_UNBOUND); (void)xen_irq_info_evtchn_setup(irq, evtchn); mutex_unlock(&irq_mapping_update_lock); bind_evtchn_to_cpu(evtchn, info->cpu); /* This will be deferred until interrupt is processed */ irq_set_affinity(irq, cpumask_of(info->cpu)); /* Unmask the event channel. */ enable_irq(irq); } /* Rebind an evtchn so that it gets delivered to a specific cpu */ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu) { struct evtchn_bind_vcpu bind_vcpu; int masked; if (!VALID_EVTCHN(evtchn)) return -1; if (!xen_support_evtchn_rebind()) return -1; /* Send future instances of this interrupt to other vcpu. */ bind_vcpu.port = evtchn; bind_vcpu.vcpu = xen_vcpu_nr(tcpu); /* * Mask the event while changing the VCPU binding to prevent * it being delivered on an unexpected VCPU. */ masked = test_and_set_mask(evtchn); /* * If this fails, it usually just indicates that we're dealing with a * virq or IPI channel, which don't actually need to be rebound. Ignore * it, but don't do the xenlinux-level rebind in that case. */ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) bind_evtchn_to_cpu(evtchn, tcpu); if (!masked) unmask_evtchn(evtchn); return 0; } static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest, bool force) { unsigned tcpu = cpumask_first_and(dest, cpu_online_mask); int ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu); if (!ret) irq_data_update_effective_affinity(data, cpumask_of(tcpu)); return ret; } /* To be called with desc->lock held. 
*/ int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu) { struct irq_data *d = irq_desc_get_irq_data(desc); return set_affinity_irq(d, cpumask_of(tcpu), false); } EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn); static void enable_dynirq(struct irq_data *data) { evtchn_port_t evtchn = evtchn_from_irq(data->irq); if (VALID_EVTCHN(evtchn)) unmask_evtchn(evtchn); } static void disable_dynirq(struct irq_data *data) { evtchn_port_t evtchn = evtchn_from_irq(data->irq); if (VALID_EVTCHN(evtchn)) mask_evtchn(evtchn); } static void ack_dynirq(struct irq_data *data) { evtchn_port_t evtchn = evtchn_from_irq(data->irq); if (!VALID_EVTCHN(evtchn)) return; if (unlikely(irqd_is_setaffinity_pending(data)) && likely(!irqd_irq_disabled(data))) { int masked = test_and_set_mask(evtchn); clear_evtchn(evtchn); irq_move_masked_irq(data); if (!masked) unmask_evtchn(evtchn); } else clear_evtchn(evtchn); } static void mask_ack_dynirq(struct irq_data *data) { disable_dynirq(data); ack_dynirq(data); } static int retrigger_dynirq(struct irq_data *data) { evtchn_port_t evtchn = evtchn_from_irq(data->irq); int masked; if (!VALID_EVTCHN(evtchn)) return 0; masked = test_and_set_mask(evtchn); set_evtchn(evtchn); if (!masked) unmask_evtchn(evtchn); return 1; } static void restore_pirqs(void) { int pirq, rc, irq, gsi; struct physdev_map_pirq map_irq; struct irq_info *info; list_for_each_entry(info, &xen_irq_list_head, list) { if (info->type != IRQT_PIRQ) continue; pirq = info->u.pirq.pirq; gsi = info->u.pirq.gsi; irq = info->irq; /* save/restore of PT devices doesn't work, so at this point the * only devices present are GSI based emulated devices */ if (!gsi) continue; map_irq.domid = DOMID_SELF; map_irq.type = MAP_PIRQ_TYPE_GSI; map_irq.index = gsi; map_irq.pirq = pirq; rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); if (rc) { pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n", gsi, irq, pirq, rc); xen_free_irq(irq); continue; } printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); __startup_pirq(irq); } } static void restore_cpu_virqs(unsigned int cpu) { struct evtchn_bind_virq bind_virq; evtchn_port_t evtchn; int virq, irq; for (virq = 0; virq < NR_VIRQS; virq++) { if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) continue; BUG_ON(virq_from_irq(irq) != virq); /* Get a new binding from Xen. */ bind_virq.virq = virq; bind_virq.vcpu = xen_vcpu_nr(cpu); if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq) != 0) BUG(); evtchn = bind_virq.port; /* Record the new mapping. */ (void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq); bind_evtchn_to_cpu(evtchn, cpu); } } static void restore_cpu_ipis(unsigned int cpu) { struct evtchn_bind_ipi bind_ipi; evtchn_port_t evtchn; int ipi, irq; for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) { if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) continue; BUG_ON(ipi_from_irq(irq) != ipi); /* Get a new binding from Xen. */ bind_ipi.vcpu = xen_vcpu_nr(cpu); if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi) != 0) BUG(); evtchn = bind_ipi.port; /* Record the new mapping. 
*/ (void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi); bind_evtchn_to_cpu(evtchn, cpu); } } /* Clear an irq's pending state, in preparation for polling on it */ void xen_clear_irq_pending(int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); if (VALID_EVTCHN(evtchn)) clear_evtchn(evtchn); } EXPORT_SYMBOL(xen_clear_irq_pending); void xen_set_irq_pending(int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); if (VALID_EVTCHN(evtchn)) set_evtchn(evtchn); } bool xen_test_irq_pending(int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); bool ret = false; if (VALID_EVTCHN(evtchn)) ret = test_evtchn(evtchn); return ret; } /* Poll waiting for an irq to become pending with timeout. In the usual case, * the irq will be disabled so it won't deliver an interrupt. */ void xen_poll_irq_timeout(int irq, u64 timeout) { evtchn_port_t evtchn = evtchn_from_irq(irq); if (VALID_EVTCHN(evtchn)) { struct sched_poll poll; poll.nr_ports = 1; poll.timeout = timeout; set_xen_guest_handle(poll.ports, &evtchn); if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0) BUG(); } } EXPORT_SYMBOL(xen_poll_irq_timeout); /* Poll waiting for an irq to become pending. In the usual case, the * irq will be disabled so it won't deliver an interrupt. */ void xen_poll_irq(int irq) { xen_poll_irq_timeout(irq, 0 /* no timeout */); } /* Check whether the IRQ line is shared with other guests. */ int xen_test_irq_shared(int irq) { struct irq_info *info = info_for_irq(irq); struct physdev_irq_status_query irq_status; if (WARN_ON(!info)) return -ENOENT; irq_status.irq = info->u.pirq.pirq; if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status)) return 0; return !(irq_status.flags & XENIRQSTAT_shared); } EXPORT_SYMBOL_GPL(xen_test_irq_shared); void xen_irq_resume(void) { unsigned int cpu; struct irq_info *info; /* New event-channel space is not 'live' yet. */ xen_evtchn_resume(); /* No IRQ <-> event-channel mappings. */ list_for_each_entry(info, &xen_irq_list_head, list) info->evtchn = 0; /* zap event-channel binding */ clear_evtchn_to_irq_all(); for_each_possible_cpu(cpu) { restore_cpu_virqs(cpu); restore_cpu_ipis(cpu); } restore_pirqs(); } static struct irq_chip xen_dynamic_chip __read_mostly = { .name = "xen-dyn", .irq_disable = disable_dynirq, .irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, .irq_ack = ack_dynirq, .irq_mask_ack = mask_ack_dynirq, .irq_set_affinity = set_affinity_irq, .irq_retrigger = retrigger_dynirq, }; static struct irq_chip xen_lateeoi_chip __read_mostly = { /* The chip name needs to contain "xen-dyn" for irqbalance to work. 
*/ .name = "xen-dyn-lateeoi", .irq_disable = disable_dynirq, .irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, .irq_ack = mask_ack_dynirq, .irq_mask_ack = mask_ack_dynirq, .irq_set_affinity = set_affinity_irq, .irq_retrigger = retrigger_dynirq, }; static struct irq_chip xen_pirq_chip __read_mostly = { .name = "xen-pirq", .irq_startup = startup_pirq, .irq_shutdown = shutdown_pirq, .irq_enable = enable_pirq, .irq_disable = disable_pirq, .irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, .irq_ack = eoi_pirq, .irq_eoi = eoi_pirq, .irq_mask_ack = mask_ack_pirq, .irq_set_affinity = set_affinity_irq, .irq_retrigger = retrigger_dynirq, }; static struct irq_chip xen_percpu_chip __read_mostly = { .name = "xen-percpu", .irq_disable = disable_dynirq, .irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, .irq_ack = ack_dynirq, }; int xen_set_callback_via(uint64_t via) { struct xen_hvm_param a; a.domid = DOMID_SELF; a.index = HVM_PARAM_CALLBACK_IRQ; a.value = via; return HYPERVISOR_hvm_op(HVMOP_set_param, &a); } EXPORT_SYMBOL_GPL(xen_set_callback_via); #ifdef CONFIG_XEN_PVHVM /* Vector callbacks are better than PCI interrupts to receive event * channel notifications because we can receive vector callbacks on any * vcpu and we don't need PCI support or APIC interactions. */ void xen_setup_callback_vector(void) { uint64_t callback_via; if (xen_have_vector_callback) { callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR); if (xen_set_callback_via(callback_via)) { pr_err("Request for Xen HVM callback vector failed\n"); xen_have_vector_callback = 0; } } } static __init void xen_alloc_callback_vector(void) { if (!xen_have_vector_callback) return; pr_info("Xen HVM callback vector for event delivery is enabled\n"); alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_xen_hvm_callback); } #else void xen_setup_callback_vector(void) {} static inline void xen_alloc_callback_vector(void) {} #endif static bool fifo_events = true; module_param(fifo_events, bool, 0); static int xen_evtchn_cpu_prepare(unsigned int cpu) { int ret = 0; xen_cpu_init_eoi(cpu); if (evtchn_ops->percpu_init) ret = evtchn_ops->percpu_init(cpu); return ret; } static int xen_evtchn_cpu_dead(unsigned int cpu) { int ret = 0; if (evtchn_ops->percpu_deinit) ret = evtchn_ops->percpu_deinit(cpu); return ret; } void __init xen_init_IRQ(void) { int ret = -EINVAL; evtchn_port_t evtchn; if (fifo_events) ret = xen_evtchn_fifo_init(); if (ret < 0) xen_evtchn_2l_init(); xen_cpu_init_eoi(smp_processor_id()); cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE, "xen/evtchn:prepare", xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead); evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()), sizeof(*evtchn_to_irq), GFP_KERNEL); BUG_ON(!evtchn_to_irq); /* No event channels are 'live' right now. 
*/ for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++) mask_evtchn(evtchn); pirq_needs_eoi = pirq_needs_eoi_flag; #ifdef CONFIG_X86 if (xen_pv_domain()) { if (xen_initial_domain()) pci_xen_initial_domain(); } if (xen_feature(XENFEAT_hvm_callback_vector)) { xen_setup_callback_vector(); xen_alloc_callback_vector(); } if (xen_hvm_domain()) { native_init_IRQ(); /* pci_xen_hvm_init must be called after native_init_IRQ so that * __acpi_register_gsi can point at the right function */ pci_xen_hvm_init(); } else { int rc; struct physdev_pirq_eoi_gmfn eoi_gmfn; pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO); eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map); rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn); if (rc != 0) { free_page((unsigned long) pirq_eoi_map); pirq_eoi_map = NULL; } else pirq_needs_eoi = pirq_check_eoi_map; } #endif }
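/*
 * Illustrative sketch (not part of the original file): the polling pattern
 * that xen_clear_irq_pending()/xen_poll_irq(), defined earlier in this file,
 * are intended for - roughly what the x86 PV spinlock code does while waiting
 * for a kick.  The "condition" callback is hypothetical, and a real user also
 * needs the appropriate memory barriers between clearing the pending bit and
 * re-checking the condition.
 */
static void __maybe_unused example_poll_for_event(int irq,
						  bool (*condition)(void *),
						  void *arg)
{
	while (!condition(arg)) {
		/*
		 * Clear the pending state before re-checking, so a wakeup
		 * that arrives in between is not lost.
		 */
		xen_clear_irq_pending(irq);
		if (condition(arg))
			break;
		/* Block in the hypervisor until the event channel fires. */
		xen_poll_irq(irq);
	}
}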
./CrossVul/dataset_final_sorted/CWE-400/c/good_4422_2
crossvul-cpp_data_bad_316_0
/************************************************************ * Copyright (c) 1994 by Silicon Graphics Computer Systems, Inc. * * Permission to use, copy, modify, and distribute this * software and its documentation for any purpose and without * fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting * documentation, and that the name of Silicon Graphics not be * used in advertising or publicity pertaining to distribution * of the software without specific prior written permission. * Silicon Graphics makes no representation about the suitability * of this software for any purpose. It is provided "as is" * without any express or implied warranty. * * SILICON GRAPHICS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY * AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL SILICON * GRAPHICS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH * THE USE OR PERFORMANCE OF THIS SOFTWARE. * ********************************************************/ #include "xkbcomp-priv.h" #include "text.h" #include "expr.h" typedef bool (*IdentLookupFunc)(struct xkb_context *ctx, const void *priv, xkb_atom_t field, enum expr_value_type type, unsigned int *val_rtrn); bool ExprResolveLhs(struct xkb_context *ctx, const ExprDef *expr, const char **elem_rtrn, const char **field_rtrn, ExprDef **index_rtrn) { switch (expr->expr.op) { case EXPR_IDENT: *elem_rtrn = NULL; *field_rtrn = xkb_atom_text(ctx, expr->ident.ident); *index_rtrn = NULL; return true; case EXPR_FIELD_REF: *elem_rtrn = xkb_atom_text(ctx, expr->field_ref.element); *field_rtrn = xkb_atom_text(ctx, expr->field_ref.field); *index_rtrn = NULL; return true; case EXPR_ARRAY_REF: *elem_rtrn = xkb_atom_text(ctx, expr->array_ref.element); *field_rtrn = xkb_atom_text(ctx, expr->array_ref.field); *index_rtrn = expr->array_ref.entry; return true; default: break; } log_wsgo(ctx, "Unexpected operator %d in ResolveLhs\n", expr->expr.op); return false; } static bool SimpleLookup(struct xkb_context *ctx, const void *priv, xkb_atom_t field, enum expr_value_type type, unsigned int *val_rtrn) { const LookupEntry *entry; const char *str; if (!priv || field == XKB_ATOM_NONE || type != EXPR_TYPE_INT) return false; str = xkb_atom_text(ctx, field); for (entry = priv; entry && entry->name; entry++) { if (istreq(str, entry->name)) { *val_rtrn = entry->value; return true; } } return false; } /* Data passed in the *priv argument for LookupModMask. 
*/ typedef struct { const struct xkb_mod_set *mods; enum mod_type mod_type; } LookupModMaskPriv; static bool LookupModMask(struct xkb_context *ctx, const void *priv, xkb_atom_t field, enum expr_value_type type, xkb_mod_mask_t *val_rtrn) { const char *str; xkb_mod_index_t ndx; const LookupModMaskPriv *arg = priv; const struct xkb_mod_set *mods = arg->mods; enum mod_type mod_type = arg->mod_type; if (type != EXPR_TYPE_INT) return false; str = xkb_atom_text(ctx, field); if (istreq(str, "all")) { *val_rtrn = MOD_REAL_MASK_ALL; return true; } if (istreq(str, "none")) { *val_rtrn = 0; return true; } ndx = XkbModNameToIndex(mods, field, mod_type); if (ndx == XKB_MOD_INVALID) return false; *val_rtrn = (1u << ndx); return true; } bool ExprResolveBoolean(struct xkb_context *ctx, const ExprDef *expr, bool *set_rtrn) { bool ok = false; const char *ident; switch (expr->expr.op) { case EXPR_VALUE: if (expr->expr.value_type != EXPR_TYPE_BOOLEAN) { log_err(ctx, "Found constant of type %s where boolean was expected\n", expr_value_type_to_string(expr->expr.value_type)); return false; } *set_rtrn = expr->boolean.set; return true; case EXPR_IDENT: ident = xkb_atom_text(ctx, expr->ident.ident); if (ident) { if (istreq(ident, "true") || istreq(ident, "yes") || istreq(ident, "on")) { *set_rtrn = true; return true; } else if (istreq(ident, "false") || istreq(ident, "no") || istreq(ident, "off")) { *set_rtrn = false; return true; } } log_err(ctx, "Identifier \"%s\" of type boolean is unknown\n", ident); return false; case EXPR_FIELD_REF: log_err(ctx, "Default \"%s.%s\" of type boolean is unknown\n", xkb_atom_text(ctx, expr->field_ref.element), xkb_atom_text(ctx, expr->field_ref.field)); return false; case EXPR_INVERT: case EXPR_NOT: ok = ExprResolveBoolean(ctx, expr, set_rtrn); if (ok) *set_rtrn = !*set_rtrn; return ok; case EXPR_ADD: case EXPR_SUBTRACT: case EXPR_MULTIPLY: case EXPR_DIVIDE: case EXPR_ASSIGN: case EXPR_NEGATE: case EXPR_UNARY_PLUS: log_err(ctx, "%s of boolean values not permitted\n", expr_op_type_to_string(expr->expr.op)); break; default: log_wsgo(ctx, "Unknown operator %d in ResolveBoolean\n", expr->expr.op); break; } return false; } bool ExprResolveKeyCode(struct xkb_context *ctx, const ExprDef *expr, xkb_keycode_t *kc) { xkb_keycode_t leftRtrn, rightRtrn; switch (expr->expr.op) { case EXPR_VALUE: if (expr->expr.value_type != EXPR_TYPE_INT) { log_err(ctx, "Found constant of type %s where an int was expected\n", expr_value_type_to_string(expr->expr.value_type)); return false; } *kc = (xkb_keycode_t) expr->integer.ival; return true; case EXPR_ADD: case EXPR_SUBTRACT: case EXPR_MULTIPLY: case EXPR_DIVIDE: if (!ExprResolveKeyCode(ctx, expr->binary.left, &leftRtrn) || !ExprResolveKeyCode(ctx, expr->binary.right, &rightRtrn)) return false; switch (expr->expr.op) { case EXPR_ADD: *kc = leftRtrn + rightRtrn; break; case EXPR_SUBTRACT: *kc = leftRtrn - rightRtrn; break; case EXPR_MULTIPLY: *kc = leftRtrn * rightRtrn; break; case EXPR_DIVIDE: if (rightRtrn == 0) { log_err(ctx, "Cannot divide by zero: %d / %d\n", leftRtrn, rightRtrn); return false; } *kc = leftRtrn / rightRtrn; break; default: break; } return true; case EXPR_NEGATE: if (!ExprResolveKeyCode(ctx, expr->unary.child, &leftRtrn)) return false; *kc = ~leftRtrn; return true; case EXPR_UNARY_PLUS: return ExprResolveKeyCode(ctx, expr->unary.child, kc); default: log_wsgo(ctx, "Unknown operator %d in ResolveKeyCode\n", expr->expr.op); break; } return false; } /** * This function returns ... something. It's a bit of a guess, really. 
* * If an integer is given in value ctx, it will be returned in ival. * If an ident or field reference is given, the lookup function (if given) * will be called. At the moment, only SimpleLookup use this, and they both * return the results in uval. And don't support field references. * * Cool. */ static bool ExprResolveIntegerLookup(struct xkb_context *ctx, const ExprDef *expr, int *val_rtrn, IdentLookupFunc lookup, const void *lookupPriv) { bool ok = false; int l, r; unsigned u; ExprDef *left, *right; switch (expr->expr.op) { case EXPR_VALUE: if (expr->expr.value_type != EXPR_TYPE_INT) { log_err(ctx, "Found constant of type %s where an int was expected\n", expr_value_type_to_string(expr->expr.value_type)); return false; } *val_rtrn = expr->integer.ival; return true; case EXPR_IDENT: if (lookup) ok = lookup(ctx, lookupPriv, expr->ident.ident, EXPR_TYPE_INT, &u); if (!ok) log_err(ctx, "Identifier \"%s\" of type int is unknown\n", xkb_atom_text(ctx, expr->ident.ident)); else *val_rtrn = (int) u; return ok; case EXPR_FIELD_REF: log_err(ctx, "Default \"%s.%s\" of type int is unknown\n", xkb_atom_text(ctx, expr->field_ref.element), xkb_atom_text(ctx, expr->field_ref.field)); return false; case EXPR_ADD: case EXPR_SUBTRACT: case EXPR_MULTIPLY: case EXPR_DIVIDE: left = expr->binary.left; right = expr->binary.right; if (!ExprResolveIntegerLookup(ctx, left, &l, lookup, lookupPriv) || !ExprResolveIntegerLookup(ctx, right, &r, lookup, lookupPriv)) return false; switch (expr->expr.op) { case EXPR_ADD: *val_rtrn = l + r; break; case EXPR_SUBTRACT: *val_rtrn = l - r; break; case EXPR_MULTIPLY: *val_rtrn = l * r; break; case EXPR_DIVIDE: if (r == 0) { log_err(ctx, "Cannot divide by zero: %d / %d\n", l, r); return false; } *val_rtrn = l / r; break; default: log_err(ctx, "%s of integers not permitted\n", expr_op_type_to_string(expr->expr.op)); return false; } return true; case EXPR_ASSIGN: log_wsgo(ctx, "Assignment operator not implemented yet\n"); break; case EXPR_NOT: log_err(ctx, "The ! operator cannot be applied to an integer\n"); return false; case EXPR_INVERT: case EXPR_NEGATE: left = expr->unary.child; if (!ExprResolveIntegerLookup(ctx, left, &l, lookup, lookupPriv)) return false; *val_rtrn = (expr->expr.op == EXPR_NEGATE ? -l : ~l); return true; case EXPR_UNARY_PLUS: left = expr->unary.child; return ExprResolveIntegerLookup(ctx, left, val_rtrn, lookup, lookupPriv); default: log_wsgo(ctx, "Unknown operator %d in ResolveInteger\n", expr->expr.op); break; } return false; } bool ExprResolveInteger(struct xkb_context *ctx, const ExprDef *expr, int *val_rtrn) { return ExprResolveIntegerLookup(ctx, expr, val_rtrn, NULL, NULL); } bool ExprResolveGroup(struct xkb_context *ctx, const ExprDef *expr, xkb_layout_index_t *group_rtrn) { bool ok; int result; ok = ExprResolveIntegerLookup(ctx, expr, &result, SimpleLookup, groupNames); if (!ok) return false; if (result <= 0 || result > XKB_MAX_GROUPS) { log_err(ctx, "Group index %u is out of range (1..%d)\n", result, XKB_MAX_GROUPS); return false; } *group_rtrn = (xkb_layout_index_t) result; return true; } bool ExprResolveLevel(struct xkb_context *ctx, const ExprDef *expr, xkb_level_index_t *level_rtrn) { bool ok; int result; ok = ExprResolveIntegerLookup(ctx, expr, &result, SimpleLookup, levelNames); if (!ok) return false; if (result < 1) { log_err(ctx, "Shift level %d is out of range\n", result); return false; } /* Level is zero-indexed from now on. 
*/ *level_rtrn = (unsigned int) (result - 1); return true; } bool ExprResolveButton(struct xkb_context *ctx, const ExprDef *expr, int *btn_rtrn) { return ExprResolveIntegerLookup(ctx, expr, btn_rtrn, SimpleLookup, buttonNames); } bool ExprResolveString(struct xkb_context *ctx, const ExprDef *expr, xkb_atom_t *val_rtrn) { switch (expr->expr.op) { case EXPR_VALUE: if (expr->expr.value_type != EXPR_TYPE_STRING) { log_err(ctx, "Found constant of type %s, expected a string\n", expr_value_type_to_string(expr->expr.value_type)); return false; } *val_rtrn = expr->string.str; return true; case EXPR_IDENT: log_err(ctx, "Identifier \"%s\" of type string not found\n", xkb_atom_text(ctx, expr->ident.ident)); return false; case EXPR_FIELD_REF: log_err(ctx, "Default \"%s.%s\" of type string not found\n", xkb_atom_text(ctx, expr->field_ref.element), xkb_atom_text(ctx, expr->field_ref.field)); return false; case EXPR_ADD: case EXPR_SUBTRACT: case EXPR_MULTIPLY: case EXPR_DIVIDE: case EXPR_ASSIGN: case EXPR_NEGATE: case EXPR_INVERT: case EXPR_NOT: case EXPR_UNARY_PLUS: log_err(ctx, "%s of strings not permitted\n", expr_op_type_to_string(expr->expr.op)); return false; default: log_wsgo(ctx, "Unknown operator %d in ResolveString\n", expr->expr.op); break; } return false; } bool ExprResolveEnum(struct xkb_context *ctx, const ExprDef *expr, unsigned int *val_rtrn, const LookupEntry *values) { if (expr->expr.op != EXPR_IDENT) { log_err(ctx, "Found a %s where an enumerated value was expected\n", expr_op_type_to_string(expr->expr.op)); return false; } if (!SimpleLookup(ctx, values, expr->ident.ident, EXPR_TYPE_INT, val_rtrn)) { log_err(ctx, "Illegal identifier %s; expected one of:\n", xkb_atom_text(ctx, expr->ident.ident)); while (values && values->name) { log_err(ctx, "\t%s\n", values->name); values++; } return false; } return true; } static bool ExprResolveMaskLookup(struct xkb_context *ctx, const ExprDef *expr, unsigned int *val_rtrn, IdentLookupFunc lookup, const void *lookupPriv) { bool ok = false; unsigned int l = 0, r = 0; int v; ExprDef *left, *right; const char *bogus = NULL; switch (expr->expr.op) { case EXPR_VALUE: if (expr->expr.value_type != EXPR_TYPE_INT) { log_err(ctx, "Found constant of type %s where a mask was expected\n", expr_value_type_to_string(expr->expr.value_type)); return false; } *val_rtrn = (unsigned int) expr->integer.ival; return true; case EXPR_IDENT: ok = lookup(ctx, lookupPriv, expr->ident.ident, EXPR_TYPE_INT, val_rtrn); if (!ok) log_err(ctx, "Identifier \"%s\" of type int is unknown\n", xkb_atom_text(ctx, expr->ident.ident)); return ok; case EXPR_FIELD_REF: log_err(ctx, "Default \"%s.%s\" of type int is unknown\n", xkb_atom_text(ctx, expr->field_ref.element), xkb_atom_text(ctx, expr->field_ref.field)); return false; case EXPR_ARRAY_REF: bogus = "array reference"; /* fallthrough */ case EXPR_ACTION_DECL: if (bogus == NULL) bogus = "function use"; log_err(ctx, "Unexpected %s in mask expression; Expression Ignored\n", bogus); return false; case EXPR_ADD: case EXPR_SUBTRACT: case EXPR_MULTIPLY: case EXPR_DIVIDE: left = expr->binary.left; right = expr->binary.right; if (!ExprResolveMaskLookup(ctx, left, &l, lookup, lookupPriv) || !ExprResolveMaskLookup(ctx, right, &r, lookup, lookupPriv)) return false; switch (expr->expr.op) { case EXPR_ADD: *val_rtrn = l | r; break; case EXPR_SUBTRACT: *val_rtrn = l & (~r); break; case EXPR_MULTIPLY: case EXPR_DIVIDE: log_err(ctx, "Cannot %s masks; Illegal operation ignored\n", (expr->expr.op == EXPR_DIVIDE ? 
"divide" : "multiply")); return false; default: break; } return true; case EXPR_ASSIGN: log_wsgo(ctx, "Assignment operator not implemented yet\n"); break; case EXPR_INVERT: left = expr->unary.child; if (!ExprResolveIntegerLookup(ctx, left, &v, lookup, lookupPriv)) return false; *val_rtrn = ~v; return true; case EXPR_UNARY_PLUS: case EXPR_NEGATE: case EXPR_NOT: left = expr->unary.child; if (!ExprResolveIntegerLookup(ctx, left, &v, lookup, lookupPriv)) log_err(ctx, "The %s operator cannot be used with a mask\n", (expr->expr.op == EXPR_NEGATE ? "-" : "!")); return false; default: log_wsgo(ctx, "Unknown operator %d in ResolveMask\n", expr->expr.op); break; } return false; } bool ExprResolveMask(struct xkb_context *ctx, const ExprDef *expr, unsigned int *mask_rtrn, const LookupEntry *values) { return ExprResolveMaskLookup(ctx, expr, mask_rtrn, SimpleLookup, values); } bool ExprResolveModMask(struct xkb_context *ctx, const ExprDef *expr, enum mod_type mod_type, const struct xkb_mod_set *mods, xkb_mod_mask_t *mask_rtrn) { LookupModMaskPriv priv = { .mods = mods, .mod_type = mod_type }; return ExprResolveMaskLookup(ctx, expr, mask_rtrn, LookupModMask, &priv); } bool ExprResolveKeySym(struct xkb_context *ctx, const ExprDef *expr, xkb_keysym_t *sym_rtrn) { int val; if (expr->expr.op == EXPR_IDENT) { const char *str = xkb_atom_text(ctx, expr->ident.ident); *sym_rtrn = xkb_keysym_from_name(str, 0); if (*sym_rtrn != XKB_KEY_NoSymbol) return true; } if (!ExprResolveInteger(ctx, expr, &val)) return false; if (val < 0 || val >= 10) return false; *sym_rtrn = XKB_KEY_0 + (xkb_keysym_t) val; return true; } bool ExprResolveMod(struct xkb_context *ctx, const ExprDef *def, enum mod_type mod_type, const struct xkb_mod_set *mods, xkb_mod_index_t *ndx_rtrn) { xkb_mod_index_t ndx; xkb_atom_t name; if (def->expr.op != EXPR_IDENT) { log_err(ctx, "Cannot resolve virtual modifier: " "found %s where a virtual modifier name was expected\n", expr_op_type_to_string(def->expr.op)); return false; } name = def->ident.ident; ndx = XkbModNameToIndex(mods, name, mod_type); if (ndx == XKB_MOD_INVALID) { log_err(ctx, "Cannot resolve virtual modifier: " "\"%s\" was not previously declared\n", xkb_atom_text(ctx, name)); return false; } *ndx_rtrn = ndx; return true; }
./CrossVul/dataset_final_sorted/CWE-400/c/bad_316_0
crossvul-cpp_data_bad_1253_0
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "wifi.h" #include "core.h" #include "usb.h" #include "base.h" #include "ps.h" #include "rtl8192c/fw_common.h" #include <linux/export.h> #include <linux/module.h> MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>"); MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>"); MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("USB basic driver for rtlwifi"); #define REALTEK_USB_VENQT_READ 0xC0 #define REALTEK_USB_VENQT_WRITE 0x40 #define REALTEK_USB_VENQT_CMD_REQ 0x05 #define REALTEK_USB_VENQT_CMD_IDX 0x00 #define MAX_USBCTRL_VENDORREQ_TIMES 10 static void usbctrl_async_callback(struct urb *urb) { if (urb) { /* free dr */ kfree(urb->setup_packet); /* free databuf */ kfree(urb->transfer_buffer); } } static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request, u16 value, u16 index, void *pdata, u16 len) { int rc; unsigned int pipe; u8 reqtype; struct usb_ctrlrequest *dr; struct urb *urb; const u16 databuf_maxlen = REALTEK_USB_VENQT_MAX_BUF_SIZE; u8 *databuf; if (WARN_ON_ONCE(len > databuf_maxlen)) len = databuf_maxlen; pipe = usb_sndctrlpipe(udev, 0); /* write_out */ reqtype = REALTEK_USB_VENQT_WRITE; dr = kzalloc(sizeof(*dr), GFP_ATOMIC); if (!dr) return -ENOMEM; databuf = kzalloc(databuf_maxlen, GFP_ATOMIC); if (!databuf) { kfree(dr); return -ENOMEM; } urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { kfree(databuf); kfree(dr); return -ENOMEM; } dr->bRequestType = reqtype; dr->bRequest = request; dr->wValue = cpu_to_le16(value); dr->wIndex = cpu_to_le16(index); dr->wLength = cpu_to_le16(len); /* data are already in little-endian order */ memcpy(databuf, pdata, len); usb_fill_control_urb(urb, udev, pipe, (unsigned char *)dr, databuf, len, usbctrl_async_callback, NULL); rc = usb_submit_urb(urb, GFP_ATOMIC); if (rc < 0) { kfree(databuf); kfree(dr); } usb_free_urb(urb); return rc; } static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request, u16 value, u16 index, void *pdata, u16 len) { unsigned int pipe; int status; u8 reqtype; int vendorreq_times = 0; static int count; pipe = usb_rcvctrlpipe(udev, 0); /* read_in */ reqtype = REALTEK_USB_VENQT_READ; do { status = usb_control_msg(udev, pipe, request, reqtype, value, index, pdata, len, 1000); if (status < 0) { /* firmware download is checksumed, don't retry */ if ((value >= FW_8192C_START_ADDRESS && value <= FW_8192C_END_ADDRESS)) break; } else { break; } } while (++vendorreq_times < MAX_USBCTRL_VENDORREQ_TIMES); if (status < 0 && count++ < 4) pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! 
status:0x%x value=0x%x\n", value, status, *(u32 *)pdata); return status; } static u32 _usb_read_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len) { struct device *dev = rtlpriv->io.dev; struct usb_device *udev = to_usb_device(dev); u8 request; u16 wvalue; u16 index; __le32 *data; unsigned long flags; spin_lock_irqsave(&rtlpriv->locks.usb_lock, flags); if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT) rtlpriv->usb_data_index = 0; data = &rtlpriv->usb_data[rtlpriv->usb_data_index]; spin_unlock_irqrestore(&rtlpriv->locks.usb_lock, flags); request = REALTEK_USB_VENQT_CMD_REQ; index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */ wvalue = (u16)addr; _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len); return le32_to_cpu(*data); } static u8 _usb_read8_sync(struct rtl_priv *rtlpriv, u32 addr) { return (u8)_usb_read_sync(rtlpriv, addr, 1); } static u16 _usb_read16_sync(struct rtl_priv *rtlpriv, u32 addr) { return (u16)_usb_read_sync(rtlpriv, addr, 2); } static u32 _usb_read32_sync(struct rtl_priv *rtlpriv, u32 addr) { return _usb_read_sync(rtlpriv, addr, 4); } static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val, u16 len) { u8 request; u16 wvalue; u16 index; __le32 data; request = REALTEK_USB_VENQT_CMD_REQ; index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */ wvalue = (u16)(addr&0x0000ffff); data = cpu_to_le32(val); _usbctrl_vendorreq_async_write(udev, request, wvalue, index, &data, len); } static void _usb_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val) { struct device *dev = rtlpriv->io.dev; _usb_write_async(to_usb_device(dev), addr, val, 1); } static void _usb_write16_async(struct rtl_priv *rtlpriv, u32 addr, u16 val) { struct device *dev = rtlpriv->io.dev; _usb_write_async(to_usb_device(dev), addr, val, 2); } static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val) { struct device *dev = rtlpriv->io.dev; _usb_write_async(to_usb_device(dev), addr, val, 4); } static void _usb_writen_sync(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 len) { struct device *dev = rtlpriv->io.dev; struct usb_device *udev = to_usb_device(dev); u8 request = REALTEK_USB_VENQT_CMD_REQ; u8 reqtype = REALTEK_USB_VENQT_WRITE; u16 wvalue; u16 index = REALTEK_USB_VENQT_CMD_IDX; int pipe = usb_sndctrlpipe(udev, 0); /* write_out */ u8 *buffer; wvalue = (u16)(addr & 0x0000ffff); buffer = kmemdup(data, len, GFP_ATOMIC); if (!buffer) return; usb_control_msg(udev, pipe, request, reqtype, wvalue, index, buffer, len, 50); kfree(buffer); } static void _rtl_usb_io_handler_init(struct device *dev, struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->io.dev = dev; mutex_init(&rtlpriv->io.bb_mutex); rtlpriv->io.write8_async = _usb_write8_async; rtlpriv->io.write16_async = _usb_write16_async; rtlpriv->io.write32_async = _usb_write32_async; rtlpriv->io.read8_sync = _usb_read8_sync; rtlpriv->io.read16_sync = _usb_read16_sync; rtlpriv->io.read32_sync = _usb_read32_sync; rtlpriv->io.writen_sync = _usb_writen_sync; } static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw) { struct rtl_priv __maybe_unused *rtlpriv = rtl_priv(hw); mutex_destroy(&rtlpriv->io.bb_mutex); } /* Default aggregation handler. Do nothing and just return the oldest skb. */ static struct sk_buff *_none_usb_tx_aggregate_hdl(struct ieee80211_hw *hw, struct sk_buff_head *list) { return skb_dequeue(list); } #define IS_HIGH_SPEED_USB(udev) \ ((USB_SPEED_HIGH == (udev)->speed) ? 
true : false) static int _rtl_usb_init_tx(struct ieee80211_hw *hw) { u32 i; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); rtlusb->max_bulk_out_size = IS_HIGH_SPEED_USB(rtlusb->udev) ? USB_HIGH_SPEED_BULK_SIZE : USB_FULL_SPEED_BULK_SIZE; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "USB Max Bulk-out Size=%d\n", rtlusb->max_bulk_out_size); for (i = 0; i < __RTL_TXQ_NUM; i++) { u32 ep_num = rtlusb->ep_map.ep_mapping[i]; if (!ep_num) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Invalid endpoint map setting!\n"); return -EINVAL; } } rtlusb->usb_tx_post_hdl = rtlpriv->cfg->usb_interface_cfg->usb_tx_post_hdl; rtlusb->usb_tx_cleanup = rtlpriv->cfg->usb_interface_cfg->usb_tx_cleanup; rtlusb->usb_tx_aggregate_hdl = (rtlpriv->cfg->usb_interface_cfg->usb_tx_aggregate_hdl) ? rtlpriv->cfg->usb_interface_cfg->usb_tx_aggregate_hdl : &_none_usb_tx_aggregate_hdl; init_usb_anchor(&rtlusb->tx_submitted); for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) { skb_queue_head_init(&rtlusb->tx_skb_queue[i]); init_usb_anchor(&rtlusb->tx_pending[i]); } return 0; } static void _rtl_rx_work(unsigned long param); static int _rtl_usb_init_rx(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw); struct rtl_usb *rtlusb = rtl_usbdev(usb_priv); rtlusb->rx_max_size = rtlpriv->cfg->usb_interface_cfg->rx_max_size; rtlusb->rx_urb_num = rtlpriv->cfg->usb_interface_cfg->rx_urb_num; rtlusb->in_ep = rtlpriv->cfg->usb_interface_cfg->in_ep_num; rtlusb->usb_rx_hdl = rtlpriv->cfg->usb_interface_cfg->usb_rx_hdl; rtlusb->usb_rx_segregate_hdl = rtlpriv->cfg->usb_interface_cfg->usb_rx_segregate_hdl; pr_info("rx_max_size %d, rx_urb_num %d, in_ep %d\n", rtlusb->rx_max_size, rtlusb->rx_urb_num, rtlusb->in_ep); init_usb_anchor(&rtlusb->rx_submitted); init_usb_anchor(&rtlusb->rx_cleanup_urbs); skb_queue_head_init(&rtlusb->rx_queue); rtlusb->rx_work_tasklet.func = _rtl_rx_work; rtlusb->rx_work_tasklet.data = (unsigned long)rtlusb; return 0; } static int _rtl_usb_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw); struct rtl_usb *rtlusb = rtl_usbdev(usb_priv); int err; u8 epidx; struct usb_interface *usb_intf = rtlusb->intf; u8 epnums = usb_intf->cur_altsetting->desc.bNumEndpoints; rtlusb->out_ep_nums = rtlusb->in_ep_nums = 0; for (epidx = 0; epidx < epnums; epidx++) { struct usb_endpoint_descriptor *pep_desc; pep_desc = &usb_intf->cur_altsetting->endpoint[epidx].desc; if (usb_endpoint_dir_in(pep_desc)) rtlusb->in_ep_nums++; else if (usb_endpoint_dir_out(pep_desc)) rtlusb->out_ep_nums++; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "USB EP(0x%02x), MaxPacketSize=%d, Interval=%d\n", pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize, pep_desc->bInterval); } if (rtlusb->in_ep_nums < rtlpriv->cfg->usb_interface_cfg->in_ep_num) { pr_err("Too few input end points found\n"); return -EINVAL; } if (rtlusb->out_ep_nums == 0) { pr_err("No output end points found\n"); return -EINVAL; } /* usb endpoint mapping */ err = rtlpriv->cfg->usb_interface_cfg->usb_endpoint_mapping(hw); rtlusb->usb_mq_to_hwq = rtlpriv->cfg->usb_interface_cfg->usb_mq_to_hwq; _rtl_usb_init_tx(hw); _rtl_usb_init_rx(hw); return err; } static void rtl_usb_init_sw(struct ieee80211_hw *hw) { struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); rtlhal->hw = hw; ppsc->inactiveps = false; 
ppsc->leisure_ps = false; ppsc->fwctrl_lps = false; ppsc->reg_fwctrl_lps = 3; ppsc->reg_max_lps_awakeintvl = 5; ppsc->fwctrl_psmode = FW_PS_DTIM_MODE; /* IBSS */ mac->beacon_interval = 100; /* AMPDU */ mac->min_space_cfg = 0; mac->max_mss_density = 0; /* set sane AMPDU defaults */ mac->current_ampdu_density = 7; mac->current_ampdu_factor = 3; /* QOS */ rtlusb->acm_method = EACMWAY2_SW; /* IRQ */ /* HIMR - turn all on */ rtlusb->irq_mask[0] = 0xFFFFFFFF; /* HIMR_EX - turn all on */ rtlusb->irq_mask[1] = 0xFFFFFFFF; rtlusb->disablehwsm = true; } static void _rtl_rx_completed(struct urb *urb); static int _rtl_prep_rx_urb(struct ieee80211_hw *hw, struct rtl_usb *rtlusb, struct urb *urb, gfp_t gfp_mask) { void *buf; buf = usb_alloc_coherent(rtlusb->udev, rtlusb->rx_max_size, gfp_mask, &urb->transfer_dma); if (!buf) { pr_err("Failed to usb_alloc_coherent!!\n"); return -ENOMEM; } usb_fill_bulk_urb(urb, rtlusb->udev, usb_rcvbulkpipe(rtlusb->udev, rtlusb->in_ep), buf, rtlusb->rx_max_size, _rtl_rx_completed, rtlusb); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; return 0; } static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 *rxdesc = skb->data; struct ieee80211_hdr *hdr; bool unicast = false; __le16 fc; struct ieee80211_rx_status rx_status = {0}; struct rtl_stats stats = { .signal = 0, .rate = 0, }; skb_pull(skb, RTL_RX_DESC_SIZE); rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb); skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift)); hdr = (struct ieee80211_hdr *)(skb->data); fc = hdr->frame_control; if (!stats.crc) { memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); if (is_broadcast_ether_addr(hdr->addr1)) { /*TODO*/; } else if (is_multicast_ether_addr(hdr->addr1)) { /*TODO*/ } else { unicast = true; rtlpriv->stats.rxbytesunicast += skb->len; } if (ieee80211_is_data(fc)) { rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX); if (unicast) rtlpriv->link_info.num_rx_inperiod++; } /* static bcn for roaming */ rtl_beacon_statistic(hw, skb); } } static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 *rxdesc = skb->data; struct ieee80211_hdr *hdr; bool unicast = false; __le16 fc; struct ieee80211_rx_status rx_status = {0}; struct rtl_stats stats = { .signal = 0, .rate = 0, }; skb_pull(skb, RTL_RX_DESC_SIZE); rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb); skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift)); hdr = (struct ieee80211_hdr *)(skb->data); fc = hdr->frame_control; if (!stats.crc) { memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); if (is_broadcast_ether_addr(hdr->addr1)) { /*TODO*/; } else if (is_multicast_ether_addr(hdr->addr1)) { /*TODO*/ } else { unicast = true; rtlpriv->stats.rxbytesunicast += skb->len; } if (ieee80211_is_data(fc)) { rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX); if (unicast) rtlpriv->link_info.num_rx_inperiod++; } /* static bcn for roaming */ rtl_beacon_statistic(hw, skb); if (likely(rtl_action_proc(hw, skb, false))) ieee80211_rx(hw, skb); else dev_kfree_skb_any(skb); } else { dev_kfree_skb_any(skb); } } static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb) { struct sk_buff *_skb; struct sk_buff_head rx_queue; struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); skb_queue_head_init(&rx_queue); if (rtlusb->usb_rx_segregate_hdl) rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue); WARN_ON(skb_queue_empty(&rx_queue)); while 
(!skb_queue_empty(&rx_queue)) { _skb = skb_dequeue(&rx_queue); _rtl_usb_rx_process_agg(hw, _skb); ieee80211_rx(hw, _skb); } } #define __RX_SKB_MAX_QUEUED 64 static void _rtl_rx_work(unsigned long param) { struct rtl_usb *rtlusb = (struct rtl_usb *)param; struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf); struct sk_buff *skb; while ((skb = skb_dequeue(&rtlusb->rx_queue))) { if (unlikely(IS_USB_STOP(rtlusb))) { dev_kfree_skb_any(skb); continue; } if (likely(!rtlusb->usb_rx_segregate_hdl)) { _rtl_usb_rx_process_noagg(hw, skb); } else { /* TO DO */ _rtl_rx_pre_process(hw, skb); pr_err("rx agg not supported\n"); } } } static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr, unsigned int len) { #if NET_IP_ALIGN != 0 unsigned int padding = 0; #endif /* make function no-op when possible */ if (NET_IP_ALIGN == 0 || len < sizeof(*hdr)) return 0; #if NET_IP_ALIGN != 0 /* alignment calculation as in lbtf_rx() / carl9170_rx_copy_data() */ /* TODO: deduplicate common code, define helper function instead? */ if (ieee80211_is_data_qos(hdr->frame_control)) { u8 *qc = ieee80211_get_qos_ctl(hdr); padding ^= NET_IP_ALIGN; /* Input might be invalid, avoid accessing memory outside * the buffer. */ if ((unsigned long)qc - (unsigned long)hdr < len && *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) padding ^= NET_IP_ALIGN; } if (ieee80211_has_a4(hdr->frame_control)) padding ^= NET_IP_ALIGN; return padding; #endif } #define __RADIO_TAP_SIZE_RSV 32 static void _rtl_rx_completed(struct urb *_urb) { struct rtl_usb *rtlusb = (struct rtl_usb *)_urb->context; int err = 0; if (unlikely(IS_USB_STOP(rtlusb))) goto free; if (likely(0 == _urb->status)) { unsigned int padding; struct sk_buff *skb; unsigned int qlen; unsigned int size = _urb->actual_length; struct ieee80211_hdr *hdr; if (size < RTL_RX_DESC_SIZE + sizeof(struct ieee80211_hdr)) { pr_err("Too short packet from bulk IN! (len: %d)\n", size); goto resubmit; } qlen = skb_queue_len(&rtlusb->rx_queue); if (qlen >= __RX_SKB_MAX_QUEUED) { pr_err("Pending RX skbuff queue full! (qlen: %d)\n", qlen); goto resubmit; } hdr = (void *)(_urb->transfer_buffer + RTL_RX_DESC_SIZE); padding = _rtl_rx_get_padding(hdr, size - RTL_RX_DESC_SIZE); skb = dev_alloc_skb(size + __RADIO_TAP_SIZE_RSV + padding); if (!skb) { pr_err("Can't allocate skb for bulk IN!\n"); goto resubmit; } _rtl_install_trx_info(rtlusb, skb, rtlusb->in_ep); /* Make sure the payload data is 4 byte aligned. */ skb_reserve(skb, padding); /* reserve some space for mac80211's radiotap */ skb_reserve(skb, __RADIO_TAP_SIZE_RSV); skb_put_data(skb, _urb->transfer_buffer, size); skb_queue_tail(&rtlusb->rx_queue, skb); tasklet_schedule(&rtlusb->rx_work_tasklet); goto resubmit; } switch (_urb->status) { /* disconnect */ case -ENOENT: case -ECONNRESET: case -ENODEV: case -ESHUTDOWN: goto free; default: break; } resubmit: usb_anchor_urb(_urb, &rtlusb->rx_submitted); err = usb_submit_urb(_urb, GFP_ATOMIC); if (unlikely(err)) { usb_unanchor_urb(_urb); goto free; } return; free: /* On some architectures, usb_free_coherent must not be called from * hardirq context. Queue urb to cleanup list. 
*/ usb_anchor_urb(_urb, &rtlusb->rx_cleanup_urbs); } #undef __RADIO_TAP_SIZE_RSV static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct urb *urb; usb_kill_anchored_urbs(&rtlusb->rx_submitted); tasklet_kill(&rtlusb->rx_work_tasklet); cancel_work_sync(&rtlpriv->works.lps_change_work); flush_workqueue(rtlpriv->works.rtl_wq); destroy_workqueue(rtlpriv->works.rtl_wq); skb_queue_purge(&rtlusb->rx_queue); while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) { usb_free_coherent(urb->dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); usb_free_urb(urb); } } static int _rtl_usb_receive(struct ieee80211_hw *hw) { struct urb *urb; int err; int i; struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); WARN_ON(0 == rtlusb->rx_urb_num); /* 1600 == 1514 + max WLAN header + rtk info */ WARN_ON(rtlusb->rx_max_size < 1600); for (i = 0; i < rtlusb->rx_urb_num; i++) { err = -ENOMEM; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) goto err_out; err = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL); if (err < 0) { pr_err("Failed to prep_rx_urb!!\n"); usb_free_urb(urb); goto err_out; } usb_anchor_urb(urb, &rtlusb->rx_submitted); err = usb_submit_urb(urb, GFP_KERNEL); if (err) goto err_out; usb_free_urb(urb); } return 0; err_out: usb_kill_anchored_urbs(&rtlusb->rx_submitted); _rtl_usb_cleanup_rx(hw); return err; } static int rtl_usb_start(struct ieee80211_hw *hw) { int err; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); err = rtlpriv->cfg->ops->hw_init(hw); if (!err) { rtl_init_rx_config(hw); /* Enable software */ SET_USB_START(rtlusb); /* should after adapter start and interrupt enable. */ set_hal_start(rtlhal); /* Start bulk IN */ err = _rtl_usb_receive(hw); } return err; } /*======================= tx =========================================*/ static void rtl_usb_cleanup(struct ieee80211_hw *hw) { u32 i; struct sk_buff *_skb; struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct ieee80211_tx_info *txinfo; /* clean up rx stuff. */ _rtl_usb_cleanup_rx(hw); /* clean up tx stuff */ for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) { while ((_skb = skb_dequeue(&rtlusb->tx_skb_queue[i]))) { rtlusb->usb_tx_cleanup(hw, _skb); txinfo = IEEE80211_SKB_CB(_skb); ieee80211_tx_info_clear_status(txinfo); txinfo->flags |= IEEE80211_TX_STAT_ACK; ieee80211_tx_status_irqsafe(hw, _skb); } usb_kill_anchored_urbs(&rtlusb->tx_pending[i]); } usb_kill_anchored_urbs(&rtlusb->tx_submitted); } /* We may add some struct into struct rtl_usb later. Do deinit here. */ static void rtl_usb_deinit(struct ieee80211_hw *hw) { rtl_usb_cleanup(hw); } static void rtl_usb_stop(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct urb *urb; /* should after adapter start and interrupt enable. 
*/ set_hal_stop(rtlhal); cancel_work_sync(&rtlpriv->works.fill_h2c_cmd); /* Enable software */ SET_USB_STOP(rtlusb); /* free pre-allocated URBs from rtl_usb_start() */ usb_kill_anchored_urbs(&rtlusb->rx_submitted); tasklet_kill(&rtlusb->rx_work_tasklet); cancel_work_sync(&rtlpriv->works.lps_change_work); flush_workqueue(rtlpriv->works.rtl_wq); skb_queue_purge(&rtlusb->rx_queue); while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) { usb_free_coherent(urb->dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); usb_free_urb(urb); } rtlpriv->cfg->ops->hw_disable(hw); } static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb) { int err; struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); usb_anchor_urb(_urb, &rtlusb->tx_submitted); err = usb_submit_urb(_urb, GFP_ATOMIC); if (err < 0) { struct sk_buff *skb; pr_err("Failed to submit urb\n"); usb_unanchor_urb(_urb); skb = (struct sk_buff *)_urb->context; kfree_skb(skb); } usb_free_urb(_urb); } static int _usb_tx_post(struct ieee80211_hw *hw, struct urb *urb, struct sk_buff *skb) { struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct ieee80211_tx_info *txinfo; rtlusb->usb_tx_post_hdl(hw, urb, skb); skb_pull(skb, RTL_TX_HEADER_SIZE); txinfo = IEEE80211_SKB_CB(skb); ieee80211_tx_info_clear_status(txinfo); txinfo->flags |= IEEE80211_TX_STAT_ACK; if (urb->status) { pr_err("Urb has error status 0x%X\n", urb->status); goto out; } /* TODO: statistics */ out: ieee80211_tx_status_irqsafe(hw, skb); return urb->status; } static void _rtl_tx_complete(struct urb *urb) { struct sk_buff *skb = (struct sk_buff *)urb->context; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0]; struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf); int err; if (unlikely(IS_USB_STOP(rtlusb))) return; err = _usb_tx_post(hw, urb, skb); if (err) { /* Ignore error and keep issuiing other urbs */ return; } } static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw, struct sk_buff *skb, u32 ep_num) { struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct urb *_urb; WARN_ON(NULL == skb); _urb = usb_alloc_urb(0, GFP_ATOMIC); if (!_urb) { kfree_skb(skb); return NULL; } _rtl_install_trx_info(rtlusb, skb, ep_num); usb_fill_bulk_urb(_urb, rtlusb->udev, usb_sndbulkpipe(rtlusb->udev, ep_num), skb->data, skb->len, _rtl_tx_complete, skb); _urb->transfer_flags |= URB_ZERO_PACKET; return _urb; } static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, enum rtl_txq qnum) { struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); u32 ep_num; struct urb *_urb = NULL; struct sk_buff *_skb = NULL; WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl); if (unlikely(IS_USB_STOP(rtlusb))) { pr_err("USB device is stopping...\n"); kfree_skb(skb); return; } ep_num = rtlusb->ep_map.ep_mapping[qnum]; _skb = skb; _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num); if (unlikely(!_urb)) { pr_err("Can't allocate urb. 
Drop skb!\n"); kfree_skb(skb); return; } _rtl_submit_tx_urb(hw, _urb); } static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct sk_buff *skb, u16 hw_queue) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct rtl_tx_desc *pdesc = NULL; struct rtl_tcb_desc tcb_desc; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); __le16 fc = hdr->frame_control; u8 *pda_addr = hdr->addr1; memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); if (ieee80211_is_auth(fc)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n"); } if (rtlpriv->psc.sw_ps_enabled) { if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) && !ieee80211_has_pm(fc)) hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); } rtl_action_proc(hw, skb, true); if (is_multicast_ether_addr(pda_addr)) rtlpriv->stats.txbytesmulticast += skb->len; else if (is_broadcast_ether_addr(pda_addr)) rtlpriv->stats.txbytesbroadcast += skb->len; else rtlpriv->stats.txbytesunicast += skb->len; rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, NULL, info, sta, skb, hw_queue, &tcb_desc); if (ieee80211_is_data(fc)) rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX); } static int rtl_usb_tx(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct sk_buff *skb, struct rtl_tcb_desc *dummy) { struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); __le16 fc = hdr->frame_control; u16 hw_queue; if (unlikely(is_hal_stop(rtlhal))) goto err_free; hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb)); _rtl_usb_tx_preprocess(hw, sta, skb, hw_queue); _rtl_usb_transmit(hw, skb, hw_queue); return NETDEV_TX_OK; err_free: dev_kfree_skb_any(skb); return NETDEV_TX_OK; } static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct sk_buff *skb) { return false; } static void rtl_fill_h2c_cmd_work_callback(struct work_struct *work) { struct rtl_works *rtlworks = container_of(work, struct rtl_works, fill_h2c_cmd); struct ieee80211_hw *hw = rtlworks->hw; struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->cfg->ops->fill_h2c_cmd(hw, H2C_RA_MASK, 5, rtlpriv->rate_mask); } static const struct rtl_intf_ops rtl_usb_ops = { .adapter_start = rtl_usb_start, .adapter_stop = rtl_usb_stop, .adapter_tx = rtl_usb_tx, .waitq_insert = rtl_usb_tx_chk_waitq_insert, }; int rtl_usb_probe(struct usb_interface *intf, const struct usb_device_id *id, struct rtl_hal_cfg *rtl_hal_cfg) { int err; struct ieee80211_hw *hw = NULL; struct rtl_priv *rtlpriv = NULL; struct usb_device *udev; struct rtl_usb_priv *usb_priv; hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) + sizeof(struct rtl_usb_priv), &rtl_ops); if (!hw) { WARN_ONCE(true, "rtl_usb: ieee80211 alloc failed\n"); return -ENOMEM; } rtlpriv = hw->priv; rtlpriv->hw = hw; rtlpriv->usb_data = kcalloc(RTL_USB_MAX_RX_COUNT, sizeof(u32), GFP_KERNEL); if (!rtlpriv->usb_data) return -ENOMEM; /* this spin lock must be initialized early */ spin_lock_init(&rtlpriv->locks.usb_lock); INIT_WORK(&rtlpriv->works.fill_h2c_cmd, rtl_fill_h2c_cmd_work_callback); INIT_WORK(&rtlpriv->works.lps_change_work, rtl_lps_change_work_callback); rtlpriv->usb_data_index = 0; init_completion(&rtlpriv->firmware_loading_complete); SET_IEEE80211_DEV(hw, &intf->dev); udev = interface_to_usbdev(intf); usb_get_dev(udev); usb_priv = rtl_usbpriv(hw); memset(usb_priv, 0, sizeof(*usb_priv)); usb_priv->dev.intf = intf; 
usb_priv->dev.udev = udev; usb_set_intfdata(intf, hw); /* init cfg & intf_ops */ rtlpriv->rtlhal.interface = INTF_USB; rtlpriv->cfg = rtl_hal_cfg; rtlpriv->intf_ops = &rtl_usb_ops; /* Init IO handler */ _rtl_usb_io_handler_init(&udev->dev, hw); rtlpriv->cfg->ops->read_chip_version(hw); /*like read eeprom and so on */ rtlpriv->cfg->ops->read_eeprom_info(hw); err = _rtl_usb_init(hw); if (err) goto error_out2; rtl_usb_init_sw(hw); /* Init mac80211 sw */ err = rtl_init_core(hw); if (err) { pr_err("Can't allocate sw for mac80211\n"); goto error_out2; } if (rtlpriv->cfg->ops->init_sw_vars(hw)) { pr_err("Can't init_sw_vars\n"); goto error_out; } rtlpriv->cfg->ops->init_sw_leds(hw); err = ieee80211_register_hw(hw); if (err) { pr_err("Can't register mac80211 hw.\n"); err = -ENODEV; goto error_out; } rtlpriv->mac80211.mac80211_registered = 1; set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); return 0; error_out: rtl_deinit_core(hw); error_out2: _rtl_usb_io_handler_release(hw); usb_put_dev(udev); complete(&rtlpriv->firmware_loading_complete); return -ENODEV; } EXPORT_SYMBOL(rtl_usb_probe); void rtl_usb_disconnect(struct usb_interface *intf) { struct ieee80211_hw *hw = usb_get_intfdata(intf); struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw)); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); if (unlikely(!rtlpriv)) return; /* just in case driver is removed before firmware callback */ wait_for_completion(&rtlpriv->firmware_loading_complete); clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); /*ieee80211_unregister_hw will call ops_stop */ if (rtlmac->mac80211_registered == 1) { ieee80211_unregister_hw(hw); rtlmac->mac80211_registered = 0; } else { rtl_deinit_deferred_work(hw, false); rtlpriv->intf_ops->adapter_stop(hw); } /*deinit rfkill */ /* rtl_deinit_rfkill(hw); */ rtl_usb_deinit(hw); rtl_deinit_core(hw); kfree(rtlpriv->usb_data); rtlpriv->cfg->ops->deinit_sw_leds(hw); rtlpriv->cfg->ops->deinit_sw_vars(hw); _rtl_usb_io_handler_release(hw); usb_put_dev(rtlusb->udev); usb_set_intfdata(intf, NULL); ieee80211_free_hw(hw); } EXPORT_SYMBOL(rtl_usb_disconnect); int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message) { return 0; } EXPORT_SYMBOL(rtl_usb_suspend); int rtl_usb_resume(struct usb_interface *pusb_intf) { return 0; } EXPORT_SYMBOL(rtl_usb_resume);
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1253_0
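In the rtl_usb_probe() listing above, ieee80211_alloc_hw() succeeds before rtlpriv->usb_data is allocated, yet the kcalloc() failure branch simply returns -ENOMEM, so the already-allocated hardware structure is not released on that path; the later error_out and error_out2 labels show the unwind order the rest of the function expects. The snippet below is only a standalone sketch of that unwind discipline in plain C, not driver code: every name in it (demo_ctx, demo_open, demo_close) is hypothetical, and the two allocations merely stand in for the two objects created early in the probe path.

/*
 * Illustration only: goto-based error unwinding for a two-step allocation,
 * compilable with a plain libc. All names are made up for this sketch.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_ctx {
    char *hw;        /* stands in for the ieee80211_hw allocation */
    unsigned *data;  /* stands in for rtlpriv->usb_data           */
};

static struct demo_ctx *demo_open(size_t nwords)
{
    struct demo_ctx *ctx = calloc(1, sizeof(*ctx));
    if (!ctx)
        return NULL;

    ctx->hw = malloc(64);
    if (!ctx->hw)
        goto err_free_ctx;

    ctx->data = calloc(nwords, sizeof(*ctx->data));
    if (!ctx->data)
        goto err_free_hw;        /* unwind the earlier allocation too */

    return ctx;

err_free_hw:
    free(ctx->hw);
err_free_ctx:
    free(ctx);
    return NULL;                 /* nothing allocated leaks on failure */
}

static void demo_close(struct demo_ctx *ctx)
{
    if (!ctx)
        return;
    free(ctx->data);
    free(ctx->hw);
    free(ctx);
}

int main(void)
{
    struct demo_ctx *ctx = demo_open(16);
    if (!ctx) {
        fprintf(stderr, "demo_open failed\n");
        return 1;
    }
    demo_close(ctx);
    return 0;
}

The point of the goto ladder is that each failure label frees exactly the objects allocated before it, so a failure at any step leaves nothing behind.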
crossvul-cpp_data_good_2563_1
/* +----------------------------------------------------------------------+ | PHP Version 7 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2017 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Authors: Rasmus Lerdorf <rasmus@lerdorf.on.ca> | | Zeev Suraski <zeev@zend.com> | +----------------------------------------------------------------------+ */ /* $Id$ */ #include <stdio.h> #include "php.h" #include "ext/standard/php_standard.h" #include "ext/standard/credits.h" #include "zend_smart_str.h" #include "php_variables.h" #include "php_globals.h" #include "php_content_types.h" #include "SAPI.h" #include "zend_globals.h" #ifdef PHP_WIN32 # include "win32/php_inttypes.h" #endif /* for systems that need to override reading of environment variables */ void _php_import_environment_variables(zval *array_ptr); PHPAPI void (*php_import_environment_variables)(zval *array_ptr) = _php_import_environment_variables; PHPAPI void php_register_variable(char *var, char *strval, zval *track_vars_array) { php_register_variable_safe(var, strval, strlen(strval), track_vars_array); } /* binary-safe version */ PHPAPI void php_register_variable_safe(char *var, char *strval, size_t str_len, zval *track_vars_array) { zval new_entry; assert(strval != NULL); /* Prepare value */ ZVAL_NEW_STR(&new_entry, zend_string_init(strval, str_len, 0)); php_register_variable_ex(var, &new_entry, track_vars_array); } PHPAPI void php_register_variable_ex(char *var_name, zval *val, zval *track_vars_array) { char *p = NULL; char *ip = NULL; /* index pointer */ char *index; char *var, *var_orig; size_t var_len, index_len; zval gpc_element, *gpc_element_p; zend_bool is_array = 0; HashTable *symtable1 = NULL; ALLOCA_FLAG(use_heap) assert(var_name != NULL); if (track_vars_array && Z_TYPE_P(track_vars_array) == IS_ARRAY) { symtable1 = Z_ARRVAL_P(track_vars_array); } if (!symtable1) { /* Nothing to do */ zval_dtor(val); return; } /* ignore leading spaces in the variable name */ while (*var_name && *var_name==' ') { var_name++; } /* * Prepare variable name */ var_len = strlen(var_name); var = var_orig = do_alloca(var_len + 1, use_heap); memcpy(var_orig, var_name, var_len + 1); /* ensure that we don't have spaces or dots in the variable name (not binary safe) */ for (p = var; *p; p++) { if (*p == ' ' || *p == '.') { *p='_'; } else if (*p == '[') { is_array = 1; ip = p; *p = 0; break; } } var_len = p - var; if (var_len==0) { /* empty variable name, or variable name with a space in it */ zval_dtor(val); free_alloca(var_orig, use_heap); return; } /* GLOBALS hijack attempt, reject parameter */ if (symtable1 == &EG(symbol_table) && var_len == sizeof("GLOBALS")-1 && !memcmp(var, "GLOBALS", sizeof("GLOBALS")-1)) { zval_dtor(val); free_alloca(var_orig, use_heap); return; } index = var; index_len = var_len; if (is_array) { int nest_level = 0; while (1) { char *index_s; size_t new_idx_len = 0; if(++nest_level > PG(max_input_nesting_level)) { HashTable *ht; /* too many 
levels of nesting */ if (track_vars_array) { ht = Z_ARRVAL_P(track_vars_array); zend_symtable_str_del(ht, var, var_len); } zval_dtor(val); /* do not output the error message to the screen, this helps us to to avoid "information disclosure" */ if (!PG(display_errors)) { php_error_docref(NULL, E_WARNING, "Input variable nesting level exceeded " ZEND_LONG_FMT ". To increase the limit change max_input_nesting_level in php.ini.", PG(max_input_nesting_level)); } free_alloca(var_orig, use_heap); return; } ip++; index_s = ip; if (isspace(*ip)) { ip++; } if (*ip==']') { index_s = NULL; } else { ip = strchr(ip, ']'); if (!ip) { /* PHP variables cannot contain '[' in their names, so we replace the character with a '_' */ *(index_s - 1) = '_'; index_len = 0; if (index) { index_len = strlen(index); } goto plain_var; return; } *ip = 0; new_idx_len = strlen(index_s); } if (!index) { array_init(&gpc_element); if ((gpc_element_p = zend_hash_next_index_insert(symtable1, &gpc_element)) == NULL) { zval_ptr_dtor(&gpc_element); zval_dtor(val); free_alloca(var_orig, use_heap); return; } } else { gpc_element_p = zend_symtable_str_find(symtable1, index, index_len); if (!gpc_element_p) { zval tmp; array_init(&tmp); gpc_element_p = zend_symtable_str_update_ind(symtable1, index, index_len, &tmp); } else { if (Z_TYPE_P(gpc_element_p) == IS_INDIRECT) { gpc_element_p = Z_INDIRECT_P(gpc_element_p); } if (Z_TYPE_P(gpc_element_p) != IS_ARRAY) { zval_ptr_dtor(gpc_element_p); array_init(gpc_element_p); } } } symtable1 = Z_ARRVAL_P(gpc_element_p); /* ip pointed to the '[' character, now obtain the key */ index = index_s; index_len = new_idx_len; ip++; if (*ip == '[') { is_array = 1; *ip = 0; } else { goto plain_var; } } } else { plain_var: ZVAL_COPY_VALUE(&gpc_element, val); if (!index) { if ((gpc_element_p = zend_hash_next_index_insert(symtable1, &gpc_element)) == NULL) { zval_ptr_dtor(&gpc_element); } } else { /* * According to rfc2965, more specific paths are listed above the less specific ones. * If we encounter a duplicate cookie name, we should skip it, since it is not possible * to have the same (plain text) cookie name for the same path and we should not overwrite * more specific cookies with the less specific ones. 
*/ if (Z_TYPE(PG(http_globals)[TRACK_VARS_COOKIE]) != IS_UNDEF && symtable1 == Z_ARRVAL(PG(http_globals)[TRACK_VARS_COOKIE]) && zend_symtable_str_exists(symtable1, index, index_len)) { zval_ptr_dtor(&gpc_element); } else { gpc_element_p = zend_symtable_str_update_ind(symtable1, index, index_len, &gpc_element); } } } free_alloca(var_orig, use_heap); } typedef struct post_var_data { smart_str str; char *ptr; char *end; uint64_t cnt; /* Bytes in ptr that have already been scanned for '&' */ size_t already_scanned; } post_var_data_t; static zend_bool add_post_var(zval *arr, post_var_data_t *var, zend_bool eof) { char *start, *ksep, *vsep, *val; size_t klen, vlen; size_t new_vlen; if (var->ptr >= var->end) { return 0; } start = var->ptr + var->already_scanned; vsep = memchr(start, '&', var->end - start); if (!vsep) { if (!eof) { var->already_scanned = var->end - var->ptr; return 0; } else { vsep = var->end; } } ksep = memchr(var->ptr, '=', vsep - var->ptr); if (ksep) { *ksep = '\0'; /* "foo=bar&" or "foo=&" */ klen = ksep - var->ptr; vlen = vsep - ++ksep; } else { ksep = ""; /* "foo&" */ klen = vsep - var->ptr; vlen = 0; } php_url_decode(var->ptr, klen); val = estrndup(ksep, vlen); if (vlen) { vlen = php_url_decode(val, vlen); } if (sapi_module.input_filter(PARSE_POST, var->ptr, &val, vlen, &new_vlen)) { php_register_variable_safe(var->ptr, val, new_vlen, arr); } efree(val); var->ptr = vsep + (vsep != var->end); var->already_scanned = 0; return 1; } static inline int add_post_vars(zval *arr, post_var_data_t *vars, zend_bool eof) { uint64_t max_vars = PG(max_input_vars); vars->ptr = ZSTR_VAL(vars->str.s); vars->end = ZSTR_VAL(vars->str.s) + ZSTR_LEN(vars->str.s); while (add_post_var(arr, vars, eof)) { if (++vars->cnt > max_vars) { php_error_docref(NULL, E_WARNING, "Input variables exceeded %" PRIu64 ". 
" "To increase the limit change max_input_vars in php.ini.", max_vars); return FAILURE; } } if (!eof) { memmove(ZSTR_VAL(vars->str.s), vars->ptr, ZSTR_LEN(vars->str.s) = vars->end - vars->ptr); } return SUCCESS; } #ifdef PHP_WIN32 #define SAPI_POST_HANDLER_BUFSIZ 16384 #else # define SAPI_POST_HANDLER_BUFSIZ BUFSIZ #endif SAPI_API SAPI_POST_HANDLER_FUNC(php_std_post_handler) { zval *arr = (zval *) arg; php_stream *s = SG(request_info).request_body; post_var_data_t post_data; if (s && SUCCESS == php_stream_rewind(s)) { memset(&post_data, 0, sizeof(post_data)); while (!php_stream_eof(s)) { char buf[SAPI_POST_HANDLER_BUFSIZ] = {0}; size_t len = php_stream_read(s, buf, SAPI_POST_HANDLER_BUFSIZ); if (len && len != (size_t) -1) { smart_str_appendl(&post_data.str, buf, len); if (SUCCESS != add_post_vars(arr, &post_data, 0)) { smart_str_free(&post_data.str); return; } } if (len != SAPI_POST_HANDLER_BUFSIZ){ break; } } if (post_data.str.s) { add_post_vars(arr, &post_data, 1); smart_str_free(&post_data.str); } } } #undef SAPI_POST_HANDLER_BUFSIZ SAPI_API SAPI_INPUT_FILTER_FUNC(php_default_input_filter) { /* TODO: check .ini setting here and apply user-defined input filter */ if(new_val_len) *new_val_len = val_len; return 1; } SAPI_API SAPI_TREAT_DATA_FUNC(php_default_treat_data) { char *res = NULL, *var, *val, *separator = NULL; const char *c_var; zval array; int free_buffer = 0; char *strtok_buf = NULL; zend_long count = 0; ZVAL_UNDEF(&array); switch (arg) { case PARSE_POST: case PARSE_GET: case PARSE_COOKIE: array_init(&array); switch (arg) { case PARSE_POST: zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_POST]); ZVAL_COPY_VALUE(&PG(http_globals)[TRACK_VARS_POST], &array); break; case PARSE_GET: zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_GET]); ZVAL_COPY_VALUE(&PG(http_globals)[TRACK_VARS_GET], &array); break; case PARSE_COOKIE: zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_COOKIE]); ZVAL_COPY_VALUE(&PG(http_globals)[TRACK_VARS_COOKIE], &array); break; } break; default: ZVAL_COPY_VALUE(&array, destArray); break; } if (arg == PARSE_POST) { sapi_handle_post(&array); return; } if (arg == PARSE_GET) { /* GET data */ c_var = SG(request_info).query_string; if (c_var && *c_var) { res = (char *) estrdup(c_var); free_buffer = 1; } else { free_buffer = 0; } } else if (arg == PARSE_COOKIE) { /* Cookie data */ c_var = SG(request_info).cookie_data; if (c_var && *c_var) { res = (char *) estrdup(c_var); free_buffer = 1; } else { free_buffer = 0; } } else if (arg == PARSE_STRING) { /* String data */ res = str; free_buffer = 1; } if (!res) { return; } switch (arg) { case PARSE_GET: case PARSE_STRING: separator = (char *) estrdup(PG(arg_separator).input); break; case PARSE_COOKIE: separator = ";\0"; break; } var = php_strtok_r(res, separator, &strtok_buf); while (var) { val = strchr(var, '='); if (arg == PARSE_COOKIE) { /* Remove leading spaces from cookie names, needed for multi-cookie header where ; can be followed by a space */ while (isspace(*var)) { var++; } if (var == val || *var == '\0') { goto next_cookie; } } if (++count > PG(max_input_vars)) { php_error_docref(NULL, E_WARNING, "Input variables exceeded " ZEND_LONG_FMT ". 
To increase the limit change max_input_vars in php.ini.", PG(max_input_vars)); break; } if (val) { /* have a value */ size_t val_len; size_t new_val_len; *val++ = '\0'; php_url_decode(var, strlen(var)); val_len = php_url_decode(val, strlen(val)); val = estrndup(val, val_len); if (sapi_module.input_filter(arg, var, &val, val_len, &new_val_len)) { php_register_variable_safe(var, val, new_val_len, &array); } efree(val); } else { size_t val_len; size_t new_val_len; php_url_decode(var, strlen(var)); val_len = 0; val = estrndup("", val_len); if (sapi_module.input_filter(arg, var, &val, val_len, &new_val_len)) { php_register_variable_safe(var, val, new_val_len, &array); } efree(val); } next_cookie: var = php_strtok_r(NULL, separator, &strtok_buf); } if (arg != PARSE_COOKIE) { efree(separator); } if (free_buffer) { efree(res); } } void _php_import_environment_variables(zval *array_ptr) { char buf[128]; char **env, *p, *t = buf; size_t alloc_size = sizeof(buf); unsigned long nlen; /* ptrdiff_t is not portable */ for (env = environ; env != NULL && *env != NULL; env++) { p = strchr(*env, '='); if (!p) { /* malformed entry? */ continue; } nlen = p - *env; if (nlen >= alloc_size) { alloc_size = nlen + 64; t = (t == buf ? emalloc(alloc_size): erealloc(t, alloc_size)); } memcpy(t, *env, nlen); t[nlen] = '\0'; php_register_variable(t, p + 1, array_ptr); } if (t != buf && t != NULL) { efree(t); } } zend_bool php_std_auto_global_callback(char *name, uint name_len) { zend_printf("%s\n", name); return 0; /* don't rearm */ } /* {{{ php_build_argv */ PHPAPI void php_build_argv(char *s, zval *track_vars_array) { zval arr, argc, tmp; int count = 0; char *ss, *space; if (!(SG(request_info).argc || track_vars_array)) { return; } array_init(&arr); /* Prepare argv */ if (SG(request_info).argc) { /* are we in cli sapi? 
*/ int i; for (i = 0; i < SG(request_info).argc; i++) { ZVAL_STRING(&tmp, SG(request_info).argv[i]); if (zend_hash_next_index_insert(Z_ARRVAL(arr), &tmp) == NULL) { zend_string_free(Z_STR(tmp)); } } } else if (s && *s) { ss = s; while (ss) { space = strchr(ss, '+'); if (space) { *space = '\0'; } /* auto-type */ ZVAL_STRING(&tmp, ss); count++; if (zend_hash_next_index_insert(Z_ARRVAL(arr), &tmp) == NULL) { zend_string_free(Z_STR(tmp)); } if (space) { *space = '+'; ss = space + 1; } else { ss = space; } } } /* prepare argc */ if (SG(request_info).argc) { ZVAL_LONG(&argc, SG(request_info).argc); } else { ZVAL_LONG(&argc, count); } if (SG(request_info).argc) { Z_ADDREF(arr); zend_hash_str_update(&EG(symbol_table), "argv", sizeof("argv")-1, &arr); zend_hash_str_add(&EG(symbol_table), "argc", sizeof("argc")-1, &argc); } if (track_vars_array && Z_TYPE_P(track_vars_array) == IS_ARRAY) { Z_ADDREF(arr); zend_hash_str_update(Z_ARRVAL_P(track_vars_array), "argv", sizeof("argv")-1, &arr); zend_hash_str_update(Z_ARRVAL_P(track_vars_array), "argc", sizeof("argc")-1, &argc); } zval_ptr_dtor(&arr); } /* }}} */ /* {{{ php_register_server_variables */ static inline void php_register_server_variables(void) { zval request_time_float, request_time_long; zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_SERVER]); array_init(&PG(http_globals)[TRACK_VARS_SERVER]); /* Server variables */ if (sapi_module.register_server_variables) { sapi_module.register_server_variables(&PG(http_globals)[TRACK_VARS_SERVER]); } /* PHP Authentication support */ if (SG(request_info).auth_user) { php_register_variable("PHP_AUTH_USER", SG(request_info).auth_user, &PG(http_globals)[TRACK_VARS_SERVER]); } if (SG(request_info).auth_password) { php_register_variable("PHP_AUTH_PW", SG(request_info).auth_password, &PG(http_globals)[TRACK_VARS_SERVER]); } if (SG(request_info).auth_digest) { php_register_variable("PHP_AUTH_DIGEST", SG(request_info).auth_digest, &PG(http_globals)[TRACK_VARS_SERVER]); } /* store request init time */ ZVAL_DOUBLE(&request_time_float, sapi_get_request_time()); php_register_variable_ex("REQUEST_TIME_FLOAT", &request_time_float, &PG(http_globals)[TRACK_VARS_SERVER]); ZVAL_LONG(&request_time_long, zend_dval_to_lval(Z_DVAL(request_time_float))); php_register_variable_ex("REQUEST_TIME", &request_time_long, &PG(http_globals)[TRACK_VARS_SERVER]); } /* }}} */ /* {{{ php_autoglobal_merge */ static void php_autoglobal_merge(HashTable *dest, HashTable *src) { zval *src_entry, *dest_entry; zend_string *string_key; zend_ulong num_key; int globals_check = (dest == (&EG(symbol_table))); ZEND_HASH_FOREACH_KEY_VAL(src, num_key, string_key, src_entry) { if (Z_TYPE_P(src_entry) != IS_ARRAY || (string_key && (dest_entry = zend_hash_find(dest, string_key)) == NULL) || (string_key == NULL && (dest_entry = zend_hash_index_find(dest, num_key)) == NULL) || Z_TYPE_P(dest_entry) != IS_ARRAY) { if (Z_REFCOUNTED_P(src_entry)) { Z_ADDREF_P(src_entry); } if (string_key) { if (!globals_check || ZSTR_LEN(string_key) != sizeof("GLOBALS") - 1 || memcmp(ZSTR_VAL(string_key), "GLOBALS", sizeof("GLOBALS") - 1)) { zend_hash_update(dest, string_key, src_entry); } else if (Z_REFCOUNTED_P(src_entry)) { Z_DELREF_P(src_entry); } } else { zend_hash_index_update(dest, num_key, src_entry); } } else { SEPARATE_ARRAY(dest_entry); php_autoglobal_merge(Z_ARRVAL_P(dest_entry), Z_ARRVAL_P(src_entry)); } } ZEND_HASH_FOREACH_END(); } /* }}} */ /* {{{ php_hash_environment */ PHPAPI int php_hash_environment(void) { memset(PG(http_globals), 0, sizeof(PG(http_globals))); 
zend_activate_auto_globals(); if (PG(register_argc_argv)) { php_build_argv(SG(request_info).query_string, &PG(http_globals)[TRACK_VARS_SERVER]); } return SUCCESS; } /* }}} */ static zend_bool php_auto_globals_create_get(zend_string *name) { if (PG(variables_order) && (strchr(PG(variables_order),'G') || strchr(PG(variables_order),'g'))) { sapi_module.treat_data(PARSE_GET, NULL, NULL); } else { zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_GET]); array_init(&PG(http_globals)[TRACK_VARS_GET]); } zend_hash_update(&EG(symbol_table), name, &PG(http_globals)[TRACK_VARS_GET]); Z_ADDREF(PG(http_globals)[TRACK_VARS_GET]); return 0; /* don't rearm */ } static zend_bool php_auto_globals_create_post(zend_string *name) { if (PG(variables_order) && (strchr(PG(variables_order),'P') || strchr(PG(variables_order),'p')) && !SG(headers_sent) && SG(request_info).request_method && !strcasecmp(SG(request_info).request_method, "POST")) { sapi_module.treat_data(PARSE_POST, NULL, NULL); } else { zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_POST]); array_init(&PG(http_globals)[TRACK_VARS_POST]); } zend_hash_update(&EG(symbol_table), name, &PG(http_globals)[TRACK_VARS_POST]); Z_ADDREF(PG(http_globals)[TRACK_VARS_POST]); return 0; /* don't rearm */ } static zend_bool php_auto_globals_create_cookie(zend_string *name) { if (PG(variables_order) && (strchr(PG(variables_order),'C') || strchr(PG(variables_order),'c'))) { sapi_module.treat_data(PARSE_COOKIE, NULL, NULL); } else { zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_COOKIE]); array_init(&PG(http_globals)[TRACK_VARS_COOKIE]); } zend_hash_update(&EG(symbol_table), name, &PG(http_globals)[TRACK_VARS_COOKIE]); Z_ADDREF(PG(http_globals)[TRACK_VARS_COOKIE]); return 0; /* don't rearm */ } static zend_bool php_auto_globals_create_files(zend_string *name) { if (Z_TYPE(PG(http_globals)[TRACK_VARS_FILES]) == IS_UNDEF) { array_init(&PG(http_globals)[TRACK_VARS_FILES]); } zend_hash_update(&EG(symbol_table), name, &PG(http_globals)[TRACK_VARS_FILES]); Z_ADDREF(PG(http_globals)[TRACK_VARS_FILES]); return 0; /* don't rearm */ } /* Upgly hack to fix HTTP_PROXY issue, see bug #72573 */ static void check_http_proxy(HashTable *var_table) { if (zend_hash_str_exists(var_table, "HTTP_PROXY", sizeof("HTTP_PROXY")-1)) { char *local_proxy = getenv("HTTP_PROXY"); if (!local_proxy) { zend_hash_str_del(var_table, "HTTP_PROXY", sizeof("HTTP_PROXY")-1); } else { zval local_zval; ZVAL_STRING(&local_zval, local_proxy); zend_hash_str_update(var_table, "HTTP_PROXY", sizeof("HTTP_PROXY")-1, &local_zval); } } } static zend_bool php_auto_globals_create_server(zend_string *name) { if (PG(variables_order) && (strchr(PG(variables_order),'S') || strchr(PG(variables_order),'s'))) { php_register_server_variables(); if (PG(register_argc_argv)) { if (SG(request_info).argc) { zval *argc, *argv; if ((argc = zend_hash_str_find_ind(&EG(symbol_table), "argc", sizeof("argc")-1)) != NULL && (argv = zend_hash_str_find_ind(&EG(symbol_table), "argv", sizeof("argv")-1)) != NULL) { Z_ADDREF_P(argv); zend_hash_str_update(Z_ARRVAL(PG(http_globals)[TRACK_VARS_SERVER]), "argv", sizeof("argv")-1, argv); zend_hash_str_update(Z_ARRVAL(PG(http_globals)[TRACK_VARS_SERVER]), "argc", sizeof("argc")-1, argc); } } else { php_build_argv(SG(request_info).query_string, &PG(http_globals)[TRACK_VARS_SERVER]); } } } else { zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_SERVER]); array_init(&PG(http_globals)[TRACK_VARS_SERVER]); } check_http_proxy(Z_ARRVAL(PG(http_globals)[TRACK_VARS_SERVER])); zend_hash_update(&EG(symbol_table), name, 
&PG(http_globals)[TRACK_VARS_SERVER]); Z_ADDREF(PG(http_globals)[TRACK_VARS_SERVER]); return 0; /* don't rearm */ } static zend_bool php_auto_globals_create_env(zend_string *name) { zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_ENV]); array_init(&PG(http_globals)[TRACK_VARS_ENV]); if (PG(variables_order) && (strchr(PG(variables_order),'E') || strchr(PG(variables_order),'e'))) { php_import_environment_variables(&PG(http_globals)[TRACK_VARS_ENV]); } check_http_proxy(Z_ARRVAL(PG(http_globals)[TRACK_VARS_ENV])); zend_hash_update(&EG(symbol_table), name, &PG(http_globals)[TRACK_VARS_ENV]); Z_ADDREF(PG(http_globals)[TRACK_VARS_ENV]); return 0; /* don't rearm */ } static zend_bool php_auto_globals_create_request(zend_string *name) { zval form_variables; unsigned char _gpc_flags[3] = {0, 0, 0}; char *p; array_init(&form_variables); if (PG(request_order) != NULL) { p = PG(request_order); } else { p = PG(variables_order); } for (; p && *p; p++) { switch (*p) { case 'g': case 'G': if (!_gpc_flags[0]) { php_autoglobal_merge(Z_ARRVAL(form_variables), Z_ARRVAL(PG(http_globals)[TRACK_VARS_GET])); _gpc_flags[0] = 1; } break; case 'p': case 'P': if (!_gpc_flags[1]) { php_autoglobal_merge(Z_ARRVAL(form_variables), Z_ARRVAL(PG(http_globals)[TRACK_VARS_POST])); _gpc_flags[1] = 1; } break; case 'c': case 'C': if (!_gpc_flags[2]) { php_autoglobal_merge(Z_ARRVAL(form_variables), Z_ARRVAL(PG(http_globals)[TRACK_VARS_COOKIE])); _gpc_flags[2] = 1; } break; } } zend_hash_update(&EG(symbol_table), name, &form_variables); return 0; } void php_startup_auto_globals(void) { zend_register_auto_global(zend_string_init("_GET", sizeof("_GET")-1, 1), 0, php_auto_globals_create_get); zend_register_auto_global(zend_string_init("_POST", sizeof("_POST")-1, 1), 0, php_auto_globals_create_post); zend_register_auto_global(zend_string_init("_COOKIE", sizeof("_COOKIE")-1, 1), 0, php_auto_globals_create_cookie); zend_register_auto_global(zend_string_init("_SERVER", sizeof("_SERVER")-1, 1), PG(auto_globals_jit), php_auto_globals_create_server); zend_register_auto_global(zend_string_init("_ENV", sizeof("_ENV")-1, 1), PG(auto_globals_jit), php_auto_globals_create_env); zend_register_auto_global(zend_string_init("_REQUEST", sizeof("_REQUEST")-1, 1), PG(auto_globals_jit), php_auto_globals_create_request); zend_register_auto_global(zend_string_init("_FILES", sizeof("_FILES")-1, 1), 0, php_auto_globals_create_files); } /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: sw=4 ts=4 fdm=marker * vim<600: sw=4 ts=4 */
./CrossVul/dataset_final_sorted/CWE-400/c/good_2563_1
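The post_var_data_t bookkeeping in the listing above keeps an already_scanned count so that add_post_var() resumes its search for the next '&' where the previous pass stopped instead of rescanning the whole accumulated buffer each time another SAPI_POST_HANDLER_BUFSIZ chunk arrives; that keeps parsing of a large request body roughly linear rather than quadratic, which fits the resource-consumption (CWE-400) label and the good_ prefix on this entry. Below is a small, self-contained sketch of the same idea; chunk_scanner and feed() are made-up names and the token handling is deliberately simplified (no '=' splitting, no URL decoding).

/*
 * Standalone sketch of the "remember how far we already scanned" idea used
 * by add_post_var() above. Names and buffer sizes are hypothetical.
 */
#include <stdio.h>
#include <string.h>

struct chunk_scanner {
    char buf[256];
    size_t len;              /* bytes currently buffered                */
    size_t already_scanned;  /* bytes known to contain no '&' separator */
};

/* Append a chunk, then report complete '&'-terminated tokens. */
static void feed(struct chunk_scanner *s, const char *chunk, int eof)
{
    size_t add = strlen(chunk);
    if (add > sizeof(s->buf) - s->len)
        add = sizeof(s->buf) - s->len;
    memcpy(s->buf + s->len, chunk, add);
    s->len += add;

    for (;;) {
        /* Only scan the bytes we have not looked at yet. */
        char *sep = memchr(s->buf + s->already_scanned, '&',
                           s->len - s->already_scanned);
        if (!sep) {
            if (!eof) {
                s->already_scanned = s->len;  /* avoid rescanning later */
                return;
            }
            sep = s->buf + s->len;            /* last token at EOF      */
            if (sep == s->buf)
                return;
        }
        printf("token: %.*s\n", (int)(sep - s->buf), s->buf);
        /* Drop the consumed token (plus separator) and reset the mark. */
        size_t consumed = (size_t)(sep - s->buf);
        if (consumed < s->len)
            consumed++;                        /* skip the '&' itself   */
        memmove(s->buf, s->buf + consumed, s->len - consumed);
        s->len -= consumed;
        s->already_scanned = 0;
        if (eof && s->len == 0)
            return;
    }
}

int main(void)
{
    struct chunk_scanner s = { .len = 0, .already_scanned = 0 };
    feed(&s, "a=1&b=", 0);   /* "a=1" completes, "b=" waits for more */
    feed(&s, "2&c=3", 1);    /* EOF flushes "b=2" and "c=3"          */
    return 0;
}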
crossvul-cpp_data_bad_1251_0
// SPDX-License-Identifier: GPL-2.0-or-later /* * Common library for ADIS16XXX devices * * Copyright 2012 Analog Devices Inc. * Author: Lars-Peter Clausen <lars@metafoo.de> */ #include <linux/export.h> #include <linux/interrupt.h> #include <linux/mutex.h> #include <linux/kernel.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/iio/iio.h> #include <linux/iio/buffer.h> #include <linux/iio/trigger_consumer.h> #include <linux/iio/triggered_buffer.h> #include <linux/iio/imu/adis.h> static int adis_update_scan_mode_burst(struct iio_dev *indio_dev, const unsigned long *scan_mask) { struct adis *adis = iio_device_get_drvdata(indio_dev); unsigned int burst_length; u8 *tx; /* All but the timestamp channel */ burst_length = (indio_dev->num_channels - 1) * sizeof(u16); burst_length += adis->burst->extra_len; adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL); if (!adis->xfer) return -ENOMEM; adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL); if (!adis->buffer) return -ENOMEM; tx = adis->buffer + burst_length; tx[0] = ADIS_READ_REG(adis->burst->reg_cmd); tx[1] = 0; adis->xfer[0].tx_buf = tx; adis->xfer[0].bits_per_word = 8; adis->xfer[0].len = 2; adis->xfer[1].rx_buf = adis->buffer; adis->xfer[1].bits_per_word = 8; adis->xfer[1].len = burst_length; spi_message_init(&adis->msg); spi_message_add_tail(&adis->xfer[0], &adis->msg); spi_message_add_tail(&adis->xfer[1], &adis->msg); return 0; } int adis_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *scan_mask) { struct adis *adis = iio_device_get_drvdata(indio_dev); const struct iio_chan_spec *chan; unsigned int scan_count; unsigned int i, j; __be16 *tx, *rx; kfree(adis->xfer); kfree(adis->buffer); if (adis->burst && adis->burst->en) return adis_update_scan_mode_burst(indio_dev, scan_mask); scan_count = indio_dev->scan_bytes / 2; adis->xfer = kcalloc(scan_count + 1, sizeof(*adis->xfer), GFP_KERNEL); if (!adis->xfer) return -ENOMEM; adis->buffer = kcalloc(indio_dev->scan_bytes, 2, GFP_KERNEL); if (!adis->buffer) { kfree(adis->xfer); adis->xfer = NULL; return -ENOMEM; } rx = adis->buffer; tx = rx + scan_count; spi_message_init(&adis->msg); for (j = 0; j <= scan_count; j++) { adis->xfer[j].bits_per_word = 8; if (j != scan_count) adis->xfer[j].cs_change = 1; adis->xfer[j].len = 2; adis->xfer[j].delay_usecs = adis->data->read_delay; if (j < scan_count) adis->xfer[j].tx_buf = &tx[j]; if (j >= 1) adis->xfer[j].rx_buf = &rx[j - 1]; spi_message_add_tail(&adis->xfer[j], &adis->msg); } chan = indio_dev->channels; for (i = 0; i < indio_dev->num_channels; i++, chan++) { if (!test_bit(chan->scan_index, scan_mask)) continue; if (chan->scan_type.storagebits == 32) *tx++ = cpu_to_be16((chan->address + 2) << 8); *tx++ = cpu_to_be16(chan->address << 8); } return 0; } EXPORT_SYMBOL_GPL(adis_update_scan_mode); static irqreturn_t adis_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct adis *adis = iio_device_get_drvdata(indio_dev); int ret; if (!adis->buffer) return -ENOMEM; if (adis->data->has_paging) { mutex_lock(&adis->txrx_lock); if (adis->current_page != 0) { adis->tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID); adis->tx[1] = 0; spi_write(adis->spi, adis->tx, 2); } } ret = spi_sync(adis->spi, &adis->msg); if (ret) dev_err(&adis->spi->dev, "Failed to read data: %d", ret); if (adis->data->has_paging) { adis->current_page = 0; mutex_unlock(&adis->txrx_lock); } iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer, pf->timestamp); 
iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; } /** * adis_setup_buffer_and_trigger() - Sets up buffer and trigger for the adis device * @adis: The adis device. * @indio_dev: The IIO device. * @trigger_handler: Optional trigger handler, may be NULL. * * Returns 0 on success, a negative error code otherwise. * * This function sets up the buffer and trigger for a adis devices. If * 'trigger_handler' is NULL the default trigger handler will be used. The * default trigger handler will simply read the registers assigned to the * currently active channels. * * adis_cleanup_buffer_and_trigger() should be called to free the resources * allocated by this function. */ int adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *)) { int ret; if (!trigger_handler) trigger_handler = adis_trigger_handler; ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, trigger_handler, NULL); if (ret) return ret; if (adis->spi->irq) { ret = adis_probe_trigger(adis, indio_dev); if (ret) goto error_buffer_cleanup; } return 0; error_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); return ret; } EXPORT_SYMBOL_GPL(adis_setup_buffer_and_trigger); /** * adis_cleanup_buffer_and_trigger() - Free buffer and trigger resources * @adis: The adis device. * @indio_dev: The IIO device. * * Frees resources allocated by adis_setup_buffer_and_trigger() */ void adis_cleanup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev) { if (adis->spi->irq) adis_remove_trigger(adis); kfree(adis->buffer); kfree(adis->xfer); iio_triggered_buffer_cleanup(indio_dev); } EXPORT_SYMBOL_GPL(adis_cleanup_buffer_and_trigger);
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1251_0
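In adis_update_scan_mode_burst() above, the failure branch after the kzalloc() of adis->buffer returns -ENOMEM while the just-allocated adis->xfer array is left in place, unlike the non-burst path in adis_update_scan_mode(), which frees and NULLs adis->xfer in the equivalent branch. If nothing later reclaims that allocation, it is lost on every failed reconfiguration, which would fit the CWE-400 label on this entry. The fragment below is a hedged, standalone illustration of keeping such a reconfigure path consistent on partial failure; demo_scan and demo_update_scan are invented names and the sizes are arbitrary.

/*
 * Standalone sketch of a reconfigure-style allocator that must not leak
 * when the second of two allocations fails. All names are hypothetical.
 */
#include <stdlib.h>

struct demo_scan {
    void *xfer;    /* stands in for adis->xfer   */
    void *buffer;  /* stands in for adis->buffer */
};

static int demo_update_scan(struct demo_scan *s, size_t nxfer, size_t buflen)
{
    /* Drop whatever the previous configuration allocated. */
    free(s->xfer);
    free(s->buffer);
    s->xfer = NULL;
    s->buffer = NULL;

    s->xfer = calloc(nxfer, 32);
    if (!s->xfer)
        return -1;

    s->buffer = calloc(1, buflen);
    if (!s->buffer) {
        /* Also release the transfer array allocated a moment ago,
         * otherwise every failed reconfiguration strands it. */
        free(s->xfer);
        s->xfer = NULL;
        return -1;
    }
    return 0;
}

int main(void)
{
    struct demo_scan s = { 0 };
    if (demo_update_scan(&s, 4, 64) == 0)   /* first configuration  */
        demo_update_scan(&s, 8, 128);       /* reconfigure, no leak */
    free(s.xfer);
    free(s.buffer);
    return 0;
}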
crossvul-cpp_data_good_1271_0
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) /* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/etherdevice.h> #include <linux/lockdep.h> #include <linux/pci.h> #include <linux/skbuff.h> #include <linux/vmalloc.h> #include <net/devlink.h> #include <net/dst_metadata.h> #include "main.h" #include "../nfpcore/nfp_cpp.h" #include "../nfpcore/nfp_nffw.h" #include "../nfpcore/nfp_nsp.h" #include "../nfp_app.h" #include "../nfp_main.h" #include "../nfp_net.h" #include "../nfp_net_repr.h" #include "../nfp_port.h" #include "./cmsg.h" #define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL #define NFP_MIN_INT_PORT_ID 1 #define NFP_MAX_INT_PORT_ID 256 static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn) { return "FLOWER"; } static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app) { return DEVLINK_ESWITCH_MODE_SWITCHDEV; } static int nfp_flower_lookup_internal_port_id(struct nfp_flower_priv *priv, struct net_device *netdev) { struct net_device *entry; int i, id = 0; rcu_read_lock(); idr_for_each_entry(&priv->internal_ports.port_ids, entry, i) if (entry == netdev) { id = i; break; } rcu_read_unlock(); return id; } static int nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev) { struct nfp_flower_priv *priv = app->priv; int id; id = nfp_flower_lookup_internal_port_id(priv, netdev); if (id > 0) return id; idr_preload(GFP_ATOMIC); spin_lock_bh(&priv->internal_ports.lock); id = idr_alloc(&priv->internal_ports.port_ids, netdev, NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID, GFP_ATOMIC); spin_unlock_bh(&priv->internal_ports.lock); idr_preload_end(); return id; } u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app, struct net_device *netdev) { int ext_port; if (nfp_netdev_is_nfp_repr(netdev)) { return nfp_repr_get_port_id(netdev); } else if (nfp_flower_internal_port_can_offload(app, netdev)) { ext_port = nfp_flower_get_internal_port_id(app, netdev); if (ext_port < 0) return 0; return nfp_flower_internal_port_get_port_id(ext_port); } return 0; } static struct net_device * nfp_flower_get_netdev_from_internal_port_id(struct nfp_app *app, int port_id) { struct nfp_flower_priv *priv = app->priv; struct net_device *netdev; rcu_read_lock(); netdev = idr_find(&priv->internal_ports.port_ids, port_id); rcu_read_unlock(); return netdev; } static void nfp_flower_free_internal_port_id(struct nfp_app *app, struct net_device *netdev) { struct nfp_flower_priv *priv = app->priv; int id; id = nfp_flower_lookup_internal_port_id(priv, netdev); if (!id) return; spin_lock_bh(&priv->internal_ports.lock); idr_remove(&priv->internal_ports.port_ids, id); spin_unlock_bh(&priv->internal_ports.lock); } static int nfp_flower_internal_port_event_handler(struct nfp_app *app, struct net_device *netdev, unsigned long event) { if (event == NETDEV_UNREGISTER && nfp_flower_internal_port_can_offload(app, netdev)) nfp_flower_free_internal_port_id(app, netdev); return NOTIFY_OK; } static void nfp_flower_internal_port_init(struct nfp_flower_priv *priv) { spin_lock_init(&priv->internal_ports.lock); idr_init(&priv->internal_ports.port_ids); } static void nfp_flower_internal_port_cleanup(struct nfp_flower_priv *priv) { idr_destroy(&priv->internal_ports.port_ids); } static struct nfp_flower_non_repr_priv * nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev) { struct nfp_flower_priv *priv = app->priv; struct nfp_flower_non_repr_priv *entry; ASSERT_RTNL(); list_for_each_entry(entry, &priv->non_repr_priv, list) if (entry->netdev 
== netdev) return entry; return NULL; } void __nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv) { non_repr_priv->ref_count++; } struct nfp_flower_non_repr_priv * nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev) { struct nfp_flower_priv *priv = app->priv; struct nfp_flower_non_repr_priv *entry; entry = nfp_flower_non_repr_priv_lookup(app, netdev); if (entry) goto inc_ref; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return NULL; entry->netdev = netdev; list_add(&entry->list, &priv->non_repr_priv); inc_ref: __nfp_flower_non_repr_priv_get(entry); return entry; } void __nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv) { if (--non_repr_priv->ref_count) return; list_del(&non_repr_priv->list); kfree(non_repr_priv); } void nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev) { struct nfp_flower_non_repr_priv *entry; entry = nfp_flower_non_repr_priv_lookup(app, netdev); if (!entry) return; __nfp_flower_non_repr_priv_put(entry); } static enum nfp_repr_type nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port) { switch (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id)) { case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT: *port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, port_id); return NFP_REPR_TYPE_PHYS_PORT; case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT: *port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port_id); if (FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, port_id) == NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF) return NFP_REPR_TYPE_PF; else return NFP_REPR_TYPE_VF; } return __NFP_REPR_TYPE_MAX; } static struct net_device * nfp_flower_dev_get(struct nfp_app *app, u32 port_id, bool *redir_egress) { enum nfp_repr_type repr_type; struct nfp_reprs *reprs; u8 port = 0; /* Check if the port is internal. 
*/ if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT) { if (redir_egress) *redir_egress = true; port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, port_id); return nfp_flower_get_netdev_from_internal_port_id(app, port); } repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port); if (repr_type > NFP_REPR_TYPE_MAX) return NULL; reprs = rcu_dereference(app->reprs[repr_type]); if (!reprs) return NULL; if (port >= reprs->num_reprs) return NULL; return rcu_dereference(reprs->reprs[port]); } static int nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type, bool exists) { struct nfp_reprs *reprs; int i, err, count = 0; reprs = rcu_dereference_protected(app->reprs[type], lockdep_is_held(&app->pf->lock)); if (!reprs) return 0; for (i = 0; i < reprs->num_reprs; i++) { struct net_device *netdev; netdev = nfp_repr_get_locked(app, reprs, i); if (netdev) { struct nfp_repr *repr = netdev_priv(netdev); err = nfp_flower_cmsg_portreify(repr, exists); if (err) return err; count++; } } return count; } static int nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl) { struct nfp_flower_priv *priv = app->priv; if (!tot_repl) return 0; lockdep_assert_held(&app->pf->lock); if (!wait_event_timeout(priv->reify_wait_queue, atomic_read(replies) >= tot_repl, NFP_FL_REPLY_TIMEOUT)) { nfp_warn(app->cpp, "Not all reprs responded to reify\n"); return -EIO; } return 0; } static int nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr) { int err; err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false); if (err) return err; netif_tx_wake_all_queues(repr->netdev); return 0; } static int nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr) { netif_tx_disable(repr->netdev); return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false); } static void nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev) { struct nfp_repr *repr = netdev_priv(netdev); kfree(repr->app_priv); } static void nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev) { struct nfp_repr *repr = netdev_priv(netdev); struct nfp_flower_priv *priv = app->priv; atomic_t *replies = &priv->reify_replies; int err; atomic_set(replies, 0); err = nfp_flower_cmsg_portreify(repr, false); if (err) { nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n"); return; } nfp_flower_wait_repr_reify(app, replies, 1); } static void nfp_flower_sriov_disable(struct nfp_app *app) { struct nfp_flower_priv *priv = app->priv; if (!priv->nn) return; nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF); } static int nfp_flower_spawn_vnic_reprs(struct nfp_app *app, enum nfp_flower_cmsg_port_vnic_type vnic_type, enum nfp_repr_type repr_type, unsigned int cnt) { u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp); struct nfp_flower_priv *priv = app->priv; atomic_t *replies = &priv->reify_replies; struct nfp_flower_repr_priv *repr_priv; enum nfp_port_type port_type; struct nfp_repr *nfp_repr; struct nfp_reprs *reprs; int i, err, reify_cnt; const u8 queue = 0; port_type = repr_type == NFP_REPR_TYPE_PF ? 
NFP_PORT_PF_PORT : NFP_PORT_VF_PORT; reprs = nfp_reprs_alloc(cnt); if (!reprs) return -ENOMEM; for (i = 0; i < cnt; i++) { struct net_device *repr; struct nfp_port *port; u32 port_id; repr = nfp_repr_alloc(app); if (!repr) { err = -ENOMEM; goto err_reprs_clean; } repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL); if (!repr_priv) { err = -ENOMEM; nfp_repr_free(repr); goto err_reprs_clean; } nfp_repr = netdev_priv(repr); nfp_repr->app_priv = repr_priv; repr_priv->nfp_repr = nfp_repr; /* For now we only support 1 PF */ WARN_ON(repr_type == NFP_REPR_TYPE_PF && i); port = nfp_port_alloc(app, port_type, repr); if (IS_ERR(port)) { err = PTR_ERR(port); kfree(repr_priv); nfp_repr_free(repr); goto err_reprs_clean; } if (repr_type == NFP_REPR_TYPE_PF) { port->pf_id = i; port->vnic = priv->nn->dp.ctrl_bar; } else { port->pf_id = 0; port->vf_id = i; port->vnic = app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ; } eth_hw_addr_random(repr); port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type, i, queue); err = nfp_repr_init(app, repr, port_id, port, priv->nn->dp.netdev); if (err) { kfree(repr_priv); nfp_port_free(port); nfp_repr_free(repr); goto err_reprs_clean; } RCU_INIT_POINTER(reprs->reprs[i], repr); nfp_info(app->cpp, "%s%d Representor(%s) created\n", repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i, repr->name); } nfp_app_reprs_set(app, repr_type, reprs); atomic_set(replies, 0); reify_cnt = nfp_flower_reprs_reify(app, repr_type, true); if (reify_cnt < 0) { err = reify_cnt; nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n"); goto err_reprs_remove; } err = nfp_flower_wait_repr_reify(app, replies, reify_cnt); if (err) goto err_reprs_remove; return 0; err_reprs_remove: reprs = nfp_app_reprs_set(app, repr_type, NULL); err_reprs_clean: nfp_reprs_clean_and_free(app, reprs); return err; } static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs) { struct nfp_flower_priv *priv = app->priv; if (!priv->nn) return 0; return nfp_flower_spawn_vnic_reprs(app, NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF, NFP_REPR_TYPE_VF, num_vfs); } static int nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) { struct nfp_eth_table *eth_tbl = app->pf->eth_tbl; atomic_t *replies = &priv->reify_replies; struct nfp_flower_repr_priv *repr_priv; struct nfp_repr *nfp_repr; struct sk_buff *ctrl_skb; struct nfp_reprs *reprs; int err, reify_cnt; unsigned int i; ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count); if (!ctrl_skb) return -ENOMEM; reprs = nfp_reprs_alloc(eth_tbl->max_index + 1); if (!reprs) { err = -ENOMEM; goto err_free_ctrl_skb; } for (i = 0; i < eth_tbl->count; i++) { unsigned int phys_port = eth_tbl->ports[i].index; struct net_device *repr; struct nfp_port *port; u32 cmsg_port_id; repr = nfp_repr_alloc(app); if (!repr) { err = -ENOMEM; goto err_reprs_clean; } repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL); if (!repr_priv) { err = -ENOMEM; nfp_repr_free(repr); goto err_reprs_clean; } nfp_repr = netdev_priv(repr); nfp_repr->app_priv = repr_priv; repr_priv->nfp_repr = nfp_repr; port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr); if (IS_ERR(port)) { err = PTR_ERR(port); kfree(repr_priv); nfp_repr_free(repr); goto err_reprs_clean; } err = nfp_port_init_phy_port(app->pf, app, port, i); if (err) { kfree(repr_priv); nfp_port_free(port); nfp_repr_free(repr); goto err_reprs_clean; } SET_NETDEV_DEV(repr, &priv->nn->pdev->dev); nfp_net_get_mac_addr(app->pf, repr, port); cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port); err = nfp_repr_init(app, repr, cmsg_port_id, 
port, priv->nn->dp.netdev); if (err) { kfree(repr_priv); nfp_port_free(port); nfp_repr_free(repr); goto err_reprs_clean; } nfp_flower_cmsg_mac_repr_add(ctrl_skb, i, eth_tbl->ports[i].nbi, eth_tbl->ports[i].base, phys_port); RCU_INIT_POINTER(reprs->reprs[phys_port], repr); nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n", phys_port, repr->name); } nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs); /* The REIFY/MAC_REPR control messages should be sent after the MAC * representors are registered using nfp_app_reprs_set(). This is * because the firmware may respond with control messages for the * MAC representors, f.e. to provide the driver with information * about their state, and without registration the driver will drop * any such messages. */ atomic_set(replies, 0); reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true); if (reify_cnt < 0) { err = reify_cnt; nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n"); goto err_reprs_remove; } err = nfp_flower_wait_repr_reify(app, replies, reify_cnt); if (err) goto err_reprs_remove; nfp_ctrl_tx(app->ctrl, ctrl_skb); return 0; err_reprs_remove: reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL); err_reprs_clean: nfp_reprs_clean_and_free(app, reprs); err_free_ctrl_skb: kfree_skb(ctrl_skb); return err; } static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) { if (id > 0) { nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n"); goto err_invalid_port; } eth_hw_addr_random(nn->dp.netdev); netif_keep_dst(nn->dp.netdev); nn->vnic_no_name = true; return 0; err_invalid_port: nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev); return PTR_ERR_OR_ZERO(nn->port); } static void nfp_flower_vnic_clean(struct nfp_app *app, struct nfp_net *nn) { struct nfp_flower_priv *priv = app->priv; if (app->pf->num_vfs) nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF); nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF); nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT); priv->nn = NULL; } static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn) { struct nfp_flower_priv *priv = app->priv; int err; priv->nn = nn; err = nfp_flower_spawn_phy_reprs(app, app->priv); if (err) goto err_clear_nn; err = nfp_flower_spawn_vnic_reprs(app, NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF, NFP_REPR_TYPE_PF, 1); if (err) goto err_destroy_reprs_phy; if (app->pf->num_vfs) { err = nfp_flower_spawn_vnic_reprs(app, NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF, NFP_REPR_TYPE_VF, app->pf->num_vfs); if (err) goto err_destroy_reprs_pf; } return 0; err_destroy_reprs_pf: nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF); err_destroy_reprs_phy: nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT); err_clear_nn: priv->nn = NULL; return err; } static int nfp_flower_init(struct nfp_app *app) { u64 version, features, ctx_count, num_mems; const struct nfp_pf *pf = app->pf; struct nfp_flower_priv *app_priv; int err; if (!pf->eth_tbl) { nfp_warn(app->cpp, "FlowerNIC requires eth table\n"); return -EINVAL; } if (!pf->mac_stats_bar) { nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n"); return -EINVAL; } if (!pf->vf_cfg_bar) { nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n"); return -EINVAL; } version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err); if (err) { nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n"); return err; } num_mems = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_SPLIT", &err); if 
(err) { nfp_warn(app->cpp, "FlowerNIC: unsupported host context memory: %d\n", err); err = 0; num_mems = 1; } if (!FIELD_FIT(NFP_FL_STAT_ID_MU_NUM, num_mems) || !num_mems) { nfp_warn(app->cpp, "FlowerNIC: invalid host context memory: %llu\n", num_mems); return -EINVAL; } ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT", &err); if (err) { nfp_warn(app->cpp, "FlowerNIC: unsupported host context count: %d\n", err); err = 0; ctx_count = BIT(17); } /* We need to ensure hardware has enough flower capabilities. */ if (version != NFP_FLOWER_ALLOWED_VER) { nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n"); return -EINVAL; } app_priv = vzalloc(sizeof(struct nfp_flower_priv)); if (!app_priv) return -ENOMEM; app_priv->total_mem_units = num_mems; app_priv->active_mem_unit = 0; app_priv->stats_ring_size = roundup_pow_of_two(ctx_count); app->priv = app_priv; app_priv->app = app; skb_queue_head_init(&app_priv->cmsg_skbs_high); skb_queue_head_init(&app_priv->cmsg_skbs_low); INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx); init_waitqueue_head(&app_priv->reify_wait_queue); init_waitqueue_head(&app_priv->mtu_conf.wait_q); spin_lock_init(&app_priv->mtu_conf.lock); err = nfp_flower_metadata_init(app, ctx_count, num_mems); if (err) goto err_free_app_priv; /* Extract the extra features supported by the firmware. */ features = nfp_rtsym_read_le(app->pf->rtbl, "_abi_flower_extra_features", &err); if (err) app_priv->flower_ext_feats = 0; else app_priv->flower_ext_feats = features; /* Tell the firmware that the driver supports lag. */ err = nfp_rtsym_write_le(app->pf->rtbl, "_abi_flower_balance_sync_enable", 1); if (!err) { app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG; nfp_flower_lag_init(&app_priv->nfp_lag); } else if (err == -ENOENT) { nfp_warn(app->cpp, "LAG not supported by FW.\n"); } else { goto err_cleanup_metadata; } if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) { /* Tell the firmware that the driver supports flow merging. 
*/ err = nfp_rtsym_write_le(app->pf->rtbl, "_abi_flower_merge_hint_enable", 1); if (!err) { app_priv->flower_ext_feats |= NFP_FL_FEATS_FLOW_MERGE; nfp_flower_internal_port_init(app_priv); } else if (err == -ENOENT) { nfp_warn(app->cpp, "Flow merge not supported by FW.\n"); } else { goto err_lag_clean; } } else { nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n"); } if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM) nfp_flower_qos_init(app); INIT_LIST_HEAD(&app_priv->indr_block_cb_priv); INIT_LIST_HEAD(&app_priv->non_repr_priv); app_priv->pre_tun_rule_cnt = 0; return 0; err_lag_clean: if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) nfp_flower_lag_cleanup(&app_priv->nfp_lag); err_cleanup_metadata: nfp_flower_metadata_cleanup(app); err_free_app_priv: vfree(app->priv); return err; } static void nfp_flower_clean(struct nfp_app *app) { struct nfp_flower_priv *app_priv = app->priv; skb_queue_purge(&app_priv->cmsg_skbs_high); skb_queue_purge(&app_priv->cmsg_skbs_low); flush_work(&app_priv->cmsg_work); if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM) nfp_flower_qos_cleanup(app); if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) nfp_flower_lag_cleanup(&app_priv->nfp_lag); if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE) nfp_flower_internal_port_cleanup(app_priv); nfp_flower_metadata_cleanup(app); vfree(app->priv); app->priv = NULL; } static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv) { bool ret; spin_lock_bh(&app_priv->mtu_conf.lock); ret = app_priv->mtu_conf.ack; spin_unlock_bh(&app_priv->mtu_conf.lock); return ret; } static int nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu) { struct nfp_flower_priv *app_priv = app->priv; struct nfp_repr *repr = netdev_priv(netdev); int err; /* Only need to config FW for physical port MTU change. */ if (repr->port->type != NFP_PORT_PHYS_PORT) return 0; if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) { nfp_err(app->cpp, "Physical port MTU setting not supported\n"); return -EINVAL; } spin_lock_bh(&app_priv->mtu_conf.lock); app_priv->mtu_conf.ack = false; app_priv->mtu_conf.requested_val = new_mtu; app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id; spin_unlock_bh(&app_priv->mtu_conf.lock); err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu, true); if (err) { spin_lock_bh(&app_priv->mtu_conf.lock); app_priv->mtu_conf.requested_val = 0; spin_unlock_bh(&app_priv->mtu_conf.lock); return err; } /* Wait for fw to ack the change. 
*/ if (!wait_event_timeout(app_priv->mtu_conf.wait_q, nfp_flower_check_ack(app_priv), NFP_FL_REPLY_TIMEOUT)) { spin_lock_bh(&app_priv->mtu_conf.lock); app_priv->mtu_conf.requested_val = 0; spin_unlock_bh(&app_priv->mtu_conf.lock); nfp_warn(app->cpp, "MTU change not verified with fw\n"); return -EIO; } return 0; } static int nfp_flower_start(struct nfp_app *app) { struct nfp_flower_priv *app_priv = app->priv; int err; if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) { err = nfp_flower_lag_reset(&app_priv->nfp_lag); if (err) return err; } return nfp_tunnel_config_start(app); } static void nfp_flower_stop(struct nfp_app *app) { nfp_tunnel_config_stop(app); } static int nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev, unsigned long event, void *ptr) { struct nfp_flower_priv *app_priv = app->priv; int ret; if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) { ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr); if (ret & NOTIFY_STOP_MASK) return ret; } ret = nfp_flower_reg_indir_block_handler(app, netdev, event); if (ret & NOTIFY_STOP_MASK) return ret; ret = nfp_flower_internal_port_event_handler(app, netdev, event); if (ret & NOTIFY_STOP_MASK) return ret; return nfp_tunnel_mac_event_handler(app, netdev, event, ptr); } const struct nfp_app_type app_flower = { .id = NFP_APP_FLOWER_NIC, .name = "flower", .ctrl_cap_mask = ~0U, .ctrl_has_meta = true, .extra_cap = nfp_flower_extra_cap, .init = nfp_flower_init, .clean = nfp_flower_clean, .repr_change_mtu = nfp_flower_repr_change_mtu, .vnic_alloc = nfp_flower_vnic_alloc, .vnic_init = nfp_flower_vnic_init, .vnic_clean = nfp_flower_vnic_clean, .repr_preclean = nfp_flower_repr_netdev_preclean, .repr_clean = nfp_flower_repr_netdev_clean, .repr_open = nfp_flower_repr_netdev_open, .repr_stop = nfp_flower_repr_netdev_stop, .start = nfp_flower_start, .stop = nfp_flower_stop, .netdev_event = nfp_flower_netdev_event, .ctrl_msg_rx = nfp_flower_cmsg_rx, .sriov_enable = nfp_flower_sriov_enable, .sriov_disable = nfp_flower_sriov_disable, .eswitch_mode_get = eswitch_mode_get, .dev_get = nfp_flower_dev_get, .setup_tc = nfp_flower_setup_tc, };
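/*
 * Editor's note (illustrative addition, not part of the original driver):
 * the representor reify handshake and the physical-port MTU change above
 * share one control-flow shape - clear an ack flag, send a control message
 * to the firmware, then block with a timeout until the reply handler sets
 * the flag.  The standalone sketch below models that shape with plain
 * polling; fake_fw_reply(), MAX_POLLS and the -5 return value are made-up
 * stand-ins for the driver's wait queue, NFP_FL_REPLY_TIMEOUT and -EIO
 * reporting.  It is wrapped in #if 0 so it stays inert if this file is
 * ever fed to a compiler.
 */
#if 0 /* standalone illustration, not compiled with the driver */
#include <stdbool.h>
#include <stdio.h>

#define MAX_POLLS 16			/* hypothetical timeout budget */

static bool ack;			/* normally set from the cmsg reply path */

/* Pretend the firmware answers on the third poll. */
static void fake_fw_reply(int poll)
{
	if (poll == 3)
		ack = true;
}

/* Clear the flag, "send" the request, then wait for the ack or time out. */
static int send_and_wait(void)
{
	int poll;

	ack = false;
	/* ... control message would be queued to the firmware here ... */

	for (poll = 0; poll < MAX_POLLS; poll++) {
		fake_fw_reply(poll);
		if (ack)
			return 0;
	}
	return -5;			/* the driver warns and returns -EIO */
}

int main(void)
{
	printf("handshake returned %d\n", send_and_wait());
	return 0;
}
#endif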
./CrossVul/dataset_final_sorted/CWE-400/c/good_1271_0
crossvul-cpp_data_good_1247_0
/* * Marvell Wireless LAN device driver: PCIE specific handling * * Copyright (C) 2011-2014, Marvell International Ltd. * * This software file (the "File") is distributed by Marvell International * Ltd. under the terms of the GNU General Public License Version 2, June 1991 * (the "License"). You may use, redistribute and/or modify this File in * accordance with the terms and conditions of the License, a copy of which * is available by writing to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. * * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE * ARE EXPRESSLY DISCLAIMED. The License provides additional details about * this warranty disclaimer. */ #include <linux/firmware.h> #include "decl.h" #include "ioctl.h" #include "util.h" #include "fw.h" #include "main.h" #include "wmm.h" #include "11n.h" #include "pcie.h" #define PCIE_VERSION "1.0" #define DRV_NAME "Marvell mwifiex PCIe" static struct mwifiex_if_ops pcie_ops; static const struct of_device_id mwifiex_pcie_of_match_table[] = { { .compatible = "pci11ab,2b42" }, { .compatible = "pci1b4b,2b42" }, { } }; static int mwifiex_pcie_probe_of(struct device *dev) { if (!of_match_node(mwifiex_pcie_of_match_table, dev->of_node)) { dev_err(dev, "required compatible string missing\n"); return -EINVAL; } return 0; } static void mwifiex_pcie_work(struct work_struct *work); static int mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb, size_t size, int flags) { struct pcie_service_card *card = adapter->card; struct mwifiex_dma_mapping mapping; mapping.addr = pci_map_single(card->dev, skb->data, size, flags); if (pci_dma_mapping_error(card->dev, mapping.addr)) { mwifiex_dbg(adapter, ERROR, "failed to map pci memory!\n"); return -1; } mapping.len = size; mwifiex_store_mapping(skb, &mapping); return 0; } static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb, int flags) { struct pcie_service_card *card = adapter->card; struct mwifiex_dma_mapping mapping; mwifiex_get_mapping(skb, &mapping); pci_unmap_single(card->dev, mapping.addr, mapping.len, flags); } /* * This function writes data into PCIE card register. */ static int mwifiex_write_reg(struct mwifiex_adapter *adapter, int reg, u32 data) { struct pcie_service_card *card = adapter->card; iowrite32(data, card->pci_mmap1 + reg); return 0; } /* This function reads data from PCIE card register. */ static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data) { struct pcie_service_card *card = adapter->card; *data = ioread32(card->pci_mmap1 + reg); if (*data == 0xffffffff) return 0xffffffff; return 0; } /* This function reads u8 data from PCIE card register. 
*/ static int mwifiex_read_reg_byte(struct mwifiex_adapter *adapter, int reg, u8 *data) { struct pcie_service_card *card = adapter->card; *data = ioread8(card->pci_mmap1 + reg); return 0; } /* * This function reads sleep cookie and checks if FW is ready */ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter) { u32 cookie_value; struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; if (!reg->sleep_cookie) return true; if (card->sleep_cookie_vbase) { cookie_value = get_unaligned_le32(card->sleep_cookie_vbase); mwifiex_dbg(adapter, INFO, "info: ACCESS_HW: sleep cookie=0x%x\n", cookie_value); if (cookie_value == FW_AWAKE_COOKIE) return true; } return false; } #ifdef CONFIG_PM_SLEEP /* * Kernel needs to suspend all functions separately. Therefore all * registered functions must have drivers with suspend and resume * methods. Failing that the kernel simply removes the whole card. * * If already not suspended, this function allocates and sends a host * sleep activate request to the firmware and turns off the traffic. */ static int mwifiex_pcie_suspend(struct device *dev) { struct mwifiex_adapter *adapter; struct pcie_service_card *card = dev_get_drvdata(dev); /* Might still be loading firmware */ wait_for_completion(&card->fw_done); adapter = card->adapter; if (!adapter) { dev_err(dev, "adapter is not valid\n"); return 0; } mwifiex_enable_wake(adapter); /* Enable the Host Sleep */ if (!mwifiex_enable_hs(adapter)) { mwifiex_dbg(adapter, ERROR, "cmd: failed to suspend\n"); clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags); mwifiex_disable_wake(adapter); return -EFAULT; } flush_workqueue(adapter->workqueue); /* Indicate device suspended */ set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags); clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags); return 0; } /* * Kernel needs to suspend all functions separately. Therefore all * registered functions must have drivers with suspend and resume * methods. Failing that the kernel simply removes the whole card. * * If already not resumed, this function turns on the traffic and * sends a host sleep cancel request to the firmware. */ static int mwifiex_pcie_resume(struct device *dev) { struct mwifiex_adapter *adapter; struct pcie_service_card *card = dev_get_drvdata(dev); if (!card->adapter) { dev_err(dev, "adapter structure is not valid\n"); return 0; } adapter = card->adapter; if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) { mwifiex_dbg(adapter, WARN, "Device already resumed\n"); return 0; } clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags); mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA), MWIFIEX_ASYNC_CMD); mwifiex_disable_wake(adapter); return 0; } #endif /* * This function probes an mwifiex device and registers it. It allocates * the card structure, enables PCIE function number and initiates the * device registration and initialization procedure by adding a logical * interface. 
*/ static int mwifiex_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct pcie_service_card *card; int ret; pr_debug("info: vendor=0x%4.04X device=0x%4.04X rev=%d\n", pdev->vendor, pdev->device, pdev->revision); card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL); if (!card) return -ENOMEM; init_completion(&card->fw_done); card->dev = pdev; if (ent->driver_data) { struct mwifiex_pcie_device *data = (void *)ent->driver_data; card->pcie.reg = data->reg; card->pcie.blksz_fw_dl = data->blksz_fw_dl; card->pcie.tx_buf_size = data->tx_buf_size; card->pcie.can_dump_fw = data->can_dump_fw; card->pcie.mem_type_mapping_tbl = data->mem_type_mapping_tbl; card->pcie.num_mem_types = data->num_mem_types; card->pcie.can_ext_scan = data->can_ext_scan; INIT_WORK(&card->work, mwifiex_pcie_work); } /* device tree node parsing and platform specific configuration*/ if (pdev->dev.of_node) { ret = mwifiex_pcie_probe_of(&pdev->dev); if (ret) return ret; } if (mwifiex_add_card(card, &card->fw_done, &pcie_ops, MWIFIEX_PCIE, &pdev->dev)) { pr_err("%s failed\n", __func__); return -1; } return 0; } /* * This function removes the interface and frees up the card structure. */ static void mwifiex_pcie_remove(struct pci_dev *pdev) { struct pcie_service_card *card; struct mwifiex_adapter *adapter; struct mwifiex_private *priv; const struct mwifiex_pcie_card_reg *reg; u32 fw_status; int ret; card = pci_get_drvdata(pdev); wait_for_completion(&card->fw_done); adapter = card->adapter; if (!adapter || !adapter->priv_num) return; reg = card->pcie.reg; if (reg) ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status); else fw_status = -1; if (fw_status == FIRMWARE_READY_PCIE && !adapter->mfg_mode) { mwifiex_deauthenticate_all(adapter); priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); mwifiex_disable_auto_ds(priv); mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN); } mwifiex_remove_card(adapter); } static void mwifiex_pcie_shutdown(struct pci_dev *pdev) { mwifiex_pcie_remove(pdev); return; } static void mwifiex_pcie_coredump(struct device *dev) { struct pci_dev *pdev; struct pcie_service_card *card; pdev = container_of(dev, struct pci_dev, dev); card = pci_get_drvdata(pdev); if (!test_and_set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags)) schedule_work(&card->work); } static const struct pci_device_id mwifiex_ids[] = { { PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P, PCI_ANY_ID, PCI_ANY_ID, 0, 0, .driver_data = (unsigned long)&mwifiex_pcie8766, }, { PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8897, PCI_ANY_ID, PCI_ANY_ID, 0, 0, .driver_data = (unsigned long)&mwifiex_pcie8897, }, { PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, .driver_data = (unsigned long)&mwifiex_pcie8997, }, { PCIE_VENDOR_ID_V2_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, .driver_data = (unsigned long)&mwifiex_pcie8997, }, {}, }; MODULE_DEVICE_TABLE(pci, mwifiex_ids); /* * Cleanup all software without cleaning anything related to PCIe and HW. 
*/ static void mwifiex_pcie_reset_prepare(struct pci_dev *pdev) { struct pcie_service_card *card = pci_get_drvdata(pdev); struct mwifiex_adapter *adapter = card->adapter; if (!adapter) { dev_err(&pdev->dev, "%s: adapter structure is not valid\n", __func__); return; } mwifiex_dbg(adapter, INFO, "%s: vendor=0x%4.04x device=0x%4.04x rev=%d Pre-FLR\n", __func__, pdev->vendor, pdev->device, pdev->revision); mwifiex_shutdown_sw(adapter); clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags); clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags); mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); } /* * Kernel stores and restores PCIe function context before and after performing * FLR respectively. Reconfigure the software and firmware including firmware * redownload. */ static void mwifiex_pcie_reset_done(struct pci_dev *pdev) { struct pcie_service_card *card = pci_get_drvdata(pdev); struct mwifiex_adapter *adapter = card->adapter; int ret; if (!adapter) { dev_err(&pdev->dev, "%s: adapter structure is not valid\n", __func__); return; } mwifiex_dbg(adapter, INFO, "%s: vendor=0x%4.04x device=0x%4.04x rev=%d Post-FLR\n", __func__, pdev->vendor, pdev->device, pdev->revision); ret = mwifiex_reinit_sw(adapter); if (ret) dev_err(&pdev->dev, "reinit failed: %d\n", ret); else mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); } static const struct pci_error_handlers mwifiex_pcie_err_handler = { .reset_prepare = mwifiex_pcie_reset_prepare, .reset_done = mwifiex_pcie_reset_done, }; #ifdef CONFIG_PM_SLEEP /* Power Management Hooks */ static SIMPLE_DEV_PM_OPS(mwifiex_pcie_pm_ops, mwifiex_pcie_suspend, mwifiex_pcie_resume); #endif /* PCI Device Driver */ static struct pci_driver __refdata mwifiex_pcie = { .name = "mwifiex_pcie", .id_table = mwifiex_ids, .probe = mwifiex_pcie_probe, .remove = mwifiex_pcie_remove, .driver = { .coredump = mwifiex_pcie_coredump, #ifdef CONFIG_PM_SLEEP .pm = &mwifiex_pcie_pm_ops, #endif }, .shutdown = mwifiex_pcie_shutdown, .err_handler = &mwifiex_pcie_err_handler, }; /* * This function adds delay loop to ensure FW is awake before proceeding. */ static void mwifiex_pcie_dev_wakeup_delay(struct mwifiex_adapter *adapter) { int i = 0; while (mwifiex_pcie_ok_to_access_hw(adapter)) { i++; usleep_range(10, 20); /* 50ms max wait */ if (i == 5000) break; } return; } static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter, u32 max_delay_loop_cnt) { struct pcie_service_card *card = adapter->card; u8 *buffer; u32 sleep_cookie, count; struct sk_buff *cmdrsp = card->cmdrsp_buf; for (count = 0; count < max_delay_loop_cnt; count++) { pci_dma_sync_single_for_cpu(card->dev, MWIFIEX_SKB_DMA_ADDR(cmdrsp), sizeof(sleep_cookie), PCI_DMA_FROMDEVICE); buffer = cmdrsp->data; sleep_cookie = get_unaligned_le32(buffer); if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE) { mwifiex_dbg(adapter, INFO, "sleep cookie found at count %d\n", count); break; } pci_dma_sync_single_for_device(card->dev, MWIFIEX_SKB_DMA_ADDR(cmdrsp), sizeof(sleep_cookie), PCI_DMA_FROMDEVICE); usleep_range(20, 30); } if (count >= max_delay_loop_cnt) mwifiex_dbg(adapter, INFO, "max count reached while accessing sleep cookie\n"); } /* This function wakes up the card by reading fw_status register. 
*/ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; mwifiex_dbg(adapter, EVENT, "event: Wakeup device...\n"); if (reg->sleep_cookie) mwifiex_pcie_dev_wakeup_delay(adapter); /* Accessing fw_status register will wakeup device */ if (mwifiex_write_reg(adapter, reg->fw_status, FIRMWARE_READY_PCIE)) { mwifiex_dbg(adapter, ERROR, "Writing fw_status register failed\n"); return -1; } if (reg->sleep_cookie) { mwifiex_pcie_dev_wakeup_delay(adapter); mwifiex_dbg(adapter, INFO, "PCIE wakeup: Setting PS_STATE_AWAKE\n"); adapter->ps_state = PS_STATE_AWAKE; } return 0; } /* * This function is called after the card has woken up. * * The card configuration register is reset. */ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter) { mwifiex_dbg(adapter, CMD, "cmd: Wakeup device completed\n"); return 0; } /* * This function disables the host interrupt. * * The host interrupt mask is read, the disable bit is reset and * written back to the card host interrupt mask register. */ static int mwifiex_pcie_disable_host_int(struct mwifiex_adapter *adapter) { if (mwifiex_pcie_ok_to_access_hw(adapter)) { if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK, 0x00000000)) { mwifiex_dbg(adapter, ERROR, "Disable host interrupt failed\n"); return -1; } } atomic_set(&adapter->tx_hw_pending, 0); return 0; } static void mwifiex_pcie_disable_host_int_noerr(struct mwifiex_adapter *adapter) { WARN_ON(mwifiex_pcie_disable_host_int(adapter)); } /* * This function enables the host interrupt. * * The host interrupt enable mask is written to the card * host interrupt mask register. */ static int mwifiex_pcie_enable_host_int(struct mwifiex_adapter *adapter) { if (mwifiex_pcie_ok_to_access_hw(adapter)) { /* Simply write the mask to the register */ if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK, HOST_INTR_MASK)) { mwifiex_dbg(adapter, ERROR, "Enable host interrupt failed\n"); return -1; } } return 0; } /* * This function initializes TX buffer ring descriptors */ static int mwifiex_init_txq_ring(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; struct mwifiex_pcie_buf_desc *desc; struct mwifiex_pfu_buf_desc *desc2; int i; for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) { card->tx_buf_list[i] = NULL; if (reg->pfu_enabled) { card->txbd_ring[i] = (void *)card->txbd_ring_vbase + (sizeof(*desc2) * i); desc2 = card->txbd_ring[i]; memset(desc2, 0, sizeof(*desc2)); } else { card->txbd_ring[i] = (void *)card->txbd_ring_vbase + (sizeof(*desc) * i); desc = card->txbd_ring[i]; memset(desc, 0, sizeof(*desc)); } } return 0; } /* This function initializes RX buffer ring descriptors. Each SKB is allocated * here and after mapping PCI memory, its physical address is assigned to * PCIE Rx buffer descriptor's physical address. 
*/ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; struct sk_buff *skb; struct mwifiex_pcie_buf_desc *desc; struct mwifiex_pfu_buf_desc *desc2; dma_addr_t buf_pa; int i; for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) { /* Allocate skb here so that firmware can DMA data from it */ skb = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE, GFP_KERNEL); if (!skb) { mwifiex_dbg(adapter, ERROR, "Unable to allocate skb for RX ring.\n"); kfree(card->rxbd_ring_vbase); return -ENOMEM; } if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_RX_DATA_BUF_SIZE, PCI_DMA_FROMDEVICE)) return -1; buf_pa = MWIFIEX_SKB_DMA_ADDR(skb); mwifiex_dbg(adapter, INFO, "info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n", skb, skb->len, skb->data, (u32)buf_pa, (u32)((u64)buf_pa >> 32)); card->rx_buf_list[i] = skb; if (reg->pfu_enabled) { card->rxbd_ring[i] = (void *)card->rxbd_ring_vbase + (sizeof(*desc2) * i); desc2 = card->rxbd_ring[i]; desc2->paddr = buf_pa; desc2->len = (u16)skb->len; desc2->frag_len = (u16)skb->len; desc2->flags = reg->ring_flag_eop | reg->ring_flag_sop; desc2->offset = 0; } else { card->rxbd_ring[i] = (void *)(card->rxbd_ring_vbase + (sizeof(*desc) * i)); desc = card->rxbd_ring[i]; desc->paddr = buf_pa; desc->len = (u16)skb->len; desc->flags = 0; } } return 0; } /* This function initializes event buffer ring descriptors. Each SKB is * allocated here and after mapping PCI memory, its physical address is assigned * to PCIE Rx buffer descriptor's physical address */ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; struct mwifiex_evt_buf_desc *desc; struct sk_buff *skb; dma_addr_t buf_pa; int i; for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) { /* Allocate skb here so that firmware can DMA data from it */ skb = dev_alloc_skb(MAX_EVENT_SIZE); if (!skb) { mwifiex_dbg(adapter, ERROR, "Unable to allocate skb for EVENT buf.\n"); kfree(card->evtbd_ring_vbase); return -ENOMEM; } skb_put(skb, MAX_EVENT_SIZE); if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE, PCI_DMA_FROMDEVICE)) { kfree_skb(skb); kfree(card->evtbd_ring_vbase); return -1; } buf_pa = MWIFIEX_SKB_DMA_ADDR(skb); mwifiex_dbg(adapter, EVENT, "info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n", skb, skb->len, skb->data, (u32)buf_pa, (u32)((u64)buf_pa >> 32)); card->evt_buf_list[i] = skb; card->evtbd_ring[i] = (void *)(card->evtbd_ring_vbase + (sizeof(*desc) * i)); desc = card->evtbd_ring[i]; desc->paddr = buf_pa; desc->len = (u16)skb->len; desc->flags = 0; } return 0; } /* This function cleans up TX buffer rings. If any of the buffer list has valid * SKB address, associated SKB is freed. 
*/ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; struct sk_buff *skb; struct mwifiex_pcie_buf_desc *desc; struct mwifiex_pfu_buf_desc *desc2; int i; for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) { if (reg->pfu_enabled) { desc2 = card->txbd_ring[i]; if (card->tx_buf_list[i]) { skb = card->tx_buf_list[i]; mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); dev_kfree_skb_any(skb); } memset(desc2, 0, sizeof(*desc2)); } else { desc = card->txbd_ring[i]; if (card->tx_buf_list[i]) { skb = card->tx_buf_list[i]; mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); dev_kfree_skb_any(skb); } memset(desc, 0, sizeof(*desc)); } card->tx_buf_list[i] = NULL; } atomic_set(&adapter->tx_hw_pending, 0); return; } /* This function cleans up RX buffer rings. If any of the buffer list has valid * SKB address, associated SKB is freed. */ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; struct mwifiex_pcie_buf_desc *desc; struct mwifiex_pfu_buf_desc *desc2; struct sk_buff *skb; int i; for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) { if (reg->pfu_enabled) { desc2 = card->rxbd_ring[i]; if (card->rx_buf_list[i]) { skb = card->rx_buf_list[i]; mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE); dev_kfree_skb_any(skb); } memset(desc2, 0, sizeof(*desc2)); } else { desc = card->rxbd_ring[i]; if (card->rx_buf_list[i]) { skb = card->rx_buf_list[i]; mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE); dev_kfree_skb_any(skb); } memset(desc, 0, sizeof(*desc)); } card->rx_buf_list[i] = NULL; } return; } /* This function cleans up event buffer rings. If any of the buffer list has * valid SKB address, associated SKB is freed. */ static void mwifiex_cleanup_evt_ring(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; struct mwifiex_evt_buf_desc *desc; struct sk_buff *skb; int i; for (i = 0; i < MWIFIEX_MAX_EVT_BD; i++) { desc = card->evtbd_ring[i]; if (card->evt_buf_list[i]) { skb = card->evt_buf_list[i]; mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE); dev_kfree_skb_any(skb); } card->evt_buf_list[i] = NULL; memset(desc, 0, sizeof(*desc)); } return; } /* This function creates buffer descriptor ring for TX */ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; /* * driver maintaines the write pointer and firmware maintaines the read * pointer. 
The write pointer starts at 0 (zero) while the read pointer * starts at zero with rollover bit set */ card->txbd_wrptr = 0; if (reg->pfu_enabled) card->txbd_rdptr = 0; else card->txbd_rdptr |= reg->tx_rollover_ind; /* allocate shared memory for the BD ring and divide the same in to several descriptors */ if (reg->pfu_enabled) card->txbd_ring_size = sizeof(struct mwifiex_pfu_buf_desc) * MWIFIEX_MAX_TXRX_BD; else card->txbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) * MWIFIEX_MAX_TXRX_BD; mwifiex_dbg(adapter, INFO, "info: txbd_ring: Allocating %d bytes\n", card->txbd_ring_size); card->txbd_ring_vbase = pci_alloc_consistent(card->dev, card->txbd_ring_size, &card->txbd_ring_pbase); if (!card->txbd_ring_vbase) { mwifiex_dbg(adapter, ERROR, "allocate consistent memory (%d bytes) failed!\n", card->txbd_ring_size); return -ENOMEM; } mwifiex_dbg(adapter, DATA, "info: txbd_ring - base: %p, pbase: %#x:%x, len: %x\n", card->txbd_ring_vbase, (unsigned int)card->txbd_ring_pbase, (u32)((u64)card->txbd_ring_pbase >> 32), card->txbd_ring_size); return mwifiex_init_txq_ring(adapter); } static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; mwifiex_cleanup_txq_ring(adapter); if (card->txbd_ring_vbase) pci_free_consistent(card->dev, card->txbd_ring_size, card->txbd_ring_vbase, card->txbd_ring_pbase); card->txbd_ring_size = 0; card->txbd_wrptr = 0; card->txbd_rdptr = 0 | reg->tx_rollover_ind; card->txbd_ring_vbase = NULL; card->txbd_ring_pbase = 0; return 0; } /* * This function creates buffer descriptor ring for RX */ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; /* * driver maintaines the read pointer and firmware maintaines the write * pointer. 
The write pointer starts at 0 (zero) while the read pointer * starts at zero with rollover bit set */ card->rxbd_wrptr = 0; card->rxbd_rdptr = reg->rx_rollover_ind; if (reg->pfu_enabled) card->rxbd_ring_size = sizeof(struct mwifiex_pfu_buf_desc) * MWIFIEX_MAX_TXRX_BD; else card->rxbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) * MWIFIEX_MAX_TXRX_BD; mwifiex_dbg(adapter, INFO, "info: rxbd_ring: Allocating %d bytes\n", card->rxbd_ring_size); card->rxbd_ring_vbase = pci_alloc_consistent(card->dev, card->rxbd_ring_size, &card->rxbd_ring_pbase); if (!card->rxbd_ring_vbase) { mwifiex_dbg(adapter, ERROR, "allocate consistent memory (%d bytes) failed!\n", card->rxbd_ring_size); return -ENOMEM; } mwifiex_dbg(adapter, DATA, "info: rxbd_ring - base: %p, pbase: %#x:%x, len: %#x\n", card->rxbd_ring_vbase, (u32)card->rxbd_ring_pbase, (u32)((u64)card->rxbd_ring_pbase >> 32), card->rxbd_ring_size); return mwifiex_init_rxq_ring(adapter); } /* * This function deletes Buffer descriptor ring for RX */ static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; mwifiex_cleanup_rxq_ring(adapter); if (card->rxbd_ring_vbase) pci_free_consistent(card->dev, card->rxbd_ring_size, card->rxbd_ring_vbase, card->rxbd_ring_pbase); card->rxbd_ring_size = 0; card->rxbd_wrptr = 0; card->rxbd_rdptr = 0 | reg->rx_rollover_ind; card->rxbd_ring_vbase = NULL; card->rxbd_ring_pbase = 0; return 0; } /* * This function creates buffer descriptor ring for Events */ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; /* * driver maintaines the read pointer and firmware maintaines the write * pointer. 
The write pointer starts at 0 (zero) while the read pointer * starts at zero with rollover bit set */ card->evtbd_wrptr = 0; card->evtbd_rdptr = reg->evt_rollover_ind; card->evtbd_ring_size = sizeof(struct mwifiex_evt_buf_desc) * MWIFIEX_MAX_EVT_BD; mwifiex_dbg(adapter, INFO, "info: evtbd_ring: Allocating %d bytes\n", card->evtbd_ring_size); card->evtbd_ring_vbase = pci_alloc_consistent(card->dev, card->evtbd_ring_size, &card->evtbd_ring_pbase); if (!card->evtbd_ring_vbase) { mwifiex_dbg(adapter, ERROR, "allocate consistent memory (%d bytes) failed!\n", card->evtbd_ring_size); return -ENOMEM; } mwifiex_dbg(adapter, EVENT, "info: CMDRSP/EVT bd_ring - base: %p pbase: %#x:%x len: %#x\n", card->evtbd_ring_vbase, (u32)card->evtbd_ring_pbase, (u32)((u64)card->evtbd_ring_pbase >> 32), card->evtbd_ring_size); return mwifiex_pcie_init_evt_ring(adapter); } /* * This function deletes Buffer descriptor ring for Events */ static int mwifiex_pcie_delete_evtbd_ring(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; mwifiex_cleanup_evt_ring(adapter); if (card->evtbd_ring_vbase) pci_free_consistent(card->dev, card->evtbd_ring_size, card->evtbd_ring_vbase, card->evtbd_ring_pbase); card->evtbd_wrptr = 0; card->evtbd_rdptr = 0 | reg->evt_rollover_ind; card->evtbd_ring_size = 0; card->evtbd_ring_vbase = NULL; card->evtbd_ring_pbase = 0; return 0; } /* * This function allocates a buffer for CMDRSP */ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; struct sk_buff *skb; /* Allocate memory for receiving command response data */ skb = dev_alloc_skb(MWIFIEX_UPLD_SIZE); if (!skb) { mwifiex_dbg(adapter, ERROR, "Unable to allocate skb for command response data.\n"); return -ENOMEM; } skb_put(skb, MWIFIEX_UPLD_SIZE); if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, PCI_DMA_FROMDEVICE)) { kfree_skb(skb); return -1; } card->cmdrsp_buf = skb; return 0; } /* * This function deletes a buffer for CMDRSP */ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter) { struct pcie_service_card *card; if (!adapter) return 0; card = adapter->card; if (card && card->cmdrsp_buf) { mwifiex_unmap_pci_memory(adapter, card->cmdrsp_buf, PCI_DMA_FROMDEVICE); dev_kfree_skb_any(card->cmdrsp_buf); card->cmdrsp_buf = NULL; } if (card && card->cmd_buf) { mwifiex_unmap_pci_memory(adapter, card->cmd_buf, PCI_DMA_TODEVICE); dev_kfree_skb_any(card->cmd_buf); card->cmd_buf = NULL; } return 0; } /* * This function allocates a buffer for sleep cookie */ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; u32 tmp; card->sleep_cookie_vbase = pci_alloc_consistent(card->dev, sizeof(u32), &card->sleep_cookie_pbase); if (!card->sleep_cookie_vbase) { mwifiex_dbg(adapter, ERROR, "pci_alloc_consistent failed!\n"); return -ENOMEM; } /* Init val of Sleep Cookie */ tmp = FW_AWAKE_COOKIE; put_unaligned(tmp, card->sleep_cookie_vbase); mwifiex_dbg(adapter, INFO, "alloc_scook: sleep cookie=0x%x\n", get_unaligned(card->sleep_cookie_vbase)); return 0; } /* * This function deletes buffer for sleep cookie */ static int mwifiex_pcie_delete_sleep_cookie_buf(struct mwifiex_adapter *adapter) { struct pcie_service_card *card; if (!adapter) return 0; card = adapter->card; if (card && card->sleep_cookie_vbase) { pci_free_consistent(card->dev, sizeof(u32), card->sleep_cookie_vbase, card->sleep_cookie_pbase); 
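/* Drop the stale pointer so a repeated cleanup call sees NULL and skips the free. */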
card->sleep_cookie_vbase = NULL; } return 0; } /* This function flushes the TX buffer descriptor ring * This function defined as handler is also called while cleaning TXRX * during disconnect/ bss stop. */ static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; if (!mwifiex_pcie_txbd_empty(card, card->txbd_rdptr)) { card->txbd_flush = 1; /* write pointer already set at last send * send dnld-rdy intr again, wait for completion. */ if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_DNLD_RDY)) { mwifiex_dbg(adapter, ERROR, "failed to assert dnld-rdy interrupt.\n"); return -1; } } return 0; } /* * This function unmaps and frees downloaded data buffer */ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter) { struct sk_buff *skb; u32 wrdoneidx, rdptr, num_tx_buffs, unmap_count = 0; struct mwifiex_pcie_buf_desc *desc; struct mwifiex_pfu_buf_desc *desc2; struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; if (!mwifiex_pcie_ok_to_access_hw(adapter)) mwifiex_pm_wakeup_card(adapter); /* Read the TX ring read pointer set by firmware */ if (mwifiex_read_reg(adapter, reg->tx_rdptr, &rdptr)) { mwifiex_dbg(adapter, ERROR, "SEND COMP: failed to read reg->tx_rdptr\n"); return -1; } mwifiex_dbg(adapter, DATA, "SEND COMP: rdptr_prev=0x%x, rdptr=0x%x\n", card->txbd_rdptr, rdptr); num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr; /* free from previous txbd_rdptr to current txbd_rdptr */ while (((card->txbd_rdptr & reg->tx_mask) != (rdptr & reg->tx_mask)) || ((card->txbd_rdptr & reg->tx_rollover_ind) != (rdptr & reg->tx_rollover_ind))) { wrdoneidx = (card->txbd_rdptr & reg->tx_mask) >> reg->tx_start_ptr; skb = card->tx_buf_list[wrdoneidx]; if (skb) { mwifiex_dbg(adapter, DATA, "SEND COMP: Detach skb %p at txbd_rdidx=%d\n", skb, wrdoneidx); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); unmap_count++; if (card->txbd_flush) mwifiex_write_data_complete(adapter, skb, 0, -1); else mwifiex_write_data_complete(adapter, skb, 0, 0); atomic_dec(&adapter->tx_hw_pending); } card->tx_buf_list[wrdoneidx] = NULL; if (reg->pfu_enabled) { desc2 = card->txbd_ring[wrdoneidx]; memset(desc2, 0, sizeof(*desc2)); } else { desc = card->txbd_ring[wrdoneidx]; memset(desc, 0, sizeof(*desc)); } switch (card->dev->device) { case PCIE_DEVICE_ID_MARVELL_88W8766P: card->txbd_rdptr++; break; case PCIE_DEVICE_ID_MARVELL_88W8897: case PCIE_DEVICE_ID_MARVELL_88W8997: card->txbd_rdptr += reg->ring_tx_start_ptr; break; } if ((card->txbd_rdptr & reg->tx_mask) == num_tx_buffs) card->txbd_rdptr = ((card->txbd_rdptr & reg->tx_rollover_ind) ^ reg->tx_rollover_ind); } if (unmap_count) adapter->data_sent = false; if (card->txbd_flush) { if (mwifiex_pcie_txbd_empty(card, card->txbd_rdptr)) card->txbd_flush = 0; else mwifiex_clean_pcie_ring_buf(adapter); } return 0; } /* This function sends data buffer to device. First 4 bytes of payload * are filled with payload length and payload type. Then this payload * is mapped to PCI device memory. Tx ring pointers are advanced accordingly. * Download ready interrupt to FW is deffered if Tx ring is not full and * additional payload can be accomodated. * Caller must ensure tx_param parameter to this function is not NULL. 
*/ static int mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, struct mwifiex_tx_param *tx_param) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; u32 wrindx, num_tx_buffs, rx_val; int ret; dma_addr_t buf_pa; struct mwifiex_pcie_buf_desc *desc = NULL; struct mwifiex_pfu_buf_desc *desc2 = NULL; if (!(skb->data && skb->len)) { mwifiex_dbg(adapter, ERROR, "%s(): invalid parameter <%p, %#x>\n", __func__, skb->data, skb->len); return -1; } if (!mwifiex_pcie_ok_to_access_hw(adapter)) mwifiex_pm_wakeup_card(adapter); num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr; mwifiex_dbg(adapter, DATA, "info: SEND DATA: <Rd: %#x, Wr: %#x>\n", card->txbd_rdptr, card->txbd_wrptr); if (mwifiex_pcie_txbd_not_full(card)) { u8 *payload; adapter->data_sent = true; payload = skb->data; put_unaligned_le16((u16)skb->len, payload + 0); put_unaligned_le16(MWIFIEX_TYPE_DATA, payload + 2); if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE)) return -1; wrindx = (card->txbd_wrptr & reg->tx_mask) >> reg->tx_start_ptr; buf_pa = MWIFIEX_SKB_DMA_ADDR(skb); card->tx_buf_list[wrindx] = skb; atomic_inc(&adapter->tx_hw_pending); if (reg->pfu_enabled) { desc2 = card->txbd_ring[wrindx]; desc2->paddr = buf_pa; desc2->len = (u16)skb->len; desc2->frag_len = (u16)skb->len; desc2->offset = 0; desc2->flags = MWIFIEX_BD_FLAG_FIRST_DESC | MWIFIEX_BD_FLAG_LAST_DESC; } else { desc = card->txbd_ring[wrindx]; desc->paddr = buf_pa; desc->len = (u16)skb->len; desc->flags = MWIFIEX_BD_FLAG_FIRST_DESC | MWIFIEX_BD_FLAG_LAST_DESC; } switch (card->dev->device) { case PCIE_DEVICE_ID_MARVELL_88W8766P: card->txbd_wrptr++; break; case PCIE_DEVICE_ID_MARVELL_88W8897: case PCIE_DEVICE_ID_MARVELL_88W8997: card->txbd_wrptr += reg->ring_tx_start_ptr; break; } if ((card->txbd_wrptr & reg->tx_mask) == num_tx_buffs) card->txbd_wrptr = ((card->txbd_wrptr & reg->tx_rollover_ind) ^ reg->tx_rollover_ind); rx_val = card->rxbd_rdptr & reg->rx_wrap_mask; /* Write the TX ring write pointer in to reg->tx_wrptr */ if (mwifiex_write_reg(adapter, reg->tx_wrptr, card->txbd_wrptr | rx_val)) { mwifiex_dbg(adapter, ERROR, "SEND DATA: failed to write reg->tx_wrptr\n"); ret = -1; goto done_unmap; } if ((mwifiex_pcie_txbd_not_full(card)) && tx_param->next_pkt_len) { /* have more packets and TxBD still can hold more */ mwifiex_dbg(adapter, DATA, "SEND DATA: delay dnld-rdy interrupt.\n"); adapter->data_sent = false; } else { /* Send the TX ready interrupt */ if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_DNLD_RDY)) { mwifiex_dbg(adapter, ERROR, "SEND DATA: failed to assert dnld-rdy interrupt.\n"); ret = -1; goto done_unmap; } } mwifiex_dbg(adapter, DATA, "info: SEND DATA: Updated <Rd: %#x, Wr:\t" "%#x> and sent packet to firmware successfully\n", card->txbd_rdptr, card->txbd_wrptr); } else { mwifiex_dbg(adapter, DATA, "info: TX Ring full, can't send packets to fw\n"); adapter->data_sent = true; /* Send the TX ready interrupt */ if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_DNLD_RDY)) mwifiex_dbg(adapter, ERROR, "SEND DATA: failed to assert door-bell intr\n"); return -EBUSY; } return -EINPROGRESS; done_unmap: mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); card->tx_buf_list[wrindx] = NULL; atomic_dec(&adapter->tx_hw_pending); if (reg->pfu_enabled) memset(desc2, 0, sizeof(*desc2)); else memset(desc, 0, sizeof(*desc)); return ret; } /* * This function handles received buffer ring and * dispatches packets to upper */ static int 
mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; u32 wrptr, rd_index, tx_val; dma_addr_t buf_pa; int ret = 0; struct sk_buff *skb_tmp = NULL; struct mwifiex_pcie_buf_desc *desc; struct mwifiex_pfu_buf_desc *desc2; if (!mwifiex_pcie_ok_to_access_hw(adapter)) mwifiex_pm_wakeup_card(adapter); /* Read the RX ring Write pointer set by firmware */ if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) { mwifiex_dbg(adapter, ERROR, "RECV DATA: failed to read reg->rx_wrptr\n"); ret = -1; goto done; } card->rxbd_wrptr = wrptr; while (((wrptr & reg->rx_mask) != (card->rxbd_rdptr & reg->rx_mask)) || ((wrptr & reg->rx_rollover_ind) == (card->rxbd_rdptr & reg->rx_rollover_ind))) { struct sk_buff *skb_data; u16 rx_len; rd_index = card->rxbd_rdptr & reg->rx_mask; skb_data = card->rx_buf_list[rd_index]; /* If skb allocation was failed earlier for Rx packet, * rx_buf_list[rd_index] would have been left with a NULL. */ if (!skb_data) return -ENOMEM; mwifiex_unmap_pci_memory(adapter, skb_data, PCI_DMA_FROMDEVICE); card->rx_buf_list[rd_index] = NULL; /* Get data length from interface header - * first 2 bytes for len, next 2 bytes is for type */ rx_len = get_unaligned_le16(skb_data->data); if (WARN_ON(rx_len <= adapter->intf_hdr_len || rx_len > MWIFIEX_RX_DATA_BUF_SIZE)) { mwifiex_dbg(adapter, ERROR, "Invalid RX len %d, Rd=%#x, Wr=%#x\n", rx_len, card->rxbd_rdptr, wrptr); dev_kfree_skb_any(skb_data); } else { skb_put(skb_data, rx_len); mwifiex_dbg(adapter, DATA, "info: RECV DATA: Rd=%#x, Wr=%#x, Len=%d\n", card->rxbd_rdptr, wrptr, rx_len); skb_pull(skb_data, adapter->intf_hdr_len); if (adapter->rx_work_enabled) { skb_queue_tail(&adapter->rx_data_q, skb_data); adapter->data_received = true; atomic_inc(&adapter->rx_pending); } else { mwifiex_handle_rx_packet(adapter, skb_data); } } skb_tmp = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE, GFP_KERNEL); if (!skb_tmp) { mwifiex_dbg(adapter, ERROR, "Unable to allocate skb.\n"); return -ENOMEM; } if (mwifiex_map_pci_memory(adapter, skb_tmp, MWIFIEX_RX_DATA_BUF_SIZE, PCI_DMA_FROMDEVICE)) return -1; buf_pa = MWIFIEX_SKB_DMA_ADDR(skb_tmp); mwifiex_dbg(adapter, INFO, "RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n", skb_tmp, rd_index); card->rx_buf_list[rd_index] = skb_tmp; if (reg->pfu_enabled) { desc2 = card->rxbd_ring[rd_index]; desc2->paddr = buf_pa; desc2->len = skb_tmp->len; desc2->frag_len = skb_tmp->len; desc2->offset = 0; desc2->flags = reg->ring_flag_sop | reg->ring_flag_eop; } else { desc = card->rxbd_ring[rd_index]; desc->paddr = buf_pa; desc->len = skb_tmp->len; desc->flags = 0; } if ((++card->rxbd_rdptr & reg->rx_mask) == MWIFIEX_MAX_TXRX_BD) { card->rxbd_rdptr = ((card->rxbd_rdptr & reg->rx_rollover_ind) ^ reg->rx_rollover_ind); } mwifiex_dbg(adapter, DATA, "info: RECV DATA: <Rd: %#x, Wr: %#x>\n", card->rxbd_rdptr, wrptr); tx_val = card->txbd_wrptr & reg->tx_wrap_mask; /* Write the RX ring read pointer in to reg->rx_rdptr */ if (mwifiex_write_reg(adapter, reg->rx_rdptr, card->rxbd_rdptr | tx_val)) { mwifiex_dbg(adapter, DATA, "RECV DATA: failed to write reg->rx_rdptr\n"); ret = -1; goto done; } /* Read the RX ring Write pointer set by firmware */ if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) { mwifiex_dbg(adapter, ERROR, "RECV DATA: failed to read reg->rx_wrptr\n"); ret = -1; goto done; } mwifiex_dbg(adapter, DATA, "info: RECV DATA: Rcvd packet from fw successfully\n"); card->rxbd_wrptr = wrptr; } done: return ret; } 
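/*
 * Editor's note (illustrative addition, not part of the original driver):
 * the TX/RX descriptor rings above track progress with a masked ring index
 * plus a separate rollover bit, so that "same index" can still distinguish
 * an empty ring from one that has wrapped a full lap.  The standalone
 * sketch below models only that wrap arithmetic; RING_SIZE, INDEX_MASK and
 * ROLLOVER_BIT are made-up stand-ins for the per-chip values kept in
 * mwifiex_pcie_card_reg (tx_mask, tx_rollover_ind, and so on), and the
 * ring-full check the driver performs before posting is omitted.  It is
 * wrapped in #if 0 so it stays inert if this file is ever compiled.
 */
#if 0 /* standalone illustration, not compiled with the driver */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE	4u
#define INDEX_MASK	0x0000000fu	/* hypothetical, holds 0..RING_SIZE */
#define ROLLOVER_BIT	0x00000010u	/* toggled on every wrap */

/* Advance a pointer by one descriptor, wrapping and toggling rollover. */
static uint32_t ring_advance(uint32_t ptr)
{
	ptr++;
	if ((ptr & INDEX_MASK) == RING_SIZE)
		ptr = (ptr & ROLLOVER_BIT) ^ ROLLOVER_BIT;
	return ptr;
}

/* Two pointers mean the same position only if index and rollover both match. */
static bool ring_caught_up(uint32_t rd, uint32_t wr)
{
	return (rd & INDEX_MASK) == (wr & INDEX_MASK) &&
	       (rd & ROLLOVER_BIT) == (wr & ROLLOVER_BIT);
}

int main(void)
{
	uint32_t rd = 0, wr = 0;
	unsigned int i;

	/* Post and drain six descriptors so the ring wraps once. */
	for (i = 0; i < RING_SIZE + 2; i++) {
		wr = ring_advance(wr);		/* producer posts one buffer */
		while (!ring_caught_up(rd, wr)) {
			printf("consume index %u (rollover %u)\n",
			       (unsigned int)(rd & INDEX_MASK),
			       (unsigned int)!!(rd & ROLLOVER_BIT));
			rd = ring_advance(rd);	/* consumer frees it */
		}
	}
	return 0;
}
#endif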
/* * This function downloads the boot command to device */ static int mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) { dma_addr_t buf_pa; struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; if (!(skb->data && skb->len)) { mwifiex_dbg(adapter, ERROR, "Invalid parameter in %s <%p. len %d>\n", __func__, skb->data, skb->len); return -1; } if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE)) return -1; buf_pa = MWIFIEX_SKB_DMA_ADDR(skb); /* Write the lower 32bits of the physical address to low command * address scratch register */ if (mwifiex_write_reg(adapter, reg->cmd_addr_lo, (u32)buf_pa)) { mwifiex_dbg(adapter, ERROR, "%s: failed to write download command to boot code.\n", __func__); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); return -1; } /* Write the upper 32bits of the physical address to high command * address scratch register */ if (mwifiex_write_reg(adapter, reg->cmd_addr_hi, (u32)((u64)buf_pa >> 32))) { mwifiex_dbg(adapter, ERROR, "%s: failed to write download command to boot code.\n", __func__); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); return -1; } /* Write the command length to cmd_size scratch register */ if (mwifiex_write_reg(adapter, reg->cmd_size, skb->len)) { mwifiex_dbg(adapter, ERROR, "%s: failed to write command len to cmd_size scratch reg\n", __func__); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); return -1; } /* Ring the door bell */ if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_DOOR_BELL)) { mwifiex_dbg(adapter, ERROR, "%s: failed to assert door-bell intr\n", __func__); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); return -1; } return 0; } /* This function init rx port in firmware which in turn enables to receive data * from device before transmitting any packet. */ static int mwifiex_pcie_init_fw_port(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; int tx_wrap = card->txbd_wrptr & reg->tx_wrap_mask; /* Write the RX ring read pointer in to reg->rx_rdptr */ if (mwifiex_write_reg(adapter, reg->rx_rdptr, card->rxbd_rdptr | tx_wrap)) { mwifiex_dbg(adapter, ERROR, "RECV DATA: failed to write reg->rx_rdptr\n"); return -1; } return 0; } /* This function downloads commands to the device */ static int mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; int ret = 0; dma_addr_t cmd_buf_pa, cmdrsp_buf_pa; u8 *payload = (u8 *)skb->data; if (!(skb->data && skb->len)) { mwifiex_dbg(adapter, ERROR, "Invalid parameter in %s <%p, %#x>\n", __func__, skb->data, skb->len); return -1; } /* Make sure a command response buffer is available */ if (!card->cmdrsp_buf) { mwifiex_dbg(adapter, ERROR, "No response buffer available, send command failed\n"); return -EBUSY; } if (!mwifiex_pcie_ok_to_access_hw(adapter)) mwifiex_pm_wakeup_card(adapter); adapter->cmd_sent = true; put_unaligned_le16((u16)skb->len, &payload[0]); put_unaligned_le16(MWIFIEX_TYPE_CMD, &payload[2]); if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE)) return -1; card->cmd_buf = skb; /* * Need to keep a reference, since core driver might free up this * buffer before we've unmapped it. */ skb_get(skb); /* To send a command, the driver will: 1. 
Write the 64bit physical address of the data buffer to cmd response address low + cmd response address high 2. Ring the door bell (i.e. set the door bell interrupt) In response to door bell interrupt, the firmware will perform the DMA of the command packet (first header to obtain the total length and then rest of the command). */ if (card->cmdrsp_buf) { cmdrsp_buf_pa = MWIFIEX_SKB_DMA_ADDR(card->cmdrsp_buf); /* Write the lower 32bits of the cmdrsp buffer physical address */ if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, (u32)cmdrsp_buf_pa)) { mwifiex_dbg(adapter, ERROR, "Failed to write download cmd to boot code.\n"); ret = -1; goto done; } /* Write the upper 32bits of the cmdrsp buffer physical address */ if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, (u32)((u64)cmdrsp_buf_pa >> 32))) { mwifiex_dbg(adapter, ERROR, "Failed to write download cmd to boot code.\n"); ret = -1; goto done; } } cmd_buf_pa = MWIFIEX_SKB_DMA_ADDR(card->cmd_buf); /* Write the lower 32bits of the physical address to reg->cmd_addr_lo */ if (mwifiex_write_reg(adapter, reg->cmd_addr_lo, (u32)cmd_buf_pa)) { mwifiex_dbg(adapter, ERROR, "Failed to write download cmd to boot code.\n"); ret = -1; goto done; } /* Write the upper 32bits of the physical address to reg->cmd_addr_hi */ if (mwifiex_write_reg(adapter, reg->cmd_addr_hi, (u32)((u64)cmd_buf_pa >> 32))) { mwifiex_dbg(adapter, ERROR, "Failed to write download cmd to boot code.\n"); ret = -1; goto done; } /* Write the command length to reg->cmd_size */ if (mwifiex_write_reg(adapter, reg->cmd_size, card->cmd_buf->len)) { mwifiex_dbg(adapter, ERROR, "Failed to write cmd len to reg->cmd_size\n"); ret = -1; goto done; } /* Ring the door bell */ if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_DOOR_BELL)) { mwifiex_dbg(adapter, ERROR, "Failed to assert door-bell intr\n"); ret = -1; goto done; } done: if (ret) adapter->cmd_sent = false; return 0; } /* * This function handles command complete interrupt */ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; struct sk_buff *skb = card->cmdrsp_buf; int count = 0; u16 rx_len; mwifiex_dbg(adapter, CMD, "info: Rx CMD Response\n"); if (adapter->curr_cmd) mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE); else pci_dma_sync_single_for_cpu(card->dev, MWIFIEX_SKB_DMA_ADDR(skb), MWIFIEX_UPLD_SIZE, PCI_DMA_FROMDEVICE); /* Unmap the command as a response has been received. 
*/ if (card->cmd_buf) { mwifiex_unmap_pci_memory(adapter, card->cmd_buf, PCI_DMA_TODEVICE); dev_kfree_skb_any(card->cmd_buf); card->cmd_buf = NULL; } rx_len = get_unaligned_le16(skb->data); skb_put(skb, MWIFIEX_UPLD_SIZE - skb->len); skb_trim(skb, rx_len); if (!adapter->curr_cmd) { if (adapter->ps_state == PS_STATE_SLEEP_CFM) { pci_dma_sync_single_for_device(card->dev, MWIFIEX_SKB_DMA_ADDR(skb), MWIFIEX_SLEEP_COOKIE_SIZE, PCI_DMA_FROMDEVICE); if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_SLEEP_CFM_DONE)) { mwifiex_dbg(adapter, ERROR, "Write register failed\n"); return -1; } mwifiex_delay_for_sleep_cookie(adapter, MWIFIEX_MAX_DELAY_COUNT); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE); skb_pull(skb, adapter->intf_hdr_len); while (reg->sleep_cookie && (count++ < 10) && mwifiex_pcie_ok_to_access_hw(adapter)) usleep_range(50, 60); mwifiex_pcie_enable_host_int(adapter); mwifiex_process_sleep_confirm_resp(adapter, skb->data, skb->len); } else { mwifiex_dbg(adapter, ERROR, "There is no command but got cmdrsp\n"); } memcpy(adapter->upld_buf, skb->data, min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len)); skb_push(skb, adapter->intf_hdr_len); if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, PCI_DMA_FROMDEVICE)) return -1; } else if (mwifiex_pcie_ok_to_access_hw(adapter)) { skb_pull(skb, adapter->intf_hdr_len); adapter->curr_cmd->resp_skb = skb; adapter->cmd_resp_received = true; /* Take the pointer and set it to CMD node and will return in the response complete callback */ card->cmdrsp_buf = NULL; /* Clear the cmd-rsp buffer address in scratch registers. This will prevent firmware from writing to the same response buffer again. */ if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, 0)) { mwifiex_dbg(adapter, ERROR, "cmd_done: failed to clear cmd_rsp_addr_lo\n"); return -1; } /* Write the upper 32bits of the cmdrsp buffer physical address */ if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, 0)) { mwifiex_dbg(adapter, ERROR, "cmd_done: failed to clear cmd_rsp_addr_hi\n"); return -1; } } return 0; } /* * Command Response processing complete handler */ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter, struct sk_buff *skb) { struct pcie_service_card *card = adapter->card; if (skb) { card->cmdrsp_buf = skb; skb_push(card->cmdrsp_buf, adapter->intf_hdr_len); if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, PCI_DMA_FROMDEVICE)) return -1; } return 0; } /* * This function handles firmware event ready interrupt */ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK; u32 wrptr, event; struct mwifiex_evt_buf_desc *desc; if (!mwifiex_pcie_ok_to_access_hw(adapter)) mwifiex_pm_wakeup_card(adapter); if (adapter->event_received) { mwifiex_dbg(adapter, EVENT, "info: Event being processed,\t" "do not process this interrupt just yet\n"); return 0; } if (rdptr >= MWIFIEX_MAX_EVT_BD) { mwifiex_dbg(adapter, ERROR, "info: Invalid read pointer...\n"); return -1; } /* Read the event ring write pointer set by firmware */ if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) { mwifiex_dbg(adapter, ERROR, "EventReady: failed to read reg->evt_wrptr\n"); return -1; } mwifiex_dbg(adapter, EVENT, "info: EventReady: Initial <Rd: 0x%x, Wr: 0x%x>", card->evtbd_rdptr, wrptr); if (((wrptr & MWIFIEX_EVTBD_MASK) != (card->evtbd_rdptr & MWIFIEX_EVTBD_MASK)) || ((wrptr & 
reg->evt_rollover_ind) == (card->evtbd_rdptr & reg->evt_rollover_ind))) { struct sk_buff *skb_cmd; __le16 data_len = 0; u16 evt_len; mwifiex_dbg(adapter, INFO, "info: Read Index: %d\n", rdptr); skb_cmd = card->evt_buf_list[rdptr]; mwifiex_unmap_pci_memory(adapter, skb_cmd, PCI_DMA_FROMDEVICE); /* Take the pointer and set it to event pointer in adapter and will return back after event handling callback */ card->evt_buf_list[rdptr] = NULL; desc = card->evtbd_ring[rdptr]; memset(desc, 0, sizeof(*desc)); event = get_unaligned_le32( &skb_cmd->data[adapter->intf_hdr_len]); adapter->event_cause = event; /* The first 4bytes will be the event transfer header len is 2 bytes followed by type which is 2 bytes */ memcpy(&data_len, skb_cmd->data, sizeof(__le16)); evt_len = le16_to_cpu(data_len); skb_trim(skb_cmd, evt_len); skb_pull(skb_cmd, adapter->intf_hdr_len); mwifiex_dbg(adapter, EVENT, "info: Event length: %d\n", evt_len); if (evt_len > MWIFIEX_EVENT_HEADER_LEN && evt_len < MAX_EVENT_SIZE) memcpy(adapter->event_body, skb_cmd->data + MWIFIEX_EVENT_HEADER_LEN, evt_len - MWIFIEX_EVENT_HEADER_LEN); adapter->event_received = true; adapter->event_skb = skb_cmd; /* Do not update the event read pointer here, wait till the buffer is released. This is just to make things simpler, we need to find a better method of managing these buffers. */ } else { if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_EVENT_DONE)) { mwifiex_dbg(adapter, ERROR, "Write register failed\n"); return -1; } } return 0; } /* * Event processing complete handler */ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, struct sk_buff *skb) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; int ret = 0; u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK; u32 wrptr; struct mwifiex_evt_buf_desc *desc; if (!skb) return 0; if (rdptr >= MWIFIEX_MAX_EVT_BD) { mwifiex_dbg(adapter, ERROR, "event_complete: Invalid rdptr 0x%x\n", rdptr); return -EINVAL; } /* Read the event ring write pointer set by firmware */ if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) { mwifiex_dbg(adapter, ERROR, "event_complete: failed to read reg->evt_wrptr\n"); return -1; } if (!card->evt_buf_list[rdptr]) { skb_push(skb, adapter->intf_hdr_len); skb_put(skb, MAX_EVENT_SIZE - skb->len); if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE, PCI_DMA_FROMDEVICE)) return -1; card->evt_buf_list[rdptr] = skb; desc = card->evtbd_ring[rdptr]; desc->paddr = MWIFIEX_SKB_DMA_ADDR(skb); desc->len = (u16)skb->len; desc->flags = 0; skb = NULL; } else { mwifiex_dbg(adapter, ERROR, "info: ERROR: buf still valid at index %d, <%p, %p>\n", rdptr, card->evt_buf_list[rdptr], skb); } if ((++card->evtbd_rdptr & MWIFIEX_EVTBD_MASK) == MWIFIEX_MAX_EVT_BD) { card->evtbd_rdptr = ((card->evtbd_rdptr & reg->evt_rollover_ind) ^ reg->evt_rollover_ind); } mwifiex_dbg(adapter, EVENT, "info: Updated <Rd: 0x%x, Wr: 0x%x>", card->evtbd_rdptr, wrptr); /* Write the event ring read pointer in to reg->evt_rdptr */ if (mwifiex_write_reg(adapter, reg->evt_rdptr, card->evtbd_rdptr)) { mwifiex_dbg(adapter, ERROR, "event_complete: failed to read reg->evt_rdptr\n"); return -1; } mwifiex_dbg(adapter, EVENT, "info: Check Events Again\n"); ret = mwifiex_pcie_process_event_ready(adapter); return ret; } /* Combo firmware image is a combination of * (1) combo crc heaer, start with CMD5 * (2) bluetooth image, start with CMD7, end with CMD6, data wrapped in CMD1. * (3) wifi image. 
* * This function bypass the header and bluetooth part, return * the offset of tail wifi-only part. If the image is already wifi-only, * that is start with CMD1, return 0. */ static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter, const void *firmware, u32 firmware_len) { const struct mwifiex_fw_data *fwdata; u32 offset = 0, data_len, dnld_cmd; int ret = 0; bool cmd7_before = false, first_cmd = false; while (1) { /* Check for integer and buffer overflow */ if (offset + sizeof(fwdata->header) < sizeof(fwdata->header) || offset + sizeof(fwdata->header) >= firmware_len) { mwifiex_dbg(adapter, ERROR, "extract wifi-only fw failure!\n"); ret = -1; goto done; } fwdata = firmware + offset; dnld_cmd = le32_to_cpu(fwdata->header.dnld_cmd); data_len = le32_to_cpu(fwdata->header.data_length); /* Skip past header */ offset += sizeof(fwdata->header); switch (dnld_cmd) { case MWIFIEX_FW_DNLD_CMD_1: if (offset + data_len < data_len) { mwifiex_dbg(adapter, ERROR, "bad FW parse\n"); ret = -1; goto done; } /* Image start with cmd1, already wifi-only firmware */ if (!first_cmd) { mwifiex_dbg(adapter, MSG, "input wifi-only firmware\n"); return 0; } if (!cmd7_before) { mwifiex_dbg(adapter, ERROR, "no cmd7 before cmd1!\n"); ret = -1; goto done; } offset += data_len; break; case MWIFIEX_FW_DNLD_CMD_5: first_cmd = true; /* Check for integer overflow */ if (offset + data_len < data_len) { mwifiex_dbg(adapter, ERROR, "bad FW parse\n"); ret = -1; goto done; } offset += data_len; break; case MWIFIEX_FW_DNLD_CMD_6: first_cmd = true; /* Check for integer overflow */ if (offset + data_len < data_len) { mwifiex_dbg(adapter, ERROR, "bad FW parse\n"); ret = -1; goto done; } offset += data_len; if (offset >= firmware_len) { mwifiex_dbg(adapter, ERROR, "extract wifi-only fw failure!\n"); ret = -1; } else { ret = offset; } goto done; case MWIFIEX_FW_DNLD_CMD_7: first_cmd = true; cmd7_before = true; break; default: mwifiex_dbg(adapter, ERROR, "unknown dnld_cmd %d\n", dnld_cmd); ret = -1; goto done; } } done: return ret; } /* * This function downloads the firmware to the card. * * Firmware is downloaded to the card in blocks. Every block download * is tested for CRC errors, and retried a number of times before * returning failure. */ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, struct mwifiex_fw_image *fw) { int ret; u8 *firmware = fw->fw_buf; u32 firmware_len = fw->fw_len; u32 offset = 0; struct sk_buff *skb; u32 txlen, tx_blocks = 0, tries, len, val; u32 block_retry_cnt = 0; struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; if (!firmware || !firmware_len) { mwifiex_dbg(adapter, ERROR, "No firmware image found! 
Terminating download\n"); return -1; } mwifiex_dbg(adapter, INFO, "info: Downloading FW image (%d bytes)\n", firmware_len); if (mwifiex_pcie_disable_host_int(adapter)) { mwifiex_dbg(adapter, ERROR, "%s: Disabling interrupts failed.\n", __func__); return -1; } skb = dev_alloc_skb(MWIFIEX_UPLD_SIZE); if (!skb) { ret = -ENOMEM; goto done; } ret = mwifiex_read_reg(adapter, PCIE_SCRATCH_13_REG, &val); if (ret) { mwifiex_dbg(adapter, FATAL, "Failed to read scratch register 13\n"); goto done; } /* PCIE FLR case: extract wifi part from combo firmware*/ if (val == MWIFIEX_PCIE_FLR_HAPPENS) { ret = mwifiex_extract_wifi_fw(adapter, firmware, firmware_len); if (ret < 0) { mwifiex_dbg(adapter, ERROR, "Failed to extract wifi fw\n"); goto done; } offset = ret; mwifiex_dbg(adapter, MSG, "info: dnld wifi firmware from %d bytes\n", offset); } /* Perform firmware data transfer */ do { u32 ireg_intr = 0; /* More data? */ if (offset >= firmware_len) break; for (tries = 0; tries < MAX_POLL_TRIES; tries++) { ret = mwifiex_read_reg(adapter, reg->cmd_size, &len); if (ret) { mwifiex_dbg(adapter, FATAL, "Failed reading len from boot code\n"); goto done; } if (len) break; usleep_range(10, 20); } if (!len) { break; } else if (len > MWIFIEX_UPLD_SIZE) { mwifiex_dbg(adapter, ERROR, "FW download failure @ %d, invalid length %d\n", offset, len); ret = -1; goto done; } txlen = len; if (len & BIT(0)) { block_retry_cnt++; if (block_retry_cnt > MAX_WRITE_IOMEM_RETRY) { mwifiex_dbg(adapter, ERROR, "FW download failure @ %d, over max\t" "retry count\n", offset); ret = -1; goto done; } mwifiex_dbg(adapter, ERROR, "FW CRC error indicated by the\t" "helper: len = 0x%04X, txlen = %d\n", len, txlen); len &= ~BIT(0); /* Setting this to 0 to resend from same offset */ txlen = 0; } else { block_retry_cnt = 0; /* Set blocksize to transfer - checking for last block */ if (firmware_len - offset < txlen) txlen = firmware_len - offset; tx_blocks = (txlen + card->pcie.blksz_fw_dl - 1) / card->pcie.blksz_fw_dl; /* Copy payload to buffer */ memmove(skb->data, &firmware[offset], txlen); } skb_put(skb, MWIFIEX_UPLD_SIZE - skb->len); skb_trim(skb, tx_blocks * card->pcie.blksz_fw_dl); /* Send the boot command to device */ if (mwifiex_pcie_send_boot_cmd(adapter, skb)) { mwifiex_dbg(adapter, ERROR, "Failed to send firmware download command\n"); ret = -1; goto done; } /* Wait for the command done interrupt */ for (tries = 0; tries < MAX_POLL_TRIES; tries++) { if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS, &ireg_intr)) { mwifiex_dbg(adapter, ERROR, "%s: Failed to read\t" "interrupt status during fw dnld.\n", __func__); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); ret = -1; goto done; } if (!(ireg_intr & CPU_INTR_DOOR_BELL)) break; usleep_range(10, 20); } if (ireg_intr & CPU_INTR_DOOR_BELL) { mwifiex_dbg(adapter, ERROR, "%s: Card failed to ACK download\n", __func__); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); ret = -1; goto done; } mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); offset += txlen; } while (true); mwifiex_dbg(adapter, MSG, "info: FW download over, size %d bytes\n", offset); ret = 0; done: dev_kfree_skb_any(skb); return ret; } /* * This function checks the firmware status in card. 
*/ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) { int ret = 0; u32 firmware_stat; struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; u32 tries; /* Mask spurios interrupts */ if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS_MASK, HOST_INTR_MASK)) { mwifiex_dbg(adapter, ERROR, "Write register failed\n"); return -1; } mwifiex_dbg(adapter, INFO, "Setting driver ready signature\n"); if (mwifiex_write_reg(adapter, reg->drv_rdy, FIRMWARE_READY_PCIE)) { mwifiex_dbg(adapter, ERROR, "Failed to write driver ready signature\n"); return -1; } /* Wait for firmware initialization event */ for (tries = 0; tries < poll_num; tries++) { if (mwifiex_read_reg(adapter, reg->fw_status, &firmware_stat)) ret = -1; else ret = 0; mwifiex_dbg(adapter, INFO, "Try %d if FW is ready <%d,%#x>", tries, ret, firmware_stat); if (ret) continue; if (firmware_stat == FIRMWARE_READY_PCIE) { ret = 0; break; } else { msleep(100); ret = -1; } } return ret; } /* This function checks if WLAN is the winner. */ static int mwifiex_check_winner_status(struct mwifiex_adapter *adapter) { u32 winner = 0; int ret = 0; struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; if (mwifiex_read_reg(adapter, reg->fw_status, &winner)) { ret = -1; } else if (!winner) { mwifiex_dbg(adapter, INFO, "PCI-E is the winner\n"); adapter->winner = 1; } else { mwifiex_dbg(adapter, ERROR, "PCI-E is not the winner <%#x>", winner); } return ret; } /* * This function reads the interrupt status from card. */ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter, int msg_id) { u32 pcie_ireg; unsigned long flags; struct pcie_service_card *card = adapter->card; if (card->msi_enable) { spin_lock_irqsave(&adapter->int_lock, flags); adapter->int_status = 1; spin_unlock_irqrestore(&adapter->int_lock, flags); return; } if (!mwifiex_pcie_ok_to_access_hw(adapter)) return; if (card->msix_enable && msg_id >= 0) { pcie_ireg = BIT(msg_id); } else { if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, &pcie_ireg)) { mwifiex_dbg(adapter, ERROR, "Read register failed\n"); return; } if ((pcie_ireg == 0xFFFFFFFF) || !pcie_ireg) return; mwifiex_pcie_disable_host_int(adapter); /* Clear the pending interrupts */ if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS, ~pcie_ireg)) { mwifiex_dbg(adapter, ERROR, "Write register failed\n"); return; } } if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP && mwifiex_pcie_ok_to_access_hw(adapter)) { /* Potentially for PCIe we could get other * interrupts like shared. Don't change power * state until cookie is set */ adapter->ps_state = PS_STATE_AWAKE; adapter->pm_wakeup_fw_try = false; del_timer(&adapter->wakeup_timer); } spin_lock_irqsave(&adapter->int_lock, flags); adapter->int_status |= pcie_ireg; spin_unlock_irqrestore(&adapter->int_lock, flags); mwifiex_dbg(adapter, INTR, "ireg: 0x%08x\n", pcie_ireg); } /* * Interrupt handler for PCIe root port * * This function reads the interrupt status from firmware and assigns * the main process in workqueue which will handle the interrupt. */ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context) { struct mwifiex_msix_context *ctx = context; struct pci_dev *pdev = ctx->dev; struct pcie_service_card *card; struct mwifiex_adapter *adapter; card = pci_get_drvdata(pdev); if (!card->adapter) { pr_err("info: %s: card=%p adapter=%p\n", __func__, card, card ? 
card->adapter : NULL); goto exit; } adapter = card->adapter; if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) goto exit; if (card->msix_enable) mwifiex_interrupt_status(adapter, ctx->msg_id); else mwifiex_interrupt_status(adapter, -1); mwifiex_queue_main_work(adapter); exit: return IRQ_HANDLED; } /* * This function checks the current interrupt status. * * The following interrupts are checked and handled by this function - * - Data sent * - Command sent * - Command received * - Packets received * - Events received * * In case of Rx packets received, the packets are uploaded from card to * host and processed accordingly. */ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) { int ret; u32 pcie_ireg = 0; unsigned long flags; struct pcie_service_card *card = adapter->card; spin_lock_irqsave(&adapter->int_lock, flags); if (!card->msi_enable) { /* Clear out unused interrupts */ pcie_ireg = adapter->int_status; } adapter->int_status = 0; spin_unlock_irqrestore(&adapter->int_lock, flags); if (card->msi_enable) { if (mwifiex_pcie_ok_to_access_hw(adapter)) { if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, &pcie_ireg)) { mwifiex_dbg(adapter, ERROR, "Read register failed\n"); return -1; } if ((pcie_ireg != 0xFFFFFFFF) && (pcie_ireg)) { if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS, ~pcie_ireg)) { mwifiex_dbg(adapter, ERROR, "Write register failed\n"); return -1; } if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP) { adapter->ps_state = PS_STATE_AWAKE; adapter->pm_wakeup_fw_try = false; del_timer(&adapter->wakeup_timer); } } } } if (pcie_ireg & HOST_INTR_DNLD_DONE) { mwifiex_dbg(adapter, INTR, "info: TX DNLD Done\n"); ret = mwifiex_pcie_send_data_complete(adapter); if (ret) return ret; } if (pcie_ireg & HOST_INTR_UPLD_RDY) { mwifiex_dbg(adapter, INTR, "info: Rx DATA\n"); ret = mwifiex_pcie_process_recv_data(adapter); if (ret) return ret; } if (pcie_ireg & HOST_INTR_EVENT_RDY) { mwifiex_dbg(adapter, INTR, "info: Rx EVENT\n"); ret = mwifiex_pcie_process_event_ready(adapter); if (ret) return ret; } if (pcie_ireg & HOST_INTR_CMD_DONE) { if (adapter->cmd_sent) { mwifiex_dbg(adapter, INTR, "info: CMD sent Interrupt\n"); adapter->cmd_sent = false; } /* Handle command response */ ret = mwifiex_pcie_process_cmd_complete(adapter); if (ret) return ret; } mwifiex_dbg(adapter, INTR, "info: cmd_sent=%d data_sent=%d\n", adapter->cmd_sent, adapter->data_sent); if (!card->msi_enable && !card->msix_enable && adapter->ps_state != PS_STATE_SLEEP) mwifiex_pcie_enable_host_int(adapter); return 0; } /* * This function downloads data from driver to card. * * Both commands and data packets are transferred to the card by this * function. * * This function adds the PCIE specific header to the front of the buffer * before transferring. The header contains the length of the packet and * the type. The firmware handles the packets based upon this set type. 
*/ static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type, struct sk_buff *skb, struct mwifiex_tx_param *tx_param) { if (!skb) { mwifiex_dbg(adapter, ERROR, "Passed NULL skb to %s\n", __func__); return -1; } if (type == MWIFIEX_TYPE_DATA) return mwifiex_pcie_send_data(adapter, skb, tx_param); else if (type == MWIFIEX_TYPE_CMD) return mwifiex_pcie_send_cmd(adapter, skb); return 0; } /* Function to dump PCIE scratch registers in case of FW crash */ static int mwifiex_pcie_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf) { char *p = drv_buf; char buf[256], *ptr; int i; u32 value; struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; int pcie_scratch_reg[] = {PCIE_SCRATCH_12_REG, PCIE_SCRATCH_14_REG, PCIE_SCRATCH_15_REG}; if (!p) return 0; mwifiex_dbg(adapter, MSG, "PCIE register dump start\n"); if (mwifiex_read_reg(adapter, reg->fw_status, &value)) { mwifiex_dbg(adapter, ERROR, "failed to read firmware status"); return 0; } ptr = buf; mwifiex_dbg(adapter, MSG, "pcie scratch register:"); for (i = 0; i < ARRAY_SIZE(pcie_scratch_reg); i++) { mwifiex_read_reg(adapter, pcie_scratch_reg[i], &value); ptr += sprintf(ptr, "reg:0x%x, value=0x%x\n", pcie_scratch_reg[i], value); } mwifiex_dbg(adapter, MSG, "%s\n", buf); p += sprintf(p, "%s\n", buf); mwifiex_dbg(adapter, MSG, "PCIE register dump end\n"); return p - drv_buf; } /* This function read/write firmware */ static enum rdwr_status mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag) { int ret, tries; u8 ctrl_data; u32 fw_status; struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status)) return RDWR_STATUS_FAILURE; ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, reg->fw_dump_host_ready); if (ret) { mwifiex_dbg(adapter, ERROR, "PCIE write err\n"); return RDWR_STATUS_FAILURE; } for (tries = 0; tries < MAX_POLL_TRIES; tries++) { mwifiex_read_reg_byte(adapter, reg->fw_dump_ctrl, &ctrl_data); if (ctrl_data == FW_DUMP_DONE) return RDWR_STATUS_SUCCESS; if (doneflag && ctrl_data == doneflag) return RDWR_STATUS_DONE; if (ctrl_data != reg->fw_dump_host_ready) { mwifiex_dbg(adapter, WARN, "The ctrl reg was changed, re-try again!\n"); ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, reg->fw_dump_host_ready); if (ret) { mwifiex_dbg(adapter, ERROR, "PCIE write err\n"); return RDWR_STATUS_FAILURE; } } usleep_range(100, 200); } mwifiex_dbg(adapter, ERROR, "Fail to pull ctrl_data\n"); return RDWR_STATUS_FAILURE; } /* This function dump firmware memory to file */ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *creg = card->pcie.reg; unsigned int reg, reg_start, reg_end; u8 *dbg_ptr, *end_ptr, *tmp_ptr, fw_dump_num, dump_num; u8 idx, i, read_reg, doneflag = 0; enum rdwr_status stat; u32 memory_size; int ret; if (!card->pcie.can_dump_fw) return; for (idx = 0; idx < adapter->num_mem_types; idx++) { struct memory_type_mapping *entry = &adapter->mem_type_mapping_tbl[idx]; if (entry->mem_ptr) { vfree(entry->mem_ptr); entry->mem_ptr = NULL; } entry->mem_size = 0; } mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n"); /* Read the number of the memories which will dump */ stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag); if (stat == RDWR_STATUS_FAILURE) return; reg = creg->fw_dump_start; mwifiex_read_reg_byte(adapter, reg, &fw_dump_num); /* 
W8997 chipset firmware dump will be restore in single region*/ if (fw_dump_num == 0) dump_num = 1; else dump_num = fw_dump_num; /* Read the length of every memory which will dump */ for (idx = 0; idx < dump_num; idx++) { struct memory_type_mapping *entry = &adapter->mem_type_mapping_tbl[idx]; memory_size = 0; if (fw_dump_num != 0) { stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag); if (stat == RDWR_STATUS_FAILURE) return; reg = creg->fw_dump_start; for (i = 0; i < 4; i++) { mwifiex_read_reg_byte(adapter, reg, &read_reg); memory_size |= (read_reg << (i * 8)); reg++; } } else { memory_size = MWIFIEX_FW_DUMP_MAX_MEMSIZE; } if (memory_size == 0) { mwifiex_dbg(adapter, MSG, "Firmware dump Finished!\n"); ret = mwifiex_write_reg(adapter, creg->fw_dump_ctrl, creg->fw_dump_read_done); if (ret) { mwifiex_dbg(adapter, ERROR, "PCIE write err\n"); return; } break; } mwifiex_dbg(adapter, DUMP, "%s_SIZE=0x%x\n", entry->mem_name, memory_size); entry->mem_ptr = vmalloc(memory_size + 1); entry->mem_size = memory_size; if (!entry->mem_ptr) { mwifiex_dbg(adapter, ERROR, "Vmalloc %s failed\n", entry->mem_name); return; } dbg_ptr = entry->mem_ptr; end_ptr = dbg_ptr + memory_size; doneflag = entry->done_flag; mwifiex_dbg(adapter, DUMP, "Start %s output, please wait...\n", entry->mem_name); do { stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag); if (RDWR_STATUS_FAILURE == stat) return; reg_start = creg->fw_dump_start; reg_end = creg->fw_dump_end; for (reg = reg_start; reg <= reg_end; reg++) { mwifiex_read_reg_byte(adapter, reg, dbg_ptr); if (dbg_ptr < end_ptr) { dbg_ptr++; continue; } mwifiex_dbg(adapter, ERROR, "pre-allocated buf not enough\n"); tmp_ptr = vzalloc(memory_size + MWIFIEX_SIZE_4K); if (!tmp_ptr) return; memcpy(tmp_ptr, entry->mem_ptr, memory_size); vfree(entry->mem_ptr); entry->mem_ptr = tmp_ptr; tmp_ptr = NULL; dbg_ptr = entry->mem_ptr + memory_size; memory_size += MWIFIEX_SIZE_4K; end_ptr = entry->mem_ptr + memory_size; } if (stat != RDWR_STATUS_DONE) continue; mwifiex_dbg(adapter, DUMP, "%s done: size=0x%tx\n", entry->mem_name, dbg_ptr - entry->mem_ptr); break; } while (true); } mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n"); } static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter) { adapter->devdump_data = vzalloc(MWIFIEX_FW_DUMP_SIZE); if (!adapter->devdump_data) { mwifiex_dbg(adapter, ERROR, "vzalloc devdump data failure!\n"); return; } mwifiex_drv_info_dump(adapter); mwifiex_pcie_fw_dump(adapter); mwifiex_prepare_fw_dump_info(adapter); mwifiex_upload_device_dump(adapter); } static void mwifiex_pcie_card_reset_work(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; /* We can't afford to wait here; remove() might be waiting on us. If we * can't grab the device lock, maybe we'll get another chance later. 
*/ pci_try_reset_function(card->dev); } static void mwifiex_pcie_work(struct work_struct *work) { struct pcie_service_card *card = container_of(work, struct pcie_service_card, work); if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags)) mwifiex_pcie_device_dump_work(card->adapter); if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags)) mwifiex_pcie_card_reset_work(card->adapter); } /* This function dumps FW information */ static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; if (!test_and_set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags)) schedule_work(&card->work); } static void mwifiex_pcie_card_reset(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; if (!test_and_set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags)) schedule_work(&card->work); } static int mwifiex_pcie_alloc_buffers(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; int ret; card->cmdrsp_buf = NULL; ret = mwifiex_pcie_create_txbd_ring(adapter); if (ret) { mwifiex_dbg(adapter, ERROR, "Failed to create txbd ring\n"); goto err_cre_txbd; } ret = mwifiex_pcie_create_rxbd_ring(adapter); if (ret) { mwifiex_dbg(adapter, ERROR, "Failed to create rxbd ring\n"); goto err_cre_rxbd; } ret = mwifiex_pcie_create_evtbd_ring(adapter); if (ret) { mwifiex_dbg(adapter, ERROR, "Failed to create evtbd ring\n"); goto err_cre_evtbd; } ret = mwifiex_pcie_alloc_cmdrsp_buf(adapter); if (ret) { mwifiex_dbg(adapter, ERROR, "Failed to allocate cmdbuf buffer\n"); goto err_alloc_cmdbuf; } if (reg->sleep_cookie) { ret = mwifiex_pcie_alloc_sleep_cookie_buf(adapter); if (ret) { mwifiex_dbg(adapter, ERROR, "Failed to allocate sleep_cookie buffer\n"); goto err_alloc_cookie; } } else { card->sleep_cookie_vbase = NULL; } return 0; err_alloc_cookie: mwifiex_pcie_delete_cmdrsp_buf(adapter); err_alloc_cmdbuf: mwifiex_pcie_delete_evtbd_ring(adapter); err_cre_evtbd: mwifiex_pcie_delete_rxbd_ring(adapter); err_cre_rxbd: mwifiex_pcie_delete_txbd_ring(adapter); err_cre_txbd: return ret; } static void mwifiex_pcie_free_buffers(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; if (reg->sleep_cookie) mwifiex_pcie_delete_sleep_cookie_buf(adapter); mwifiex_pcie_delete_cmdrsp_buf(adapter); mwifiex_pcie_delete_evtbd_ring(adapter); mwifiex_pcie_delete_rxbd_ring(adapter); mwifiex_pcie_delete_txbd_ring(adapter); } /* * This function initializes the PCI-E host memory space, WCB rings, etc. 
*/ static int mwifiex_init_pcie(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; int ret; struct pci_dev *pdev = card->dev; pci_set_drvdata(pdev, card); ret = pci_enable_device(pdev); if (ret) goto err_enable_dev; pci_set_master(pdev); ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { pr_err("set_dma_mask(32) failed: %d\n", ret); goto err_set_dma_mask; } ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { pr_err("set_consistent_dma_mask(64) failed\n"); goto err_set_dma_mask; } ret = pci_request_region(pdev, 0, DRV_NAME); if (ret) { pr_err("req_reg(0) error\n"); goto err_req_region0; } card->pci_mmap = pci_iomap(pdev, 0, 0); if (!card->pci_mmap) { pr_err("iomap(0) error\n"); ret = -EIO; goto err_iomap0; } ret = pci_request_region(pdev, 2, DRV_NAME); if (ret) { pr_err("req_reg(2) error\n"); goto err_req_region2; } card->pci_mmap1 = pci_iomap(pdev, 2, 0); if (!card->pci_mmap1) { pr_err("iomap(2) error\n"); ret = -EIO; goto err_iomap2; } pr_notice("PCI memory map Virt0: %pK PCI memory map Virt2: %pK\n", card->pci_mmap, card->pci_mmap1); ret = mwifiex_pcie_alloc_buffers(adapter); if (ret) goto err_alloc_buffers; return 0; err_alloc_buffers: pci_iounmap(pdev, card->pci_mmap1); err_iomap2: pci_release_region(pdev, 2); err_req_region2: pci_iounmap(pdev, card->pci_mmap); err_iomap0: pci_release_region(pdev, 0); err_req_region0: err_set_dma_mask: pci_disable_device(pdev); err_enable_dev: return ret; } /* * This function cleans up the allocated card buffers. */ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; struct pci_dev *pdev = card->dev; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; int ret; u32 fw_status; cancel_work_sync(&card->work); ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status); if (fw_status == FIRMWARE_READY_PCIE) { mwifiex_dbg(adapter, INFO, "Clearing driver ready signature\n"); if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000)) mwifiex_dbg(adapter, ERROR, "Failed to write driver not-ready signature\n"); } pci_disable_device(pdev); pci_iounmap(pdev, card->pci_mmap); pci_iounmap(pdev, card->pci_mmap1); pci_release_region(pdev, 2); pci_release_region(pdev, 0); mwifiex_pcie_free_buffers(adapter); } static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter) { int ret, i, j; struct pcie_service_card *card = adapter->card; struct pci_dev *pdev = card->dev; if (card->pcie.reg->msix_support) { for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) card->msix_entries[i].entry = i; ret = pci_enable_msix_exact(pdev, card->msix_entries, MWIFIEX_NUM_MSIX_VECTORS); if (!ret) { for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) { card->msix_ctx[i].dev = pdev; card->msix_ctx[i].msg_id = i; ret = request_irq(card->msix_entries[i].vector, mwifiex_pcie_interrupt, 0, "MWIFIEX_PCIE_MSIX", &card->msix_ctx[i]); if (ret) break; } if (ret) { mwifiex_dbg(adapter, INFO, "request_irq fail: %d\n", ret); for (j = 0; j < i; j++) free_irq(card->msix_entries[j].vector, &card->msix_ctx[i]); pci_disable_msix(pdev); } else { mwifiex_dbg(adapter, MSG, "MSIx enabled!"); card->msix_enable = 1; return 0; } } } if (pci_enable_msi(pdev) != 0) pci_disable_msi(pdev); else card->msi_enable = 1; mwifiex_dbg(adapter, INFO, "msi_enable = %d\n", card->msi_enable); card->share_irq_ctx.dev = pdev; card->share_irq_ctx.msg_id = -1; ret = request_irq(pdev->irq, mwifiex_pcie_interrupt, IRQF_SHARED, "MRVL_PCIE", &card->share_irq_ctx); if (ret) { pr_err("request_irq failed: ret=%d\n", 
ret); return -1; } return 0; } /* * This function gets the firmware name for downloading by revision id * * Read revision id register to get revision id */ static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter) { int revision_id = 0; int version, magic; struct pcie_service_card *card = adapter->card; switch (card->dev->device) { case PCIE_DEVICE_ID_MARVELL_88W8766P: strcpy(adapter->fw_name, PCIE8766_DEFAULT_FW_NAME); break; case PCIE_DEVICE_ID_MARVELL_88W8897: mwifiex_write_reg(adapter, 0x0c58, 0x80c00000); mwifiex_read_reg(adapter, 0x0c58, &revision_id); revision_id &= 0xff00; switch (revision_id) { case PCIE8897_A0: strcpy(adapter->fw_name, PCIE8897_A0_FW_NAME); break; case PCIE8897_B0: strcpy(adapter->fw_name, PCIE8897_B0_FW_NAME); break; default: strcpy(adapter->fw_name, PCIE8897_DEFAULT_FW_NAME); break; } break; case PCIE_DEVICE_ID_MARVELL_88W8997: mwifiex_read_reg(adapter, 0x8, &revision_id); mwifiex_read_reg(adapter, 0x0cd0, &version); mwifiex_read_reg(adapter, 0x0cd4, &magic); revision_id &= 0xff; version &= 0x7; magic &= 0xff; if (revision_id == PCIE8997_A1 && magic == CHIP_MAGIC_VALUE && version == CHIP_VER_PCIEUART) strcpy(adapter->fw_name, PCIEUART8997_FW_NAME_V4); else strcpy(adapter->fw_name, PCIEUSB8997_FW_NAME_V4); break; default: break; } } /* * This function registers the PCIE device. * * PCIE IRQ is claimed, block size is set and driver data is initialized. */ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; /* save adapter pointer in card */ card->adapter = adapter; if (mwifiex_pcie_request_irq(adapter)) return -1; adapter->tx_buf_size = card->pcie.tx_buf_size; adapter->mem_type_mapping_tbl = card->pcie.mem_type_mapping_tbl; adapter->num_mem_types = card->pcie.num_mem_types; adapter->ext_scan = card->pcie.can_ext_scan; mwifiex_pcie_get_fw_name(adapter); return 0; } /* * This function unregisters the PCIE device. * * The PCIE IRQ is released, the function is disabled and driver * data is set to null. */ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; struct pci_dev *pdev = card->dev; int i; if (card->msix_enable) { for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) synchronize_irq(card->msix_entries[i].vector); for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) free_irq(card->msix_entries[i].vector, &card->msix_ctx[i]); card->msix_enable = 0; pci_disable_msix(pdev); } else { mwifiex_dbg(adapter, INFO, "%s(): calling free_irq()\n", __func__); free_irq(card->dev->irq, &card->share_irq_ctx); if (card->msi_enable) pci_disable_msi(pdev); } card->adapter = NULL; } /* * This function initializes the PCI-E host memory space, WCB rings, etc., * similar to mwifiex_init_pcie(), but without resetting PCI-E state. */ static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; struct pci_dev *pdev = card->dev; /* tx_buf_size might be changed to 3584 by firmware during * data transfer, we should reset it to default size. */ adapter->tx_buf_size = card->pcie.tx_buf_size; mwifiex_pcie_alloc_buffers(adapter); pci_set_master(pdev); } /* This function cleans up the PCI-E host memory space. 
*/ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; struct pci_dev *pdev = card->dev; if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000)) mwifiex_dbg(adapter, ERROR, "Failed to write driver not-ready signature\n"); pci_clear_master(pdev); adapter->seq_num = 0; mwifiex_pcie_free_buffers(adapter); } static struct mwifiex_if_ops pcie_ops = { .init_if = mwifiex_init_pcie, .cleanup_if = mwifiex_cleanup_pcie, .check_fw_status = mwifiex_check_fw_status, .check_winner_status = mwifiex_check_winner_status, .prog_fw = mwifiex_prog_fw_w_helper, .register_dev = mwifiex_register_dev, .unregister_dev = mwifiex_unregister_dev, .enable_int = mwifiex_pcie_enable_host_int, .disable_int = mwifiex_pcie_disable_host_int_noerr, .process_int_status = mwifiex_process_int_status, .host_to_card = mwifiex_pcie_host_to_card, .wakeup = mwifiex_pm_wakeup_card, .wakeup_complete = mwifiex_pm_wakeup_card_complete, /* PCIE specific */ .cmdrsp_complete = mwifiex_pcie_cmdrsp_complete, .event_complete = mwifiex_pcie_event_complete, .update_mp_end_port = NULL, .cleanup_mpa_buf = NULL, .init_fw_port = mwifiex_pcie_init_fw_port, .clean_pcie_ring = mwifiex_clean_pcie_ring_buf, .card_reset = mwifiex_pcie_card_reset, .reg_dump = mwifiex_pcie_reg_dump, .device_dump = mwifiex_pcie_device_dump, .down_dev = mwifiex_pcie_down_dev, .up_dev = mwifiex_pcie_up_dev, }; module_pci_driver(mwifiex_pcie); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell WiFi-Ex PCI-Express Driver version " PCIE_VERSION); MODULE_VERSION(PCIE_VERSION); MODULE_LICENSE("GPL v2");
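/*
 * Illustrative sketch (not used by the driver above): the combo-firmware
 * parser in mwifiex_extract_wifi_fw() guards every "offset += data_len"
 * step with a wrap-around test of the form "offset + data_len < data_len".
 * The hypothetical helper below shows that bounds check in isolation; its
 * name and signature are assumptions for illustration only and nothing in
 * the driver references it.  It relies on the u32/bool types this file
 * already pulls in through the kernel headers.
 */
static inline bool fw_parse_advance(u32 *offset, u32 data_len, u32 total_len)
{
	u32 next = *offset + data_len;

	/* Reject both 32-bit wrap-around and walking past the image end. */
	if (next < *offset || next > total_len)
		return false;

	*offset = next;
	return true;
}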
./CrossVul/dataset_final_sorted/CWE-400/c/good_1247_0
crossvul-cpp_data_good_1260_0
// SPDX-License-Identifier: GPL-2.0-or-later /* * SPI master driver using generic bitbanged GPIO * * Copyright (C) 2006,2008 David Brownell * Copyright (C) 2017 Linus Walleij */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/gpio/consumer.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> #include <linux/spi/spi_gpio.h> /* * This bitbanging SPI master driver should help make systems usable * when a native hardware SPI engine is not available, perhaps because * its driver isn't yet working or because the I/O pins it requires * are used for other purposes. * * platform_device->driver_data ... points to spi_gpio * * spi->controller_state ... reserved for bitbang framework code * * spi->master->dev.driver_data ... points to spi_gpio->bitbang */ struct spi_gpio { struct spi_bitbang bitbang; struct gpio_desc *sck; struct gpio_desc *miso; struct gpio_desc *mosi; struct gpio_desc **cs_gpios; }; /*----------------------------------------------------------------------*/ /* * Because the overhead of going through four GPIO procedure calls * per transferred bit can make performance a problem, this code * is set up so that you can use it in either of two ways: * * - The slow generic way: set up platform_data to hold the GPIO * numbers used for MISO/MOSI/SCK, and issue procedure calls for * each of them. This driver can handle several such busses. * * - The quicker inlined way: only helps with platform GPIO code * that inlines operations for constant GPIOs. This can give * you tight (fast!) inner loops, but each such bus needs a * new driver. You'll define a new C file, with Makefile and * Kconfig support; the C code can be a total of six lines: * * #define DRIVER_NAME "myboard_spi2" * #define SPI_MISO_GPIO 119 * #define SPI_MOSI_GPIO 120 * #define SPI_SCK_GPIO 121 * #define SPI_N_CHIPSEL 4 * #include "spi-gpio.c" */ #ifndef DRIVER_NAME #define DRIVER_NAME "spi_gpio" #define GENERIC_BITBANG /* vs tight inlines */ #endif /*----------------------------------------------------------------------*/ static inline struct spi_gpio *__pure spi_to_spi_gpio(const struct spi_device *spi) { const struct spi_bitbang *bang; struct spi_gpio *spi_gpio; bang = spi_master_get_devdata(spi->master); spi_gpio = container_of(bang, struct spi_gpio, bitbang); return spi_gpio; } /* These helpers are in turn called by the bitbang inlines */ static inline void setsck(const struct spi_device *spi, int is_on) { struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi); gpiod_set_value_cansleep(spi_gpio->sck, is_on); } static inline void setmosi(const struct spi_device *spi, int is_on) { struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi); gpiod_set_value_cansleep(spi_gpio->mosi, is_on); } static inline int getmiso(const struct spi_device *spi) { struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi); if (spi->mode & SPI_3WIRE) return !!gpiod_get_value_cansleep(spi_gpio->mosi); else return !!gpiod_get_value_cansleep(spi_gpio->miso); } /* * NOTE: this clocks "as fast as we can". It "should" be a function of the * requested device clock. Software overhead means we usually have trouble * reaching even one Mbit/sec (except when we can inline bitops), so for now * we'll just assume we never need additional per-bit slowdowns. 
*/ #define spidelay(nsecs) do {} while (0) #include "spi-bitbang-txrx.h" /* * These functions can leverage inline expansion of GPIO calls to shrink * costs for a txrx bit, often by factors of around ten (by instruction * count). That is particularly visible for larger word sizes, but helps * even with default 8-bit words. * * REVISIT overheads calling these functions for each word also have * significant performance costs. Having txrx_bufs() calls that inline * the txrx_word() logic would help performance, e.g. on larger blocks * used with flash storage or MMC/SD. There should also be ways to make * GCC be less stupid about reloading registers inside the I/O loops, * even without inlined GPIO calls; __attribute__((hot)) on GCC 4.3? */ static u32 spi_gpio_txrx_word_mode0(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits, unsigned flags) { return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits); } static u32 spi_gpio_txrx_word_mode1(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits, unsigned flags) { return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits); } static u32 spi_gpio_txrx_word_mode2(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits, unsigned flags) { return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits); } static u32 spi_gpio_txrx_word_mode3(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits, unsigned flags) { return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits); } /* * These functions do not call setmosi or getmiso if respective flag * (SPI_MASTER_NO_RX or SPI_MASTER_NO_TX) is set, so they are safe to * call when such pin is not present or defined in the controller. * A separate set of callbacks is defined to get highest possible * speed in the generic case (when both MISO and MOSI lines are * available), as optimiser will remove the checks when argument is * constant. */ static u32 spi_gpio_spec_txrx_word_mode0(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits, unsigned flags) { flags = spi->master->flags; return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits); } static u32 spi_gpio_spec_txrx_word_mode1(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits, unsigned flags) { flags = spi->master->flags; return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits); } static u32 spi_gpio_spec_txrx_word_mode2(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits, unsigned flags) { flags = spi->master->flags; return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits); } static u32 spi_gpio_spec_txrx_word_mode3(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits, unsigned flags) { flags = spi->master->flags; return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits); } /*----------------------------------------------------------------------*/ static void spi_gpio_chipselect(struct spi_device *spi, int is_active) { struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi); /* set initial clock line level */ if (is_active) gpiod_set_value_cansleep(spi_gpio->sck, spi->mode & SPI_CPOL); /* Drive chip select line, if we have one */ if (spi_gpio->cs_gpios) { struct gpio_desc *cs = spi_gpio->cs_gpios[spi->chip_select]; /* SPI chip selects are normally active-low */ gpiod_set_value_cansleep(cs, (spi->mode & SPI_CS_HIGH) ? is_active : !is_active); } } static int spi_gpio_setup(struct spi_device *spi) { struct gpio_desc *cs; int status = 0; struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi); /* * The CS GPIOs have already been * initialized from the descriptor lookup. 
*/ if (spi_gpio->cs_gpios) { cs = spi_gpio->cs_gpios[spi->chip_select]; if (!spi->controller_state && cs) status = gpiod_direction_output(cs, !(spi->mode & SPI_CS_HIGH)); } if (!status) status = spi_bitbang_setup(spi); return status; } static int spi_gpio_set_direction(struct spi_device *spi, bool output) { struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi); int ret; if (output) return gpiod_direction_output(spi_gpio->mosi, 1); ret = gpiod_direction_input(spi_gpio->mosi); if (ret) return ret; /* * Send a turnaround high impedance cycle when switching * from output to input. Theoretically there should be * a clock delay here, but as has been noted above, the * nsec delay function for bit-banged GPIO is simply * {} because bit-banging just doesn't get fast enough * anyway. */ if (spi->mode & SPI_3WIRE_HIZ) { gpiod_set_value_cansleep(spi_gpio->sck, !(spi->mode & SPI_CPOL)); gpiod_set_value_cansleep(spi_gpio->sck, !!(spi->mode & SPI_CPOL)); } return 0; } static void spi_gpio_cleanup(struct spi_device *spi) { spi_bitbang_cleanup(spi); } /* * It can be convenient to use this driver with pins that have alternate * functions associated with a "native" SPI controller if a driver for that * controller is not available, or is missing important functionality. * * On platforms which can do so, configure MISO with a weak pullup unless * there's an external pullup on that signal. That saves power by avoiding * floating signals. (A weak pulldown would save power too, but many * drivers expect to see all-ones data as the no slave "response".) */ static int spi_gpio_request(struct device *dev, struct spi_gpio *spi_gpio) { spi_gpio->mosi = devm_gpiod_get_optional(dev, "mosi", GPIOD_OUT_LOW); if (IS_ERR(spi_gpio->mosi)) return PTR_ERR(spi_gpio->mosi); spi_gpio->miso = devm_gpiod_get_optional(dev, "miso", GPIOD_IN); if (IS_ERR(spi_gpio->miso)) return PTR_ERR(spi_gpio->miso); spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW); return PTR_ERR_OR_ZERO(spi_gpio->sck); } #ifdef CONFIG_OF static const struct of_device_id spi_gpio_dt_ids[] = { { .compatible = "spi-gpio" }, {} }; MODULE_DEVICE_TABLE(of, spi_gpio_dt_ids); static int spi_gpio_probe_dt(struct platform_device *pdev, struct spi_master *master) { master->dev.of_node = pdev->dev.of_node; master->use_gpio_descriptors = true; return 0; } #else static inline int spi_gpio_probe_dt(struct platform_device *pdev, struct spi_master *master) { return 0; } #endif static int spi_gpio_probe_pdata(struct platform_device *pdev, struct spi_master *master) { struct device *dev = &pdev->dev; struct spi_gpio_platform_data *pdata = dev_get_platdata(dev); struct spi_gpio *spi_gpio = spi_master_get_devdata(master); int i; #ifdef GENERIC_BITBANG if (!pdata || !pdata->num_chipselect) return -ENODEV; #endif /* * The master needs to think there is a chipselect even if not * connected */ master->num_chipselect = pdata->num_chipselect ?: 1; spi_gpio->cs_gpios = devm_kcalloc(dev, master->num_chipselect, sizeof(*spi_gpio->cs_gpios), GFP_KERNEL); if (!spi_gpio->cs_gpios) return -ENOMEM; for (i = 0; i < master->num_chipselect; i++) { spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs", i, GPIOD_OUT_HIGH); if (IS_ERR(spi_gpio->cs_gpios[i])) return PTR_ERR(spi_gpio->cs_gpios[i]); } return 0; } static void spi_gpio_put(void *data) { spi_master_put(data); } static int spi_gpio_probe(struct platform_device *pdev) { int status; struct spi_master *master; struct spi_gpio *spi_gpio; struct device *dev = &pdev->dev; struct spi_bitbang *bb; const struct of_device_id *of_id; of_id = 
of_match_device(spi_gpio_dt_ids, &pdev->dev); master = spi_alloc_master(dev, sizeof(*spi_gpio)); if (!master) return -ENOMEM; status = devm_add_action_or_reset(&pdev->dev, spi_gpio_put, master); if (status) { spi_master_put(master); return status; } if (of_id) status = spi_gpio_probe_dt(pdev, master); else status = spi_gpio_probe_pdata(pdev, master); if (status) return status; spi_gpio = spi_master_get_devdata(master); status = spi_gpio_request(dev, spi_gpio); if (status) return status; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL | SPI_CS_HIGH; if (!spi_gpio->mosi) { /* HW configuration without MOSI pin * * No setting SPI_MASTER_NO_RX here - if there is only * a MOSI pin connected the host can still do RX by * changing the direction of the line. */ master->flags = SPI_MASTER_NO_TX; } master->bus_num = pdev->id; master->setup = spi_gpio_setup; master->cleanup = spi_gpio_cleanup; bb = &spi_gpio->bitbang; bb->master = master; /* * There is some additional business, apart from driving the CS GPIO * line, that we need to do on selection. This makes the local * callback for chipselect always get called. */ master->flags |= SPI_MASTER_GPIO_SS; bb->chipselect = spi_gpio_chipselect; bb->set_line_direction = spi_gpio_set_direction; if (master->flags & SPI_MASTER_NO_TX) { bb->txrx_word[SPI_MODE_0] = spi_gpio_spec_txrx_word_mode0; bb->txrx_word[SPI_MODE_1] = spi_gpio_spec_txrx_word_mode1; bb->txrx_word[SPI_MODE_2] = spi_gpio_spec_txrx_word_mode2; bb->txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3; } else { bb->txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; bb->txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; bb->txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2; bb->txrx_word[SPI_MODE_3] = spi_gpio_txrx_word_mode3; } bb->setup_transfer = spi_bitbang_setup_transfer; status = spi_bitbang_init(&spi_gpio->bitbang); if (status) return status; return devm_spi_register_master(&pdev->dev, spi_master_get(master)); } MODULE_ALIAS("platform:" DRIVER_NAME); static struct platform_driver spi_gpio_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = of_match_ptr(spi_gpio_dt_ids), }, .probe = spi_gpio_probe, }; module_platform_driver(spi_gpio_driver); MODULE_DESCRIPTION("SPI master driver using generic bitbanged GPIO "); MODULE_AUTHOR("David Brownell"); MODULE_LICENSE("GPL");
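/*
 * Illustrative sketch (not used by this driver): a minimal SPI mode-0 word
 * transfer of the kind bitbang_txrx_be_cpha0() provides via
 * "spi-bitbang-txrx.h", expressed with caller-supplied pin callbacks.  The
 * struct, function and callback names are hypothetical and exist only to
 * show the MSB-first pattern: drive MOSI while SCK is idle, raise SCK,
 * sample MISO on that leading edge, then return SCK to idle.  The real
 * helpers additionally take CPOL/CPHA and per-bit delay parameters.
 */
struct demo_spi_pins {
	void (*set_sck)(int level);
	void (*set_mosi)(int level);
	int (*get_miso)(void);
};

static inline u32 demo_txrx_word_mode0(const struct demo_spi_pins *pins,
					u32 word, unsigned int bits)
{
	u32 in = 0;

	while (bits--) {
		/* Set up the next output bit while the clock is idle (low). */
		pins->set_mosi((word >> bits) & 1);
		/* Leading (rising) edge: both ends sample their inputs here. */
		pins->set_sck(1);
		in = (in << 1) | (pins->get_miso() ? 1 : 0);
		/* Trailing (falling) edge returns the clock to its idle level. */
		pins->set_sck(0);
	}
	return in;
}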
./CrossVul/dataset_final_sorted/CWE-400/c/good_1260_0
crossvul-cpp_data_good_1272_4
/* * Copyright 2016 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/slab.h> #include "dm_services.h" #include "dc.h" #include "resource.h" #include "include/irq_service_interface.h" #include "dcn10_resource.h" #include "dcn10_ipp.h" #include "dcn10_mpc.h" #include "irq/dcn10/irq_service_dcn10.h" #include "dcn10_dpp.h" #include "dcn10_optc.h" #include "dcn10_hw_sequencer.h" #include "dce110/dce110_hw_sequencer.h" #include "dcn10_opp.h" #include "dcn10_link_encoder.h" #include "dcn10_stream_encoder.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dce112/dce112_resource.h" #include "dcn10_hubp.h" #include "dcn10_hubbub.h" #include "soc15_hw_ip.h" #include "vega10_ip_offset.h" #include "dcn/dcn_1_0_offset.h" #include "dcn/dcn_1_0_sh_mask.h" #include "nbio/nbio_7_0_offset.h" #include "mmhub/mmhub_9_1_offset.h" #include "mmhub/mmhub_9_1_sh_mask.h" #include "reg_helper.h" #include "dce/dce_abm.h" #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" const struct _vcs_dpi_ip_params_st dcn1_0_ip = { .rob_buffer_size_kbytes = 64, .det_buffer_size_kbytes = 164, .dpte_buffer_size_in_pte_reqs_luma = 42, .dpp_output_buffer_pixels = 2560, .opp_output_buffer_lines = 1, .pixel_chunk_size_kbytes = 8, .pte_enable = 1, .pte_chunk_size_kbytes = 2, .meta_chunk_size_kbytes = 2, .writeback_chunk_size_kbytes = 2, .line_buffer_size_bits = 589824, .max_line_buffer_lines = 12, .IsLineBufferBppFixed = 0, .LineBufferFixedBpp = -1, .writeback_luma_buffer_size_kbytes = 12, .writeback_chroma_buffer_size_kbytes = 8, .max_num_dpp = 4, .max_num_wb = 2, .max_dchub_pscl_bw_pix_per_clk = 4, .max_pscl_lb_bw_pix_per_clk = 2, .max_lb_vscl_bw_pix_per_clk = 4, .max_vscl_hscl_bw_pix_per_clk = 4, .max_hscl_ratio = 4, .max_vscl_ratio = 4, .hscl_mults = 4, .vscl_mults = 4, .max_hscl_taps = 8, .max_vscl_taps = 8, .dispclk_ramp_margin_percent = 1, .underscan_factor = 1.10, .min_vblank_lines = 14, .dppclk_delay_subtotal = 90, .dispclk_delay_subtotal = 42, .dcfclk_cstate_latency = 10, .max_inter_dcn_tile_repeaters = 8, .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0, .bug_forcing_LC_req_same_size_fixed = 0, }; const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = { .sr_exit_time_us = 9.0, .sr_enter_plus_exit_time_us = 11.0, .urgent_latency_us = 4.0, .writeback_latency_us = 12.0, 
.ideal_dram_bw_after_urgent_percent = 80.0, .max_request_size_bytes = 256, .downspread_percent = 0.5, .dram_page_open_time_ns = 50.0, .dram_rw_turnaround_time_ns = 17.5, .dram_return_buffer_per_channel_bytes = 8192, .round_trip_ping_latency_dcfclk_cycles = 128, .urgent_out_of_order_return_per_channel_bytes = 256, .channel_interleave_bytes = 256, .num_banks = 8, .num_chans = 2, .vmm_page_size_bytes = 4096, .dram_clock_change_latency_us = 17.0, .writeback_dram_clock_change_latency_us = 23.0, .return_bus_width_bytes = 64, }; #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #endif enum dcn10_clk_src_array_id { DCN10_CLK_SRC_PLL0, DCN10_CLK_SRC_PLL1, DCN10_CLK_SRC_PLL2, DCN10_CLK_SRC_PLL3, DCN10_CLK_SRC_TOTAL, DCN101_CLK_SRC_TOTAL = DCN10_CLK_SRC_PLL3 }; /* begin ********************* * macros to expend register list macro defined in HW object header file */ /* DCN */ #define BASE_INNER(seg) \ DCE_BASE__INST0_SEG ## seg #define BASE(seg) \ BASE_INNER(seg) #define SR(reg_name)\ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name #define SRI(reg_name, block, id)\ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name #define SRII(reg_name, block, id)\ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name /* NBIO */ #define NBIO_BASE_INNER(seg) \ NBIF_BASE__INST0_SEG ## seg #define NBIO_BASE(seg) \ NBIO_BASE_INNER(seg) #define NBIO_SR(reg_name)\ .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name /* MMHUB */ #define MMHUB_BASE_INNER(seg) \ MMHUB_BASE__INST0_SEG ## seg #define MMHUB_BASE(seg) \ MMHUB_BASE_INNER(seg) #define MMHUB_SR(reg_name)\ .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name /* macros to expend register list macro defined in HW object header file * end *********************/ static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCN10_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { DMCU_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dce_dmcu_mask dmcu_mask = { DMCU_MASK_SH_LIST_DCN10(_MASK) }; static const struct dce_abm_registers abm_regs = { ABM_DCN10_REG_LIST(0) }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCN10(_MASK) }; #define stream_enc_regs(id)\ [id] = {\ SE_DCN_REG_LIST(id)\ } static const struct dcn10_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), stream_enc_regs(3), }; static const struct dcn10_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dcn10_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCN10(_MASK) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), audio_regs(2), 
audio_regs(3), }; #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) static const struct dce_audio_shift audio_shift = { DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) }; #define aux_regs(id)\ [id] = {\ AUX_REG_LIST(id)\ } static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3) }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3) }; #define link_regs(id)\ [id] = {\ LE_DCN10_REG_LIST(id), \ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ } static const struct dcn10_link_enc_registers link_enc_regs[] = { link_regs(0), link_regs(1), link_regs(2), link_regs(3) }; static const struct dcn10_link_enc_shift le_shift = { LINK_ENCODER_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dcn10_link_enc_mask le_mask = { LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK) }; #define ipp_regs(id)\ [id] = {\ IPP_REG_LIST_DCN10(id),\ } static const struct dcn10_ipp_registers ipp_regs[] = { ipp_regs(0), ipp_regs(1), ipp_regs(2), ipp_regs(3), }; static const struct dcn10_ipp_shift ipp_shift = { IPP_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dcn10_ipp_mask ipp_mask = { IPP_MASK_SH_LIST_DCN10(_MASK), }; #define opp_regs(id)\ [id] = {\ OPP_REG_LIST_DCN10(id),\ } static const struct dcn10_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), opp_regs(3), }; static const struct dcn10_opp_shift opp_shift = { OPP_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dcn10_opp_mask opp_mask = { OPP_MASK_SH_LIST_DCN10(_MASK), }; #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ .AUX_RESET_MASK = 0 \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4), aux_engine_regs(5) }; #define tf_regs(id)\ [id] = {\ TF_REG_LIST_DCN10(id),\ } static const struct dcn_dpp_registers tf_regs[] = { tf_regs(0), tf_regs(1), tf_regs(2), tf_regs(3), }; static const struct dcn_dpp_shift tf_shift = { TF_REG_LIST_SH_MASK_DCN10(__SHIFT), TF_DEBUG_REG_LIST_SH_DCN10 }; static const struct dcn_dpp_mask tf_mask = { TF_REG_LIST_SH_MASK_DCN10(_MASK), TF_DEBUG_REG_LIST_MASK_DCN10 }; static const struct dcn_mpc_registers mpc_regs = { MPC_COMMON_REG_LIST_DCN1_0(0), MPC_COMMON_REG_LIST_DCN1_0(1), MPC_COMMON_REG_LIST_DCN1_0(2), MPC_COMMON_REG_LIST_DCN1_0(3), MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(0), MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(1), MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(2), MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(3) }; static const struct dcn_mpc_shift mpc_shift = { MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT) }; static const struct dcn_mpc_mask mpc_mask = { MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK), }; #define tg_regs(id)\ [id] = {TG_COMMON_REG_LIST_DCN1_0(id)} static const struct dcn_optc_registers tg_regs[] = { tg_regs(0), tg_regs(1), tg_regs(2), tg_regs(3), }; static const struct dcn_optc_shift tg_shift = { TG_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT) }; static const struct dcn_optc_mask tg_mask = { TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK) }; static const struct bios_registers bios_regs = { NBIO_SR(BIOS_SCRATCH_3), NBIO_SR(BIOS_SCRATCH_6) }; #define hubp_regs(id)\ [id] = {\ 
HUBP_REG_LIST_DCN10(id)\ } static const struct dcn_mi_registers hubp_regs[] = { hubp_regs(0), hubp_regs(1), hubp_regs(2), hubp_regs(3), }; static const struct dcn_mi_shift hubp_shift = { HUBP_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dcn_mi_mask hubp_mask = { HUBP_MASK_SH_LIST_DCN10(_MASK) }; static const struct dcn_hubbub_registers hubbub_reg = { HUBBUB_REG_LIST_DCN10(0) }; static const struct dcn_hubbub_shift hubbub_shift = { HUBBUB_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dcn_hubbub_mask hubbub_mask = { HUBBUB_MASK_SH_LIST_DCN10(_MASK) }; #define clk_src_regs(index, pllid)\ [index] = {\ CS_COMMON_REG_LIST_DCN1_0(index, pllid),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0, A), clk_src_regs(1, B), clk_src_regs(2, C), clk_src_regs(3, D) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK) }; static const struct resource_caps res_cap = { .num_timing_generator = 4, .num_opp = 4, .num_video_plane = 4, .num_audio = 4, .num_stream_encoder = 4, .num_pll = 4, .num_ddc = 4, }; static const struct resource_caps rv2_res_cap = { .num_timing_generator = 3, .num_opp = 3, .num_video_plane = 3, .num_audio = 3, .num_stream_encoder = 3, .num_pll = 3, .num_ddc = 4, }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCN_UNIVERSAL, .blends_with_above = true, .blends_with_below = true, .per_pixel_alpha = true, .pixel_format_support = { .argb8888 = true, .nv12 = true, .fp16 = true }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 16000, .fp16 = 1 }, .max_downscale_factor = { .argb8888 = 250, .nv12 = 250, .fp16 = 1 } }; static const struct dc_debug_options debug_defaults_drv = { .sanity_checks = true, .disable_dmcu = true, .force_abm_enable = false, .timing_trace = false, .clock_trace = true, /* raven smu dones't allow 0 disp clk, * smu min disp clk limit is 50Mhz * keep min disp clk 100Mhz avoid smu hang */ .min_disp_clk_khz = 100000, .disable_pplib_clock_request = false, .disable_pplib_wm_range = false, .pplib_wm_report_mode = WM_REPORT_DEFAULT, .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, .force_single_disp_pipe_split = true, .disable_dcc = DCC_ENABLE, .voltage_align_fclk = true, .disable_stereo_support = true, .vsr_support = true, .performance_trace = false, .az_endpoint_mute_only = true, .recovery_enabled = false, /*enable this by default after testing.*/ .max_downscale_src_width = 3840, .underflow_assert_delay_us = 0xFFFFFFFF, }; static const struct dc_debug_options debug_defaults_diags = { .disable_dmcu = true, .force_abm_enable = false, .timing_trace = true, .clock_trace = true, .disable_stutter = true, .disable_pplib_clock_request = true, .disable_pplib_wm_range = true, .underflow_assert_delay_us = 0xFFFFFFFF, }; static void dcn10_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN10_DPP(*dpp)); *dpp = NULL; } static struct dpp *dcn10_dpp_create( struct dc_context *ctx, uint32_t inst) { struct dcn10_dpp *dpp = kzalloc(sizeof(struct dcn10_dpp), GFP_KERNEL); if (!dpp) return NULL; dpp1_construct(dpp, ctx, inst, &tf_regs[inst], &tf_shift, &tf_mask); return &dpp->base; } static struct input_pixel_processor *dcn10_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dcn10_ipp *ipp = kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); return NULL; } dcn10_ipp_construct(ipp, ctx, inst, &ipp_regs[inst], &ipp_shift, &ipp_mask); return &ipp->base; } static struct 
output_pixel_processor *dcn10_opp_create( struct dc_context *ctx, uint32_t inst) { struct dcn10_opp *opp = kzalloc(sizeof(struct dcn10_opp), GFP_KERNEL); if (!opp) { BREAK_TO_DEBUGGER(); return NULL; } dcn10_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } struct dce_aux *dcn10_aux_engine_create( struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst]); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5), i2c_inst_regs(6), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) }; struct dce_i2c_hw *dcn10_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dcn1_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } static struct mpc *dcn10_mpc_create(struct dc_context *ctx) { struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc), GFP_KERNEL); if (!mpc10) return NULL; dcn10_mpc_construct(mpc10, ctx, &mpc_regs, &mpc_shift, &mpc_mask, 4); return &mpc10->base; } static struct hubbub *dcn10_hubbub_create(struct dc_context *ctx) { struct dcn10_hubbub *dcn10_hubbub = kzalloc(sizeof(struct dcn10_hubbub), GFP_KERNEL); if (!dcn10_hubbub) return NULL; hubbub1_construct(&dcn10_hubbub->base, ctx, &hubbub_reg, &hubbub_shift, &hubbub_mask); return &dcn10_hubbub->base; } static struct timing_generator *dcn10_timing_generator_create( struct dc_context *ctx, uint32_t instance) { struct optc *tgn10 = kzalloc(sizeof(struct optc), GFP_KERNEL); if (!tgn10) return NULL; tgn10->base.inst = instance; tgn10->base.ctx = ctx; tgn10->tg_regs = &tg_regs[instance]; tgn10->tg_shift = &tg_shift; tgn10->tg_mask = &tg_mask; dcn10_timing_generator_init(tgn10); return &tgn10->base; } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 600000, .hdmi_ycbcr420_supported = true, .dp_ycbcr420_supported = false, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, .flags.bits.IS_TPS4_CAPABLE = true }; struct link_encoder *dcn10_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dcn10_link_encoder *enc10 = kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL); if (!enc10) return NULL; dcn10_link_encoder_construct(enc10, enc_init_data, &link_enc_feature, &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, &le_mask); return &enc10->base; } struct clock_source *dcn10_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dce112_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = 
dp_clk_src; return &clk_src->base; } BREAK_TO_DEBUGGER(); return NULL; } static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); } static struct audio *create_audio( struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } static struct stream_encoder *dcn10_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dcn10_stream_encoder *enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); if (!enc1) return NULL; dcn10_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc1->base; } static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCN1_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCN1_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCN1_MASK_SH_LIST(_MASK) }; static struct dce_hwseq *dcn10_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; hws->wa.DEGVIDCN10_253 = true; hws->wa.false_optc_underflow = true; hws->wa.DEGVIDCN10_254 = true; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, .create_stream_encoder = dcn10_stream_encoder_create, .create_hwseq = dcn10_hwseq_create, }; static const struct resource_create_funcs res_create_maximus_funcs = { .read_dce_straps = NULL, .create_audio = NULL, .create_stream_encoder = NULL, .create_hwseq = dcn10_hwseq_create, }; void dcn10_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); *clk_src = NULL; } static struct pp_smu_funcs *dcn10_pp_smu_create(struct dc_context *ctx) { struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL); if (!pp_smu) return pp_smu; dm_pp_get_funcs(ctx, pp_smu); return pp_smu; } static void destruct(struct dcn10_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) { kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); pool->base.stream_enc[i] = NULL; } } if (pool->base.mpc != NULL) { kfree(TO_DCN10_MPC(pool->base.mpc)); pool->base.mpc = NULL; } if (pool->base.hubbub != NULL) { kfree(pool->base.hubbub); pool->base.hubbub = NULL; } for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.opps[i] != NULL) pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); if (pool->base.dpps[i] != NULL) dcn10_dpp_destroy(&pool->base.dpps[i]); if (pool->base.ipps[i] != NULL) pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); if (pool->base.hubps[i] != NULL) { kfree(TO_DCN10_HUBP(pool->base.hubps[i])); pool->base.hubps[i] = NULL; } if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); } if (pool->base.timing_generators[i] != NULL) { kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { 
kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i]) dce_aud_destroy(&pool->base.audios[i]); } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) { dcn10_clock_source_destroy(&pool->base.clock_sources[i]); pool->base.clock_sources[i] = NULL; } } if (pool->base.dp_clock_source != NULL) { dcn10_clock_source_destroy(&pool->base.dp_clock_source); pool->base.dp_clock_source = NULL; } if (pool->base.abm != NULL) dce_abm_destroy(&pool->base.abm); if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); kfree(pool->base.pp_smu); } static struct hubp *dcn10_hubp_create( struct dc_context *ctx, uint32_t inst) { struct dcn10_hubp *hubp1 = kzalloc(sizeof(struct dcn10_hubp), GFP_KERNEL); if (!hubp1) return NULL; dcn10_hubp_construct(hubp1, ctx, inst, &hubp_regs[inst], &hubp_shift, &hubp_mask); return &hubp1->base; } static void get_pixel_clock_parameters( const struct pipe_ctx *pipe_ctx, struct pixel_clk_params *pixel_clk_params) { const struct dc_stream_state *stream = pipe_ctx->stream; pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; pixel_clk_params->encoder_object_id = stream->link->link_enc->id; pixel_clk_params->signal_type = pipe_ctx->stream->signal; pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; /* TODO: un-hardcode*/ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * LINK_RATE_REF_FREQ_IN_KHZ; pixel_clk_params->flags.ENABLE_SS = 0; pixel_clk_params->color_depth = stream->timing.display_color_depth; pixel_clk_params->flags.DISPLAY_BLANKED = 1; pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding; if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) pixel_clk_params->color_depth = COLOR_DEPTH_888; if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) pixel_clk_params->requested_pix_clk_100hz /= 2; if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) pixel_clk_params->requested_pix_clk_100hz *= 2; } static void build_clamping_params(struct dc_stream_state *stream) { stream->clamping.clamping_level = CLAMPING_FULL_RANGE; stream->clamping.c_depth = stream->timing.display_color_depth; stream->clamping.pixel_encoding = stream->timing.pixel_encoding; } static void build_pipe_hw_param(struct pipe_ctx *pipe_ctx) { get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params); pipe_ctx->clock_source->funcs->get_pix_clk_dividers( pipe_ctx->clock_source, &pipe_ctx->stream_res.pix_clk_params, &pipe_ctx->pll_settings); pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding; resource_build_bit_depth_reduction_params(pipe_ctx->stream, &pipe_ctx->stream->bit_depth_params); build_clamping_params(pipe_ctx->stream); } static enum dc_status build_mapped_resource( const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) { struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream); /*TODO Seems unneeded anymore */ /* if (old_context && resource_is_stream_unchanged(old_context, stream)) { if (stream != NULL && old_context->streams[i] != NULL) { todo: shouldn't have to copy missing parameter here resource_build_bit_depth_reduction_params(stream, &stream->bit_depth_params); stream->clamping.pixel_encoding = stream->timing.pixel_encoding; resource_build_bit_depth_reduction_params(stream, &stream->bit_depth_params); build_clamping_params(stream); continue; } } */ if (!pipe_ctx) return 
DC_ERROR_UNEXPECTED; build_pipe_hw_param(pipe_ctx); return DC_OK; } enum dc_status dcn10_add_stream_to_ctx( struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) { enum dc_status result = DC_ERROR_UNEXPECTED; result = resource_map_pool_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = build_mapped_resource(dc, new_ctx, dc_stream); return result; } static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer( struct dc_state *context, const struct resource_pool *pool, struct dc_stream_state *stream) { struct resource_context *res_ctx = &context->res_ctx; struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream); struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe); if (!head_pipe) { ASSERT(0); return NULL; } if (!idle_pipe) return NULL; idle_pipe->stream = head_pipe->stream; idle_pipe->stream_res.tg = head_pipe->stream_res.tg; idle_pipe->stream_res.abm = head_pipe->stream_res.abm; idle_pipe->stream_res.opp = head_pipe->stream_res.opp; idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx]; idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx]; idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx]; idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst; return idle_pipe; } static bool dcn10_get_dcc_compression_cap(const struct dc *dc, const struct dc_dcc_surface_param *input, struct dc_surface_dcc_cap *output) { return dc->res_pool->hubbub->funcs->get_dcc_compression_cap( dc->res_pool->hubbub, input, output); } static void dcn10_destroy_resource_pool(struct resource_pool **pool) { struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool); destruct(dcn10_pool); kfree(dcn10_pool); *pool = NULL; } static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps) { if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN && caps->max_video_width != 0 && plane_state->src_rect.width > caps->max_video_width) return DC_FAIL_SURFACE_VALIDATE; return DC_OK; } static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *context) { int i, j; bool video_down_scaled = false; bool video_large = false; bool desktop_large = false; bool dcc_disabled = false; for (i = 0; i < context->stream_count; i++) { if (context->stream_status[i].plane_count == 0) continue; if (context->stream_status[i].plane_count > 2) return DC_FAIL_UNSUPPORTED_1; for (j = 0; j < context->stream_status[i].plane_count; j++) { struct dc_plane_state *plane = context->stream_status[i].plane_states[j]; if (plane->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { if (plane->src_rect.width > plane->dst_rect.width || plane->src_rect.height > plane->dst_rect.height) video_down_scaled = true; if (plane->src_rect.width >= 3840) video_large = true; } else { if (plane->src_rect.width >= 3840) desktop_large = true; if (!plane->dcc.enable) dcc_disabled = true; } } } /* * Workaround: On DCN10 there is UMC issue that causes underflow when * playing 4k video on 4k desktop with video downscaled and single channel * memory */ if (video_large && desktop_large && video_down_scaled && dcc_disabled && dc->dcn_soc->number_of_channels == 1) return DC_FAIL_SURFACE_VALIDATE; return DC_OK; } static enum dc_status dcn10_get_default_swizzle_mode(struct dc_plane_state *plane_state) { enum dc_status result = DC_OK; enum surface_pixel_format surf_pix_format = plane_state->format; unsigned int bpp = 
resource_pixel_format_to_bpp(surf_pix_format); enum swizzle_mode_values swizzle = DC_SW_LINEAR; if (bpp == 64) swizzle = DC_SW_64KB_D; else swizzle = DC_SW_64KB_S; plane_state->tiling_info.gfx9.swizzle = swizzle; return result; } struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link( struct resource_context *res_ctx, const struct resource_pool *pool, struct dc_stream_state *stream) { int i; int j = -1; struct dc_link *link = stream->link; for (i = 0; i < pool->stream_enc_count; i++) { if (!res_ctx->is_stream_enc_acquired[i] && pool->stream_enc[i]) { /* Store first available for MST second display * in daisy chain use case */ j = i; if (pool->stream_enc[i]->id == link->link_enc->preferred_engine) return pool->stream_enc[i]; } } /* * For CZ and later, we can allow DIG FE and BE to differ for all display types */ if (j >= 0) return pool->stream_enc[j]; return NULL; } static const struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn10_get_dcc_compression_cap }; static const struct resource_funcs dcn10_res_pool_funcs = { .destroy = dcn10_destroy_resource_pool, .link_enc_create = dcn10_link_encoder_create, .validate_bandwidth = dcn_validate_bandwidth, .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer, .validate_plane = dcn10_validate_plane, .validate_global = dcn10_validate_global, .add_stream_to_ctx = dcn10_add_stream_to_ctx, .get_default_swizzle_mode = dcn10_get_default_swizzle_mode, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link }; static uint32_t read_pipe_fuses(struct dc_context *ctx) { uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0); /* RV1 support max 4 pipes */ value = value & 0xf; return value; } static bool construct( uint8_t num_virtual_links, struct dc *dc, struct dcn10_resource_pool *pool) { int i; int j; struct dc_context *ctx = dc->ctx; uint32_t pipe_fuses = read_pipe_fuses(ctx); ctx->dc_bios->regs = &bios_regs; if (ctx->dce_version == DCN_VERSION_1_01) pool->base.res_cap = &rv2_res_cap; else pool->base.res_cap = &res_cap; pool->base.funcs = &dcn10_res_pool_funcs; /* * TODO fill in from actual raven resource when we create * more than virtual encoder */ /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; /* max pipe num for ASIC before check pipe fuses */ pool->base.pipe_count = pool->base.res_cap->num_timing_generator; if (dc->ctx->dce_version == DCN_VERSION_1_01) pool->base.pipe_count = 3; dc->caps.max_video_width = 3840; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 256; dc->caps.max_slave_planes = 1; dc->caps.is_apu = true; dc->caps.post_blend_color_processing = false; /* Raven DP PHY HBR2 eye diagram pattern is not stable. 
Use TP4 */ dc->caps.force_dp_tps4_for_cp2520 = true; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; else dc->debug = debug_defaults_diags; /************************************************* * Create resources * *************************************************/ pool->base.clock_sources[DCN10_CLK_SRC_PLL0] = dcn10_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL0, &clk_src_regs[0], false); pool->base.clock_sources[DCN10_CLK_SRC_PLL1] = dcn10_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[DCN10_CLK_SRC_PLL2] = dcn10_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL2, &clk_src_regs[2], false); if (dc->ctx->dce_version == DCN_VERSION_1_0) { pool->base.clock_sources[DCN10_CLK_SRC_PLL3] = dcn10_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL3, &clk_src_regs[3], false); } pool->base.clk_src_count = DCN10_CLK_SRC_TOTAL; if (dc->ctx->dce_version == DCN_VERSION_1_01) pool->base.clk_src_count = DCN101_CLK_SRC_TOTAL; pool->base.dp_clock_source = dcn10_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_ID_DP_DTO, /* todo: not reuse phy_pll registers */ &clk_src_regs[0], true); for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] == NULL) { dm_error("DC: failed to create clock sources!\n"); BREAK_TO_DEBUGGER(); goto fail; } } pool->base.dmcu = dcn10_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask); if (pool->base.dmcu == NULL) { dm_error("DC: failed to create dmcu!\n"); BREAK_TO_DEBUGGER(); goto fail; } pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask); if (pool->base.abm == NULL) { dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto fail; } dml_init_instance(&dc->dml, &dcn1_0_soc, &dcn1_0_ip, DML_PROJECT_RAVEN1); memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults)); memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults)); if (dc->ctx->dce_version == DCN_VERSION_1_01) { struct dcn_soc_bounding_box *dcn_soc = dc->dcn_soc; struct dcn_ip_params *dcn_ip = dc->dcn_ip; struct display_mode_lib *dml = &dc->dml; dml->ip.max_num_dpp = 3; /* TODO how to handle 23.84? */ dcn_soc->dram_clock_change_latency = 23; dcn_ip->max_num_dpp = 3; } if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) { dc->dcn_soc->urgent_latency = 3; dc->debug.disable_dmcu = true; dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 41.60f; } dc->dcn_soc->number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width; ASSERT(dc->dcn_soc->number_of_channels < 3); if (dc->dcn_soc->number_of_channels == 0)/*old sbios bug*/ dc->dcn_soc->number_of_channels = 2; if (dc->dcn_soc->number_of_channels == 1) { dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 19.2f; dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = 17.066f; dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = 14.933f; dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 12.8f; if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) { dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 20.80f; } } pool->base.pp_smu = dcn10_pp_smu_create(ctx); /* * Right now SMU/PPLIB and DAL all have the AZ D3 force PME notification * * implemented. So AZ D3 should work.For issue 197007. 
* */ if (pool->base.pp_smu != NULL && pool->base.pp_smu->rv_funcs.set_pme_wa_enable != NULL) dc->debug.az_endpoint_mute_only = false; if (!dc->debug.disable_pplib_clock_request) dcn_bw_update_from_pplib(dc); dcn_bw_sync_calcs_and_dml(dc); if (!dc->debug.disable_pplib_wm_range) { dc->res_pool = &pool->base; dcn_bw_notify_pplib_of_wm_ranges(dc); } { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dcn10_create(&init_data); if (!pool->base.irqs) goto fail; } /* index to valid pipe resource */ j = 0; /* mem input -> ipp -> dpp -> opp -> TG */ for (i = 0; i < pool->base.pipe_count; i++) { /* if pipe is disabled, skip instance of HW pipe, * i.e, skip ASIC register instance */ if ((pipe_fuses & (1 << i)) != 0) continue; pool->base.hubps[j] = dcn10_hubp_create(ctx, i); if (pool->base.hubps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create memory input!\n"); goto fail; } pool->base.ipps[j] = dcn10_ipp_create(ctx, i); if (pool->base.ipps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create input pixel processor!\n"); goto fail; } pool->base.dpps[j] = dcn10_dpp_create(ctx, i); if (pool->base.dpps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create dpp!\n"); goto fail; } pool->base.opps[j] = dcn10_opp_create(ctx, i); if (pool->base.opps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create output pixel processor!\n"); goto fail; } pool->base.timing_generators[j] = dcn10_timing_generator_create( ctx, i); if (pool->base.timing_generators[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto fail; } /* check next valid pipe */ j++; } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dcn10_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create aux engine!!\n"); goto fail; } pool->base.hw_i2cs[i] = dcn10_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC:failed to create hw i2c!!\n"); goto fail; } pool->base.sw_i2cs[i] = NULL; } /* valid pipe num */ pool->base.pipe_count = j; pool->base.timing_generator_count = j; /* within dml lib, it is hard code to 4. If ASIC pipe is fused, * the value may be changed */ dc->dml.ip.max_num_dpp = pool->base.pipe_count; dc->dcn_ip->max_num_dpp = pool->base.pipe_count; pool->base.mpc = dcn10_mpc_create(ctx); if (pool->base.mpc == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create mpc!\n"); goto fail; } pool->base.hubbub = dcn10_hubbub_create(ctx); if (pool->base.hubbub == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create hubbub!\n"); goto fail; } if (!resource_construct(num_virtual_links, dc, &pool->base, (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? &res_create_funcs : &res_create_maximus_funcs))) goto fail; dcn10_hw_sequencer_construct(dc); dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; dc->cap_funcs = cap_funcs; return true; fail: destruct(pool); return false; } struct resource_pool *dcn10_create_resource_pool( const struct dc_init_data *init_data, struct dc *dc) { struct dcn10_resource_pool *pool = kzalloc(sizeof(struct dcn10_resource_pool), GFP_KERNEL); if (!pool) return NULL; if (construct(init_data->num_virtual_links, dc, pool)) return &pool->base; kfree(pool); BREAK_TO_DEBUGGER(); return NULL; }
// SPDX-License-Identifier: GPL-2.0-or-later /* * Driver for the Conexant CX23885/7/8 PCIe bridge * * CX23888 Integrated Consumer Infrared Controller * * Copyright (C) 2009 Andy Walls <awalls@md.metrocast.net> */ #include "cx23885.h" #include "cx23888-ir.h" #include <linux/kfifo.h> #include <linux/slab.h> #include <media/v4l2-device.h> #include <media/rc-core.h> static unsigned int ir_888_debug; module_param(ir_888_debug, int, 0644); MODULE_PARM_DESC(ir_888_debug, "enable debug messages [CX23888 IR controller]"); #define CX23888_IR_REG_BASE 0x170000 /* * These CX23888 register offsets have a straightforward one to one mapping * to the CX23885 register offsets of 0x200 through 0x218 */ #define CX23888_IR_CNTRL_REG 0x170000 #define CNTRL_WIN_3_3 0x00000000 #define CNTRL_WIN_4_3 0x00000001 #define CNTRL_WIN_3_4 0x00000002 #define CNTRL_WIN_4_4 0x00000003 #define CNTRL_WIN 0x00000003 #define CNTRL_EDG_NONE 0x00000000 #define CNTRL_EDG_FALL 0x00000004 #define CNTRL_EDG_RISE 0x00000008 #define CNTRL_EDG_BOTH 0x0000000C #define CNTRL_EDG 0x0000000C #define CNTRL_DMD 0x00000010 #define CNTRL_MOD 0x00000020 #define CNTRL_RFE 0x00000040 #define CNTRL_TFE 0x00000080 #define CNTRL_RXE 0x00000100 #define CNTRL_TXE 0x00000200 #define CNTRL_RIC 0x00000400 #define CNTRL_TIC 0x00000800 #define CNTRL_CPL 0x00001000 #define CNTRL_LBM 0x00002000 #define CNTRL_R 0x00004000 /* CX23888 specific control flag */ #define CNTRL_IVO 0x00008000 #define CX23888_IR_TXCLK_REG 0x170004 #define TXCLK_TCD 0x0000FFFF #define CX23888_IR_RXCLK_REG 0x170008 #define RXCLK_RCD 0x0000FFFF #define CX23888_IR_CDUTY_REG 0x17000C #define CDUTY_CDC 0x0000000F #define CX23888_IR_STATS_REG 0x170010 #define STATS_RTO 0x00000001 #define STATS_ROR 0x00000002 #define STATS_RBY 0x00000004 #define STATS_TBY 0x00000008 #define STATS_RSR 0x00000010 #define STATS_TSR 0x00000020 #define CX23888_IR_IRQEN_REG 0x170014 #define IRQEN_RTE 0x00000001 #define IRQEN_ROE 0x00000002 #define IRQEN_RSE 0x00000010 #define IRQEN_TSE 0x00000020 #define CX23888_IR_FILTR_REG 0x170018 #define FILTR_LPF 0x0000FFFF /* This register doesn't follow the pattern; it's 0x23C on a CX23885 */ #define CX23888_IR_FIFO_REG 0x170040 #define FIFO_RXTX 0x0000FFFF #define FIFO_RXTX_LVL 0x00010000 #define FIFO_RXTX_RTO 0x0001FFFF #define FIFO_RX_NDV 0x00020000 #define FIFO_RX_DEPTH 8 #define FIFO_TX_DEPTH 8 /* CX23888 unique registers */ #define CX23888_IR_SEEDP_REG 0x17001C #define CX23888_IR_TIMOL_REG 0x170020 #define CX23888_IR_WAKE0_REG 0x170024 #define CX23888_IR_WAKE1_REG 0x170028 #define CX23888_IR_WAKE2_REG 0x17002C #define CX23888_IR_MASK0_REG 0x170030 #define CX23888_IR_MASK1_REG 0x170034 #define CX23888_IR_MAKS2_REG 0x170038 #define CX23888_IR_DPIPG_REG 0x17003C #define CX23888_IR_LEARN_REG 0x170044 #define CX23888_VIDCLK_FREQ 108000000 /* 108 MHz, BT.656 */ #define CX23888_IR_REFCLK_FREQ (CX23888_VIDCLK_FREQ / 2) /* * We use this union internally for convenience, but callers to tx_write * and rx_read will be expecting records of type struct ir_raw_event. * Always ensure the size of this union is dictated by struct ir_raw_event. 
*/ union cx23888_ir_fifo_rec { u32 hw_fifo_data; struct ir_raw_event ir_core_data; }; #define CX23888_IR_RX_KFIFO_SIZE (256 * sizeof(union cx23888_ir_fifo_rec)) #define CX23888_IR_TX_KFIFO_SIZE (256 * sizeof(union cx23888_ir_fifo_rec)) struct cx23888_ir_state { struct v4l2_subdev sd; struct cx23885_dev *dev; struct v4l2_subdev_ir_parameters rx_params; struct mutex rx_params_lock; atomic_t rxclk_divider; atomic_t rx_invert; struct kfifo rx_kfifo; spinlock_t rx_kfifo_lock; struct v4l2_subdev_ir_parameters tx_params; struct mutex tx_params_lock; atomic_t txclk_divider; }; static inline struct cx23888_ir_state *to_state(struct v4l2_subdev *sd) { return v4l2_get_subdevdata(sd); } /* * IR register block read and write functions */ static inline int cx23888_ir_write4(struct cx23885_dev *dev, u32 addr, u32 value) { cx_write(addr, value); return 0; } static inline u32 cx23888_ir_read4(struct cx23885_dev *dev, u32 addr) { return cx_read(addr); } static inline int cx23888_ir_and_or4(struct cx23885_dev *dev, u32 addr, u32 and_mask, u32 or_value) { cx_andor(addr, ~and_mask, or_value); return 0; } /* * Rx and Tx Clock Divider register computations * * Note the largest clock divider value of 0xffff corresponds to: * (0xffff + 1) * 1000 / 108/2 MHz = 1,213,629.629... ns * which fits in 21 bits, so we'll use unsigned int for time arguments. */ static inline u16 count_to_clock_divider(unsigned int d) { if (d > RXCLK_RCD + 1) d = RXCLK_RCD; else if (d < 2) d = 1; else d--; return (u16) d; } static inline u16 ns_to_clock_divider(unsigned int ns) { return count_to_clock_divider( DIV_ROUND_CLOSEST(CX23888_IR_REFCLK_FREQ / 1000000 * ns, 1000)); } static inline unsigned int clock_divider_to_ns(unsigned int divider) { /* Period of the Rx or Tx clock in ns */ return DIV_ROUND_CLOSEST((divider + 1) * 1000, CX23888_IR_REFCLK_FREQ / 1000000); } static inline u16 carrier_freq_to_clock_divider(unsigned int freq) { return count_to_clock_divider( DIV_ROUND_CLOSEST(CX23888_IR_REFCLK_FREQ, freq * 16)); } static inline unsigned int clock_divider_to_carrier_freq(unsigned int divider) { return DIV_ROUND_CLOSEST(CX23888_IR_REFCLK_FREQ, (divider + 1) * 16); } static inline u16 freq_to_clock_divider(unsigned int freq, unsigned int rollovers) { return count_to_clock_divider( DIV_ROUND_CLOSEST(CX23888_IR_REFCLK_FREQ, freq * rollovers)); } static inline unsigned int clock_divider_to_freq(unsigned int divider, unsigned int rollovers) { return DIV_ROUND_CLOSEST(CX23888_IR_REFCLK_FREQ, (divider + 1) * rollovers); } /* * Low Pass Filter register calculations * * Note the largest count value of 0xffff corresponds to: * 0xffff * 1000 / 108/2 MHz = 1,213,611.11... ns * which fits in 21 bits, so we'll use unsigned int for time arguments. 
*/ static inline u16 count_to_lpf_count(unsigned int d) { if (d > FILTR_LPF) d = FILTR_LPF; else if (d < 4) d = 0; return (u16) d; } static inline u16 ns_to_lpf_count(unsigned int ns) { return count_to_lpf_count( DIV_ROUND_CLOSEST(CX23888_IR_REFCLK_FREQ / 1000000 * ns, 1000)); } static inline unsigned int lpf_count_to_ns(unsigned int count) { /* Duration of the Low Pass Filter rejection window in ns */ return DIV_ROUND_CLOSEST(count * 1000, CX23888_IR_REFCLK_FREQ / 1000000); } static inline unsigned int lpf_count_to_us(unsigned int count) { /* Duration of the Low Pass Filter rejection window in us */ return DIV_ROUND_CLOSEST(count, CX23888_IR_REFCLK_FREQ / 1000000); } /* * FIFO register pulse width count computations */ static u32 clock_divider_to_resolution(u16 divider) { /* * Resolution is the duration of 1 tick of the readable portion of * of the pulse width counter as read from the FIFO. The two lsb's are * not readable, hence the << 2. This function returns ns. */ return DIV_ROUND_CLOSEST((1 << 2) * ((u32) divider + 1) * 1000, CX23888_IR_REFCLK_FREQ / 1000000); } static u64 pulse_width_count_to_ns(u16 count, u16 divider) { u64 n; u32 rem; /* * The 2 lsb's of the pulse width timer count are not readable, hence * the (count << 2) | 0x3 */ n = (((u64) count << 2) | 0x3) * (divider + 1) * 1000; /* millicycles */ rem = do_div(n, CX23888_IR_REFCLK_FREQ / 1000000); /* / MHz => ns */ if (rem >= CX23888_IR_REFCLK_FREQ / 1000000 / 2) n++; return n; } static unsigned int pulse_width_count_to_us(u16 count, u16 divider) { u64 n; u32 rem; /* * The 2 lsb's of the pulse width timer count are not readable, hence * the (count << 2) | 0x3 */ n = (((u64) count << 2) | 0x3) * (divider + 1); /* cycles */ rem = do_div(n, CX23888_IR_REFCLK_FREQ / 1000000); /* / MHz => us */ if (rem >= CX23888_IR_REFCLK_FREQ / 1000000 / 2) n++; return (unsigned int) n; } /* * Pulse Clocks computations: Combined Pulse Width Count & Rx Clock Counts * * The total pulse clock count is an 18 bit pulse width timer count as the most * significant part and (up to) 16 bit clock divider count as a modulus. * When the Rx clock divider ticks down to 0, it increments the 18 bit pulse * width timer count's least significant bit. */ static u64 ns_to_pulse_clocks(u32 ns) { u64 clocks; u32 rem; clocks = CX23888_IR_REFCLK_FREQ / 1000000 * (u64) ns; /* millicycles */ rem = do_div(clocks, 1000); /* /1000 = cycles */ if (rem >= 1000 / 2) clocks++; return clocks; } static u16 pulse_clocks_to_clock_divider(u64 count) { do_div(count, (FIFO_RXTX << 2) | 0x3); /* net result needs to be rounded down and decremented by 1 */ if (count > RXCLK_RCD + 1) count = RXCLK_RCD; else if (count < 2) count = 1; else count--; return (u16) count; } /* * IR Control Register helpers */ enum tx_fifo_watermark { TX_FIFO_HALF_EMPTY = 0, TX_FIFO_EMPTY = CNTRL_TIC, }; enum rx_fifo_watermark { RX_FIFO_HALF_FULL = 0, RX_FIFO_NOT_EMPTY = CNTRL_RIC, }; static inline void control_tx_irq_watermark(struct cx23885_dev *dev, enum tx_fifo_watermark level) { cx23888_ir_and_or4(dev, CX23888_IR_CNTRL_REG, ~CNTRL_TIC, level); } static inline void control_rx_irq_watermark(struct cx23885_dev *dev, enum rx_fifo_watermark level) { cx23888_ir_and_or4(dev, CX23888_IR_CNTRL_REG, ~CNTRL_RIC, level); } static inline void control_tx_enable(struct cx23885_dev *dev, bool enable) { cx23888_ir_and_or4(dev, CX23888_IR_CNTRL_REG, ~(CNTRL_TXE | CNTRL_TFE), enable ? 
(CNTRL_TXE | CNTRL_TFE) : 0); } static inline void control_rx_enable(struct cx23885_dev *dev, bool enable) { cx23888_ir_and_or4(dev, CX23888_IR_CNTRL_REG, ~(CNTRL_RXE | CNTRL_RFE), enable ? (CNTRL_RXE | CNTRL_RFE) : 0); } static inline void control_tx_modulation_enable(struct cx23885_dev *dev, bool enable) { cx23888_ir_and_or4(dev, CX23888_IR_CNTRL_REG, ~CNTRL_MOD, enable ? CNTRL_MOD : 0); } static inline void control_rx_demodulation_enable(struct cx23885_dev *dev, bool enable) { cx23888_ir_and_or4(dev, CX23888_IR_CNTRL_REG, ~CNTRL_DMD, enable ? CNTRL_DMD : 0); } static inline void control_rx_s_edge_detection(struct cx23885_dev *dev, u32 edge_types) { cx23888_ir_and_or4(dev, CX23888_IR_CNTRL_REG, ~CNTRL_EDG_BOTH, edge_types & CNTRL_EDG_BOTH); } static void control_rx_s_carrier_window(struct cx23885_dev *dev, unsigned int carrier, unsigned int *carrier_range_low, unsigned int *carrier_range_high) { u32 v; unsigned int c16 = carrier * 16; if (*carrier_range_low < DIV_ROUND_CLOSEST(c16, 16 + 3)) { v = CNTRL_WIN_3_4; *carrier_range_low = DIV_ROUND_CLOSEST(c16, 16 + 4); } else { v = CNTRL_WIN_3_3; *carrier_range_low = DIV_ROUND_CLOSEST(c16, 16 + 3); } if (*carrier_range_high > DIV_ROUND_CLOSEST(c16, 16 - 3)) { v |= CNTRL_WIN_4_3; *carrier_range_high = DIV_ROUND_CLOSEST(c16, 16 - 4); } else { v |= CNTRL_WIN_3_3; *carrier_range_high = DIV_ROUND_CLOSEST(c16, 16 - 3); } cx23888_ir_and_or4(dev, CX23888_IR_CNTRL_REG, ~CNTRL_WIN, v); } static inline void control_tx_polarity_invert(struct cx23885_dev *dev, bool invert) { cx23888_ir_and_or4(dev, CX23888_IR_CNTRL_REG, ~CNTRL_CPL, invert ? CNTRL_CPL : 0); } static inline void control_tx_level_invert(struct cx23885_dev *dev, bool invert) { cx23888_ir_and_or4(dev, CX23888_IR_CNTRL_REG, ~CNTRL_IVO, invert ? CNTRL_IVO : 0); } /* * IR Rx & Tx Clock Register helpers */ static unsigned int txclk_tx_s_carrier(struct cx23885_dev *dev, unsigned int freq, u16 *divider) { *divider = carrier_freq_to_clock_divider(freq); cx23888_ir_write4(dev, CX23888_IR_TXCLK_REG, *divider); return clock_divider_to_carrier_freq(*divider); } static unsigned int rxclk_rx_s_carrier(struct cx23885_dev *dev, unsigned int freq, u16 *divider) { *divider = carrier_freq_to_clock_divider(freq); cx23888_ir_write4(dev, CX23888_IR_RXCLK_REG, *divider); return clock_divider_to_carrier_freq(*divider); } static u32 txclk_tx_s_max_pulse_width(struct cx23885_dev *dev, u32 ns, u16 *divider) { u64 pulse_clocks; if (ns > IR_MAX_DURATION) ns = IR_MAX_DURATION; pulse_clocks = ns_to_pulse_clocks(ns); *divider = pulse_clocks_to_clock_divider(pulse_clocks); cx23888_ir_write4(dev, CX23888_IR_TXCLK_REG, *divider); return (u32) pulse_width_count_to_ns(FIFO_RXTX, *divider); } static u32 rxclk_rx_s_max_pulse_width(struct cx23885_dev *dev, u32 ns, u16 *divider) { u64 pulse_clocks; if (ns > IR_MAX_DURATION) ns = IR_MAX_DURATION; pulse_clocks = ns_to_pulse_clocks(ns); *divider = pulse_clocks_to_clock_divider(pulse_clocks); cx23888_ir_write4(dev, CX23888_IR_RXCLK_REG, *divider); return (u32) pulse_width_count_to_ns(FIFO_RXTX, *divider); } /* * IR Tx Carrier Duty Cycle register helpers */ static unsigned int cduty_tx_s_duty_cycle(struct cx23885_dev *dev, unsigned int duty_cycle) { u32 n; n = DIV_ROUND_CLOSEST(duty_cycle * 100, 625); /* 16ths of 100% */ if (n != 0) n--; if (n > 15) n = 15; cx23888_ir_write4(dev, CX23888_IR_CDUTY_REG, n); return DIV_ROUND_CLOSEST((n + 1) * 100, 16); } /* * IR Filter Register helpers */ static u32 filter_rx_s_min_width(struct cx23885_dev *dev, u32 min_width_ns) { u32 count = 
ns_to_lpf_count(min_width_ns); cx23888_ir_write4(dev, CX23888_IR_FILTR_REG, count); return lpf_count_to_ns(count); } /* * IR IRQ Enable Register helpers */ static inline void irqenable_rx(struct cx23885_dev *dev, u32 mask) { mask &= (IRQEN_RTE | IRQEN_ROE | IRQEN_RSE); cx23888_ir_and_or4(dev, CX23888_IR_IRQEN_REG, ~(IRQEN_RTE | IRQEN_ROE | IRQEN_RSE), mask); } static inline void irqenable_tx(struct cx23885_dev *dev, u32 mask) { mask &= IRQEN_TSE; cx23888_ir_and_or4(dev, CX23888_IR_IRQEN_REG, ~IRQEN_TSE, mask); } /* * V4L2 Subdevice IR Ops */ static int cx23888_ir_irq_handler(struct v4l2_subdev *sd, u32 status, bool *handled) { struct cx23888_ir_state *state = to_state(sd); struct cx23885_dev *dev = state->dev; unsigned long flags; u32 cntrl = cx23888_ir_read4(dev, CX23888_IR_CNTRL_REG); u32 irqen = cx23888_ir_read4(dev, CX23888_IR_IRQEN_REG); u32 stats = cx23888_ir_read4(dev, CX23888_IR_STATS_REG); union cx23888_ir_fifo_rec rx_data[FIFO_RX_DEPTH]; unsigned int i, j, k; u32 events, v; int tsr, rsr, rto, ror, tse, rse, rte, roe, kror; tsr = stats & STATS_TSR; /* Tx FIFO Service Request */ rsr = stats & STATS_RSR; /* Rx FIFO Service Request */ rto = stats & STATS_RTO; /* Rx Pulse Width Timer Time Out */ ror = stats & STATS_ROR; /* Rx FIFO Over Run */ tse = irqen & IRQEN_TSE; /* Tx FIFO Service Request IRQ Enable */ rse = irqen & IRQEN_RSE; /* Rx FIFO Service Request IRQ Enable */ rte = irqen & IRQEN_RTE; /* Rx Pulse Width Timer Time Out IRQ Enable */ roe = irqen & IRQEN_ROE; /* Rx FIFO Over Run IRQ Enable */ *handled = false; v4l2_dbg(2, ir_888_debug, sd, "IRQ Status: %s %s %s %s %s %s\n", tsr ? "tsr" : " ", rsr ? "rsr" : " ", rto ? "rto" : " ", ror ? "ror" : " ", stats & STATS_TBY ? "tby" : " ", stats & STATS_RBY ? "rby" : " "); v4l2_dbg(2, ir_888_debug, sd, "IRQ Enables: %s %s %s %s\n", tse ? "tse" : " ", rse ? "rse" : " ", rte ? "rte" : " ", roe ? "roe" : " "); /* * Transmitter interrupt service */ if (tse && tsr) { /* * TODO: * Check the watermark threshold setting * Pull FIFO_TX_DEPTH or FIFO_TX_DEPTH/2 entries from tx_kfifo * Push the data to the hardware FIFO. * If there was nothing more to send in the tx_kfifo, disable * the TSR IRQ and notify the v4l2_device. * If there was something in the tx_kfifo, check the tx_kfifo * level and notify the v4l2_device, if it is low. */ /* For now, inhibit TSR interrupt until Tx is implemented */ irqenable_tx(dev, 0); events = V4L2_SUBDEV_IR_TX_FIFO_SERVICE_REQ; v4l2_subdev_notify(sd, V4L2_SUBDEV_IR_TX_NOTIFY, &events); *handled = true; } /* * Receiver interrupt service */ kror = 0; if ((rse && rsr) || (rte && rto)) { /* * Receive data on RSR to clear the STATS_RSR. * Receive data on RTO, since we may not have yet hit the RSR * watermark when we receive the RTO. 
*/ for (i = 0, v = FIFO_RX_NDV; (v & FIFO_RX_NDV) && !kror; i = 0) { for (j = 0; (v & FIFO_RX_NDV) && j < FIFO_RX_DEPTH; j++) { v = cx23888_ir_read4(dev, CX23888_IR_FIFO_REG); rx_data[i].hw_fifo_data = v & ~FIFO_RX_NDV; i++; } if (i == 0) break; j = i * sizeof(union cx23888_ir_fifo_rec); k = kfifo_in_locked(&state->rx_kfifo, (unsigned char *) rx_data, j, &state->rx_kfifo_lock); if (k != j) kror++; /* rx_kfifo over run */ } *handled = true; } events = 0; v = 0; if (kror) { events |= V4L2_SUBDEV_IR_RX_SW_FIFO_OVERRUN; v4l2_err(sd, "IR receiver software FIFO overrun\n"); } if (roe && ror) { /* * The RX FIFO Enable (CNTRL_RFE) must be toggled to clear * the Rx FIFO Over Run status (STATS_ROR) */ v |= CNTRL_RFE; events |= V4L2_SUBDEV_IR_RX_HW_FIFO_OVERRUN; v4l2_err(sd, "IR receiver hardware FIFO overrun\n"); } if (rte && rto) { /* * The IR Receiver Enable (CNTRL_RXE) must be toggled to clear * the Rx Pulse Width Timer Time Out (STATS_RTO) */ v |= CNTRL_RXE; events |= V4L2_SUBDEV_IR_RX_END_OF_RX_DETECTED; } if (v) { /* Clear STATS_ROR & STATS_RTO as needed by resetting hardware */ cx23888_ir_write4(dev, CX23888_IR_CNTRL_REG, cntrl & ~v); cx23888_ir_write4(dev, CX23888_IR_CNTRL_REG, cntrl); *handled = true; } spin_lock_irqsave(&state->rx_kfifo_lock, flags); if (kfifo_len(&state->rx_kfifo) >= CX23888_IR_RX_KFIFO_SIZE / 2) events |= V4L2_SUBDEV_IR_RX_FIFO_SERVICE_REQ; spin_unlock_irqrestore(&state->rx_kfifo_lock, flags); if (events) v4l2_subdev_notify(sd, V4L2_SUBDEV_IR_RX_NOTIFY, &events); return 0; } /* Receiver */ static int cx23888_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count, ssize_t *num) { struct cx23888_ir_state *state = to_state(sd); bool invert = (bool) atomic_read(&state->rx_invert); u16 divider = (u16) atomic_read(&state->rxclk_divider); unsigned int i, n; union cx23888_ir_fifo_rec *p; unsigned u, v, w; n = count / sizeof(union cx23888_ir_fifo_rec) * sizeof(union cx23888_ir_fifo_rec); if (n == 0) { *num = 0; return 0; } n = kfifo_out_locked(&state->rx_kfifo, buf, n, &state->rx_kfifo_lock); n /= sizeof(union cx23888_ir_fifo_rec); *num = n * sizeof(union cx23888_ir_fifo_rec); for (p = (union cx23888_ir_fifo_rec *) buf, i = 0; i < n; p++, i++) { if ((p->hw_fifo_data & FIFO_RXTX_RTO) == FIFO_RXTX_RTO) { /* Assume RTO was because of no IR light input */ u = 0; w = 1; } else { u = (p->hw_fifo_data & FIFO_RXTX_LVL) ? 1 : 0; if (invert) u = u ? 0 : 1; w = 0; } v = (unsigned) pulse_width_count_to_ns( (u16) (p->hw_fifo_data & FIFO_RXTX), divider); if (v > IR_MAX_DURATION) v = IR_MAX_DURATION; p->ir_core_data = (struct ir_raw_event) { .pulse = u, .duration = v, .timeout = w }; v4l2_dbg(2, ir_888_debug, sd, "rx read: %10u ns %s %s\n", v, u ? "mark" : "space", w ? 
"(timed out)" : ""); if (w) v4l2_dbg(2, ir_888_debug, sd, "rx read: end of rx\n"); } return 0; } static int cx23888_ir_rx_g_parameters(struct v4l2_subdev *sd, struct v4l2_subdev_ir_parameters *p) { struct cx23888_ir_state *state = to_state(sd); mutex_lock(&state->rx_params_lock); memcpy(p, &state->rx_params, sizeof(struct v4l2_subdev_ir_parameters)); mutex_unlock(&state->rx_params_lock); return 0; } static int cx23888_ir_rx_shutdown(struct v4l2_subdev *sd) { struct cx23888_ir_state *state = to_state(sd); struct cx23885_dev *dev = state->dev; mutex_lock(&state->rx_params_lock); /* Disable or slow down all IR Rx circuits and counters */ irqenable_rx(dev, 0); control_rx_enable(dev, false); control_rx_demodulation_enable(dev, false); control_rx_s_edge_detection(dev, CNTRL_EDG_NONE); filter_rx_s_min_width(dev, 0); cx23888_ir_write4(dev, CX23888_IR_RXCLK_REG, RXCLK_RCD); state->rx_params.shutdown = true; mutex_unlock(&state->rx_params_lock); return 0; } static int cx23888_ir_rx_s_parameters(struct v4l2_subdev *sd, struct v4l2_subdev_ir_parameters *p) { struct cx23888_ir_state *state = to_state(sd); struct cx23885_dev *dev = state->dev; struct v4l2_subdev_ir_parameters *o = &state->rx_params; u16 rxclk_divider; if (p->shutdown) return cx23888_ir_rx_shutdown(sd); if (p->mode != V4L2_SUBDEV_IR_MODE_PULSE_WIDTH) return -ENOSYS; mutex_lock(&state->rx_params_lock); o->shutdown = p->shutdown; o->mode = p->mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH; o->bytes_per_data_element = p->bytes_per_data_element = sizeof(union cx23888_ir_fifo_rec); /* Before we tweak the hardware, we have to disable the receiver */ irqenable_rx(dev, 0); control_rx_enable(dev, false); control_rx_demodulation_enable(dev, p->modulation); o->modulation = p->modulation; if (p->modulation) { p->carrier_freq = rxclk_rx_s_carrier(dev, p->carrier_freq, &rxclk_divider); o->carrier_freq = p->carrier_freq; o->duty_cycle = p->duty_cycle = 50; control_rx_s_carrier_window(dev, p->carrier_freq, &p->carrier_range_lower, &p->carrier_range_upper); o->carrier_range_lower = p->carrier_range_lower; o->carrier_range_upper = p->carrier_range_upper; p->max_pulse_width = (u32) pulse_width_count_to_ns(FIFO_RXTX, rxclk_divider); } else { p->max_pulse_width = rxclk_rx_s_max_pulse_width(dev, p->max_pulse_width, &rxclk_divider); } o->max_pulse_width = p->max_pulse_width; atomic_set(&state->rxclk_divider, rxclk_divider); p->noise_filter_min_width = filter_rx_s_min_width(dev, p->noise_filter_min_width); o->noise_filter_min_width = p->noise_filter_min_width; p->resolution = clock_divider_to_resolution(rxclk_divider); o->resolution = p->resolution; /* FIXME - make this dependent on resolution for better performance */ control_rx_irq_watermark(dev, RX_FIFO_HALF_FULL); control_rx_s_edge_detection(dev, CNTRL_EDG_BOTH); o->invert_level = p->invert_level; atomic_set(&state->rx_invert, p->invert_level); o->interrupt_enable = p->interrupt_enable; o->enable = p->enable; if (p->enable) { unsigned long flags; spin_lock_irqsave(&state->rx_kfifo_lock, flags); kfifo_reset(&state->rx_kfifo); /* reset tx_fifo too if there is one... 
*/ spin_unlock_irqrestore(&state->rx_kfifo_lock, flags); if (p->interrupt_enable) irqenable_rx(dev, IRQEN_RSE | IRQEN_RTE | IRQEN_ROE); control_rx_enable(dev, p->enable); } mutex_unlock(&state->rx_params_lock); return 0; } /* Transmitter */ static int cx23888_ir_tx_write(struct v4l2_subdev *sd, u8 *buf, size_t count, ssize_t *num) { struct cx23888_ir_state *state = to_state(sd); struct cx23885_dev *dev = state->dev; /* For now enable the Tx FIFO Service interrupt & pretend we did work */ irqenable_tx(dev, IRQEN_TSE); *num = count; return 0; } static int cx23888_ir_tx_g_parameters(struct v4l2_subdev *sd, struct v4l2_subdev_ir_parameters *p) { struct cx23888_ir_state *state = to_state(sd); mutex_lock(&state->tx_params_lock); memcpy(p, &state->tx_params, sizeof(struct v4l2_subdev_ir_parameters)); mutex_unlock(&state->tx_params_lock); return 0; } static int cx23888_ir_tx_shutdown(struct v4l2_subdev *sd) { struct cx23888_ir_state *state = to_state(sd); struct cx23885_dev *dev = state->dev; mutex_lock(&state->tx_params_lock); /* Disable or slow down all IR Tx circuits and counters */ irqenable_tx(dev, 0); control_tx_enable(dev, false); control_tx_modulation_enable(dev, false); cx23888_ir_write4(dev, CX23888_IR_TXCLK_REG, TXCLK_TCD); state->tx_params.shutdown = true; mutex_unlock(&state->tx_params_lock); return 0; } static int cx23888_ir_tx_s_parameters(struct v4l2_subdev *sd, struct v4l2_subdev_ir_parameters *p) { struct cx23888_ir_state *state = to_state(sd); struct cx23885_dev *dev = state->dev; struct v4l2_subdev_ir_parameters *o = &state->tx_params; u16 txclk_divider; if (p->shutdown) return cx23888_ir_tx_shutdown(sd); if (p->mode != V4L2_SUBDEV_IR_MODE_PULSE_WIDTH) return -ENOSYS; mutex_lock(&state->tx_params_lock); o->shutdown = p->shutdown; o->mode = p->mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH; o->bytes_per_data_element = p->bytes_per_data_element = sizeof(union cx23888_ir_fifo_rec); /* Before we tweak the hardware, we have to disable the transmitter */ irqenable_tx(dev, 0); control_tx_enable(dev, false); control_tx_modulation_enable(dev, p->modulation); o->modulation = p->modulation; if (p->modulation) { p->carrier_freq = txclk_tx_s_carrier(dev, p->carrier_freq, &txclk_divider); o->carrier_freq = p->carrier_freq; p->duty_cycle = cduty_tx_s_duty_cycle(dev, p->duty_cycle); o->duty_cycle = p->duty_cycle; p->max_pulse_width = (u32) pulse_width_count_to_ns(FIFO_RXTX, txclk_divider); } else { p->max_pulse_width = txclk_tx_s_max_pulse_width(dev, p->max_pulse_width, &txclk_divider); } o->max_pulse_width = p->max_pulse_width; atomic_set(&state->txclk_divider, txclk_divider); p->resolution = clock_divider_to_resolution(txclk_divider); o->resolution = p->resolution; /* FIXME - make this dependent on resolution for better performance */ control_tx_irq_watermark(dev, TX_FIFO_HALF_EMPTY); control_tx_polarity_invert(dev, p->invert_carrier_sense); o->invert_carrier_sense = p->invert_carrier_sense; control_tx_level_invert(dev, p->invert_level); o->invert_level = p->invert_level; o->interrupt_enable = p->interrupt_enable; o->enable = p->enable; if (p->enable) { if (p->interrupt_enable) irqenable_tx(dev, IRQEN_TSE); control_tx_enable(dev, p->enable); } mutex_unlock(&state->tx_params_lock); return 0; } /* * V4L2 Subdevice Core Ops */ static int cx23888_ir_log_status(struct v4l2_subdev *sd) { struct cx23888_ir_state *state = to_state(sd); struct cx23885_dev *dev = state->dev; char *s; int i, j; u32 cntrl = cx23888_ir_read4(dev, CX23888_IR_CNTRL_REG); u32 txclk = cx23888_ir_read4(dev, CX23888_IR_TXCLK_REG) & 
TXCLK_TCD;
	u32 rxclk = cx23888_ir_read4(dev, CX23888_IR_RXCLK_REG) & RXCLK_RCD;
	u32 cduty = cx23888_ir_read4(dev, CX23888_IR_CDUTY_REG) & CDUTY_CDC;
	u32 stats = cx23888_ir_read4(dev, CX23888_IR_STATS_REG);
	u32 irqen = cx23888_ir_read4(dev, CX23888_IR_IRQEN_REG);
	u32 filtr = cx23888_ir_read4(dev, CX23888_IR_FILTR_REG) & FILTR_LPF;

	v4l2_info(sd, "IR Receiver:\n");
	v4l2_info(sd, "\tEnabled: %s\n", cntrl & CNTRL_RXE ? "yes" : "no");
	v4l2_info(sd, "\tDemodulation from a carrier: %s\n",
		  cntrl & CNTRL_DMD ? "enabled" : "disabled");
	v4l2_info(sd, "\tFIFO: %s\n", cntrl & CNTRL_RFE ? "enabled" : "disabled");
	switch (cntrl & CNTRL_EDG) {
	case CNTRL_EDG_NONE:
		s = "disabled";
		break;
	case CNTRL_EDG_FALL:
		s = "falling edge";
		break;
	case CNTRL_EDG_RISE:
		s = "rising edge";
		break;
	case CNTRL_EDG_BOTH:
		s = "rising & falling edges";
		break;
	default:
		s = "??? edge";
		break;
	}
	v4l2_info(sd, "\tPulse timers' start/stop trigger: %s\n", s);
	v4l2_info(sd, "\tFIFO data on pulse timer overflow: %s\n",
		  cntrl & CNTRL_R ? "not loaded" : "overflow marker");
	v4l2_info(sd, "\tFIFO interrupt watermark: %s\n",
		  cntrl & CNTRL_RIC ? "not empty" : "half full or greater");
	v4l2_info(sd, "\tLoopback mode: %s\n",
		  cntrl & CNTRL_LBM ? "loopback active" : "normal receive");
	if (cntrl & CNTRL_DMD) {
		v4l2_info(sd, "\tExpected carrier (16 clocks): %u Hz\n",
			  clock_divider_to_carrier_freq(rxclk));
		switch (cntrl & CNTRL_WIN) {
		case CNTRL_WIN_3_3:
			i = 3;
			j = 3;
			break;
		case CNTRL_WIN_4_3:
			i = 4;
			j = 3;
			break;
		case CNTRL_WIN_3_4:
			i = 3;
			j = 4;
			break;
		case CNTRL_WIN_4_4:
			i = 4;
			j = 4;
			break;
		default:
			i = 0;
			j = 0;
			break;
		}
		v4l2_info(sd, "\tNext carrier edge window: 16 clocks -%1d/+%1d, %u to %u Hz\n",
			  i, j, clock_divider_to_freq(rxclk, 16 + j),
			  clock_divider_to_freq(rxclk, 16 - i));
	}
	v4l2_info(sd, "\tMax measurable pulse width: %u us, %llu ns\n",
		  pulse_width_count_to_us(FIFO_RXTX, rxclk),
		  pulse_width_count_to_ns(FIFO_RXTX, rxclk));
	v4l2_info(sd, "\tLow pass filter: %s\n", filtr ? "enabled" : "disabled");
	if (filtr)
		v4l2_info(sd, "\tMin acceptable pulse width (LPF): %u us, %u ns\n",
			  lpf_count_to_us(filtr), lpf_count_to_ns(filtr));
	v4l2_info(sd, "\tPulse width timer timed-out: %s\n",
		  stats & STATS_RTO ? "yes" : "no");
	v4l2_info(sd, "\tPulse width timer time-out intr: %s\n",
		  irqen & IRQEN_RTE ? "enabled" : "disabled");
	v4l2_info(sd, "\tFIFO overrun: %s\n", stats & STATS_ROR ? "yes" : "no");
	v4l2_info(sd, "\tFIFO overrun interrupt: %s\n",
		  irqen & IRQEN_ROE ? "enabled" : "disabled");
	v4l2_info(sd, "\tBusy: %s\n", stats & STATS_RBY ? "yes" : "no");
	v4l2_info(sd, "\tFIFO service requested: %s\n",
		  stats & STATS_RSR ? "yes" : "no");
	v4l2_info(sd, "\tFIFO service request interrupt: %s\n",
		  irqen & IRQEN_RSE ? "enabled" : "disabled");

	v4l2_info(sd, "IR Transmitter:\n");
	v4l2_info(sd, "\tEnabled: %s\n", cntrl & CNTRL_TXE ? "yes" : "no");
	v4l2_info(sd, "\tModulation onto a carrier: %s\n",
		  cntrl & CNTRL_MOD ? "enabled" : "disabled");
	v4l2_info(sd, "\tFIFO: %s\n", cntrl & CNTRL_TFE ? "enabled" : "disabled");
	v4l2_info(sd, "\tFIFO interrupt watermark: %s\n",
		  cntrl & CNTRL_TIC ? "not empty" : "half full or less");
	v4l2_info(sd, "\tOutput pin level inversion %s\n",
		  cntrl & CNTRL_IVO ? "yes" : "no");
	v4l2_info(sd, "\tCarrier polarity: %s\n",
		  cntrl & CNTRL_CPL ? "space:burst mark:noburst"
				    : "space:noburst mark:burst");
	if (cntrl & CNTRL_MOD) {
		v4l2_info(sd, "\tCarrier (16 clocks): %u Hz\n",
			  clock_divider_to_carrier_freq(txclk));
		v4l2_info(sd, "\tCarrier duty cycle: %2u/16\n", cduty + 1);
	}
	v4l2_info(sd, "\tMax pulse width: %u us, %llu ns\n",
		  pulse_width_count_to_us(FIFO_RXTX, txclk),
		  pulse_width_count_to_ns(FIFO_RXTX, txclk));
	v4l2_info(sd, "\tBusy: %s\n", stats & STATS_TBY ? "yes" : "no");
	v4l2_info(sd, "\tFIFO service requested: %s\n",
		  stats & STATS_TSR ? "yes" : "no");
	v4l2_info(sd, "\tFIFO service request interrupt: %s\n",
		  irqen & IRQEN_TSE ? "enabled" : "disabled");
	return 0;
}

#ifdef CONFIG_VIDEO_ADV_DEBUG
static int cx23888_ir_g_register(struct v4l2_subdev *sd,
				 struct v4l2_dbg_register *reg)
{
	struct cx23888_ir_state *state = to_state(sd);
	u32 addr = CX23888_IR_REG_BASE + (u32) reg->reg;

	if ((addr & 0x3) != 0)
		return -EINVAL;
	if (addr < CX23888_IR_CNTRL_REG || addr > CX23888_IR_LEARN_REG)
		return -EINVAL;
	reg->size = 4;
	reg->val = cx23888_ir_read4(state->dev, addr);
	return 0;
}

static int cx23888_ir_s_register(struct v4l2_subdev *sd,
				 const struct v4l2_dbg_register *reg)
{
	struct cx23888_ir_state *state = to_state(sd);
	u32 addr = CX23888_IR_REG_BASE + (u32) reg->reg;

	if ((addr & 0x3) != 0)
		return -EINVAL;
	if (addr < CX23888_IR_CNTRL_REG || addr > CX23888_IR_LEARN_REG)
		return -EINVAL;
	cx23888_ir_write4(state->dev, addr, reg->val);
	return 0;
}
#endif

static const struct v4l2_subdev_core_ops cx23888_ir_core_ops = {
	.log_status = cx23888_ir_log_status,
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.g_register = cx23888_ir_g_register,
	.s_register = cx23888_ir_s_register,
#endif
	.interrupt_service_routine = cx23888_ir_irq_handler,
};

static const struct v4l2_subdev_ir_ops cx23888_ir_ir_ops = {
	.rx_read = cx23888_ir_rx_read,
	.rx_g_parameters = cx23888_ir_rx_g_parameters,
	.rx_s_parameters = cx23888_ir_rx_s_parameters,
	.tx_write = cx23888_ir_tx_write,
	.tx_g_parameters = cx23888_ir_tx_g_parameters,
	.tx_s_parameters = cx23888_ir_tx_s_parameters,
};

static const struct v4l2_subdev_ops cx23888_ir_controller_ops = {
	.core = &cx23888_ir_core_ops,
	.ir = &cx23888_ir_ir_ops,
};

static const struct v4l2_subdev_ir_parameters default_rx_params = {
	.bytes_per_data_element = sizeof(union cx23888_ir_fifo_rec),
	.mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH,
	.enable = false,
	.interrupt_enable = false,
	.shutdown = true,
	.modulation = true,
	.carrier_freq = 36000, /* 36 kHz - RC-5, RC-6, and RC-6A carrier */
	/* RC-5: 666,667 ns = 1/36 kHz * 32 cycles * 1 mark * 0.75 */
	/* RC-6A: 333,333 ns = 1/36 kHz * 16 cycles * 1 mark * 0.75 */
	.noise_filter_min_width = 333333, /* ns */
	.carrier_range_lower = 35000,
	.carrier_range_upper = 37000,
	.invert_level = false,
};

static const struct v4l2_subdev_ir_parameters default_tx_params = {
	.bytes_per_data_element = sizeof(union cx23888_ir_fifo_rec),
	.mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH,
	.enable = false,
	.interrupt_enable = false,
	.shutdown = true,
	.modulation = true,
	.carrier_freq = 36000, /* 36 kHz - RC-5 carrier */
	.duty_cycle = 25, /* 25 % - RC-5 carrier */
	.invert_level = false,
	.invert_carrier_sense = false,
};

int cx23888_ir_probe(struct cx23885_dev *dev)
{
	struct cx23888_ir_state *state;
	struct v4l2_subdev *sd;
	struct v4l2_subdev_ir_parameters default_params;
	int ret;

	state = kzalloc(sizeof(struct cx23888_ir_state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;

	spin_lock_init(&state->rx_kfifo_lock);
	if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE,
			GFP_KERNEL)) {
		/* Free the partially initialised state so it is not leaked
		 * when the kfifo allocation fails. */
		kfree(state);
		return -ENOMEM;
	}

	state->dev = dev;
	sd = &state->sd;
	v4l2_subdev_init(sd,
&cx23888_ir_controller_ops); v4l2_set_subdevdata(sd, state); /* FIXME - fix the formatting of dev->v4l2_dev.name and use it */ snprintf(sd->name, sizeof(sd->name), "%s/888-ir", dev->name); sd->grp_id = CX23885_HW_888_IR; ret = v4l2_device_register_subdev(&dev->v4l2_dev, sd); if (ret == 0) { /* * Ensure no interrupts arrive from '888 specific conditions, * since we ignore them in this driver to have commonality with * similar IR controller cores. */ cx23888_ir_write4(dev, CX23888_IR_IRQEN_REG, 0); mutex_init(&state->rx_params_lock); default_params = default_rx_params; v4l2_subdev_call(sd, ir, rx_s_parameters, &default_params); mutex_init(&state->tx_params_lock); default_params = default_tx_params; v4l2_subdev_call(sd, ir, tx_s_parameters, &default_params); } else { kfifo_free(&state->rx_kfifo); } return ret; } int cx23888_ir_remove(struct cx23885_dev *dev) { struct v4l2_subdev *sd; struct cx23888_ir_state *state; sd = cx23885_find_hw(dev, CX23885_HW_888_IR); if (sd == NULL) return -ENODEV; cx23888_ir_rx_shutdown(sd); cx23888_ir_tx_shutdown(sd); state = to_state(sd); v4l2_device_unregister_subdev(sd); kfifo_free(&state->rx_kfifo); kfree(state); /* Nothing more to free() as state held the actual v4l2_subdev object */ return 0; }
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1244_0
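In cx23888_ir_probe() above, the state structure is allocated with kzalloc() and the receive FIFO with kfifo_alloc(); as written, the kfifo_alloc() failure path returns -ENOMEM without freeing the state, and the register-subdev failure path frees only the kfifo. The sketch below shows the conventional goto-based unwind such probe functions use so that each failure releases everything acquired before it. It is a plain userspace illustration, not the driver's code: my_state, register_stub and the buffer size are assumptions made up for the example.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct my_state {
	void *fifo_buf;      /* stands in for the driver's rx kfifo */
};

/* Stub for the subdevice registration step; flip the return value to
 * exercise the error path. */
static int register_stub(struct my_state *state)
{
	(void)state;
	return 0;
}

/* Illustrative probe: unwind in reverse order of acquisition so every
 * failure path frees exactly what was set up before it. */
static int my_probe(struct my_state **out)
{
	struct my_state *state;
	int ret;

	state = calloc(1, sizeof(*state));
	if (!state)
		return -ENOMEM;

	state->fifo_buf = malloc(4096);          /* like kfifo_alloc() */
	if (!state->fifo_buf) {
		ret = -ENOMEM;
		goto err_free_state;             /* don't leak 'state' */
	}

	ret = register_stub(state);              /* like v4l2_device_register_subdev() */
	if (ret)
		goto err_free_fifo;

	*out = state;
	return 0;

err_free_fifo:
	free(state->fifo_buf);
err_free_state:
	free(state);
	return ret;
}

int main(void)
{
	struct my_state *state = NULL;

	if (my_probe(&state) == 0) {
		printf("probe succeeded\n");
		free(state->fifo_buf);
		free(state);
	}
	return 0;
}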
crossvul-cpp_data_bad_1169_0
/****************************************************************************** * Xen balloon driver - enables returning/claiming memory to/from Xen. * * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * Copyright (c) 2005 Dan M. Smith, IBM Corporation * Copyright (c) 2010 Daniel Kiper * * Memory hotplug support was written by Daniel Kiper. Work on * it was sponsored by Google under Google Summer of Code 2010 * program. Jeremy Fitzhardinge from Citrix was the mentor for * this project. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt #include <linux/cpu.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/cred.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/memblock.h> #include <linux/pagemap.h> #include <linux/highmem.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/gfp.h> #include <linux/notifier.h> #include <linux/memory.h> #include <linux/memory_hotplug.h> #include <linux/percpu-defs.h> #include <linux/slab.h> #include <linux/sysctl.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/tlb.h> #include <asm/xen/hypervisor.h> #include <asm/xen/hypercall.h> #include <xen/xen.h> #include <xen/interface/xen.h> #include <xen/interface/memory.h> #include <xen/balloon.h> #include <xen/features.h> #include <xen/page.h> #include <xen/mem-reservation.h> static int xen_hotplug_unpopulated; #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG static int zero; static int one = 1; static struct ctl_table balloon_table[] = { { .procname = "hotplug_unpopulated", .data = &xen_hotplug_unpopulated, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { } }; static struct ctl_table balloon_root[] = { { .procname = "balloon", .mode = 0555, .child = balloon_table, }, { } }; static struct ctl_table xen_root[] = { { .procname = "xen", .mode = 0555, .child = balloon_root, }, { } }; #endif /* * Use one extent per PAGE_SIZE to avoid to break down the page into * multiple frame. 
*/ #define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1) /* * balloon_process() state: * * BP_DONE: done or nothing to do, * BP_WAIT: wait to be rescheduled, * BP_EAGAIN: error, go to sleep, * BP_ECANCELED: error, balloon operation canceled. */ enum bp_state { BP_DONE, BP_WAIT, BP_EAGAIN, BP_ECANCELED }; static DEFINE_MUTEX(balloon_mutex); struct balloon_stats balloon_stats; EXPORT_SYMBOL_GPL(balloon_stats); /* We increase/decrease in batches which fit in a page */ static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)]; /* List of ballooned pages, threaded through the mem_map array. */ static LIST_HEAD(ballooned_pages); static DECLARE_WAIT_QUEUE_HEAD(balloon_wq); /* Main work function, always executed in process context. */ static void balloon_process(struct work_struct *work); static DECLARE_DELAYED_WORK(balloon_worker, balloon_process); /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. */ #define GFP_BALLOON \ (GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC) /* balloon_append: add the given page to the balloon. */ static void __balloon_append(struct page *page) { /* Lowmem is re-populated first, so highmem pages go at list tail. */ if (PageHighMem(page)) { list_add_tail(&page->lru, &ballooned_pages); balloon_stats.balloon_high++; } else { list_add(&page->lru, &ballooned_pages); balloon_stats.balloon_low++; } wake_up(&balloon_wq); } static void balloon_append(struct page *page) { __balloon_append(page); } /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */ static struct page *balloon_retrieve(bool require_lowmem) { struct page *page; if (list_empty(&ballooned_pages)) return NULL; page = list_entry(ballooned_pages.next, struct page, lru); if (require_lowmem && PageHighMem(page)) return NULL; list_del(&page->lru); if (PageHighMem(page)) balloon_stats.balloon_high--; else balloon_stats.balloon_low--; return page; } static struct page *balloon_next_page(struct page *page) { struct list_head *next = page->lru.next; if (next == &ballooned_pages) return NULL; return list_entry(next, struct page, lru); } static enum bp_state update_schedule(enum bp_state state) { if (state == BP_WAIT) return BP_WAIT; if (state == BP_ECANCELED) return BP_ECANCELED; if (state == BP_DONE) { balloon_stats.schedule_delay = 1; balloon_stats.retry_count = 1; return BP_DONE; } ++balloon_stats.retry_count; if (balloon_stats.max_retry_count != RETRY_UNLIMITED && balloon_stats.retry_count > balloon_stats.max_retry_count) { balloon_stats.schedule_delay = 1; balloon_stats.retry_count = 1; return BP_ECANCELED; } balloon_stats.schedule_delay <<= 1; if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay) balloon_stats.schedule_delay = balloon_stats.max_schedule_delay; return BP_EAGAIN; } #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG static void release_memory_resource(struct resource *resource) { if (!resource) return; /* * No need to reset region to identity mapped since we now * know that no I/O can be in this region */ release_resource(resource); kfree(resource); } static struct resource *additional_memory_resource(phys_addr_t size) { struct resource *res; int ret; res = kzalloc(sizeof(*res), GFP_KERNEL); if (!res) return NULL; res->name = "System RAM"; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; ret = allocate_resource(&iomem_resource, res, size, 0, -1, PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); if (ret < 0) { pr_err("Cannot allocate new System RAM resource\n"); 
kfree(res); return NULL; } #ifdef CONFIG_SPARSEMEM { unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT); unsigned long pfn = res->start >> PAGE_SHIFT; if (pfn > limit) { pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n", pfn, limit); release_memory_resource(res); return NULL; } } #endif return res; } static enum bp_state reserve_additional_memory(void) { long credit; struct resource *resource; int nid, rc; unsigned long balloon_hotplug; credit = balloon_stats.target_pages + balloon_stats.target_unpopulated - balloon_stats.total_pages; /* * Already hotplugged enough pages? Wait for them to be * onlined. */ if (credit <= 0) return BP_WAIT; balloon_hotplug = round_up(credit, PAGES_PER_SECTION); resource = additional_memory_resource(balloon_hotplug * PAGE_SIZE); if (!resource) goto err; nid = memory_add_physaddr_to_nid(resource->start); #ifdef CONFIG_XEN_HAVE_PVMMU /* * We don't support PV MMU when Linux and Xen is using * different page granularity. */ BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE); /* * add_memory() will build page tables for the new memory so * the p2m must contain invalid entries so the correct * non-present PTEs will be written. * * If a failure occurs, the original (identity) p2m entries * are not restored since this region is now known not to * conflict with any devices. */ if (!xen_feature(XENFEAT_auto_translated_physmap)) { unsigned long pfn, i; pfn = PFN_DOWN(resource->start); for (i = 0; i < balloon_hotplug; i++) { if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) { pr_warn("set_phys_to_machine() failed, no memory added\n"); goto err; } } } #endif /* * add_memory_resource() will call online_pages() which in its turn * will call xen_online_page() callback causing deadlock if we don't * release balloon_mutex here. Unlocking here is safe because the * callers drop the mutex before trying again. 
*/ mutex_unlock(&balloon_mutex); /* add_memory_resource() requires the device_hotplug lock */ lock_device_hotplug(); rc = add_memory_resource(nid, resource); unlock_device_hotplug(); mutex_lock(&balloon_mutex); if (rc) { pr_warn("Cannot add additional memory (%i)\n", rc); goto err; } balloon_stats.total_pages += balloon_hotplug; return BP_WAIT; err: release_memory_resource(resource); return BP_ECANCELED; } static void xen_online_page(struct page *page, unsigned int order) { unsigned long i, size = (1 << order); unsigned long start_pfn = page_to_pfn(page); struct page *p; pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn); mutex_lock(&balloon_mutex); for (i = 0; i < size; i++) { p = pfn_to_page(start_pfn + i); __online_page_set_limits(p); __SetPageOffline(p); __balloon_append(p); } mutex_unlock(&balloon_mutex); } static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v) { if (val == MEM_ONLINE) schedule_delayed_work(&balloon_worker, 0); return NOTIFY_OK; } static struct notifier_block xen_memory_nb = { .notifier_call = xen_memory_notifier, .priority = 0 }; #else static enum bp_state reserve_additional_memory(void) { balloon_stats.target_pages = balloon_stats.current_pages; return BP_ECANCELED; } #endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */ static long current_credit(void) { return balloon_stats.target_pages - balloon_stats.current_pages; } static bool balloon_is_inflated(void) { return balloon_stats.balloon_low || balloon_stats.balloon_high; } static enum bp_state increase_reservation(unsigned long nr_pages) { int rc; unsigned long i; struct page *page; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); page = list_first_entry_or_null(&ballooned_pages, struct page, lru); for (i = 0; i < nr_pages; i++) { if (!page) { nr_pages = i; break; } frame_list[i] = page_to_xen_pfn(page); page = balloon_next_page(page); } rc = xenmem_reservation_increase(nr_pages, frame_list); if (rc <= 0) return BP_EAGAIN; for (i = 0; i < rc; i++) { page = balloon_retrieve(false); BUG_ON(page == NULL); xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]); /* Relinquish the page back to the allocator. */ __ClearPageOffline(page); free_reserved_page(page); } balloon_stats.current_pages += rc; return BP_DONE; } static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) { enum bp_state state = BP_DONE; unsigned long i; struct page *page, *tmp; int ret; LIST_HEAD(pages); if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { page = alloc_page(gfp); if (page == NULL) { nr_pages = i; state = BP_EAGAIN; break; } __SetPageOffline(page); adjust_managed_page_count(page, -1); xenmem_reservation_scrub_page(page); list_add(&page->lru, &pages); } /* * Ensure that ballooned highmem pages don't have kmaps. * * Do this before changing the p2m as kmap_flush_unused() * reads PTEs to obtain pages (and hence needs the original * p2m entry). */ kmap_flush_unused(); /* * Setup the frame, update direct mapping, invalidate P2M, * and add to balloon. */ i = 0; list_for_each_entry_safe(page, tmp, &pages, lru) { frame_list[i++] = xen_page_to_gfn(page); xenmem_reservation_va_mapping_reset(1, &page); list_del(&page->lru); balloon_append(page); } flush_tlb_all(); ret = xenmem_reservation_decrease(nr_pages, frame_list); BUG_ON(ret != nr_pages); balloon_stats.current_pages -= nr_pages; return state; } /* * As this is a work item it is guaranteed to run as a single instance only. 
* We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. */ static void balloon_process(struct work_struct *work) { enum bp_state state = BP_DONE; long credit; do { mutex_lock(&balloon_mutex); credit = current_credit(); if (credit > 0) { if (balloon_is_inflated()) state = increase_reservation(credit); else state = reserve_additional_memory(); } if (credit < 0) state = decrease_reservation(-credit, GFP_BALLOON); state = update_schedule(state); mutex_unlock(&balloon_mutex); cond_resched(); } while (credit && state == BP_DONE); /* Schedule more work if there is some still to be done. */ if (state == BP_EAGAIN) schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ); } /* Resets the Xen limit, sets new target, and kicks off processing. */ void balloon_set_new_target(unsigned long target) { /* No need for lock. Not read-modify-write updates. */ balloon_stats.target_pages = target; schedule_delayed_work(&balloon_worker, 0); } EXPORT_SYMBOL_GPL(balloon_set_new_target); static int add_ballooned_pages(int nr_pages) { enum bp_state st; if (xen_hotplug_unpopulated) { st = reserve_additional_memory(); if (st != BP_ECANCELED) { mutex_unlock(&balloon_mutex); wait_event(balloon_wq, !list_empty(&ballooned_pages)); mutex_lock(&balloon_mutex); return 0; } } st = decrease_reservation(nr_pages, GFP_USER); if (st != BP_DONE) return -ENOMEM; return 0; } /** * alloc_xenballooned_pages - get pages that have been ballooned out * @nr_pages: Number of pages to get * @pages: pages returned * @return 0 on success, error otherwise */ int alloc_xenballooned_pages(int nr_pages, struct page **pages) { int pgno = 0; struct page *page; int ret; mutex_lock(&balloon_mutex); balloon_stats.target_unpopulated += nr_pages; while (pgno < nr_pages) { page = balloon_retrieve(true); if (page) { __ClearPageOffline(page); pages[pgno++] = page; #ifdef CONFIG_XEN_HAVE_PVMMU /* * We don't support PV MMU when Linux and Xen is using * different page granularity. */ BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE); if (!xen_feature(XENFEAT_auto_translated_physmap)) { ret = xen_alloc_p2m_entry(page_to_pfn(page)); if (ret < 0) goto out_undo; } #endif } else { ret = add_ballooned_pages(nr_pages - pgno); if (ret < 0) goto out_undo; } } mutex_unlock(&balloon_mutex); return 0; out_undo: mutex_unlock(&balloon_mutex); free_xenballooned_pages(pgno, pages); return ret; } EXPORT_SYMBOL(alloc_xenballooned_pages); /** * free_xenballooned_pages - return pages retrieved with get_ballooned_pages * @nr_pages: Number of pages * @pages: pages to return */ void free_xenballooned_pages(int nr_pages, struct page **pages) { int i; mutex_lock(&balloon_mutex); for (i = 0; i < nr_pages; i++) { if (pages[i]) { __SetPageOffline(pages[i]); balloon_append(pages[i]); } } balloon_stats.target_unpopulated -= nr_pages; /* The balloon may be too large now. Shrink it if needed. */ if (current_credit()) schedule_delayed_work(&balloon_worker, 0); mutex_unlock(&balloon_mutex); } EXPORT_SYMBOL(free_xenballooned_pages); #ifdef CONFIG_XEN_PV static void __init balloon_add_region(unsigned long start_pfn, unsigned long pages) { unsigned long pfn, extra_pfn_end; struct page *page; /* * If the amount of usable memory has been limited (e.g., with * the 'mem' command line parameter), don't add pages beyond * this limit. 
*/ extra_pfn_end = min(max_pfn, start_pfn + pages); for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) { page = pfn_to_page(pfn); /* totalram_pages and totalhigh_pages do not include the boot-time balloon extension, so don't subtract from it. */ __balloon_append(page); } balloon_stats.total_pages += extra_pfn_end - start_pfn; } #endif static int __init balloon_init(void) { if (!xen_domain()) return -ENODEV; pr_info("Initialising balloon driver\n"); #ifdef CONFIG_XEN_PV balloon_stats.current_pages = xen_pv_domain() ? min(xen_start_info->nr_pages - xen_released_pages, max_pfn) : get_num_physpages(); #else balloon_stats.current_pages = get_num_physpages(); #endif balloon_stats.target_pages = balloon_stats.current_pages; balloon_stats.balloon_low = 0; balloon_stats.balloon_high = 0; balloon_stats.total_pages = balloon_stats.current_pages; balloon_stats.schedule_delay = 1; balloon_stats.max_schedule_delay = 32; balloon_stats.retry_count = 1; balloon_stats.max_retry_count = RETRY_UNLIMITED; #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG set_online_page_callback(&xen_online_page); register_memory_notifier(&xen_memory_nb); register_sysctl_table(xen_root); #endif #ifdef CONFIG_XEN_PV { int i; /* * Initialize the balloon with pages from the extra memory * regions (see arch/x86/xen/setup.c). */ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) if (xen_extra_mem[i].n_pfns) balloon_add_region(xen_extra_mem[i].start_pfn, xen_extra_mem[i].n_pfns); } #endif /* Init the xen-balloon driver. */ xen_balloon_init(); return 0; } subsys_initcall(balloon_init);
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1169_0
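The balloon worker above paces its retries through update_schedule(): after a completed operation the delay resets to one, after a transient error it doubles up to max_schedule_delay (32 by default in balloon_init()). A minimal standalone sketch of that capped exponential backoff follows; the tick-based interface and the names backoff and backoff_update are assumptions for illustration only, not the driver's API.

#include <stdio.h>

#define MAX_SCHEDULE_DELAY 32   /* assumed cap, mirrors balloon_stats.max_schedule_delay */

struct backoff {
	unsigned long delay;    /* current delay, in arbitrary ticks */
};

/* Reset after success, double (up to the cap) after a transient failure. */
static void backoff_update(struct backoff *b, int success)
{
	if (success) {
		b->delay = 1;
		return;
	}
	b->delay <<= 1;
	if (b->delay > MAX_SCHEDULE_DELAY)
		b->delay = MAX_SCHEDULE_DELAY;
}

int main(void)
{
	struct backoff b = { .delay = 1 };
	int i;

	/* Simulate five consecutive failures followed by a success:
	 * the delay walks 2, 4, 8, 16, 32 and then snaps back to 1. */
	for (i = 0; i < 5; i++) {
		backoff_update(&b, 0);
		printf("retry in %lu ticks\n", b.delay);
	}
	backoff_update(&b, 1);
	printf("after success: %lu tick\n", b.delay);
	return 0;
}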
crossvul-cpp_data_good_1237_0
/* * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/random.h> #include <linux/vmalloc.h> #include <linux/hardirq.h> #include <linux/mlx5/driver.h> #include <linux/mlx5/cmd.h> #include "mlx5_core.h" #include "lib/eq.h" #include "lib/mlx5.h" #include "lib/pci_vsc.h" #include "diag/fw_tracer.h" enum { MLX5_HEALTH_POLL_INTERVAL = 2 * HZ, MAX_MISSES = 3, }; enum { MLX5_HEALTH_SYNDR_FW_ERR = 0x1, MLX5_HEALTH_SYNDR_IRISC_ERR = 0x7, MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR = 0x8, MLX5_HEALTH_SYNDR_CRC_ERR = 0x9, MLX5_HEALTH_SYNDR_FETCH_PCI_ERR = 0xa, MLX5_HEALTH_SYNDR_HW_FTL_ERR = 0xb, MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR = 0xc, MLX5_HEALTH_SYNDR_EQ_ERR = 0xd, MLX5_HEALTH_SYNDR_EQ_INV = 0xe, MLX5_HEALTH_SYNDR_FFSER_ERR = 0xf, MLX5_HEALTH_SYNDR_HIGH_TEMP = 0x10 }; enum { MLX5_DROP_NEW_HEALTH_WORK, }; enum { MLX5_SENSOR_NO_ERR = 0, MLX5_SENSOR_PCI_COMM_ERR = 1, MLX5_SENSOR_PCI_ERR = 2, MLX5_SENSOR_NIC_DISABLED = 3, MLX5_SENSOR_NIC_SW_RESET = 4, MLX5_SENSOR_FW_SYND_RFR = 5, }; u8 mlx5_get_nic_state(struct mlx5_core_dev *dev) { return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 7; } void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state) { u32 cur_cmdq_addr_l_sz; cur_cmdq_addr_l_sz = ioread32be(&dev->iseg->cmdq_addr_l_sz); iowrite32be((cur_cmdq_addr_l_sz & 0xFFFFF000) | state << MLX5_NIC_IFC_OFFSET, &dev->iseg->cmdq_addr_l_sz); } static bool sensor_pci_not_working(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; /* Offline PCI reads return 0xffffffff */ return (ioread32be(&h->fw_ver) == 0xffffffff); } static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; u32 rfr = ioread32be(&h->rfr) >> MLX5_RFR_OFFSET; u8 synd = ioread8(&h->synd); if (rfr && synd) mlx5_core_dbg(dev, "FW requests reset, synd: %d\n", synd); return rfr && synd; } static u32 check_fatal_sensors(struct mlx5_core_dev *dev) { if (sensor_pci_not_working(dev)) return MLX5_SENSOR_PCI_COMM_ERR; if (pci_channel_offline(dev->pdev)) return MLX5_SENSOR_PCI_ERR; if (mlx5_get_nic_state(dev) == 
MLX5_NIC_IFC_DISABLED) return MLX5_SENSOR_NIC_DISABLED; if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_SW_RESET) return MLX5_SENSOR_NIC_SW_RESET; if (sensor_fw_synd_rfr(dev)) return MLX5_SENSOR_FW_SYND_RFR; return MLX5_SENSOR_NO_ERR; } static int lock_sem_sw_reset(struct mlx5_core_dev *dev, bool lock) { enum mlx5_vsc_state state; int ret; if (!mlx5_core_is_pf(dev)) return -EBUSY; /* Try to lock GW access, this stage doesn't return * EBUSY because locked GW does not mean that other PF * already started the reset. */ ret = mlx5_vsc_gw_lock(dev); if (ret == -EBUSY) return -EINVAL; if (ret) return ret; state = lock ? MLX5_VSC_LOCK : MLX5_VSC_UNLOCK; /* At this stage, if the return status == EBUSY, then we know * for sure that another PF started the reset, so don't allow * another reset. */ ret = mlx5_vsc_sem_set_space(dev, MLX5_SEMAPHORE_SW_RESET, state); if (ret) mlx5_core_warn(dev, "Failed to lock SW reset semaphore\n"); /* Unlock GW access */ mlx5_vsc_gw_unlock(dev); return ret; } static bool reset_fw_if_needed(struct mlx5_core_dev *dev) { bool supported = (ioread32be(&dev->iseg->initializing) >> MLX5_FW_RESET_SUPPORTED_OFFSET) & 1; u32 fatal_error; if (!supported) return false; /* The reset only needs to be issued by one PF. The health buffer is * shared between all functions, and will be cleared during a reset. * Check again to avoid a redundant 2nd reset. If the fatal erros was * PCI related a reset won't help. */ fatal_error = check_fatal_sensors(dev); if (fatal_error == MLX5_SENSOR_PCI_COMM_ERR || fatal_error == MLX5_SENSOR_NIC_DISABLED || fatal_error == MLX5_SENSOR_NIC_SW_RESET) { mlx5_core_warn(dev, "Not issuing FW reset. Either it's already done or won't help."); return false; } mlx5_core_warn(dev, "Issuing FW Reset\n"); /* Write the NIC interface field to initiate the reset, the command * interface address also resides here, don't overwrite it. 
*/ mlx5_set_nic_state(dev, MLX5_NIC_IFC_SW_RESET); return true; } void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) { mutex_lock(&dev->intf_state_mutex); if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) goto unlock; if (dev->state == MLX5_DEVICE_STATE_UNINITIALIZED) { dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; goto unlock; } if (check_fatal_sensors(dev) || force) { dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; mlx5_cmd_flush(dev); } mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1); unlock: mutex_unlock(&dev->intf_state_mutex); } #define MLX5_CRDUMP_WAIT_MS 60000 #define MLX5_FW_RESET_WAIT_MS 1000 void mlx5_error_sw_reset(struct mlx5_core_dev *dev) { unsigned long end, delay_ms = MLX5_FW_RESET_WAIT_MS; int lock = -EBUSY; mutex_lock(&dev->intf_state_mutex); if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) goto unlock; mlx5_core_err(dev, "start\n"); if (check_fatal_sensors(dev) == MLX5_SENSOR_FW_SYND_RFR) { /* Get cr-dump and reset FW semaphore */ lock = lock_sem_sw_reset(dev, true); if (lock == -EBUSY) { delay_ms = MLX5_CRDUMP_WAIT_MS; goto recover_from_sw_reset; } /* Execute SW reset */ reset_fw_if_needed(dev); } recover_from_sw_reset: /* Recover from SW reset */ end = jiffies + msecs_to_jiffies(delay_ms); do { if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED) break; cond_resched(); } while (!time_after(jiffies, end)); if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) { dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n", mlx5_get_nic_state(dev), delay_ms); } /* Release FW semaphore if you are the lock owner */ if (!lock) lock_sem_sw_reset(dev, false); mlx5_core_err(dev, "end\n"); unlock: mutex_unlock(&dev->intf_state_mutex); } static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) { u8 nic_interface = mlx5_get_nic_state(dev); switch (nic_interface) { case MLX5_NIC_IFC_FULL: mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n"); break; case MLX5_NIC_IFC_DISABLED: mlx5_core_warn(dev, "starting teardown\n"); break; case MLX5_NIC_IFC_NO_DRAM_NIC: mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n"); break; case MLX5_NIC_IFC_SW_RESET: /* The IFC mode field is 3 bits, so it will read 0x7 in 2 cases: * 1. PCI has been disabled (ie. PCI-AER, PF driver unloaded * and this is a VF), this is not recoverable by SW reset. * Logging of this is handled elsewhere. * 2. FW reset has been issued by another function, driver can * be reloaded to recover after the mode switches to * MLX5_NIC_IFC_DISABLED. 
*/ if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR) mlx5_core_warn(dev, "NIC SW reset in progress\n"); break; default: mlx5_core_warn(dev, "Expected to see disabled NIC but it is has invalid value %d\n", nic_interface); } mlx5_disable_device(dev); } /* How much time to wait until health resetting the driver (in msecs) */ #define MLX5_RECOVERY_WAIT_MSECS 60000 static int mlx5_health_try_recover(struct mlx5_core_dev *dev) { unsigned long end; mlx5_core_warn(dev, "handling bad device here\n"); mlx5_handle_bad_state(dev); end = jiffies + msecs_to_jiffies(MLX5_RECOVERY_WAIT_MSECS); while (sensor_pci_not_working(dev)) { if (time_after(jiffies, end)) { mlx5_core_err(dev, "health recovery flow aborted, PCI reads still not working\n"); return -EIO; } msleep(100); } mlx5_core_err(dev, "starting health recovery flow\n"); mlx5_recover_device(dev); if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state) || check_fatal_sensors(dev)) { mlx5_core_err(dev, "health recovery failed\n"); return -EIO; } return 0; } static const char *hsynd_str(u8 synd) { switch (synd) { case MLX5_HEALTH_SYNDR_FW_ERR: return "firmware internal error"; case MLX5_HEALTH_SYNDR_IRISC_ERR: return "irisc not responding"; case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR: return "unrecoverable hardware error"; case MLX5_HEALTH_SYNDR_CRC_ERR: return "firmware CRC error"; case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR: return "ICM fetch PCI error"; case MLX5_HEALTH_SYNDR_HW_FTL_ERR: return "HW fatal error\n"; case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR: return "async EQ buffer overrun"; case MLX5_HEALTH_SYNDR_EQ_ERR: return "EQ error"; case MLX5_HEALTH_SYNDR_EQ_INV: return "Invalid EQ referenced"; case MLX5_HEALTH_SYNDR_FFSER_ERR: return "FFSER error"; case MLX5_HEALTH_SYNDR_HIGH_TEMP: return "High temperature"; default: return "unrecognized error"; } } static void print_health_info(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; char fw_str[18]; u32 fw; int i; /* If the syndrome is 0, the device is OK and no need to print buffer */ if (!ioread8(&h->synd)) return; for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) mlx5_core_err(dev, "assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i)); mlx5_core_err(dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr)); mlx5_core_err(dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra)); sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev)); mlx5_core_err(dev, "fw_ver %s\n", fw_str); mlx5_core_err(dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id)); mlx5_core_err(dev, "irisc_index %d\n", ioread8(&h->irisc_index)); mlx5_core_err(dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd))); mlx5_core_err(dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd)); fw = ioread32be(&h->fw_ver); mlx5_core_err(dev, "raw fw_ver 0x%08x\n", fw); } static int mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg) { struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter); struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; u8 synd; int err; synd = ioread8(&h->synd); err = devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd); if (err || !synd) return err; return devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd)); } struct mlx5_fw_reporter_ctx { u8 err_synd; int miss_counter; }; static int mlx5_fw_reporter_ctx_pairs_put(struct devlink_fmsg *fmsg, struct mlx5_fw_reporter_ctx 
*fw_reporter_ctx) { int err; err = devlink_fmsg_u8_pair_put(fmsg, "syndrome", fw_reporter_ctx->err_synd); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "fw_miss_counter", fw_reporter_ctx->miss_counter); if (err) return err; return 0; } static int mlx5_fw_reporter_heath_buffer_data_put(struct mlx5_core_dev *dev, struct devlink_fmsg *fmsg) { struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; int err; int i; if (!ioread8(&h->synd)) return 0; err = devlink_fmsg_pair_nest_start(fmsg, "health buffer"); if (err) return err; err = devlink_fmsg_obj_nest_start(fmsg); if (err) return err; err = devlink_fmsg_arr_pair_nest_start(fmsg, "assert_var"); if (err) return err; for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) { err = devlink_fmsg_u32_put(fmsg, ioread32be(h->assert_var + i)); if (err) return err; } err = devlink_fmsg_arr_pair_nest_end(fmsg); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "assert_exit_ptr", ioread32be(&h->assert_exit_ptr)); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "assert_callra", ioread32be(&h->assert_callra)); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "hw_id", ioread32be(&h->hw_id)); if (err) return err; err = devlink_fmsg_u8_pair_put(fmsg, "irisc_index", ioread8(&h->irisc_index)); if (err) return err; err = devlink_fmsg_u8_pair_put(fmsg, "synd", ioread8(&h->synd)); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd", ioread16be(&h->ext_synd)); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "raw_fw_ver", ioread32be(&h->fw_ver)); if (err) return err; err = devlink_fmsg_obj_nest_end(fmsg); if (err) return err; return devlink_fmsg_pair_nest_end(fmsg); } static int mlx5_fw_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, void *priv_ctx) { struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter); int err; err = mlx5_fw_tracer_trigger_core_dump_general(dev); if (err) return err; if (priv_ctx) { struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx; err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx); if (err) return err; } err = mlx5_fw_reporter_heath_buffer_data_put(dev, fmsg); if (err) return err; return mlx5_fw_tracer_get_saved_traces_objects(dev->tracer, fmsg); } static void mlx5_fw_reporter_err_work(struct work_struct *work) { struct mlx5_fw_reporter_ctx fw_reporter_ctx; struct mlx5_core_health *health; health = container_of(work, struct mlx5_core_health, report_work); if (IS_ERR_OR_NULL(health->fw_reporter)) return; fw_reporter_ctx.err_synd = health->synd; fw_reporter_ctx.miss_counter = health->miss_counter; if (fw_reporter_ctx.err_synd) { devlink_health_report(health->fw_reporter, "FW syndrom reported", &fw_reporter_ctx); return; } if (fw_reporter_ctx.miss_counter) devlink_health_report(health->fw_reporter, "FW miss counter reported", &fw_reporter_ctx); } static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = { .name = "fw", .diagnose = mlx5_fw_reporter_diagnose, .dump = mlx5_fw_reporter_dump, }; static int mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter, void *priv_ctx) { struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter); return mlx5_health_try_recover(dev); } #define MLX5_CR_DUMP_CHUNK_SIZE 256 static int mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, void *priv_ctx) { struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter); u32 crdump_size = 
dev->priv.health.crdump_size; u32 *cr_data; u32 data_size; u32 offset; int err; if (!mlx5_core_is_pf(dev)) return -EPERM; cr_data = kvmalloc(crdump_size, GFP_KERNEL); if (!cr_data) return -ENOMEM; err = mlx5_crdump_collect(dev, cr_data); if (err) goto free_data; if (priv_ctx) { struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx; err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx); if (err) goto free_data; } err = devlink_fmsg_arr_pair_nest_start(fmsg, "crdump_data"); if (err) goto free_data; for (offset = 0; offset < crdump_size; offset += data_size) { if (crdump_size - offset < MLX5_CR_DUMP_CHUNK_SIZE) data_size = crdump_size - offset; else data_size = MLX5_CR_DUMP_CHUNK_SIZE; err = devlink_fmsg_binary_put(fmsg, (char *)cr_data + offset, data_size); if (err) goto free_data; } err = devlink_fmsg_arr_pair_nest_end(fmsg); free_data: kvfree(cr_data); return err; } static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work) { struct mlx5_fw_reporter_ctx fw_reporter_ctx; struct mlx5_core_health *health; struct mlx5_core_dev *dev; struct mlx5_priv *priv; health = container_of(work, struct mlx5_core_health, fatal_report_work); priv = container_of(health, struct mlx5_priv, health); dev = container_of(priv, struct mlx5_core_dev, priv); mlx5_enter_error_state(dev, false); if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) { if (mlx5_health_try_recover(dev)) mlx5_core_err(dev, "health recovery failed\n"); return; } fw_reporter_ctx.err_synd = health->synd; fw_reporter_ctx.miss_counter = health->miss_counter; devlink_health_report(health->fw_fatal_reporter, "FW fatal error reported", &fw_reporter_ctx); } static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = { .name = "fw_fatal", .recover = mlx5_fw_fatal_reporter_recover, .dump = mlx5_fw_fatal_reporter_dump, }; #define MLX5_REPORTER_FW_GRACEFUL_PERIOD 1200000 static void mlx5_fw_reporters_create(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; struct devlink *devlink = priv_to_devlink(dev); health->fw_reporter = devlink_health_reporter_create(devlink, &mlx5_fw_reporter_ops, 0, false, dev); if (IS_ERR(health->fw_reporter)) mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n", PTR_ERR(health->fw_reporter)); health->fw_fatal_reporter = devlink_health_reporter_create(devlink, &mlx5_fw_fatal_reporter_ops, MLX5_REPORTER_FW_GRACEFUL_PERIOD, true, dev); if (IS_ERR(health->fw_fatal_reporter)) mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %ld\n", PTR_ERR(health->fw_fatal_reporter)); } static void mlx5_fw_reporters_destroy(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; if (!IS_ERR_OR_NULL(health->fw_reporter)) devlink_health_reporter_destroy(health->fw_reporter); if (!IS_ERR_OR_NULL(health->fw_fatal_reporter)) devlink_health_reporter_destroy(health->fw_fatal_reporter); } static unsigned long get_next_poll_jiffies(void) { unsigned long next; get_random_bytes(&next, sizeof(next)); next %= HZ; next += jiffies + MLX5_HEALTH_POLL_INTERVAL; return next; } void mlx5_trigger_health_work(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; unsigned long flags; spin_lock_irqsave(&health->wq_lock, flags); if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) queue_work(health->wq, &health->fatal_report_work); else mlx5_core_err(dev, "new health works are not permitted at this stage\n"); spin_unlock_irqrestore(&health->wq_lock, flags); } static void poll_health(struct timer_list *t) { struct mlx5_core_dev 
*dev = from_timer(dev, t, priv.health.timer); struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; u32 fatal_error; u8 prev_synd; u32 count; if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) goto out; fatal_error = check_fatal_sensors(dev); if (fatal_error && !health->fatal_error) { mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error); dev->priv.health.fatal_error = fatal_error; print_health_info(dev); mlx5_trigger_health_work(dev); goto out; } count = ioread32be(health->health_counter); if (count == health->prev) ++health->miss_counter; else health->miss_counter = 0; health->prev = count; if (health->miss_counter == MAX_MISSES) { mlx5_core_err(dev, "device's health compromised - reached miss count\n"); print_health_info(dev); queue_work(health->wq, &health->report_work); } prev_synd = health->synd; health->synd = ioread8(&h->synd); if (health->synd && health->synd != prev_synd) queue_work(health->wq, &health->report_work); out: mod_timer(&health->timer, get_next_poll_jiffies()); } void mlx5_start_health_poll(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; timer_setup(&health->timer, poll_health, 0); health->fatal_error = MLX5_SENSOR_NO_ERR; clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); health->health = &dev->iseg->health; health->health_counter = &dev->iseg->health_counter; health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL); add_timer(&health->timer); } void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health) { struct mlx5_core_health *health = &dev->priv.health; unsigned long flags; if (disable_health) { spin_lock_irqsave(&health->wq_lock, flags); set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); spin_unlock_irqrestore(&health->wq_lock, flags); } del_timer_sync(&health->timer); } void mlx5_drain_health_wq(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; unsigned long flags; spin_lock_irqsave(&health->wq_lock, flags); set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); spin_unlock_irqrestore(&health->wq_lock, flags); cancel_work_sync(&health->report_work); cancel_work_sync(&health->fatal_report_work); } void mlx5_health_flush(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; flush_workqueue(health->wq); } void mlx5_health_cleanup(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; destroy_workqueue(health->wq); mlx5_fw_reporters_destroy(dev); } int mlx5_health_init(struct mlx5_core_dev *dev) { struct mlx5_core_health *health; char *name; mlx5_fw_reporters_create(dev); health = &dev->priv.health; name = kmalloc(64, GFP_KERNEL); if (!name) goto out_err; strcpy(name, "mlx5_health"); strcat(name, dev_name(dev->device)); health->wq = create_singlethread_workqueue(name); kfree(name); if (!health->wq) goto out_err; spin_lock_init(&health->wq_lock); INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work); INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work); return 0; out_err: mlx5_fw_reporters_destroy(dev); return -ENOMEM; }
./CrossVul/dataset_final_sorted/CWE-400/c/good_1237_0
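get_next_poll_jiffies() above spreads health polls out by adding a random offset of up to one second (HZ jiffies) on top of MLX5_HEALTH_POLL_INTERVAL before re-arming the timer, so repeated reads of the health buffer do not land in lockstep. The standalone sketch below reproduces only that arithmetic in milliseconds; the constants and names are assumptions, and rand() stands in for get_random_bytes().

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define POLL_INTERVAL_MS 2000   /* assumed base period, like MLX5_HEALTH_POLL_INTERVAL (2 * HZ) */
#define JITTER_MS        1000   /* assumed jitter window, like the "next %= HZ" step */

/* Next poll time = now + base interval + random jitter within one window. */
static long next_poll_ms(long now_ms)
{
	return now_ms + POLL_INTERVAL_MS + (rand() % JITTER_MS);
}

int main(void)
{
	long now = 0;
	int i;

	srand((unsigned int)time(NULL));
	for (i = 0; i < 3; i++) {
		now = next_poll_ms(now);
		printf("poll #%d scheduled at %ld ms\n", i + 1, now);
	}
	return 0;
}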
crossvul-cpp_data_good_5356_4
/* * IPV4 GSO/GRO offload support * Linux INET implementation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * UDPv4 GSO support */ #include <linux/skbuff.h> #include <net/udp.h> #include <net/protocol.h> static DEFINE_SPINLOCK(udp_offload_lock); static struct udp_offload_priv __rcu *udp_offload_base __read_mostly; #define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock)) struct udp_offload_priv { struct udp_offload *offload; possible_net_t net; struct rcu_head rcu; struct udp_offload_priv __rcu *next; }; static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, netdev_features_t features, struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb, netdev_features_t features), __be16 new_protocol, bool is_ipv6) { int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); bool remcsum, need_csum, offload_csum, ufo; struct sk_buff *segs = ERR_PTR(-EINVAL); struct udphdr *uh = udp_hdr(skb); u16 mac_offset = skb->mac_header; __be16 protocol = skb->protocol; u16 mac_len = skb->mac_len; int udp_offset, outer_hlen; __wsum partial; if (unlikely(!pskb_may_pull(skb, tnl_hlen))) goto out; /* Adjust partial header checksum to negate old length. * We cannot rely on the value contained in uh->len as it is * possible that the actual value exceeds the boundaries of the * 16 bit length field due to the header being added outside of an * IP or IPv6 frame that was already limited to 64K - 1. */ partial = csum_sub(csum_unfold(uh->check), (__force __wsum)htonl(skb->len)); /* setup inner skb. */ skb->encapsulation = 0; __skb_pull(skb, tnl_hlen); skb_reset_mac_header(skb); skb_set_network_header(skb, skb_inner_network_offset(skb)); skb->mac_len = skb_inner_network_offset(skb); skb->protocol = new_protocol; need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM); skb->encap_hdr_csum = need_csum; remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM); skb->remcsum_offload = remcsum; ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP); /* Try to offload checksum if possible */ offload_csum = !!(need_csum && (skb->dev->features & (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) : (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM)))); features &= skb->dev->hw_enc_features; /* The only checksum offload we care about from here on out is the * outer one so strip the existing checksum feature flags and * instead set the flag based on our outer checksum offload value. */ if (remcsum || ufo) { features &= ~NETIF_F_CSUM_MASK; if (!need_csum || offload_csum) features |= NETIF_F_HW_CSUM; } /* segment inner packet. 
*/ segs = gso_inner_segment(skb, features); if (IS_ERR_OR_NULL(segs)) { skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset, mac_len); goto out; } outer_hlen = skb_tnl_header_len(skb); udp_offset = outer_hlen - tnl_hlen; skb = segs; do { __be16 len; if (remcsum) skb->ip_summed = CHECKSUM_NONE; /* Set up inner headers if we are offloading inner checksum */ if (skb->ip_summed == CHECKSUM_PARTIAL) { skb_reset_inner_headers(skb); skb->encapsulation = 1; } skb->mac_len = mac_len; skb->protocol = protocol; __skb_push(skb, outer_hlen); skb_reset_mac_header(skb); skb_set_network_header(skb, mac_len); skb_set_transport_header(skb, udp_offset); len = htons(skb->len - udp_offset); uh = udp_hdr(skb); uh->len = len; if (!need_csum) continue; uh->check = ~csum_fold(csum_add(partial, (__force __wsum)len)); if (skb->encapsulation || !offload_csum) { uh->check = gso_make_checksum(skb, ~uh->check); if (uh->check == 0) uh->check = CSUM_MANGLED_0; } else { skb->ip_summed = CHECKSUM_PARTIAL; skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct udphdr, check); } } while ((skb = skb->next)); out: return segs; } struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, netdev_features_t features, bool is_ipv6) { __be16 protocol = skb->protocol; const struct net_offload **offloads; const struct net_offload *ops; struct sk_buff *segs = ERR_PTR(-EINVAL); struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb, netdev_features_t features); rcu_read_lock(); switch (skb->inner_protocol_type) { case ENCAP_TYPE_ETHER: protocol = skb->inner_protocol; gso_inner_segment = skb_mac_gso_segment; break; case ENCAP_TYPE_IPPROTO: offloads = is_ipv6 ? inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[skb->inner_ipproto]); if (!ops || !ops->callbacks.gso_segment) goto out_unlock; gso_inner_segment = ops->callbacks.gso_segment; break; default: goto out_unlock; } segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment, protocol, is_ipv6); out_unlock: rcu_read_unlock(); return segs; } static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); unsigned int mss; __wsum csum; struct udphdr *uh; struct iphdr *iph; if (skb->encapsulation && (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) { segs = skb_udp_tunnel_segment(skb, features, false); goto out; } if (!pskb_may_pull(skb, sizeof(struct udphdr))) goto out; mss = skb_shinfo(skb)->gso_size; if (unlikely(skb->len <= mss)) goto out; if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { /* Packet is from an untrusted source, reset gso_segs. */ int type = skb_shinfo(skb)->gso_type; if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM | SKB_GSO_TUNNEL_REMCSUM | SKB_GSO_IPIP | SKB_GSO_GRE | SKB_GSO_GRE_CSUM) || !(type & (SKB_GSO_UDP)))) goto out; skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); segs = NULL; goto out; } /* Do software UFO. Complete and fill in the UDP checksum as * HW cannot do checksum of UDP packets sent as multiple * IP fragments. */ uh = udp_hdr(skb); iph = ip_hdr(skb); uh->check = 0; csum = skb_checksum(skb, 0, skb->len, 0); uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum); if (uh->check == 0) uh->check = CSUM_MANGLED_0; skb->ip_summed = CHECKSUM_NONE; /* If there is no outer header we can fake a checksum offload * due to the fact that we have already done the checksum in * software prior to segmenting the frame. 
*/ if (!skb->encap_hdr_csum) features |= NETIF_F_HW_CSUM; /* Fragment the skb. IP headers of the fragments are updated in * inet_gso_segment() */ segs = skb_segment(skb, features); out: return segs; } int udp_add_offload(struct net *net, struct udp_offload *uo) { struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC); if (!new_offload) return -ENOMEM; write_pnet(&new_offload->net, net); new_offload->offload = uo; spin_lock(&udp_offload_lock); new_offload->next = udp_offload_base; rcu_assign_pointer(udp_offload_base, new_offload); spin_unlock(&udp_offload_lock); return 0; } EXPORT_SYMBOL(udp_add_offload); static void udp_offload_free_routine(struct rcu_head *head) { struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu); kfree(ou_priv); } void udp_del_offload(struct udp_offload *uo) { struct udp_offload_priv __rcu **head = &udp_offload_base; struct udp_offload_priv *uo_priv; spin_lock(&udp_offload_lock); uo_priv = udp_deref_protected(*head); for (; uo_priv != NULL; uo_priv = udp_deref_protected(*head)) { if (uo_priv->offload == uo) { rcu_assign_pointer(*head, udp_deref_protected(uo_priv->next)); goto unlock; } head = &uo_priv->next; } pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port)); unlock: spin_unlock(&udp_offload_lock); if (uo_priv) call_rcu(&uo_priv->rcu, udp_offload_free_routine); } EXPORT_SYMBOL(udp_del_offload); struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, struct udphdr *uh) { struct udp_offload_priv *uo_priv; struct sk_buff *p, **pp = NULL; struct udphdr *uh2; unsigned int off = skb_gro_offset(skb); int flush = 1; if (NAPI_GRO_CB(skb)->encap_mark || (skb->ip_summed != CHECKSUM_PARTIAL && NAPI_GRO_CB(skb)->csum_cnt == 0 && !NAPI_GRO_CB(skb)->csum_valid)) goto out; /* mark that this skb passed once through the tunnel gro layer */ NAPI_GRO_CB(skb)->encap_mark = 1; rcu_read_lock(); uo_priv = rcu_dereference(udp_offload_base); for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) { if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) && uo_priv->offload->port == uh->dest && uo_priv->offload->callbacks.gro_receive) goto unflush; } goto out_unlock; unflush: flush = 0; for (p = *head; p; p = p->next) { if (!NAPI_GRO_CB(p)->same_flow) continue; uh2 = (struct udphdr *)(p->data + off); /* Match ports and either checksums are either both zero * or nonzero. */ if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) || (!uh->check ^ !uh2->check)) { NAPI_GRO_CB(p)->same_flow = 0; continue; } } skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */ skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr)); NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto; pp = uo_priv->offload->callbacks.gro_receive(head, skb, uo_priv->offload); out_unlock: rcu_read_unlock(); out: NAPI_GRO_CB(skb)->flush |= flush; return pp; } static struct sk_buff **udp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) { struct udphdr *uh = udp_gro_udphdr(skb); if (unlikely(!uh)) goto flush; /* Don't bother verifying checksum if we're going to flush anyway. 
*/ if (NAPI_GRO_CB(skb)->flush) goto skip; if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check, inet_gro_compute_pseudo)) goto flush; else if (uh->check) skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check, inet_gro_compute_pseudo); skip: NAPI_GRO_CB(skb)->is_ipv6 = 0; return udp_gro_receive(head, skb, uh); flush: NAPI_GRO_CB(skb)->flush = 1; return NULL; } int udp_gro_complete(struct sk_buff *skb, int nhoff) { struct udp_offload_priv *uo_priv; __be16 newlen = htons(skb->len - nhoff); struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); int err = -ENOSYS; uh->len = newlen; rcu_read_lock(); uo_priv = rcu_dereference(udp_offload_base); for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) { if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) && uo_priv->offload->port == uh->dest && uo_priv->offload->callbacks.gro_complete) break; } if (uo_priv) { NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto; err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr), uo_priv->offload); } rcu_read_unlock(); if (skb->remcsum_offload) skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM; skb->encapsulation = 1; skb_set_inner_mac_header(skb, nhoff + sizeof(struct udphdr)); return err; } static int udp4_gro_complete(struct sk_buff *skb, int nhoff) { const struct iphdr *iph = ip_hdr(skb); struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); if (uh->check) { skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr, iph->daddr, 0); } else { skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; } return udp_gro_complete(skb, nhoff); } static const struct net_offload udpv4_offload = { .callbacks = { .gso_segment = udp4_ufo_fragment, .gro_receive = udp4_gro_receive, .gro_complete = udp4_gro_complete, }, }; int __init udpv4_offload_init(void) { return inet_add_offload(&udpv4_offload, IPPROTO_UDP); }
./CrossVul/dataset_final_sorted/CWE-400/c/good_5356_4
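udp_add_offload() and udp_del_offload() above maintain a singly linked list whose head is published with rcu_assign_pointer() under udp_offload_lock and whose nodes are reclaimed later through call_rcu(). The sketch below mirrors only the writer side outside the kernel: allocate a node, take a lock, link it at the head, release the lock. A pthread mutex and a plain pointer store stand in for the spinlock and the RCU publish, and the names offload_node and offload_add are assumptions for illustration.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct udp_offload_priv: a lock-protected,
 * prepend-only singly linked list keyed by UDP port. */
struct offload_node {
	unsigned short port;
	struct offload_node *next;
};

static struct offload_node *offload_base;                 /* list head */
static pthread_mutex_t offload_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirror of udp_add_offload(): allocate, fill in, link at the head under
 * the lock. (The kernel publishes with rcu_assign_pointer(); a plain
 * store under the mutex stands in for that here.) */
static int offload_add(unsigned short port)
{
	struct offload_node *node = calloc(1, sizeof(*node));

	if (!node)
		return -1;
	node->port = port;

	pthread_mutex_lock(&offload_lock);
	node->next = offload_base;
	offload_base = node;
	pthread_mutex_unlock(&offload_lock);
	return 0;
}

int main(void)
{
	struct offload_node *n, *next;

	offload_add(4789);   /* e.g. a VXLAN port */
	offload_add(6081);   /* e.g. a Geneve port */

	for (n = offload_base; n; n = n->next)
		printf("offload registered for UDP port %u\n", n->port);

	for (n = offload_base; n; n = next) {
		next = n->next;
		free(n);
	}
	return 0;
}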
crossvul-cpp_data_bad_1270_0
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) /* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/etherdevice.h> #include <linux/lockdep.h> #include <linux/pci.h> #include <linux/skbuff.h> #include <linux/vmalloc.h> #include <net/devlink.h> #include <net/dst_metadata.h> #include "main.h" #include "../nfpcore/nfp_cpp.h" #include "../nfpcore/nfp_nffw.h" #include "../nfpcore/nfp_nsp.h" #include "../nfp_app.h" #include "../nfp_main.h" #include "../nfp_net.h" #include "../nfp_net_repr.h" #include "../nfp_port.h" #include "./cmsg.h" #define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL #define NFP_MIN_INT_PORT_ID 1 #define NFP_MAX_INT_PORT_ID 256 static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn) { return "FLOWER"; } static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app) { return DEVLINK_ESWITCH_MODE_SWITCHDEV; } static int nfp_flower_lookup_internal_port_id(struct nfp_flower_priv *priv, struct net_device *netdev) { struct net_device *entry; int i, id = 0; rcu_read_lock(); idr_for_each_entry(&priv->internal_ports.port_ids, entry, i) if (entry == netdev) { id = i; break; } rcu_read_unlock(); return id; } static int nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev) { struct nfp_flower_priv *priv = app->priv; int id; id = nfp_flower_lookup_internal_port_id(priv, netdev); if (id > 0) return id; idr_preload(GFP_ATOMIC); spin_lock_bh(&priv->internal_ports.lock); id = idr_alloc(&priv->internal_ports.port_ids, netdev, NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID, GFP_ATOMIC); spin_unlock_bh(&priv->internal_ports.lock); idr_preload_end(); return id; } u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app, struct net_device *netdev) { int ext_port; if (nfp_netdev_is_nfp_repr(netdev)) { return nfp_repr_get_port_id(netdev); } else if (nfp_flower_internal_port_can_offload(app, netdev)) { ext_port = nfp_flower_get_internal_port_id(app, netdev); if (ext_port < 0) return 0; return nfp_flower_internal_port_get_port_id(ext_port); } return 0; } static struct net_device * nfp_flower_get_netdev_from_internal_port_id(struct nfp_app *app, int port_id) { struct nfp_flower_priv *priv = app->priv; struct net_device *netdev; rcu_read_lock(); netdev = idr_find(&priv->internal_ports.port_ids, port_id); rcu_read_unlock(); return netdev; } static void nfp_flower_free_internal_port_id(struct nfp_app *app, struct net_device *netdev) { struct nfp_flower_priv *priv = app->priv; int id; id = nfp_flower_lookup_internal_port_id(priv, netdev); if (!id) return; spin_lock_bh(&priv->internal_ports.lock); idr_remove(&priv->internal_ports.port_ids, id); spin_unlock_bh(&priv->internal_ports.lock); } static int nfp_flower_internal_port_event_handler(struct nfp_app *app, struct net_device *netdev, unsigned long event) { if (event == NETDEV_UNREGISTER && nfp_flower_internal_port_can_offload(app, netdev)) nfp_flower_free_internal_port_id(app, netdev); return NOTIFY_OK; } static void nfp_flower_internal_port_init(struct nfp_flower_priv *priv) { spin_lock_init(&priv->internal_ports.lock); idr_init(&priv->internal_ports.port_ids); } static void nfp_flower_internal_port_cleanup(struct nfp_flower_priv *priv) { idr_destroy(&priv->internal_ports.port_ids); } static struct nfp_flower_non_repr_priv * nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev) { struct nfp_flower_priv *priv = app->priv; struct nfp_flower_non_repr_priv *entry; ASSERT_RTNL(); list_for_each_entry(entry, &priv->non_repr_priv, list) if (entry->netdev 
== netdev) return entry; return NULL; } void __nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv) { non_repr_priv->ref_count++; } struct nfp_flower_non_repr_priv * nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev) { struct nfp_flower_priv *priv = app->priv; struct nfp_flower_non_repr_priv *entry; entry = nfp_flower_non_repr_priv_lookup(app, netdev); if (entry) goto inc_ref; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return NULL; entry->netdev = netdev; list_add(&entry->list, &priv->non_repr_priv); inc_ref: __nfp_flower_non_repr_priv_get(entry); return entry; } void __nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv) { if (--non_repr_priv->ref_count) return; list_del(&non_repr_priv->list); kfree(non_repr_priv); } void nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev) { struct nfp_flower_non_repr_priv *entry; entry = nfp_flower_non_repr_priv_lookup(app, netdev); if (!entry) return; __nfp_flower_non_repr_priv_put(entry); } static enum nfp_repr_type nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port) { switch (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id)) { case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT: *port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, port_id); return NFP_REPR_TYPE_PHYS_PORT; case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT: *port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port_id); if (FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, port_id) == NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF) return NFP_REPR_TYPE_PF; else return NFP_REPR_TYPE_VF; } return __NFP_REPR_TYPE_MAX; } static struct net_device * nfp_flower_dev_get(struct nfp_app *app, u32 port_id, bool *redir_egress) { enum nfp_repr_type repr_type; struct nfp_reprs *reprs; u8 port = 0; /* Check if the port is internal. 
*/ if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT) { if (redir_egress) *redir_egress = true; port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, port_id); return nfp_flower_get_netdev_from_internal_port_id(app, port); } repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port); if (repr_type > NFP_REPR_TYPE_MAX) return NULL; reprs = rcu_dereference(app->reprs[repr_type]); if (!reprs) return NULL; if (port >= reprs->num_reprs) return NULL; return rcu_dereference(reprs->reprs[port]); } static int nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type, bool exists) { struct nfp_reprs *reprs; int i, err, count = 0; reprs = rcu_dereference_protected(app->reprs[type], lockdep_is_held(&app->pf->lock)); if (!reprs) return 0; for (i = 0; i < reprs->num_reprs; i++) { struct net_device *netdev; netdev = nfp_repr_get_locked(app, reprs, i); if (netdev) { struct nfp_repr *repr = netdev_priv(netdev); err = nfp_flower_cmsg_portreify(repr, exists); if (err) return err; count++; } } return count; } static int nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl) { struct nfp_flower_priv *priv = app->priv; if (!tot_repl) return 0; lockdep_assert_held(&app->pf->lock); if (!wait_event_timeout(priv->reify_wait_queue, atomic_read(replies) >= tot_repl, NFP_FL_REPLY_TIMEOUT)) { nfp_warn(app->cpp, "Not all reprs responded to reify\n"); return -EIO; } return 0; } static int nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr) { int err; err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false); if (err) return err; netif_tx_wake_all_queues(repr->netdev); return 0; } static int nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr) { netif_tx_disable(repr->netdev); return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false); } static void nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev) { struct nfp_repr *repr = netdev_priv(netdev); kfree(repr->app_priv); } static void nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev) { struct nfp_repr *repr = netdev_priv(netdev); struct nfp_flower_priv *priv = app->priv; atomic_t *replies = &priv->reify_replies; int err; atomic_set(replies, 0); err = nfp_flower_cmsg_portreify(repr, false); if (err) { nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n"); return; } nfp_flower_wait_repr_reify(app, replies, 1); } static void nfp_flower_sriov_disable(struct nfp_app *app) { struct nfp_flower_priv *priv = app->priv; if (!priv->nn) return; nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF); } static int nfp_flower_spawn_vnic_reprs(struct nfp_app *app, enum nfp_flower_cmsg_port_vnic_type vnic_type, enum nfp_repr_type repr_type, unsigned int cnt) { u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp); struct nfp_flower_priv *priv = app->priv; atomic_t *replies = &priv->reify_replies; struct nfp_flower_repr_priv *repr_priv; enum nfp_port_type port_type; struct nfp_repr *nfp_repr; struct nfp_reprs *reprs; int i, err, reify_cnt; const u8 queue = 0; port_type = repr_type == NFP_REPR_TYPE_PF ? 
NFP_PORT_PF_PORT : NFP_PORT_VF_PORT; reprs = nfp_reprs_alloc(cnt); if (!reprs) return -ENOMEM; for (i = 0; i < cnt; i++) { struct net_device *repr; struct nfp_port *port; u32 port_id; repr = nfp_repr_alloc(app); if (!repr) { err = -ENOMEM; goto err_reprs_clean; } repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL); if (!repr_priv) { err = -ENOMEM; goto err_reprs_clean; } nfp_repr = netdev_priv(repr); nfp_repr->app_priv = repr_priv; repr_priv->nfp_repr = nfp_repr; /* For now we only support 1 PF */ WARN_ON(repr_type == NFP_REPR_TYPE_PF && i); port = nfp_port_alloc(app, port_type, repr); if (IS_ERR(port)) { err = PTR_ERR(port); nfp_repr_free(repr); goto err_reprs_clean; } if (repr_type == NFP_REPR_TYPE_PF) { port->pf_id = i; port->vnic = priv->nn->dp.ctrl_bar; } else { port->pf_id = 0; port->vf_id = i; port->vnic = app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ; } eth_hw_addr_random(repr); port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type, i, queue); err = nfp_repr_init(app, repr, port_id, port, priv->nn->dp.netdev); if (err) { nfp_port_free(port); nfp_repr_free(repr); goto err_reprs_clean; } RCU_INIT_POINTER(reprs->reprs[i], repr); nfp_info(app->cpp, "%s%d Representor(%s) created\n", repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i, repr->name); } nfp_app_reprs_set(app, repr_type, reprs); atomic_set(replies, 0); reify_cnt = nfp_flower_reprs_reify(app, repr_type, true); if (reify_cnt < 0) { err = reify_cnt; nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n"); goto err_reprs_remove; } err = nfp_flower_wait_repr_reify(app, replies, reify_cnt); if (err) goto err_reprs_remove; return 0; err_reprs_remove: reprs = nfp_app_reprs_set(app, repr_type, NULL); err_reprs_clean: nfp_reprs_clean_and_free(app, reprs); return err; } static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs) { struct nfp_flower_priv *priv = app->priv; if (!priv->nn) return 0; return nfp_flower_spawn_vnic_reprs(app, NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF, NFP_REPR_TYPE_VF, num_vfs); } static int nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) { struct nfp_eth_table *eth_tbl = app->pf->eth_tbl; atomic_t *replies = &priv->reify_replies; struct nfp_flower_repr_priv *repr_priv; struct nfp_repr *nfp_repr; struct sk_buff *ctrl_skb; struct nfp_reprs *reprs; int err, reify_cnt; unsigned int i; ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count); if (!ctrl_skb) return -ENOMEM; reprs = nfp_reprs_alloc(eth_tbl->max_index + 1); if (!reprs) { err = -ENOMEM; goto err_free_ctrl_skb; } for (i = 0; i < eth_tbl->count; i++) { unsigned int phys_port = eth_tbl->ports[i].index; struct net_device *repr; struct nfp_port *port; u32 cmsg_port_id; repr = nfp_repr_alloc(app); if (!repr) { err = -ENOMEM; goto err_reprs_clean; } repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL); if (!repr_priv) { err = -ENOMEM; goto err_reprs_clean; } nfp_repr = netdev_priv(repr); nfp_repr->app_priv = repr_priv; repr_priv->nfp_repr = nfp_repr; port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr); if (IS_ERR(port)) { err = PTR_ERR(port); nfp_repr_free(repr); goto err_reprs_clean; } err = nfp_port_init_phy_port(app->pf, app, port, i); if (err) { nfp_port_free(port); nfp_repr_free(repr); goto err_reprs_clean; } SET_NETDEV_DEV(repr, &priv->nn->pdev->dev); nfp_net_get_mac_addr(app->pf, repr, port); cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port); err = nfp_repr_init(app, repr, cmsg_port_id, port, priv->nn->dp.netdev); if (err) { nfp_port_free(port); nfp_repr_free(repr); goto err_reprs_clean; } 
nfp_flower_cmsg_mac_repr_add(ctrl_skb, i, eth_tbl->ports[i].nbi, eth_tbl->ports[i].base, phys_port); RCU_INIT_POINTER(reprs->reprs[phys_port], repr); nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n", phys_port, repr->name); } nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs); /* The REIFY/MAC_REPR control messages should be sent after the MAC * representors are registered using nfp_app_reprs_set(). This is * because the firmware may respond with control messages for the * MAC representors, f.e. to provide the driver with information * about their state, and without registration the driver will drop * any such messages. */ atomic_set(replies, 0); reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true); if (reify_cnt < 0) { err = reify_cnt; nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n"); goto err_reprs_remove; } err = nfp_flower_wait_repr_reify(app, replies, reify_cnt); if (err) goto err_reprs_remove; nfp_ctrl_tx(app->ctrl, ctrl_skb); return 0; err_reprs_remove: reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL); err_reprs_clean: nfp_reprs_clean_and_free(app, reprs); err_free_ctrl_skb: kfree_skb(ctrl_skb); return err; } static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) { if (id > 0) { nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n"); goto err_invalid_port; } eth_hw_addr_random(nn->dp.netdev); netif_keep_dst(nn->dp.netdev); nn->vnic_no_name = true; return 0; err_invalid_port: nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev); return PTR_ERR_OR_ZERO(nn->port); } static void nfp_flower_vnic_clean(struct nfp_app *app, struct nfp_net *nn) { struct nfp_flower_priv *priv = app->priv; if (app->pf->num_vfs) nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF); nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF); nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT); priv->nn = NULL; } static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn) { struct nfp_flower_priv *priv = app->priv; int err; priv->nn = nn; err = nfp_flower_spawn_phy_reprs(app, app->priv); if (err) goto err_clear_nn; err = nfp_flower_spawn_vnic_reprs(app, NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF, NFP_REPR_TYPE_PF, 1); if (err) goto err_destroy_reprs_phy; if (app->pf->num_vfs) { err = nfp_flower_spawn_vnic_reprs(app, NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF, NFP_REPR_TYPE_VF, app->pf->num_vfs); if (err) goto err_destroy_reprs_pf; } return 0; err_destroy_reprs_pf: nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF); err_destroy_reprs_phy: nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT); err_clear_nn: priv->nn = NULL; return err; } static int nfp_flower_init(struct nfp_app *app) { u64 version, features, ctx_count, num_mems; const struct nfp_pf *pf = app->pf; struct nfp_flower_priv *app_priv; int err; if (!pf->eth_tbl) { nfp_warn(app->cpp, "FlowerNIC requires eth table\n"); return -EINVAL; } if (!pf->mac_stats_bar) { nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n"); return -EINVAL; } if (!pf->vf_cfg_bar) { nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n"); return -EINVAL; } version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err); if (err) { nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n"); return err; } num_mems = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_SPLIT", &err); if (err) { nfp_warn(app->cpp, "FlowerNIC: unsupported host context memory: %d\n", err); err = 0; num_mems = 1; } if 
(!FIELD_FIT(NFP_FL_STAT_ID_MU_NUM, num_mems) || !num_mems) { nfp_warn(app->cpp, "FlowerNIC: invalid host context memory: %llu\n", num_mems); return -EINVAL; } ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT", &err); if (err) { nfp_warn(app->cpp, "FlowerNIC: unsupported host context count: %d\n", err); err = 0; ctx_count = BIT(17); } /* We need to ensure hardware has enough flower capabilities. */ if (version != NFP_FLOWER_ALLOWED_VER) { nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n"); return -EINVAL; } app_priv = vzalloc(sizeof(struct nfp_flower_priv)); if (!app_priv) return -ENOMEM; app_priv->total_mem_units = num_mems; app_priv->active_mem_unit = 0; app_priv->stats_ring_size = roundup_pow_of_two(ctx_count); app->priv = app_priv; app_priv->app = app; skb_queue_head_init(&app_priv->cmsg_skbs_high); skb_queue_head_init(&app_priv->cmsg_skbs_low); INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx); init_waitqueue_head(&app_priv->reify_wait_queue); init_waitqueue_head(&app_priv->mtu_conf.wait_q); spin_lock_init(&app_priv->mtu_conf.lock); err = nfp_flower_metadata_init(app, ctx_count, num_mems); if (err) goto err_free_app_priv; /* Extract the extra features supported by the firmware. */ features = nfp_rtsym_read_le(app->pf->rtbl, "_abi_flower_extra_features", &err); if (err) app_priv->flower_ext_feats = 0; else app_priv->flower_ext_feats = features; /* Tell the firmware that the driver supports lag. */ err = nfp_rtsym_write_le(app->pf->rtbl, "_abi_flower_balance_sync_enable", 1); if (!err) { app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG; nfp_flower_lag_init(&app_priv->nfp_lag); } else if (err == -ENOENT) { nfp_warn(app->cpp, "LAG not supported by FW.\n"); } else { goto err_cleanup_metadata; } if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) { /* Tell the firmware that the driver supports flow merging. 
*/ err = nfp_rtsym_write_le(app->pf->rtbl, "_abi_flower_merge_hint_enable", 1); if (!err) { app_priv->flower_ext_feats |= NFP_FL_FEATS_FLOW_MERGE; nfp_flower_internal_port_init(app_priv); } else if (err == -ENOENT) { nfp_warn(app->cpp, "Flow merge not supported by FW.\n"); } else { goto err_lag_clean; } } else { nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n"); } if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM) nfp_flower_qos_init(app); INIT_LIST_HEAD(&app_priv->indr_block_cb_priv); INIT_LIST_HEAD(&app_priv->non_repr_priv); app_priv->pre_tun_rule_cnt = 0; return 0; err_lag_clean: if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) nfp_flower_lag_cleanup(&app_priv->nfp_lag); err_cleanup_metadata: nfp_flower_metadata_cleanup(app); err_free_app_priv: vfree(app->priv); return err; } static void nfp_flower_clean(struct nfp_app *app) { struct nfp_flower_priv *app_priv = app->priv; skb_queue_purge(&app_priv->cmsg_skbs_high); skb_queue_purge(&app_priv->cmsg_skbs_low); flush_work(&app_priv->cmsg_work); if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM) nfp_flower_qos_cleanup(app); if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) nfp_flower_lag_cleanup(&app_priv->nfp_lag); if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE) nfp_flower_internal_port_cleanup(app_priv); nfp_flower_metadata_cleanup(app); vfree(app->priv); app->priv = NULL; } static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv) { bool ret; spin_lock_bh(&app_priv->mtu_conf.lock); ret = app_priv->mtu_conf.ack; spin_unlock_bh(&app_priv->mtu_conf.lock); return ret; } static int nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu) { struct nfp_flower_priv *app_priv = app->priv; struct nfp_repr *repr = netdev_priv(netdev); int err; /* Only need to config FW for physical port MTU change. */ if (repr->port->type != NFP_PORT_PHYS_PORT) return 0; if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) { nfp_err(app->cpp, "Physical port MTU setting not supported\n"); return -EINVAL; } spin_lock_bh(&app_priv->mtu_conf.lock); app_priv->mtu_conf.ack = false; app_priv->mtu_conf.requested_val = new_mtu; app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id; spin_unlock_bh(&app_priv->mtu_conf.lock); err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu, true); if (err) { spin_lock_bh(&app_priv->mtu_conf.lock); app_priv->mtu_conf.requested_val = 0; spin_unlock_bh(&app_priv->mtu_conf.lock); return err; } /* Wait for fw to ack the change. 
*/ if (!wait_event_timeout(app_priv->mtu_conf.wait_q, nfp_flower_check_ack(app_priv), NFP_FL_REPLY_TIMEOUT)) { spin_lock_bh(&app_priv->mtu_conf.lock); app_priv->mtu_conf.requested_val = 0; spin_unlock_bh(&app_priv->mtu_conf.lock); nfp_warn(app->cpp, "MTU change not verified with fw\n"); return -EIO; } return 0; } static int nfp_flower_start(struct nfp_app *app) { struct nfp_flower_priv *app_priv = app->priv; int err; if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) { err = nfp_flower_lag_reset(&app_priv->nfp_lag); if (err) return err; } return nfp_tunnel_config_start(app); } static void nfp_flower_stop(struct nfp_app *app) { nfp_tunnel_config_stop(app); } static int nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev, unsigned long event, void *ptr) { struct nfp_flower_priv *app_priv = app->priv; int ret; if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) { ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr); if (ret & NOTIFY_STOP_MASK) return ret; } ret = nfp_flower_reg_indir_block_handler(app, netdev, event); if (ret & NOTIFY_STOP_MASK) return ret; ret = nfp_flower_internal_port_event_handler(app, netdev, event); if (ret & NOTIFY_STOP_MASK) return ret; return nfp_tunnel_mac_event_handler(app, netdev, event, ptr); } const struct nfp_app_type app_flower = { .id = NFP_APP_FLOWER_NIC, .name = "flower", .ctrl_cap_mask = ~0U, .ctrl_has_meta = true, .extra_cap = nfp_flower_extra_cap, .init = nfp_flower_init, .clean = nfp_flower_clean, .repr_change_mtu = nfp_flower_repr_change_mtu, .vnic_alloc = nfp_flower_vnic_alloc, .vnic_init = nfp_flower_vnic_init, .vnic_clean = nfp_flower_vnic_clean, .repr_preclean = nfp_flower_repr_netdev_preclean, .repr_clean = nfp_flower_repr_netdev_clean, .repr_open = nfp_flower_repr_netdev_open, .repr_stop = nfp_flower_repr_netdev_stop, .start = nfp_flower_start, .stop = nfp_flower_stop, .netdev_event = nfp_flower_netdev_event, .ctrl_msg_rx = nfp_flower_cmsg_rx, .sriov_enable = nfp_flower_sriov_enable, .sriov_disable = nfp_flower_sriov_disable, .eswitch_mode_get = eswitch_mode_get, .dev_get = nfp_flower_dev_get, .setup_tc = nfp_flower_setup_tc, };
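/*
 * A minimal, self-contained sketch (independent of the driver code above) of
 * the idr_alloc()/idr_find()/idr_remove() pattern that
 * nfp_flower_get_internal_port_id() and its helpers rely on.  All example_*
 * names are hypothetical and exist only for illustration; the 1..255 id range
 * mirrors NFP_MIN_INT_PORT_ID/NFP_MAX_INT_PORT_ID and locking is simplified
 * to the same single spinlock + RCU-read-side scheme the driver uses.
 */
#include <linux/idr.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct example_port_map {
	struct idr	ids;	/* id -> struct net_device mapping */
	spinlock_t	lock;	/* protects modifications of @ids */
};

static void example_port_map_init(struct example_port_map *map)
{
	idr_init(&map->ids);
	spin_lock_init(&map->lock);
}

/* Allocate an id in [1, 255] for @netdev, or return a negative errno. */
static int example_port_map_add(struct example_port_map *map,
				struct net_device *netdev)
{
	int id;

	idr_preload(GFP_ATOMIC);
	spin_lock_bh(&map->lock);
	id = idr_alloc(&map->ids, netdev, 1, 256, GFP_ATOMIC);
	spin_unlock_bh(&map->lock);
	idr_preload_end();

	return id;
}

/* idr_find() is RCU safe, so the read side takes no spinlock. */
static struct net_device *example_port_map_find(struct example_port_map *map,
						int id)
{
	struct net_device *netdev;

	rcu_read_lock();
	netdev = idr_find(&map->ids, id);
	rcu_read_unlock();

	return netdev;
}

static void example_port_map_del(struct example_port_map *map, int id)
{
	spin_lock_bh(&map->lock);
	idr_remove(&map->ids, id);
	spin_unlock_bh(&map->lock);
}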
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1270_0
crossvul-cpp_data_good_1238_0
/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */ /* * vboxguest vmm-req and hgcm-call code, VBoxGuestR0LibHGCMInternal.cpp, * VBoxGuestR0LibGenericRequest.cpp and RTErrConvertToErrno.cpp in vbox svn. * * Copyright (C) 2006-2016 Oracle Corporation */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/sizes.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/vbox_err.h> #include <linux/vbox_utils.h> #include "vboxguest_core.h" /* Get the pointer to the first parameter of a HGCM call request. */ #define VMMDEV_HGCM_CALL_PARMS(a) \ ((struct vmmdev_hgcm_function_parameter *)( \ (u8 *)(a) + sizeof(struct vmmdev_hgcm_call))) /* The max parameter buffer size for a user request. */ #define VBG_MAX_HGCM_USER_PARM (24 * SZ_1M) /* The max parameter buffer size for a kernel request. */ #define VBG_MAX_HGCM_KERNEL_PARM (16 * SZ_1M) #define VBG_DEBUG_PORT 0x504 /* This protects vbg_log_buf and serializes VBG_DEBUG_PORT accesses */ static DEFINE_SPINLOCK(vbg_log_lock); static char vbg_log_buf[128]; #define VBG_LOG(name, pr_func) \ void name(const char *fmt, ...) \ { \ unsigned long flags; \ va_list args; \ int i, count; \ \ va_start(args, fmt); \ spin_lock_irqsave(&vbg_log_lock, flags); \ \ count = vscnprintf(vbg_log_buf, sizeof(vbg_log_buf), fmt, args);\ for (i = 0; i < count; i++) \ outb(vbg_log_buf[i], VBG_DEBUG_PORT); \ \ pr_func("%s", vbg_log_buf); \ \ spin_unlock_irqrestore(&vbg_log_lock, flags); \ va_end(args); \ } \ EXPORT_SYMBOL(name) VBG_LOG(vbg_info, pr_info); VBG_LOG(vbg_warn, pr_warn); VBG_LOG(vbg_err, pr_err); #if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG) VBG_LOG(vbg_debug, pr_debug); #endif void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type, u32 requestor) { struct vmmdev_request_header *req; int order = get_order(PAGE_ALIGN(len)); req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order); if (!req) return NULL; memset(req, 0xaa, len); req->size = len; req->version = VMMDEV_REQUEST_HEADER_VERSION; req->request_type = req_type; req->rc = VERR_GENERAL_FAILURE; req->reserved1 = 0; req->requestor = requestor; return req; } void vbg_req_free(void *req, size_t len) { if (!req) return; free_pages((unsigned long)req, get_order(PAGE_ALIGN(len))); } /* Note this function returns a VBox status code, not a negative errno!! */ int vbg_req_perform(struct vbg_dev *gdev, void *req) { unsigned long phys_req = virt_to_phys(req); outl(phys_req, gdev->io_port + VMMDEV_PORT_OFF_REQUEST); /* * The host changes the request as a result of the outl, make sure * the outl and any reads of the req happen in the correct order. 
*/ mb(); return ((struct vmmdev_request_header *)req)->rc; } static bool hgcm_req_done(struct vbg_dev *gdev, struct vmmdev_hgcmreq_header *header) { unsigned long flags; bool done; spin_lock_irqsave(&gdev->event_spinlock, flags); done = header->flags & VMMDEV_HGCM_REQ_DONE; spin_unlock_irqrestore(&gdev->event_spinlock, flags); return done; } int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor, struct vmmdev_hgcm_service_location *loc, u32 *client_id, int *vbox_status) { struct vmmdev_hgcm_connect *hgcm_connect = NULL; int rc; hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect), VMMDEVREQ_HGCM_CONNECT, requestor); if (!hgcm_connect) return -ENOMEM; hgcm_connect->header.flags = 0; memcpy(&hgcm_connect->loc, loc, sizeof(*loc)); hgcm_connect->client_id = 0; rc = vbg_req_perform(gdev, hgcm_connect); if (rc == VINF_HGCM_ASYNC_EXECUTE) wait_event(gdev->hgcm_wq, hgcm_req_done(gdev, &hgcm_connect->header)); if (rc >= 0) { *client_id = hgcm_connect->client_id; rc = hgcm_connect->header.result; } vbg_req_free(hgcm_connect, sizeof(*hgcm_connect)); *vbox_status = rc; return 0; } EXPORT_SYMBOL(vbg_hgcm_connect); int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor, u32 client_id, int *vbox_status) { struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL; int rc; hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect), VMMDEVREQ_HGCM_DISCONNECT, requestor); if (!hgcm_disconnect) return -ENOMEM; hgcm_disconnect->header.flags = 0; hgcm_disconnect->client_id = client_id; rc = vbg_req_perform(gdev, hgcm_disconnect); if (rc == VINF_HGCM_ASYNC_EXECUTE) wait_event(gdev->hgcm_wq, hgcm_req_done(gdev, &hgcm_disconnect->header)); if (rc >= 0) rc = hgcm_disconnect->header.result; vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect)); *vbox_status = rc; return 0; } EXPORT_SYMBOL(vbg_hgcm_disconnect); static u32 hgcm_call_buf_size_in_pages(void *buf, u32 len) { u32 size = PAGE_ALIGN(len + ((unsigned long)buf & ~PAGE_MASK)); return size >> PAGE_SHIFT; } static void hgcm_call_add_pagelist_size(void *buf, u32 len, size_t *extra) { u32 page_count; page_count = hgcm_call_buf_size_in_pages(buf, len); *extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]); } static int hgcm_call_preprocess_linaddr( const struct vmmdev_hgcm_function_parameter *src_parm, void **bounce_buf_ret, size_t *extra) { void *buf, *bounce_buf; bool copy_in; u32 len; int ret; buf = (void *)src_parm->u.pointer.u.linear_addr; len = src_parm->u.pointer.size; copy_in = src_parm->type != VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT; if (len > VBG_MAX_HGCM_USER_PARM) return -E2BIG; bounce_buf = kvmalloc(len, GFP_KERNEL); if (!bounce_buf) return -ENOMEM; *bounce_buf_ret = bounce_buf; if (copy_in) { ret = copy_from_user(bounce_buf, (void __user *)buf, len); if (ret) return -EFAULT; } else { memset(bounce_buf, 0, len); } hgcm_call_add_pagelist_size(bounce_buf, len, extra); return 0; } /** * Preprocesses the HGCM call, validate parameters, alloc bounce buffers and * figure out how much extra storage we need for page lists. * Return: 0 or negative errno value. * @src_parm: Pointer to source function call parameters * @parm_count: Number of function call parameters. * @bounce_bufs_ret: Where to return the allocated bouncebuffer array * @extra: Where to return the extra request space needed for * physical page lists. 
*/ static int hgcm_call_preprocess( const struct vmmdev_hgcm_function_parameter *src_parm, u32 parm_count, void ***bounce_bufs_ret, size_t *extra) { void *buf, **bounce_bufs = NULL; u32 i, len; int ret; for (i = 0; i < parm_count; i++, src_parm++) { switch (src_parm->type) { case VMMDEV_HGCM_PARM_TYPE_32BIT: case VMMDEV_HGCM_PARM_TYPE_64BIT: break; case VMMDEV_HGCM_PARM_TYPE_LINADDR: case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN: case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT: if (!bounce_bufs) { bounce_bufs = kcalloc(parm_count, sizeof(void *), GFP_KERNEL); if (!bounce_bufs) return -ENOMEM; *bounce_bufs_ret = bounce_bufs; } ret = hgcm_call_preprocess_linaddr(src_parm, &bounce_bufs[i], extra); if (ret) return ret; break; case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL: case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN: case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT: buf = (void *)src_parm->u.pointer.u.linear_addr; len = src_parm->u.pointer.size; if (WARN_ON(len > VBG_MAX_HGCM_KERNEL_PARM)) return -E2BIG; hgcm_call_add_pagelist_size(buf, len, extra); break; default: return -EINVAL; } } return 0; } /** * Translates linear address types to page list direction flags. * * Return: page list flags. * @type: The type. */ static u32 hgcm_call_linear_addr_type_to_pagelist_flags( enum vmmdev_hgcm_function_parameter_type type) { switch (type) { default: WARN_ON(1); /* Fall through */ case VMMDEV_HGCM_PARM_TYPE_LINADDR: case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL: return VMMDEV_HGCM_F_PARM_DIRECTION_BOTH; case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN: case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN: return VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST; case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT: case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT: return VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST; } } static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call, struct vmmdev_hgcm_function_parameter *dst_parm, void *buf, u32 len, enum vmmdev_hgcm_function_parameter_type type, u32 *off_extra) { struct vmmdev_hgcm_pagelist *dst_pg_lst; struct page *page; bool is_vmalloc; u32 i, page_count; dst_parm->type = type; if (len == 0) { dst_parm->u.pointer.size = 0; dst_parm->u.pointer.u.linear_addr = 0; return; } dst_pg_lst = (void *)call + *off_extra; page_count = hgcm_call_buf_size_in_pages(buf, len); is_vmalloc = is_vmalloc_addr(buf); dst_parm->type = VMMDEV_HGCM_PARM_TYPE_PAGELIST; dst_parm->u.page_list.size = len; dst_parm->u.page_list.offset = *off_extra; dst_pg_lst->flags = hgcm_call_linear_addr_type_to_pagelist_flags(type); dst_pg_lst->offset_first_page = (unsigned long)buf & ~PAGE_MASK; dst_pg_lst->page_count = page_count; for (i = 0; i < page_count; i++) { if (is_vmalloc) page = vmalloc_to_page(buf); else page = virt_to_page(buf); dst_pg_lst->pages[i] = page_to_phys(page); buf += PAGE_SIZE; } *off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]); } /** * Initializes the call request that we're sending to the host. * @call: The call to initialize. * @client_id: The client ID of the caller. * @function: The function number of the function to call. * @src_parm: Pointer to source function call parameters. * @parm_count: Number of function call parameters. * @bounce_bufs: The bouncebuffer array. 
*/ static void hgcm_call_init_call( struct vmmdev_hgcm_call *call, u32 client_id, u32 function, const struct vmmdev_hgcm_function_parameter *src_parm, u32 parm_count, void **bounce_bufs) { struct vmmdev_hgcm_function_parameter *dst_parm = VMMDEV_HGCM_CALL_PARMS(call); u32 i, off_extra = (uintptr_t)(dst_parm + parm_count) - (uintptr_t)call; void *buf; call->header.flags = 0; call->header.result = VINF_SUCCESS; call->client_id = client_id; call->function = function; call->parm_count = parm_count; for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) { switch (src_parm->type) { case VMMDEV_HGCM_PARM_TYPE_32BIT: case VMMDEV_HGCM_PARM_TYPE_64BIT: *dst_parm = *src_parm; break; case VMMDEV_HGCM_PARM_TYPE_LINADDR: case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN: case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT: hgcm_call_init_linaddr(call, dst_parm, bounce_bufs[i], src_parm->u.pointer.size, src_parm->type, &off_extra); break; case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL: case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN: case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT: buf = (void *)src_parm->u.pointer.u.linear_addr; hgcm_call_init_linaddr(call, dst_parm, buf, src_parm->u.pointer.size, src_parm->type, &off_extra); break; default: WARN_ON(1); dst_parm->type = VMMDEV_HGCM_PARM_TYPE_INVALID; } } } /** * Tries to cancel a pending HGCM call. * * Return: VBox status code */ static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call) { int rc; /* * We use a pre-allocated request for cancellations, which is * protected by cancel_req_mutex. This means that all cancellations * get serialized, this should be fine since they should be rare. */ mutex_lock(&gdev->cancel_req_mutex); gdev->cancel_req->phys_req_to_cancel = virt_to_phys(call); rc = vbg_req_perform(gdev, gdev->cancel_req); mutex_unlock(&gdev->cancel_req_mutex); if (rc == VERR_NOT_IMPLEMENTED) { call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED; call->header.header.request_type = VMMDEVREQ_HGCM_CANCEL; rc = vbg_req_perform(gdev, call); if (rc == VERR_INVALID_PARAMETER) rc = VERR_NOT_FOUND; } if (rc >= 0) call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED; return rc; } /** * Performs the call and completion wait. * Return: 0 or negative errno value. * @gdev: The VBoxGuest device extension. * @call: The call to execute. * @timeout_ms: Timeout in ms. * @leak_it: Where to return the leak it / free it, indicator. * Cancellation fun. */ static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call, u32 timeout_ms, bool *leak_it) { int rc, cancel_rc, ret; long timeout; *leak_it = false; rc = vbg_req_perform(gdev, call); /* * If the call failed, then pretend success. Upper layers will * interpret the result code in the packet. */ if (rc < 0) { call->header.result = rc; return 0; } if (rc != VINF_HGCM_ASYNC_EXECUTE) return 0; /* Host decided to process the request asynchronously, wait for it */ if (timeout_ms == U32_MAX) timeout = MAX_SCHEDULE_TIMEOUT; else timeout = msecs_to_jiffies(timeout_ms); timeout = wait_event_interruptible_timeout( gdev->hgcm_wq, hgcm_req_done(gdev, &call->header), timeout); /* timeout > 0 means hgcm_req_done has returned true, so success */ if (timeout > 0) return 0; if (timeout == 0) ret = -ETIMEDOUT; else ret = -EINTR; /* Cancel the request */ cancel_rc = hgcm_cancel_call(gdev, call); if (cancel_rc >= 0) return ret; /* * Failed to cancel, this should mean that the cancel has lost the * race with normal completion, wait while the host completes it. 
*/ if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED) timeout = msecs_to_jiffies(500); else timeout = msecs_to_jiffies(2000); timeout = wait_event_timeout(gdev->hgcm_wq, hgcm_req_done(gdev, &call->header), timeout); if (WARN_ON(timeout == 0)) { /* We really should never get here */ vbg_err("%s: Call timedout and cancellation failed, leaking the request\n", __func__); *leak_it = true; return ret; } /* The call has completed normally after all */ return 0; } /** * Copies the result of the call back to the caller info structure and user * buffers. * Return: 0 or negative errno value. * @call: HGCM call request. * @dst_parm: Pointer to function call parameters destination. * @parm_count: Number of function call parameters. * @bounce_bufs: The bouncebuffer array. */ static int hgcm_call_copy_back_result( const struct vmmdev_hgcm_call *call, struct vmmdev_hgcm_function_parameter *dst_parm, u32 parm_count, void **bounce_bufs) { const struct vmmdev_hgcm_function_parameter *src_parm = VMMDEV_HGCM_CALL_PARMS(call); void __user *p; int ret; u32 i; /* Copy back parameters. */ for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) { switch (dst_parm->type) { case VMMDEV_HGCM_PARM_TYPE_32BIT: case VMMDEV_HGCM_PARM_TYPE_64BIT: *dst_parm = *src_parm; break; case VMMDEV_HGCM_PARM_TYPE_PAGELIST: dst_parm->u.page_list.size = src_parm->u.page_list.size; break; case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN: case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL: case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN: case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT: dst_parm->u.pointer.size = src_parm->u.pointer.size; break; case VMMDEV_HGCM_PARM_TYPE_LINADDR: case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT: dst_parm->u.pointer.size = src_parm->u.pointer.size; p = (void __user *)dst_parm->u.pointer.u.linear_addr; ret = copy_to_user(p, bounce_bufs[i], min(src_parm->u.pointer.size, dst_parm->u.pointer.size)); if (ret) return -EFAULT; break; default: WARN_ON(1); return -EINVAL; } } return 0; } int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function, u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms, u32 parm_count, int *vbox_status) { struct vmmdev_hgcm_call *call; void **bounce_bufs = NULL; bool leak_it; size_t size; int i, ret; size = sizeof(struct vmmdev_hgcm_call) + parm_count * sizeof(struct vmmdev_hgcm_function_parameter); /* * Validate and buffer the parameters for the call. This also increases * call_size with the amount of extra space needed for page lists. 
*/ ret = hgcm_call_preprocess(parms, parm_count, &bounce_bufs, &size); if (ret) { /* Even on error bounce bufs may still have been allocated */ goto free_bounce_bufs; } call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor); if (!call) { ret = -ENOMEM; goto free_bounce_bufs; } hgcm_call_init_call(call, client_id, function, parms, parm_count, bounce_bufs); ret = vbg_hgcm_do_call(gdev, call, timeout_ms, &leak_it); if (ret == 0) { *vbox_status = call->header.result; ret = hgcm_call_copy_back_result(call, parms, parm_count, bounce_bufs); } if (!leak_it) vbg_req_free(call, size); free_bounce_bufs: if (bounce_bufs) { for (i = 0; i < parm_count; i++) kvfree(bounce_bufs[i]); kfree(bounce_bufs); } return ret; } EXPORT_SYMBOL(vbg_hgcm_call); #ifdef CONFIG_COMPAT int vbg_hgcm_call32( struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function, u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count, int *vbox_status) { struct vmmdev_hgcm_function_parameter *parm64 = NULL; u32 i, size; int ret = 0; /* KISS allocate a temporary request and convert the parameters. */ size = parm_count * sizeof(struct vmmdev_hgcm_function_parameter); parm64 = kzalloc(size, GFP_KERNEL); if (!parm64) return -ENOMEM; for (i = 0; i < parm_count; i++) { switch (parm32[i].type) { case VMMDEV_HGCM_PARM_TYPE_32BIT: parm64[i].type = VMMDEV_HGCM_PARM_TYPE_32BIT; parm64[i].u.value32 = parm32[i].u.value32; break; case VMMDEV_HGCM_PARM_TYPE_64BIT: parm64[i].type = VMMDEV_HGCM_PARM_TYPE_64BIT; parm64[i].u.value64 = parm32[i].u.value64; break; case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT: case VMMDEV_HGCM_PARM_TYPE_LINADDR: case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN: parm64[i].type = parm32[i].type; parm64[i].u.pointer.size = parm32[i].u.pointer.size; parm64[i].u.pointer.u.linear_addr = parm32[i].u.pointer.u.linear_addr; break; default: ret = -EINVAL; } if (ret < 0) goto out_free; } ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms, parm64, parm_count, vbox_status); if (ret < 0) goto out_free; /* Copy back. 
*/ for (i = 0; i < parm_count; i++, parm32++, parm64++) { switch (parm64[i].type) { case VMMDEV_HGCM_PARM_TYPE_32BIT: parm32[i].u.value32 = parm64[i].u.value32; break; case VMMDEV_HGCM_PARM_TYPE_64BIT: parm32[i].u.value64 = parm64[i].u.value64; break; case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT: case VMMDEV_HGCM_PARM_TYPE_LINADDR: case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN: parm32[i].u.pointer.size = parm64[i].u.pointer.size; break; default: WARN_ON(1); ret = -EINVAL; } } out_free: kfree(parm64); return ret; } #endif static const int vbg_status_code_to_errno_table[] = { [-VERR_ACCESS_DENIED] = -EPERM, [-VERR_FILE_NOT_FOUND] = -ENOENT, [-VERR_PROCESS_NOT_FOUND] = -ESRCH, [-VERR_INTERRUPTED] = -EINTR, [-VERR_DEV_IO_ERROR] = -EIO, [-VERR_TOO_MUCH_DATA] = -E2BIG, [-VERR_BAD_EXE_FORMAT] = -ENOEXEC, [-VERR_INVALID_HANDLE] = -EBADF, [-VERR_TRY_AGAIN] = -EAGAIN, [-VERR_NO_MEMORY] = -ENOMEM, [-VERR_INVALID_POINTER] = -EFAULT, [-VERR_RESOURCE_BUSY] = -EBUSY, [-VERR_ALREADY_EXISTS] = -EEXIST, [-VERR_NOT_SAME_DEVICE] = -EXDEV, [-VERR_NOT_A_DIRECTORY] = -ENOTDIR, [-VERR_PATH_NOT_FOUND] = -ENOTDIR, [-VERR_INVALID_NAME] = -ENOENT, [-VERR_IS_A_DIRECTORY] = -EISDIR, [-VERR_INVALID_PARAMETER] = -EINVAL, [-VERR_TOO_MANY_OPEN_FILES] = -ENFILE, [-VERR_INVALID_FUNCTION] = -ENOTTY, [-VERR_SHARING_VIOLATION] = -ETXTBSY, [-VERR_FILE_TOO_BIG] = -EFBIG, [-VERR_DISK_FULL] = -ENOSPC, [-VERR_SEEK_ON_DEVICE] = -ESPIPE, [-VERR_WRITE_PROTECT] = -EROFS, [-VERR_BROKEN_PIPE] = -EPIPE, [-VERR_DEADLOCK] = -EDEADLK, [-VERR_FILENAME_TOO_LONG] = -ENAMETOOLONG, [-VERR_FILE_LOCK_FAILED] = -ENOLCK, [-VERR_NOT_IMPLEMENTED] = -ENOSYS, [-VERR_NOT_SUPPORTED] = -ENOSYS, [-VERR_DIR_NOT_EMPTY] = -ENOTEMPTY, [-VERR_TOO_MANY_SYMLINKS] = -ELOOP, [-VERR_NO_MORE_FILES] = -ENODATA, [-VERR_NO_DATA] = -ENODATA, [-VERR_NET_NO_NETWORK] = -ENONET, [-VERR_NET_NOT_UNIQUE_NAME] = -ENOTUNIQ, [-VERR_NO_TRANSLATION] = -EILSEQ, [-VERR_NET_NOT_SOCKET] = -ENOTSOCK, [-VERR_NET_DEST_ADDRESS_REQUIRED] = -EDESTADDRREQ, [-VERR_NET_MSG_SIZE] = -EMSGSIZE, [-VERR_NET_PROTOCOL_TYPE] = -EPROTOTYPE, [-VERR_NET_PROTOCOL_NOT_AVAILABLE] = -ENOPROTOOPT, [-VERR_NET_PROTOCOL_NOT_SUPPORTED] = -EPROTONOSUPPORT, [-VERR_NET_SOCKET_TYPE_NOT_SUPPORTED] = -ESOCKTNOSUPPORT, [-VERR_NET_OPERATION_NOT_SUPPORTED] = -EOPNOTSUPP, [-VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED] = -EPFNOSUPPORT, [-VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED] = -EAFNOSUPPORT, [-VERR_NET_ADDRESS_IN_USE] = -EADDRINUSE, [-VERR_NET_ADDRESS_NOT_AVAILABLE] = -EADDRNOTAVAIL, [-VERR_NET_DOWN] = -ENETDOWN, [-VERR_NET_UNREACHABLE] = -ENETUNREACH, [-VERR_NET_CONNECTION_RESET] = -ENETRESET, [-VERR_NET_CONNECTION_ABORTED] = -ECONNABORTED, [-VERR_NET_CONNECTION_RESET_BY_PEER] = -ECONNRESET, [-VERR_NET_NO_BUFFER_SPACE] = -ENOBUFS, [-VERR_NET_ALREADY_CONNECTED] = -EISCONN, [-VERR_NET_NOT_CONNECTED] = -ENOTCONN, [-VERR_NET_SHUTDOWN] = -ESHUTDOWN, [-VERR_NET_TOO_MANY_REFERENCES] = -ETOOMANYREFS, [-VERR_TIMEOUT] = -ETIMEDOUT, [-VERR_NET_CONNECTION_REFUSED] = -ECONNREFUSED, [-VERR_NET_HOST_DOWN] = -EHOSTDOWN, [-VERR_NET_HOST_UNREACHABLE] = -EHOSTUNREACH, [-VERR_NET_ALREADY_IN_PROGRESS] = -EALREADY, [-VERR_NET_IN_PROGRESS] = -EINPROGRESS, [-VERR_MEDIA_NOT_PRESENT] = -ENOMEDIUM, [-VERR_MEDIA_NOT_RECOGNIZED] = -EMEDIUMTYPE, }; int vbg_status_code_to_errno(int rc) { if (rc >= 0) return 0; rc = -rc; if (rc >= ARRAY_SIZE(vbg_status_code_to_errno_table) || vbg_status_code_to_errno_table[rc] == 0) { vbg_warn("%s: Unhandled err %d\n", __func__, -rc); return -EPROTO; } return vbg_status_code_to_errno_table[rc]; } EXPORT_SYMBOL(vbg_status_code_to_errno);
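/*
 * A minimal usage sketch (not part of the file above) showing how a caller is
 * expected to combine vbg_hgcm_call() with vbg_status_code_to_errno(): the
 * function returns a negative errno for transport-level failures, while the
 * HGCM service result is reported separately as a VBox status code through
 * *vbox_status.  The client id, function number, timeout and parameter layout
 * below are hypothetical placeholders, not a real HGCM service contract.
 */
static int example_hgcm_query(struct vbg_dev *gdev, u32 requestor,
			      u32 client_id, u32 *value_out)
{
	struct vmmdev_hgcm_function_parameter parm = {
		.type = VMMDEV_HGCM_PARM_TYPE_32BIT,
	};
	int vbox_status, ret;

	/* Hypothetical service function number 1, 30 second timeout. */
	ret = vbg_hgcm_call(gdev, requestor, client_id, 1 /* function */,
			    30000 /* timeout_ms */, &parm, 1, &vbox_status);
	if (ret < 0)
		return ret;	/* transport/copy error, already a -errno */

	if (vbox_status < 0)
		return vbg_status_code_to_errno(vbox_status);

	*value_out = parm.u.value32;
	return 0;
}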
./CrossVul/dataset_final_sorted/CWE-400/c/good_1238_0
crossvul-cpp_data_bad_1214_0
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Cryptographic Coprocessor (CCP) driver * * Copyright (C) 2013-2019 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> * Author: Gary R Hook <gary.hook@amd.com> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <crypto/scatterwalk.h> #include <crypto/des.h> #include <linux/ccp.h> #include "ccp-dev.h" /* SHA initial context values */ static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = { cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), cpu_to_be32(SHA1_H4), }; static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = { cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), }; static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = { cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), }; static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = { cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1), cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3), cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5), cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7), }; static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = { cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1), cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3), cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5), cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7), }; #define CCP_NEW_JOBID(ccp) ((ccp->vdata->version == CCP_VERSION(3, 0)) ? 
\ ccp_gen_jobid(ccp) : 0) static u32 ccp_gen_jobid(struct ccp_device *ccp) { return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK; } static void ccp_sg_free(struct ccp_sg_workarea *wa) { if (wa->dma_count) dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir); wa->dma_count = 0; } static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, struct scatterlist *sg, u64 len, enum dma_data_direction dma_dir) { memset(wa, 0, sizeof(*wa)); wa->sg = sg; if (!sg) return 0; wa->nents = sg_nents_for_len(sg, len); if (wa->nents < 0) return wa->nents; wa->bytes_left = len; wa->sg_used = 0; if (len == 0) return 0; if (dma_dir == DMA_NONE) return 0; wa->dma_sg = sg; wa->dma_dev = dev; wa->dma_dir = dma_dir; wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir); if (!wa->dma_count) return -ENOMEM; return 0; } static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len) { unsigned int nbytes = min_t(u64, len, wa->bytes_left); if (!wa->sg) return; wa->sg_used += nbytes; wa->bytes_left -= nbytes; if (wa->sg_used == wa->sg->length) { wa->sg = sg_next(wa->sg); wa->sg_used = 0; } } static void ccp_dm_free(struct ccp_dm_workarea *wa) { if (wa->length <= CCP_DMAPOOL_MAX_SIZE) { if (wa->address) dma_pool_free(wa->dma_pool, wa->address, wa->dma.address); } else { if (wa->dma.address) dma_unmap_single(wa->dev, wa->dma.address, wa->length, wa->dma.dir); kfree(wa->address); } wa->address = NULL; wa->dma.address = 0; } static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa, struct ccp_cmd_queue *cmd_q, unsigned int len, enum dma_data_direction dir) { memset(wa, 0, sizeof(*wa)); if (!len) return 0; wa->dev = cmd_q->ccp->dev; wa->length = len; if (len <= CCP_DMAPOOL_MAX_SIZE) { wa->dma_pool = cmd_q->dma_pool; wa->address = dma_pool_zalloc(wa->dma_pool, GFP_KERNEL, &wa->dma.address); if (!wa->address) return -ENOMEM; wa->dma.length = CCP_DMAPOOL_MAX_SIZE; } else { wa->address = kzalloc(len, GFP_KERNEL); if (!wa->address) return -ENOMEM; wa->dma.address = dma_map_single(wa->dev, wa->address, len, dir); if (dma_mapping_error(wa->dev, wa->dma.address)) return -ENOMEM; wa->dma.length = len; } wa->dma.dir = dir; return 0; } static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, struct scatterlist *sg, unsigned int sg_offset, unsigned int len) { WARN_ON(!wa->address); if (len > (wa->length - wa_offset)) return -EINVAL; scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len, 0); return 0; } static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, struct scatterlist *sg, unsigned int sg_offset, unsigned int len) { WARN_ON(!wa->address); scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len, 1); } static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, struct scatterlist *sg, unsigned int sg_offset, unsigned int len) { u8 *p, *q; int rc; rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len); if (rc) return rc; p = wa->address + wa_offset; q = p + len - 1; while (p < q) { *p = *p ^ *q; *q = *p ^ *q; *p = *p ^ *q; p++; q--; } return 0; } static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, struct scatterlist *sg, unsigned int sg_offset, unsigned int len) { u8 *p, *q; p = wa->address + wa_offset; q = p + len - 1; while (p < q) { *p = *p ^ *q; *q = *p ^ *q; *p = *p ^ *q; p++; q--; } ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len); } static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q) { 
ccp_dm_free(&data->dm_wa); ccp_sg_free(&data->sg_wa); } static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q, struct scatterlist *sg, u64 sg_len, unsigned int dm_len, enum dma_data_direction dir) { int ret; memset(data, 0, sizeof(*data)); ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len, dir); if (ret) goto e_err; ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir); if (ret) goto e_err; return 0; e_err: ccp_free_data(data, cmd_q); return ret; } static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from) { struct ccp_sg_workarea *sg_wa = &data->sg_wa; struct ccp_dm_workarea *dm_wa = &data->dm_wa; unsigned int buf_count, nbytes; /* Clear the buffer if setting it */ if (!from) memset(dm_wa->address, 0, dm_wa->length); if (!sg_wa->sg) return 0; /* Perform the copy operation * nbytes will always be <= UINT_MAX because dm_wa->length is * an unsigned int */ nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length); scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used, nbytes, from); /* Update the structures and generate the count */ buf_count = 0; while (sg_wa->bytes_left && (buf_count < dm_wa->length)) { nbytes = min(sg_wa->sg->length - sg_wa->sg_used, dm_wa->length - buf_count); nbytes = min_t(u64, sg_wa->bytes_left, nbytes); buf_count += nbytes; ccp_update_sg_workarea(sg_wa, nbytes); } return buf_count; } static unsigned int ccp_fill_queue_buf(struct ccp_data *data) { return ccp_queue_buf(data, 0); } static unsigned int ccp_empty_queue_buf(struct ccp_data *data) { return ccp_queue_buf(data, 1); } static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, struct ccp_op *op, unsigned int block_size, bool blocksize_op) { unsigned int sg_src_len, sg_dst_len, op_len; /* The CCP can only DMA from/to one address each per operation. This * requires that we find the smallest DMA area between the source * and destination. The resulting len values will always be <= UINT_MAX * because the dma length is an unsigned int. */ sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used; sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len); if (dst) { sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used; sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len); op_len = min(sg_src_len, sg_dst_len); } else { op_len = sg_src_len; } /* The data operation length will be at least block_size in length * or the smaller of available sg room remaining for the source or * the destination */ op_len = max(op_len, block_size); /* Unless we have to buffer data, there's no reason to wait */ op->soc = 0; if (sg_src_len < block_size) { /* Not enough data in the sg element, so it * needs to be buffered into a blocksize chunk */ int cp_len = ccp_fill_queue_buf(src); op->soc = 1; op->src.u.dma.address = src->dm_wa.dma.address; op->src.u.dma.offset = 0; op->src.u.dma.length = (blocksize_op) ? 
block_size : cp_len; } else { /* Enough data in the sg element, but we need to * adjust for any previously copied data */ op->src.u.dma.address = sg_dma_address(src->sg_wa.sg); op->src.u.dma.offset = src->sg_wa.sg_used; op->src.u.dma.length = op_len & ~(block_size - 1); ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length); } if (dst) { if (sg_dst_len < block_size) { /* Not enough room in the sg element or we're on the * last piece of data (when using padding), so the * output needs to be buffered into a blocksize chunk */ op->soc = 1; op->dst.u.dma.address = dst->dm_wa.dma.address; op->dst.u.dma.offset = 0; op->dst.u.dma.length = op->src.u.dma.length; } else { /* Enough room in the sg element, but we need to * adjust for any previously used area */ op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg); op->dst.u.dma.offset = dst->sg_wa.sg_used; op->dst.u.dma.length = op->src.u.dma.length; } } } static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst, struct ccp_op *op) { op->init = 0; if (dst) { if (op->dst.u.dma.address == dst->dm_wa.dma.address) ccp_empty_queue_buf(dst); else ccp_update_sg_workarea(&dst->sg_wa, op->dst.u.dma.length); } } static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q, struct ccp_dm_workarea *wa, u32 jobid, u32 sb, u32 byte_swap, bool from) { struct ccp_op op; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = jobid; op.eom = 1; if (from) { op.soc = 1; op.src.type = CCP_MEMTYPE_SB; op.src.u.sb = sb; op.dst.type = CCP_MEMTYPE_SYSTEM; op.dst.u.dma.address = wa->dma.address; op.dst.u.dma.length = wa->length; } else { op.src.type = CCP_MEMTYPE_SYSTEM; op.src.u.dma.address = wa->dma.address; op.src.u.dma.length = wa->length; op.dst.type = CCP_MEMTYPE_SB; op.dst.u.sb = sb; } op.u.passthru.byte_swap = byte_swap; return cmd_q->ccp->vdata->perform->passthru(&op); } static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q, struct ccp_dm_workarea *wa, u32 jobid, u32 sb, u32 byte_swap) { return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false); } static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q, struct ccp_dm_workarea *wa, u32 jobid, u32 sb, u32 byte_swap) { return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true); } static noinline_for_stack int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_aes_engine *aes = &cmd->u.aes; struct ccp_dm_workarea key, ctx; struct ccp_data src; struct ccp_op op; unsigned int dm_offset; int ret; if (!((aes->key_len == AES_KEYSIZE_128) || (aes->key_len == AES_KEYSIZE_192) || (aes->key_len == AES_KEYSIZE_256))) return -EINVAL; if (aes->src_len & (AES_BLOCK_SIZE - 1)) return -EINVAL; if (aes->iv_len != AES_BLOCK_SIZE) return -EINVAL; if (!aes->key || !aes->iv || !aes->src) return -EINVAL; if (aes->cmac_final) { if (aes->cmac_key_len != AES_BLOCK_SIZE) return -EINVAL; if (!aes->cmac_key) return -EINVAL; } BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1); BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1); ret = -EIO; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); op.sb_key = cmd_q->sb_key; op.sb_ctx = cmd_q->sb_ctx; op.init = 1; op.u.aes.type = aes->type; op.u.aes.mode = aes->mode; op.u.aes.action = aes->action; /* All supported key sizes fit in a single (32-byte) SB entry * and must be in little endian format. Use the 256-bit byte * swap passthru option to convert from big endian to little * endian. 
*/ ret = ccp_init_dm_workarea(&key, cmd_q, CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES, DMA_TO_DEVICE); if (ret) return ret; dm_offset = CCP_SB_BYTES - aes->key_len; ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); if (ret) goto e_key; ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_key; } /* The AES context fits in a single (32-byte) SB entry and * must be in little endian format. Use the 256-bit byte swap * passthru option to convert from big endian to little endian. */ ret = ccp_init_dm_workarea(&ctx, cmd_q, CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, DMA_BIDIRECTIONAL); if (ret) goto e_key; dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); if (ret) goto e_ctx; ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_ctx; } /* Send data to the CCP AES engine */ ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len, AES_BLOCK_SIZE, DMA_TO_DEVICE); if (ret) goto e_ctx; while (src.sg_wa.bytes_left) { ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true); if (aes->cmac_final && !src.sg_wa.bytes_left) { op.eom = 1; /* Push the K1/K2 key to the CCP now */ ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_src; } ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0, aes->cmac_key_len); if (ret) goto e_src; ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_src; } } ret = cmd_q->ccp->vdata->perform->aes(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_src; } ccp_process_data(&src, NULL, &op); } /* Retrieve the AES context - convert from LE to BE using * 32-byte (256-bit) byteswapping */ ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_src; } /* ...but we only need AES_BLOCK_SIZE bytes */ dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); e_src: ccp_free_data(&src, cmd_q); e_ctx: ccp_dm_free(&ctx); e_key: ccp_dm_free(&key); return ret; } static noinline_for_stack int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_aes_engine *aes = &cmd->u.aes; struct ccp_dm_workarea key, ctx, final_wa, tag; struct ccp_data src, dst; struct ccp_data aad; struct ccp_op op; unsigned long long *final; unsigned int dm_offset; unsigned int authsize; unsigned int jobid; unsigned int ilen; bool in_place = true; /* Default value */ int ret; struct scatterlist *p_inp, sg_inp[2]; struct scatterlist *p_tag, sg_tag[2]; struct scatterlist *p_outp, sg_outp[2]; struct scatterlist *p_aad; if (!aes->iv) return -EINVAL; if (!((aes->key_len == AES_KEYSIZE_128) || (aes->key_len == AES_KEYSIZE_192) || (aes->key_len == AES_KEYSIZE_256))) return -EINVAL; if (!aes->key) /* Gotta have a key SGL */ return -EINVAL; /* Zero defaults to 16 bytes, the maximum size */ authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE; switch (authsize) { case 16: case 15: case 14: case 13: case 12: case 8: case 4: break; default: return -EINVAL; } /* First, decompose the source buffer into AAD & PT, * and the destination buffer into AAD, CT & tag, or * the input into CT & tag. 
* It is expected that the input and output SGs will * be valid, even if the AAD and input lengths are 0. */ p_aad = aes->src; p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len); p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len); if (aes->action == CCP_AES_ACTION_ENCRYPT) { ilen = aes->src_len; p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen); } else { /* Input length for decryption includes tag */ ilen = aes->src_len - authsize; p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen); } jobid = CCP_NEW_JOBID(cmd_q->ccp); memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = jobid; op.sb_key = cmd_q->sb_key; /* Pre-allocated */ op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ op.init = 1; op.u.aes.type = aes->type; /* Copy the key to the LSB */ ret = ccp_init_dm_workarea(&key, cmd_q, CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, DMA_TO_DEVICE); if (ret) return ret; dm_offset = CCP_SB_BYTES - aes->key_len; ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); if (ret) goto e_key; ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_key; } /* Copy the context (IV) to the LSB. * There is an assumption here that the IV is 96 bits in length, plus * a nonce of 32 bits. If no IV is present, use a zeroed buffer. */ ret = ccp_init_dm_workarea(&ctx, cmd_q, CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, DMA_BIDIRECTIONAL); if (ret) goto e_key; dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len; ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); if (ret) goto e_ctx; ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_ctx; } op.init = 1; if (aes->aad_len > 0) { /* Step 1: Run a GHASH over the Additional Authenticated Data */ ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len, AES_BLOCK_SIZE, DMA_TO_DEVICE); if (ret) goto e_ctx; op.u.aes.mode = CCP_AES_MODE_GHASH; op.u.aes.action = CCP_AES_GHASHAAD; while (aad.sg_wa.bytes_left) { ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true); ret = cmd_q->ccp->vdata->perform->aes(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_aad; } ccp_process_data(&aad, NULL, &op); op.init = 0; } } op.u.aes.mode = CCP_AES_MODE_GCTR; op.u.aes.action = aes->action; if (ilen > 0) { /* Step 2: Run a GCTR over the plaintext */ in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false; ret = ccp_init_data(&src, cmd_q, p_inp, ilen, AES_BLOCK_SIZE, in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (ret) goto e_ctx; if (in_place) { dst = src; } else { ret = ccp_init_data(&dst, cmd_q, p_outp, ilen, AES_BLOCK_SIZE, DMA_FROM_DEVICE); if (ret) goto e_src; } op.soc = 0; op.eom = 0; op.init = 1; while (src.sg_wa.bytes_left) { ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true); if (!src.sg_wa.bytes_left) { unsigned int nbytes = ilen % AES_BLOCK_SIZE; if (nbytes) { op.eom = 1; op.u.aes.size = (nbytes * 8) - 1; } } ret = cmd_q->ccp->vdata->perform->aes(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ccp_process_data(&src, &dst, &op); op.init = 0; } } /* Step 3: Update the IV portion of the context with the original IV */ ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); if (ret) goto e_dst; ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } /* Step 4: Concatenate the lengths of the AAD and source, and * hash that 16 byte buffer. */ ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE, DMA_BIDIRECTIONAL); if (ret) goto e_dst; final = (unsigned long long *) final_wa.address; final[0] = cpu_to_be64(aes->aad_len * 8); final[1] = cpu_to_be64(ilen * 8); memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = jobid; op.sb_key = cmd_q->sb_key; /* Pre-allocated */ op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ op.init = 1; op.u.aes.type = aes->type; op.u.aes.mode = CCP_AES_MODE_GHASH; op.u.aes.action = CCP_AES_GHASHFINAL; op.src.type = CCP_MEMTYPE_SYSTEM; op.src.u.dma.address = final_wa.dma.address; op.src.u.dma.length = AES_BLOCK_SIZE; op.dst.type = CCP_MEMTYPE_SYSTEM; op.dst.u.dma.address = final_wa.dma.address; op.dst.u.dma.length = AES_BLOCK_SIZE; op.eom = 1; op.u.aes.size = 0; ret = cmd_q->ccp->vdata->perform->aes(&op); if (ret) goto e_dst; if (aes->action == CCP_AES_ACTION_ENCRYPT) { /* Put the ciphered tag after the ciphertext. */ ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize); } else { /* Does this ciphered tag match the input? */ ret = ccp_init_dm_workarea(&tag, cmd_q, authsize, DMA_BIDIRECTIONAL); if (ret) goto e_tag; ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize); if (ret) goto e_tag; ret = crypto_memneq(tag.address, final_wa.address, authsize) ? 
-EBADMSG : 0; ccp_dm_free(&tag); } e_tag: ccp_dm_free(&final_wa); e_dst: if (ilen > 0 && !in_place) ccp_free_data(&dst, cmd_q); e_src: if (ilen > 0) ccp_free_data(&src, cmd_q); e_aad: if (aes->aad_len) ccp_free_data(&aad, cmd_q); e_ctx: ccp_dm_free(&ctx); e_key: ccp_dm_free(&key); return ret; } static noinline_for_stack int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_aes_engine *aes = &cmd->u.aes; struct ccp_dm_workarea key, ctx; struct ccp_data src, dst; struct ccp_op op; unsigned int dm_offset; bool in_place = false; int ret; if (!((aes->key_len == AES_KEYSIZE_128) || (aes->key_len == AES_KEYSIZE_192) || (aes->key_len == AES_KEYSIZE_256))) return -EINVAL; if (((aes->mode == CCP_AES_MODE_ECB) || (aes->mode == CCP_AES_MODE_CBC)) && (aes->src_len & (AES_BLOCK_SIZE - 1))) return -EINVAL; if (!aes->key || !aes->src || !aes->dst) return -EINVAL; if (aes->mode != CCP_AES_MODE_ECB) { if (aes->iv_len != AES_BLOCK_SIZE) return -EINVAL; if (!aes->iv) return -EINVAL; } BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1); BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1); ret = -EIO; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); op.sb_key = cmd_q->sb_key; op.sb_ctx = cmd_q->sb_ctx; op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1; op.u.aes.type = aes->type; op.u.aes.mode = aes->mode; op.u.aes.action = aes->action; /* All supported key sizes fit in a single (32-byte) SB entry * and must be in little endian format. Use the 256-bit byte * swap passthru option to convert from big endian to little * endian. */ ret = ccp_init_dm_workarea(&key, cmd_q, CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES, DMA_TO_DEVICE); if (ret) return ret; dm_offset = CCP_SB_BYTES - aes->key_len; ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); if (ret) goto e_key; ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_key; } /* The AES context fits in a single (32-byte) SB entry and * must be in little endian format. Use the 256-bit byte swap * passthru option to convert from big endian to little endian. */ ret = ccp_init_dm_workarea(&ctx, cmd_q, CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, DMA_BIDIRECTIONAL); if (ret) goto e_key; if (aes->mode != CCP_AES_MODE_ECB) { /* Load the AES context - convert to LE */ dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); if (ret) goto e_ctx; ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_ctx; } } switch (aes->mode) { case CCP_AES_MODE_CFB: /* CFB128 only */ case CCP_AES_MODE_CTR: op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1; break; default: op.u.aes.size = 0; } /* Prepare the input and output data workareas. For in-place * operations we need to set the dma direction to BIDIRECTIONAL * and copy the src workarea to the dst workarea. */ if (sg_virt(aes->src) == sg_virt(aes->dst)) in_place = true; ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len, AES_BLOCK_SIZE, in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (ret) goto e_ctx; if (in_place) { dst = src; } else { ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len, AES_BLOCK_SIZE, DMA_FROM_DEVICE); if (ret) goto e_src; } /* Send data to the CCP AES engine */ while (src.sg_wa.bytes_left) { ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true); if (!src.sg_wa.bytes_left) { op.eom = 1; /* Since we don't retrieve the AES context in ECB * mode we have to wait for the operation to complete * on the last piece of data */ if (aes->mode == CCP_AES_MODE_ECB) op.soc = 1; } ret = cmd_q->ccp->vdata->perform->aes(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ccp_process_data(&src, &dst, &op); } if (aes->mode != CCP_AES_MODE_ECB) { /* Retrieve the AES context - convert from LE to BE using * 32-byte (256-bit) byteswapping */ ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } /* ...but we only need AES_BLOCK_SIZE bytes */ dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); } e_dst: if (!in_place) ccp_free_data(&dst, cmd_q); e_src: ccp_free_data(&src, cmd_q); e_ctx: ccp_dm_free(&ctx); e_key: ccp_dm_free(&key); return ret; } static noinline_for_stack int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_xts_aes_engine *xts = &cmd->u.xts; struct ccp_dm_workarea key, ctx; struct ccp_data src, dst; struct ccp_op op; unsigned int unit_size, dm_offset; bool in_place = false; unsigned int sb_count; enum ccp_aes_type aestype; int ret; switch (xts->unit_size) { case CCP_XTS_AES_UNIT_SIZE_16: unit_size = 16; break; case CCP_XTS_AES_UNIT_SIZE_512: unit_size = 512; break; case CCP_XTS_AES_UNIT_SIZE_1024: unit_size = 1024; break; case CCP_XTS_AES_UNIT_SIZE_2048: unit_size = 2048; break; case CCP_XTS_AES_UNIT_SIZE_4096: unit_size = 4096; break; default: return -EINVAL; } if (xts->key_len == AES_KEYSIZE_128) aestype = CCP_AES_TYPE_128; else if (xts->key_len == AES_KEYSIZE_256) aestype = CCP_AES_TYPE_256; else return -EINVAL; if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1))) return -EINVAL; if (xts->iv_len != AES_BLOCK_SIZE) return -EINVAL; if (!xts->key || !xts->iv || !xts->src || !xts->dst) return -EINVAL; BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1); BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1); ret = -EIO; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); op.sb_key = cmd_q->sb_key; op.sb_ctx = cmd_q->sb_ctx; op.init = 1; op.u.xts.type = aestype; op.u.xts.action = xts->action; op.u.xts.unit_size = xts->unit_size; /* A version 3 device only supports 128-bit keys, which fits into a * single SB entry. A version 5 device uses a 512-bit vector, so two * SB entries. */ if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) sb_count = CCP_XTS_AES_KEY_SB_COUNT; else sb_count = CCP5_XTS_AES_KEY_SB_COUNT; ret = ccp_init_dm_workarea(&key, cmd_q, sb_count * CCP_SB_BYTES, DMA_TO_DEVICE); if (ret) return ret; if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) { /* All supported key sizes must be in little endian format. * Use the 256-bit byte swap passthru option to convert from * big endian to little endian. 
*/ dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128; ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len); if (ret) goto e_key; ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len); if (ret) goto e_key; } else { /* Version 5 CCPs use a 512-bit space for the key: each portion * occupies 256 bits, or one entire slot, and is zero-padded. */ unsigned int pad; dm_offset = CCP_SB_BYTES; pad = dm_offset - xts->key_len; ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len); if (ret) goto e_key; ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len, xts->key_len); if (ret) goto e_key; } ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_key; } /* The AES context fits in a single (32-byte) SB entry and * for XTS is already in little endian format so no byte swapping * is needed. */ ret = ccp_init_dm_workarea(&ctx, cmd_q, CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES, DMA_BIDIRECTIONAL); if (ret) goto e_key; ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len); if (ret) goto e_ctx; ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_NOOP); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_ctx; } /* Prepare the input and output data workareas. For in-place * operations we need to set the dma direction to BIDIRECTIONAL * and copy the src workarea to the dst workarea. */ if (sg_virt(xts->src) == sg_virt(xts->dst)) in_place = true; ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len, unit_size, in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (ret) goto e_ctx; if (in_place) { dst = src; } else { ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len, unit_size, DMA_FROM_DEVICE); if (ret) goto e_src; } /* Send data to the CCP AES engine */ while (src.sg_wa.bytes_left) { ccp_prepare_data(&src, &dst, &op, unit_size, true); if (!src.sg_wa.bytes_left) op.eom = 1; ret = cmd_q->ccp->vdata->perform->xts_aes(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ccp_process_data(&src, &dst, &op); } /* Retrieve the AES context - convert from LE to BE using * 32-byte (256-bit) byteswapping */ ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } /* ...but we only need AES_BLOCK_SIZE bytes */ dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len); e_dst: if (!in_place) ccp_free_data(&dst, cmd_q); e_src: ccp_free_data(&src, cmd_q); e_ctx: ccp_dm_free(&ctx); e_key: ccp_dm_free(&key); return ret; } static noinline_for_stack int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_des3_engine *des3 = &cmd->u.des3; struct ccp_dm_workarea key, ctx; struct ccp_data src, dst; struct ccp_op op; unsigned int dm_offset; unsigned int len_singlekey; bool in_place = false; int ret; /* Error checks */ if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) return -EINVAL; if (!cmd_q->ccp->vdata->perform->des3) return -EINVAL; if (des3->key_len != DES3_EDE_KEY_SIZE) return -EINVAL; if (((des3->mode == CCP_DES3_MODE_ECB) || (des3->mode == CCP_DES3_MODE_CBC)) && (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1))) return -EINVAL; if (!des3->key || !des3->src || !des3->dst) return -EINVAL; if (des3->mode != CCP_DES3_MODE_ECB) { if (des3->iv_len != DES3_EDE_BLOCK_SIZE) return -EINVAL; if (!des3->iv) return -EINVAL; } ret = -EIO; /* Zero out all the fields of the command desc */ memset(&op, 0, 
sizeof(op)); /* Set up the Function field */ op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); op.sb_key = cmd_q->sb_key; op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1; op.u.des3.type = des3->type; op.u.des3.mode = des3->mode; op.u.des3.action = des3->action; /* * All supported key sizes fit in a single (32-byte) KSB entry and * (like AES) must be in little endian format. Use the 256-bit byte * swap passthru option to convert from big endian to little endian. */ ret = ccp_init_dm_workarea(&key, cmd_q, CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES, DMA_TO_DEVICE); if (ret) return ret; /* * The contents of the key triplet are in the reverse order of what * is required by the engine. Copy the 3 pieces individually to put * them where they belong. */ dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */ len_singlekey = des3->key_len / 3; ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey, des3->key, 0, len_singlekey); if (ret) goto e_key; ret = ccp_set_dm_area(&key, dm_offset + len_singlekey, des3->key, len_singlekey, len_singlekey); if (ret) goto e_key; ret = ccp_set_dm_area(&key, dm_offset, des3->key, 2 * len_singlekey, len_singlekey); if (ret) goto e_key; /* Copy the key to the SB */ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_key; } /* * The DES3 context fits in a single (32-byte) KSB entry and * must be in little endian format. Use the 256-bit byte swap * passthru option to convert from big endian to little endian. */ if (des3->mode != CCP_DES3_MODE_ECB) { op.sb_ctx = cmd_q->sb_ctx; ret = ccp_init_dm_workarea(&ctx, cmd_q, CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES, DMA_BIDIRECTIONAL); if (ret) goto e_key; /* Load the context into the LSB */ dm_offset = CCP_SB_BYTES - des3->iv_len; ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, des3->iv_len); if (ret) goto e_ctx; ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_ctx; } } /* * Prepare the input and output data workareas. For in-place * operations we need to set the dma direction to BIDIRECTIONAL * and copy the src workarea to the dst workarea. */ if (sg_virt(des3->src) == sg_virt(des3->dst)) in_place = true; ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len, DES3_EDE_BLOCK_SIZE, in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (ret) goto e_ctx; if (in_place) dst = src; else { ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len, DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE); if (ret) goto e_src; } /* Send data to the CCP DES3 engine */ while (src.sg_wa.bytes_left) { ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true); if (!src.sg_wa.bytes_left) { op.eom = 1; /* Since we don't retrieve the context in ECB mode * we have to wait for the operation to complete * on the last piece of data */ op.soc = 0; } ret = cmd_q->ccp->vdata->perform->des3(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ccp_process_data(&src, &dst, &op); } if (des3->mode != CCP_DES3_MODE_ECB) { /* Retrieve the context and make BE */ ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } /* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */ ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0, DES3_EDE_BLOCK_SIZE); } e_dst: if (!in_place) ccp_free_data(&dst, cmd_q); e_src: ccp_free_data(&src, cmd_q); e_ctx: if (des3->mode != CCP_DES3_MODE_ECB) ccp_dm_free(&ctx); e_key: ccp_dm_free(&key); return ret; } static noinline_for_stack int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_sha_engine *sha = &cmd->u.sha; struct ccp_dm_workarea ctx; struct ccp_data src; struct ccp_op op; unsigned int ioffset, ooffset; unsigned int digest_size; int sb_count; const void *init; u64 block_size; int ctx_size; int ret; switch (sha->type) { case CCP_SHA_TYPE_1: if (sha->ctx_len < SHA1_DIGEST_SIZE) return -EINVAL; block_size = SHA1_BLOCK_SIZE; break; case CCP_SHA_TYPE_224: if (sha->ctx_len < SHA224_DIGEST_SIZE) return -EINVAL; block_size = SHA224_BLOCK_SIZE; break; case CCP_SHA_TYPE_256: if (sha->ctx_len < SHA256_DIGEST_SIZE) return -EINVAL; block_size = SHA256_BLOCK_SIZE; break; case CCP_SHA_TYPE_384: if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0) || sha->ctx_len < SHA384_DIGEST_SIZE) return -EINVAL; block_size = SHA384_BLOCK_SIZE; break; case CCP_SHA_TYPE_512: if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0) || sha->ctx_len < SHA512_DIGEST_SIZE) return -EINVAL; block_size = SHA512_BLOCK_SIZE; break; default: return -EINVAL; } if (!sha->ctx) return -EINVAL; if (!sha->final && (sha->src_len & (block_size - 1))) return -EINVAL; /* The version 3 device can't handle zero-length input */ if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) { if (!sha->src_len) { unsigned int digest_len; const u8 *sha_zero; /* Not final, just return */ if (!sha->final) return 0; /* CCP can't do a zero length sha operation so the * caller must buffer the data. */ if (sha->msg_bits) return -EINVAL; /* The CCP cannot perform zero-length sha operations * so the caller is required to buffer data for the * final operation. However, a sha operation for a * message with a total length of zero is valid so * known values are required to supply the result. 
*/ switch (sha->type) { case CCP_SHA_TYPE_1: sha_zero = sha1_zero_message_hash; digest_len = SHA1_DIGEST_SIZE; break; case CCP_SHA_TYPE_224: sha_zero = sha224_zero_message_hash; digest_len = SHA224_DIGEST_SIZE; break; case CCP_SHA_TYPE_256: sha_zero = sha256_zero_message_hash; digest_len = SHA256_DIGEST_SIZE; break; default: return -EINVAL; } scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0, digest_len, 1); return 0; } } /* Set variables used throughout */ switch (sha->type) { case CCP_SHA_TYPE_1: digest_size = SHA1_DIGEST_SIZE; init = (void *) ccp_sha1_init; ctx_size = SHA1_DIGEST_SIZE; sb_count = 1; if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE; else ooffset = ioffset = 0; break; case CCP_SHA_TYPE_224: digest_size = SHA224_DIGEST_SIZE; init = (void *) ccp_sha224_init; ctx_size = SHA256_DIGEST_SIZE; sb_count = 1; ioffset = 0; if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE; else ooffset = 0; break; case CCP_SHA_TYPE_256: digest_size = SHA256_DIGEST_SIZE; init = (void *) ccp_sha256_init; ctx_size = SHA256_DIGEST_SIZE; sb_count = 1; ooffset = ioffset = 0; break; case CCP_SHA_TYPE_384: digest_size = SHA384_DIGEST_SIZE; init = (void *) ccp_sha384_init; ctx_size = SHA512_DIGEST_SIZE; sb_count = 2; ioffset = 0; ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE; break; case CCP_SHA_TYPE_512: digest_size = SHA512_DIGEST_SIZE; init = (void *) ccp_sha512_init; ctx_size = SHA512_DIGEST_SIZE; sb_count = 2; ooffset = ioffset = 0; break; default: ret = -EINVAL; goto e_data; } /* For zero-length plaintext the src pointer is ignored; * otherwise both parts must be valid */ if (sha->src_len && !sha->src) return -EINVAL; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ op.u.sha.type = sha->type; op.u.sha.msg_bits = sha->msg_bits; /* For SHA1/224/256 the context fits in a single (32-byte) SB entry; * SHA384/512 require 2 adjacent SB slots, with the right half in the * first slot, and the left half in the second. Each portion must then * be in little endian format: use the 256-bit byte swap option. 
 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   sb_count * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		return ret;

	if (sha->first) {
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(ctx.address + ioffset, init, ctx_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(ctx.address + ctx_size / 2, init,
			       ctx_size / 2);
			memcpy(ctx.address, init + ctx_size / 2,
			       ctx_size / 2);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Restore the context */
		ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
				      sb_count * CCP_SB_BYTES);
		if (ret)
			goto e_ctx;
	}

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	if (sha->src) {
		/* Send data to the CCP SHA engine; block_size is set above */
		ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
				    block_size, DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, NULL, &op, block_size, false);
			if (sha->final && !src.sg_wa.bytes_left)
				op.eom = 1;

			ret = cmd_q->ccp->vdata->perform->sha(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_data;
			}

			ccp_process_data(&src, NULL, &op);
		}
	} else {
		op.eom = 1;
		ret = cmd_q->ccp->vdata->perform->sha(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_data;
		}
	}

	/* Retrieve the SHA context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping to BE
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_data;
	}

	if (sha->final) {
		/* Finishing up, so get the digest */
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			ccp_get_dm_area(&ctx, ooffset,
					sha->ctx, 0,
					digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			ccp_get_dm_area(&ctx, 0,
					sha->ctx, LSB_ITEM_SIZE - ooffset,
					LSB_ITEM_SIZE);
			ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset,
					sha->ctx, 0,
					LSB_ITEM_SIZE - ooffset);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Stash the context */
		ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}

	if (sha->final && sha->opad) {
		/* HMAC operation, recursively perform final SHA */
		struct ccp_cmd hmac_cmd;
		struct scatterlist sg;
		u8 *hmac_buf;

		if (sha->opad_len != block_size) {
			ret = -EINVAL;
			goto e_data;
		}

		hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
		if (!hmac_buf) {
			ret = -ENOMEM;
			goto e_data;
		}
		sg_init_one(&sg, hmac_buf, block_size + digest_size);

		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(hmac_buf + block_size,
			       ctx.address + ooffset,
			       digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(hmac_buf + block_size,
			       ctx.address + LSB_ITEM_SIZE + ooffset,
			       LSB_ITEM_SIZE);
			memcpy(hmac_buf + block_size +
			       (LSB_ITEM_SIZE - ooffset),
			       ctx.address,
			       LSB_ITEM_SIZE);
			break;
		default:
			/* Unreachable in practice (sha->type was validated
			 * above), but release everything allocated on this
			 * path, including hmac_buf, before bailing out.
			 */
			kfree(hmac_buf);
			ret = -EINVAL;
			goto e_data;
		}

		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
		hmac_cmd.engine = CCP_ENGINE_SHA;
		hmac_cmd.u.sha.type = sha->type;
		hmac_cmd.u.sha.ctx = sha->ctx;
		hmac_cmd.u.sha.ctx_len = sha->ctx_len;
		hmac_cmd.u.sha.src = &sg;
		hmac_cmd.u.sha.src_len = block_size + digest_size;
		hmac_cmd.u.sha.opad = NULL;
		hmac_cmd.u.sha.opad_len = 0;
		hmac_cmd.u.sha.first = 1;
		hmac_cmd.u.sha.final = 1;
		hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

		ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
		if (ret)
			cmd->engine_error = hmac_cmd.engine_error;

		kfree(hmac_buf);
	}
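	/*
	 * For reference: when sha->opad is supplied, the block above is the
	 * outer pass of HMAC, i.e. HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).
	 * The hardware pass that just finished left the inner digest in
	 * sha->ctx; the recursive ccp_run_sha_cmd() then hashes one block of
	 * opad material followed by that digest, which is why msg_bits is
	 * (block_size + digest_size) * 8. The caller is assumed to have
	 * already XORed the key into the ipad/opad blocks it passes in.
	 */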
e_data: if (sha->src) ccp_free_data(&src, cmd_q); e_ctx: ccp_dm_free(&ctx); return ret; } static noinline_for_stack int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_rsa_engine *rsa = &cmd->u.rsa; struct ccp_dm_workarea exp, src, dst; struct ccp_op op; unsigned int sb_count, i_len, o_len; int ret; /* Check against the maximum allowable size, in bits */ if (rsa->key_size > cmd_q->ccp->vdata->rsamax) return -EINVAL; if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst) return -EINVAL; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); /* The RSA modulus must precede the message being acted upon, so * it must be copied to a DMA area where the message and the * modulus can be concatenated. Therefore the input buffer * length required is twice the output buffer length (which * must be a multiple of 256-bits). Compute o_len, i_len in bytes. * Buffer sizes must be a multiple of 32 bytes; rounding up may be * required. */ o_len = 32 * ((rsa->key_size + 255) / 256); i_len = o_len * 2; sb_count = 0; if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) { /* sb_count is the number of storage block slots required * for the modulus. */ sb_count = o_len / CCP_SB_BYTES; op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count); if (!op.sb_key) return -EIO; } else { /* A version 5 device allows a modulus size that will not fit * in the LSB, so the command will transfer it from memory. * Set the sb key to the default, even though it's not used. */ op.sb_key = cmd_q->sb_key; } /* The RSA exponent must be in little endian format. Reverse its * byte order. */ ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE); if (ret) goto e_sb; ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len); if (ret) goto e_exp; if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) { /* Copy the exponent to the local storage block, using * as many 32-byte blocks as were allocated above. It's * already little endian, so no further change is required. */ ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_NOOP); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_exp; } } else { /* The exponent can be retrieved from memory via DMA. */ op.exp.u.dma.address = exp.dma.address; op.exp.u.dma.offset = 0; } /* Concatenate the modulus and the message. Both the modulus and * the operands must be in little endian format. Since the input * is in big endian format it must be converted. 
*/ ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE); if (ret) goto e_exp; ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len); if (ret) goto e_src; ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len); if (ret) goto e_src; /* Prepare the output area for the operation */ ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE); if (ret) goto e_src; op.soc = 1; op.src.u.dma.address = src.dma.address; op.src.u.dma.offset = 0; op.src.u.dma.length = i_len; op.dst.u.dma.address = dst.dma.address; op.dst.u.dma.offset = 0; op.dst.u.dma.length = o_len; op.u.rsa.mod_size = rsa->key_size; op.u.rsa.input_len = i_len; ret = cmd_q->ccp->vdata->perform->rsa(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len); e_dst: ccp_dm_free(&dst); e_src: ccp_dm_free(&src); e_exp: ccp_dm_free(&exp); e_sb: if (sb_count) cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count); return ret; } static noinline_for_stack int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_passthru_engine *pt = &cmd->u.passthru; struct ccp_dm_workarea mask; struct ccp_data src, dst; struct ccp_op op; bool in_place = false; unsigned int i; int ret = 0; if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) return -EINVAL; if (!pt->src || !pt->dst) return -EINVAL; if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { if (pt->mask_len != CCP_PASSTHRU_MASKSIZE) return -EINVAL; if (!pt->mask) return -EINVAL; } BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1); memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { /* Load the mask */ op.sb_key = cmd_q->sb_key; ret = ccp_init_dm_workarea(&mask, cmd_q, CCP_PASSTHRU_SB_COUNT * CCP_SB_BYTES, DMA_TO_DEVICE); if (ret) return ret; ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len); if (ret) goto e_mask; ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_NOOP); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_mask; } } /* Prepare the input and output data workareas. For in-place * operations we need to set the dma direction to BIDIRECTIONAL * and copy the src workarea to the dst workarea. */ if (sg_virt(pt->src) == sg_virt(pt->dst)) in_place = true; ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len, CCP_PASSTHRU_MASKSIZE, in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (ret) goto e_mask; if (in_place) { dst = src; } else { ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len, CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE); if (ret) goto e_src; } /* Send data to the CCP Passthru engine * Because the CCP engine works on a single source and destination * dma address at a time, each entry in the source scatterlist * (after the dma_map_sg call) must be less than or equal to the * (remaining) length in the destination scatterlist entry and the * length must be a multiple of CCP_PASSTHRU_BLOCKSIZE */ dst.sg_wa.sg_used = 0; for (i = 1; i <= src.sg_wa.dma_count; i++) { if (!dst.sg_wa.sg || (dst.sg_wa.sg->length < src.sg_wa.sg->length)) { ret = -EINVAL; goto e_dst; } if (i == src.sg_wa.dma_count) { op.eom = 1; op.soc = 1; } op.src.type = CCP_MEMTYPE_SYSTEM; op.src.u.dma.address = sg_dma_address(src.sg_wa.sg); op.src.u.dma.offset = 0; op.src.u.dma.length = sg_dma_len(src.sg_wa.sg); op.dst.type = CCP_MEMTYPE_SYSTEM; op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg); op.dst.u.dma.offset = dst.sg_wa.sg_used; op.dst.u.dma.length = op.src.u.dma.length; ret = cmd_q->ccp->vdata->perform->passthru(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } dst.sg_wa.sg_used += src.sg_wa.sg->length; if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) { dst.sg_wa.sg = sg_next(dst.sg_wa.sg); dst.sg_wa.sg_used = 0; } src.sg_wa.sg = sg_next(src.sg_wa.sg); } e_dst: if (!in_place) ccp_free_data(&dst, cmd_q); e_src: ccp_free_data(&src, cmd_q); e_mask: if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) ccp_dm_free(&mask); return ret; } static noinline_for_stack int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap; struct ccp_dm_workarea mask; struct ccp_op op; int ret; if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) return -EINVAL; if (!pt->src_dma || !pt->dst_dma) return -EINVAL; if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { if (pt->mask_len != CCP_PASSTHRU_MASKSIZE) return -EINVAL; if (!pt->mask) return -EINVAL; } BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1); memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { /* Load the mask */ op.sb_key = cmd_q->sb_key; mask.length = pt->mask_len; mask.dma.address = pt->mask; mask.dma.length = pt->mask_len; ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_NOOP); if (ret) { cmd->engine_error = cmd_q->cmd_error; return ret; } } /* Send data to the CCP Passthru engine */ op.eom = 1; op.soc = 1; op.src.type = CCP_MEMTYPE_SYSTEM; op.src.u.dma.address = pt->src_dma; op.src.u.dma.offset = 0; op.src.u.dma.length = pt->src_len; op.dst.type = CCP_MEMTYPE_SYSTEM; op.dst.u.dma.address = pt->dst_dma; op.dst.u.dma.offset = 0; op.dst.u.dma.length = pt->src_len; ret = cmd_q->ccp->vdata->perform->passthru(&op); if (ret) cmd->engine_error = cmd_q->cmd_error; return ret; } static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_ecc_engine *ecc = &cmd->u.ecc; struct ccp_dm_workarea src, dst; struct ccp_op op; int ret; u8 *save; if (!ecc->u.mm.operand_1 || (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) if (!ecc->u.mm.operand_2 || (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; if (!ecc->u.mm.result || (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES)) return -EINVAL; memset(&op, 0, sizeof(op)); 
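	/*
	 * Orientation note for the modular-math path below: the modulus and
	 * each operand are byte-reversed (callers pass big endian, the engine
	 * expects little endian) into consecutive CCP_ECC_OPERAND_SIZE slots
	 * of a single source workarea, the requested *_384BIT function is run,
	 * the CCP_ECC_RESULT_SUCCESS bit of the returned status word is
	 * checked, and the result is byte-reversed back into ecc->u.mm.result.
	 */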
op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); /* Concatenate the modulus and the operands. Both the modulus and * the operands must be in little endian format. Since the input * is in big endian format it must be converted and placed in a * fixed length buffer. */ ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE, DMA_TO_DEVICE); if (ret) return ret; /* Save the workarea address since it is updated in order to perform * the concatenation */ save = src.address; /* Copy the ECC modulus */ ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; /* Copy the first operand */ ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0, ecc->u.mm.operand_1_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) { /* Copy the second operand */ ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0, ecc->u.mm.operand_2_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; } /* Restore the workarea address */ src.address = save; /* Prepare the output area for the operation */ ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE, DMA_FROM_DEVICE); if (ret) goto e_src; op.soc = 1; op.src.u.dma.address = src.dma.address; op.src.u.dma.offset = 0; op.src.u.dma.length = src.length; op.dst.u.dma.address = dst.dma.address; op.dst.u.dma.offset = 0; op.dst.u.dma.length = dst.length; op.u.ecc.function = cmd->u.ecc.function; ret = cmd_q->ccp->vdata->perform->ecc(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ecc->ecc_result = le16_to_cpup( (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET)); if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) { ret = -EIO; goto e_dst; } /* Save the ECC result */ ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0, CCP_ECC_MODULUS_BYTES); e_dst: ccp_dm_free(&dst); e_src: ccp_dm_free(&src); return ret; } static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_ecc_engine *ecc = &cmd->u.ecc; struct ccp_dm_workarea src, dst; struct ccp_op op; int ret; u8 *save; if (!ecc->u.pm.point_1.x || (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) || !ecc->u.pm.point_1.y || (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) { if (!ecc->u.pm.point_2.x || (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) || !ecc->u.pm.point_2.y || (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; } else { if (!ecc->u.pm.domain_a || (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) if (!ecc->u.pm.scalar || (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; } if (!ecc->u.pm.result.x || (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) || !ecc->u.pm.result.y || (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES)) return -EINVAL; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); /* Concatenate the modulus and the operands. Both the modulus and * the operands must be in little endian format. Since the input * is in big endian format it must be converted and placed in a * fixed length buffer. 
*/ ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE, DMA_TO_DEVICE); if (ret) return ret; /* Save the workarea address since it is updated in order to perform * the concatenation */ save = src.address; /* Copy the ECC modulus */ ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; /* Copy the first point X and Y coordinate */ ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0, ecc->u.pm.point_1.x_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0, ecc->u.pm.point_1.y_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; /* Set the first point Z coordinate to 1 */ *src.address = 0x01; src.address += CCP_ECC_OPERAND_SIZE; if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) { /* Copy the second point X and Y coordinate */ ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0, ecc->u.pm.point_2.x_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0, ecc->u.pm.point_2.y_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; /* Set the second point Z coordinate to 1 */ *src.address = 0x01; src.address += CCP_ECC_OPERAND_SIZE; } else { /* Copy the Domain "a" parameter */ ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0, ecc->u.pm.domain_a_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) { /* Copy the scalar value */ ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.scalar, 0, ecc->u.pm.scalar_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; } } /* Restore the workarea address */ src.address = save; /* Prepare the output area for the operation */ ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE, DMA_FROM_DEVICE); if (ret) goto e_src; op.soc = 1; op.src.u.dma.address = src.dma.address; op.src.u.dma.offset = 0; op.src.u.dma.length = src.length; op.dst.u.dma.address = dst.dma.address; op.dst.u.dma.offset = 0; op.dst.u.dma.length = dst.length; op.u.ecc.function = cmd->u.ecc.function; ret = cmd_q->ccp->vdata->perform->ecc(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ecc->ecc_result = le16_to_cpup( (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET)); if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) { ret = -EIO; goto e_dst; } /* Save the workarea address since it is updated as we walk through * to copy the point math result */ save = dst.address; /* Save the ECC result X and Y coordinates */ ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0, CCP_ECC_MODULUS_BYTES); dst.address += CCP_ECC_OUTPUT_SIZE; ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0, CCP_ECC_MODULUS_BYTES); dst.address += CCP_ECC_OUTPUT_SIZE; /* Restore the workarea address */ dst.address = save; e_dst: ccp_dm_free(&dst); e_src: ccp_dm_free(&src); return ret; } static noinline_for_stack int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_ecc_engine *ecc = &cmd->u.ecc; ecc->ecc_result = 0; if (!ecc->mod || (ecc->mod_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; switch (ecc->function) { case CCP_ECC_FUNCTION_MMUL_384BIT: case CCP_ECC_FUNCTION_MADD_384BIT: case CCP_ECC_FUNCTION_MINV_384BIT: return ccp_run_ecc_mm_cmd(cmd_q, cmd); case CCP_ECC_FUNCTION_PADD_384BIT: case CCP_ECC_FUNCTION_PMUL_384BIT: case CCP_ECC_FUNCTION_PDBL_384BIT: return ccp_run_ecc_pm_cmd(cmd_q, cmd); default: return -EINVAL; } } int 
ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { int ret; cmd->engine_error = 0; cmd_q->cmd_error = 0; cmd_q->int_rcvd = 0; cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q); switch (cmd->engine) { case CCP_ENGINE_AES: switch (cmd->u.aes.mode) { case CCP_AES_MODE_CMAC: ret = ccp_run_aes_cmac_cmd(cmd_q, cmd); break; case CCP_AES_MODE_GCM: ret = ccp_run_aes_gcm_cmd(cmd_q, cmd); break; default: ret = ccp_run_aes_cmd(cmd_q, cmd); break; } break; case CCP_ENGINE_XTS_AES_128: ret = ccp_run_xts_aes_cmd(cmd_q, cmd); break; case CCP_ENGINE_DES3: ret = ccp_run_des3_cmd(cmd_q, cmd); break; case CCP_ENGINE_SHA: ret = ccp_run_sha_cmd(cmd_q, cmd); break; case CCP_ENGINE_RSA: ret = ccp_run_rsa_cmd(cmd_q, cmd); break; case CCP_ENGINE_PASSTHRU: if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP) ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd); else ret = ccp_run_passthru_cmd(cmd_q, cmd); break; case CCP_ENGINE_ECC: ret = ccp_run_ecc_cmd(cmd_q, cmd); break; default: ret = -EINVAL; } return ret; }
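/*
 * Illustrative sketch (not part of the upstream driver): the minimal set of
 * ccp_cmd fields a caller would fill for a one-shot SHA-256 of a scatterlist
 * by the time the command reaches ccp_run_cmd(). The names "data_sg",
 * "ctx_sg", "len" and "cmd_q" are placeholders assumed to be set up
 * elsewhere, and real users normally submit through the driver's queueing
 * API rather than calling ccp_run_cmd() directly.
 *
 *	struct ccp_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.engine = CCP_ENGINE_SHA;
 *	cmd.u.sha.type = CCP_SHA_TYPE_256;
 *	cmd.u.sha.ctx = ctx_sg;		\/\* receives the 32-byte digest *\/
 *	cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE;
 *	cmd.u.sha.src = data_sg;
 *	cmd.u.sha.src_len = len;
 *	cmd.u.sha.first = 1;
 *	cmd.u.sha.final = 1;
 *	cmd.u.sha.msg_bits = (u64)len << 3;
 *
 *	ret = ccp_run_cmd(cmd_q, &cmd);
 */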
// SPDX-License-Identifier: GPL-2.0 /* * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH) * * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * * Interactivity improvements by Mike Galbraith * (C) 2007 Mike Galbraith <efault@gmx.de> * * Various enhancements by Dmitry Adamushko. * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com> * * Group scheduling enhancements by Srivatsa Vaddagiri * Copyright IBM Corporation, 2007 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com> * * Scaled math optimizations by Thomas Gleixner * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de> * * Adaptive scheduling granularity, math enhancements by Peter Zijlstra * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra */ #include "sched.h" #include <trace/events/sched.h> /* * Targeted preemption latency for CPU-bound tasks: * * NOTE: this latency value is not the same as the concept of * 'timeslice length' - timeslices in CFS are of variable length * and have no persistent notion like in traditional, time-slice * based scheduling concepts. * * (to see the precise effective timeslice length of your workload, * run vmstat and monitor the context-switches (cs) field) * * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds) */ unsigned int sysctl_sched_latency = 6000000ULL; static unsigned int normalized_sysctl_sched_latency = 6000000ULL; /* * The initial- and re-scaling of tunables is configurable * * Options are: * * SCHED_TUNABLESCALING_NONE - unscaled, always *1 * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus) * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus * * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)) */ enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG; /* * Minimal preemption granularity for CPU-bound tasks: * * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) */ unsigned int sysctl_sched_min_granularity = 750000ULL; static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL; /* * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity */ static unsigned int sched_nr_latency = 8; /* * After fork, child runs first. If set to 0 (default) then * parent will (try to) run first. */ unsigned int sysctl_sched_child_runs_first __read_mostly; /* * SCHED_OTHER wake-up granularity. * * This option delays the preemption effects of decoupled workloads * and reduces their over-scheduling. Synchronous workloads will still * have immediate wakeup/sleep latencies. * * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) */ unsigned int sysctl_sched_wakeup_granularity = 1000000UL; static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; const_debug unsigned int sysctl_sched_migration_cost = 500000UL; #ifdef CONFIG_SMP /* * For asym packing, by default the lower numbered CPU has higher priority. */ int __weak arch_asym_cpu_priority(int cpu) { return -cpu; } /* * The margin used when comparing utilization with CPU capacity: * util * margin < capacity * 1024 * * (default: ~20%) */ static unsigned int capacity_margin = 1280; #endif #ifdef CONFIG_CFS_BANDWIDTH /* * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool * each time a cfs_rq requests quota. * * Note: in the case that the slice exceeds the runtime remaining (either due * to consumption or the quota being specified to be smaller than the slice) * we will always only issue the remaining available time. 
* * (default: 5 msec, units: microseconds) */ unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL; #endif static inline void update_load_add(struct load_weight *lw, unsigned long inc) { lw->weight += inc; lw->inv_weight = 0; } static inline void update_load_sub(struct load_weight *lw, unsigned long dec) { lw->weight -= dec; lw->inv_weight = 0; } static inline void update_load_set(struct load_weight *lw, unsigned long w) { lw->weight = w; lw->inv_weight = 0; } /* * Increase the granularity value when there are more CPUs, * because with more CPUs the 'effective latency' as visible * to users decreases. But the relationship is not linear, * so pick a second-best guess by going with the log2 of the * number of CPUs. * * This idea comes from the SD scheduler of Con Kolivas: */ static unsigned int get_update_sysctl_factor(void) { unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8); unsigned int factor; switch (sysctl_sched_tunable_scaling) { case SCHED_TUNABLESCALING_NONE: factor = 1; break; case SCHED_TUNABLESCALING_LINEAR: factor = cpus; break; case SCHED_TUNABLESCALING_LOG: default: factor = 1 + ilog2(cpus); break; } return factor; } static void update_sysctl(void) { unsigned int factor = get_update_sysctl_factor(); #define SET_SYSCTL(name) \ (sysctl_##name = (factor) * normalized_sysctl_##name) SET_SYSCTL(sched_min_granularity); SET_SYSCTL(sched_latency); SET_SYSCTL(sched_wakeup_granularity); #undef SET_SYSCTL } void sched_init_granularity(void) { update_sysctl(); } #define WMULT_CONST (~0U) #define WMULT_SHIFT 32 static void __update_inv_weight(struct load_weight *lw) { unsigned long w; if (likely(lw->inv_weight)) return; w = scale_load_down(lw->weight); if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) lw->inv_weight = 1; else if (unlikely(!w)) lw->inv_weight = WMULT_CONST; else lw->inv_weight = WMULT_CONST / w; } /* * delta_exec * weight / lw.weight * OR * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT * * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case * we're guaranteed shift stays positive because inv_weight is guaranteed to * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22. * * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus * weight/lw.weight <= 1, and therefore our shift will also be positive. 
*/ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw) { u64 fact = scale_load_down(weight); int shift = WMULT_SHIFT; __update_inv_weight(lw); if (unlikely(fact >> 32)) { while (fact >> 32) { fact >>= 1; shift--; } } /* hint to use a 32x32->64 mul */ fact = (u64)(u32)fact * lw->inv_weight; while (fact >> 32) { fact >>= 1; shift--; } return mul_u64_u32_shr(delta_exec, fact, shift); } const struct sched_class fair_sched_class; /************************************************************** * CFS operations on generic schedulable entities: */ #ifdef CONFIG_FAIR_GROUP_SCHED /* cpu runqueue to which this cfs_rq is attached */ static inline struct rq *rq_of(struct cfs_rq *cfs_rq) { return cfs_rq->rq; } static inline struct task_struct *task_of(struct sched_entity *se) { SCHED_WARN_ON(!entity_is_task(se)); return container_of(se, struct task_struct, se); } /* Walk up scheduling entities hierarchy */ #define for_each_sched_entity(se) \ for (; se; se = se->parent) static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) { return p->se.cfs_rq; } /* runqueue on which this entity is (to be) queued */ static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) { return se->cfs_rq; } /* runqueue "owned" by this group */ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) { return grp->my_q; } static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) { if (!cfs_rq->on_list) { struct rq *rq = rq_of(cfs_rq); int cpu = cpu_of(rq); /* * Ensure we either appear before our parent (if already * enqueued) or force our parent to appear after us when it is * enqueued. The fact that we always enqueue bottom-up * reduces this to two cases and a special case for the root * cfs_rq. Furthermore, it also means that we will always reset * tmp_alone_branch either when the branch is connected * to a tree or when we reach the beg of the tree */ if (cfs_rq->tg->parent && cfs_rq->tg->parent->cfs_rq[cpu]->on_list) { /* * If parent is already on the list, we add the child * just before. Thanks to circular linked property of * the list, this means to put the child at the tail * of the list that starts by parent. */ list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list)); /* * The branch is now connected to its tree so we can * reset tmp_alone_branch to the beginning of the * list. */ rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; } else if (!cfs_rq->tg->parent) { /* * cfs rq without parent should be put * at the tail of the list. */ list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list); /* * We have reach the beg of a tree so we can reset * tmp_alone_branch to the beginning of the list. */ rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; } else { /* * The parent has not already been added so we want to * make sure that it will be put after us. * tmp_alone_branch points to the beg of the branch * where we will add parent. */ list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch); /* * update tmp_alone_branch to points to the new beg * of the branch */ rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list; } cfs_rq->on_list = 1; } } static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) { if (cfs_rq->on_list) { list_del_rcu(&cfs_rq->leaf_cfs_rq_list); cfs_rq->on_list = 0; } } /* Iterate through all leaf cfs_rq's on a runqueue: */ #define for_each_leaf_cfs_rq(rq, cfs_rq) \ list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list) /* Do the two (enqueued) entities belong to the same group ? 
*/ static inline struct cfs_rq * is_same_group(struct sched_entity *se, struct sched_entity *pse) { if (se->cfs_rq == pse->cfs_rq) return se->cfs_rq; return NULL; } static inline struct sched_entity *parent_entity(struct sched_entity *se) { return se->parent; } static void find_matching_se(struct sched_entity **se, struct sched_entity **pse) { int se_depth, pse_depth; /* * preemption test can be made between sibling entities who are in the * same cfs_rq i.e who have a common parent. Walk up the hierarchy of * both tasks until we find their ancestors who are siblings of common * parent. */ /* First walk up until both entities are at same depth */ se_depth = (*se)->depth; pse_depth = (*pse)->depth; while (se_depth > pse_depth) { se_depth--; *se = parent_entity(*se); } while (pse_depth > se_depth) { pse_depth--; *pse = parent_entity(*pse); } while (!is_same_group(*se, *pse)) { *se = parent_entity(*se); *pse = parent_entity(*pse); } } #else /* !CONFIG_FAIR_GROUP_SCHED */ static inline struct task_struct *task_of(struct sched_entity *se) { return container_of(se, struct task_struct, se); } static inline struct rq *rq_of(struct cfs_rq *cfs_rq) { return container_of(cfs_rq, struct rq, cfs); } #define for_each_sched_entity(se) \ for (; se; se = NULL) static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) { return &task_rq(p)->cfs; } static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) { struct task_struct *p = task_of(se); struct rq *rq = task_rq(p); return &rq->cfs; } /* runqueue "owned" by this group */ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) { return NULL; } static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) { } static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) { } #define for_each_leaf_cfs_rq(rq, cfs_rq) \ for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL) static inline struct sched_entity *parent_entity(struct sched_entity *se) { return NULL; } static inline void find_matching_se(struct sched_entity **se, struct sched_entity **pse) { } #endif /* CONFIG_FAIR_GROUP_SCHED */ static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec); /************************************************************** * Scheduling class tree data structure manipulation methods: */ static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime) { s64 delta = (s64)(vruntime - max_vruntime); if (delta > 0) max_vruntime = vruntime; return max_vruntime; } static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime) { s64 delta = (s64)(vruntime - min_vruntime); if (delta < 0) min_vruntime = vruntime; return min_vruntime; } static inline int entity_before(struct sched_entity *a, struct sched_entity *b) { return (s64)(a->vruntime - b->vruntime) < 0; } static void update_min_vruntime(struct cfs_rq *cfs_rq) { struct sched_entity *curr = cfs_rq->curr; struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline); u64 vruntime = cfs_rq->min_vruntime; if (curr) { if (curr->on_rq) vruntime = curr->vruntime; else curr = NULL; } if (leftmost) { /* non-empty tree */ struct sched_entity *se; se = rb_entry(leftmost, struct sched_entity, run_node); if (!curr) vruntime = se->vruntime; else vruntime = min_vruntime(vruntime, se->vruntime); } /* ensure we never gain time by being placed backwards. 
*/ cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime); #ifndef CONFIG_64BIT smp_wmb(); cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; #endif } /* * Enqueue an entity into the rb-tree: */ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) { struct rb_node **link = &cfs_rq->tasks_timeline.rb_root.rb_node; struct rb_node *parent = NULL; struct sched_entity *entry; bool leftmost = true; /* * Find the right place in the rbtree: */ while (*link) { parent = *link; entry = rb_entry(parent, struct sched_entity, run_node); /* * We dont care about collisions. Nodes with * the same key stay together. */ if (entity_before(se, entry)) { link = &parent->rb_left; } else { link = &parent->rb_right; leftmost = false; } } rb_link_node(&se->run_node, parent, link); rb_insert_color_cached(&se->run_node, &cfs_rq->tasks_timeline, leftmost); } static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) { rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline); } struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) { struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline); if (!left) return NULL; return rb_entry(left, struct sched_entity, run_node); } static struct sched_entity *__pick_next_entity(struct sched_entity *se) { struct rb_node *next = rb_next(&se->run_node); if (!next) return NULL; return rb_entry(next, struct sched_entity, run_node); } #ifdef CONFIG_SCHED_DEBUG struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) { struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root); if (!last) return NULL; return rb_entry(last, struct sched_entity, run_node); } /************************************************************** * Scheduling class statistics methods: */ int sched_proc_update_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); unsigned int factor = get_update_sysctl_factor(); if (ret || !write) return ret; sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency, sysctl_sched_min_granularity); #define WRT_SYSCTL(name) \ (normalized_sysctl_##name = sysctl_##name / (factor)) WRT_SYSCTL(sched_min_granularity); WRT_SYSCTL(sched_latency); WRT_SYSCTL(sched_wakeup_granularity); #undef WRT_SYSCTL return 0; } #endif /* * delta /= w */ static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) { if (unlikely(se->load.weight != NICE_0_LOAD)) delta = __calc_delta(delta, NICE_0_LOAD, &se->load); return delta; } /* * The idea is to set a period in which each task runs once. * * When there are too many tasks (sched_nr_latency) we have to stretch * this period because otherwise the slices get too small. * * p = (nr <= nl) ? l : l*nr/nl */ static u64 __sched_period(unsigned long nr_running) { if (unlikely(nr_running > sched_nr_latency)) return nr_running * sysctl_sched_min_granularity; else return sysctl_sched_latency; } /* * We calculate the wall-time slice from the period by taking a part * proportional to the weight. 
* * s = p*P[w/rw] */ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) { u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq); for_each_sched_entity(se) { struct load_weight *load; struct load_weight lw; cfs_rq = cfs_rq_of(se); load = &cfs_rq->load; if (unlikely(!se->on_rq)) { lw = cfs_rq->load; update_load_add(&lw, se->load.weight); load = &lw; } slice = __calc_delta(slice, se->load.weight, load); } return slice; } /* * We calculate the vruntime slice of a to-be-inserted task. * * vs = s/w */ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) { return calc_delta_fair(sched_slice(cfs_rq, se), se); } #ifdef CONFIG_SMP #include "pelt.h" #include "sched-pelt.h" static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu); static unsigned long task_h_load(struct task_struct *p); static unsigned long capacity_of(int cpu); /* Give new sched_entity start runnable values to heavy its load in infant time */ void init_entity_runnable_average(struct sched_entity *se) { struct sched_avg *sa = &se->avg; memset(sa, 0, sizeof(*sa)); /* * Tasks are initialized with full load to be seen as heavy tasks until * they get a chance to stabilize to their real load level. * Group entities are initialized with zero load to reflect the fact that * nothing has been attached to the task group yet. */ if (entity_is_task(se)) sa->runnable_load_avg = sa->load_avg = scale_load_down(se->load.weight); se->runnable_weight = se->load.weight; /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */ } static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq); static void attach_entity_cfs_rq(struct sched_entity *se); /* * With new tasks being created, their initial util_avgs are extrapolated * based on the cfs_rq's current util_avg: * * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight * * However, in many cases, the above util_avg does not give a desired * value. Moreover, the sum of the util_avgs may be divergent, such * as when the series is a harmonic series. * * To solve this problem, we also cap the util_avg of successive tasks to * only 1/2 of the left utilization budget: * * util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n * * where n denotes the nth task and cpu_scale the CPU capacity. * * For example, for a CPU with 1024 of capacity, a simplest series from * the beginning would be like: * * task util_avg: 512, 256, 128, 64, 32, 16, 8, ... * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ... * * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap) * if util_avg > util_avg_cap. */ void post_init_entity_util_avg(struct sched_entity *se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); struct sched_avg *sa = &se->avg; long cpu_scale = arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq))); long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2; if (cap > 0) { if (cfs_rq->avg.util_avg != 0) { sa->util_avg = cfs_rq->avg.util_avg * se->load.weight; sa->util_avg /= (cfs_rq->avg.load_avg + 1); if (sa->util_avg > cap) sa->util_avg = cap; } else { sa->util_avg = cap; } } if (entity_is_task(se)) { struct task_struct *p = task_of(se); if (p->sched_class != &fair_sched_class) { /* * For !fair tasks do: * update_cfs_rq_load_avg(now, cfs_rq); attach_entity_load_avg(cfs_rq, se, 0); switched_from_fair(rq, p); * * such that the next switched_to_fair() has the * expected state. 
*/ se->avg.last_update_time = cfs_rq_clock_task(cfs_rq); return; } } attach_entity_cfs_rq(se); } #else /* !CONFIG_SMP */ void init_entity_runnable_average(struct sched_entity *se) { } void post_init_entity_util_avg(struct sched_entity *se) { } static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) { } #endif /* CONFIG_SMP */ /* * Update the current task's runtime statistics. */ static void update_curr(struct cfs_rq *cfs_rq) { struct sched_entity *curr = cfs_rq->curr; u64 now = rq_clock_task(rq_of(cfs_rq)); u64 delta_exec; if (unlikely(!curr)) return; delta_exec = now - curr->exec_start; if (unlikely((s64)delta_exec <= 0)) return; curr->exec_start = now; schedstat_set(curr->statistics.exec_max, max(delta_exec, curr->statistics.exec_max)); curr->sum_exec_runtime += delta_exec; schedstat_add(cfs_rq->exec_clock, delta_exec); curr->vruntime += calc_delta_fair(delta_exec, curr); update_min_vruntime(cfs_rq); if (entity_is_task(curr)) { struct task_struct *curtask = task_of(curr); trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime); cgroup_account_cputime(curtask, delta_exec); account_group_exec_runtime(curtask, delta_exec); } account_cfs_rq_runtime(cfs_rq, delta_exec); } static void update_curr_fair(struct rq *rq) { update_curr(cfs_rq_of(&rq->curr->se)); } static inline void update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) { u64 wait_start, prev_wait_start; if (!schedstat_enabled()) return; wait_start = rq_clock(rq_of(cfs_rq)); prev_wait_start = schedstat_val(se->statistics.wait_start); if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) && likely(wait_start > prev_wait_start)) wait_start -= prev_wait_start; __schedstat_set(se->statistics.wait_start, wait_start); } static inline void update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) { struct task_struct *p; u64 delta; if (!schedstat_enabled()) return; delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start); if (entity_is_task(se)) { p = task_of(se); if (task_on_rq_migrating(p)) { /* * Preserve migrating task's wait time so wait_start * time stamp can be adjusted to accumulate wait time * prior to migration. 
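 *
 * (The accumulated delta is stashed in wait_start here;
 * update_stats_wait_start() on the destination rq subtracts it from
 * its own clock, so the wait time carries over across the migration.)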
*/ __schedstat_set(se->statistics.wait_start, delta); return; } trace_sched_stat_wait(p, delta); } __schedstat_set(se->statistics.wait_max, max(schedstat_val(se->statistics.wait_max), delta)); __schedstat_inc(se->statistics.wait_count); __schedstat_add(se->statistics.wait_sum, delta); __schedstat_set(se->statistics.wait_start, 0); } static inline void update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) { struct task_struct *tsk = NULL; u64 sleep_start, block_start; if (!schedstat_enabled()) return; sleep_start = schedstat_val(se->statistics.sleep_start); block_start = schedstat_val(se->statistics.block_start); if (entity_is_task(se)) tsk = task_of(se); if (sleep_start) { u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start; if ((s64)delta < 0) delta = 0; if (unlikely(delta > schedstat_val(se->statistics.sleep_max))) __schedstat_set(se->statistics.sleep_max, delta); __schedstat_set(se->statistics.sleep_start, 0); __schedstat_add(se->statistics.sum_sleep_runtime, delta); if (tsk) { account_scheduler_latency(tsk, delta >> 10, 1); trace_sched_stat_sleep(tsk, delta); } } if (block_start) { u64 delta = rq_clock(rq_of(cfs_rq)) - block_start; if ((s64)delta < 0) delta = 0; if (unlikely(delta > schedstat_val(se->statistics.block_max))) __schedstat_set(se->statistics.block_max, delta); __schedstat_set(se->statistics.block_start, 0); __schedstat_add(se->statistics.sum_sleep_runtime, delta); if (tsk) { if (tsk->in_iowait) { __schedstat_add(se->statistics.iowait_sum, delta); __schedstat_inc(se->statistics.iowait_count); trace_sched_stat_iowait(tsk, delta); } trace_sched_stat_blocked(tsk, delta); /* * Blocking time is in units of nanosecs, so shift by * 20 to get a milliseconds-range estimation of the * amount of time that the task spent sleeping: */ if (unlikely(prof_on == SLEEP_PROFILING)) { profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk), delta >> 20); } account_scheduler_latency(tsk, delta >> 10, 0); } } } /* * Task is being enqueued - update stats: */ static inline void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { if (!schedstat_enabled()) return; /* * Are we enqueueing a waiting task? (for current tasks * a dequeue/enqueue event is a NOP) */ if (se != cfs_rq->curr) update_stats_wait_start(cfs_rq, se); if (flags & ENQUEUE_WAKEUP) update_stats_enqueue_sleeper(cfs_rq, se); } static inline void update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { if (!schedstat_enabled()) return; /* * Mark the end of the wait period if dequeueing a * waiting task: */ if (se != cfs_rq->curr) update_stats_wait_end(cfs_rq, se); if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) { struct task_struct *tsk = task_of(se); if (tsk->state & TASK_INTERRUPTIBLE) __schedstat_set(se->statistics.sleep_start, rq_clock(rq_of(cfs_rq))); if (tsk->state & TASK_UNINTERRUPTIBLE) __schedstat_set(se->statistics.block_start, rq_clock(rq_of(cfs_rq))); } } /* * We are picking a new current task - update its stats: */ static inline void update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) { /* * We are starting a new run period: */ se->exec_start = rq_clock_task(rq_of(cfs_rq)); } /************************************************** * Scheduling class queueing methods: */ #ifdef CONFIG_NUMA_BALANCING /* * Approximate time to scan a full NUMA task in ms. The task scan period is * calculated based on the tasks virtual memory size and * numa_balancing_scan_size. 
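 *
 * As a rough, illustrative example: with the default 256MB scan size a
 * task with ~2.5GB of resident memory needs about 10 scan windows to
 * cover its address space, so task_scan_min() below works out to
 * roughly 1000ms / 10 = 100ms between scan windows (subject to the
 * MAX_SCAN_WINDOW based floor).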
*/ unsigned int sysctl_numa_balancing_scan_period_min = 1000; unsigned int sysctl_numa_balancing_scan_period_max = 60000; /* Portion of address space to scan in MB */ unsigned int sysctl_numa_balancing_scan_size = 256; /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */ unsigned int sysctl_numa_balancing_scan_delay = 1000; struct numa_group { atomic_t refcount; spinlock_t lock; /* nr_tasks, tasks */ int nr_tasks; pid_t gid; int active_nodes; struct rcu_head rcu; unsigned long total_faults; unsigned long max_faults_cpu; /* * Faults_cpu is used to decide whether memory should move * towards the CPU. As a consequence, these stats are weighted * more by CPU use than by memory faults. */ unsigned long *faults_cpu; unsigned long faults[0]; }; static inline unsigned long group_faults_priv(struct numa_group *ng); static inline unsigned long group_faults_shared(struct numa_group *ng); static unsigned int task_nr_scan_windows(struct task_struct *p) { unsigned long rss = 0; unsigned long nr_scan_pages; /* * Calculations based on RSS as non-present and empty pages are skipped * by the PTE scanner and NUMA hinting faults should be trapped based * on resident pages */ nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT); rss = get_mm_rss(p->mm); if (!rss) rss = nr_scan_pages; rss = round_up(rss, nr_scan_pages); return rss / nr_scan_pages; } /* For sanitys sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */ #define MAX_SCAN_WINDOW 2560 static unsigned int task_scan_min(struct task_struct *p) { unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size); unsigned int scan, floor; unsigned int windows = 1; if (scan_size < MAX_SCAN_WINDOW) windows = MAX_SCAN_WINDOW / scan_size; floor = 1000 / windows; scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p); return max_t(unsigned int, floor, scan); } static unsigned int task_scan_start(struct task_struct *p) { unsigned long smin = task_scan_min(p); unsigned long period = smin; /* Scale the maximum scan period with the amount of shared memory. */ if (p->numa_group) { struct numa_group *ng = p->numa_group; unsigned long shared = group_faults_shared(ng); unsigned long private = group_faults_priv(ng); period *= atomic_read(&ng->refcount); period *= shared + 1; period /= private + shared + 1; } return max(smin, period); } static unsigned int task_scan_max(struct task_struct *p) { unsigned long smin = task_scan_min(p); unsigned long smax; /* Watch for min being lower than max due to floor calculations */ smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p); /* Scale the maximum scan period with the amount of shared memory. */ if (p->numa_group) { struct numa_group *ng = p->numa_group; unsigned long shared = group_faults_shared(ng); unsigned long private = group_faults_priv(ng); unsigned long period = smax; period *= atomic_read(&ng->refcount); period *= shared + 1; period /= private + shared + 1; smax = max(smax, period); } return max(smin, smax); } void init_numa_balancing(unsigned long clone_flags, struct task_struct *p) { int mm_users = 0; struct mm_struct *mm = p->mm; if (mm) { mm_users = atomic_read(&mm->mm_users); if (mm_users == 1) { mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); mm->numa_scan_seq = 0; } } p->node_stamp = 0; p->numa_scan_seq = mm ? 
mm->numa_scan_seq : 0; p->numa_scan_period = sysctl_numa_balancing_scan_delay; p->numa_work.next = &p->numa_work; p->numa_faults = NULL; p->numa_group = NULL; p->last_task_numa_placement = 0; p->last_sum_exec_runtime = 0; /* New address space, reset the preferred nid */ if (!(clone_flags & CLONE_VM)) { p->numa_preferred_nid = -1; return; } /* * New thread, keep existing numa_preferred_nid which should be copied * already by arch_dup_task_struct but stagger when scans start. */ if (mm) { unsigned int delay; delay = min_t(unsigned int, task_scan_max(current), current->numa_scan_period * mm_users * NSEC_PER_MSEC); delay += 2 * TICK_NSEC; p->node_stamp = delay; } } static void account_numa_enqueue(struct rq *rq, struct task_struct *p) { rq->nr_numa_running += (p->numa_preferred_nid != -1); rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p)); } static void account_numa_dequeue(struct rq *rq, struct task_struct *p) { rq->nr_numa_running -= (p->numa_preferred_nid != -1); rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p)); } /* Shared or private faults. */ #define NR_NUMA_HINT_FAULT_TYPES 2 /* Memory and CPU locality */ #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2) /* Averaged statistics, and temporary buffers. */ #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2) pid_t task_numa_group_id(struct task_struct *p) { return p->numa_group ? p->numa_group->gid : 0; } /* * The averaged statistics, shared & private, memory & CPU, * occupy the first half of the array. The second half of the * array is for current counters, which are averaged into the * first set by task_numa_placement. */ static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv) { return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv; } static inline unsigned long task_faults(struct task_struct *p, int nid) { if (!p->numa_faults) return 0; return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] + p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)]; } static inline unsigned long group_faults(struct task_struct *p, int nid) { if (!p->numa_group) return 0; return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] + p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)]; } static inline unsigned long group_faults_cpu(struct numa_group *group, int nid) { return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] + group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)]; } static inline unsigned long group_faults_priv(struct numa_group *ng) { unsigned long faults = 0; int node; for_each_online_node(node) { faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)]; } return faults; } static inline unsigned long group_faults_shared(struct numa_group *ng) { unsigned long faults = 0; int node; for_each_online_node(node) { faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)]; } return faults; } /* * A node triggering more than 1/3 as many NUMA faults as the maximum is * considered part of a numa group's pseudo-interleaving set. Migrations * between these nodes are slowed down, to allow things to settle down. */ #define ACTIVE_NODE_FRACTION 3 static bool numa_is_active_node(int nid, struct numa_group *ng) { return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu; } /* Handle placement on systems where not all nodes are directly connected. 
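 * On NUMA_BACKPLANE topologies only nodes closer than the given
 * distance limit contribute to a node's score, while on
 * NUMA_GLUELESS_MESH topologies faults on nearby nodes are added with
 * a weight that decreases with distance; directly connected systems
 * skip this scoring entirely.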
*/ static unsigned long score_nearby_nodes(struct task_struct *p, int nid, int maxdist, bool task) { unsigned long score = 0; int node; /* * All nodes are directly connected, and the same distance * from each other. No need for fancy placement algorithms. */ if (sched_numa_topology_type == NUMA_DIRECT) return 0; /* * This code is called for each node, introducing N^2 complexity, * which should be ok given the number of nodes rarely exceeds 8. */ for_each_online_node(node) { unsigned long faults; int dist = node_distance(nid, node); /* * The furthest away nodes in the system are not interesting * for placement; nid was already counted. */ if (dist == sched_max_numa_distance || node == nid) continue; /* * On systems with a backplane NUMA topology, compare groups * of nodes, and move tasks towards the group with the most * memory accesses. When comparing two nodes at distance * "hoplimit", only nodes closer by than "hoplimit" are part * of each group. Skip other nodes. */ if (sched_numa_topology_type == NUMA_BACKPLANE && dist >= maxdist) continue; /* Add up the faults from nearby nodes. */ if (task) faults = task_faults(p, node); else faults = group_faults(p, node); /* * On systems with a glueless mesh NUMA topology, there are * no fixed "groups of nodes". Instead, nodes that are not * directly connected bounce traffic through intermediate * nodes; a numa_group can occupy any set of nodes. * The further away a node is, the less the faults count. * This seems to result in good task placement. */ if (sched_numa_topology_type == NUMA_GLUELESS_MESH) { faults *= (sched_max_numa_distance - dist); faults /= (sched_max_numa_distance - LOCAL_DISTANCE); } score += faults; } return score; } /* * These return the fraction of accesses done by a particular task, or * task group, on a particular numa node. The group weight is given a * larger multiplier, in order to group tasks together that are almost * evenly spread out between numa nodes. */ static inline unsigned long task_weight(struct task_struct *p, int nid, int dist) { unsigned long faults, total_faults; if (!p->numa_faults) return 0; total_faults = p->total_numa_faults; if (!total_faults) return 0; faults = task_faults(p, nid); faults += score_nearby_nodes(p, nid, dist, true); return 1000 * faults / total_faults; } static inline unsigned long group_weight(struct task_struct *p, int nid, int dist) { unsigned long faults, total_faults; if (!p->numa_group) return 0; total_faults = p->numa_group->total_faults; if (!total_faults) return 0; faults = group_faults(p, nid); faults += score_nearby_nodes(p, nid, dist, false); return 1000 * faults / total_faults; } bool should_numa_migrate_memory(struct task_struct *p, struct page * page, int src_nid, int dst_cpu) { struct numa_group *ng = p->numa_group; int dst_nid = cpu_to_node(dst_cpu); int last_cpupid, this_cpupid; this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid); last_cpupid = page_cpupid_xchg_last(page, this_cpupid); /* * Allow first faults or private faults to migrate immediately early in * the lifetime of a task. The magic number 4 is based on waiting for * two full passes of the "multi-stage node selection" test that is * executed below. */ if ((p->numa_preferred_nid == -1 || p->numa_scan_seq <= 4) && (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid))) return true; /* * Multi-stage node selection is used in conjunction with a periodic * migration fault to build a temporal task<->page relation. By using * a two-stage filter we remove short/unlikely relations. 
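 *
 * (Concretely: a shared page is only migrated towards dst_nid once the
 * previous hinting fault on it also came from dst_nid, i.e. the same
 * relation has been observed on two consecutive passes.)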
* * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate * a task's usage of a particular page (n_p) per total usage of this * page (n_t) (in a given time-span) to a probability. * * Our periodic faults will sample this probability and getting the * same result twice in a row, given these samples are fully * independent, is then given by P(n)^2, provided our sample period * is sufficiently short compared to the usage pattern. * * This quadric squishes small probabilities, making it less likely we * act on an unlikely task<->page relation. */ if (!cpupid_pid_unset(last_cpupid) && cpupid_to_nid(last_cpupid) != dst_nid) return false; /* Always allow migrate on private faults */ if (cpupid_match_pid(p, last_cpupid)) return true; /* A shared fault, but p->numa_group has not been set up yet. */ if (!ng) return true; /* * Destination node is much more heavily used than the source * node? Allow migration. */ if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) * ACTIVE_NODE_FRACTION) return true; /* * Distribute memory according to CPU & memory use on each node, * with 3/4 hysteresis to avoid unnecessary memory migrations: * * faults_cpu(dst) 3 faults_cpu(src) * --------------- * - > --------------- * faults_mem(dst) 4 faults_mem(src) */ return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 > group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4; } static unsigned long weighted_cpuload(struct rq *rq); static unsigned long source_load(int cpu, int type); static unsigned long target_load(int cpu, int type); /* Cached statistics for all CPUs within a node */ struct numa_stats { unsigned long load; /* Total compute capacity of CPUs on a node */ unsigned long compute_capacity; }; /* * XXX borrowed from update_sg_lb_stats */ static void update_numa_stats(struct numa_stats *ns, int nid) { int cpu; memset(ns, 0, sizeof(*ns)); for_each_cpu(cpu, cpumask_of_node(nid)) { struct rq *rq = cpu_rq(cpu); ns->load += weighted_cpuload(rq); ns->compute_capacity += capacity_of(cpu); } } struct task_numa_env { struct task_struct *p; int src_cpu, src_nid; int dst_cpu, dst_nid; struct numa_stats src_stats, dst_stats; int imbalance_pct; int dist; struct task_struct *best_task; long best_imp; int best_cpu; }; static void task_numa_assign(struct task_numa_env *env, struct task_struct *p, long imp) { struct rq *rq = cpu_rq(env->dst_cpu); /* Bail out if run-queue part of active NUMA balance. */ if (xchg(&rq->numa_migrate_on, 1)) return; /* * Clear previous best_cpu/rq numa-migrate flag, since task now * found a better CPU to move/swap. */ if (env->best_cpu != -1) { rq = cpu_rq(env->best_cpu); WRITE_ONCE(rq->numa_migrate_on, 0); } if (env->best_task) put_task_struct(env->best_task); if (p) get_task_struct(p); env->best_task = p; env->best_imp = imp; env->best_cpu = env->dst_cpu; } static bool load_too_imbalanced(long src_load, long dst_load, struct task_numa_env *env) { long imb, old_imb; long orig_src_load, orig_dst_load; long src_capacity, dst_capacity; /* * The load is corrected for the CPU capacity available on each node. * * src_load dst_load * ------------ vs --------- * src_capacity dst_capacity */ src_capacity = env->src_stats.compute_capacity; dst_capacity = env->dst_stats.compute_capacity; imb = abs(dst_load * src_capacity - src_load * dst_capacity); orig_src_load = env->src_stats.load; orig_dst_load = env->dst_stats.load; old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity); /* Would this change make things worse? 
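 * (That is, reject the move only if the capacity-weighted difference
 * between source and destination load would grow compared to what it
 * is now.)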
*/ return (imb > old_imb); } /* * Maximum NUMA importance can be 1998 (2*999); * SMALLIMP @ 30 would be close to 1998/64. * Used to deter task migration. */ #define SMALLIMP 30 /* * This checks if the overall compute and NUMA accesses of the system would * be improved if the source tasks was migrated to the target dst_cpu taking * into account that it might be best if task running on the dst_cpu should * be exchanged with the source task */ static void task_numa_compare(struct task_numa_env *env, long taskimp, long groupimp, bool maymove) { struct rq *dst_rq = cpu_rq(env->dst_cpu); struct task_struct *cur; long src_load, dst_load; long load; long imp = env->p->numa_group ? groupimp : taskimp; long moveimp = imp; int dist = env->dist; if (READ_ONCE(dst_rq->numa_migrate_on)) return; rcu_read_lock(); cur = task_rcu_dereference(&dst_rq->curr); if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur))) cur = NULL; /* * Because we have preemption enabled we can get migrated around and * end try selecting ourselves (current == env->p) as a swap candidate. */ if (cur == env->p) goto unlock; if (!cur) { if (maymove && moveimp >= env->best_imp) goto assign; else goto unlock; } /* * "imp" is the fault differential for the source task between the * source and destination node. Calculate the total differential for * the source task and potential destination task. The more negative * the value is, the more remote accesses that would be expected to * be incurred if the tasks were swapped. */ /* Skip this swap candidate if cannot move to the source cpu */ if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed)) goto unlock; /* * If dst and source tasks are in the same NUMA group, or not * in any group then look only at task weights. */ if (cur->numa_group == env->p->numa_group) { imp = taskimp + task_weight(cur, env->src_nid, dist) - task_weight(cur, env->dst_nid, dist); /* * Add some hysteresis to prevent swapping the * tasks within a group over tiny differences. */ if (cur->numa_group) imp -= imp / 16; } else { /* * Compare the group weights. If a task is all by itself * (not part of a group), use the task weight instead. */ if (cur->numa_group && env->p->numa_group) imp += group_weight(cur, env->src_nid, dist) - group_weight(cur, env->dst_nid, dist); else imp += task_weight(cur, env->src_nid, dist) - task_weight(cur, env->dst_nid, dist); } if (maymove && moveimp > imp && moveimp > env->best_imp) { imp = moveimp; cur = NULL; goto assign; } /* * If the NUMA importance is less than SMALLIMP, * task migration might only result in ping pong * of tasks and also hurt performance due to cache * misses. */ if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2) goto unlock; /* * In the overloaded case, try and keep the load balanced. */ load = task_h_load(env->p) - task_h_load(cur); if (!load) goto assign; dst_load = env->dst_stats.load + load; src_load = env->src_stats.load - load; if (load_too_imbalanced(src_load, dst_load, env)) goto unlock; assign: /* * One idle CPU per node is evaluated for a task numa move. * Call select_idle_sibling to maybe find a better one. */ if (!cur) { /* * select_idle_siblings() uses an per-CPU cpumask that * can be used from IRQ context. 
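 * Interrupts are disabled around the call so that an interrupt on
 * this CPU cannot reuse that mask underneath us.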
*/ local_irq_disable(); env->dst_cpu = select_idle_sibling(env->p, env->src_cpu, env->dst_cpu); local_irq_enable(); } task_numa_assign(env, cur, imp); unlock: rcu_read_unlock(); } static void task_numa_find_cpu(struct task_numa_env *env, long taskimp, long groupimp) { long src_load, dst_load, load; bool maymove = false; int cpu; load = task_h_load(env->p); dst_load = env->dst_stats.load + load; src_load = env->src_stats.load - load; /* * If the improvement from just moving env->p direction is better * than swapping tasks around, check if a move is possible. */ maymove = !load_too_imbalanced(src_load, dst_load, env); for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { /* Skip this CPU if the source task cannot migrate */ if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed)) continue; env->dst_cpu = cpu; task_numa_compare(env, taskimp, groupimp, maymove); } } static int task_numa_migrate(struct task_struct *p) { struct task_numa_env env = { .p = p, .src_cpu = task_cpu(p), .src_nid = task_node(p), .imbalance_pct = 112, .best_task = NULL, .best_imp = 0, .best_cpu = -1, }; struct sched_domain *sd; struct rq *best_rq; unsigned long taskweight, groupweight; int nid, ret, dist; long taskimp, groupimp; /* * Pick the lowest SD_NUMA domain, as that would have the smallest * imbalance and would be the first to start moving tasks about. * * And we want to avoid any moving of tasks about, as that would create * random movement of tasks -- counter the numa conditions we're trying * to satisfy here. */ rcu_read_lock(); sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu)); if (sd) env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2; rcu_read_unlock(); /* * Cpusets can break the scheduler domain tree into smaller * balance domains, some of which do not cross NUMA boundaries. * Tasks that are "trapped" in such domains cannot be migrated * elsewhere, so there is no point in (re)trying. */ if (unlikely(!sd)) { sched_setnuma(p, task_node(p)); return -EINVAL; } env.dst_nid = p->numa_preferred_nid; dist = env.dist = node_distance(env.src_nid, env.dst_nid); taskweight = task_weight(p, env.src_nid, dist); groupweight = group_weight(p, env.src_nid, dist); update_numa_stats(&env.src_stats, env.src_nid); taskimp = task_weight(p, env.dst_nid, dist) - taskweight; groupimp = group_weight(p, env.dst_nid, dist) - groupweight; update_numa_stats(&env.dst_stats, env.dst_nid); /* Try to find a spot on the preferred nid. */ task_numa_find_cpu(&env, taskimp, groupimp); /* * Look at other nodes in these cases: * - there is no space available on the preferred_nid * - the task is part of a numa_group that is interleaved across * multiple NUMA nodes; in order to better consolidate the group, * we need to check other locations. 
*/ if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) { for_each_online_node(nid) { if (nid == env.src_nid || nid == p->numa_preferred_nid) continue; dist = node_distance(env.src_nid, env.dst_nid); if (sched_numa_topology_type == NUMA_BACKPLANE && dist != env.dist) { taskweight = task_weight(p, env.src_nid, dist); groupweight = group_weight(p, env.src_nid, dist); } /* Only consider nodes where both task and groups benefit */ taskimp = task_weight(p, nid, dist) - taskweight; groupimp = group_weight(p, nid, dist) - groupweight; if (taskimp < 0 && groupimp < 0) continue; env.dist = dist; env.dst_nid = nid; update_numa_stats(&env.dst_stats, env.dst_nid); task_numa_find_cpu(&env, taskimp, groupimp); } } /* * If the task is part of a workload that spans multiple NUMA nodes, * and is migrating into one of the workload's active nodes, remember * this node as the task's preferred numa node, so the workload can * settle down. * A task that migrated to a second choice node will be better off * trying for a better one later. Do not set the preferred node here. */ if (p->numa_group) { if (env.best_cpu == -1) nid = env.src_nid; else nid = cpu_to_node(env.best_cpu); if (nid != p->numa_preferred_nid) sched_setnuma(p, nid); } /* No better CPU than the current one was found. */ if (env.best_cpu == -1) return -EAGAIN; best_rq = cpu_rq(env.best_cpu); if (env.best_task == NULL) { ret = migrate_task_to(p, env.best_cpu); WRITE_ONCE(best_rq->numa_migrate_on, 0); if (ret != 0) trace_sched_stick_numa(p, env.src_cpu, env.best_cpu); return ret; } ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu); WRITE_ONCE(best_rq->numa_migrate_on, 0); if (ret != 0) trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task)); put_task_struct(env.best_task); return ret; } /* Attempt to migrate a task to a CPU on the preferred node. */ static void numa_migrate_preferred(struct task_struct *p) { unsigned long interval = HZ; /* This task has no NUMA fault statistics yet */ if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults)) return; /* Periodically retry migrating the task to the preferred node */ interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16); p->numa_migrate_retry = jiffies + interval; /* Success if task is already running on preferred CPU */ if (task_node(p) == p->numa_preferred_nid) return; /* Otherwise, try migrate to a CPU on the preferred node */ task_numa_migrate(p); } /* * Find out how many nodes on the workload is actively running on. Do this by * tracking the nodes from which NUMA hinting faults are triggered. This can * be different from the set of nodes where the workload's memory is currently * located. */ static void numa_group_count_active_nodes(struct numa_group *numa_group) { unsigned long faults, max_faults = 0; int nid, active_nodes = 0; for_each_online_node(nid) { faults = group_faults_cpu(numa_group, nid); if (faults > max_faults) max_faults = faults; } for_each_online_node(nid) { faults = group_faults_cpu(numa_group, nid); if (faults * ACTIVE_NODE_FRACTION > max_faults) active_nodes++; } numa_group->max_faults_cpu = max_faults; numa_group->active_nodes = active_nodes; } /* * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS * increments. The more local the fault statistics are, the higher the scan * period will be for the next scan window. If local/(local+remote) ratio is * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS) * the scan period will decrease. Aim for 70% local accesses. 
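 *
 * As a rough, illustrative example: with a 1000ms scan period each slot
 * is worth 100ms, so a task whose faults are 90% private lengthens its
 * scan period by (9 - 7) * 100ms = 200ms, while a task seeing only
 * 40% local accesses (and mostly shared faults) shortens it by
 * (7 - 4) * 100ms = 300ms.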
*/ #define NUMA_PERIOD_SLOTS 10 #define NUMA_PERIOD_THRESHOLD 7 /* * Increase the scan period (slow down scanning) if the majority of * our memory is already on our local node, or if the majority of * the page accesses are shared with other processes. * Otherwise, decrease the scan period. */ static void update_task_scan_period(struct task_struct *p, unsigned long shared, unsigned long private) { unsigned int period_slot; int lr_ratio, ps_ratio; int diff; unsigned long remote = p->numa_faults_locality[0]; unsigned long local = p->numa_faults_locality[1]; /* * If there were no record hinting faults then either the task is * completely idle or all activity is areas that are not of interest * to automatic numa balancing. Related to that, if there were failed * migration then it implies we are migrating too quickly or the local * node is overloaded. In either case, scan slower */ if (local + shared == 0 || p->numa_faults_locality[2]) { p->numa_scan_period = min(p->numa_scan_period_max, p->numa_scan_period << 1); p->mm->numa_next_scan = jiffies + msecs_to_jiffies(p->numa_scan_period); return; } /* * Prepare to scale scan period relative to the current period. * == NUMA_PERIOD_THRESHOLD scan period stays the same * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster) * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower) */ period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS); lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote); ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared); if (ps_ratio >= NUMA_PERIOD_THRESHOLD) { /* * Most memory accesses are local. There is no need to * do fast NUMA scanning, since memory is already local. */ int slot = ps_ratio - NUMA_PERIOD_THRESHOLD; if (!slot) slot = 1; diff = slot * period_slot; } else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) { /* * Most memory accesses are shared with other tasks. * There is no point in continuing fast NUMA scanning, * since other tasks may just move the memory elsewhere. */ int slot = lr_ratio - NUMA_PERIOD_THRESHOLD; if (!slot) slot = 1; diff = slot * period_slot; } else { /* * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS, * yet they are not on the local NUMA node. Speed up * NUMA scanning to get the memory moved over. */ int ratio = max(lr_ratio, ps_ratio); diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot; } p->numa_scan_period = clamp(p->numa_scan_period + diff, task_scan_min(p), task_scan_max(p)); memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); } /* * Get the fraction of time the task has been running since the last * NUMA placement cycle. The scheduler keeps similar statistics, but * decays those on a 32ms period, which is orders of magnitude off * from the dozens-of-seconds NUMA balancing period. Use the scheduler * stats only if the task is so new there are no NUMA statistics yet. */ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period) { u64 runtime, delta, now; /* Use the start of this time slice to avoid calculations. */ now = p->se.exec_start; runtime = p->se.sum_exec_runtime; if (p->last_task_numa_placement) { delta = runtime - p->last_sum_exec_runtime; *period = now - p->last_task_numa_placement; } else { delta = p->se.avg.load_sum; *period = LOAD_AVG_MAX; } p->last_sum_exec_runtime = runtime; p->last_task_numa_placement = now; return delta; } /* * Determine the preferred nid for a task in a numa_group. This needs to * be done in a way that produces consistent results with group_weight, * otherwise workloads might not converge. 
*/ static int preferred_group_nid(struct task_struct *p, int nid) { nodemask_t nodes; int dist; /* Direct connections between all NUMA nodes. */ if (sched_numa_topology_type == NUMA_DIRECT) return nid; /* * On a system with glueless mesh NUMA topology, group_weight * scores nodes according to the number of NUMA hinting faults on * both the node itself, and on nearby nodes. */ if (sched_numa_topology_type == NUMA_GLUELESS_MESH) { unsigned long score, max_score = 0; int node, max_node = nid; dist = sched_max_numa_distance; for_each_online_node(node) { score = group_weight(p, node, dist); if (score > max_score) { max_score = score; max_node = node; } } return max_node; } /* * Finding the preferred nid in a system with NUMA backplane * interconnect topology is more involved. The goal is to locate * tasks from numa_groups near each other in the system, and * untangle workloads from different sides of the system. This requires * searching down the hierarchy of node groups, recursively searching * inside the highest scoring group of nodes. The nodemask tricks * keep the complexity of the search down. */ nodes = node_online_map; for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) { unsigned long max_faults = 0; nodemask_t max_group = NODE_MASK_NONE; int a, b; /* Are there nodes at this distance from each other? */ if (!find_numa_distance(dist)) continue; for_each_node_mask(a, nodes) { unsigned long faults = 0; nodemask_t this_group; nodes_clear(this_group); /* Sum group's NUMA faults; includes a==b case. */ for_each_node_mask(b, nodes) { if (node_distance(a, b) < dist) { faults += group_faults(p, b); node_set(b, this_group); node_clear(b, nodes); } } /* Remember the top group. */ if (faults > max_faults) { max_faults = faults; max_group = this_group; /* * subtle: at the smallest distance there is * just one node left in each "group", the * winner is the preferred nid. */ nid = a; } } /* Next round, evaluate the nodes within max_group. */ if (!max_faults) break; nodes = max_group; } return nid; } static void task_numa_placement(struct task_struct *p) { int seq, nid, max_nid = -1; unsigned long max_faults = 0; unsigned long fault_types[2] = { 0, 0 }; unsigned long total_faults; u64 runtime, period; spinlock_t *group_lock = NULL; /* * The p->mm->numa_scan_seq field gets updated without * exclusive access. 
Use READ_ONCE() here to ensure * that the field is read in a single access: */ seq = READ_ONCE(p->mm->numa_scan_seq); if (p->numa_scan_seq == seq) return; p->numa_scan_seq = seq; p->numa_scan_period_max = task_scan_max(p); total_faults = p->numa_faults_locality[0] + p->numa_faults_locality[1]; runtime = numa_get_avg_runtime(p, &period); /* If the task is part of a group prevent parallel updates to group stats */ if (p->numa_group) { group_lock = &p->numa_group->lock; spin_lock_irq(group_lock); } /* Find the node with the highest number of faults */ for_each_online_node(nid) { /* Keep track of the offsets in numa_faults array */ int mem_idx, membuf_idx, cpu_idx, cpubuf_idx; unsigned long faults = 0, group_faults = 0; int priv; for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) { long diff, f_diff, f_weight; mem_idx = task_faults_idx(NUMA_MEM, nid, priv); membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv); cpu_idx = task_faults_idx(NUMA_CPU, nid, priv); cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv); /* Decay existing window, copy faults since last scan */ diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2; fault_types[priv] += p->numa_faults[membuf_idx]; p->numa_faults[membuf_idx] = 0; /* * Normalize the faults_from, so all tasks in a group * count according to CPU use, instead of by the raw * number of faults. Tasks with little runtime have * little over-all impact on throughput, and thus their * faults are less important. */ f_weight = div64_u64(runtime << 16, period + 1); f_weight = (f_weight * p->numa_faults[cpubuf_idx]) / (total_faults + 1); f_diff = f_weight - p->numa_faults[cpu_idx] / 2; p->numa_faults[cpubuf_idx] = 0; p->numa_faults[mem_idx] += diff; p->numa_faults[cpu_idx] += f_diff; faults += p->numa_faults[mem_idx]; p->total_numa_faults += diff; if (p->numa_group) { /* * safe because we can only change our own group * * mem_idx represents the offset for a given * nid and priv in a specific region because it * is at the beginning of the numa_faults array. 
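 *
 * (faults_cpu is set up to point at the second half of the group's
 * faults array, so the same mem_idx indexes both the memory and the
 * CPU fault counters below.)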
*/ p->numa_group->faults[mem_idx] += diff; p->numa_group->faults_cpu[mem_idx] += f_diff; p->numa_group->total_faults += diff; group_faults += p->numa_group->faults[mem_idx]; } } if (!p->numa_group) { if (faults > max_faults) { max_faults = faults; max_nid = nid; } } else if (group_faults > max_faults) { max_faults = group_faults; max_nid = nid; } } if (p->numa_group) { numa_group_count_active_nodes(p->numa_group); spin_unlock_irq(group_lock); max_nid = preferred_group_nid(p, max_nid); } if (max_faults) { /* Set the new preferred node */ if (max_nid != p->numa_preferred_nid) sched_setnuma(p, max_nid); } update_task_scan_period(p, fault_types[0], fault_types[1]); } static inline int get_numa_group(struct numa_group *grp) { return atomic_inc_not_zero(&grp->refcount); } static inline void put_numa_group(struct numa_group *grp) { if (atomic_dec_and_test(&grp->refcount)) kfree_rcu(grp, rcu); } static void task_numa_group(struct task_struct *p, int cpupid, int flags, int *priv) { struct numa_group *grp, *my_grp; struct task_struct *tsk; bool join = false; int cpu = cpupid_to_cpu(cpupid); int i; if (unlikely(!p->numa_group)) { unsigned int size = sizeof(struct numa_group) + 4*nr_node_ids*sizeof(unsigned long); grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); if (!grp) return; atomic_set(&grp->refcount, 1); grp->active_nodes = 1; grp->max_faults_cpu = 0; spin_lock_init(&grp->lock); grp->gid = p->pid; /* Second half of the array tracks nids where faults happen */ grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES * nr_node_ids; for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) grp->faults[i] = p->numa_faults[i]; grp->total_faults = p->total_numa_faults; grp->nr_tasks++; rcu_assign_pointer(p->numa_group, grp); } rcu_read_lock(); tsk = READ_ONCE(cpu_rq(cpu)->curr); if (!cpupid_match_pid(tsk, cpupid)) goto no_join; grp = rcu_dereference(tsk->numa_group); if (!grp) goto no_join; my_grp = p->numa_group; if (grp == my_grp) goto no_join; /* * Only join the other group if its bigger; if we're the bigger group, * the other task will join us. */ if (my_grp->nr_tasks > grp->nr_tasks) goto no_join; /* * Tie-break on the grp address. */ if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp) goto no_join; /* Always join threads in the same process. 
*/ if (tsk->mm == current->mm) join = true; /* Simple filter to avoid false positives due to PID collisions */ if (flags & TNF_SHARED) join = true; /* Update priv based on whether false sharing was detected */ *priv = !join; if (join && !get_numa_group(grp)) goto no_join; rcu_read_unlock(); if (!join) return; BUG_ON(irqs_disabled()); double_lock_irq(&my_grp->lock, &grp->lock); for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) { my_grp->faults[i] -= p->numa_faults[i]; grp->faults[i] += p->numa_faults[i]; } my_grp->total_faults -= p->total_numa_faults; grp->total_faults += p->total_numa_faults; my_grp->nr_tasks--; grp->nr_tasks++; spin_unlock(&my_grp->lock); spin_unlock_irq(&grp->lock); rcu_assign_pointer(p->numa_group, grp); put_numa_group(my_grp); return; no_join: rcu_read_unlock(); return; } void task_numa_free(struct task_struct *p) { struct numa_group *grp = p->numa_group; void *numa_faults = p->numa_faults; unsigned long flags; int i; if (grp) { spin_lock_irqsave(&grp->lock, flags); for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) grp->faults[i] -= p->numa_faults[i]; grp->total_faults -= p->total_numa_faults; grp->nr_tasks--; spin_unlock_irqrestore(&grp->lock, flags); RCU_INIT_POINTER(p->numa_group, NULL); put_numa_group(grp); } p->numa_faults = NULL; kfree(numa_faults); } /* * Got a PROT_NONE fault for a page on @node. */ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags) { struct task_struct *p = current; bool migrated = flags & TNF_MIGRATED; int cpu_node = task_node(current); int local = !!(flags & TNF_FAULT_LOCAL); struct numa_group *ng; int priv; if (!static_branch_likely(&sched_numa_balancing)) return; /* for example, ksmd faulting in a user's mm */ if (!p->mm) return; /* Allocate buffer to track faults on a per-node basis */ if (unlikely(!p->numa_faults)) { int size = sizeof(*p->numa_faults) * NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids; p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN); if (!p->numa_faults) return; p->total_numa_faults = 0; memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); } /* * First accesses are treated as private, otherwise consider accesses * to be private if the accessing pid has not changed */ if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) { priv = 1; } else { priv = cpupid_match_pid(p, last_cpupid); if (!priv && !(flags & TNF_NO_GROUP)) task_numa_group(p, last_cpupid, flags, &priv); } /* * If a workload spans multiple NUMA nodes, a shared fault that * occurs wholly within the set of nodes that the workload is * actively using should be counted as local. This allows the * scan rate to slow down when a workload has settled down. */ ng = p->numa_group; if (!priv && !local && ng && ng->active_nodes > 1 && numa_is_active_node(cpu_node, ng) && numa_is_active_node(mem_node, ng)) local = 1; /* * Retry to migrate task to preferred node periodically, in case it * previously failed, or the scheduler moved us. 
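 *
 * (numa_migrate_retry is re-armed inside numa_migrate_preferred(), so
 * retries are spaced by roughly numa_scan_period / 16, capped at one
 * second.)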
*/ if (time_after(jiffies, p->numa_migrate_retry)) { task_numa_placement(p); numa_migrate_preferred(p); } if (migrated) p->numa_pages_migrated += pages; if (flags & TNF_MIGRATE_FAIL) p->numa_faults_locality[2] += pages; p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; p->numa_faults_locality[local] += pages; } static void reset_ptenuma_scan(struct task_struct *p) { /* * We only did a read acquisition of the mmap sem, so * p->mm->numa_scan_seq is written to without exclusive access * and the update is not guaranteed to be atomic. That's not * much of an issue though, since this is just used for * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not * expensive, to avoid any form of compiler optimizations: */ WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1); p->mm->numa_scan_offset = 0; } /* * The expensive part of numa migration is done from task_work context. * Triggered from task_tick_numa(). */ void task_numa_work(struct callback_head *work) { unsigned long migrate, next_scan, now = jiffies; struct task_struct *p = current; struct mm_struct *mm = p->mm; u64 runtime = p->se.sum_exec_runtime; struct vm_area_struct *vma; unsigned long start, end; unsigned long nr_pte_updates = 0; long pages, virtpages; SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work)); work->next = work; /* protect against double add */ /* * Who cares about NUMA placement when they're dying. * * NOTE: make sure not to dereference p->mm before this check, * exit_task_work() happens _after_ exit_mm() so we could be called * without p->mm even though we still had it when we enqueued this * work. */ if (p->flags & PF_EXITING) return; if (!mm->numa_next_scan) { mm->numa_next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); } /* * Enforce maximal scan/migration frequency.. */ migrate = mm->numa_next_scan; if (time_before(now, migrate)) return; if (p->numa_scan_period == 0) { p->numa_scan_period_max = task_scan_max(p); p->numa_scan_period = task_scan_start(p); } next_scan = now + msecs_to_jiffies(p->numa_scan_period); if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate) return; /* * Delay this task enough that another task of this mm will likely win * the next time around. */ p->node_stamp += 2 * TICK_NSEC; start = mm->numa_scan_offset; pages = sysctl_numa_balancing_scan_size; pages <<= 20 - PAGE_SHIFT; /* MB in pages */ virtpages = pages * 8; /* Scan up to this much virtual space */ if (!pages) return; if (!down_read_trylock(&mm->mmap_sem)) return; vma = find_vma(mm, start); if (!vma) { reset_ptenuma_scan(p); start = 0; vma = mm->mmap; } for (; vma; vma = vma->vm_next) { if (!vma_migratable(vma) || !vma_policy_mof(vma) || is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) { continue; } /* * Shared library pages mapped by multiple processes are not * migrated as it is expected they are cache replicated. Avoid * hinting faults in read-only file-backed mappings or the vdso * as migrating the pages will be of marginal benefit. 
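 *
 * (The check below therefore skips VMAs without an owning mm as well
 * as file mappings that are readable but not writable.)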
*/ if (!vma->vm_mm || (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) continue; /* * Skip inaccessible VMAs to avoid any confusion between * PROT_NONE and NUMA hinting ptes */ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) continue; do { start = max(start, vma->vm_start); end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); end = min(end, vma->vm_end); nr_pte_updates = change_prot_numa(vma, start, end); /* * Try to scan sysctl_numa_balancing_size worth of * hpages that have at least one present PTE that * is not already pte-numa. If the VMA contains * areas that are unused or already full of prot_numa * PTEs, scan up to virtpages, to skip through those * areas faster. */ if (nr_pte_updates) pages -= (end - start) >> PAGE_SHIFT; virtpages -= (end - start) >> PAGE_SHIFT; start = end; if (pages <= 0 || virtpages <= 0) goto out; cond_resched(); } while (end != vma->vm_end); } out: /* * It is possible to reach the end of the VMA list but the last few * VMAs are not guaranteed to the vma_migratable. If they are not, we * would find the !migratable VMA on the next scan but not reset the * scanner to the start so check it now. */ if (vma) mm->numa_scan_offset = start; else reset_ptenuma_scan(p); up_read(&mm->mmap_sem); /* * Make sure tasks use at least 32x as much time to run other code * than they used here, to limit NUMA PTE scanning overhead to 3% max. * Usually update_task_scan_period slows down scanning enough; on an * overloaded system we need to limit overhead on a per task basis. */ if (unlikely(p->se.sum_exec_runtime != runtime)) { u64 diff = p->se.sum_exec_runtime - runtime; p->node_stamp += 32 * diff; } } /* * Drive the periodic memory faults.. */ void task_tick_numa(struct rq *rq, struct task_struct *curr) { struct callback_head *work = &curr->numa_work; u64 period, now; /* * We don't care about NUMA placement if we don't have memory. */ if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work) return; /* * Using runtime rather than walltime has the dual advantage that * we (mostly) drive the selection from busy threads and that the * task needs to have done some actual work before we bother with * NUMA placement. */ now = curr->se.sum_exec_runtime; period = (u64)curr->numa_scan_period * NSEC_PER_MSEC; if (now > curr->node_stamp + period) { if (!curr->node_stamp) curr->numa_scan_period = task_scan_start(curr); curr->node_stamp += period; if (!time_before(jiffies, curr->mm->numa_next_scan)) { init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */ task_work_add(curr, work, true); } } } static void update_scan_period(struct task_struct *p, int new_cpu) { int src_nid = cpu_to_node(task_cpu(p)); int dst_nid = cpu_to_node(new_cpu); if (!static_branch_likely(&sched_numa_balancing)) return; if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING)) return; if (src_nid == dst_nid) return; /* * Allow resets if faults have been trapped before one scan * has completed. This is most likely due to a new task that * is pulled cross-node due to wakeups or load balancing. */ if (p->numa_scan_seq) { /* * Avoid scan adjustments if moving to the preferred * node or if the task was not previously running on * the preferred node. 
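 *
 * Otherwise (e.g. the task is moving off its preferred node) the scan
 * period is reset below so that placement is re-evaluated sooner.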
*/ if (dst_nid == p->numa_preferred_nid || (p->numa_preferred_nid != -1 && src_nid != p->numa_preferred_nid)) return; } p->numa_scan_period = task_scan_start(p); } #else static void task_tick_numa(struct rq *rq, struct task_struct *curr) { } static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p) { } static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) { } static inline void update_scan_period(struct task_struct *p, int new_cpu) { } #endif /* CONFIG_NUMA_BALANCING */ static void account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) { update_load_add(&cfs_rq->load, se->load.weight); if (!parent_entity(se)) update_load_add(&rq_of(cfs_rq)->load, se->load.weight); #ifdef CONFIG_SMP if (entity_is_task(se)) { struct rq *rq = rq_of(cfs_rq); account_numa_enqueue(rq, task_of(se)); list_add(&se->group_node, &rq->cfs_tasks); } #endif cfs_rq->nr_running++; } static void account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) { update_load_sub(&cfs_rq->load, se->load.weight); if (!parent_entity(se)) update_load_sub(&rq_of(cfs_rq)->load, se->load.weight); #ifdef CONFIG_SMP if (entity_is_task(se)) { account_numa_dequeue(rq_of(cfs_rq), task_of(se)); list_del_init(&se->group_node); } #endif cfs_rq->nr_running--; } /* * Signed add and clamp on underflow. * * Explicitly do a load-store to ensure the intermediate value never hits * memory. This allows lockless observations without ever seeing the negative * values. */ #define add_positive(_ptr, _val) do { \ typeof(_ptr) ptr = (_ptr); \ typeof(_val) val = (_val); \ typeof(*ptr) res, var = READ_ONCE(*ptr); \ \ res = var + val; \ \ if (val < 0 && res > var) \ res = 0; \ \ WRITE_ONCE(*ptr, res); \ } while (0) /* * Unsigned subtract and clamp on underflow. * * Explicitly do a load-store to ensure the intermediate value never hits * memory. This allows lockless observations without ever seeing the negative * values. */ #define sub_positive(_ptr, _val) do { \ typeof(_ptr) ptr = (_ptr); \ typeof(*ptr) val = (_val); \ typeof(*ptr) res, var = READ_ONCE(*ptr); \ res = var - val; \ if (res > var) \ res = 0; \ WRITE_ONCE(*ptr, res); \ } while (0) /* * Remove and clamp on negative, from a local variable. * * A variant of sub_positive(), which does not use explicit load-store * and is thus optimized for local variable updates. 
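 *
 * For example (illustration only): with a local 'unsigned long sum = 3',
 * lsub_positive(&sum, 5) leaves sum == 0 instead of letting the
 * subtraction wrap around to a huge value.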
*/ #define lsub_positive(_ptr, _val) do { \ typeof(_ptr) ptr = (_ptr); \ *ptr -= min_t(typeof(*ptr), *ptr, _val); \ } while (0) #ifdef CONFIG_SMP static inline void enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { cfs_rq->runnable_weight += se->runnable_weight; cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg; cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum; } static inline void dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { cfs_rq->runnable_weight -= se->runnable_weight; sub_positive(&cfs_rq->avg.runnable_load_avg, se->avg.runnable_load_avg); sub_positive(&cfs_rq->avg.runnable_load_sum, se_runnable(se) * se->avg.runnable_load_sum); } static inline void enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { cfs_rq->avg.load_avg += se->avg.load_avg; cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; } static inline void dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); } #else static inline void enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } static inline void dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } static inline void enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } static inline void dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } #endif static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, unsigned long weight, unsigned long runnable) { if (se->on_rq) { /* commit outstanding execution time */ if (cfs_rq->curr == se) update_curr(cfs_rq); account_entity_dequeue(cfs_rq, se); dequeue_runnable_load_avg(cfs_rq, se); } dequeue_load_avg(cfs_rq, se); se->runnable_weight = runnable; update_load_set(&se->load, weight); #ifdef CONFIG_SMP do { u32 divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib; se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); se->avg.runnable_load_avg = div_u64(se_runnable(se) * se->avg.runnable_load_sum, divider); } while (0); #endif enqueue_load_avg(cfs_rq, se); if (se->on_rq) { account_entity_enqueue(cfs_rq, se); enqueue_runnable_load_avg(cfs_rq, se); } } void reweight_task(struct task_struct *p, int prio) { struct sched_entity *se = &p->se; struct cfs_rq *cfs_rq = cfs_rq_of(se); struct load_weight *load = &se->load; unsigned long weight = scale_load(sched_prio_to_weight[prio]); reweight_entity(cfs_rq, se, weight, weight); load->inv_weight = sched_prio_to_wmult[prio]; } #ifdef CONFIG_FAIR_GROUP_SCHED #ifdef CONFIG_SMP /* * All this does is approximate the hierarchical proportion which includes that * global sum we all love to hate. * * That is, the weight of a group entity, is the proportional share of the * group weight based on the group runqueue weights. That is: * * tg->weight * grq->load.weight * ge->load.weight = ----------------------------- (1) * \Sum grq->load.weight * * Now, because computing that sum is prohibitively expensive to compute (been * there, done that) we approximate it with this average stuff. The average * moves slower and therefore the approximation is cheaper and more stable. 
* * So instead of the above, we substitute: * * grq->load.weight -> grq->avg.load_avg (2) * * which yields the following: * * tg->weight * grq->avg.load_avg * ge->load.weight = ------------------------------ (3) * tg->load_avg * * Where: tg->load_avg ~= \Sum grq->avg.load_avg * * That is shares_avg, and it is right (given the approximation (2)). * * The problem with it is that because the average is slow -- it was designed * to be exactly that of course -- this leads to transients in boundary * conditions. In specific, the case where the group was idle and we start the * one task. It takes time for our CPU's grq->avg.load_avg to build up, * yielding bad latency etc.. * * Now, in that special case (1) reduces to: * * tg->weight * grq->load.weight * ge->load.weight = ----------------------------- = tg->weight (4) * grp->load.weight * * That is, the sum collapses because all other CPUs are idle; the UP scenario. * * So what we do is modify our approximation (3) to approach (4) in the (near) * UP case, like: * * ge->load.weight = * * tg->weight * grq->load.weight * --------------------------------------------------- (5) * tg->load_avg - grq->avg.load_avg + grq->load.weight * * But because grq->load.weight can drop to 0, resulting in a divide by zero, * we need to use grq->avg.load_avg as its lower bound, which then gives: * * * tg->weight * grq->load.weight * ge->load.weight = ----------------------------- (6) * tg_load_avg' * * Where: * * tg_load_avg' = tg->load_avg - grq->avg.load_avg + * max(grq->load.weight, grq->avg.load_avg) * * And that is shares_weight and is icky. In the (near) UP case it approaches * (4) while in the normal case it approaches (3). It consistently * overestimates the ge->load.weight and therefore: * * \Sum ge->load.weight >= tg->weight * * hence icky! */ static long calc_group_shares(struct cfs_rq *cfs_rq) { long tg_weight, tg_shares, load, shares; struct task_group *tg = cfs_rq->tg; tg_shares = READ_ONCE(tg->shares); load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg); tg_weight = atomic_long_read(&tg->load_avg); /* Ensure tg_weight >= load */ tg_weight -= cfs_rq->tg_load_avg_contrib; tg_weight += load; shares = (tg_shares * load); if (tg_weight) shares /= tg_weight; /* * MIN_SHARES has to be unscaled here to support per-CPU partitioning * of a group with small tg->shares value. It is a floor value which is * assigned as a minimum load.weight to the sched_entity representing * the group on a CPU. * * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024 * on an 8-core system with 8 tasks each runnable on one CPU shares has * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In * case no task is runnable on a CPU MIN_SHARES=2 should be returned * instead of 0. */ return clamp_t(long, shares, MIN_SHARES, tg_shares); } /* * This calculates the effective runnable weight for a group entity based on * the group entity weight calculated above. * * Because of the above approximation (2), our group entity weight is * an load_avg based ratio (3). This means that it includes blocked load and * does not represent the runnable weight. * * Approximate the group entity's runnable weight per ratio from the group * runqueue: * * grq->avg.runnable_load_avg * ge->runnable_weight = ge->load.weight * -------------------------- (7) * grq->avg.load_avg * * However, analogous to above, since the avg numbers are slow, this leads to * transients in the from-idle case. 
Instead we use: * * ge->runnable_weight = ge->load.weight * * * max(grq->avg.runnable_load_avg, grq->runnable_weight) * ----------------------------------------------------- (8) * max(grq->avg.load_avg, grq->load.weight) * * Where these max() serve both to use the 'instant' values to fix the slow * from-idle and avoid the /0 on to-idle, similar to (6). */ static long calc_group_runnable(struct cfs_rq *cfs_rq, long shares) { long runnable, load_avg; load_avg = max(cfs_rq->avg.load_avg, scale_load_down(cfs_rq->load.weight)); runnable = max(cfs_rq->avg.runnable_load_avg, scale_load_down(cfs_rq->runnable_weight)); runnable *= shares; if (load_avg) runnable /= load_avg; return clamp_t(long, runnable, MIN_SHARES, shares); } #endif /* CONFIG_SMP */ static inline int throttled_hierarchy(struct cfs_rq *cfs_rq); /* * Recomputes the group entity based on the current state of its group * runqueue. */ static void update_cfs_group(struct sched_entity *se) { struct cfs_rq *gcfs_rq = group_cfs_rq(se); long shares, runnable; if (!gcfs_rq) return; if (throttled_hierarchy(gcfs_rq)) return; #ifndef CONFIG_SMP runnable = shares = READ_ONCE(gcfs_rq->tg->shares); if (likely(se->load.weight == shares)) return; #else shares = calc_group_shares(gcfs_rq); runnable = calc_group_runnable(gcfs_rq, shares); #endif reweight_entity(cfs_rq_of(se), se, shares, runnable); } #else /* CONFIG_FAIR_GROUP_SCHED */ static inline void update_cfs_group(struct sched_entity *se) { } #endif /* CONFIG_FAIR_GROUP_SCHED */ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags) { struct rq *rq = rq_of(cfs_rq); if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) { /* * There are a few boundary cases this might miss but it should * get called often enough that that should (hopefully) not be * a real problem. * * It will not get called when we go idle, because the idle * thread is a different class (!fair), nor will the utilization * number include things like RT tasks. * * As is, the util number is not freq-invariant (we'd have to * implement arch_scale_freq_capacity() for that). * * See cpu_util(). */ cpufreq_update_util(rq, flags); } } #ifdef CONFIG_SMP #ifdef CONFIG_FAIR_GROUP_SCHED /** * update_tg_load_avg - update the tg's load avg * @cfs_rq: the cfs_rq whose avg changed * @force: update regardless of how small the difference * * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load. * However, because tg->load_avg is a global value there are performance * considerations. * * In order to avoid having to look at the other cfs_rq's, we use a * differential update where we store the last value we propagated. This in * turn allows skipping updates if the differential is 'small'. * * Updating tg's load_avg is necessary before update_cfs_share(). */ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) { long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; /* * No need to update load_avg for root_task_group as it is not used. */ if (cfs_rq->tg == &root_task_group) return; if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) { atomic_long_add(delta, &cfs_rq->tg->load_avg); cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; } } /* * Called within set_task_rq() right before setting a task's CPU. The * caller only guarantees p->pi_lock is held; no other assumptions, * including the state of rq->lock, should be made. 
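 *
 * Because of that, on 32-bit we read prev/next last_update_time through the
 * copy/retry scheme below (mirroring cfs_rq_last_update_time()) instead of
 * relying on rq->lock being held.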
*/ void set_task_rq_fair(struct sched_entity *se, struct cfs_rq *prev, struct cfs_rq *next) { u64 p_last_update_time; u64 n_last_update_time; if (!sched_feat(ATTACH_AGE_LOAD)) return; /* * We are supposed to update the task to "current" time, then its up to * date and ready to go to new CPU/cfs_rq. But we have difficulty in * getting what current time is, so simply throw away the out-of-date * time. This will result in the wakee task is less decayed, but giving * the wakee more load sounds not bad. */ if (!(se->avg.last_update_time && prev)) return; #ifndef CONFIG_64BIT { u64 p_last_update_time_copy; u64 n_last_update_time_copy; do { p_last_update_time_copy = prev->load_last_update_time_copy; n_last_update_time_copy = next->load_last_update_time_copy; smp_rmb(); p_last_update_time = prev->avg.last_update_time; n_last_update_time = next->avg.last_update_time; } while (p_last_update_time != p_last_update_time_copy || n_last_update_time != n_last_update_time_copy); } #else p_last_update_time = prev->avg.last_update_time; n_last_update_time = next->avg.last_update_time; #endif __update_load_avg_blocked_se(p_last_update_time, cpu_of(rq_of(prev)), se); se->avg.last_update_time = n_last_update_time; } /* * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to * propagate its contribution. The key to this propagation is the invariant * that for each group: * * ge->avg == grq->avg (1) * * _IFF_ we look at the pure running and runnable sums. Because they * represent the very same entity, just at different points in the hierarchy. * * Per the above update_tg_cfs_util() is trivial and simply copies the running * sum over (but still wrong, because the group entity and group rq do not have * their PELT windows aligned). * * However, update_tg_cfs_runnable() is more complex. So we have: * * ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2) * * And since, like util, the runnable part should be directly transferable, * the following would _appear_ to be the straight forward approach: * * grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3) * * And per (1) we have: * * ge->avg.runnable_avg == grq->avg.runnable_avg * * Which gives: * * ge->load.weight * grq->avg.load_avg * ge->avg.load_avg = ----------------------------------- (4) * grq->load.weight * * Except that is wrong! * * Because while for entities historical weight is not important and we * really only care about our future and therefore can consider a pure * runnable sum, runqueues can NOT do this. * * We specifically want runqueues to have a load_avg that includes * historical weights. Those represent the blocked load, the load we expect * to (shortly) return to us. This only works by keeping the weights as * integral part of the sum. We therefore cannot decompose as per (3). * * Another reason this doesn't work is that runnable isn't a 0-sum entity. * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the * rq itself is runnable anywhere between 2/3 and 1 depending on how the * runnable section of these tasks overlap (or not). If they were to perfectly * align the rq as a whole would be runnable 2/3 of the time. If however we * always have at least 1 runnable task, the rq as a whole is always runnable. * * So we'll have to approximate.. :/ * * Given the constraint: * * ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX * * We can construct a rule that adds runnable to a rq by assuming minimal * overlap. 
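 *
 * Concretely (see update_tg_cfs_runnable() below): on addition we add the new
 * contribution on top of the group entity's current sum and clip the result
 * at LOAD_AVG_MAX, reflecting that running == runnable until the CPU
 * saturates.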
* * On removal, we'll assume each task is equally runnable; which yields: * * grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight * * XXX: only do this for the part of runnable > running ? * */ static inline void update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) { long delta = gcfs_rq->avg.util_avg - se->avg.util_avg; /* Nothing to update */ if (!delta) return; /* * The relation between sum and avg is: * * LOAD_AVG_MAX - 1024 + sa->period_contrib * * however, the PELT windows are not aligned between grq and gse. */ /* Set new sched_entity's utilization */ se->avg.util_avg = gcfs_rq->avg.util_avg; se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX; /* Update parent cfs_rq utilization */ add_positive(&cfs_rq->avg.util_avg, delta); cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX; } static inline void update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) { long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum; unsigned long runnable_load_avg, load_avg; u64 runnable_load_sum, load_sum = 0; s64 delta_sum; if (!runnable_sum) return; gcfs_rq->prop_runnable_sum = 0; if (runnable_sum >= 0) { /* * Add runnable; clip at LOAD_AVG_MAX. Reflects that until * the CPU is saturated running == runnable. */ runnable_sum += se->avg.load_sum; runnable_sum = min(runnable_sum, (long)LOAD_AVG_MAX); } else { /* * Estimate the new unweighted runnable_sum of the gcfs_rq by * assuming all tasks are equally runnable. */ if (scale_load_down(gcfs_rq->load.weight)) { load_sum = div_s64(gcfs_rq->avg.load_sum, scale_load_down(gcfs_rq->load.weight)); } /* But make sure to not inflate se's runnable */ runnable_sum = min(se->avg.load_sum, load_sum); } /* * runnable_sum can't be lower than running_sum * As running sum is scale with CPU capacity wehreas the runnable sum * is not we rescale running_sum 1st */ running_sum = se->avg.util_sum / arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq))); runnable_sum = max(runnable_sum, running_sum); load_sum = (s64)se_weight(se) * runnable_sum; load_avg = div_s64(load_sum, LOAD_AVG_MAX); delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum; delta_avg = load_avg - se->avg.load_avg; se->avg.load_sum = runnable_sum; se->avg.load_avg = load_avg; add_positive(&cfs_rq->avg.load_avg, delta_avg); add_positive(&cfs_rq->avg.load_sum, delta_sum); runnable_load_sum = (s64)se_runnable(se) * runnable_sum; runnable_load_avg = div_s64(runnable_load_sum, LOAD_AVG_MAX); delta_sum = runnable_load_sum - se_weight(se) * se->avg.runnable_load_sum; delta_avg = runnable_load_avg - se->avg.runnable_load_avg; se->avg.runnable_load_sum = runnable_sum; se->avg.runnable_load_avg = runnable_load_avg; if (se->on_rq) { add_positive(&cfs_rq->avg.runnable_load_avg, delta_avg); add_positive(&cfs_rq->avg.runnable_load_sum, delta_sum); } } static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) { cfs_rq->propagate = 1; cfs_rq->prop_runnable_sum += runnable_sum; } /* Update task and its cfs_rq load average */ static inline int propagate_entity_load_avg(struct sched_entity *se) { struct cfs_rq *cfs_rq, *gcfs_rq; if (entity_is_task(se)) return 0; gcfs_rq = group_cfs_rq(se); if (!gcfs_rq->propagate) return 0; gcfs_rq->propagate = 0; cfs_rq = cfs_rq_of(se); add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum); update_tg_cfs_util(cfs_rq, se, gcfs_rq); update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); return 1; } /* * Check if we need to update the load and the utilization of a 
blocked * group_entity: */ static inline bool skip_blocked_update(struct sched_entity *se) { struct cfs_rq *gcfs_rq = group_cfs_rq(se); /* * If sched_entity still have not zero load or utilization, we have to * decay it: */ if (se->avg.load_avg || se->avg.util_avg) return false; /* * If there is a pending propagation, we have to update the load and * the utilization of the sched_entity: */ if (gcfs_rq->propagate) return false; /* * Otherwise, the load and the utilization of the sched_entity is * already zero and there is no pending propagation, so it will be a * waste of time to try to decay it: */ return true; } #else /* CONFIG_FAIR_GROUP_SCHED */ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {} static inline int propagate_entity_load_avg(struct sched_entity *se) { return 0; } static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {} #endif /* CONFIG_FAIR_GROUP_SCHED */ /** * update_cfs_rq_load_avg - update the cfs_rq's load/util averages * @now: current time, as per cfs_rq_clock_task() * @cfs_rq: cfs_rq to update * * The cfs_rq avg is the direct sum of all its entities (blocked and runnable) * avg. The immediate corollary is that all (fair) tasks must be attached, see * post_init_entity_util_avg(). * * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example. * * Returns true if the load decayed or we removed load. * * Since both these conditions indicate a changed cfs_rq->avg.load we should * call update_tg_load_avg() when this function returns true. */ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) { unsigned long removed_load = 0, removed_util = 0, removed_runnable_sum = 0; struct sched_avg *sa = &cfs_rq->avg; int decayed = 0; if (cfs_rq->removed.nr) { unsigned long r; u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib; raw_spin_lock(&cfs_rq->removed.lock); swap(cfs_rq->removed.util_avg, removed_util); swap(cfs_rq->removed.load_avg, removed_load); swap(cfs_rq->removed.runnable_sum, removed_runnable_sum); cfs_rq->removed.nr = 0; raw_spin_unlock(&cfs_rq->removed.lock); r = removed_load; sub_positive(&sa->load_avg, r); sub_positive(&sa->load_sum, r * divider); r = removed_util; sub_positive(&sa->util_avg, r); sub_positive(&sa->util_sum, r * divider); add_tg_cfs_propagate(cfs_rq, -(long)removed_runnable_sum); decayed = 1; } decayed |= __update_load_avg_cfs_rq(now, cpu_of(rq_of(cfs_rq)), cfs_rq); #ifndef CONFIG_64BIT smp_wmb(); cfs_rq->load_last_update_time_copy = sa->last_update_time; #endif if (decayed) cfs_rq_util_change(cfs_rq, 0); return decayed; } /** * attach_entity_load_avg - attach this entity to its cfs_rq load avg * @cfs_rq: cfs_rq to attach to * @se: sched_entity to attach * @flags: migration hints * * Must call update_cfs_rq_load_avg() before this, since we rely on * cfs_rq->avg.last_update_time being current. */ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib; /* * When we attach the @se to the @cfs_rq, we must align the decay * window because without that, really weird and wonderful things can * happen. * * XXX illustrate */ se->avg.last_update_time = cfs_rq->avg.last_update_time; se->avg.period_contrib = cfs_rq->avg.period_contrib; /* * Hell(o) Nasty stuff.. we need to recompute _sum based on the new * period_contrib. This isn't strictly correct, but since we're * entirely outside of the PELT hierarchy, nobody cares if we truncate * _sum a little. 
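 *
 * Roughly, using the usual sum ~= avg * divider relation (with
 * divider = LOAD_AVG_MAX - 1024 + period_contrib) this computes:
 *
 *   util_sum ~= util_avg * divider
 *   load_sum ~= load_avg * divider / se_weight(se)
 *
 * and runnable_load_sum is simply set equal to load_sum.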
*/ se->avg.util_sum = se->avg.util_avg * divider; se->avg.load_sum = divider; if (se_weight(se)) { se->avg.load_sum = div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se)); } se->avg.runnable_load_sum = se->avg.load_sum; enqueue_load_avg(cfs_rq, se); cfs_rq->avg.util_avg += se->avg.util_avg; cfs_rq->avg.util_sum += se->avg.util_sum; add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); cfs_rq_util_change(cfs_rq, flags); } /** * detach_entity_load_avg - detach this entity from its cfs_rq load avg * @cfs_rq: cfs_rq to detach from * @se: sched_entity to detach * * Must call update_cfs_rq_load_avg() before this, since we rely on * cfs_rq->avg.last_update_time being current. */ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { dequeue_load_avg(cfs_rq, se); sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); cfs_rq_util_change(cfs_rq, 0); } /* * Optional action to be done while updating the load average */ #define UPDATE_TG 0x1 #define SKIP_AGE_LOAD 0x2 #define DO_ATTACH 0x4 /* Update task and its cfs_rq load average */ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { u64 now = cfs_rq_clock_task(cfs_rq); struct rq *rq = rq_of(cfs_rq); int cpu = cpu_of(rq); int decayed; /* * Track task load average for carrying it to new CPU after migrated, and * track group sched_entity load average for task_h_load calc in migration */ if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) __update_load_avg_se(now, cpu, cfs_rq, se); decayed = update_cfs_rq_load_avg(now, cfs_rq); decayed |= propagate_entity_load_avg(se); if (!se->avg.last_update_time && (flags & DO_ATTACH)) { /* * DO_ATTACH means we're here from enqueue_entity(). * !last_update_time means we've passed through * migrate_task_rq_fair() indicating we migrated. * * IOW we're enqueueing a task on a new CPU. */ attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION); update_tg_load_avg(cfs_rq, 0); } else if (decayed && (flags & UPDATE_TG)) update_tg_load_avg(cfs_rq, 0); } #ifndef CONFIG_64BIT static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) { u64 last_update_time_copy; u64 last_update_time; do { last_update_time_copy = cfs_rq->load_last_update_time_copy; smp_rmb(); last_update_time = cfs_rq->avg.last_update_time; } while (last_update_time != last_update_time_copy); return last_update_time; } #else static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) { return cfs_rq->avg.last_update_time; } #endif /* * Synchronize entity load avg of dequeued entity without locking * the previous rq. */ void sync_entity_load_avg(struct sched_entity *se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); u64 last_update_time; last_update_time = cfs_rq_last_update_time(cfs_rq); __update_load_avg_blocked_se(last_update_time, cpu_of(rq_of(cfs_rq)), se); } /* * Task first catches up with cfs_rq, and then subtract * itself from the cfs_rq (task must be off the queue now). */ void remove_entity_load_avg(struct sched_entity *se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); unsigned long flags; /* * tasks cannot exit without having gone through wake_up_new_task() -> * post_init_entity_util_avg() which will have added things to the * cfs_rq, so we can remove unconditionally. * * Similarly for groups, they will have passed through * post_init_entity_util_avg() before unregister_sched_fair_group() * calls this. 
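 *
 * We sync the entity to the cfs_rq's last update time first, so that the
 * amounts stashed in cfs_rq->removed below are fully decayed and can be
 * subtracted as-is by update_cfs_rq_load_avg() on the next update.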
*/ sync_entity_load_avg(se); raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags); ++cfs_rq->removed.nr; cfs_rq->removed.util_avg += se->avg.util_avg; cfs_rq->removed.load_avg += se->avg.load_avg; cfs_rq->removed.runnable_sum += se->avg.load_sum; /* == runnable_sum */ raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags); } static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq) { return cfs_rq->avg.runnable_load_avg; } static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq) { return cfs_rq->avg.load_avg; } static int idle_balance(struct rq *this_rq, struct rq_flags *rf); static inline unsigned long task_util(struct task_struct *p) { return READ_ONCE(p->se.avg.util_avg); } static inline unsigned long _task_util_est(struct task_struct *p) { struct util_est ue = READ_ONCE(p->se.avg.util_est); return (max(ue.ewma, ue.enqueued) | UTIL_AVG_UNCHANGED); } static inline unsigned long task_util_est(struct task_struct *p) { return max(task_util(p), _task_util_est(p)); } static inline void util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) { unsigned int enqueued; if (!sched_feat(UTIL_EST)) return; /* Update root cfs_rq's estimated utilization */ enqueued = cfs_rq->avg.util_est.enqueued; enqueued += _task_util_est(p); WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); } /* * Check if a (signed) value is within a specified (unsigned) margin, * based on the observation that: * * abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1) * * NOTE: this only works when value + maring < INT_MAX. */ static inline bool within_margin(int value, int margin) { return ((unsigned int)(value + margin - 1) < (2 * margin - 1)); } static void util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) { long last_ewma_diff; struct util_est ue; if (!sched_feat(UTIL_EST)) return; /* Update root cfs_rq's estimated utilization */ ue.enqueued = cfs_rq->avg.util_est.enqueued; ue.enqueued -= min_t(unsigned int, ue.enqueued, _task_util_est(p)); WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued); /* * Skip update of task's estimated utilization when the task has not * yet completed an activation, e.g. being migrated. */ if (!task_sleep) return; /* * If the PELT values haven't changed since enqueue time, * skip the util_est update. */ ue = p->se.avg.util_est; if (ue.enqueued & UTIL_AVG_UNCHANGED) return; /* * Skip update of task's estimated utilization when its EWMA is * already ~1% close to its last activation value. */ ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED); last_ewma_diff = ue.enqueued - ue.ewma; if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100))) return; /* * Update Task's estimated utilization * * When *p completes an activation we can consolidate another sample * of the task size. 
This is done by storing the current PELT value * as ue.enqueued and by using this value to update the Exponential * Weighted Moving Average (EWMA): * * ewma(t) = w * task_util(p) + (1-w) * ewma(t-1) * = w * task_util(p) + ewma(t-1) - w * ewma(t-1) * = w * (task_util(p) - ewma(t-1)) + ewma(t-1) * = w * ( last_ewma_diff ) + ewma(t-1) * = w * (last_ewma_diff + ewma(t-1) / w) * * Where 'w' is the weight of new samples, which is configured to be * 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT) */ ue.ewma <<= UTIL_EST_WEIGHT_SHIFT; ue.ewma += last_ewma_diff; ue.ewma >>= UTIL_EST_WEIGHT_SHIFT; WRITE_ONCE(p->se.avg.util_est, ue); } static inline int task_fits_capacity(struct task_struct *p, long capacity) { return capacity * 1024 > task_util_est(p) * capacity_margin; } static inline void update_misfit_status(struct task_struct *p, struct rq *rq) { if (!static_branch_unlikely(&sched_asym_cpucapacity)) return; if (!p) { rq->misfit_task_load = 0; return; } if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) { rq->misfit_task_load = 0; return; } rq->misfit_task_load = task_h_load(p); } #else /* CONFIG_SMP */ #define UPDATE_TG 0x0 #define SKIP_AGE_LOAD 0x0 #define DO_ATTACH 0x0 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) { cfs_rq_util_change(cfs_rq, 0); } static inline void remove_entity_load_avg(struct sched_entity *se) {} static inline void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {} static inline void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} static inline int idle_balance(struct rq *rq, struct rq_flags *rf) { return 0; } static inline void util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {} static inline void util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) {} static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {} #endif /* CONFIG_SMP */ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) { #ifdef CONFIG_SCHED_DEBUG s64 d = se->vruntime - cfs_rq->min_vruntime; if (d < 0) d = -d; if (d > 3*sysctl_sched_latency) schedstat_inc(cfs_rq->nr_spread_over); #endif } static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) { u64 vruntime = cfs_rq->min_vruntime; /* * The 'current' period is already promised to the current tasks, * however the extra weight of the new task will slow them down a * little, place the new task so that it fits in the slot that * stays open at the end. */ if (initial && sched_feat(START_DEBIT)) vruntime += sched_vslice(cfs_rq, se); /* sleeps up to a single latency don't count. */ if (!initial) { unsigned long thresh = sysctl_sched_latency; /* * Halve their sleep time's effect, to allow * for a gentler effect of sleepers: */ if (sched_feat(GENTLE_FAIR_SLEEPERS)) thresh >>= 1; vruntime -= thresh; } /* ensure we never gain time by being placed backwards. 
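 * I.e. an entity whose vruntime is already ahead of the computed placement
 * keeps its own, larger vruntime; the sleeper credit only bounds how far
 * back it may be placed.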
*/ se->vruntime = max_vruntime(se->vruntime, vruntime); } static void check_enqueue_throttle(struct cfs_rq *cfs_rq); static inline void check_schedstat_required(void) { #ifdef CONFIG_SCHEDSTATS if (schedstat_enabled()) return; /* Force schedstat enabled if a dependent tracepoint is active */ if (trace_sched_stat_wait_enabled() || trace_sched_stat_sleep_enabled() || trace_sched_stat_iowait_enabled() || trace_sched_stat_blocked_enabled() || trace_sched_stat_runtime_enabled()) { printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, " "stat_blocked and stat_runtime require the " "kernel parameter schedstats=enable or " "kernel.sched_schedstats=1\n"); } #endif } /* * MIGRATION * * dequeue * update_curr() * update_min_vruntime() * vruntime -= min_vruntime * * enqueue * update_curr() * update_min_vruntime() * vruntime += min_vruntime * * this way the vruntime transition between RQs is done when both * min_vruntime are up-to-date. * * WAKEUP (remote) * * ->migrate_task_rq_fair() (p->state == TASK_WAKING) * vruntime -= min_vruntime * * enqueue * update_curr() * update_min_vruntime() * vruntime += min_vruntime * * this way we don't have the most up-to-date min_vruntime on the originating * CPU and an up-to-date min_vruntime on the destination CPU. */ static void enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED); bool curr = cfs_rq->curr == se; /* * If we're the current task, we must renormalise before calling * update_curr(). */ if (renorm && curr) se->vruntime += cfs_rq->min_vruntime; update_curr(cfs_rq); /* * Otherwise, renormalise after, such that we're placed at the current * moment in time, instead of some random moment in the past. Being * placed in the past could significantly boost this task to the * fairness detriment of existing tasks. */ if (renorm && !curr) se->vruntime += cfs_rq->min_vruntime; /* * When enqueuing a sched_entity, we must: * - Update loads to have both entity and cfs_rq synced with now. 
* - Add its load to cfs_rq->runnable_avg * - For group_entity, update its weight to reflect the new share of * its group cfs_rq * - Add its new weight to cfs_rq->load.weight */ update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); update_cfs_group(se); enqueue_runnable_load_avg(cfs_rq, se); account_entity_enqueue(cfs_rq, se); if (flags & ENQUEUE_WAKEUP) place_entity(cfs_rq, se, 0); check_schedstat_required(); update_stats_enqueue(cfs_rq, se, flags); check_spread(cfs_rq, se); if (!curr) __enqueue_entity(cfs_rq, se); se->on_rq = 1; if (cfs_rq->nr_running == 1) { list_add_leaf_cfs_rq(cfs_rq); check_enqueue_throttle(cfs_rq); } } static void __clear_buddies_last(struct sched_entity *se) { for_each_sched_entity(se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); if (cfs_rq->last != se) break; cfs_rq->last = NULL; } } static void __clear_buddies_next(struct sched_entity *se) { for_each_sched_entity(se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); if (cfs_rq->next != se) break; cfs_rq->next = NULL; } } static void __clear_buddies_skip(struct sched_entity *se) { for_each_sched_entity(se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); if (cfs_rq->skip != se) break; cfs_rq->skip = NULL; } } static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) { if (cfs_rq->last == se) __clear_buddies_last(se); if (cfs_rq->next == se) __clear_buddies_next(se); if (cfs_rq->skip == se) __clear_buddies_skip(se); } static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); static void dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { /* * Update run-time statistics of the 'current'. */ update_curr(cfs_rq); /* * When dequeuing a sched_entity, we must: * - Update loads to have both entity and cfs_rq synced with now. * - Subtract its load from the cfs_rq->runnable_avg. * - Subtract its previous weight from cfs_rq->load.weight. * - For group entity, update its weight to reflect the new share * of its group cfs_rq. */ update_load_avg(cfs_rq, se, UPDATE_TG); dequeue_runnable_load_avg(cfs_rq, se); update_stats_dequeue(cfs_rq, se, flags); clear_buddies(cfs_rq, se); if (se != cfs_rq->curr) __dequeue_entity(cfs_rq, se); se->on_rq = 0; account_entity_dequeue(cfs_rq, se); /* * Normalize after update_curr(); which will also have moved * min_vruntime if @se is the one holding it back. But before doing * update_min_vruntime() again, which will discount @se's position and * can move min_vruntime forward still more. */ if (!(flags & DEQUEUE_SLEEP)) se->vruntime -= cfs_rq->min_vruntime; /* return excess runtime on last dequeue */ return_cfs_rq_runtime(cfs_rq); update_cfs_group(se); /* * Now advance min_vruntime if @se was the entity holding it back, * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be * put back on, and if we advance min_vruntime, we'll be placed back * further than we started -- ie. we'll be penalized. */ if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE) update_min_vruntime(cfs_rq); } /* * Preempt the current task with a newly woken task if needed: */ static void check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) { unsigned long ideal_runtime, delta_exec; struct sched_entity *se; s64 delta; ideal_runtime = sched_slice(cfs_rq, curr); delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; if (delta_exec > ideal_runtime) { resched_curr(rq_of(cfs_rq)); /* * The current task ran long enough, ensure it doesn't get * re-elected due to buddy favours. 
*/ clear_buddies(cfs_rq, curr); return; } /* * Ensure that a task that missed wakeup preemption by a * narrow margin doesn't have to wait for a full slice. * This also mitigates buddy induced latencies under load. */ if (delta_exec < sysctl_sched_min_granularity) return; se = __pick_first_entity(cfs_rq); delta = curr->vruntime - se->vruntime; if (delta < 0) return; if (delta > ideal_runtime) resched_curr(rq_of(cfs_rq)); } static void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) { /* 'current' is not kept within the tree. */ if (se->on_rq) { /* * Any task has to be enqueued before it get to execute on * a CPU. So account for the time it spent waiting on the * runqueue. */ update_stats_wait_end(cfs_rq, se); __dequeue_entity(cfs_rq, se); update_load_avg(cfs_rq, se, UPDATE_TG); } update_stats_curr_start(cfs_rq, se); cfs_rq->curr = se; /* * Track our maximum slice length, if the CPU's load is at * least twice that of our own weight (i.e. dont track it * when there are only lesser-weight tasks around): */ if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) { schedstat_set(se->statistics.slice_max, max((u64)schedstat_val(se->statistics.slice_max), se->sum_exec_runtime - se->prev_sum_exec_runtime)); } se->prev_sum_exec_runtime = se->sum_exec_runtime; } static int wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se); /* * Pick the next process, keeping these things in mind, in this order: * 1) keep things fair between processes/task groups * 2) pick the "next" process, since someone really wants that to run * 3) pick the "last" process, for cache locality * 4) do not run the "skip" process, if something else is available */ static struct sched_entity * pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) { struct sched_entity *left = __pick_first_entity(cfs_rq); struct sched_entity *se; /* * If curr is set we have to see if its left of the leftmost entity * still in the tree, provided there was anything in the tree at all. */ if (!left || (curr && entity_before(curr, left))) left = curr; se = left; /* ideally we run the leftmost entity */ /* * Avoid running the skip buddy, if running something else can * be done without getting too unfair. */ if (cfs_rq->skip == se) { struct sched_entity *second; if (se == curr) { second = __pick_first_entity(cfs_rq); } else { second = __pick_next_entity(se); if (!second || (curr && entity_before(curr, second))) second = curr; } if (second && wakeup_preempt_entity(second, left) < 1) se = second; } /* * Prefer last buddy, try to return the CPU to a preempted task. */ if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) se = cfs_rq->last; /* * Someone really wants this to run. If it's not unfair, run it. */ if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) se = cfs_rq->next; clear_buddies(cfs_rq, se); return se; } static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq); static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) { /* * If still on the runqueue then deactivate_task() * was not called and update_curr() has to be done: */ if (prev->on_rq) update_curr(cfs_rq); /* throttle cfs_rqs exceeding runtime */ check_cfs_rq_runtime(cfs_rq); check_spread(cfs_rq, prev); if (prev->on_rq) { update_stats_wait_start(cfs_rq, prev); /* Put 'current' back into the tree. 
*/ __enqueue_entity(cfs_rq, prev); /* in !on_rq case, update occurred at dequeue */ update_load_avg(cfs_rq, prev, 0); } cfs_rq->curr = NULL; } static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) { /* * Update run-time statistics of the 'current'. */ update_curr(cfs_rq); /* * Ensure that runnable average is periodically updated. */ update_load_avg(cfs_rq, curr, UPDATE_TG); update_cfs_group(curr); #ifdef CONFIG_SCHED_HRTICK /* * queued ticks are scheduled to match the slice, so don't bother * validating it and just reschedule. */ if (queued) { resched_curr(rq_of(cfs_rq)); return; } /* * don't let the period tick interfere with the hrtick preemption */ if (!sched_feat(DOUBLE_TICK) && hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) return; #endif if (cfs_rq->nr_running > 1) check_preempt_tick(cfs_rq, curr); } /************************************************** * CFS bandwidth control machinery */ #ifdef CONFIG_CFS_BANDWIDTH #ifdef HAVE_JUMP_LABEL static struct static_key __cfs_bandwidth_used; static inline bool cfs_bandwidth_used(void) { return static_key_false(&__cfs_bandwidth_used); } void cfs_bandwidth_usage_inc(void) { static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used); } void cfs_bandwidth_usage_dec(void) { static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used); } #else /* HAVE_JUMP_LABEL */ static bool cfs_bandwidth_used(void) { return true; } void cfs_bandwidth_usage_inc(void) {} void cfs_bandwidth_usage_dec(void) {} #endif /* HAVE_JUMP_LABEL */ /* * default period for cfs group bandwidth. * default: 0.1s, units: nanoseconds */ static inline u64 default_cfs_period(void) { return 100000000ULL; } static inline u64 sched_cfs_bandwidth_slice(void) { return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC; } /* * Replenish runtime according to assigned quota and update expiration time. * We use sched_clock_cpu directly instead of rq->clock to avoid adding * additional synchronization around rq->lock. 
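 *
 * Purely as an illustration (made-up numbers): with quota = 50ms and
 * period = 100ms, each refill resets cfs_b->runtime to the full 50ms and
 * pushes runtime_expires one period ahead, i.e. the group may consume at
 * most 50ms of CPU time per 100ms window.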
* * requires cfs_b->lock */ void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) { u64 now; if (cfs_b->quota == RUNTIME_INF) return; now = sched_clock_cpu(smp_processor_id()); cfs_b->runtime = cfs_b->quota; cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period); cfs_b->expires_seq++; } static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) { return &tg->cfs_bandwidth; } /* rq->task_clock normalized against any time this cfs_rq has spent throttled */ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) { if (unlikely(cfs_rq->throttle_count)) return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time; return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time; } /* returns 0 on failure to allocate runtime */ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) { struct task_group *tg = cfs_rq->tg; struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); u64 amount = 0, min_amount, expires; int expires_seq; /* note: this is a positive sum as runtime_remaining <= 0 */ min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; raw_spin_lock(&cfs_b->lock); if (cfs_b->quota == RUNTIME_INF) amount = min_amount; else { start_cfs_bandwidth(cfs_b); if (cfs_b->runtime > 0) { amount = min(cfs_b->runtime, min_amount); cfs_b->runtime -= amount; cfs_b->idle = 0; } } expires_seq = cfs_b->expires_seq; expires = cfs_b->runtime_expires; raw_spin_unlock(&cfs_b->lock); cfs_rq->runtime_remaining += amount; /* * we may have advanced our local expiration to account for allowed * spread between our sched_clock and the one on which runtime was * issued. */ if (cfs_rq->expires_seq != expires_seq) { cfs_rq->expires_seq = expires_seq; cfs_rq->runtime_expires = expires; } return cfs_rq->runtime_remaining > 0; } /* * Note: This depends on the synchronization provided by sched_clock and the * fact that rq->clock snapshots this value. */ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq) { struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); /* if the deadline is ahead of our clock, nothing to do */ if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0)) return; if (cfs_rq->runtime_remaining < 0) return; /* * If the local deadline has passed we have to consider the * possibility that our sched_clock is 'fast' and the global deadline * has not truly expired. * * Fortunately we can check determine whether this the case by checking * whether the global deadline(cfs_b->expires_seq) has advanced. 
*/ if (cfs_rq->expires_seq == cfs_b->expires_seq) { /* extend local deadline, drift is bounded above by 2 ticks */ cfs_rq->runtime_expires += TICK_NSEC; } else { /* global deadline is ahead, expiration has passed */ cfs_rq->runtime_remaining = 0; } } static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) { /* dock delta_exec before expiring quota (as it could span periods) */ cfs_rq->runtime_remaining -= delta_exec; expire_cfs_rq_runtime(cfs_rq); if (likely(cfs_rq->runtime_remaining > 0)) return; /* * if we're unable to extend our runtime we resched so that the active * hierarchy can be throttled */ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) resched_curr(rq_of(cfs_rq)); } static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) { if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) return; __account_cfs_rq_runtime(cfs_rq, delta_exec); } static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) { return cfs_bandwidth_used() && cfs_rq->throttled; } /* check whether cfs_rq, or any parent, is throttled */ static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) { return cfs_bandwidth_used() && cfs_rq->throttle_count; } /* * Ensure that neither of the group entities corresponding to src_cpu or * dest_cpu are members of a throttled hierarchy when performing group * load-balance operations. */ static inline int throttled_lb_pair(struct task_group *tg, int src_cpu, int dest_cpu) { struct cfs_rq *src_cfs_rq, *dest_cfs_rq; src_cfs_rq = tg->cfs_rq[src_cpu]; dest_cfs_rq = tg->cfs_rq[dest_cpu]; return throttled_hierarchy(src_cfs_rq) || throttled_hierarchy(dest_cfs_rq); } static int tg_unthrottle_up(struct task_group *tg, void *data) { struct rq *rq = data; struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; cfs_rq->throttle_count--; if (!cfs_rq->throttle_count) { /* adjust cfs_rq_clock_task() */ cfs_rq->throttled_clock_task_time += rq_clock_task(rq) - cfs_rq->throttled_clock_task; } return 0; } static int tg_throttle_down(struct task_group *tg, void *data) { struct rq *rq = data; struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; /* group is entering throttled state, stop time */ if (!cfs_rq->throttle_count) cfs_rq->throttled_clock_task = rq_clock_task(rq); cfs_rq->throttle_count++; return 0; } static void throttle_cfs_rq(struct cfs_rq *cfs_rq) { struct rq *rq = rq_of(cfs_rq); struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); struct sched_entity *se; long task_delta, dequeue = 1; bool empty; se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; /* freeze hierarchy runnable averages while throttled */ rcu_read_lock(); walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); rcu_read_unlock(); task_delta = cfs_rq->h_nr_running; for_each_sched_entity(se) { struct cfs_rq *qcfs_rq = cfs_rq_of(se); /* throttled entity or throttle-on-deactivate */ if (!se->on_rq) break; if (dequeue) dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); qcfs_rq->h_nr_running -= task_delta; if (qcfs_rq->load.weight) dequeue = 0; } if (!se) sub_nr_running(rq, task_delta); cfs_rq->throttled = 1; cfs_rq->throttled_clock = rq_clock(rq); raw_spin_lock(&cfs_b->lock); empty = list_empty(&cfs_b->throttled_cfs_rq); /* * Add to the _head_ of the list, so that an already-started * distribute_cfs_runtime will not see us. If disribute_cfs_runtime is * not running add to the tail so that later runqueues don't get starved. 
*/ if (cfs_b->distribute_running) list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); else list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); /* * If we're the first throttled task, make sure the bandwidth * timer is running. */ if (empty) start_cfs_bandwidth(cfs_b); raw_spin_unlock(&cfs_b->lock); } void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) { struct rq *rq = rq_of(cfs_rq); struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); struct sched_entity *se; int enqueue = 1; long task_delta; se = cfs_rq->tg->se[cpu_of(rq)]; cfs_rq->throttled = 0; update_rq_clock(rq); raw_spin_lock(&cfs_b->lock); cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; list_del_rcu(&cfs_rq->throttled_list); raw_spin_unlock(&cfs_b->lock); /* update hierarchical throttle state */ walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); if (!cfs_rq->load.weight) return; task_delta = cfs_rq->h_nr_running; for_each_sched_entity(se) { if (se->on_rq) enqueue = 0; cfs_rq = cfs_rq_of(se); if (enqueue) enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); cfs_rq->h_nr_running += task_delta; if (cfs_rq_throttled(cfs_rq)) break; } if (!se) add_nr_running(rq, task_delta); /* Determine whether we need to wake up potentially idle CPU: */ if (rq->curr == rq->idle && rq->cfs.nr_running) resched_curr(rq); } static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining, u64 expires) { struct cfs_rq *cfs_rq; u64 runtime; u64 starting_runtime = remaining; rcu_read_lock(); list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, throttled_list) { struct rq *rq = rq_of(cfs_rq); struct rq_flags rf; rq_lock(rq, &rf); if (!cfs_rq_throttled(cfs_rq)) goto next; runtime = -cfs_rq->runtime_remaining + 1; if (runtime > remaining) runtime = remaining; remaining -= runtime; cfs_rq->runtime_remaining += runtime; cfs_rq->runtime_expires = expires; /* we check whether we're throttled above */ if (cfs_rq->runtime_remaining > 0) unthrottle_cfs_rq(cfs_rq); next: rq_unlock(rq, &rf); if (!remaining) break; } rcu_read_unlock(); return starting_runtime - remaining; } /* * Responsible for refilling a task_group's bandwidth and unthrottling its * cfs_rqs as appropriate. If there has been no activity within the last * period the timer is deactivated until scheduling resumes; cfs_b->idle is * used to track this state. */ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) { u64 runtime, runtime_expires; int throttled; /* no need to continue the timer with no bandwidth constraint */ if (cfs_b->quota == RUNTIME_INF) goto out_deactivate; throttled = !list_empty(&cfs_b->throttled_cfs_rq); cfs_b->nr_periods += overrun; /* * idle depends on !throttled (for the case of a large deficit), and if * we're going inactive then everything else can be deferred */ if (cfs_b->idle && !throttled) goto out_deactivate; __refill_cfs_bandwidth_runtime(cfs_b); if (!throttled) { /* mark as potentially idle for the upcoming period */ cfs_b->idle = 1; return 0; } /* account preceding periods in which throttling occurred */ cfs_b->nr_throttled += overrun; runtime_expires = cfs_b->runtime_expires; /* * This check is repeated as we are holding onto the new bandwidth while * we unthrottle. This can potentially race with an unthrottled group * trying to acquire new bandwidth from the global pool. This can result * in us over-using our runtime if it is all used during this loop, but * only by limited amounts in that extreme case. 
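 *
 * The distribute_running flag, tested here and in do_sched_cfs_slack_timer(),
 * ensures that only one caller at a time hands out cfs_b->runtime while
 * cfs_b->lock is dropped for distribute_cfs_runtime().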
*/ while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) { runtime = cfs_b->runtime; cfs_b->distribute_running = 1; raw_spin_unlock(&cfs_b->lock); /* we can't nest cfs_b->lock while distributing bandwidth */ runtime = distribute_cfs_runtime(cfs_b, runtime, runtime_expires); raw_spin_lock(&cfs_b->lock); cfs_b->distribute_running = 0; throttled = !list_empty(&cfs_b->throttled_cfs_rq); lsub_positive(&cfs_b->runtime, runtime); } /* * While we are ensured activity in the period following an * unthrottle, this also covers the case in which the new bandwidth is * insufficient to cover the existing bandwidth deficit. (Forcing the * timer to remain active while there are any throttled entities.) */ cfs_b->idle = 0; return 0; out_deactivate: return 1; } /* a cfs_rq won't donate quota below this amount */ static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC; /* minimum remaining period time to redistribute slack quota */ static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC; /* how long we wait to gather additional slack before distributing */ static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC; /* * Are we near the end of the current quota period? * * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the * hrtimer base being cleared by hrtimer_start. In the case of * migrate_hrtimers, base is never cleared, so we are fine. */ static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire) { struct hrtimer *refresh_timer = &cfs_b->period_timer; u64 remaining; /* if the call-back is running a quota refresh is already occurring */ if (hrtimer_callback_running(refresh_timer)) return 1; /* is a quota refresh about to occur? */ remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer)); if (remaining < min_expire) return 1; return 0; } static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b) { u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration; /* if there's a quota refresh soon don't bother with slack */ if (runtime_refresh_within(cfs_b, min_left)) return; hrtimer_start(&cfs_b->slack_timer, ns_to_ktime(cfs_bandwidth_slack_period), HRTIMER_MODE_REL); } /* we know any runtime found here is valid as update_curr() precedes return */ static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) { struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; if (slack_runtime <= 0) return; raw_spin_lock(&cfs_b->lock); if (cfs_b->quota != RUNTIME_INF && cfs_rq->runtime_expires == cfs_b->runtime_expires) { cfs_b->runtime += slack_runtime; /* we are under rq->lock, defer unthrottling using a timer */ if (cfs_b->runtime > sched_cfs_bandwidth_slice() && !list_empty(&cfs_b->throttled_cfs_rq)) start_cfs_slack_bandwidth(cfs_b); } raw_spin_unlock(&cfs_b->lock); /* even if it's not valid for return we don't want to try again */ cfs_rq->runtime_remaining -= slack_runtime; } static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) { if (!cfs_bandwidth_used()) return; if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) return; __return_cfs_rq_runtime(cfs_rq); } /* * This is done with a timer (instead of inline with bandwidth return) since * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs. 
*/ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) { u64 runtime = 0, slice = sched_cfs_bandwidth_slice(); u64 expires; /* confirm we're still not at a refresh boundary */ raw_spin_lock(&cfs_b->lock); if (cfs_b->distribute_running) { raw_spin_unlock(&cfs_b->lock); return; } if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) { raw_spin_unlock(&cfs_b->lock); return; } if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) runtime = cfs_b->runtime; expires = cfs_b->runtime_expires; if (runtime) cfs_b->distribute_running = 1; raw_spin_unlock(&cfs_b->lock); if (!runtime) return; runtime = distribute_cfs_runtime(cfs_b, runtime, expires); raw_spin_lock(&cfs_b->lock); if (expires == cfs_b->runtime_expires) lsub_positive(&cfs_b->runtime, runtime); cfs_b->distribute_running = 0; raw_spin_unlock(&cfs_b->lock); } /* * When a group wakes up we want to make sure that its quota is not already * expired/exceeded, otherwise it may be allowed to steal additional ticks of * runtime as update_curr() throttling can not not trigger until it's on-rq. */ static void check_enqueue_throttle(struct cfs_rq *cfs_rq) { if (!cfs_bandwidth_used()) return; /* an active group must be handled by the update_curr()->put() path */ if (!cfs_rq->runtime_enabled || cfs_rq->curr) return; /* ensure the group is not already throttled */ if (cfs_rq_throttled(cfs_rq)) return; /* update runtime allocation */ account_cfs_rq_runtime(cfs_rq, 0); if (cfs_rq->runtime_remaining <= 0) throttle_cfs_rq(cfs_rq); } static void sync_throttle(struct task_group *tg, int cpu) { struct cfs_rq *pcfs_rq, *cfs_rq; if (!cfs_bandwidth_used()) return; if (!tg->parent) return; cfs_rq = tg->cfs_rq[cpu]; pcfs_rq = tg->parent->cfs_rq[cpu]; cfs_rq->throttle_count = pcfs_rq->throttle_count; cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); } /* conditionally throttle active cfs_rq's from put_prev_entity() */ static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { if (!cfs_bandwidth_used()) return false; if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) return false; /* * it's possible for a throttled entity to be forced into a running * state (e.g. set_curr_task), in this case we're finished. */ if (cfs_rq_throttled(cfs_rq)) return true; throttle_cfs_rq(cfs_rq); return true; } static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) { struct cfs_bandwidth *cfs_b = container_of(timer, struct cfs_bandwidth, slack_timer); do_sched_cfs_slack_timer(cfs_b); return HRTIMER_NORESTART; } static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) { struct cfs_bandwidth *cfs_b = container_of(timer, struct cfs_bandwidth, period_timer); int overrun; int idle = 0; raw_spin_lock(&cfs_b->lock); for (;;) { overrun = hrtimer_forward_now(timer, cfs_b->period); if (!overrun) break; idle = do_sched_cfs_period_timer(cfs_b, overrun); } if (idle) cfs_b->period_active = 0; raw_spin_unlock(&cfs_b->lock); return idle ? 
HRTIMER_NORESTART : HRTIMER_RESTART; } void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) { raw_spin_lock_init(&cfs_b->lock); cfs_b->runtime = 0; cfs_b->quota = RUNTIME_INF; cfs_b->period = ns_to_ktime(default_cfs_period()); INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); cfs_b->period_timer.function = sched_cfs_period_timer; hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); cfs_b->slack_timer.function = sched_cfs_slack_timer; cfs_b->distribute_running = 0; } static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) { cfs_rq->runtime_enabled = 0; INIT_LIST_HEAD(&cfs_rq->throttled_list); } void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) { u64 overrun; lockdep_assert_held(&cfs_b->lock); if (cfs_b->period_active) return; cfs_b->period_active = 1; overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); cfs_b->runtime_expires += (overrun + 1) * ktime_to_ns(cfs_b->period); cfs_b->expires_seq++; hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); } static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) { /* init_cfs_bandwidth() was not called */ if (!cfs_b->throttled_cfs_rq.next) return; hrtimer_cancel(&cfs_b->period_timer); hrtimer_cancel(&cfs_b->slack_timer); } /* * Both these CPU hotplug callbacks race against unregister_fair_sched_group() * * The race is harmless, since modifying bandwidth settings of unhooked group * bits doesn't do much. */ /* cpu online calback */ static void __maybe_unused update_runtime_enabled(struct rq *rq) { struct task_group *tg; lockdep_assert_held(&rq->lock); rcu_read_lock(); list_for_each_entry_rcu(tg, &task_groups, list) { struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; raw_spin_lock(&cfs_b->lock); cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF; raw_spin_unlock(&cfs_b->lock); } rcu_read_unlock(); } /* cpu offline callback */ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) { struct task_group *tg; lockdep_assert_held(&rq->lock); rcu_read_lock(); list_for_each_entry_rcu(tg, &task_groups, list) { struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; if (!cfs_rq->runtime_enabled) continue; /* * clock_task is not advancing so we just need to make sure * there's some valid quota amount */ cfs_rq->runtime_remaining = 1; /* * Offline rq is schedulable till CPU is completely disabled * in take_cpu_down(), so we prevent new cfs throttling here. 
*/ cfs_rq->runtime_enabled = 0; if (cfs_rq_throttled(cfs_rq)) unthrottle_cfs_rq(cfs_rq); } rcu_read_unlock(); } #else /* CONFIG_CFS_BANDWIDTH */ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) { return rq_clock_task(rq_of(cfs_rq)); } static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; } static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} static inline void sync_throttle(struct task_group *tg, int cpu) {} static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) { return 0; } static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) { return 0; } static inline int throttled_lb_pair(struct task_group *tg, int src_cpu, int dest_cpu) { return 0; } void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} #ifdef CONFIG_FAIR_GROUP_SCHED static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} #endif static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) { return NULL; } static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} static inline void update_runtime_enabled(struct rq *rq) {} static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {} #endif /* CONFIG_CFS_BANDWIDTH */ /************************************************** * CFS operations on tasks: */ #ifdef CONFIG_SCHED_HRTICK static void hrtick_start_fair(struct rq *rq, struct task_struct *p) { struct sched_entity *se = &p->se; struct cfs_rq *cfs_rq = cfs_rq_of(se); SCHED_WARN_ON(task_rq(p) != rq); if (rq->cfs.h_nr_running > 1) { u64 slice = sched_slice(cfs_rq, se); u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; s64 delta = slice - ran; if (delta < 0) { if (rq->curr == p) resched_curr(rq); return; } hrtick_start(rq, delta); } } /* * called from enqueue/dequeue and updates the hrtick when the * current task is from our class and nr_running is low enough * to matter. */ static void hrtick_update(struct rq *rq) { struct task_struct *curr = rq->curr; if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class) return; if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) hrtick_start_fair(rq, curr); } #else /* !CONFIG_SCHED_HRTICK */ static inline void hrtick_start_fair(struct rq *rq, struct task_struct *p) { } static inline void hrtick_update(struct rq *rq) { } #endif #ifdef CONFIG_SMP static inline unsigned long cpu_util(int cpu); static unsigned long capacity_of(int cpu); static inline bool cpu_overutilized(int cpu) { return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin); } static inline void update_overutilized_status(struct rq *rq) { if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED); } #else static inline void update_overutilized_status(struct rq *rq) { } #endif /* * The enqueue_task method is called before nr_running is * increased. Here we update the fair scheduling stats and * then put the task into the rbtree: */ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) { struct cfs_rq *cfs_rq; struct sched_entity *se = &p->se; /* * The code below (indirectly) updates schedutil which looks at * the cfs_rq utilization to select a frequency. * Let's add the task's estimated utilization to the cfs_rq's * estimated utilization, before we update schedutil. 
*/ util_est_enqueue(&rq->cfs, p); /* * If in_iowait is set, the code below may not trigger any cpufreq * utilization updates, so do it here explicitly with the IOWAIT flag * passed. */ if (p->in_iowait) cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT); for_each_sched_entity(se) { if (se->on_rq) break; cfs_rq = cfs_rq_of(se); enqueue_entity(cfs_rq, se, flags); /* * end evaluation on encountering a throttled cfs_rq * * note: in the case of encountering a throttled cfs_rq we will * post the final h_nr_running increment below. */ if (cfs_rq_throttled(cfs_rq)) break; cfs_rq->h_nr_running++; flags = ENQUEUE_WAKEUP; } for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); cfs_rq->h_nr_running++; if (cfs_rq_throttled(cfs_rq)) break; update_load_avg(cfs_rq, se, UPDATE_TG); update_cfs_group(se); } if (!se) { add_nr_running(rq, 1); /* * Since new tasks are assigned an initial util_avg equal to * half of the spare capacity of their CPU, tiny tasks have the * ability to cross the overutilized threshold, which will * result in the load balancer ruining all the task placement * done by EAS. As a way to mitigate that effect, do not account * for the first enqueue operation of new tasks during the * overutilized flag detection. * * A better way of solving this problem would be to wait for * the PELT signals of tasks to converge before taking them * into account, but that is not straightforward to implement, * and the following generally works well enough in practice. */ if (flags & ENQUEUE_WAKEUP) update_overutilized_status(rq); } hrtick_update(rq); } static void set_next_buddy(struct sched_entity *se); /* * The dequeue_task method is called before nr_running is * decreased. We remove the task from the rbtree and * update the fair scheduling stats: */ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) { struct cfs_rq *cfs_rq; struct sched_entity *se = &p->se; int task_sleep = flags & DEQUEUE_SLEEP; for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); dequeue_entity(cfs_rq, se, flags); /* * end evaluation on encountering a throttled cfs_rq * * note: in the case of encountering a throttled cfs_rq we will * post the final h_nr_running decrement below. */ if (cfs_rq_throttled(cfs_rq)) break; cfs_rq->h_nr_running--; /* Don't dequeue parent if it has other entities besides us */ if (cfs_rq->load.weight) { /* Avoid re-evaluating load for this entity: */ se = parent_entity(se); /* * Bias pick_next to pick a task from this cfs_rq, as * p is sleeping when it is within its sched_slice. */ if (task_sleep && se && !throttled_hierarchy(cfs_rq)) set_next_buddy(se); break; } flags |= DEQUEUE_SLEEP; } for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); cfs_rq->h_nr_running--; if (cfs_rq_throttled(cfs_rq)) break; update_load_avg(cfs_rq, se, UPDATE_TG); update_cfs_group(se); } if (!se) sub_nr_running(rq, 1); util_est_dequeue(&rq->cfs, p, task_sleep); hrtick_update(rq); } #ifdef CONFIG_SMP /* Working cpumask for: load_balance, load_balance_newidle. */ DEFINE_PER_CPU(cpumask_var_t, load_balance_mask); DEFINE_PER_CPU(cpumask_var_t, select_idle_mask); #ifdef CONFIG_NO_HZ_COMMON /* * per rq 'load' arrray crap; XXX kill this. 
*/ /* * The exact cpuload calculated at every tick would be: * * load' = (1 - 1/2^i) * load + (1/2^i) * cur_load * * If a CPU misses updates for n ticks (as it was idle) and update gets * called on the n+1-th tick when CPU may be busy, then we have: * * load_n = (1 - 1/2^i)^n * load_0 * load_n+1 = (1 - 1/2^i) * load_n + (1/2^i) * cur_load * * decay_load_missed() below does efficient calculation of * * load' = (1 - 1/2^i)^n * load * * Because x^(n+m) := x^n * x^m we can decompose any x^n in power-of-2 factors. * This allows us to precompute the above in said factors, thereby allowing the * reduction of an arbitrary n in O(log_2 n) steps. (See also * fixed_power_int()) * * The calculation is approximated on a 128 point scale. */ #define DEGRADE_SHIFT 7 static const u8 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128}; static const u8 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = { { 0, 0, 0, 0, 0, 0, 0, 0 }, { 64, 32, 8, 0, 0, 0, 0, 0 }, { 96, 72, 40, 12, 1, 0, 0, 0 }, { 112, 98, 75, 43, 15, 1, 0, 0 }, { 120, 112, 98, 76, 45, 16, 2, 0 } }; /* * Update cpu_load for any missed ticks, due to tickless idle. The backlog * would be when CPU is idle and so we just decay the old load without * adding any new load. */ static unsigned long decay_load_missed(unsigned long load, unsigned long missed_updates, int idx) { int j = 0; if (!missed_updates) return load; if (missed_updates >= degrade_zero_ticks[idx]) return 0; if (idx == 1) return load >> missed_updates; while (missed_updates) { if (missed_updates % 2) load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT; missed_updates >>= 1; j++; } return load; } static struct { cpumask_var_t idle_cpus_mask; atomic_t nr_cpus; int has_blocked; /* Idle CPUS has blocked load */ unsigned long next_balance; /* in jiffy units */ unsigned long next_blocked; /* Next update of blocked load in jiffies */ } nohz ____cacheline_aligned; #endif /* CONFIG_NO_HZ_COMMON */ /** * __cpu_load_update - update the rq->cpu_load[] statistics * @this_rq: The rq to update statistics for * @this_load: The current load * @pending_updates: The number of missed updates * * Update rq->cpu_load[] statistics. This function is usually called every * scheduler tick (TICK_NSEC). * * This function computes a decaying average: * * load[i]' = (1 - 1/2^i) * load[i] + (1/2^i) * load * * Because of NOHZ it might not get called on every tick which gives need for * the @pending_updates argument. * * load[i]_n = (1 - 1/2^i) * load[i]_n-1 + (1/2^i) * load_n-1 * = A * load[i]_n-1 + B ; A := (1 - 1/2^i), B := (1/2^i) * load * = A * (A * load[i]_n-2 + B) + B * = A * (A * (A * load[i]_n-3 + B) + B) + B * = A^3 * load[i]_n-3 + (A^2 + A + 1) * B * = A^n * load[i]_0 + (A^(n-1) + A^(n-2) + ... + 1) * B * = A^n * load[i]_0 + ((1 - A^n) / (1 - A)) * B * = (1 - 1/2^i)^n * (load[i]_0 - load) + load * * In the above we've assumed load_n := load, which is true for NOHZ_FULL as * any change in load would have resulted in the tick being turned back on. * * For regular NOHZ, this reduces to: * * load[i]_n = (1 - 1/2^i)^n * load[i]_0 * * see decay_load_misses(). For NOHZ_FULL we get to subtract and add the extra * term. 
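 *
 * As an illustration with made-up numbers: for idx i = 1 (A = 1/2) and
 * n = 3 missed ticks, load[1] decays to load[1]_0 / 8, which
 * decay_load_missed() computes via its idx == 1 fast path as
 * load >> missed_updates. For larger idx it instead multiplies by the
 * precomputed degrade_factor[] entry for every set bit of n, staying on
 * the 128 point (DEGRADE_SHIFT == 7) fixed-point scale.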
*/ static void cpu_load_update(struct rq *this_rq, unsigned long this_load, unsigned long pending_updates) { unsigned long __maybe_unused tickless_load = this_rq->cpu_load[0]; int i, scale; this_rq->nr_load_updates++; /* Update our load: */ this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */ for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) { unsigned long old_load, new_load; /* scale is effectively 1 << i now, and >> i divides by scale */ old_load = this_rq->cpu_load[i]; #ifdef CONFIG_NO_HZ_COMMON old_load = decay_load_missed(old_load, pending_updates - 1, i); if (tickless_load) { old_load -= decay_load_missed(tickless_load, pending_updates - 1, i); /* * old_load can never be a negative value because a * decayed tickless_load cannot be greater than the * original tickless_load. */ old_load += tickless_load; } #endif new_load = this_load; /* * Round up the averaging division if load is increasing. This * prevents us from getting stuck on 9 if the load is 10, for * example. */ if (new_load > old_load) new_load += scale - 1; this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; } } /* Used instead of source_load when we know the type == 0 */ static unsigned long weighted_cpuload(struct rq *rq) { return cfs_rq_runnable_load_avg(&rq->cfs); } #ifdef CONFIG_NO_HZ_COMMON /* * There is no sane way to deal with nohz on smp when using jiffies because the * CPU doing the jiffies update might drift wrt the CPU doing the jiffy reading * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}. * * Therefore we need to avoid the delta approach from the regular tick when * possible since that would seriously skew the load calculation. This is why we * use cpu_load_update_periodic() for CPUs out of nohz. However we'll rely on * jiffies deltas for updates happening while in nohz mode (idle ticks, idle * loop exit, nohz_idle_balance, nohz full exit...) * * This means we might still be one tick off for nohz periods. */ static void cpu_load_update_nohz(struct rq *this_rq, unsigned long curr_jiffies, unsigned long load) { unsigned long pending_updates; pending_updates = curr_jiffies - this_rq->last_load_update_tick; if (pending_updates) { this_rq->last_load_update_tick = curr_jiffies; /* * In the regular NOHZ case, we were idle, this means load 0. * In the NOHZ_FULL case, we were non-idle, we should consider * its weighted load. */ cpu_load_update(this_rq, load, pending_updates); } } /* * Called from nohz_idle_balance() to update the load ratings before doing the * idle balance. */ static void cpu_load_update_idle(struct rq *this_rq) { /* * bail if there's load or we're actually up-to-date. */ if (weighted_cpuload(this_rq)) return; cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0); } /* * Record CPU load on nohz entry so we know the tickless load to account * on nohz exit. cpu_load[0] happens then to be updated more frequently * than other cpu_load[idx] but it should be fine as cpu_load readers * shouldn't rely into synchronized cpu_load[*] updates. */ void cpu_load_update_nohz_start(void) { struct rq *this_rq = this_rq(); /* * This is all lockless but should be fine. If weighted_cpuload changes * concurrently we'll exit nohz. And cpu_load write can race with * cpu_load_update_idle() but both updater would be writing the same. */ this_rq->cpu_load[0] = weighted_cpuload(this_rq); } /* * Account the tickless load in the end of a nohz frame. 
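 *
 * The load recorded by cpu_load_update_nohz_start() lands in cpu_load[0]
 * and is what cpu_load_update() later subtracts and re-adds as
 * tickless_load when folding in the ticks missed while the tick was off.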
*/ void cpu_load_update_nohz_stop(void) { unsigned long curr_jiffies = READ_ONCE(jiffies); struct rq *this_rq = this_rq(); unsigned long load; struct rq_flags rf; if (curr_jiffies == this_rq->last_load_update_tick) return; load = weighted_cpuload(this_rq); rq_lock(this_rq, &rf); update_rq_clock(this_rq); cpu_load_update_nohz(this_rq, curr_jiffies, load); rq_unlock(this_rq, &rf); } #else /* !CONFIG_NO_HZ_COMMON */ static inline void cpu_load_update_nohz(struct rq *this_rq, unsigned long curr_jiffies, unsigned long load) { } #endif /* CONFIG_NO_HZ_COMMON */ static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load) { #ifdef CONFIG_NO_HZ_COMMON /* See the mess around cpu_load_update_nohz(). */ this_rq->last_load_update_tick = READ_ONCE(jiffies); #endif cpu_load_update(this_rq, load, 1); } /* * Called from scheduler_tick() */ void cpu_load_update_active(struct rq *this_rq) { unsigned long load = weighted_cpuload(this_rq); if (tick_nohz_tick_stopped()) cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), load); else cpu_load_update_periodic(this_rq, load); } /* * Return a low guess at the load of a migration-source CPU weighted * according to the scheduling class and "nice" value. * * We want to under-estimate the load of migration sources, to * balance conservatively. */ static unsigned long source_load(int cpu, int type) { struct rq *rq = cpu_rq(cpu); unsigned long total = weighted_cpuload(rq); if (type == 0 || !sched_feat(LB_BIAS)) return total; return min(rq->cpu_load[type-1], total); } /* * Return a high guess at the load of a migration-target CPU weighted * according to the scheduling class and "nice" value. */ static unsigned long target_load(int cpu, int type) { struct rq *rq = cpu_rq(cpu); unsigned long total = weighted_cpuload(rq); if (type == 0 || !sched_feat(LB_BIAS)) return total; return max(rq->cpu_load[type-1], total); } static unsigned long capacity_of(int cpu) { return cpu_rq(cpu)->cpu_capacity; } static unsigned long capacity_orig_of(int cpu) { return cpu_rq(cpu)->cpu_capacity_orig; } static unsigned long cpu_avg_load_per_task(int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running); unsigned long load_avg = weighted_cpuload(rq); if (nr_running) return load_avg / nr_running; return 0; } static void record_wakee(struct task_struct *p) { /* * Only decay a single time; tasks that have less then 1 wakeup per * jiffy will not have built up many flips. */ if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) { current->wakee_flips >>= 1; current->wakee_flip_decay_ts = jiffies; } if (current->last_wakee != p) { current->last_wakee = p; current->wakee_flips++; } } /* * Detect M:N waker/wakee relationships via a switching-frequency heuristic. * * A waker of many should wake a different task than the one last awakened * at a frequency roughly N times higher than one of its wakees. * * In order to determine whether we should let the load spread vs consolidating * to shared cache, we look for a minimum 'flip' frequency of llc_size in one * partner, and a factor of lls_size higher frequency in the other. * * With both conditions met, we can be relatively sure that the relationship is * non-monogamous, with partner count exceeding socket size. * * Waker/wakee being client/server, worker/dispatcher, interrupt source or * whatever is irrelevant, spread criteria is apparent partner count exceeds * socket size. 
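 *
 * Purely illustrative numbers: with an LLC of 4 CPUs (factor == 4), a
 * waker with wakee_flips == 20 waking a wakee with wakee_flips == 5
 * satisfies both slave >= factor and master >= slave * factor, so
 * wake_wide() returns 1 and the wakeup is not treated as cache-affine.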
*/ static int wake_wide(struct task_struct *p) { unsigned int master = current->wakee_flips; unsigned int slave = p->wakee_flips; int factor = this_cpu_read(sd_llc_size); if (master < slave) swap(master, slave); if (slave < factor || master < slave * factor) return 0; return 1; } /* * The purpose of wake_affine() is to quickly determine on which CPU we can run * soonest. For the purpose of speed we only consider the waking and previous * CPU. * * wake_affine_idle() - only considers 'now', it check if the waking CPU is * cache-affine and is (or will be) idle. * * wake_affine_weight() - considers the weight to reflect the average * scheduling latency of the CPUs. This seems to work * for the overloaded case. */ static int wake_affine_idle(int this_cpu, int prev_cpu, int sync) { /* * If this_cpu is idle, it implies the wakeup is from interrupt * context. Only allow the move if cache is shared. Otherwise an * interrupt intensive workload could force all tasks onto one * node depending on the IO topology or IRQ affinity settings. * * If the prev_cpu is idle and cache affine then avoid a migration. * There is no guarantee that the cache hot data from an interrupt * is more important than cache hot data on the prev_cpu and from * a cpufreq perspective, it's better to have higher utilisation * on one CPU. */ if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu)) return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu; if (sync && cpu_rq(this_cpu)->nr_running == 1) return this_cpu; return nr_cpumask_bits; } static int wake_affine_weight(struct sched_domain *sd, struct task_struct *p, int this_cpu, int prev_cpu, int sync) { s64 this_eff_load, prev_eff_load; unsigned long task_load; this_eff_load = target_load(this_cpu, sd->wake_idx); if (sync) { unsigned long current_load = task_h_load(current); if (current_load > this_eff_load) return this_cpu; this_eff_load -= current_load; } task_load = task_h_load(p); this_eff_load += task_load; if (sched_feat(WA_BIAS)) this_eff_load *= 100; this_eff_load *= capacity_of(prev_cpu); prev_eff_load = source_load(prev_cpu, sd->wake_idx); prev_eff_load -= task_load; if (sched_feat(WA_BIAS)) prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2; prev_eff_load *= capacity_of(this_cpu); /* * If sync, adjust the weight of prev_eff_load such that if * prev_eff == this_eff that select_idle_sibling() will consider * stacking the wakee on top of the waker if no other CPU is * idle. */ if (sync) prev_eff_load += 1; return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits; } static int wake_affine(struct sched_domain *sd, struct task_struct *p, int this_cpu, int prev_cpu, int sync) { int target = nr_cpumask_bits; if (sched_feat(WA_IDLE)) target = wake_affine_idle(this_cpu, prev_cpu, sync); if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits) target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync); schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts); if (target == nr_cpumask_bits) return prev_cpu; schedstat_inc(sd->ttwu_move_affine); schedstat_inc(p->se.statistics.nr_wakeups_affine); return target; } static unsigned long cpu_util_without(int cpu, struct task_struct *p); static unsigned long capacity_spare_without(int cpu, struct task_struct *p) { return max_t(long, capacity_of(cpu) - cpu_util_without(cpu, p), 0); } /* * find_idlest_group finds and returns the least busy CPU group within the * domain. * * Assumes p is allowed on at least one CPU in sd. 
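 *
 * The margins below derive from sd->imbalance_pct. With an illustrative
 * imbalance_pct of 125, the 'imbalance' cushion is 25% of a nice-0 task's
 * weight and imbalance_scale is 112; a non-local group then replaces the
 * current idlest either by beating it on runnable load by more than that
 * cushion, or by staying within the cushion while winning on avg_load by
 * the imbalance_scale ratio.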
*/ static struct sched_group * find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu, int sd_flag) { struct sched_group *idlest = NULL, *group = sd->groups; struct sched_group *most_spare_sg = NULL; unsigned long min_runnable_load = ULONG_MAX; unsigned long this_runnable_load = ULONG_MAX; unsigned long min_avg_load = ULONG_MAX, this_avg_load = ULONG_MAX; unsigned long most_spare = 0, this_spare = 0; int load_idx = sd->forkexec_idx; int imbalance_scale = 100 + (sd->imbalance_pct-100)/2; unsigned long imbalance = scale_load_down(NICE_0_LOAD) * (sd->imbalance_pct-100) / 100; if (sd_flag & SD_BALANCE_WAKE) load_idx = sd->wake_idx; do { unsigned long load, avg_load, runnable_load; unsigned long spare_cap, max_spare_cap; int local_group; int i; /* Skip over this group if it has no CPUs allowed */ if (!cpumask_intersects(sched_group_span(group), &p->cpus_allowed)) continue; local_group = cpumask_test_cpu(this_cpu, sched_group_span(group)); /* * Tally up the load of all CPUs in the group and find * the group containing the CPU with most spare capacity. */ avg_load = 0; runnable_load = 0; max_spare_cap = 0; for_each_cpu(i, sched_group_span(group)) { /* Bias balancing toward CPUs of our domain */ if (local_group) load = source_load(i, load_idx); else load = target_load(i, load_idx); runnable_load += load; avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs); spare_cap = capacity_spare_without(i, p); if (spare_cap > max_spare_cap) max_spare_cap = spare_cap; } /* Adjust by relative CPU capacity of the group */ avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity; runnable_load = (runnable_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity; if (local_group) { this_runnable_load = runnable_load; this_avg_load = avg_load; this_spare = max_spare_cap; } else { if (min_runnable_load > (runnable_load + imbalance)) { /* * The runnable load is significantly smaller * so we can pick this new CPU: */ min_runnable_load = runnable_load; min_avg_load = avg_load; idlest = group; } else if ((runnable_load < (min_runnable_load + imbalance)) && (100*min_avg_load > imbalance_scale*avg_load)) { /* * The runnable loads are close so take the * blocked load into account through avg_load: */ min_avg_load = avg_load; idlest = group; } if (most_spare < max_spare_cap) { most_spare = max_spare_cap; most_spare_sg = group; } } } while (group = group->next, group != sd->groups); /* * The cross-over point between using spare capacity or least load * is too conservative for high utilization tasks on partially * utilized systems if we require spare_capacity > task_util(p), * so we allow for some task stuffing by using * spare_capacity > task_util(p)/2. * * Spare capacity can't be used for fork because the utilization has * not been set yet, we must first select a rq to compute the initial * utilization. */ if (sd_flag & SD_BALANCE_FORK) goto skip_spare; if (this_spare > task_util(p) / 2 && imbalance_scale*this_spare > 100*most_spare) return NULL; if (most_spare > task_util(p) / 2) return most_spare_sg; skip_spare: if (!idlest) return NULL; /* * When comparing groups across NUMA domains, it's possible for the * local domain to be very lightly loaded relative to the remote * domains but "imbalance" skews the comparison making remote CPUs * look much more favourable. When considering cross-domain, add * imbalance to the runnable load on the remote node and consider * staying local. 
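 *
 * Concretely, for SD_NUMA domains the code below only returns the remote
 * group when min_runnable_load + imbalance is still below the local
 * runnable load.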
*/ if ((sd->flags & SD_NUMA) && min_runnable_load + imbalance >= this_runnable_load) return NULL; if (min_runnable_load > (this_runnable_load + imbalance)) return NULL; if ((this_runnable_load < (min_runnable_load + imbalance)) && (100*this_avg_load < imbalance_scale*min_avg_load)) return NULL; return idlest; } /* * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group. */ static int find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) { unsigned long load, min_load = ULONG_MAX; unsigned int min_exit_latency = UINT_MAX; u64 latest_idle_timestamp = 0; int least_loaded_cpu = this_cpu; int shallowest_idle_cpu = -1; int i; /* Check if we have any choice: */ if (group->group_weight == 1) return cpumask_first(sched_group_span(group)); /* Traverse only the allowed CPUs */ for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) { if (available_idle_cpu(i)) { struct rq *rq = cpu_rq(i); struct cpuidle_state *idle = idle_get_state(rq); if (idle && idle->exit_latency < min_exit_latency) { /* * We give priority to a CPU whose idle state * has the smallest exit latency irrespective * of any idle timestamp. */ min_exit_latency = idle->exit_latency; latest_idle_timestamp = rq->idle_stamp; shallowest_idle_cpu = i; } else if ((!idle || idle->exit_latency == min_exit_latency) && rq->idle_stamp > latest_idle_timestamp) { /* * If equal or no active idle state, then * the most recently idled CPU might have * a warmer cache. */ latest_idle_timestamp = rq->idle_stamp; shallowest_idle_cpu = i; } } else if (shallowest_idle_cpu == -1) { load = weighted_cpuload(cpu_rq(i)); if (load < min_load) { min_load = load; least_loaded_cpu = i; } } } return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu; } static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, int cpu, int prev_cpu, int sd_flag) { int new_cpu = cpu; if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed)) return prev_cpu; /* * We need task's util for capacity_spare_without, sync it up to * prev_cpu's last_update_time. */ if (!(sd_flag & SD_BALANCE_FORK)) sync_entity_load_avg(&p->se); while (sd) { struct sched_group *group; struct sched_domain *tmp; int weight; if (!(sd->flags & sd_flag)) { sd = sd->child; continue; } group = find_idlest_group(sd, p, cpu, sd_flag); if (!group) { sd = sd->child; continue; } new_cpu = find_idlest_group_cpu(group, p, cpu); if (new_cpu == cpu) { /* Now try balancing at a lower domain level of 'cpu': */ sd = sd->child; continue; } /* Now try balancing at a lower domain level of 'new_cpu': */ cpu = new_cpu; weight = sd->span_weight; sd = NULL; for_each_domain(cpu, tmp) { if (weight <= tmp->span_weight) break; if (tmp->flags & sd_flag) sd = tmp; } } return new_cpu; } #ifdef CONFIG_SCHED_SMT DEFINE_STATIC_KEY_FALSE(sched_smt_present); static inline void set_idle_cores(int cpu, int val) { struct sched_domain_shared *sds; sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); if (sds) WRITE_ONCE(sds->has_idle_cores, val); } static inline bool test_idle_cores(int cpu, bool def) { struct sched_domain_shared *sds; sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); if (sds) return READ_ONCE(sds->has_idle_cores); return def; } /* * Scans the local SMT mask to see if the entire core is idle, and records this * information in sd_llc_shared->has_idle_cores. * * Since SMT siblings share all cache levels, inspecting this limited remote * state should be fairly cheap. 
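 *
 * The flag is only a hint: it is set here when every sibling of this core
 * is idle and cleared again by select_idle_core() when a full scan finds
 * no idle core, so readers may transiently see a stale value.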
*/ void __update_idle_core(struct rq *rq) { int core = cpu_of(rq); int cpu; rcu_read_lock(); if (test_idle_cores(core, true)) goto unlock; for_each_cpu(cpu, cpu_smt_mask(core)) { if (cpu == core) continue; if (!available_idle_cpu(cpu)) goto unlock; } set_idle_cores(core, 1); unlock: rcu_read_unlock(); } /* * Scan the entire LLC domain for idle cores; this dynamically switches off if * there are no idle cores left in the system; tracked through * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above. */ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target) { struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask); int core, cpu; if (!static_branch_likely(&sched_smt_present)) return -1; if (!test_idle_cores(target, false)) return -1; cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed); for_each_cpu_wrap(core, cpus, target) { bool idle = true; for_each_cpu(cpu, cpu_smt_mask(core)) { cpumask_clear_cpu(cpu, cpus); if (!available_idle_cpu(cpu)) idle = false; } if (idle) return core; } /* * Failed to find an idle core; stop looking for one. */ set_idle_cores(target, 0); return -1; } /* * Scan the local SMT mask for idle CPUs. */ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) { int cpu; if (!static_branch_likely(&sched_smt_present)) return -1; for_each_cpu(cpu, cpu_smt_mask(target)) { if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) continue; if (available_idle_cpu(cpu)) return cpu; } return -1; } #else /* CONFIG_SCHED_SMT */ static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target) { return -1; } static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) { return -1; } #endif /* CONFIG_SCHED_SMT */ /* * Scan the LLC domain for idle CPUs; this is dynamically regulated by * comparing the average scan cost (tracked in sd->avg_scan_cost) against the * average idle time for this rq (as found in rq->avg_idle). */ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target) { struct sched_domain *this_sd; u64 avg_cost, avg_idle; u64 time, cost; s64 delta; int cpu, nr = INT_MAX; this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc)); if (!this_sd) return -1; /* * Due to large variance we need a large fuzz factor; hackbench in * particularly is sensitive here. */ avg_idle = this_rq()->avg_idle / 512; avg_cost = this_sd->avg_scan_cost + 1; if (sched_feat(SIS_AVG_CPU) && avg_idle < avg_cost) return -1; if (sched_feat(SIS_PROP)) { u64 span_avg = sd->span_weight * avg_idle; if (span_avg > 4*avg_cost) nr = div_u64(span_avg, avg_cost); else nr = 4; } time = local_clock(); for_each_cpu_wrap(cpu, sched_domain_span(sd), target) { if (!--nr) return -1; if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) continue; if (available_idle_cpu(cpu)) break; } time = local_clock() - time; cost = this_sd->avg_scan_cost; delta = (s64)(time - cost) / 8; this_sd->avg_scan_cost += delta; return cpu; } /* * Try and locate an idle core/thread in the LLC cache domain. 
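 *
 * The search below goes from cheapest to most expensive: the target CPU
 * itself, the previous CPU if it is cache affine and idle, a recently
 * used CPU, then a fully idle core, any idle CPU and finally an idle SMT
 * sibling within the LLC domain.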
*/ static int select_idle_sibling(struct task_struct *p, int prev, int target) { struct sched_domain *sd; int i, recent_used_cpu; if (available_idle_cpu(target)) return target; /* * If the previous CPU is cache affine and idle, don't be stupid: */ if (prev != target && cpus_share_cache(prev, target) && available_idle_cpu(prev)) return prev; /* Check a recently used CPU as a potential idle candidate: */ recent_used_cpu = p->recent_used_cpu; if (recent_used_cpu != prev && recent_used_cpu != target && cpus_share_cache(recent_used_cpu, target) && available_idle_cpu(recent_used_cpu) && cpumask_test_cpu(p->recent_used_cpu, &p->cpus_allowed)) { /* * Replace recent_used_cpu with prev as it is a potential * candidate for the next wake: */ p->recent_used_cpu = prev; return recent_used_cpu; } sd = rcu_dereference(per_cpu(sd_llc, target)); if (!sd) return target; i = select_idle_core(p, sd, target); if ((unsigned)i < nr_cpumask_bits) return i; i = select_idle_cpu(p, sd, target); if ((unsigned)i < nr_cpumask_bits) return i; i = select_idle_smt(p, sd, target); if ((unsigned)i < nr_cpumask_bits) return i; return target; } /** * Amount of capacity of a CPU that is (estimated to be) used by CFS tasks * @cpu: the CPU to get the utilization of * * The unit of the return value must be the one of capacity so we can compare * the utilization with the capacity of the CPU that is available for CFS task * (ie cpu_capacity). * * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the * recent utilization of currently non-runnable tasks on a CPU. It represents * the amount of utilization of a CPU in the range [0..capacity_orig] where * capacity_orig is the cpu_capacity available at the highest frequency * (arch_scale_freq_capacity()). * The utilization of a CPU converges towards a sum equal to or less than the * current capacity (capacity_curr <= capacity_orig) of the CPU because it is * the running time on this CPU scaled by capacity_curr. * * The estimated utilization of a CPU is defined to be the maximum between its * cfs_rq.avg.util_avg and the sum of the estimated utilization of the tasks * currently RUNNABLE on that CPU. * This allows to properly represent the expected utilization of a CPU which * has just got a big task running since a long sleep period. At the same time * however it preserves the benefits of the "blocked utilization" in * describing the potential for other tasks waking up on the same CPU. * * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even * higher than capacity_orig because of unfortunate rounding in * cfs.avg.util_avg or just after migrating tasks and new task wakeups until * the average stabilizes with the new running time. We need to check that the * utilization stays within the range of [0..capacity_orig] and cap it if * necessary. Without utilization capping, a group could be seen as overloaded * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of * available capacity. We allow utilization to overshoot capacity_curr (but not * capacity_orig) as it useful for predicting the capacity required after task * migrations (scheduler-driven DVFS). 
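 *
 * In short, the value computed below is
 * min(max(util_avg, util_est.enqueued), capacity_orig) of the root cfs_rq
 * when UTIL_EST is enabled, and min(util_avg, capacity_orig) otherwise.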
* * Return: the (estimated) utilization for the specified CPU */ static inline unsigned long cpu_util(int cpu) { struct cfs_rq *cfs_rq; unsigned int util; cfs_rq = &cpu_rq(cpu)->cfs; util = READ_ONCE(cfs_rq->avg.util_avg); if (sched_feat(UTIL_EST)) util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued)); return min_t(unsigned long, util, capacity_orig_of(cpu)); } /* * cpu_util_without: compute cpu utilization without any contributions from *p * @cpu: the CPU which utilization is requested * @p: the task which utilization should be discounted * * The utilization of a CPU is defined by the utilization of tasks currently * enqueued on that CPU as well as tasks which are currently sleeping after an * execution on that CPU. * * This method returns the utilization of the specified CPU by discounting the * utilization of the specified task, whenever the task is currently * contributing to the CPU utilization. */ static unsigned long cpu_util_without(int cpu, struct task_struct *p) { struct cfs_rq *cfs_rq; unsigned int util; /* Task has no contribution or is new */ if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) return cpu_util(cpu); cfs_rq = &cpu_rq(cpu)->cfs; util = READ_ONCE(cfs_rq->avg.util_avg); /* Discount task's util from CPU's util */ lsub_positive(&util, task_util(p)); /* * Covered cases: * * a) if *p is the only task sleeping on this CPU, then: * cpu_util (== task_util) > util_est (== 0) * and thus we return: * cpu_util_without = (cpu_util - task_util) = 0 * * b) if other tasks are SLEEPING on this CPU, which is now exiting * IDLE, then: * cpu_util >= task_util * cpu_util > util_est (== 0) * and thus we discount *p's blocked utilization to return: * cpu_util_without = (cpu_util - task_util) >= 0 * * c) if other tasks are RUNNABLE on that CPU and * util_est > cpu_util * then we use util_est since it returns a more restrictive * estimation of the spare capacity on that CPU, by just * considering the expected utilization of tasks already * runnable on that CPU. * * Cases a) and b) are covered by the above code, while case c) is * covered by the following code when estimated utilization is * enabled. */ if (sched_feat(UTIL_EST)) { unsigned int estimated = READ_ONCE(cfs_rq->avg.util_est.enqueued); /* * Despite the following checks we still have a small window * for a possible race, when an execl's select_task_rq_fair() * races with LB's detach_task(): * * detach_task() * p->on_rq = TASK_ON_RQ_MIGRATING; * ---------------------------------- A * deactivate_task() \ * dequeue_task() + RaceTime * util_est_dequeue() / * ---------------------------------- B * * The additional check on "current == p" it's required to * properly fix the execl regression and it helps in further * reducing the chances for the above race. */ if (unlikely(task_on_rq_queued(p) || current == p)) lsub_positive(&estimated, _task_util_est(p)); util = max(util, estimated); } /* * Utilization (estimated) can exceed the CPU capacity, thus let's * clamp to the maximum CPU capacity to ensure consistency with * the cpu_util call. */ return min_t(unsigned long, util, capacity_orig_of(cpu)); } /* * Disable WAKE_AFFINE in the case where task @p doesn't fit in the * capacity of either the waking CPU @cpu or the previous CPU @prev_cpu. * * In that case WAKE_AFFINE doesn't make sense and we'll let * BALANCE_WAKE sort things out. 
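 *
 * Purely illustrative numbers: with min_cap == 446 (the smaller
 * capacity_orig of @cpu and @prev_cpu) and a root-domain max_cpu_capacity
 * of 1024, the gap of 578 exceeds max_cap >> 3 == 128, so the decision is
 * deferred to task_fits_capacity() against min_cap.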
*/ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu) { long min_cap, max_cap; if (!static_branch_unlikely(&sched_asym_cpucapacity)) return 0; min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu)); max_cap = cpu_rq(cpu)->rd->max_cpu_capacity; /* Minimum capacity is close to max, no need to abort wake_affine */ if (max_cap - min_cap < max_cap >> 3) return 0; /* Bring task utilization in sync with prev_cpu */ sync_entity_load_avg(&p->se); return !task_fits_capacity(p, min_cap); } /* * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued) * to @dst_cpu. */ static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu) { struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg); /* * If @p migrates from @cpu to another, remove its contribution. Or, * if @p migrates from another CPU to @cpu, add its contribution. In * the other cases, @cpu is not impacted by the migration, so the * util_avg should already be correct. */ if (task_cpu(p) == cpu && dst_cpu != cpu) sub_positive(&util, task_util(p)); else if (task_cpu(p) != cpu && dst_cpu == cpu) util += task_util(p); if (sched_feat(UTIL_EST)) { util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued); /* * During wake-up, the task isn't enqueued yet and doesn't * appear in the cfs_rq->avg.util_est.enqueued of any rq, * so just add it (if needed) to "simulate" what will be * cpu_util() after the task has been enqueued. */ if (dst_cpu == cpu) util_est += _task_util_est(p); util = max(util, util_est); } return min(util, capacity_orig_of(cpu)); } /* * compute_energy(): Estimates the energy that would be consumed if @p was * migrated to @dst_cpu. compute_energy() predicts what will be the utilization * landscape of the * CPUs after the task migration, and uses the Energy Model * to compute what would be the energy if we decided to actually migrate that * task. */ static long compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd) { long util, max_util, sum_util, energy = 0; int cpu; for (; pd; pd = pd->next) { max_util = sum_util = 0; /* * The capacity state of CPUs of the current rd can be driven by * CPUs of another rd if they belong to the same performance * domain. So, account for the utilization of these CPUs too * by masking pd with cpu_online_mask instead of the rd span. * * If an entire performance domain is outside of the current rd, * it will not appear in its pd list and will not be accounted * by compute_energy(). */ for_each_cpu_and(cpu, perf_domain_span(pd), cpu_online_mask) { util = cpu_util_next(cpu, p, dst_cpu); util = schedutil_energy_util(cpu, util); max_util = max(util, max_util); sum_util += util; } energy += em_pd_energy(pd->em_pd, max_util, sum_util); } return energy; } /* * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the * waking task. find_energy_efficient_cpu() looks for the CPU with maximum * spare capacity in each performance domain and uses it as a potential * candidate to execute the task. Then, it uses the Energy Model to figure * out which of the CPU candidates is the most energy-efficient. * * The rationale for this heuristic is as follows. In a performance domain, * all the most energy efficient CPU candidates (according to the Energy * Model) are those for which we'll request a low frequency. 
When there are * several CPUs for which the frequency request will be the same, we don't * have enough data to break the tie between them, because the Energy Model * only includes active power costs. With this model, if we assume that * frequency requests follow utilization (e.g. using schedutil), the CPU with * the maximum spare capacity in a performance domain is guaranteed to be among * the best candidates of the performance domain. * * In practice, it could be preferable from an energy standpoint to pack * small tasks on a CPU in order to let other CPUs go in deeper idle states, * but that could also hurt our chances to go cluster idle, and we have no * ways to tell with the current Energy Model if this is actually a good * idea or not. So, find_energy_efficient_cpu() basically favors * cluster-packing, and spreading inside a cluster. That should at least be * a good thing for latency, and this is consistent with the idea that most * of the energy savings of EAS come from the asymmetry of the system, and * not so much from breaking the tie between identical CPUs. That's also the * reason why EAS is enabled in the topology code only for systems where * SD_ASYM_CPUCAPACITY is set. * * NOTE: Forkees are not accepted in the energy-aware wake-up path because * they don't have any useful utilization data yet and it's not possible to * forecast their impact on energy consumption. Consequently, they will be * placed by find_idlest_cpu() on the least loaded CPU, which might turn out * to be energy-inefficient in some use-cases. The alternative would be to * bias new tasks towards specific types of CPUs first, or to try to infer * their util_avg from the parent task, but those heuristics could hurt * other use-cases too. So, until someone finds a better way to solve this, * let's keep things simple by re-using the existing slow path. */ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) { unsigned long prev_energy = ULONG_MAX, best_energy = ULONG_MAX; struct root_domain *rd = cpu_rq(smp_processor_id())->rd; int cpu, best_energy_cpu = prev_cpu; struct perf_domain *head, *pd; unsigned long cpu_cap, util; struct sched_domain *sd; rcu_read_lock(); pd = rcu_dereference(rd->pd); if (!pd || READ_ONCE(rd->overutilized)) goto fail; head = pd; /* * Energy-aware wake-up happens on the lowest sched_domain starting * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu. */ sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity)); while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) sd = sd->parent; if (!sd) goto fail; sync_entity_load_avg(&p->se); if (!task_util_est(p)) goto unlock; for (; pd; pd = pd->next) { unsigned long cur_energy, spare_cap, max_spare_cap = 0; int max_spare_cap_cpu = -1; for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) { if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) continue; /* Skip CPUs that will be overutilized. */ util = cpu_util_next(cpu, p, cpu); cpu_cap = capacity_of(cpu); if (cpu_cap * 1024 < util * capacity_margin) continue; /* Always use prev_cpu as a candidate. */ if (cpu == prev_cpu) { prev_energy = compute_energy(p, prev_cpu, head); best_energy = min(best_energy, prev_energy); continue; } /* * Find the CPU with the maximum spare capacity in * the performance domain */ spare_cap = cpu_cap - util; if (spare_cap > max_spare_cap) { max_spare_cap = spare_cap; max_spare_cap_cpu = cpu; } } /* Evaluate the energy impact of using this CPU. 
*/ if (max_spare_cap_cpu >= 0) { cur_energy = compute_energy(p, max_spare_cap_cpu, head); if (cur_energy < best_energy) { best_energy = cur_energy; best_energy_cpu = max_spare_cap_cpu; } } } unlock: rcu_read_unlock(); /* * Pick the best CPU if prev_cpu cannot be used, or if it saves at * least 6% of the energy used by prev_cpu. */ if (prev_energy == ULONG_MAX) return best_energy_cpu; if ((prev_energy - best_energy) > (prev_energy >> 4)) return best_energy_cpu; return prev_cpu; fail: rcu_read_unlock(); return -1; } /* * select_task_rq_fair: Select target runqueue for the waking task in domains * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE, * SD_BALANCE_FORK, or SD_BALANCE_EXEC. * * Balances load by selecting the idlest CPU in the idlest group, or under * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set. * * Returns the target CPU number. * * preempt must be disabled. */ static int select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags) { struct sched_domain *tmp, *sd = NULL; int cpu = smp_processor_id(); int new_cpu = prev_cpu; int want_affine = 0; int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING); if (sd_flag & SD_BALANCE_WAKE) { record_wakee(p); if (static_branch_unlikely(&sched_energy_present)) { new_cpu = find_energy_efficient_cpu(p, prev_cpu); if (new_cpu >= 0) return new_cpu; new_cpu = prev_cpu; } want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) && cpumask_test_cpu(cpu, &p->cpus_allowed); } rcu_read_lock(); for_each_domain(cpu, tmp) { if (!(tmp->flags & SD_LOAD_BALANCE)) break; /* * If both 'cpu' and 'prev_cpu' are part of this domain, * cpu is a valid SD_WAKE_AFFINE target. */ if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { if (cpu != prev_cpu) new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync); sd = NULL; /* Prefer wake_affine over balance flags */ break; } if (tmp->flags & sd_flag) sd = tmp; else if (!want_affine) break; } if (unlikely(sd)) { /* Slow path */ new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag); } else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */ /* Fast path */ new_cpu = select_idle_sibling(p, prev_cpu, new_cpu); if (want_affine) current->recent_used_cpu = cpu; } rcu_read_unlock(); return new_cpu; } static void detach_entity_cfs_rq(struct sched_entity *se); /* * Called immediately before a task is migrated to a new CPU; task_cpu(p) and * cfs_rq_of(p) references at time of call are still valid and identify the * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held. */ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu) { /* * As blocked tasks retain absolute vruntime the migration needs to * deal with this by subtracting the old and adding the new * min_vruntime -- the latter is done by enqueue_entity() when placing * the task on the new runqueue. */ if (p->state == TASK_WAKING) { struct sched_entity *se = &p->se; struct cfs_rq *cfs_rq = cfs_rq_of(se); u64 min_vruntime; #ifndef CONFIG_64BIT u64 min_vruntime_copy; do { min_vruntime_copy = cfs_rq->min_vruntime_copy; smp_rmb(); min_vruntime = cfs_rq->min_vruntime; } while (min_vruntime != min_vruntime_copy); #else min_vruntime = cfs_rq->min_vruntime; #endif se->vruntime -= min_vruntime; } if (p->on_rq == TASK_ON_RQ_MIGRATING) { /* * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old' * rq->lock and can modify state directly. 
*/ lockdep_assert_held(&task_rq(p)->lock); detach_entity_cfs_rq(&p->se); } else { /* * We are supposed to update the task to "current" time, then * its up to date and ready to go to new CPU/cfs_rq. But we * have difficulty in getting what current time is, so simply * throw away the out-of-date time. This will result in the * wakee task is less decayed, but giving the wakee more load * sounds not bad. */ remove_entity_load_avg(&p->se); } /* Tell new CPU we are migrated */ p->se.avg.last_update_time = 0; /* We have migrated, no longer consider this task hot */ p->se.exec_start = 0; update_scan_period(p, new_cpu); } static void task_dead_fair(struct task_struct *p) { remove_entity_load_avg(&p->se); } #endif /* CONFIG_SMP */ static unsigned long wakeup_gran(struct sched_entity *se) { unsigned long gran = sysctl_sched_wakeup_granularity; /* * Since its curr running now, convert the gran from real-time * to virtual-time in his units. * * By using 'se' instead of 'curr' we penalize light tasks, so * they get preempted easier. That is, if 'se' < 'curr' then * the resulting gran will be larger, therefore penalizing the * lighter, if otoh 'se' > 'curr' then the resulting gran will * be smaller, again penalizing the lighter task. * * This is especially important for buddies when the leftmost * task is higher priority than the buddy. */ return calc_delta_fair(gran, se); } /* * Should 'se' preempt 'curr'. * * |s1 * |s2 * |s3 * g * |<--->|c * * w(c, s1) = -1 * w(c, s2) = 0 * w(c, s3) = 1 * */ static int wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) { s64 gran, vdiff = curr->vruntime - se->vruntime; if (vdiff <= 0) return -1; gran = wakeup_gran(se); if (vdiff > gran) return 1; return 0; } static void set_last_buddy(struct sched_entity *se) { if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se)))) return; for_each_sched_entity(se) { if (SCHED_WARN_ON(!se->on_rq)) return; cfs_rq_of(se)->last = se; } } static void set_next_buddy(struct sched_entity *se) { if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se)))) return; for_each_sched_entity(se) { if (SCHED_WARN_ON(!se->on_rq)) return; cfs_rq_of(se)->next = se; } } static void set_skip_buddy(struct sched_entity *se) { for_each_sched_entity(se) cfs_rq_of(se)->skip = se; } /* * Preempt the current task with a newly woken task if needed: */ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) { struct task_struct *curr = rq->curr; struct sched_entity *se = &curr->se, *pse = &p->se; struct cfs_rq *cfs_rq = task_cfs_rq(curr); int scale = cfs_rq->nr_running >= sched_nr_latency; int next_buddy_marked = 0; if (unlikely(se == pse)) return; /* * This is possible from callers such as attach_tasks(), in which we * unconditionally check_prempt_curr() after an enqueue (which may have * lead to a throttle). This both saves work and prevents false * next-buddy nomination below. */ if (unlikely(throttled_hierarchy(cfs_rq_of(pse)))) return; if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) { set_next_buddy(pse); next_buddy_marked = 1; } /* * We can come here with TIF_NEED_RESCHED already set from new task * wake up path. * * Note: this also catches the edge-case of curr being in a throttled * group (e.g. via set_curr_task), since update_curr() (in the * enqueue of curr) will have resulted in resched being set. This * prevents us from potentially nominating it as a false LAST_BUDDY * below. 
*/ if (test_tsk_need_resched(curr)) return; /* Idle tasks are by definition preempted by non-idle tasks. */ if (unlikely(task_has_idle_policy(curr)) && likely(!task_has_idle_policy(p))) goto preempt; /* * Batch and idle tasks do not preempt non-idle tasks (their preemption * is driven by the tick): */ if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION)) return; find_matching_se(&se, &pse); update_curr(cfs_rq_of(se)); BUG_ON(!pse); if (wakeup_preempt_entity(se, pse) == 1) { /* * Bias pick_next to pick the sched entity that is * triggering this preemption. */ if (!next_buddy_marked) set_next_buddy(pse); goto preempt; } return; preempt: resched_curr(rq); /* * Only set the backward buddy when the current task is still * on the rq. This can happen when a wakeup gets interleaved * with schedule on the ->pre_schedule() or idle_balance() * point, either of which can * drop the rq lock. * * Also, during early boot the idle thread is in the fair class, * for obvious reasons its a bad idea to schedule back to it. */ if (unlikely(!se->on_rq || curr == rq->idle)) return; if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) set_last_buddy(se); } static struct task_struct * pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) { struct cfs_rq *cfs_rq = &rq->cfs; struct sched_entity *se; struct task_struct *p; int new_tasks; again: if (!cfs_rq->nr_running) goto idle; #ifdef CONFIG_FAIR_GROUP_SCHED if (prev->sched_class != &fair_sched_class) goto simple; /* * Because of the set_next_buddy() in dequeue_task_fair() it is rather * likely that a next task is from the same cgroup as the current. * * Therefore attempt to avoid putting and setting the entire cgroup * hierarchy, only change the part that actually changes. */ do { struct sched_entity *curr = cfs_rq->curr; /* * Since we got here without doing put_prev_entity() we also * have to consider cfs_rq->curr. If it is still a runnable * entity, update_curr() will update its vruntime, otherwise * forget we've ever seen it. */ if (curr) { if (curr->on_rq) update_curr(cfs_rq); else curr = NULL; /* * This call to check_cfs_rq_runtime() will do the * throttle and dequeue its entity in the parent(s). * Therefore the nr_running test will indeed * be correct. */ if (unlikely(check_cfs_rq_runtime(cfs_rq))) { cfs_rq = &rq->cfs; if (!cfs_rq->nr_running) goto idle; goto simple; } } se = pick_next_entity(cfs_rq, curr); cfs_rq = group_cfs_rq(se); } while (cfs_rq); p = task_of(se); /* * Since we haven't yet done put_prev_entity and if the selected task * is a different task than we started out with, try and touch the * least amount of cfs_rqs. */ if (prev != p) { struct sched_entity *pse = &prev->se; while (!(cfs_rq = is_same_group(se, pse))) { int se_depth = se->depth; int pse_depth = pse->depth; if (se_depth <= pse_depth) { put_prev_entity(cfs_rq_of(pse), pse); pse = parent_entity(pse); } if (se_depth >= pse_depth) { set_next_entity(cfs_rq_of(se), se); se = parent_entity(se); } } put_prev_entity(cfs_rq, pse); set_next_entity(cfs_rq, se); } goto done; simple: #endif put_prev_task(rq, prev); do { se = pick_next_entity(cfs_rq, NULL); set_next_entity(cfs_rq, se); cfs_rq = group_cfs_rq(se); } while (cfs_rq); p = task_of(se); done: __maybe_unused; #ifdef CONFIG_SMP /* * Move the next running task to the front of * the list, so our cfs_tasks list becomes MRU * one. 
*/ list_move(&p->se.group_node, &rq->cfs_tasks); #endif if (hrtick_enabled(rq)) hrtick_start_fair(rq, p); update_misfit_status(p, rq); return p; idle: update_misfit_status(NULL, rq); new_tasks = idle_balance(rq, rf); /* * Because idle_balance() releases (and re-acquires) rq->lock, it is * possible for any higher priority task to appear. In that case we * must re-start the pick_next_entity() loop. */ if (new_tasks < 0) return RETRY_TASK; if (new_tasks > 0) goto again; return NULL; } /* * Account for a descheduled task: */ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) { struct sched_entity *se = &prev->se; struct cfs_rq *cfs_rq; for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); put_prev_entity(cfs_rq, se); } } /* * sched_yield() is very simple * * The magic of dealing with the ->skip buddy is in pick_next_entity. */ static void yield_task_fair(struct rq *rq) { struct task_struct *curr = rq->curr; struct cfs_rq *cfs_rq = task_cfs_rq(curr); struct sched_entity *se = &curr->se; /* * Are we the only task in the tree? */ if (unlikely(rq->nr_running == 1)) return; clear_buddies(cfs_rq, se); if (curr->policy != SCHED_BATCH) { update_rq_clock(rq); /* * Update run-time statistics of the 'current'. */ update_curr(cfs_rq); /* * Tell update_rq_clock() that we've just updated, * so we don't do microscopic update in schedule() * and double the fastpath cost. */ rq_clock_skip_update(rq); } set_skip_buddy(se); } static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt) { struct sched_entity *se = &p->se; /* throttled hierarchies are not runnable */ if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) return false; /* Tell the scheduler that we'd really like pse to run next. */ set_next_buddy(se); yield_task_fair(rq); return true; } #ifdef CONFIG_SMP /************************************************** * Fair scheduling class load-balancing methods. * * BASICS * * The purpose of load-balancing is to achieve the same basic fairness the * per-CPU scheduler provides, namely provide a proportional amount of compute * time to each task. This is expressed in the following equation: * * W_i,n/P_i == W_j,n/P_j for all i,j (1) * * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight * W_i,0 is defined as: * * W_i,0 = \Sum_j w_i,j (2) * * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight * is derived from the nice value as per sched_prio_to_weight[]. * * The weight average is an exponential decay average of the instantaneous * weight: * * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3) * * C_i is the compute capacity of CPU i, typically it is the * fraction of 'recent' time available for SCHED_OTHER task execution. But it * can also include other factors [XXX]. * * To achieve this balance we define a measure of imbalance which follows * directly from (1): * * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4) * * We them move tasks around to minimize the imbalance. In the continuous * function space it is obvious this converges, in the discrete case we get * a few fun cases generally called infeasible weight scenarios. * * [XXX expand on: * - infeasible weights; * - local vs global optima in the discrete case. ] * * * SCHED DOMAINS * * In order to solve the imbalance equation (4), and avoid the obvious O(n^2) * for all i,j solution, we create a tree of CPUs that follows the hardware * topology where each level pairs two lower groups (or better). This results * in O(log n) layers. 
Furthermore we reduce the number of CPUs going up the * tree to only the first of the previous level and we decrease the frequency * of load-balance at each level inv. proportional to the number of CPUs in * the groups. * * This yields: * * log_2 n 1 n * \Sum { --- * --- * 2^i } = O(n) (5) * i = 0 2^i 2^i * `- size of each group * | | `- number of CPUs doing load-balance * | `- freq * `- sum over all levels * * Coupled with a limit on how many tasks we can migrate every balance pass, * this makes (5) the runtime complexity of the balancer. * * An important property here is that each CPU is still (indirectly) connected * to every other CPU in at most O(log n) steps: * * The adjacency matrix of the resulting graph is given by: * * log_2 n * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6) * k = 0 * * And you'll find that: * * A^(log_2 n)_i,j != 0 for all i,j (7) * * Showing there's indeed a path between every CPU in at most O(log n) steps. * The task movement gives a factor of O(m), giving a convergence complexity * of: * * O(nm log n), n := nr_cpus, m := nr_tasks (8) * * * WORK CONSERVING * * In order to avoid CPUs going idle while there's still work to do, new idle * balancing is more aggressive and has the newly idle CPU iterate up the domain * tree itself instead of relying on other CPUs to bring it work. * * This adds some complexity to both (5) and (8) but it reduces the total idle * time. * * [XXX more?] * * * CGROUPS * * Cgroups make a horror show out of (2), instead of a simple sum we get: * * s_k,i * W_i,0 = \Sum_j \Prod_k w_k * ----- (9) * S_k * * Where * * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10) * * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i. * * The big problem is S_k, its a global sum needed to compute a local (W_i) * property. * * [XXX write more on how we solve this.. _after_ merging pjt's patches that * rewrite all of this once again.] 
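 *
 * A small worked instance of (9) and (10), with made-up numbers: a single
 * cgroup k of weight w_k = 1024 has runnable task weight s_k,0 = 2048 on
 * CPU0 and s_k,1 = 1024 on CPU1, so S_k = 3072 and the group contributes
 * roughly 1024 * 2048/3072 = 682 to W_0 and 1024 * 1024/3072 = 341 to W_1.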
*/ static unsigned long __read_mostly max_load_balance_interval = HZ/10; enum fbq_type { regular, remote, all }; enum group_type { group_other = 0, group_misfit_task, group_imbalanced, group_overloaded, }; #define LBF_ALL_PINNED 0x01 #define LBF_NEED_BREAK 0x02 #define LBF_DST_PINNED 0x04 #define LBF_SOME_PINNED 0x08 #define LBF_NOHZ_STATS 0x10 #define LBF_NOHZ_AGAIN 0x20 struct lb_env { struct sched_domain *sd; struct rq *src_rq; int src_cpu; int dst_cpu; struct rq *dst_rq; struct cpumask *dst_grpmask; int new_dst_cpu; enum cpu_idle_type idle; long imbalance; /* The set of CPUs under consideration for load-balancing */ struct cpumask *cpus; unsigned int flags; unsigned int loop; unsigned int loop_break; unsigned int loop_max; enum fbq_type fbq_type; enum group_type src_grp_type; struct list_head tasks; }; /* * Is this task likely cache-hot: */ static int task_hot(struct task_struct *p, struct lb_env *env) { s64 delta; lockdep_assert_held(&env->src_rq->lock); if (p->sched_class != &fair_sched_class) return 0; if (unlikely(task_has_idle_policy(p))) return 0; /* * Buddy candidates are cache hot: */ if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running && (&p->se == cfs_rq_of(&p->se)->next || &p->se == cfs_rq_of(&p->se)->last)) return 1; if (sysctl_sched_migration_cost == -1) return 1; if (sysctl_sched_migration_cost == 0) return 0; delta = rq_clock_task(env->src_rq) - p->se.exec_start; return delta < (s64)sysctl_sched_migration_cost; } #ifdef CONFIG_NUMA_BALANCING /* * Returns 1, if task migration degrades locality * Returns 0, if task migration improves locality i.e migration preferred. * Returns -1, if task migration is not affected by locality. */ static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) { struct numa_group *numa_group = rcu_dereference(p->numa_group); unsigned long src_weight, dst_weight; int src_nid, dst_nid, dist; if (!static_branch_likely(&sched_numa_balancing)) return -1; if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) return -1; src_nid = cpu_to_node(env->src_cpu); dst_nid = cpu_to_node(env->dst_cpu); if (src_nid == dst_nid) return -1; /* Migrating away from the preferred node is always bad. */ if (src_nid == p->numa_preferred_nid) { if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) return 1; else return -1; } /* Encourage migration to the preferred node. */ if (dst_nid == p->numa_preferred_nid) return 0; /* Leaving a core idle is often worse than degrading locality. */ if (env->idle == CPU_IDLE) return -1; dist = node_distance(src_nid, dst_nid); if (numa_group) { src_weight = group_weight(p, src_nid, dist); dst_weight = group_weight(p, dst_nid, dist); } else { src_weight = task_weight(p, src_nid, dist); dst_weight = task_weight(p, dst_nid, dist); } return dst_weight < src_weight; } #else static inline int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) { return -1; } #endif /* * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? */ static int can_migrate_task(struct task_struct *p, struct lb_env *env) { int tsk_cache_hot; lockdep_assert_held(&env->src_rq->lock); /* * We do not migrate tasks that are: * 1) throttled_lb_pair, or * 2) cannot be migrated to this CPU due to cpus_allowed, or * 3) running (obviously), or * 4) are cache-hot on their current CPU. 
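 *
 * Condition 4) is soft: a cache-hot task is still migrated once
 * sd->nr_balance_failed exceeds sd->cache_nice_tries, or when NUMA
 * placement prefers the destination node.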
*/ if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) return 0; if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) { int cpu; schedstat_inc(p->se.statistics.nr_failed_migrations_affine); env->flags |= LBF_SOME_PINNED; /* * Remember if this task can be migrated to any other CPU in * our sched_group. We may want to revisit it if we couldn't * meet load balance goals by pulling other tasks on src_cpu. * * Avoid computing new_dst_cpu for NEWLY_IDLE or if we have * already computed one in current iteration. */ if (env->idle == CPU_NEWLY_IDLE || (env->flags & LBF_DST_PINNED)) return 0; /* Prevent to re-select dst_cpu via env's CPUs: */ for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { if (cpumask_test_cpu(cpu, &p->cpus_allowed)) { env->flags |= LBF_DST_PINNED; env->new_dst_cpu = cpu; break; } } return 0; } /* Record that we found atleast one task that could run on dst_cpu */ env->flags &= ~LBF_ALL_PINNED; if (task_running(env->src_rq, p)) { schedstat_inc(p->se.statistics.nr_failed_migrations_running); return 0; } /* * Aggressive migration if: * 1) destination numa is preferred * 2) task is cache cold, or * 3) too many balance attempts have failed. */ tsk_cache_hot = migrate_degrades_locality(p, env); if (tsk_cache_hot == -1) tsk_cache_hot = task_hot(p, env); if (tsk_cache_hot <= 0 || env->sd->nr_balance_failed > env->sd->cache_nice_tries) { if (tsk_cache_hot == 1) { schedstat_inc(env->sd->lb_hot_gained[env->idle]); schedstat_inc(p->se.statistics.nr_forced_migrations); } return 1; } schedstat_inc(p->se.statistics.nr_failed_migrations_hot); return 0; } /* * detach_task() -- detach the task for the migration specified in env */ static void detach_task(struct task_struct *p, struct lb_env *env) { lockdep_assert_held(&env->src_rq->lock); p->on_rq = TASK_ON_RQ_MIGRATING; deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); set_task_cpu(p, env->dst_cpu); } /* * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as * part of active balancing operations within "domain". * * Returns a task if successful and NULL otherwise. */ static struct task_struct *detach_one_task(struct lb_env *env) { struct task_struct *p; lockdep_assert_held(&env->src_rq->lock); list_for_each_entry_reverse(p, &env->src_rq->cfs_tasks, se.group_node) { if (!can_migrate_task(p, env)) continue; detach_task(p, env); /* * Right now, this is only the second place where * lb_gained[env->idle] is updated (other is detach_tasks) * so we can safely collect stats here rather than * inside detach_tasks(). */ schedstat_inc(env->sd->lb_gained[env->idle]); return p; } return NULL; } static const unsigned int sched_nr_migrate_break = 32; /* * detach_tasks() -- tries to detach up to imbalance weighted load from * busiest_rq, as part of a balancing operation within domain "sd". * * Returns number of detached tasks if successful and 0 otherwise. */ static int detach_tasks(struct lb_env *env) { struct list_head *tasks = &env->src_rq->cfs_tasks; struct task_struct *p; unsigned long load; int detached = 0; lockdep_assert_held(&env->src_rq->lock); if (env->imbalance <= 0) return 0; while (!list_empty(tasks)) { /* * We don't want to steal all, otherwise we may be treated likewise, * which could at worst lead to a livelock crash. 
*/ if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1) break; p = list_last_entry(tasks, struct task_struct, se.group_node); env->loop++; /* We've more or less seen every task there is, call it quits */ if (env->loop > env->loop_max) break; /* take a breather every nr_migrate tasks */ if (env->loop > env->loop_break) { env->loop_break += sched_nr_migrate_break; env->flags |= LBF_NEED_BREAK; break; } if (!can_migrate_task(p, env)) goto next; load = task_h_load(p); if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed) goto next; if ((load / 2) > env->imbalance) goto next; detach_task(p, env); list_add(&p->se.group_node, &env->tasks); detached++; env->imbalance -= load; #ifdef CONFIG_PREEMPT /* * NEWIDLE balancing is a source of latency, so preemptible * kernels will stop after the first task is detached to minimize * the critical section. */ if (env->idle == CPU_NEWLY_IDLE) break; #endif /* * We only want to steal up to the prescribed amount of * weighted load. */ if (env->imbalance <= 0) break; continue; next: list_move(&p->se.group_node, tasks); } /* * Right now, this is one of only two places we collect this stat * so we can safely collect detach_one_task() stats here rather * than inside detach_one_task(). */ schedstat_add(env->sd->lb_gained[env->idle], detached); return detached; } /* * attach_task() -- attach the task detached by detach_task() to its new rq. */ static void attach_task(struct rq *rq, struct task_struct *p) { lockdep_assert_held(&rq->lock); BUG_ON(task_rq(p) != rq); activate_task(rq, p, ENQUEUE_NOCLOCK); p->on_rq = TASK_ON_RQ_QUEUED; check_preempt_curr(rq, p, 0); } /* * attach_one_task() -- attaches the task returned from detach_one_task() to * its new rq. */ static void attach_one_task(struct rq *rq, struct task_struct *p) { struct rq_flags rf; rq_lock(rq, &rf); update_rq_clock(rq); attach_task(rq, p); rq_unlock(rq, &rf); } /* * attach_tasks() -- attaches all tasks detached by detach_tasks() to their * new rq. */ static void attach_tasks(struct lb_env *env) { struct list_head *tasks = &env->tasks; struct task_struct *p; struct rq_flags rf; rq_lock(env->dst_rq, &rf); update_rq_clock(env->dst_rq); while (!list_empty(tasks)) { p = list_first_entry(tasks, struct task_struct, se.group_node); list_del_init(&p->se.group_node); attach_task(env->dst_rq, p); } rq_unlock(env->dst_rq, &rf); } static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { if (cfs_rq->avg.load_avg) return true; if (cfs_rq->avg.util_avg) return true; return false; } static inline bool others_have_blocked(struct rq *rq) { if (READ_ONCE(rq->avg_rt.util_avg)) return true; if (READ_ONCE(rq->avg_dl.util_avg)) return true; #ifdef CONFIG_HAVE_SCHED_AVG_IRQ if (READ_ONCE(rq->avg_irq.util_avg)) return true; #endif return false; } #ifdef CONFIG_FAIR_GROUP_SCHED static void update_blocked_averages(int cpu) { struct rq *rq = cpu_rq(cpu); struct cfs_rq *cfs_rq; const struct sched_class *curr_class; struct rq_flags rf; bool done = true; rq_lock_irqsave(rq, &rf); update_rq_clock(rq); /* * Iterates the task_group tree in a bottom up fashion, see * list_add_leaf_cfs_rq() for details. 
*/ for_each_leaf_cfs_rq(rq, cfs_rq) { struct sched_entity *se; /* throttled entities do not contribute to load */ if (throttled_hierarchy(cfs_rq)) continue; if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq)) update_tg_load_avg(cfs_rq, 0); /* Propagate pending load changes to the parent, if any: */ se = cfs_rq->tg->se[cpu]; if (se && !skip_blocked_update(se)) update_load_avg(cfs_rq_of(se), se, 0); /* Don't need periodic decay once load/util_avg are null */ if (cfs_rq_has_blocked(cfs_rq)) done = false; } curr_class = rq->curr->sched_class; update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class); update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class); update_irq_load_avg(rq, 0); /* Don't need periodic decay once load/util_avg are null */ if (others_have_blocked(rq)) done = false; #ifdef CONFIG_NO_HZ_COMMON rq->last_blocked_load_update_tick = jiffies; if (done) rq->has_blocked_load = 0; #endif rq_unlock_irqrestore(rq, &rf); } /* * Compute the hierarchical load factor for cfs_rq and all its ascendants. * This needs to be done in a top-down fashion because the load of a child * group is a fraction of its parents load. */ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) { struct rq *rq = rq_of(cfs_rq); struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; unsigned long now = jiffies; unsigned long load; if (cfs_rq->last_h_load_update == now) return; cfs_rq->h_load_next = NULL; for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); cfs_rq->h_load_next = se; if (cfs_rq->last_h_load_update == now) break; } if (!se) { cfs_rq->h_load = cfs_rq_load_avg(cfs_rq); cfs_rq->last_h_load_update = now; } while ((se = cfs_rq->h_load_next) != NULL) { load = cfs_rq->h_load; load = div64_ul(load * se->avg.load_avg, cfs_rq_load_avg(cfs_rq) + 1); cfs_rq = group_cfs_rq(se); cfs_rq->h_load = load; cfs_rq->last_h_load_update = now; } } static unsigned long task_h_load(struct task_struct *p) { struct cfs_rq *cfs_rq = task_cfs_rq(p); update_cfs_rq_h_load(cfs_rq); return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, cfs_rq_load_avg(cfs_rq) + 1); } #else static inline void update_blocked_averages(int cpu) { struct rq *rq = cpu_rq(cpu); struct cfs_rq *cfs_rq = &rq->cfs; const struct sched_class *curr_class; struct rq_flags rf; rq_lock_irqsave(rq, &rf); update_rq_clock(rq); update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq); curr_class = rq->curr->sched_class; update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class); update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class); update_irq_load_avg(rq, 0); #ifdef CONFIG_NO_HZ_COMMON rq->last_blocked_load_update_tick = jiffies; if (!cfs_rq_has_blocked(cfs_rq) && !others_have_blocked(rq)) rq->has_blocked_load = 0; #endif rq_unlock_irqrestore(rq, &rf); } static unsigned long task_h_load(struct task_struct *p) { return p->se.avg.load_avg; } #endif /********** Helpers for find_busiest_group ************************/ /* * sg_lb_stats - stats of a sched_group required for load_balancing */ struct sg_lb_stats { unsigned long avg_load; /*Avg load across the CPUs of the group */ unsigned long group_load; /* Total load over the CPUs of the group */ unsigned long sum_weighted_load; /* Weighted load of group's tasks */ unsigned long load_per_task; unsigned long group_capacity; unsigned long group_util; /* Total utilization of the group */ unsigned int sum_nr_running; /* Nr tasks running in the group */ unsigned int idle_cpus; unsigned int group_weight; enum group_type group_type; int 
group_no_capacity; unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */ #ifdef CONFIG_NUMA_BALANCING unsigned int nr_numa_running; unsigned int nr_preferred_running; #endif }; /* * sd_lb_stats - Structure to store the statistics of a sched_domain * during load balancing. */ struct sd_lb_stats { struct sched_group *busiest; /* Busiest group in this sd */ struct sched_group *local; /* Local group in this sd */ unsigned long total_running; unsigned long total_load; /* Total load of all groups in sd */ unsigned long total_capacity; /* Total capacity of all groups in sd */ unsigned long avg_load; /* Average load across all groups in sd */ struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */ struct sg_lb_stats local_stat; /* Statistics of the local group */ }; static inline void init_sd_lb_stats(struct sd_lb_stats *sds) { /* * Skimp on the clearing to avoid duplicate work. We can avoid clearing * local_stat because update_sg_lb_stats() does a full clear/assignment. * We must however clear busiest_stat::avg_load because * update_sd_pick_busiest() reads this before assignment. */ *sds = (struct sd_lb_stats){ .busiest = NULL, .local = NULL, .total_running = 0UL, .total_load = 0UL, .total_capacity = 0UL, .busiest_stat = { .avg_load = 0UL, .sum_nr_running = 0, .group_type = group_other, }, }; } /** * get_sd_load_idx - Obtain the load index for a given sched domain. * @sd: The sched_domain whose load_idx is to be obtained. * @idle: The idle status of the CPU for whose sd load_idx is obtained. * * Return: The load index. */ static inline int get_sd_load_idx(struct sched_domain *sd, enum cpu_idle_type idle) { int load_idx; switch (idle) { case CPU_NOT_IDLE: load_idx = sd->busy_idx; break; case CPU_NEWLY_IDLE: load_idx = sd->newidle_idx; break; default: load_idx = sd->idle_idx; break; } return load_idx; } static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long max = arch_scale_cpu_capacity(sd, cpu); unsigned long used, free; unsigned long irq; irq = cpu_util_irq(rq); if (unlikely(irq >= max)) return 1; used = READ_ONCE(rq->avg_rt.util_avg); used += READ_ONCE(rq->avg_dl.util_avg); if (unlikely(used >= max)) return 1; free = max - used; return scale_irq_capacity(free, irq, max); } static void update_cpu_capacity(struct sched_domain *sd, int cpu) { unsigned long capacity = scale_rt_capacity(sd, cpu); struct sched_group *sdg = sd->groups; cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu); if (!capacity) capacity = 1; cpu_rq(cpu)->cpu_capacity = capacity; sdg->sgc->capacity = capacity; sdg->sgc->min_capacity = capacity; sdg->sgc->max_capacity = capacity; } void update_group_capacity(struct sched_domain *sd, int cpu) { struct sched_domain *child = sd->child; struct sched_group *group, *sdg = sd->groups; unsigned long capacity, min_capacity, max_capacity; unsigned long interval; interval = msecs_to_jiffies(sd->balance_interval); interval = clamp(interval, 1UL, max_load_balance_interval); sdg->sgc->next_update = jiffies + interval; if (!child) { update_cpu_capacity(sd, cpu); return; } capacity = 0; min_capacity = ULONG_MAX; max_capacity = 0; if (child->flags & SD_OVERLAP) { /* * SD_OVERLAP domains cannot assume that child groups * span the current group. */ for_each_cpu(cpu, sched_group_span(sdg)) { struct sched_group_capacity *sgc; struct rq *rq = cpu_rq(cpu); /* * build_sched_domains() -> init_sched_groups_capacity() * gets here before we've attached the domains to the * runqueues. 
* * Use capacity_of(), which is set irrespective of domains * in update_cpu_capacity(). * * This avoids capacity from being 0 and * causing divide-by-zero issues on boot. */ if (unlikely(!rq->sd)) { capacity += capacity_of(cpu); } else { sgc = rq->sd->groups->sgc; capacity += sgc->capacity; } min_capacity = min(capacity, min_capacity); max_capacity = max(capacity, max_capacity); } } else { /* * !SD_OVERLAP domains can assume that child groups * span the current group. */ group = child->groups; do { struct sched_group_capacity *sgc = group->sgc; capacity += sgc->capacity; min_capacity = min(sgc->min_capacity, min_capacity); max_capacity = max(sgc->max_capacity, max_capacity); group = group->next; } while (group != child->groups); } sdg->sgc->capacity = capacity; sdg->sgc->min_capacity = min_capacity; sdg->sgc->max_capacity = max_capacity; } /* * Check whether the capacity of the rq has been noticeably reduced by side * activity. The imbalance_pct is used for the threshold. * Return true is the capacity is reduced */ static inline int check_cpu_capacity(struct rq *rq, struct sched_domain *sd) { return ((rq->cpu_capacity * sd->imbalance_pct) < (rq->cpu_capacity_orig * 100)); } /* * Group imbalance indicates (and tries to solve) the problem where balancing * groups is inadequate due to ->cpus_allowed constraints. * * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a * cpumask covering 1 CPU of the first group and 3 CPUs of the second group. * Something like: * * { 0 1 2 3 } { 4 5 6 7 } * * * * * * * If we were to balance group-wise we'd place two tasks in the first group and * two tasks in the second group. Clearly this is undesired as it will overload * cpu 3 and leave one of the CPUs in the second group unused. * * The current solution to this issue is detecting the skew in the first group * by noticing the lower domain failed to reach balance and had difficulty * moving tasks due to affinity constraints. * * When this is so detected; this group becomes a candidate for busiest; see * update_sd_pick_busiest(). And calculate_imbalance() and * find_busiest_group() avoid some of the usual balance conditions to allow it * to create an effective group imbalance. * * This is a somewhat tricky proposition since the next run might not find the * group imbalance and decide the groups need to be balanced again. A most * subtle and fragile situation. */ static inline int sg_imbalanced(struct sched_group *group) { return group->sgc->imbalance; } /* * group_has_capacity returns true if the group has spare capacity that could * be used by some tasks. * We consider that a group has spare capacity if the * number of task is * smaller than the number of CPUs or if the utilization is lower than the * available capacity for CFS tasks. * For the latter, we use a threshold to stabilize the state, to take into * account the variance of the tasks' load and to return true if the available * capacity in meaningful for the load balancer. * As an example, an available capacity of 1% can appear but it doesn't make * any benefit for the load balance. */ static inline bool group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs) { if (sgs->sum_nr_running < sgs->group_weight) return true; if ((sgs->group_capacity * 100) > (sgs->group_util * env->sd->imbalance_pct)) return true; return false; } /* * group_is_overloaded returns true if the group has more tasks than it can * handle. 
 * group_is_overloaded is not equal to !group_has_capacity because a group
 * with exactly the right number of tasks has no spare capacity left but is
 * not overloaded, so both group_has_capacity and group_is_overloaded return
 * false.
 */
static inline bool
group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
{
	if (sgs->sum_nr_running <= sgs->group_weight)
		return false;

	if ((sgs->group_capacity * 100) <
			(sgs->group_util * env->sd->imbalance_pct))
		return true;

	return false;
}

/*
 * group_smaller_min_cpu_capacity: Returns true if sched_group sg has smaller
 * per-CPU capacity than sched_group ref.
 */
static inline bool
group_smaller_min_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
{
	return sg->sgc->min_capacity * capacity_margin <
						ref->sgc->min_capacity * 1024;
}

/*
 * group_smaller_max_cpu_capacity: Returns true if sched_group sg has smaller
 * per-CPU capacity_orig than sched_group ref.
 */
static inline bool
group_smaller_max_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
{
	return sg->sgc->max_capacity * capacity_margin <
						ref->sgc->max_capacity * 1024;
}

static inline enum group_type
group_classify(struct sched_group *group, struct sg_lb_stats *sgs)
{
	if (sgs->group_no_capacity)
		return group_overloaded;

	if (sg_imbalanced(group))
		return group_imbalanced;

	if (sgs->group_misfit_task_load)
		return group_misfit_task;

	return group_other;
}

static bool update_nohz_stats(struct rq *rq, bool force)
{
#ifdef CONFIG_NO_HZ_COMMON
	unsigned int cpu = rq->cpu;

	if (!rq->has_blocked_load)
		return false;

	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
		return false;

	if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick))
		return true;

	update_blocked_averages(cpu);

	return rq->has_blocked_load;
#else
	return false;
#endif
}

/**
 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
 * @env: The load balancing environment.
 * @group: sched_group whose statistics are to be updated.
 * @sgs: variable to hold the statistics for this group.
* @sg_status: Holds flag indicating the status of the sched_group */ static inline void update_sg_lb_stats(struct lb_env *env, struct sched_group *group, struct sg_lb_stats *sgs, int *sg_status) { int local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group)); int load_idx = get_sd_load_idx(env->sd, env->idle); unsigned long load; int i, nr_running; memset(sgs, 0, sizeof(*sgs)); for_each_cpu_and(i, sched_group_span(group), env->cpus) { struct rq *rq = cpu_rq(i); if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false)) env->flags |= LBF_NOHZ_AGAIN; /* Bias balancing toward CPUs of our domain: */ if (local_group) load = target_load(i, load_idx); else load = source_load(i, load_idx); sgs->group_load += load; sgs->group_util += cpu_util(i); sgs->sum_nr_running += rq->cfs.h_nr_running; nr_running = rq->nr_running; if (nr_running > 1) *sg_status |= SG_OVERLOAD; if (cpu_overutilized(i)) *sg_status |= SG_OVERUTILIZED; #ifdef CONFIG_NUMA_BALANCING sgs->nr_numa_running += rq->nr_numa_running; sgs->nr_preferred_running += rq->nr_preferred_running; #endif sgs->sum_weighted_load += weighted_cpuload(rq); /* * No need to call idle_cpu() if nr_running is not 0 */ if (!nr_running && idle_cpu(i)) sgs->idle_cpus++; if (env->sd->flags & SD_ASYM_CPUCAPACITY && sgs->group_misfit_task_load < rq->misfit_task_load) { sgs->group_misfit_task_load = rq->misfit_task_load; *sg_status |= SG_OVERLOAD; } } /* Adjust by relative CPU capacity of the group */ sgs->group_capacity = group->sgc->capacity; sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity; if (sgs->sum_nr_running) sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running; sgs->group_weight = group->group_weight; sgs->group_no_capacity = group_is_overloaded(env, sgs); sgs->group_type = group_classify(group, sgs); } /** * update_sd_pick_busiest - return 1 on busiest group * @env: The load balancing environment. * @sds: sched_domain statistics * @sg: sched_group candidate to be checked for being the busiest * @sgs: sched_group statistics * * Determine if @sg is a busier group than the previously selected * busiest group. * * Return: %true if @sg is a busier group than the previously selected * busiest group. %false otherwise. */ static bool update_sd_pick_busiest(struct lb_env *env, struct sd_lb_stats *sds, struct sched_group *sg, struct sg_lb_stats *sgs) { struct sg_lb_stats *busiest = &sds->busiest_stat; /* * Don't try to pull misfit tasks we can't help. * We can use max_capacity here as reduction in capacity on some * CPUs in the group should either be possible to resolve * internally or be covered by avg_load imbalance (eventually). */ if (sgs->group_type == group_misfit_task && (!group_smaller_max_cpu_capacity(sg, sds->local) || !group_has_capacity(env, &sds->local_stat))) return false; if (sgs->group_type > busiest->group_type) return true; if (sgs->group_type < busiest->group_type) return false; if (sgs->avg_load <= busiest->avg_load) return false; if (!(env->sd->flags & SD_ASYM_CPUCAPACITY)) goto asym_packing; /* * Candidate sg has no more than one task per CPU and * has higher per-CPU capacity. Migrating tasks to less * capable CPUs may harm throughput. Maximize throughput, * power/energy consequences are not considered. */ if (sgs->sum_nr_running <= sgs->group_weight && group_smaller_min_cpu_capacity(sds->local, sg)) return false; /* * If we have more than one misfit sg go with the biggest misfit. 
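 *
 * Worked example (illustrative only): if the currently selected busiest
 * group carries a misfit load of 900 and a new candidate only 600, the
 * check below rejects the candidate, i.e. roughly
 *
 *	reject = candidate_misfit_load < busiest_misfit_load;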
*/ if (sgs->group_type == group_misfit_task && sgs->group_misfit_task_load < busiest->group_misfit_task_load) return false; asym_packing: /* This is the busiest node in its class. */ if (!(env->sd->flags & SD_ASYM_PACKING)) return true; /* No ASYM_PACKING if target CPU is already busy */ if (env->idle == CPU_NOT_IDLE) return true; /* * ASYM_PACKING needs to move all the work to the highest * prority CPUs in the group, therefore mark all groups * of lower priority than ourself as busy. */ if (sgs->sum_nr_running && sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) { if (!sds->busiest) return true; /* Prefer to move from lowest priority CPU's work */ if (sched_asym_prefer(sds->busiest->asym_prefer_cpu, sg->asym_prefer_cpu)) return true; } return false; } #ifdef CONFIG_NUMA_BALANCING static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) { if (sgs->sum_nr_running > sgs->nr_numa_running) return regular; if (sgs->sum_nr_running > sgs->nr_preferred_running) return remote; return all; } static inline enum fbq_type fbq_classify_rq(struct rq *rq) { if (rq->nr_running > rq->nr_numa_running) return regular; if (rq->nr_running > rq->nr_preferred_running) return remote; return all; } #else static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) { return all; } static inline enum fbq_type fbq_classify_rq(struct rq *rq) { return regular; } #endif /* CONFIG_NUMA_BALANCING */ /** * update_sd_lb_stats - Update sched_domain's statistics for load balancing. * @env: The load balancing environment. * @sds: variable to hold the statistics for this sched_domain. */ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) { struct sched_domain *child = env->sd->child; struct sched_group *sg = env->sd->groups; struct sg_lb_stats *local = &sds->local_stat; struct sg_lb_stats tmp_sgs; bool prefer_sibling = child && child->flags & SD_PREFER_SIBLING; int sg_status = 0; #ifdef CONFIG_NO_HZ_COMMON if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked)) env->flags |= LBF_NOHZ_STATS; #endif do { struct sg_lb_stats *sgs = &tmp_sgs; int local_group; local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg)); if (local_group) { sds->local = sg; sgs = local; if (env->idle != CPU_NEWLY_IDLE || time_after_eq(jiffies, sg->sgc->next_update)) update_group_capacity(env->sd, env->dst_cpu); } update_sg_lb_stats(env, sg, sgs, &sg_status); if (local_group) goto next_group; /* * In case the child domain prefers tasks go to siblings * first, lower the sg capacity so that we'll try * and move all the excess tasks away. We lower the capacity * of a group only if the local group has the capacity to fit * these excess tasks. The extra check prevents the case where * you always pull from the heaviest group when it is already * under-utilized (possible with a large weight task outweighs * the tasks on the system). 
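 *
 * Illustrative example (not in the original): if the local group runs
 * 2 tasks with spare capacity while a sibling group runs 5, then
 * 5 > 2 + 1 holds and the sibling is force-classified as having no
 * capacity, so the balancer keeps pulling from it toward the local
 * group; with 3 tasks against 2 it is left alone.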
*/ if (prefer_sibling && sds->local && group_has_capacity(env, local) && (sgs->sum_nr_running > local->sum_nr_running + 1)) { sgs->group_no_capacity = 1; sgs->group_type = group_classify(sg, sgs); } if (update_sd_pick_busiest(env, sds, sg, sgs)) { sds->busiest = sg; sds->busiest_stat = *sgs; } next_group: /* Now, start updating sd_lb_stats */ sds->total_running += sgs->sum_nr_running; sds->total_load += sgs->group_load; sds->total_capacity += sgs->group_capacity; sg = sg->next; } while (sg != env->sd->groups); #ifdef CONFIG_NO_HZ_COMMON if ((env->flags & LBF_NOHZ_AGAIN) && cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd))) { WRITE_ONCE(nohz.next_blocked, jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD)); } #endif if (env->sd->flags & SD_NUMA) env->fbq_type = fbq_classify_group(&sds->busiest_stat); if (!env->sd->parent) { struct root_domain *rd = env->dst_rq->rd; /* update overload indicator if we are at root domain */ WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD); /* Update over-utilization (tipping point, U >= 0) indicator */ WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED); } else if (sg_status & SG_OVERUTILIZED) { WRITE_ONCE(env->dst_rq->rd->overutilized, SG_OVERUTILIZED); } } /** * check_asym_packing - Check to see if the group is packed into the * sched domain. * * This is primarily intended to used at the sibling level. Some * cores like POWER7 prefer to use lower numbered SMT threads. In the * case of POWER7, it can move to lower SMT modes only when higher * threads are idle. When in lower SMT modes, the threads will * perform better since they share less core resources. Hence when we * have idle threads, we want them to be the higher ones. * * This packing function is run on idle threads. It checks to see if * the busiest CPU in this domain (core in the P7 case) has a higher * CPU number than the packing function is being run on. Here we are * assuming lower CPU number will be equivalent to lower a SMT thread * number. * * Return: 1 when packing is required and a task should be moved to * this CPU. The amount of the imbalance is returned in env->imbalance. * * @env: The load balancing environment. * @sds: Statistics of the sched_domain which is to be packed */ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) { int busiest_cpu; if (!(env->sd->flags & SD_ASYM_PACKING)) return 0; if (env->idle == CPU_NOT_IDLE) return 0; if (!sds->busiest) return 0; busiest_cpu = sds->busiest->asym_prefer_cpu; if (sched_asym_prefer(busiest_cpu, env->dst_cpu)) return 0; env->imbalance = DIV_ROUND_CLOSEST( sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity, SCHED_CAPACITY_SCALE); return 1; } /** * fix_small_imbalance - Calculate the minor imbalance that exists * amongst the groups of a sched_domain, during * load balancing. * @env: The load balancing environment. * @sds: Statistics of the sched_domain whose imbalance is to be calculated. 
*/ static inline void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) { unsigned long tmp, capa_now = 0, capa_move = 0; unsigned int imbn = 2; unsigned long scaled_busy_load_per_task; struct sg_lb_stats *local, *busiest; local = &sds->local_stat; busiest = &sds->busiest_stat; if (!local->sum_nr_running) local->load_per_task = cpu_avg_load_per_task(env->dst_cpu); else if (busiest->load_per_task > local->load_per_task) imbn = 1; scaled_busy_load_per_task = (busiest->load_per_task * SCHED_CAPACITY_SCALE) / busiest->group_capacity; if (busiest->avg_load + scaled_busy_load_per_task >= local->avg_load + (scaled_busy_load_per_task * imbn)) { env->imbalance = busiest->load_per_task; return; } /* * OK, we don't have enough imbalance to justify moving tasks, * however we may be able to increase total CPU capacity used by * moving them. */ capa_now += busiest->group_capacity * min(busiest->load_per_task, busiest->avg_load); capa_now += local->group_capacity * min(local->load_per_task, local->avg_load); capa_now /= SCHED_CAPACITY_SCALE; /* Amount of load we'd subtract */ if (busiest->avg_load > scaled_busy_load_per_task) { capa_move += busiest->group_capacity * min(busiest->load_per_task, busiest->avg_load - scaled_busy_load_per_task); } /* Amount of load we'd add */ if (busiest->avg_load * busiest->group_capacity < busiest->load_per_task * SCHED_CAPACITY_SCALE) { tmp = (busiest->avg_load * busiest->group_capacity) / local->group_capacity; } else { tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) / local->group_capacity; } capa_move += local->group_capacity * min(local->load_per_task, local->avg_load + tmp); capa_move /= SCHED_CAPACITY_SCALE; /* Move if we gain throughput */ if (capa_move > capa_now) env->imbalance = busiest->load_per_task; } /** * calculate_imbalance - Calculate the amount of imbalance present within the * groups of a given sched_domain during load balance. * @env: load balance environment * @sds: statistics of the sched_domain whose imbalance is to be calculated. */ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds) { unsigned long max_pull, load_above_capacity = ~0UL; struct sg_lb_stats *local, *busiest; local = &sds->local_stat; busiest = &sds->busiest_stat; if (busiest->group_type == group_imbalanced) { /* * In the group_imb case we cannot rely on group-wide averages * to ensure CPU-load equilibrium, look at wider averages. XXX */ busiest->load_per_task = min(busiest->load_per_task, sds->avg_load); } /* * Avg load of busiest sg can be less and avg load of local sg can * be greater than avg load across all sgs of sd because avg load * factors in sg capacity and sgs with smaller group_type are * skipped when updating the busiest sg: */ if (busiest->group_type != group_misfit_task && (busiest->avg_load <= sds->avg_load || local->avg_load >= sds->avg_load)) { env->imbalance = 0; return fix_small_imbalance(env, sds); } /* * If there aren't any idle CPUs, avoid creating some. 
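 *
 * Worked example (illustrative, assuming SCHED_CAPACITY_SCALE and
 * scale_load_down(NICE_0_LOAD) are both 1024): a busiest group running
 * 6 tasks on a group_capacity of 2048 gives
 *
 *	load_above_capacity = 6 * 1024 = 6144
 *	6144 > 2048, so: (6144 - 2048) * 1024 / 2048 = 2048
 *
 * i.e. roughly two tasks' worth of load sits above what the group can
 * serve at full capacity.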
*/ if (busiest->group_type == group_overloaded && local->group_type == group_overloaded) { load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE; if (load_above_capacity > busiest->group_capacity) { load_above_capacity -= busiest->group_capacity; load_above_capacity *= scale_load_down(NICE_0_LOAD); load_above_capacity /= busiest->group_capacity; } else load_above_capacity = ~0UL; } /* * We're trying to get all the CPUs to the average_load, so we don't * want to push ourselves above the average load, nor do we wish to * reduce the max loaded CPU below the average load. At the same time, * we also don't want to reduce the group load below the group * capacity. Thus we look for the minimum possible imbalance. */ max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity); /* How much load to actually move to equalise the imbalance */ env->imbalance = min( max_pull * busiest->group_capacity, (sds->avg_load - local->avg_load) * local->group_capacity ) / SCHED_CAPACITY_SCALE; /* Boost imbalance to allow misfit task to be balanced. */ if (busiest->group_type == group_misfit_task) { env->imbalance = max_t(long, env->imbalance, busiest->group_misfit_task_load); } /* * if *imbalance is less than the average load per runnable task * there is no guarantee that any tasks will be moved so we'll have * a think about bumping its value to force at least one task to be * moved */ if (env->imbalance < busiest->load_per_task) return fix_small_imbalance(env, sds); } /******* find_busiest_group() helpers end here *********************/ /** * find_busiest_group - Returns the busiest group within the sched_domain * if there is an imbalance. * * Also calculates the amount of weighted load which should be moved * to restore balance. * * @env: The load balancing environment. * * Return: - The busiest group if imbalance exists. */ static struct sched_group *find_busiest_group(struct lb_env *env) { struct sg_lb_stats *local, *busiest; struct sd_lb_stats sds; init_sd_lb_stats(&sds); /* * Compute the various statistics relavent for load balancing at * this level. */ update_sd_lb_stats(env, &sds); if (static_branch_unlikely(&sched_energy_present)) { struct root_domain *rd = env->dst_rq->rd; if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized)) goto out_balanced; } local = &sds.local_stat; busiest = &sds.busiest_stat; /* ASYM feature bypasses nice load balance check */ if (check_asym_packing(env, &sds)) return sds.busiest; /* There is no busy sibling group to pull tasks from */ if (!sds.busiest || busiest->sum_nr_running == 0) goto out_balanced; /* XXX broken for overlapping NUMA groups */ sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load) / sds.total_capacity; /* * If the busiest group is imbalanced the below checks don't * work because they assume all things are equal, which typically * isn't true due to cpus_allowed constraints and the like. */ if (busiest->group_type == group_imbalanced) goto force_balance; /* * When dst_cpu is idle, prevent SMP nice and/or asymmetric group * capacities from resulting in underutilization due to avg_load. */ if (env->idle != CPU_NOT_IDLE && group_has_capacity(env, local) && busiest->group_no_capacity) goto force_balance; /* Misfit tasks should be dealt with regardless of the avg load */ if (busiest->group_type == group_misfit_task) goto force_balance; /* * If the local group is busier than the selected busiest group * don't try and pull any tasks. 
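 *
 * In the CPU_NEWLY_IDLE/CPU_NOT_IDLE cases below an imbalance_pct margin
 * is applied as well. Illustrative example (assuming a typical
 * imbalance_pct of 125): busiest avg_load 1000 against local avg_load 850
 * gives
 *
 *	100 * 1000 <= 125 * 850		(100000 <= 106250)
 *
 * so the roughly 18% difference is still treated as balanced.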
*/ if (local->avg_load >= busiest->avg_load) goto out_balanced; /* * Don't pull any tasks if this group is already above the domain * average load. */ if (local->avg_load >= sds.avg_load) goto out_balanced; if (env->idle == CPU_IDLE) { /* * This CPU is idle. If the busiest group is not overloaded * and there is no imbalance between this and busiest group * wrt idle CPUs, it is balanced. The imbalance becomes * significant if the diff is greater than 1 otherwise we * might end up to just move the imbalance on another group */ if ((busiest->group_type != group_overloaded) && (local->idle_cpus <= (busiest->idle_cpus + 1))) goto out_balanced; } else { /* * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use * imbalance_pct to be conservative. */ if (100 * busiest->avg_load <= env->sd->imbalance_pct * local->avg_load) goto out_balanced; } force_balance: /* Looks like there is an imbalance. Compute it */ env->src_grp_type = busiest->group_type; calculate_imbalance(env, &sds); return env->imbalance ? sds.busiest : NULL; out_balanced: env->imbalance = 0; return NULL; } /* * find_busiest_queue - find the busiest runqueue among the CPUs in the group. */ static struct rq *find_busiest_queue(struct lb_env *env, struct sched_group *group) { struct rq *busiest = NULL, *rq; unsigned long busiest_load = 0, busiest_capacity = 1; int i; for_each_cpu_and(i, sched_group_span(group), env->cpus) { unsigned long capacity, wl; enum fbq_type rt; rq = cpu_rq(i); rt = fbq_classify_rq(rq); /* * We classify groups/runqueues into three groups: * - regular: there are !numa tasks * - remote: there are numa tasks that run on the 'wrong' node * - all: there is no distinction * * In order to avoid migrating ideally placed numa tasks, * ignore those when there's better options. * * If we ignore the actual busiest queue to migrate another * task, the next balance pass can still reduce the busiest * queue by moving tasks around inside the node. * * If we cannot move enough load due to this classification * the next pass will adjust the group classification and * allow migration of more tasks. * * Both cases only affect the total convergence complexity. */ if (rt > env->fbq_type) continue; /* * For ASYM_CPUCAPACITY domains with misfit tasks we simply * seek the "biggest" misfit task. */ if (env->src_grp_type == group_misfit_task) { if (rq->misfit_task_load > busiest_load) { busiest_load = rq->misfit_task_load; busiest = rq; } continue; } capacity = capacity_of(i); /* * For ASYM_CPUCAPACITY domains, don't pick a CPU that could * eventually lead to active_balancing high->low capacity. * Higher per-CPU capacity is considered better than balancing * average load. */ if (env->sd->flags & SD_ASYM_CPUCAPACITY && capacity_of(env->dst_cpu) < capacity && rq->nr_running == 1) continue; wl = weighted_cpuload(rq); /* * When comparing with imbalance, use weighted_cpuload() * which is not scaled with the CPU capacity. */ if (rq->nr_running == 1 && wl > env->imbalance && !check_cpu_capacity(rq, env->sd)) continue; /* * For the load comparisons with the other CPU's, consider * the weighted_cpuload() scaled with the CPU capacity, so * that the load can be moved away from the CPU that is * potentially running at a lower capacity. * * Thus we're looking for max(wl_i / capacity_i), crosswise * multiplication to rid ourselves of the division works out * to: wl_i * capacity_j > wl_j * capacity_i; where j is * our previous maximum. 
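 *
 * Illustrative sketch (made-up helper name) of that division-free
 * comparison; it also avoids the rounding loss of integer division:
 *
 *	static bool sketch_relatively_busier(unsigned long wl_i,
 *					     unsigned long cap_i,
 *					     unsigned long wl_j,
 *					     unsigned long cap_j)
 *	{
 *		return wl_i * cap_j > wl_j * cap_i;
 *	}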
*/ if (wl * busiest_capacity > busiest_load * capacity) { busiest_load = wl; busiest_capacity = capacity; busiest = rq; } } return busiest; } /* * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but * so long as it is large enough. */ #define MAX_PINNED_INTERVAL 512 static int need_active_balance(struct lb_env *env) { struct sched_domain *sd = env->sd; if (env->idle == CPU_NEWLY_IDLE) { /* * ASYM_PACKING needs to force migrate tasks from busy but * lower priority CPUs in order to pack all tasks in the * highest priority CPUs. */ if ((sd->flags & SD_ASYM_PACKING) && sched_asym_prefer(env->dst_cpu, env->src_cpu)) return 1; } /* * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task. * It's worth migrating the task if the src_cpu's capacity is reduced * because of other sched_class or IRQs if more capacity stays * available on dst_cpu. */ if ((env->idle != CPU_NOT_IDLE) && (env->src_rq->cfs.h_nr_running == 1)) { if ((check_cpu_capacity(env->src_rq, sd)) && (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100)) return 1; } if (env->src_grp_type == group_misfit_task) return 1; return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2); } static int active_load_balance_cpu_stop(void *data); static int should_we_balance(struct lb_env *env) { struct sched_group *sg = env->sd->groups; int cpu, balance_cpu = -1; /* * Ensure the balancing environment is consistent; can happen * when the softirq triggers 'during' hotplug. */ if (!cpumask_test_cpu(env->dst_cpu, env->cpus)) return 0; /* * In the newly idle case, we will allow all the CPUs * to do the newly idle load balance. */ if (env->idle == CPU_NEWLY_IDLE) return 1; /* Try to find first idle CPU */ for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) { if (!idle_cpu(cpu)) continue; balance_cpu = cpu; break; } if (balance_cpu == -1) balance_cpu = group_balance_cpu(sg); /* * First idle CPU or the first CPU(busiest) in this sched group * is eligible for doing load balancing at this and above domains. */ return balance_cpu == env->dst_cpu; } /* * Check this_cpu to ensure it is balanced within domain. Attempt to move * tasks if there is an imbalance. */ static int load_balance(int this_cpu, struct rq *this_rq, struct sched_domain *sd, enum cpu_idle_type idle, int *continue_balancing) { int ld_moved, cur_ld_moved, active_balance = 0; struct sched_domain *sd_parent = sd->parent; struct sched_group *group; struct rq *busiest; struct rq_flags rf; struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask); struct lb_env env = { .sd = sd, .dst_cpu = this_cpu, .dst_rq = this_rq, .dst_grpmask = sched_group_span(sd->groups), .idle = idle, .loop_break = sched_nr_migrate_break, .cpus = cpus, .fbq_type = all, .tasks = LIST_HEAD_INIT(env.tasks), }; cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask); schedstat_inc(sd->lb_count[idle]); redo: if (!should_we_balance(&env)) { *continue_balancing = 0; goto out_balanced; } group = find_busiest_group(&env); if (!group) { schedstat_inc(sd->lb_nobusyg[idle]); goto out_balanced; } busiest = find_busiest_queue(&env, group); if (!busiest) { schedstat_inc(sd->lb_nobusyq[idle]); goto out_balanced; } BUG_ON(busiest == env.dst_rq); schedstat_add(sd->lb_imbalance[idle], env.imbalance); env.src_cpu = busiest->cpu; env.src_rq = busiest; ld_moved = 0; if (busiest->nr_running > 1) { /* * Attempt to move tasks. If find_busiest_group has found * an imbalance but busiest->nr_running <= 1, the group is * still unbalanced. 
ld_moved simply stays zero, so it is * correctly treated as an imbalance. */ env.flags |= LBF_ALL_PINNED; env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); more_balance: rq_lock_irqsave(busiest, &rf); update_rq_clock(busiest); /* * cur_ld_moved - load moved in current iteration * ld_moved - cumulative load moved across iterations */ cur_ld_moved = detach_tasks(&env); /* * We've detached some tasks from busiest_rq. Every * task is masked "TASK_ON_RQ_MIGRATING", so we can safely * unlock busiest->lock, and we are able to be sure * that nobody can manipulate the tasks in parallel. * See task_rq_lock() family for the details. */ rq_unlock(busiest, &rf); if (cur_ld_moved) { attach_tasks(&env); ld_moved += cur_ld_moved; } local_irq_restore(rf.flags); if (env.flags & LBF_NEED_BREAK) { env.flags &= ~LBF_NEED_BREAK; goto more_balance; } /* * Revisit (affine) tasks on src_cpu that couldn't be moved to * us and move them to an alternate dst_cpu in our sched_group * where they can run. The upper limit on how many times we * iterate on same src_cpu is dependent on number of CPUs in our * sched_group. * * This changes load balance semantics a bit on who can move * load to a given_cpu. In addition to the given_cpu itself * (or a ilb_cpu acting on its behalf where given_cpu is * nohz-idle), we now have balance_cpu in a position to move * load to given_cpu. In rare situations, this may cause * conflicts (balance_cpu and given_cpu/ilb_cpu deciding * _independently_ and at _same_ time to move some load to * given_cpu) causing exceess load to be moved to given_cpu. * This however should not happen so much in practice and * moreover subsequent load balance cycles should correct the * excess load moved. */ if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) { /* Prevent to re-select dst_cpu via env's CPUs */ cpumask_clear_cpu(env.dst_cpu, env.cpus); env.dst_rq = cpu_rq(env.new_dst_cpu); env.dst_cpu = env.new_dst_cpu; env.flags &= ~LBF_DST_PINNED; env.loop = 0; env.loop_break = sched_nr_migrate_break; /* * Go back to "more_balance" rather than "redo" since we * need to continue with same src_cpu. */ goto more_balance; } /* * We failed to reach balance because of affinity. */ if (sd_parent) { int *group_imbalance = &sd_parent->groups->sgc->imbalance; if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) *group_imbalance = 1; } /* All tasks on this runqueue were pinned by CPU affinity */ if (unlikely(env.flags & LBF_ALL_PINNED)) { cpumask_clear_cpu(cpu_of(busiest), cpus); /* * Attempting to continue load balancing at the current * sched_domain level only makes sense if there are * active CPUs remaining as possible busiest CPUs to * pull load from which are not contained within the * destination group that is receiving any migrated * load. */ if (!cpumask_subset(cpus, env.dst_grpmask)) { env.loop = 0; env.loop_break = sched_nr_migrate_break; goto redo; } goto out_all_pinned; } } if (!ld_moved) { schedstat_inc(sd->lb_failed[idle]); /* * Increment the failure counter only on periodic balance. * We do not want newidle balance, which can be very * frequent, pollute the failure counter causing * excessive cache_hot migrations and active balances. 
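 *
 * For context (an illustrative restatement of checks found elsewhere in
 * this file): the counter loosens migration in two stages, roughly
 *
 *	allow_cache_hot_pull = nr_balance_failed > cache_nice_tries;
 *	kick_active_balance  = nr_balance_failed > cache_nice_tries + 2;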
*/ if (idle != CPU_NEWLY_IDLE) sd->nr_balance_failed++; if (need_active_balance(&env)) { unsigned long flags; raw_spin_lock_irqsave(&busiest->lock, flags); /* * Don't kick the active_load_balance_cpu_stop, * if the curr task on busiest CPU can't be * moved to this_cpu: */ if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) { raw_spin_unlock_irqrestore(&busiest->lock, flags); env.flags |= LBF_ALL_PINNED; goto out_one_pinned; } /* * ->active_balance synchronizes accesses to * ->active_balance_work. Once set, it's cleared * only after active load balance is finished. */ if (!busiest->active_balance) { busiest->active_balance = 1; busiest->push_cpu = this_cpu; active_balance = 1; } raw_spin_unlock_irqrestore(&busiest->lock, flags); if (active_balance) { stop_one_cpu_nowait(cpu_of(busiest), active_load_balance_cpu_stop, busiest, &busiest->active_balance_work); } /* We've kicked active balancing, force task migration. */ sd->nr_balance_failed = sd->cache_nice_tries+1; } } else sd->nr_balance_failed = 0; if (likely(!active_balance)) { /* We were unbalanced, so reset the balancing interval */ sd->balance_interval = sd->min_interval; } else { /* * If we've begun active balancing, start to back off. This * case may not be covered by the all_pinned logic if there * is only 1 task on the busy runqueue (because we don't call * detach_tasks). */ if (sd->balance_interval < sd->max_interval) sd->balance_interval *= 2; } goto out; out_balanced: /* * We reach balance although we may have faced some affinity * constraints. Clear the imbalance flag if it was set. */ if (sd_parent) { int *group_imbalance = &sd_parent->groups->sgc->imbalance; if (*group_imbalance) *group_imbalance = 0; } out_all_pinned: /* * We reach balance because all tasks are pinned at this level so * we can't migrate them. Let the imbalance flag set so parent level * can try to migrate them. */ schedstat_inc(sd->lb_balanced[idle]); sd->nr_balance_failed = 0; out_one_pinned: ld_moved = 0; /* * idle_balance() disregards balance intervals, so we could repeatedly * reach this code, which would lead to balance_interval skyrocketting * in a short amount of time. Skip the balance_interval increase logic * to avoid that. */ if (env.idle == CPU_NEWLY_IDLE) goto out; /* tune up the balancing interval */ if ((env.flags & LBF_ALL_PINNED && sd->balance_interval < MAX_PINNED_INTERVAL) || sd->balance_interval < sd->max_interval) sd->balance_interval *= 2; out: return ld_moved; } static inline unsigned long get_sd_balance_interval(struct sched_domain *sd, int cpu_busy) { unsigned long interval = sd->balance_interval; if (cpu_busy) interval *= sd->busy_factor; /* scale ms to jiffies */ interval = msecs_to_jiffies(interval); interval = clamp(interval, 1UL, max_load_balance_interval); return interval; } static inline void update_next_balance(struct sched_domain *sd, unsigned long *next_balance) { unsigned long interval, next; /* used by idle balance, so cpu_busy = 0 */ interval = get_sd_balance_interval(sd, 0); next = sd->last_balance + interval; if (time_after(*next_balance, next)) *next_balance = next; } /* * active_load_balance_cpu_stop is run by the CPU stopper. It pushes * running tasks off the busiest CPU onto idle CPUs. It requires at * least 1 task to be running on each physical CPU where possible, and * avoids physical / logical imbalances. 
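 *
 * Illustrative example (not in the original): CPU2 keeps running a single
 * CPU-bound task that regular balancing cannot move because a running
 * task is never detached, while CPU5 stays idle. After enough failed
 * attempts CPU5 records itself as push_cpu on CPU2's runqueue and queues
 * this stopper callback on CPU2; the stopper preempts the task, detaches
 * it and attaches it to CPU5.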
*/ static int active_load_balance_cpu_stop(void *data) { struct rq *busiest_rq = data; int busiest_cpu = cpu_of(busiest_rq); int target_cpu = busiest_rq->push_cpu; struct rq *target_rq = cpu_rq(target_cpu); struct sched_domain *sd; struct task_struct *p = NULL; struct rq_flags rf; rq_lock_irq(busiest_rq, &rf); /* * Between queueing the stop-work and running it is a hole in which * CPUs can become inactive. We should not move tasks from or to * inactive CPUs. */ if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu)) goto out_unlock; /* Make sure the requested CPU hasn't gone down in the meantime: */ if (unlikely(busiest_cpu != smp_processor_id() || !busiest_rq->active_balance)) goto out_unlock; /* Is there any task to move? */ if (busiest_rq->nr_running <= 1) goto out_unlock; /* * This condition is "impossible", if it occurs * we need to fix it. Originally reported by * Bjorn Helgaas on a 128-CPU setup. */ BUG_ON(busiest_rq == target_rq); /* Search for an sd spanning us and the target CPU. */ rcu_read_lock(); for_each_domain(target_cpu, sd) { if ((sd->flags & SD_LOAD_BALANCE) && cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) break; } if (likely(sd)) { struct lb_env env = { .sd = sd, .dst_cpu = target_cpu, .dst_rq = target_rq, .src_cpu = busiest_rq->cpu, .src_rq = busiest_rq, .idle = CPU_IDLE, /* * can_migrate_task() doesn't need to compute new_dst_cpu * for active balancing. Since we have CPU_IDLE, but no * @dst_grpmask we need to make that test go away with lying * about DST_PINNED. */ .flags = LBF_DST_PINNED, }; schedstat_inc(sd->alb_count); update_rq_clock(busiest_rq); p = detach_one_task(&env); if (p) { schedstat_inc(sd->alb_pushed); /* Active balancing done, reset the failure counter. */ sd->nr_balance_failed = 0; } else { schedstat_inc(sd->alb_failed); } } rcu_read_unlock(); out_unlock: busiest_rq->active_balance = 0; rq_unlock(busiest_rq, &rf); if (p) attach_one_task(target_rq, p); local_irq_enable(); return 0; } static DEFINE_SPINLOCK(balancing); /* * Scale the max load_balance interval with the number of CPUs in the system. * This trades load-balance latency on larger machines for less cross talk. */ void update_max_interval(void) { max_load_balance_interval = HZ*num_online_cpus()/10; } /* * It checks each scheduling domain to see if it is due to be balanced, * and initiates a balancing operation if so. * * Balancing parameters are set up in init_sched_domains. */ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle) { int continue_balancing = 1; int cpu = rq->cpu; unsigned long interval; struct sched_domain *sd; /* Earliest time when we have to do rebalance again */ unsigned long next_balance = jiffies + 60*HZ; int update_next_balance = 0; int need_serialize, need_decay = 0; u64 max_cost = 0; rcu_read_lock(); for_each_domain(cpu, sd) { /* * Decay the newidle max times here because this is a regular * visit to all the domains. Decay ~1% per second. */ if (time_after(jiffies, sd->next_decay_max_lb_cost)) { sd->max_newidle_lb_cost = (sd->max_newidle_lb_cost * 253) / 256; sd->next_decay_max_lb_cost = jiffies + HZ; need_decay = 1; } max_cost += sd->max_newidle_lb_cost; if (!(sd->flags & SD_LOAD_BALANCE)) continue; /* * Stop the load balance at this level. There is another * CPU in our sched group which is doing load balancing more * actively. 
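 *
 * Illustrative example (not in the original): in a 4-CPU group where CPU1
 * is the first idle CPU, should_we_balance() lets only CPU1 keep walking
 * up the domain hierarchy; load_balance() on the other CPUs clears
 * continue_balancing, and the check below stops their walk at this level.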
*/ if (!continue_balancing) { if (need_decay) continue; break; } interval = get_sd_balance_interval(sd, idle != CPU_IDLE); need_serialize = sd->flags & SD_SERIALIZE; if (need_serialize) { if (!spin_trylock(&balancing)) goto out; } if (time_after_eq(jiffies, sd->last_balance + interval)) { if (load_balance(cpu, rq, sd, idle, &continue_balancing)) { /* * The LBF_DST_PINNED logic could have changed * env->dst_cpu, so we can't know our idle * state even if we migrated tasks. Update it. */ idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE; } sd->last_balance = jiffies; interval = get_sd_balance_interval(sd, idle != CPU_IDLE); } if (need_serialize) spin_unlock(&balancing); out: if (time_after(next_balance, sd->last_balance + interval)) { next_balance = sd->last_balance + interval; update_next_balance = 1; } } if (need_decay) { /* * Ensure the rq-wide value also decays but keep it at a * reasonable floor to avoid funnies with rq->avg_idle. */ rq->max_idle_balance_cost = max((u64)sysctl_sched_migration_cost, max_cost); } rcu_read_unlock(); /* * next_balance will be updated only when there is a need. * When the cpu is attached to null domain for ex, it will not be * updated. */ if (likely(update_next_balance)) { rq->next_balance = next_balance; #ifdef CONFIG_NO_HZ_COMMON /* * If this CPU has been elected to perform the nohz idle * balance. Other idle CPUs have already rebalanced with * nohz_idle_balance() and nohz.next_balance has been * updated accordingly. This CPU is now running the idle load * balance for itself and we need to update the * nohz.next_balance accordingly. */ if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance)) nohz.next_balance = rq->next_balance; #endif } } static inline int on_null_domain(struct rq *rq) { return unlikely(!rcu_dereference_sched(rq->sd)); } #ifdef CONFIG_NO_HZ_COMMON /* * idle load balancing details * - When one of the busy CPUs notice that there may be an idle rebalancing * needed, they will kick the idle load balancer, which then does idle * load balancing for all the idle CPUs. */ static inline int find_new_ilb(void) { int ilb = cpumask_first(nohz.idle_cpus_mask); if (ilb < nr_cpu_ids && idle_cpu(ilb)) return ilb; return nr_cpu_ids; } /* * Kick a CPU to do the nohz balancing, if it is time for it. We pick the * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle * CPU (if there is one). */ static void kick_ilb(unsigned int flags) { int ilb_cpu; nohz.next_balance++; ilb_cpu = find_new_ilb(); if (ilb_cpu >= nr_cpu_ids) return; flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu)); if (flags & NOHZ_KICK_MASK) return; /* * Use smp_send_reschedule() instead of resched_cpu(). * This way we generate a sched IPI on the target CPU which * is idle. And the softirq performing nohz idle load balance * will be run before returning from the IPI. */ smp_send_reschedule(ilb_cpu); } /* * Current heuristic for kicking the idle load balancer in the presence * of an idle cpu in the system. * - This rq has more than one task. * - This rq has at least one CFS task and the capacity of the CPU is * significantly reduced because of RT tasks or IRQs. * - At parent of LLC scheduler domain level, this cpu's scheduler group has * multiple busy cpu. * - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler * domain span are idle. 
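 *
 * Condensed into a single predicate, the kick decision is roughly the
 * following (illustrative sketch with made-up names; the exact conditions
 * are implemented just below):
 *
 *	kick = nr_running >= 2 || misfit_task_load ||
 *	       (has_cfs_task && cpu_capacity_noticeably_reduced) ||
 *	       llc_group_busy_cpus > 1 || asym_packing_prefers_idle_cpu;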
*/ static void nohz_balancer_kick(struct rq *rq) { unsigned long now = jiffies; struct sched_domain_shared *sds; struct sched_domain *sd; int nr_busy, i, cpu = rq->cpu; unsigned int flags = 0; if (unlikely(rq->idle_balance)) return; /* * We may be recently in ticked or tickless idle mode. At the first * busy tick after returning from idle, we will update the busy stats. */ nohz_balance_exit_idle(rq); /* * None are in tickless mode and hence no need for NOHZ idle load * balancing. */ if (likely(!atomic_read(&nohz.nr_cpus))) return; if (READ_ONCE(nohz.has_blocked) && time_after(now, READ_ONCE(nohz.next_blocked))) flags = NOHZ_STATS_KICK; if (time_before(now, nohz.next_balance)) goto out; if (rq->nr_running >= 2 || rq->misfit_task_load) { flags = NOHZ_KICK_MASK; goto out; } rcu_read_lock(); sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); if (sds) { /* * XXX: write a coherent comment on why we do this. * See also: http://lkml.kernel.org/r/20111202010832.602203411@sbsiddha-desk.sc.intel.com */ nr_busy = atomic_read(&sds->nr_busy_cpus); if (nr_busy > 1) { flags = NOHZ_KICK_MASK; goto unlock; } } sd = rcu_dereference(rq->sd); if (sd) { if ((rq->cfs.h_nr_running >= 1) && check_cpu_capacity(rq, sd)) { flags = NOHZ_KICK_MASK; goto unlock; } } sd = rcu_dereference(per_cpu(sd_asym_packing, cpu)); if (sd) { for_each_cpu(i, sched_domain_span(sd)) { if (i == cpu || !cpumask_test_cpu(i, nohz.idle_cpus_mask)) continue; if (sched_asym_prefer(i, cpu)) { flags = NOHZ_KICK_MASK; goto unlock; } } } unlock: rcu_read_unlock(); out: if (flags) kick_ilb(flags); } static void set_cpu_sd_state_busy(int cpu) { struct sched_domain *sd; rcu_read_lock(); sd = rcu_dereference(per_cpu(sd_llc, cpu)); if (!sd || !sd->nohz_idle) goto unlock; sd->nohz_idle = 0; atomic_inc(&sd->shared->nr_busy_cpus); unlock: rcu_read_unlock(); } void nohz_balance_exit_idle(struct rq *rq) { SCHED_WARN_ON(rq != this_rq()); if (likely(!rq->nohz_tick_stopped)) return; rq->nohz_tick_stopped = 0; cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask); atomic_dec(&nohz.nr_cpus); set_cpu_sd_state_busy(rq->cpu); } static void set_cpu_sd_state_idle(int cpu) { struct sched_domain *sd; rcu_read_lock(); sd = rcu_dereference(per_cpu(sd_llc, cpu)); if (!sd || sd->nohz_idle) goto unlock; sd->nohz_idle = 1; atomic_dec(&sd->shared->nr_busy_cpus); unlock: rcu_read_unlock(); } /* * This routine will record that the CPU is going idle with tick stopped. * This info will be used in performing idle load balancing in the future. */ void nohz_balance_enter_idle(int cpu) { struct rq *rq = cpu_rq(cpu); SCHED_WARN_ON(cpu != smp_processor_id()); /* If this CPU is going down, then nothing needs to be done: */ if (!cpu_active(cpu)) return; /* Spare idle load balancing on CPUs that don't want to be disturbed: */ if (!housekeeping_cpu(cpu, HK_FLAG_SCHED)) return; /* * Can be set safely without rq->lock held * If a clear happens, it will have evaluated last additions because * rq->lock is held during the check and the clear */ rq->has_blocked_load = 1; /* * The tick is still stopped but load could have been added in the * meantime. We set the nohz.has_blocked flag to trig a check of the * *_avg. 
The CPU is already part of nohz.idle_cpus_mask so the clear * of nohz.has_blocked can only happen after checking the new load */ if (rq->nohz_tick_stopped) goto out; /* If we're a completely isolated CPU, we don't play: */ if (on_null_domain(rq)) return; rq->nohz_tick_stopped = 1; cpumask_set_cpu(cpu, nohz.idle_cpus_mask); atomic_inc(&nohz.nr_cpus); /* * Ensures that if nohz_idle_balance() fails to observe our * @idle_cpus_mask store, it must observe the @has_blocked * store. */ smp_mb__after_atomic(); set_cpu_sd_state_idle(cpu); out: /* * Each time a cpu enter idle, we assume that it has blocked load and * enable the periodic update of the load of idle cpus */ WRITE_ONCE(nohz.has_blocked, 1); } /* * Internal function that runs load balance for all idle cpus. The load balance * can be a simple update of blocked load or a complete load balance with * tasks movement depending of flags. * The function returns false if the loop has stopped before running * through all idle CPUs. */ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags, enum cpu_idle_type idle) { /* Earliest time when we have to do rebalance again */ unsigned long now = jiffies; unsigned long next_balance = now + 60*HZ; bool has_blocked_load = false; int update_next_balance = 0; int this_cpu = this_rq->cpu; int balance_cpu; int ret = false; struct rq *rq; SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK); /* * We assume there will be no idle load after this update and clear * the has_blocked flag. If a cpu enters idle in the mean time, it will * set the has_blocked flag and trig another update of idle load. * Because a cpu that becomes idle, is added to idle_cpus_mask before * setting the flag, we are sure to not clear the state and not * check the load of an idle cpu. */ WRITE_ONCE(nohz.has_blocked, 0); /* * Ensures that if we miss the CPU, we must see the has_blocked * store from nohz_balance_enter_idle(). */ smp_mb(); for_each_cpu(balance_cpu, nohz.idle_cpus_mask) { if (balance_cpu == this_cpu || !idle_cpu(balance_cpu)) continue; /* * If this CPU gets work to do, stop the load balancing * work being done for other CPUs. Next load * balancing owner will pick it up. */ if (need_resched()) { has_blocked_load = true; goto abort; } rq = cpu_rq(balance_cpu); has_blocked_load |= update_nohz_stats(rq, true); /* * If time for next balance is due, * do the balance. */ if (time_after_eq(jiffies, rq->next_balance)) { struct rq_flags rf; rq_lock_irqsave(rq, &rf); update_rq_clock(rq); cpu_load_update_idle(rq); rq_unlock_irqrestore(rq, &rf); if (flags & NOHZ_BALANCE_KICK) rebalance_domains(rq, CPU_IDLE); } if (time_after(next_balance, rq->next_balance)) { next_balance = rq->next_balance; update_next_balance = 1; } } /* Newly idle CPU doesn't need an update */ if (idle != CPU_NEWLY_IDLE) { update_blocked_averages(this_cpu); has_blocked_load |= this_rq->has_blocked_load; } if (flags & NOHZ_BALANCE_KICK) rebalance_domains(this_rq, CPU_IDLE); WRITE_ONCE(nohz.next_blocked, now + msecs_to_jiffies(LOAD_AVG_PERIOD)); /* The full idle balance loop has been done */ ret = true; abort: /* There is still blocked load, enable periodic update */ if (has_blocked_load) WRITE_ONCE(nohz.has_blocked, 1); /* * next_balance will be updated only when there is a need. * When the CPU is attached to null domain for ex, it will not be * updated. 
*/ if (likely(update_next_balance)) nohz.next_balance = next_balance; return ret; } /* * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the * rebalancing for all the cpus for whom scheduler ticks are stopped. */ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { int this_cpu = this_rq->cpu; unsigned int flags; if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_KICK_MASK)) return false; if (idle != CPU_IDLE) { atomic_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu)); return false; } /* could be _relaxed() */ flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu)); if (!(flags & NOHZ_KICK_MASK)) return false; _nohz_idle_balance(this_rq, flags, idle); return true; } static void nohz_newidle_balance(struct rq *this_rq) { int this_cpu = this_rq->cpu; /* * This CPU doesn't want to be disturbed by scheduler * housekeeping */ if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED)) return; /* Will wake up very soon. No time for doing anything else*/ if (this_rq->avg_idle < sysctl_sched_migration_cost) return; /* Don't need to update blocked load of idle CPUs*/ if (!READ_ONCE(nohz.has_blocked) || time_before(jiffies, READ_ONCE(nohz.next_blocked))) return; raw_spin_unlock(&this_rq->lock); /* * This CPU is going to be idle and blocked load of idle CPUs * need to be updated. Run the ilb locally as it is a good * candidate for ilb instead of waking up another idle CPU. * Kick an normal ilb if we failed to do the update. */ if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE)) kick_ilb(NOHZ_STATS_KICK); raw_spin_lock(&this_rq->lock); } #else /* !CONFIG_NO_HZ_COMMON */ static inline void nohz_balancer_kick(struct rq *rq) { } static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { return false; } static inline void nohz_newidle_balance(struct rq *this_rq) { } #endif /* CONFIG_NO_HZ_COMMON */ /* * idle_balance is called by schedule() if this_cpu is about to become * idle. Attempts to pull tasks from other CPUs. */ static int idle_balance(struct rq *this_rq, struct rq_flags *rf) { unsigned long next_balance = jiffies + HZ; int this_cpu = this_rq->cpu; struct sched_domain *sd; int pulled_task = 0; u64 curr_cost = 0; /* * We must set idle_stamp _before_ calling idle_balance(), such that we * measure the duration of idle_balance() as idle time. */ this_rq->idle_stamp = rq_clock(this_rq); /* * Do not pull tasks towards !active CPUs... */ if (!cpu_active(this_cpu)) return 0; /* * This is OK, because current is on_cpu, which avoids it being picked * for load-balance and preemption/IRQs are still disabled avoiding * further scheduler activity on it and we're being very careful to * re-start the picking loop. 
*/ rq_unpin_lock(this_rq, rf); if (this_rq->avg_idle < sysctl_sched_migration_cost || !READ_ONCE(this_rq->rd->overload)) { rcu_read_lock(); sd = rcu_dereference_check_sched_domain(this_rq->sd); if (sd) update_next_balance(sd, &next_balance); rcu_read_unlock(); nohz_newidle_balance(this_rq); goto out; } raw_spin_unlock(&this_rq->lock); update_blocked_averages(this_cpu); rcu_read_lock(); for_each_domain(this_cpu, sd) { int continue_balancing = 1; u64 t0, domain_cost; if (!(sd->flags & SD_LOAD_BALANCE)) continue; if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) { update_next_balance(sd, &next_balance); break; } if (sd->flags & SD_BALANCE_NEWIDLE) { t0 = sched_clock_cpu(this_cpu); pulled_task = load_balance(this_cpu, this_rq, sd, CPU_NEWLY_IDLE, &continue_balancing); domain_cost = sched_clock_cpu(this_cpu) - t0; if (domain_cost > sd->max_newidle_lb_cost) sd->max_newidle_lb_cost = domain_cost; curr_cost += domain_cost; } update_next_balance(sd, &next_balance); /* * Stop searching for tasks to pull if there are * now runnable tasks on this rq. */ if (pulled_task || this_rq->nr_running > 0) break; } rcu_read_unlock(); raw_spin_lock(&this_rq->lock); if (curr_cost > this_rq->max_idle_balance_cost) this_rq->max_idle_balance_cost = curr_cost; out: /* * While browsing the domains, we released the rq lock, a task could * have been enqueued in the meantime. Since we're not going idle, * pretend we pulled a task. */ if (this_rq->cfs.h_nr_running && !pulled_task) pulled_task = 1; /* Move the next balance forward */ if (time_after(this_rq->next_balance, next_balance)) this_rq->next_balance = next_balance; /* Is there a task of a high priority class? */ if (this_rq->nr_running != this_rq->cfs.h_nr_running) pulled_task = -1; if (pulled_task) this_rq->idle_stamp = 0; rq_repin_lock(this_rq, rf); return pulled_task; } /* * run_rebalance_domains is triggered when needed from the scheduler tick. * Also triggered for nohz idle balancing (with nohz_balancing_kick set). */ static __latent_entropy void run_rebalance_domains(struct softirq_action *h) { struct rq *this_rq = this_rq(); enum cpu_idle_type idle = this_rq->idle_balance ? CPU_IDLE : CPU_NOT_IDLE; /* * If this CPU has a pending nohz_balance_kick, then do the * balancing on behalf of the other idle CPUs whose ticks are * stopped. Do nohz_idle_balance *before* rebalance_domains to * give the idle CPUs a chance to load balance. Else we may * load balance only within the local sched_domain hierarchy * and abort nohz_idle_balance altogether if we pull some load. */ if (nohz_idle_balance(this_rq, idle)) return; /* normal load balance */ update_blocked_averages(this_rq->cpu); rebalance_domains(this_rq, idle); } /* * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. */ void trigger_load_balance(struct rq *rq) { /* Don't need to rebalance while attached to NULL domain */ if (unlikely(on_null_domain(rq))) return; if (time_after_eq(jiffies, rq->next_balance)) raise_softirq(SCHED_SOFTIRQ); nohz_balancer_kick(rq); } static void rq_online_fair(struct rq *rq) { update_sysctl(); update_runtime_enabled(rq); } static void rq_offline_fair(struct rq *rq) { update_sysctl(); /* Ensure any throttled groups are reachable by pick_next_task */ unthrottle_offline_cfs_rqs(rq); } #endif /* CONFIG_SMP */ /* * scheduler tick hitting a task of our scheduling class. * * NOTE: This function can be called remotely by the tick offload that * goes along full dynticks. 
Therefore no local assumption can be made * and everything must be accessed through the @rq and @curr passed in * parameters. */ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) { struct cfs_rq *cfs_rq; struct sched_entity *se = &curr->se; for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); entity_tick(cfs_rq, se, queued); } if (static_branch_unlikely(&sched_numa_balancing)) task_tick_numa(rq, curr); update_misfit_status(curr, rq); update_overutilized_status(task_rq(curr)); } /* * called on fork with the child task as argument from the parent's context * - child not yet on the tasklist * - preemption disabled */ static void task_fork_fair(struct task_struct *p) { struct cfs_rq *cfs_rq; struct sched_entity *se = &p->se, *curr; struct rq *rq = this_rq(); struct rq_flags rf; rq_lock(rq, &rf); update_rq_clock(rq); cfs_rq = task_cfs_rq(current); curr = cfs_rq->curr; if (curr) { update_curr(cfs_rq); se->vruntime = curr->vruntime; } place_entity(cfs_rq, se, 1); if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { /* * Upon rescheduling, sched_class::put_prev_task() will place * 'current' within the tree based on its new key value. */ swap(curr->vruntime, se->vruntime); resched_curr(rq); } se->vruntime -= cfs_rq->min_vruntime; rq_unlock(rq, &rf); } /* * Priority of the task has changed. Check to see if we preempt * the current task. */ static void prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) { if (!task_on_rq_queued(p)) return; /* * Reschedule if we are currently running on this runqueue and * our priority decreased, or if we are not currently running on * this runqueue and our priority is higher than the current's */ if (rq->curr == p) { if (p->prio > oldprio) resched_curr(rq); } else check_preempt_curr(rq, p, 0); } static inline bool vruntime_normalized(struct task_struct *p) { struct sched_entity *se = &p->se; /* * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases, * the dequeue_entity(.flags=0) will already have normalized the * vruntime. */ if (p->on_rq) return true; /* * When !on_rq, vruntime of the task has usually NOT been normalized. * But there are some cases where it has already been normalized: * * - A forked child which is waiting for being woken up by * wake_up_new_task(). * - A task which has been woken up by try_to_wake_up() and * waiting for actually being woken up by sched_ttwu_pending(). 
*/ if (!se->sum_exec_runtime || (p->state == TASK_WAKING && p->sched_remote_wakeup)) return true; return false; } #ifdef CONFIG_FAIR_GROUP_SCHED /* * Propagate the changes of the sched_entity across the tg tree to make it * visible to the root */ static void propagate_entity_cfs_rq(struct sched_entity *se) { struct cfs_rq *cfs_rq; /* Start to propagate at parent */ se = se->parent; for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); if (cfs_rq_throttled(cfs_rq)) break; update_load_avg(cfs_rq, se, UPDATE_TG); } } #else static void propagate_entity_cfs_rq(struct sched_entity *se) { } #endif static void detach_entity_cfs_rq(struct sched_entity *se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); /* Catch up with the cfs_rq and remove our load when we leave */ update_load_avg(cfs_rq, se, 0); detach_entity_load_avg(cfs_rq, se); update_tg_load_avg(cfs_rq, false); propagate_entity_cfs_rq(se); } static void attach_entity_cfs_rq(struct sched_entity *se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); #ifdef CONFIG_FAIR_GROUP_SCHED /* * Since the real-depth could have been changed (only FAIR * class maintain depth value), reset depth properly. */ se->depth = se->parent ? se->parent->depth + 1 : 0; #endif /* Synchronize entity with its cfs_rq */ update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); attach_entity_load_avg(cfs_rq, se, 0); update_tg_load_avg(cfs_rq, false); propagate_entity_cfs_rq(se); } static void detach_task_cfs_rq(struct task_struct *p) { struct sched_entity *se = &p->se; struct cfs_rq *cfs_rq = cfs_rq_of(se); if (!vruntime_normalized(p)) { /* * Fix up our vruntime so that the current sleep doesn't * cause 'unlimited' sleep bonus. */ place_entity(cfs_rq, se, 0); se->vruntime -= cfs_rq->min_vruntime; } detach_entity_cfs_rq(se); } static void attach_task_cfs_rq(struct task_struct *p) { struct sched_entity *se = &p->se; struct cfs_rq *cfs_rq = cfs_rq_of(se); attach_entity_cfs_rq(se); if (!vruntime_normalized(p)) se->vruntime += cfs_rq->min_vruntime; } static void switched_from_fair(struct rq *rq, struct task_struct *p) { detach_task_cfs_rq(p); } static void switched_to_fair(struct rq *rq, struct task_struct *p) { attach_task_cfs_rq(p); if (task_on_rq_queued(p)) { /* * We were most likely switched from sched_rt, so * kick off the schedule if running, otherwise just see * if we can still preempt the current task. */ if (rq->curr == p) resched_curr(rq); else check_preempt_curr(rq, p, 0); } } /* Account for a task changing its policy or group. * * This routine is mostly called to set cfs_rq->curr field when a task * migrates between groups/classes. */ static void set_curr_task_fair(struct rq *rq) { struct sched_entity *se = &rq->curr->se; for_each_sched_entity(se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); set_next_entity(cfs_rq, se); /* ensure bandwidth has been allocated on our new cfs_rq */ account_cfs_rq_runtime(cfs_rq, 0); } } void init_cfs_rq(struct cfs_rq *cfs_rq) { cfs_rq->tasks_timeline = RB_ROOT_CACHED; cfs_rq->min_vruntime = (u64)(-(1LL << 20)); #ifndef CONFIG_64BIT cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; #endif #ifdef CONFIG_SMP raw_spin_lock_init(&cfs_rq->removed.lock); #endif } #ifdef CONFIG_FAIR_GROUP_SCHED static void task_set_group_fair(struct task_struct *p) { struct sched_entity *se = &p->se; set_task_rq(p, task_cpu(p)); se->depth = se->parent ? 
se->parent->depth + 1 : 0; } static void task_move_group_fair(struct task_struct *p) { detach_task_cfs_rq(p); set_task_rq(p, task_cpu(p)); #ifdef CONFIG_SMP /* Tell se's cfs_rq has been changed -- migrated */ p->se.avg.last_update_time = 0; #endif attach_task_cfs_rq(p); } static void task_change_group_fair(struct task_struct *p, int type) { switch (type) { case TASK_SET_GROUP: task_set_group_fair(p); break; case TASK_MOVE_GROUP: task_move_group_fair(p); break; } } void free_fair_sched_group(struct task_group *tg) { int i; destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); for_each_possible_cpu(i) { if (tg->cfs_rq) kfree(tg->cfs_rq[i]); if (tg->se) kfree(tg->se[i]); } kfree(tg->cfs_rq); kfree(tg->se); } int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) { struct sched_entity *se; struct cfs_rq *cfs_rq; int i; tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL); if (!tg->cfs_rq) goto err; tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL); if (!tg->se) goto err; tg->shares = NICE_0_LOAD; init_cfs_bandwidth(tg_cfs_bandwidth(tg)); for_each_possible_cpu(i) { cfs_rq = kzalloc_node(sizeof(struct cfs_rq), GFP_KERNEL, cpu_to_node(i)); if (!cfs_rq) goto err; se = kzalloc_node(sizeof(struct sched_entity), GFP_KERNEL, cpu_to_node(i)); if (!se) goto err_free_rq; init_cfs_rq(cfs_rq); init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); init_entity_runnable_average(se); } return 1; err_free_rq: kfree(cfs_rq); err: return 0; } void online_fair_sched_group(struct task_group *tg) { struct sched_entity *se; struct rq *rq; int i; for_each_possible_cpu(i) { rq = cpu_rq(i); se = tg->se[i]; raw_spin_lock_irq(&rq->lock); update_rq_clock(rq); attach_entity_cfs_rq(se); sync_throttle(tg, i); raw_spin_unlock_irq(&rq->lock); } } void unregister_fair_sched_group(struct task_group *tg) { unsigned long flags; struct rq *rq; int cpu; for_each_possible_cpu(cpu) { if (tg->se[cpu]) remove_entity_load_avg(tg->se[cpu]); /* * Only empty task groups can be destroyed; so we can speculatively * check on_list without danger of it being re-added. */ if (!tg->cfs_rq[cpu]->on_list) continue; rq = cpu_rq(cpu); raw_spin_lock_irqsave(&rq->lock, flags); list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); raw_spin_unlock_irqrestore(&rq->lock, flags); } } void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu, struct sched_entity *parent) { struct rq *rq = cpu_rq(cpu); cfs_rq->tg = tg; cfs_rq->rq = rq; init_cfs_rq_runtime(cfs_rq); tg->cfs_rq[cpu] = cfs_rq; tg->se[cpu] = se; /* se could be NULL for root_task_group */ if (!se) return; if (!parent) { se->cfs_rq = &rq->cfs; se->depth = 0; } else { se->cfs_rq = parent->my_q; se->depth = parent->depth + 1; } se->my_q = cfs_rq; /* guarantee group entities always have weight */ update_load_set(&se->load, NICE_0_LOAD); se->parent = parent; } static DEFINE_MUTEX(shares_mutex); int sched_group_set_shares(struct task_group *tg, unsigned long shares) { int i; /* * We can't change the weight of the root cgroup. 
*/ if (!tg->se[0]) return -EINVAL; shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES)); mutex_lock(&shares_mutex); if (tg->shares == shares) goto done; tg->shares = shares; for_each_possible_cpu(i) { struct rq *rq = cpu_rq(i); struct sched_entity *se = tg->se[i]; struct rq_flags rf; /* Propagate contribution to hierarchy */ rq_lock_irqsave(rq, &rf); update_rq_clock(rq); for_each_sched_entity(se) { update_load_avg(cfs_rq_of(se), se, UPDATE_TG); update_cfs_group(se); } rq_unlock_irqrestore(rq, &rf); } done: mutex_unlock(&shares_mutex); return 0; } #else /* CONFIG_FAIR_GROUP_SCHED */ void free_fair_sched_group(struct task_group *tg) { } int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) { return 1; } void online_fair_sched_group(struct task_group *tg) { } void unregister_fair_sched_group(struct task_group *tg) { } #endif /* CONFIG_FAIR_GROUP_SCHED */ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task) { struct sched_entity *se = &task->se; unsigned int rr_interval = 0; /* * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise * idle runqueue: */ if (rq->cfs.load.weight) rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se)); return rr_interval; } /* * All the scheduling class methods: */ const struct sched_class fair_sched_class = { .next = &idle_sched_class, .enqueue_task = enqueue_task_fair, .dequeue_task = dequeue_task_fair, .yield_task = yield_task_fair, .yield_to_task = yield_to_task_fair, .check_preempt_curr = check_preempt_wakeup, .pick_next_task = pick_next_task_fair, .put_prev_task = put_prev_task_fair, #ifdef CONFIG_SMP .select_task_rq = select_task_rq_fair, .migrate_task_rq = migrate_task_rq_fair, .rq_online = rq_online_fair, .rq_offline = rq_offline_fair, .task_dead = task_dead_fair, .set_cpus_allowed = set_cpus_allowed_common, #endif .set_curr_task = set_curr_task_fair, .task_tick = task_tick_fair, .task_fork = task_fork_fair, .prio_changed = prio_changed_fair, .switched_from = switched_from_fair, .switched_to = switched_to_fair, .get_rr_interval = get_rr_interval_fair, .update_curr = update_curr_fair, #ifdef CONFIG_FAIR_GROUP_SCHED .task_change_group = task_change_group_fair, #endif }; #ifdef CONFIG_SCHED_DEBUG void print_cfs_stats(struct seq_file *m, int cpu) { struct cfs_rq *cfs_rq; rcu_read_lock(); for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq) print_cfs_rq(m, cpu, cfs_rq); rcu_read_unlock(); } #ifdef CONFIG_NUMA_BALANCING void show_numa_stats(struct task_struct *p, struct seq_file *m) { int node; unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0; for_each_online_node(node) { if (p->numa_faults) { tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)]; tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)]; } if (p->numa_group) { gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)], gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)]; } print_numa_stats(m, node, tsf, tpf, gsf, gpf); } } #endif /* CONFIG_NUMA_BALANCING */ #endif /* CONFIG_SCHED_DEBUG */ __init void init_sched_fair_class(void) { #ifdef CONFIG_SMP open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); #ifdef CONFIG_NO_HZ_COMMON nohz.next_balance = jiffies; nohz.next_blocked = jiffies; zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); #endif #endif /* SMP */ }
./CrossVul/dataset_final_sorted/CWE-400/c/good_527_0
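The cpu.shares write path that ends in sched_group_set_shares() in the file above is normally driven from user space through the cgroup cpu controller rather than called directly. A minimal sketch of that path, assuming a cgroup v1 hierarchy mounted at /sys/fs/cgroup/cpu and a pre-created child group named "demo" (both the mount point and the group name are assumptions, not taken from the scheduler code above):

/*
 * Hypothetical user-space sketch: exercising the fair-group shares path.
 * Assumes a cgroup v1 "cpu" controller at /sys/fs/cgroup/cpu with a child
 * group "demo"; writing cpu.shares eventually reaches
 * sched_group_set_shares(), which clamps the value to [MIN_SHARES, MAX_SHARES].
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	FILE *f;

	/* Move the calling task into the (hypothetical) "demo" group. */
	f = fopen("/sys/fs/cgroup/cpu/demo/tasks", "w");
	if (!f)
		return 1;
	fprintf(f, "%d\n", getpid());
	fclose(f);

	/* Halve the default weight (1024 -> 512) for everything in "demo". */
	f = fopen("/sys/fs/cgroup/cpu/demo/cpu.shares", "w");
	if (!f)
		return 1;
	fprintf(f, "512\n");
	fclose(f);

	return 0;
}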
crossvul-cpp_data_bad_1243_0
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2016, Linaro Ltd. * Copyright (c) 2012, Michal Simek <monstr@monstr.eu> * Copyright (c) 2012, PetaLogix * Copyright (c) 2011, Texas Instruments, Inc. * Copyright (c) 2011, Google, Inc. * * Based on rpmsg performance statistics driver by Michal Simek, which in turn * was based on TI & Google OMX rpmsg driver. */ #include <linux/cdev.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/idr.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/poll.h> #include <linux/rpmsg.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <uapi/linux/rpmsg.h> #include "rpmsg_internal.h" #define RPMSG_DEV_MAX (MINORMASK + 1) static dev_t rpmsg_major; static struct class *rpmsg_class; static DEFINE_IDA(rpmsg_ctrl_ida); static DEFINE_IDA(rpmsg_ept_ida); static DEFINE_IDA(rpmsg_minor_ida); #define dev_to_eptdev(dev) container_of(dev, struct rpmsg_eptdev, dev) #define cdev_to_eptdev(i_cdev) container_of(i_cdev, struct rpmsg_eptdev, cdev) #define dev_to_ctrldev(dev) container_of(dev, struct rpmsg_ctrldev, dev) #define cdev_to_ctrldev(i_cdev) container_of(i_cdev, struct rpmsg_ctrldev, cdev) /** * struct rpmsg_ctrldev - control device for instantiating endpoint devices * @rpdev: underlaying rpmsg device * @cdev: cdev for the ctrl device * @dev: device for the ctrl device */ struct rpmsg_ctrldev { struct rpmsg_device *rpdev; struct cdev cdev; struct device dev; }; /** * struct rpmsg_eptdev - endpoint device context * @dev: endpoint device * @cdev: cdev for the endpoint device * @rpdev: underlaying rpmsg device * @chinfo: info used to open the endpoint * @ept_lock: synchronization of @ept modifications * @ept: rpmsg endpoint reference, when open * @queue_lock: synchronization of @queue operations * @queue: incoming message queue * @readq: wait object for incoming queue */ struct rpmsg_eptdev { struct device dev; struct cdev cdev; struct rpmsg_device *rpdev; struct rpmsg_channel_info chinfo; struct mutex ept_lock; struct rpmsg_endpoint *ept; spinlock_t queue_lock; struct sk_buff_head queue; wait_queue_head_t readq; }; static int rpmsg_eptdev_destroy(struct device *dev, void *data) { struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev); mutex_lock(&eptdev->ept_lock); if (eptdev->ept) { rpmsg_destroy_ept(eptdev->ept); eptdev->ept = NULL; } mutex_unlock(&eptdev->ept_lock); /* wake up any blocked readers */ wake_up_interruptible(&eptdev->readq); device_del(&eptdev->dev); put_device(&eptdev->dev); return 0; } static int rpmsg_ept_cb(struct rpmsg_device *rpdev, void *buf, int len, void *priv, u32 addr) { struct rpmsg_eptdev *eptdev = priv; struct sk_buff *skb; skb = alloc_skb(len, GFP_ATOMIC); if (!skb) return -ENOMEM; skb_put_data(skb, buf, len); spin_lock(&eptdev->queue_lock); skb_queue_tail(&eptdev->queue, skb); spin_unlock(&eptdev->queue_lock); /* wake up any blocking processes, waiting for new data */ wake_up_interruptible(&eptdev->readq); return 0; } static int rpmsg_eptdev_open(struct inode *inode, struct file *filp) { struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev); struct rpmsg_endpoint *ept; struct rpmsg_device *rpdev = eptdev->rpdev; struct device *dev = &eptdev->dev; get_device(dev); ept = rpmsg_create_ept(rpdev, rpmsg_ept_cb, eptdev, eptdev->chinfo); if (!ept) { dev_err(dev, "failed to open %s\n", eptdev->chinfo.name); put_device(dev); return -EINVAL; } eptdev->ept = ept; filp->private_data = eptdev; return 0; } static int rpmsg_eptdev_release(struct inode *inode, struct file *filp) 
{ struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev); struct device *dev = &eptdev->dev; struct sk_buff *skb; /* Close the endpoint, if it's not already destroyed by the parent */ mutex_lock(&eptdev->ept_lock); if (eptdev->ept) { rpmsg_destroy_ept(eptdev->ept); eptdev->ept = NULL; } mutex_unlock(&eptdev->ept_lock); /* Discard all SKBs */ while (!skb_queue_empty(&eptdev->queue)) { skb = skb_dequeue(&eptdev->queue); kfree_skb(skb); } put_device(dev); return 0; } static ssize_t rpmsg_eptdev_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *filp = iocb->ki_filp; struct rpmsg_eptdev *eptdev = filp->private_data; unsigned long flags; struct sk_buff *skb; int use; if (!eptdev->ept) return -EPIPE; spin_lock_irqsave(&eptdev->queue_lock, flags); /* Wait for data in the queue */ if (skb_queue_empty(&eptdev->queue)) { spin_unlock_irqrestore(&eptdev->queue_lock, flags); if (filp->f_flags & O_NONBLOCK) return -EAGAIN; /* Wait until we get data or the endpoint goes away */ if (wait_event_interruptible(eptdev->readq, !skb_queue_empty(&eptdev->queue) || !eptdev->ept)) return -ERESTARTSYS; /* We lost the endpoint while waiting */ if (!eptdev->ept) return -EPIPE; spin_lock_irqsave(&eptdev->queue_lock, flags); } skb = skb_dequeue(&eptdev->queue); spin_unlock_irqrestore(&eptdev->queue_lock, flags); if (!skb) return -EFAULT; use = min_t(size_t, iov_iter_count(to), skb->len); if (copy_to_iter(skb->data, use, to) != use) use = -EFAULT; kfree_skb(skb); return use; } static ssize_t rpmsg_eptdev_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *filp = iocb->ki_filp; struct rpmsg_eptdev *eptdev = filp->private_data; size_t len = iov_iter_count(from); void *kbuf; int ret; kbuf = kzalloc(len, GFP_KERNEL); if (!kbuf) return -ENOMEM; if (!copy_from_iter_full(kbuf, len, from)) return -EFAULT; if (mutex_lock_interruptible(&eptdev->ept_lock)) { ret = -ERESTARTSYS; goto free_kbuf; } if (!eptdev->ept) { ret = -EPIPE; goto unlock_eptdev; } if (filp->f_flags & O_NONBLOCK) ret = rpmsg_trysend(eptdev->ept, kbuf, len); else ret = rpmsg_send(eptdev->ept, kbuf, len); unlock_eptdev: mutex_unlock(&eptdev->ept_lock); free_kbuf: kfree(kbuf); return ret < 0 ? 
ret : len; } static __poll_t rpmsg_eptdev_poll(struct file *filp, poll_table *wait) { struct rpmsg_eptdev *eptdev = filp->private_data; __poll_t mask = 0; if (!eptdev->ept) return EPOLLERR; poll_wait(filp, &eptdev->readq, wait); if (!skb_queue_empty(&eptdev->queue)) mask |= EPOLLIN | EPOLLRDNORM; mask |= rpmsg_poll(eptdev->ept, filp, wait); return mask; } static long rpmsg_eptdev_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) { struct rpmsg_eptdev *eptdev = fp->private_data; if (cmd != RPMSG_DESTROY_EPT_IOCTL) return -EINVAL; return rpmsg_eptdev_destroy(&eptdev->dev, NULL); } static const struct file_operations rpmsg_eptdev_fops = { .owner = THIS_MODULE, .open = rpmsg_eptdev_open, .release = rpmsg_eptdev_release, .read_iter = rpmsg_eptdev_read_iter, .write_iter = rpmsg_eptdev_write_iter, .poll = rpmsg_eptdev_poll, .unlocked_ioctl = rpmsg_eptdev_ioctl, .compat_ioctl = rpmsg_eptdev_ioctl, }; static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev); return sprintf(buf, "%s\n", eptdev->chinfo.name); } static DEVICE_ATTR_RO(name); static ssize_t src_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev); return sprintf(buf, "%d\n", eptdev->chinfo.src); } static DEVICE_ATTR_RO(src); static ssize_t dst_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev); return sprintf(buf, "%d\n", eptdev->chinfo.dst); } static DEVICE_ATTR_RO(dst); static struct attribute *rpmsg_eptdev_attrs[] = { &dev_attr_name.attr, &dev_attr_src.attr, &dev_attr_dst.attr, NULL }; ATTRIBUTE_GROUPS(rpmsg_eptdev); static void rpmsg_eptdev_release_device(struct device *dev) { struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev); ida_simple_remove(&rpmsg_ept_ida, dev->id); ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt)); cdev_del(&eptdev->cdev); kfree(eptdev); } static int rpmsg_eptdev_create(struct rpmsg_ctrldev *ctrldev, struct rpmsg_channel_info chinfo) { struct rpmsg_device *rpdev = ctrldev->rpdev; struct rpmsg_eptdev *eptdev; struct device *dev; int ret; eptdev = kzalloc(sizeof(*eptdev), GFP_KERNEL); if (!eptdev) return -ENOMEM; dev = &eptdev->dev; eptdev->rpdev = rpdev; eptdev->chinfo = chinfo; mutex_init(&eptdev->ept_lock); spin_lock_init(&eptdev->queue_lock); skb_queue_head_init(&eptdev->queue); init_waitqueue_head(&eptdev->readq); device_initialize(dev); dev->class = rpmsg_class; dev->parent = &ctrldev->dev; dev->groups = rpmsg_eptdev_groups; dev_set_drvdata(dev, eptdev); cdev_init(&eptdev->cdev, &rpmsg_eptdev_fops); eptdev->cdev.owner = THIS_MODULE; ret = ida_simple_get(&rpmsg_minor_ida, 0, RPMSG_DEV_MAX, GFP_KERNEL); if (ret < 0) goto free_eptdev; dev->devt = MKDEV(MAJOR(rpmsg_major), ret); ret = ida_simple_get(&rpmsg_ept_ida, 0, 0, GFP_KERNEL); if (ret < 0) goto free_minor_ida; dev->id = ret; dev_set_name(dev, "rpmsg%d", ret); ret = cdev_add(&eptdev->cdev, dev->devt, 1); if (ret) goto free_ept_ida; /* We can now rely on the release function for cleanup */ dev->release = rpmsg_eptdev_release_device; ret = device_add(dev); if (ret) { dev_err(dev, "device_add failed: %d\n", ret); put_device(dev); } return ret; free_ept_ida: ida_simple_remove(&rpmsg_ept_ida, dev->id); free_minor_ida: ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt)); free_eptdev: put_device(dev); kfree(eptdev); return ret; } static int rpmsg_ctrldev_open(struct inode *inode, struct file *filp) { struct rpmsg_ctrldev 
*ctrldev = cdev_to_ctrldev(inode->i_cdev); get_device(&ctrldev->dev); filp->private_data = ctrldev; return 0; } static int rpmsg_ctrldev_release(struct inode *inode, struct file *filp) { struct rpmsg_ctrldev *ctrldev = cdev_to_ctrldev(inode->i_cdev); put_device(&ctrldev->dev); return 0; } static long rpmsg_ctrldev_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) { struct rpmsg_ctrldev *ctrldev = fp->private_data; void __user *argp = (void __user *)arg; struct rpmsg_endpoint_info eptinfo; struct rpmsg_channel_info chinfo; if (cmd != RPMSG_CREATE_EPT_IOCTL) return -EINVAL; if (copy_from_user(&eptinfo, argp, sizeof(eptinfo))) return -EFAULT; memcpy(chinfo.name, eptinfo.name, RPMSG_NAME_SIZE); chinfo.name[RPMSG_NAME_SIZE-1] = '\0'; chinfo.src = eptinfo.src; chinfo.dst = eptinfo.dst; return rpmsg_eptdev_create(ctrldev, chinfo); }; static const struct file_operations rpmsg_ctrldev_fops = { .owner = THIS_MODULE, .open = rpmsg_ctrldev_open, .release = rpmsg_ctrldev_release, .unlocked_ioctl = rpmsg_ctrldev_ioctl, .compat_ioctl = rpmsg_ctrldev_ioctl, }; static void rpmsg_ctrldev_release_device(struct device *dev) { struct rpmsg_ctrldev *ctrldev = dev_to_ctrldev(dev); ida_simple_remove(&rpmsg_ctrl_ida, dev->id); ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt)); cdev_del(&ctrldev->cdev); kfree(ctrldev); } static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev) { struct rpmsg_ctrldev *ctrldev; struct device *dev; int ret; ctrldev = kzalloc(sizeof(*ctrldev), GFP_KERNEL); if (!ctrldev) return -ENOMEM; ctrldev->rpdev = rpdev; dev = &ctrldev->dev; device_initialize(dev); dev->parent = &rpdev->dev; dev->class = rpmsg_class; cdev_init(&ctrldev->cdev, &rpmsg_ctrldev_fops); ctrldev->cdev.owner = THIS_MODULE; ret = ida_simple_get(&rpmsg_minor_ida, 0, RPMSG_DEV_MAX, GFP_KERNEL); if (ret < 0) goto free_ctrldev; dev->devt = MKDEV(MAJOR(rpmsg_major), ret); ret = ida_simple_get(&rpmsg_ctrl_ida, 0, 0, GFP_KERNEL); if (ret < 0) goto free_minor_ida; dev->id = ret; dev_set_name(&ctrldev->dev, "rpmsg_ctrl%d", ret); ret = cdev_add(&ctrldev->cdev, dev->devt, 1); if (ret) goto free_ctrl_ida; /* We can now rely on the release function for cleanup */ dev->release = rpmsg_ctrldev_release_device; ret = device_add(dev); if (ret) { dev_err(&rpdev->dev, "device_add failed: %d\n", ret); put_device(dev); } dev_set_drvdata(&rpdev->dev, ctrldev); return ret; free_ctrl_ida: ida_simple_remove(&rpmsg_ctrl_ida, dev->id); free_minor_ida: ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt)); free_ctrldev: put_device(dev); kfree(ctrldev); return ret; } static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev) { struct rpmsg_ctrldev *ctrldev = dev_get_drvdata(&rpdev->dev); int ret; /* Destroy all endpoints */ ret = device_for_each_child(&ctrldev->dev, NULL, rpmsg_eptdev_destroy); if (ret) dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret); device_del(&ctrldev->dev); put_device(&ctrldev->dev); } static struct rpmsg_driver rpmsg_chrdev_driver = { .probe = rpmsg_chrdev_probe, .remove = rpmsg_chrdev_remove, .drv = { .name = "rpmsg_chrdev", }, }; static int rpmsg_char_init(void) { int ret; ret = alloc_chrdev_region(&rpmsg_major, 0, RPMSG_DEV_MAX, "rpmsg"); if (ret < 0) { pr_err("rpmsg: failed to allocate char dev region\n"); return ret; } rpmsg_class = class_create(THIS_MODULE, "rpmsg"); if (IS_ERR(rpmsg_class)) { pr_err("failed to create rpmsg class\n"); unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX); return PTR_ERR(rpmsg_class); } ret = register_rpmsg_driver(&rpmsg_chrdev_driver); if (ret < 0) { 
pr_err("rpmsgchr: failed to register rpmsg driver\n"); class_destroy(rpmsg_class); unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX); } return ret; } postcore_initcall(rpmsg_char_init); static void rpmsg_chrdev_exit(void) { unregister_rpmsg_driver(&rpmsg_chrdev_driver); class_destroy(rpmsg_class); unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX); } module_exit(rpmsg_chrdev_exit); MODULE_ALIAS("rpmsg:rpmsg_chrdev"); MODULE_LICENSE("GPL v2");
./CrossVul/dataset_final_sorted/CWE-400/c/bad_1243_0
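The control and endpoint character devices implemented in the file above are consumed from user space through open(), ioctl() and plain read()/write(). A minimal sketch of that flow, assuming the control node is /dev/rpmsg_ctrl0, that the newly created endpoint shows up as /dev/rpmsg0 (in practice the index has to be discovered via sysfs), and a service name of "rpmsg-test"; none of these names come from the driver code itself:

/*
 * Hypothetical user-space sketch for the rpmsg control/endpoint interface.
 * Device node names, the service name and the addresses below are assumptions;
 * the remote address in particular depends on the peer firmware's service.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rpmsg.h>

#ifndef RPMSG_ADDR_ANY
#define RPMSG_ADDR_ANY 0xFFFFFFFF
#endif

int main(void)
{
	struct rpmsg_endpoint_info info;
	char buf[256];
	int ctrl, ept;
	ssize_t n;

	ctrl = open("/dev/rpmsg_ctrl0", O_RDWR);
	if (ctrl < 0)
		return 1;

	memset(&info, 0, sizeof(info));
	strncpy(info.name, "rpmsg-test", sizeof(info.name) - 1);
	info.src = RPMSG_ADDR_ANY;	/* let the core pick a local address */
	info.dst = RPMSG_ADDR_ANY;	/* real value depends on the peer service */

	/* Lands in rpmsg_ctrldev_ioctl() -> rpmsg_eptdev_create(). */
	if (ioctl(ctrl, RPMSG_CREATE_EPT_IOCTL, &info) < 0)
		return 1;

	ept = open("/dev/rpmsg0", O_RDWR);	/* hypothetical node name */
	if (ept < 0)
		return 1;

	write(ept, "ping", 4);			/* rpmsg_eptdev_write_iter() */
	n = read(ept, buf, sizeof(buf));	/* blocks in rpmsg_eptdev_read_iter() */
	if (n > 0)
		printf("got %zd bytes\n", n);

	ioctl(ept, RPMSG_DESTROY_EPT_IOCTL);	/* tear the endpoint down again */
	close(ept);
	close(ctrl);
	return 0;
}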
crossvul-cpp_data_bad_1259_0
// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. // Copyright (c) 2018, Linaro Limited #include <linux/completion.h> #include <linux/device.h> #include <linux/dma-buf.h> #include <linux/dma-mapping.h> #include <linux/idr.h> #include <linux/list.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of.h> #include <linux/sort.h> #include <linux/of_platform.h> #include <linux/rpmsg.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <uapi/misc/fastrpc.h> #define ADSP_DOMAIN_ID (0) #define MDSP_DOMAIN_ID (1) #define SDSP_DOMAIN_ID (2) #define CDSP_DOMAIN_ID (3) #define FASTRPC_DEV_MAX 4 /* adsp, mdsp, slpi, cdsp*/ #define FASTRPC_MAX_SESSIONS 9 /*8 compute, 1 cpz*/ #define FASTRPC_ALIGN 128 #define FASTRPC_MAX_FDLIST 16 #define FASTRPC_MAX_CRCLIST 64 #define FASTRPC_PHYS(p) ((p) & 0xffffffff) #define FASTRPC_CTX_MAX (256) #define FASTRPC_INIT_HANDLE 1 #define FASTRPC_CTXID_MASK (0xFF0) #define INIT_FILELEN_MAX (64 * 1024 * 1024) #define FASTRPC_DEVICE_NAME "fastrpc" /* Retrives number of input buffers from the scalars parameter */ #define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff) /* Retrives number of output buffers from the scalars parameter */ #define REMOTE_SCALARS_OUTBUFS(sc) (((sc) >> 8) & 0x0ff) /* Retrives number of input handles from the scalars parameter */ #define REMOTE_SCALARS_INHANDLES(sc) (((sc) >> 4) & 0x0f) /* Retrives number of output handles from the scalars parameter */ #define REMOTE_SCALARS_OUTHANDLES(sc) ((sc) & 0x0f) #define REMOTE_SCALARS_LENGTH(sc) (REMOTE_SCALARS_INBUFS(sc) + \ REMOTE_SCALARS_OUTBUFS(sc) + \ REMOTE_SCALARS_INHANDLES(sc)+ \ REMOTE_SCALARS_OUTHANDLES(sc)) #define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout) \ (((attr & 0x07) << 29) | \ ((method & 0x1f) << 24) | \ ((in & 0xff) << 16) | \ ((out & 0xff) << 8) | \ ((oin & 0x0f) << 4) | \ (oout & 0x0f)) #define FASTRPC_SCALARS(method, in, out) \ FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0) #define FASTRPC_CREATE_PROCESS_NARGS 6 /* Remote Method id table */ #define FASTRPC_RMID_INIT_ATTACH 0 #define FASTRPC_RMID_INIT_RELEASE 1 #define FASTRPC_RMID_INIT_CREATE 6 #define FASTRPC_RMID_INIT_CREATE_ATTR 7 #define FASTRPC_RMID_INIT_CREATE_STATIC 8 #define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev) static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp", "sdsp", "cdsp"}; struct fastrpc_phy_page { u64 addr; /* physical address */ u64 size; /* size of contiguous region */ }; struct fastrpc_invoke_buf { u32 num; /* number of contiguous regions */ u32 pgidx; /* index to start of contiguous region */ }; struct fastrpc_remote_arg { u64 pv; u64 len; }; struct fastrpc_msg { int pid; /* process group id */ int tid; /* thread id */ u64 ctx; /* invoke caller context */ u32 handle; /* handle to invoke */ u32 sc; /* scalars structure describing the data */ u64 addr; /* physical address */ u64 size; /* size of contiguous region */ }; struct fastrpc_invoke_rsp { u64 ctx; /* invoke caller context */ int retval; /* invoke return value */ }; struct fastrpc_buf_overlap { u64 start; u64 end; int raix; u64 mstart; u64 mend; u64 offset; }; struct fastrpc_buf { struct fastrpc_user *fl; struct dma_buf *dmabuf; struct device *dev; void *virt; u64 phys; u64 size; /* Lock for dma buf attachments */ struct mutex lock; struct list_head attachments; }; struct fastrpc_dma_buf_attachment { struct device *dev; struct sg_table sgt; struct list_head node; }; struct 
fastrpc_map { struct list_head node; struct fastrpc_user *fl; int fd; struct dma_buf *buf; struct sg_table *table; struct dma_buf_attachment *attach; u64 phys; u64 size; void *va; u64 len; struct kref refcount; }; struct fastrpc_invoke_ctx { int nscalars; int nbufs; int retval; int pid; int tgid; u32 sc; u32 *crc; u64 ctxid; u64 msg_sz; struct kref refcount; struct list_head node; /* list of ctxs */ struct completion work; struct work_struct put_work; struct fastrpc_msg msg; struct fastrpc_user *fl; struct fastrpc_remote_arg *rpra; struct fastrpc_map **maps; struct fastrpc_buf *buf; struct fastrpc_invoke_args *args; struct fastrpc_buf_overlap *olaps; struct fastrpc_channel_ctx *cctx; }; struct fastrpc_session_ctx { struct device *dev; int sid; bool used; bool valid; }; struct fastrpc_channel_ctx { int domain_id; int sesscount; struct rpmsg_device *rpdev; struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS]; spinlock_t lock; struct idr ctx_idr; struct list_head users; struct miscdevice miscdev; struct kref refcount; }; struct fastrpc_user { struct list_head user; struct list_head maps; struct list_head pending; struct fastrpc_channel_ctx *cctx; struct fastrpc_session_ctx *sctx; struct fastrpc_buf *init_mem; int tgid; int pd; /* Lock for lists */ spinlock_t lock; /* lock for allocations */ struct mutex mutex; }; static void fastrpc_free_map(struct kref *ref) { struct fastrpc_map *map; map = container_of(ref, struct fastrpc_map, refcount); if (map->table) { dma_buf_unmap_attachment(map->attach, map->table, DMA_BIDIRECTIONAL); dma_buf_detach(map->buf, map->attach); dma_buf_put(map->buf); } kfree(map); } static void fastrpc_map_put(struct fastrpc_map *map) { if (map) kref_put(&map->refcount, fastrpc_free_map); } static void fastrpc_map_get(struct fastrpc_map *map) { if (map) kref_get(&map->refcount); } static int fastrpc_map_find(struct fastrpc_user *fl, int fd, struct fastrpc_map **ppmap) { struct fastrpc_map *map = NULL; mutex_lock(&fl->mutex); list_for_each_entry(map, &fl->maps, node) { if (map->fd == fd) { fastrpc_map_get(map); *ppmap = map; mutex_unlock(&fl->mutex); return 0; } } mutex_unlock(&fl->mutex); return -ENOENT; } static void fastrpc_buf_free(struct fastrpc_buf *buf) { dma_free_coherent(buf->dev, buf->size, buf->virt, FASTRPC_PHYS(buf->phys)); kfree(buf); } static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev, u64 size, struct fastrpc_buf **obuf) { struct fastrpc_buf *buf; buf = kzalloc(sizeof(*buf), GFP_KERNEL); if (!buf) return -ENOMEM; INIT_LIST_HEAD(&buf->attachments); mutex_init(&buf->lock); buf->fl = fl; buf->virt = NULL; buf->phys = 0; buf->size = size; buf->dev = dev; buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys, GFP_KERNEL); if (!buf->virt) { mutex_destroy(&buf->lock); kfree(buf); return -ENOMEM; } if (fl->sctx && fl->sctx->sid) buf->phys += ((u64)fl->sctx->sid << 32); *obuf = buf; return 0; } static void fastrpc_channel_ctx_free(struct kref *ref) { struct fastrpc_channel_ctx *cctx; cctx = container_of(ref, struct fastrpc_channel_ctx, refcount); kfree(cctx); } static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx) { kref_get(&cctx->refcount); } static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx) { kref_put(&cctx->refcount, fastrpc_channel_ctx_free); } static void fastrpc_context_free(struct kref *ref) { struct fastrpc_invoke_ctx *ctx; struct fastrpc_channel_ctx *cctx; unsigned long flags; int i; ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount); cctx = ctx->cctx; for (i = 0; i < 
ctx->nscalars; i++) fastrpc_map_put(ctx->maps[i]); if (ctx->buf) fastrpc_buf_free(ctx->buf); spin_lock_irqsave(&cctx->lock, flags); idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4); spin_unlock_irqrestore(&cctx->lock, flags); kfree(ctx->maps); kfree(ctx->olaps); kfree(ctx); fastrpc_channel_ctx_put(cctx); } static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx) { kref_get(&ctx->refcount); } static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx) { kref_put(&ctx->refcount, fastrpc_context_free); } static void fastrpc_context_put_wq(struct work_struct *work) { struct fastrpc_invoke_ctx *ctx = container_of(work, struct fastrpc_invoke_ctx, put_work); fastrpc_context_put(ctx); } #define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1) static int olaps_cmp(const void *a, const void *b) { struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a; struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b; /* sort with lowest starting buffer first */ int st = CMP(pa->start, pb->start); /* sort with highest ending buffer first */ int ed = CMP(pb->end, pa->end); return st == 0 ? ed : st; } static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx) { u64 max_end = 0; int i; for (i = 0; i < ctx->nbufs; ++i) { ctx->olaps[i].start = ctx->args[i].ptr; ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length; ctx->olaps[i].raix = i; } sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL); for (i = 0; i < ctx->nbufs; ++i) { /* Falling inside previous range */ if (ctx->olaps[i].start < max_end) { ctx->olaps[i].mstart = max_end; ctx->olaps[i].mend = ctx->olaps[i].end; ctx->olaps[i].offset = max_end - ctx->olaps[i].start; if (ctx->olaps[i].end > max_end) { max_end = ctx->olaps[i].end; } else { ctx->olaps[i].mend = 0; ctx->olaps[i].mstart = 0; } } else { ctx->olaps[i].mend = ctx->olaps[i].end; ctx->olaps[i].mstart = ctx->olaps[i].start; ctx->olaps[i].offset = 0; max_end = ctx->olaps[i].end; } } } static struct fastrpc_invoke_ctx *fastrpc_context_alloc( struct fastrpc_user *user, u32 kernel, u32 sc, struct fastrpc_invoke_args *args) { struct fastrpc_channel_ctx *cctx = user->cctx; struct fastrpc_invoke_ctx *ctx = NULL; unsigned long flags; int ret; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&ctx->node); ctx->fl = user; ctx->nscalars = REMOTE_SCALARS_LENGTH(sc); ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) + REMOTE_SCALARS_OUTBUFS(sc); if (ctx->nscalars) { ctx->maps = kcalloc(ctx->nscalars, sizeof(*ctx->maps), GFP_KERNEL); if (!ctx->maps) { kfree(ctx); return ERR_PTR(-ENOMEM); } ctx->olaps = kcalloc(ctx->nscalars, sizeof(*ctx->olaps), GFP_KERNEL); if (!ctx->olaps) { kfree(ctx->maps); kfree(ctx); return ERR_PTR(-ENOMEM); } ctx->args = args; fastrpc_get_buff_overlaps(ctx); } /* Released in fastrpc_context_put() */ fastrpc_channel_ctx_get(cctx); ctx->sc = sc; ctx->retval = -1; ctx->pid = current->pid; ctx->tgid = user->tgid; ctx->cctx = cctx; init_completion(&ctx->work); INIT_WORK(&ctx->put_work, fastrpc_context_put_wq); spin_lock(&user->lock); list_add_tail(&ctx->node, &user->pending); spin_unlock(&user->lock); spin_lock_irqsave(&cctx->lock, flags); ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1, FASTRPC_CTX_MAX, GFP_ATOMIC); if (ret < 0) { spin_unlock_irqrestore(&cctx->lock, flags); goto err_idr; } ctx->ctxid = ret << 4; spin_unlock_irqrestore(&cctx->lock, flags); kref_init(&ctx->refcount); return ctx; err_idr: spin_lock(&user->lock); list_del(&ctx->node); spin_unlock(&user->lock); fastrpc_channel_ctx_put(cctx); 
kfree(ctx->maps); kfree(ctx->olaps); kfree(ctx); return ERR_PTR(ret); } static struct sg_table * fastrpc_map_dma_buf(struct dma_buf_attachment *attachment, enum dma_data_direction dir) { struct fastrpc_dma_buf_attachment *a = attachment->priv; struct sg_table *table; table = &a->sgt; if (!dma_map_sg(attachment->dev, table->sgl, table->nents, dir)) return ERR_PTR(-ENOMEM); return table; } static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach, struct sg_table *table, enum dma_data_direction dir) { dma_unmap_sg(attach->dev, table->sgl, table->nents, dir); } static void fastrpc_release(struct dma_buf *dmabuf) { struct fastrpc_buf *buffer = dmabuf->priv; fastrpc_buf_free(buffer); } static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment) { struct fastrpc_dma_buf_attachment *a; struct fastrpc_buf *buffer = dmabuf->priv; int ret; a = kzalloc(sizeof(*a), GFP_KERNEL); if (!a) return -ENOMEM; ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt, FASTRPC_PHYS(buffer->phys), buffer->size); if (ret < 0) { dev_err(buffer->dev, "failed to get scatterlist from DMA API\n"); return -EINVAL; } a->dev = attachment->dev; INIT_LIST_HEAD(&a->node); attachment->priv = a; mutex_lock(&buffer->lock); list_add(&a->node, &buffer->attachments); mutex_unlock(&buffer->lock); return 0; } static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment) { struct fastrpc_dma_buf_attachment *a = attachment->priv; struct fastrpc_buf *buffer = dmabuf->priv; mutex_lock(&buffer->lock); list_del(&a->node); mutex_unlock(&buffer->lock); sg_free_table(&a->sgt); kfree(a); } static void *fastrpc_kmap(struct dma_buf *dmabuf, unsigned long pgnum) { struct fastrpc_buf *buf = dmabuf->priv; return buf->virt ? 
buf->virt + pgnum * PAGE_SIZE : NULL; } static void *fastrpc_vmap(struct dma_buf *dmabuf) { struct fastrpc_buf *buf = dmabuf->priv; return buf->virt; } static int fastrpc_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) { struct fastrpc_buf *buf = dmabuf->priv; size_t size = vma->vm_end - vma->vm_start; return dma_mmap_coherent(buf->dev, vma, buf->virt, FASTRPC_PHYS(buf->phys), size); } static const struct dma_buf_ops fastrpc_dma_buf_ops = { .attach = fastrpc_dma_buf_attach, .detach = fastrpc_dma_buf_detatch, .map_dma_buf = fastrpc_map_dma_buf, .unmap_dma_buf = fastrpc_unmap_dma_buf, .mmap = fastrpc_mmap, .map = fastrpc_kmap, .vmap = fastrpc_vmap, .release = fastrpc_release, }; static int fastrpc_map_create(struct fastrpc_user *fl, int fd, u64 len, struct fastrpc_map **ppmap) { struct fastrpc_session_ctx *sess = fl->sctx; struct fastrpc_map *map = NULL; int err = 0; if (!fastrpc_map_find(fl, fd, ppmap)) return 0; map = kzalloc(sizeof(*map), GFP_KERNEL); if (!map) return -ENOMEM; INIT_LIST_HEAD(&map->node); map->fl = fl; map->fd = fd; map->buf = dma_buf_get(fd); if (IS_ERR(map->buf)) { err = PTR_ERR(map->buf); goto get_err; } map->attach = dma_buf_attach(map->buf, sess->dev); if (IS_ERR(map->attach)) { dev_err(sess->dev, "Failed to attach dmabuf\n"); err = PTR_ERR(map->attach); goto attach_err; } map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL); if (IS_ERR(map->table)) { err = PTR_ERR(map->table); goto map_err; } map->phys = sg_dma_address(map->table->sgl); map->phys += ((u64)fl->sctx->sid << 32); map->size = len; map->va = sg_virt(map->table->sgl); map->len = len; kref_init(&map->refcount); spin_lock(&fl->lock); list_add_tail(&map->node, &fl->maps); spin_unlock(&fl->lock); *ppmap = map; return 0; map_err: dma_buf_detach(map->buf, map->attach); attach_err: dma_buf_put(map->buf); get_err: kfree(map); return err; } /* * Fastrpc payload buffer with metadata looks like: * * >>>>>> START of METADATA <<<<<<<<< * +---------------------------------+ * | Arguments | * | type:(struct fastrpc_remote_arg)| * | (0 - N) | * +---------------------------------+ * | Invoke Buffer list | * | type:(struct fastrpc_invoke_buf)| * | (0 - N) | * +---------------------------------+ * | Page info list | * | type:(struct fastrpc_phy_page) | * | (0 - N) | * +---------------------------------+ * | Optional info | * |(can be specific to SoC/Firmware)| * +---------------------------------+ * >>>>>>>> END of METADATA <<<<<<<<< * +---------------------------------+ * | Inline ARGS | * | (0-N) | * +---------------------------------+ */ static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx) { int size = 0; size = (sizeof(struct fastrpc_remote_arg) + sizeof(struct fastrpc_invoke_buf) + sizeof(struct fastrpc_phy_page)) * ctx->nscalars + sizeof(u64) * FASTRPC_MAX_FDLIST + sizeof(u32) * FASTRPC_MAX_CRCLIST; return size; } static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen) { u64 size = 0; int i; size = ALIGN(metalen, FASTRPC_ALIGN); for (i = 0; i < ctx->nscalars; i++) { if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) { if (ctx->olaps[i].offset == 0) size = ALIGN(size, FASTRPC_ALIGN); size += (ctx->olaps[i].mend - ctx->olaps[i].mstart); } } return size; } static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx) { struct device *dev = ctx->fl->sctx->dev; int i, err; for (i = 0; i < ctx->nscalars; ++i) { /* Make sure reserved field is set to 0 */ if (ctx->args[i].reserved) return -EINVAL; if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 || 
ctx->args[i].length == 0) continue; err = fastrpc_map_create(ctx->fl, ctx->args[i].fd, ctx->args[i].length, &ctx->maps[i]); if (err) { dev_err(dev, "Error Creating map %d\n", err); return -EINVAL; } } return 0; } static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx) { struct device *dev = ctx->fl->sctx->dev; struct fastrpc_remote_arg *rpra; struct fastrpc_invoke_buf *list; struct fastrpc_phy_page *pages; int inbufs, i, oix, err = 0; u64 len, rlen, pkt_size; u64 pg_start, pg_end; uintptr_t args; int metalen; inbufs = REMOTE_SCALARS_INBUFS(ctx->sc); metalen = fastrpc_get_meta_size(ctx); pkt_size = fastrpc_get_payload_size(ctx, metalen); err = fastrpc_create_maps(ctx); if (err) return err; ctx->msg_sz = pkt_size; err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf); if (err) return err; rpra = ctx->buf->virt; list = ctx->buf->virt + ctx->nscalars * sizeof(*rpra); pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) + sizeof(*rpra)); args = (uintptr_t)ctx->buf->virt + metalen; rlen = pkt_size - metalen; ctx->rpra = rpra; for (oix = 0; oix < ctx->nbufs; ++oix) { int mlen; i = ctx->olaps[oix].raix; len = ctx->args[i].length; rpra[i].pv = 0; rpra[i].len = len; list[i].num = len ? 1 : 0; list[i].pgidx = i; if (!len) continue; if (ctx->maps[i]) { struct vm_area_struct *vma = NULL; rpra[i].pv = (u64) ctx->args[i].ptr; pages[i].addr = ctx->maps[i]->phys; vma = find_vma(current->mm, ctx->args[i].ptr); if (vma) pages[i].addr += ctx->args[i].ptr - vma->vm_start; pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT; pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >> PAGE_SHIFT; pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE; } else { if (ctx->olaps[oix].offset == 0) { rlen -= ALIGN(args, FASTRPC_ALIGN) - args; args = ALIGN(args, FASTRPC_ALIGN); } mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart; if (rlen < mlen) goto bail; rpra[i].pv = args - ctx->olaps[oix].offset; pages[i].addr = ctx->buf->phys - ctx->olaps[oix].offset + (pkt_size - rlen); pages[i].addr = pages[i].addr & PAGE_MASK; pg_start = (args & PAGE_MASK) >> PAGE_SHIFT; pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT; pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE; args = args + mlen; rlen -= mlen; } if (i < inbufs && !ctx->maps[i]) { void *dst = (void *)(uintptr_t)rpra[i].pv; void *src = (void *)(uintptr_t)ctx->args[i].ptr; if (!kernel) { if (copy_from_user(dst, (void __user *)src, len)) { err = -EFAULT; goto bail; } } else { memcpy(dst, src, len); } } } for (i = ctx->nbufs; i < ctx->nscalars; ++i) { rpra[i].pv = (u64) ctx->args[i].ptr; rpra[i].len = ctx->args[i].length; list[i].num = ctx->args[i].length ? 
1 : 0; list[i].pgidx = i; pages[i].addr = ctx->maps[i]->phys; pages[i].size = ctx->maps[i]->size; } bail: if (err) dev_err(dev, "Error: get invoke args failed:%d\n", err); return err; } static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx, u32 kernel) { struct fastrpc_remote_arg *rpra = ctx->rpra; int i, inbufs; inbufs = REMOTE_SCALARS_INBUFS(ctx->sc); for (i = inbufs; i < ctx->nbufs; ++i) { void *src = (void *)(uintptr_t)rpra[i].pv; void *dst = (void *)(uintptr_t)ctx->args[i].ptr; u64 len = rpra[i].len; if (!kernel) { if (copy_to_user((void __user *)dst, src, len)) return -EFAULT; } else { memcpy(dst, src, len); } } return 0; } static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx, struct fastrpc_invoke_ctx *ctx, u32 kernel, uint32_t handle) { struct fastrpc_channel_ctx *cctx; struct fastrpc_user *fl = ctx->fl; struct fastrpc_msg *msg = &ctx->msg; cctx = fl->cctx; msg->pid = fl->tgid; msg->tid = current->pid; if (kernel) msg->pid = 0; msg->ctx = ctx->ctxid | fl->pd; msg->handle = handle; msg->sc = ctx->sc; msg->addr = ctx->buf ? ctx->buf->phys : 0; msg->size = roundup(ctx->msg_sz, PAGE_SIZE); fastrpc_context_get(ctx); return rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg)); } static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel, u32 handle, u32 sc, struct fastrpc_invoke_args *args) { struct fastrpc_invoke_ctx *ctx = NULL; int err = 0; if (!fl->sctx) return -EINVAL; if (!fl->cctx->rpdev) return -EPIPE; ctx = fastrpc_context_alloc(fl, kernel, sc, args); if (IS_ERR(ctx)) return PTR_ERR(ctx); if (ctx->nscalars) { err = fastrpc_get_args(kernel, ctx); if (err) goto bail; } /* make sure that all CPU memory writes are seen by DSP */ dma_wmb(); /* Send invoke buffer to remote dsp */ err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle); if (err) goto bail; /* Wait for remote dsp to respond or time out */ err = wait_for_completion_interruptible(&ctx->work); if (err) goto bail; /* Check the response from remote dsp */ err = ctx->retval; if (err) goto bail; if (ctx->nscalars) { /* make sure that all memory writes by DSP are seen by CPU */ dma_rmb(); /* populate all the output buffers with results */ err = fastrpc_put_args(ctx, kernel); if (err) goto bail; } bail: /* We are done with this compute context, remove it from pending list */ spin_lock(&fl->lock); list_del(&ctx->node); spin_unlock(&fl->lock); fastrpc_context_put(ctx); if (err) dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err); return err; } static int fastrpc_init_create_process(struct fastrpc_user *fl, char __user *argp) { struct fastrpc_init_create init; struct fastrpc_invoke_args *args; struct fastrpc_phy_page pages[1]; struct fastrpc_map *map = NULL; struct fastrpc_buf *imem = NULL; int memlen; int err; struct { int pgid; u32 namelen; u32 filelen; u32 pageslen; u32 attrs; u32 siglen; } inbuf; u32 sc; args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL); if (!args) return -ENOMEM; if (copy_from_user(&init, argp, sizeof(init))) { err = -EFAULT; goto err; } if (init.filelen > INIT_FILELEN_MAX) { err = -EINVAL; goto err; } inbuf.pgid = fl->tgid; inbuf.namelen = strlen(current->comm) + 1; inbuf.filelen = init.filelen; inbuf.pageslen = 1; inbuf.attrs = init.attrs; inbuf.siglen = init.siglen; fl->pd = 1; if (init.filelen && init.filefd) { err = fastrpc_map_create(fl, init.filefd, init.filelen, &map); if (err) goto err; } memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4), 1024 * 1024); err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen, &imem); if (err) goto 
err_alloc; fl->init_mem = imem; args[0].ptr = (u64)(uintptr_t)&inbuf; args[0].length = sizeof(inbuf); args[0].fd = -1; args[1].ptr = (u64)(uintptr_t)current->comm; args[1].length = inbuf.namelen; args[1].fd = -1; args[2].ptr = (u64) init.file; args[2].length = inbuf.filelen; args[2].fd = init.filefd; pages[0].addr = imem->phys; pages[0].size = imem->size; args[3].ptr = (u64)(uintptr_t) pages; args[3].length = 1 * sizeof(*pages); args[3].fd = -1; args[4].ptr = (u64)(uintptr_t)&inbuf.attrs; args[4].length = sizeof(inbuf.attrs); args[4].fd = -1; args[5].ptr = (u64)(uintptr_t) &inbuf.siglen; args[5].length = sizeof(inbuf.siglen); args[5].fd = -1; sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0); if (init.attrs) sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0); err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, args); if (err) goto err_invoke; kfree(args); return 0; err_invoke: fl->init_mem = NULL; fastrpc_buf_free(imem); err_alloc: if (map) { spin_lock(&fl->lock); list_del(&map->node); spin_unlock(&fl->lock); fastrpc_map_put(map); } err: kfree(args); return err; } static struct fastrpc_session_ctx *fastrpc_session_alloc( struct fastrpc_channel_ctx *cctx) { struct fastrpc_session_ctx *session = NULL; unsigned long flags; int i; spin_lock_irqsave(&cctx->lock, flags); for (i = 0; i < cctx->sesscount; i++) { if (!cctx->session[i].used && cctx->session[i].valid) { cctx->session[i].used = true; session = &cctx->session[i]; break; } } spin_unlock_irqrestore(&cctx->lock, flags); return session; } static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx, struct fastrpc_session_ctx *session) { unsigned long flags; spin_lock_irqsave(&cctx->lock, flags); session->used = false; spin_unlock_irqrestore(&cctx->lock, flags); } static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl) { struct fastrpc_invoke_args args[1]; int tgid = 0; u32 sc; tgid = fl->tgid; args[0].ptr = (u64)(uintptr_t) &tgid; args[0].length = sizeof(tgid); args[0].fd = -1; args[0].reserved = 0; sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0); return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]); } static int fastrpc_device_release(struct inode *inode, struct file *file) { struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data; struct fastrpc_channel_ctx *cctx = fl->cctx; struct fastrpc_invoke_ctx *ctx, *n; struct fastrpc_map *map, *m; unsigned long flags; fastrpc_release_current_dsp_process(fl); spin_lock_irqsave(&cctx->lock, flags); list_del(&fl->user); spin_unlock_irqrestore(&cctx->lock, flags); if (fl->init_mem) fastrpc_buf_free(fl->init_mem); list_for_each_entry_safe(ctx, n, &fl->pending, node) { list_del(&ctx->node); fastrpc_context_put(ctx); } list_for_each_entry_safe(map, m, &fl->maps, node) { list_del(&map->node); fastrpc_map_put(map); } fastrpc_session_free(cctx, fl->sctx); fastrpc_channel_ctx_put(cctx); mutex_destroy(&fl->mutex); kfree(fl); file->private_data = NULL; return 0; } static int fastrpc_device_open(struct inode *inode, struct file *filp) { struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data); struct fastrpc_user *fl = NULL; unsigned long flags; fl = kzalloc(sizeof(*fl), GFP_KERNEL); if (!fl) return -ENOMEM; /* Released in fastrpc_device_release() */ fastrpc_channel_ctx_get(cctx); filp->private_data = fl; spin_lock_init(&fl->lock); mutex_init(&fl->mutex); INIT_LIST_HEAD(&fl->pending); INIT_LIST_HEAD(&fl->maps); INIT_LIST_HEAD(&fl->user); fl->tgid = current->tgid; fl->cctx = cctx; fl->sctx = 
fastrpc_session_alloc(cctx); if (!fl->sctx) { dev_err(&cctx->rpdev->dev, "No session available\n"); mutex_destroy(&fl->mutex); kfree(fl); return -EBUSY; } spin_lock_irqsave(&cctx->lock, flags); list_add_tail(&fl->user, &cctx->users); spin_unlock_irqrestore(&cctx->lock, flags); return 0; } static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp) { struct fastrpc_alloc_dma_buf bp; DEFINE_DMA_BUF_EXPORT_INFO(exp_info); struct fastrpc_buf *buf = NULL; int err; if (copy_from_user(&bp, argp, sizeof(bp))) return -EFAULT; err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf); if (err) return err; exp_info.ops = &fastrpc_dma_buf_ops; exp_info.size = bp.size; exp_info.flags = O_RDWR; exp_info.priv = buf; buf->dmabuf = dma_buf_export(&exp_info); if (IS_ERR(buf->dmabuf)) { err = PTR_ERR(buf->dmabuf); fastrpc_buf_free(buf); return err; } bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE); if (bp.fd < 0) { dma_buf_put(buf->dmabuf); return -EINVAL; } if (copy_to_user(argp, &bp, sizeof(bp))) { dma_buf_put(buf->dmabuf); return -EFAULT; } return 0; } static int fastrpc_init_attach(struct fastrpc_user *fl) { struct fastrpc_invoke_args args[1]; int tgid = fl->tgid; u32 sc; args[0].ptr = (u64)(uintptr_t) &tgid; args[0].length = sizeof(tgid); args[0].fd = -1; args[0].reserved = 0; sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0); fl->pd = 0; return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]); } static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp) { struct fastrpc_invoke_args *args = NULL; struct fastrpc_invoke inv; u32 nscalars; int err; if (copy_from_user(&inv, argp, sizeof(inv))) return -EFAULT; /* nscalars is truncated here to max supported value */ nscalars = REMOTE_SCALARS_LENGTH(inv.sc); if (nscalars) { args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL); if (!args) return -ENOMEM; if (copy_from_user(args, (void __user *)(uintptr_t)inv.args, nscalars * sizeof(*args))) { kfree(args); return -EFAULT; } } err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args); kfree(args); return err; } static long fastrpc_device_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data; char __user *argp = (char __user *)arg; int err; switch (cmd) { case FASTRPC_IOCTL_INVOKE: err = fastrpc_invoke(fl, argp); break; case FASTRPC_IOCTL_INIT_ATTACH: err = fastrpc_init_attach(fl); break; case FASTRPC_IOCTL_INIT_CREATE: err = fastrpc_init_create_process(fl, argp); break; case FASTRPC_IOCTL_ALLOC_DMA_BUFF: err = fastrpc_dmabuf_alloc(fl, argp); break; default: err = -ENOTTY; break; } return err; } static const struct file_operations fastrpc_fops = { .open = fastrpc_device_open, .release = fastrpc_device_release, .unlocked_ioctl = fastrpc_device_ioctl, .compat_ioctl = fastrpc_device_ioctl, }; static int fastrpc_cb_probe(struct platform_device *pdev) { struct fastrpc_channel_ctx *cctx; struct fastrpc_session_ctx *sess; struct device *dev = &pdev->dev; int i, sessions = 0; unsigned long flags; int rc; cctx = dev_get_drvdata(dev->parent); if (!cctx) return -EINVAL; of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions); spin_lock_irqsave(&cctx->lock, flags); sess = &cctx->session[cctx->sesscount]; sess->used = false; sess->valid = true; sess->dev = dev; dev_set_drvdata(dev, sess); if (of_property_read_u32(dev->of_node, "reg", &sess->sid)) dev_info(dev, "FastRPC Session ID not specified in DT\n"); if (sessions > 0) { struct fastrpc_session_ctx *dup_sess; for (i = 1; i < sessions; 
i++) { if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS) break; dup_sess = &cctx->session[cctx->sesscount]; memcpy(dup_sess, sess, sizeof(*dup_sess)); } } cctx->sesscount++; spin_unlock_irqrestore(&cctx->lock, flags); rc = dma_set_mask(dev, DMA_BIT_MASK(32)); if (rc) { dev_err(dev, "32-bit DMA enable failed\n"); return rc; } return 0; } static int fastrpc_cb_remove(struct platform_device *pdev) { struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent); struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev); unsigned long flags; int i; spin_lock_irqsave(&cctx->lock, flags); for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) { if (cctx->session[i].sid == sess->sid) { cctx->session[i].valid = false; cctx->sesscount--; } } spin_unlock_irqrestore(&cctx->lock, flags); return 0; } static const struct of_device_id fastrpc_match_table[] = { { .compatible = "qcom,fastrpc-compute-cb", }, {} }; static struct platform_driver fastrpc_cb_driver = { .probe = fastrpc_cb_probe, .remove = fastrpc_cb_remove, .driver = { .name = "qcom,fastrpc-cb", .of_match_table = fastrpc_match_table, .suppress_bind_attrs = true, }, }; static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev) { struct device *rdev = &rpdev->dev; struct fastrpc_channel_ctx *data; int i, err, domain_id = -1; const char *domain; err = of_property_read_string(rdev->of_node, "label", &domain); if (err) { dev_info(rdev, "FastRPC Domain not specified in DT\n"); return err; } for (i = 0; i <= CDSP_DOMAIN_ID; i++) { if (!strcmp(domains[i], domain)) { domain_id = i; break; } } if (domain_id < 0) { dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id); return -EINVAL; } data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->miscdev.minor = MISC_DYNAMIC_MINOR; data->miscdev.name = kasprintf(GFP_KERNEL, "fastrpc-%s", domains[domain_id]); data->miscdev.fops = &fastrpc_fops; err = misc_register(&data->miscdev); if (err) return err; kref_init(&data->refcount); dev_set_drvdata(&rpdev->dev, data); dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32)); INIT_LIST_HEAD(&data->users); spin_lock_init(&data->lock); idr_init(&data->ctx_idr); data->domain_id = domain_id; data->rpdev = rpdev; return of_platform_populate(rdev->of_node, NULL, NULL, rdev); } static void fastrpc_notify_users(struct fastrpc_user *user) { struct fastrpc_invoke_ctx *ctx; spin_lock(&user->lock); list_for_each_entry(ctx, &user->pending, node) complete(&ctx->work); spin_unlock(&user->lock); } static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev) { struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev); struct fastrpc_user *user; unsigned long flags; spin_lock_irqsave(&cctx->lock, flags); list_for_each_entry(user, &cctx->users, user) fastrpc_notify_users(user); spin_unlock_irqrestore(&cctx->lock, flags); misc_deregister(&cctx->miscdev); of_platform_depopulate(&rpdev->dev); cctx->rpdev = NULL; fastrpc_channel_ctx_put(cctx); } static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data, int len, void *priv, u32 addr) { struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev); struct fastrpc_invoke_rsp *rsp = data; struct fastrpc_invoke_ctx *ctx; unsigned long flags; unsigned long ctxid; if (len < sizeof(*rsp)) return -EINVAL; ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4); spin_lock_irqsave(&cctx->lock, flags); ctx = idr_find(&cctx->ctx_idr, ctxid); spin_unlock_irqrestore(&cctx->lock, flags); if (!ctx) { dev_err(&rpdev->dev, "No context ID matches response\n"); return -ENOENT; } ctx->retval = rsp->retval; complete(&ctx->work); /* 
* The DMA buffer associated with the context cannot be freed in * interrupt context so schedule it through a worker thread to * avoid a kernel BUG. */ schedule_work(&ctx->put_work); return 0; } static const struct of_device_id fastrpc_rpmsg_of_match[] = { { .compatible = "qcom,fastrpc" }, { }, }; MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match); static struct rpmsg_driver fastrpc_driver = { .probe = fastrpc_rpmsg_probe, .remove = fastrpc_rpmsg_remove, .callback = fastrpc_rpmsg_callback, .drv = { .name = "qcom,fastrpc", .of_match_table = fastrpc_rpmsg_of_match, }, }; static int fastrpc_init(void) { int ret; ret = platform_driver_register(&fastrpc_cb_driver); if (ret < 0) { pr_err("fastrpc: failed to register cb driver\n"); return ret; } ret = register_rpmsg_driver(&fastrpc_driver); if (ret < 0) { pr_err("fastrpc: failed to register rpmsg driver\n"); platform_driver_unregister(&fastrpc_cb_driver); return ret; } return 0; } module_init(fastrpc_init); static void fastrpc_exit(void) { platform_driver_unregister(&fastrpc_cb_driver); unregister_rpmsg_driver(&fastrpc_driver); } module_exit(fastrpc_exit); MODULE_LICENSE("GPL v2");
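/*
 * Illustrative user-space sketch, not part of the fastrpc driver above: one
 * way a client could exercise the ioctl interface handled by
 * fastrpc_device_ioctl().  Assumptions (hypothetical): the driver's uapi
 * header is installed as <misc/fastrpc.h>, an "adsp" domain was probed so a
 * /dev/fastrpc-adsp node exists, and the remote handle value below is only a
 * placeholder, not a real interface handle.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <misc/fastrpc.h>	/* struct fastrpc_invoke, FASTRPC_IOCTL_* */

static int example_attach_and_invoke(void)
{
	struct fastrpc_invoke inv = { 0 };
	int fd, ret;

	fd = open("/dev/fastrpc-adsp", O_RDWR);	/* assumed node name */
	if (fd < 0)
		return -1;

	/* Attach this process to the DSP guest OS (fastrpc_init_attach()). */
	ret = ioctl(fd, FASTRPC_IOCTL_INIT_ATTACH);
	if (ret)
		goto out;

	/*
	 * Invoke a remote method that takes no buffers: sc == 0 encodes zero
	 * in/out buffers and handles, so the args pointer can stay 0 (NULL).
	 */
	inv.handle = 1;	/* placeholder remote handle */
	inv.sc = 0;
	inv.args = 0;
	ret = ioctl(fd, FASTRPC_IOCTL_INVOKE, &inv);
out:
	close(fd);
	return ret;
}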
/* * http://www.cascoda.com/products/ca-821x/ * Copyright (c) 2016, Cascoda, Ltd. * All rights reserved. * * This code is dual-licensed under both GPLv2 and 3-clause BSD. What follows is * the license notice for both respectively. * ******************************************************************************* * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * ******************************************************************************* * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/cdev.h> #include <linux/clk-provider.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/ieee802154.h> #include <linux/io.h> #include <linux/kfifo.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <net/ieee802154_netdev.h> #include <net/mac802154.h> #define DRIVER_NAME "ca8210" /* external clock frequencies */ #define ONE_MHZ 1000000 #define TWO_MHZ (2 * ONE_MHZ) #define FOUR_MHZ (4 * ONE_MHZ) #define EIGHT_MHZ (8 * ONE_MHZ) #define SIXTEEN_MHZ (16 * ONE_MHZ) /* spi constants */ #define CA8210_SPI_BUF_SIZE 256 #define CA8210_SYNC_TIMEOUT 1000 /* Timeout for synchronous commands [ms] */ /* test interface constants */ #define CA8210_TEST_INT_FILE_NAME "ca8210_test" #define CA8210_TEST_INT_FIFO_SIZE 256 /* MAC status enumerations */ #define MAC_SUCCESS (0x00) #define MAC_ERROR (0x01) #define MAC_CANCELLED (0x02) #define MAC_READY_FOR_POLL (0x03) #define MAC_COUNTER_ERROR (0xDB) #define MAC_IMPROPER_KEY_TYPE (0xDC) #define MAC_IMPROPER_SECURITY_LEVEL (0xDD) #define MAC_UNSUPPORTED_LEGACY (0xDE) #define MAC_UNSUPPORTED_SECURITY (0xDF) #define MAC_BEACON_LOST (0xE0) #define MAC_CHANNEL_ACCESS_FAILURE (0xE1) #define MAC_DENIED (0xE2) #define MAC_DISABLE_TRX_FAILURE (0xE3) #define MAC_SECURITY_ERROR (0xE4) #define MAC_FRAME_TOO_LONG (0xE5) #define MAC_INVALID_GTS (0xE6) #define MAC_INVALID_HANDLE (0xE7) #define MAC_INVALID_PARAMETER (0xE8) #define MAC_NO_ACK (0xE9) #define MAC_NO_BEACON (0xEA) #define MAC_NO_DATA (0xEB) #define MAC_NO_SHORT_ADDRESS (0xEC) #define MAC_OUT_OF_CAP (0xED) #define MAC_PAN_ID_CONFLICT (0xEE) #define MAC_REALIGNMENT (0xEF) #define MAC_TRANSACTION_EXPIRED (0xF0) #define MAC_TRANSACTION_OVERFLOW (0xF1) #define MAC_TX_ACTIVE (0xF2) #define MAC_UNAVAILABLE_KEY (0xF3) #define MAC_UNSUPPORTED_ATTRIBUTE (0xF4) #define MAC_INVALID_ADDRESS (0xF5) #define MAC_ON_TIME_TOO_LONG (0xF6) #define MAC_PAST_TIME (0xF7) #define MAC_TRACKING_OFF (0xF8) #define MAC_INVALID_INDEX (0xF9) #define MAC_LIMIT_REACHED (0xFA) #define MAC_READ_ONLY (0xFB) #define MAC_SCAN_IN_PROGRESS (0xFC) #define MAC_SUPERFRAME_OVERLAP (0xFD) #define MAC_SYSTEM_ERROR (0xFF) /* HWME attribute IDs */ #define HWME_EDTHRESHOLD (0x04) #define HWME_EDVALUE (0x06) #define HWME_SYSCLKOUT (0x0F) #define HWME_LQILIMIT (0x11) /* TDME attribute IDs */ #define TDME_CHANNEL (0x00) #define TDME_ATM_CONFIG (0x06) #define MAX_HWME_ATTRIBUTE_SIZE 16 #define MAX_TDME_ATTRIBUTE_SIZE 2 /* PHY/MAC PIB Attribute Enumerations */ #define PHY_CURRENT_CHANNEL (0x00) #define PHY_TRANSMIT_POWER (0x02) #define PHY_CCA_MODE (0x03) #define MAC_ASSOCIATION_PERMIT (0x41) #define MAC_AUTO_REQUEST (0x42) #define MAC_BATT_LIFE_EXT (0x43) #define MAC_BATT_LIFE_EXT_PERIODS (0x44) #define MAC_BEACON_PAYLOAD (0x45) #define MAC_BEACON_PAYLOAD_LENGTH (0x46) #define MAC_BEACON_ORDER (0x47) #define MAC_GTS_PERMIT (0x4d) #define MAC_MAX_CSMA_BACKOFFS (0x4e) #define MAC_MIN_BE (0x4f) #define MAC_PAN_ID (0x50) #define MAC_PROMISCUOUS_MODE (0x51) #define MAC_RX_ON_WHEN_IDLE (0x52) #define MAC_SHORT_ADDRESS (0x53) #define MAC_SUPERFRAME_ORDER (0x54) #define MAC_ASSOCIATED_PAN_COORD (0x56) #define MAC_MAX_BE (0x57) #define MAC_MAX_FRAME_RETRIES (0x59) #define MAC_RESPONSE_WAIT_TIME (0x5A) #define 
MAC_SECURITY_ENABLED (0x5D) #define MAC_AUTO_REQUEST_SECURITY_LEVEL (0x78) #define MAC_AUTO_REQUEST_KEY_ID_MODE (0x79) #define NS_IEEE_ADDRESS (0xFF) /* Non-standard IEEE address */ /* MAC Address Mode Definitions */ #define MAC_MODE_NO_ADDR (0x00) #define MAC_MODE_SHORT_ADDR (0x02) #define MAC_MODE_LONG_ADDR (0x03) /* MAC constants */ #define MAX_BEACON_OVERHEAD (75) #define MAX_BEACON_PAYLOAD_LENGTH (IEEE802154_MTU - MAX_BEACON_OVERHEAD) #define MAX_ATTRIBUTE_SIZE (122) #define MAX_DATA_SIZE (114) #define CA8210_VALID_CHANNELS (0x07FFF800) /* MAC workarounds for V1.1 and MPW silicon (V0.x) */ #define CA8210_MAC_WORKAROUNDS (0) #define CA8210_MAC_MPW (0) /* memory manipulation macros */ #define LS_BYTE(x) ((u8)((x) & 0xFF)) #define MS_BYTE(x) ((u8)(((x) >> 8) & 0xFF)) /* message ID codes in SPI commands */ /* downstream */ #define MCPS_DATA_REQUEST (0x00) #define MLME_ASSOCIATE_REQUEST (0x02) #define MLME_ASSOCIATE_RESPONSE (0x03) #define MLME_DISASSOCIATE_REQUEST (0x04) #define MLME_GET_REQUEST (0x05) #define MLME_ORPHAN_RESPONSE (0x06) #define MLME_RESET_REQUEST (0x07) #define MLME_RX_ENABLE_REQUEST (0x08) #define MLME_SCAN_REQUEST (0x09) #define MLME_SET_REQUEST (0x0A) #define MLME_START_REQUEST (0x0B) #define MLME_POLL_REQUEST (0x0D) #define HWME_SET_REQUEST (0x0E) #define HWME_GET_REQUEST (0x0F) #define TDME_SETSFR_REQUEST (0x11) #define TDME_GETSFR_REQUEST (0x12) #define TDME_SET_REQUEST (0x14) /* upstream */ #define MCPS_DATA_INDICATION (0x00) #define MCPS_DATA_CONFIRM (0x01) #define MLME_RESET_CONFIRM (0x0A) #define MLME_SET_CONFIRM (0x0E) #define MLME_START_CONFIRM (0x0F) #define HWME_SET_CONFIRM (0x12) #define HWME_GET_CONFIRM (0x13) #define HWME_WAKEUP_INDICATION (0x15) #define TDME_SETSFR_CONFIRM (0x17) /* SPI command IDs */ /* bit indicating a confirm or indication from slave to master */ #define SPI_S2M (0x20) /* bit indicating a synchronous message */ #define SPI_SYN (0x40) /* SPI command definitions */ #define SPI_IDLE (0xFF) #define SPI_NACK (0xF0) #define SPI_MCPS_DATA_REQUEST (MCPS_DATA_REQUEST) #define SPI_MCPS_DATA_INDICATION (MCPS_DATA_INDICATION + SPI_S2M) #define SPI_MCPS_DATA_CONFIRM (MCPS_DATA_CONFIRM + SPI_S2M) #define SPI_MLME_ASSOCIATE_REQUEST (MLME_ASSOCIATE_REQUEST) #define SPI_MLME_RESET_REQUEST (MLME_RESET_REQUEST + SPI_SYN) #define SPI_MLME_SET_REQUEST (MLME_SET_REQUEST + SPI_SYN) #define SPI_MLME_START_REQUEST (MLME_START_REQUEST + SPI_SYN) #define SPI_MLME_RESET_CONFIRM (MLME_RESET_CONFIRM + SPI_S2M + SPI_SYN) #define SPI_MLME_SET_CONFIRM (MLME_SET_CONFIRM + SPI_S2M + SPI_SYN) #define SPI_MLME_START_CONFIRM (MLME_START_CONFIRM + SPI_S2M + SPI_SYN) #define SPI_HWME_SET_REQUEST (HWME_SET_REQUEST + SPI_SYN) #define SPI_HWME_GET_REQUEST (HWME_GET_REQUEST + SPI_SYN) #define SPI_HWME_SET_CONFIRM (HWME_SET_CONFIRM + SPI_S2M + SPI_SYN) #define SPI_HWME_GET_CONFIRM (HWME_GET_CONFIRM + SPI_S2M + SPI_SYN) #define SPI_HWME_WAKEUP_INDICATION (HWME_WAKEUP_INDICATION + SPI_S2M) #define SPI_TDME_SETSFR_REQUEST (TDME_SETSFR_REQUEST + SPI_SYN) #define SPI_TDME_SET_REQUEST (TDME_SET_REQUEST + SPI_SYN) #define SPI_TDME_SETSFR_CONFIRM (TDME_SETSFR_CONFIRM + SPI_S2M + SPI_SYN) /* TDME SFR addresses */ /* Page 0 */ #define CA8210_SFR_PACFG (0xB1) #define CA8210_SFR_MACCON (0xD8) #define CA8210_SFR_PACFGIB (0xFE) /* Page 1 */ #define CA8210_SFR_LOTXCAL (0xBF) #define CA8210_SFR_PTHRH (0xD1) #define CA8210_SFR_PRECFG (0xD3) #define CA8210_SFR_LNAGX40 (0xE1) #define CA8210_SFR_LNAGX41 (0xE2) #define CA8210_SFR_LNAGX42 (0xE3) #define CA8210_SFR_LNAGX43 (0xE4) #define 
CA8210_SFR_LNAGX44 (0xE5) #define CA8210_SFR_LNAGX45 (0xE6) #define CA8210_SFR_LNAGX46 (0xE7) #define CA8210_SFR_LNAGX47 (0xE9) #define PACFGIB_DEFAULT_CURRENT (0x3F) #define PTHRH_DEFAULT_THRESHOLD (0x5A) #define LNAGX40_DEFAULT_GAIN (0x29) /* 10dB */ #define LNAGX41_DEFAULT_GAIN (0x54) /* 21dB */ #define LNAGX42_DEFAULT_GAIN (0x6C) /* 27dB */ #define LNAGX43_DEFAULT_GAIN (0x7A) /* 30dB */ #define LNAGX44_DEFAULT_GAIN (0x84) /* 33dB */ #define LNAGX45_DEFAULT_GAIN (0x8B) /* 34dB */ #define LNAGX46_DEFAULT_GAIN (0x92) /* 36dB */ #define LNAGX47_DEFAULT_GAIN (0x96) /* 37dB */ #define CA8210_IOCTL_HARD_RESET (0x00) /* Structs/Enums */ /** * struct cas_control - spi transfer structure * @msg: spi_message for each exchange * @transfer: spi_transfer for each exchange * @tx_buf: source array for transmission * @tx_in_buf: array storing bytes received during transmission * @priv: pointer to private data * * This structure stores all the necessary data passed around during a single * spi exchange. */ struct cas_control { struct spi_message msg; struct spi_transfer transfer; u8 tx_buf[CA8210_SPI_BUF_SIZE]; u8 tx_in_buf[CA8210_SPI_BUF_SIZE]; struct ca8210_priv *priv; }; /** * struct ca8210_test - ca8210 test interface structure * @ca8210_dfs_spi_int: pointer to the entry in the debug fs for this device * @up_fifo: fifo for upstream messages * * This structure stores all the data pertaining to the debug interface */ struct ca8210_test { struct dentry *ca8210_dfs_spi_int; struct kfifo up_fifo; wait_queue_head_t readq; }; /** * struct ca8210_priv - ca8210 private data structure * @spi: pointer to the ca8210 spi device object * @hw: pointer to the ca8210 ieee802154_hw object * @hw_registered: true if hw has been registered with ieee802154 * @lock: spinlock protecting the private data area * @mlme_workqueue: workqueue for triggering MLME Reset * @irq_workqueue: workqueue for irq processing * @tx_skb: current socket buffer to transmit * @nextmsduhandle: msdu handle to pass to the 15.4 MAC layer for the * next transmission * @clk: external clock provided by the ca8210 * @last_dsn: sequence number of last data packet received, for * resend detection * @test: test interface data section for this instance * @async_tx_pending: true if an asynchronous transmission was started and * is not complete * @sync_command_response: pointer to buffer to fill with sync response * @ca8210_is_awake: nonzero if ca8210 is initialised, ready for comms * @sync_down: counts number of downstream synchronous commands * @sync_up: counts number of upstream synchronous commands * @spi_transfer_complete completion object for a single spi_transfer * @sync_exchange_complete completion object for a complete synchronous API * exchange * @promiscuous whether the ca8210 is in promiscuous mode or not * @retries: records how many times the current pending spi * transfer has been retried */ struct ca8210_priv { struct spi_device *spi; struct ieee802154_hw *hw; bool hw_registered; spinlock_t lock; struct workqueue_struct *mlme_workqueue; struct workqueue_struct *irq_workqueue; struct sk_buff *tx_skb; u8 nextmsduhandle; struct clk *clk; int last_dsn; struct ca8210_test test; bool async_tx_pending; u8 *sync_command_response; struct completion ca8210_is_awake; int sync_down, sync_up; struct completion spi_transfer_complete, sync_exchange_complete; bool promiscuous; int retries; }; /** * struct work_priv_container - link between a work object and the relevant * device's private data * @work: work object being executed * @priv: device's private data 
section * */ struct work_priv_container { struct work_struct work; struct ca8210_priv *priv; }; /** * struct ca8210_platform_data - ca8210 platform data structure * @extclockenable: true if the external clock is to be enabled * @extclockfreq: frequency of the external clock * @extclockgpio: ca8210 output gpio of the external clock * @gpio_reset: gpio number of ca8210 reset line * @gpio_irq: gpio number of ca8210 interrupt line * @irq_id: identifier for the ca8210 irq * */ struct ca8210_platform_data { bool extclockenable; unsigned int extclockfreq; unsigned int extclockgpio; int gpio_reset; int gpio_irq; int irq_id; }; /** * struct fulladdr - full MAC addressing information structure * @mode: address mode (none, short, extended) * @pan_id: 16-bit LE pan id * @address: LE address, variable length as specified by mode * */ struct fulladdr { u8 mode; u8 pan_id[2]; u8 address[8]; }; /** * union macaddr: generic MAC address container * @short_addr: 16-bit short address * @ieee_address: 64-bit extended address as LE byte array * */ union macaddr { u16 short_address; u8 ieee_address[8]; }; /** * struct secspec: security specification for SAP commands * @security_level: 0-7, controls level of authentication & encryption * @key_id_mode: 0-3, specifies how to obtain key * @key_source: extended key retrieval data * @key_index: single-byte key identifier * */ struct secspec { u8 security_level; u8 key_id_mode; u8 key_source[8]; u8 key_index; }; /* downlink functions parameter set definitions */ struct mcps_data_request_pset { u8 src_addr_mode; struct fulladdr dst; u8 msdu_length; u8 msdu_handle; u8 tx_options; u8 msdu[MAX_DATA_SIZE]; }; struct mlme_set_request_pset { u8 pib_attribute; u8 pib_attribute_index; u8 pib_attribute_length; u8 pib_attribute_value[MAX_ATTRIBUTE_SIZE]; }; struct hwme_set_request_pset { u8 hw_attribute; u8 hw_attribute_length; u8 hw_attribute_value[MAX_HWME_ATTRIBUTE_SIZE]; }; struct hwme_get_request_pset { u8 hw_attribute; }; struct tdme_setsfr_request_pset { u8 sfr_page; u8 sfr_address; u8 sfr_value; }; /* uplink functions parameter set definitions */ struct hwme_set_confirm_pset { u8 status; u8 hw_attribute; }; struct hwme_get_confirm_pset { u8 status; u8 hw_attribute; u8 hw_attribute_length; u8 hw_attribute_value[MAX_HWME_ATTRIBUTE_SIZE]; }; struct tdme_setsfr_confirm_pset { u8 status; u8 sfr_page; u8 sfr_address; }; struct mac_message { u8 command_id; u8 length; union { struct mcps_data_request_pset data_req; struct mlme_set_request_pset set_req; struct hwme_set_request_pset hwme_set_req; struct hwme_get_request_pset hwme_get_req; struct tdme_setsfr_request_pset tdme_set_sfr_req; struct hwme_set_confirm_pset hwme_set_cnf; struct hwme_get_confirm_pset hwme_get_cnf; struct tdme_setsfr_confirm_pset tdme_set_sfr_cnf; u8 u8param; u8 status; u8 payload[148]; } pdata; }; union pa_cfg_sfr { struct { u8 bias_current_trim : 3; u8 /* reserved */ : 1; u8 buffer_capacitor_trim : 3; u8 boost : 1; }; u8 paib; }; struct preamble_cfg_sfr { u8 timeout_symbols : 3; u8 acquisition_symbols : 3; u8 search_symbols : 2; }; static int (*cascoda_api_upstream)( const u8 *buf, size_t len, void *device_ref ); /** * link_to_linux_err() - Translates an 802.15.4 return code into the closest * linux error * @link_status: 802.15.4 status code * * Return: 0 or Linux error code */ static int link_to_linux_err(int link_status) { if (link_status < 0) { /* status is already a Linux code */ return link_status; } switch (link_status) { case MAC_SUCCESS: case MAC_REALIGNMENT: return 0; case MAC_IMPROPER_KEY_TYPE: 
return -EKEYREJECTED; case MAC_IMPROPER_SECURITY_LEVEL: case MAC_UNSUPPORTED_LEGACY: case MAC_DENIED: return -EACCES; case MAC_BEACON_LOST: case MAC_NO_ACK: case MAC_NO_BEACON: return -ENETUNREACH; case MAC_CHANNEL_ACCESS_FAILURE: case MAC_TX_ACTIVE: case MAC_SCAN_IN_PROGRESS: return -EBUSY; case MAC_DISABLE_TRX_FAILURE: case MAC_OUT_OF_CAP: return -EAGAIN; case MAC_FRAME_TOO_LONG: return -EMSGSIZE; case MAC_INVALID_GTS: case MAC_PAST_TIME: return -EBADSLT; case MAC_INVALID_HANDLE: return -EBADMSG; case MAC_INVALID_PARAMETER: case MAC_UNSUPPORTED_ATTRIBUTE: case MAC_ON_TIME_TOO_LONG: case MAC_INVALID_INDEX: return -EINVAL; case MAC_NO_DATA: return -ENODATA; case MAC_NO_SHORT_ADDRESS: return -EFAULT; case MAC_PAN_ID_CONFLICT: return -EADDRINUSE; case MAC_TRANSACTION_EXPIRED: return -ETIME; case MAC_TRANSACTION_OVERFLOW: return -ENOBUFS; case MAC_UNAVAILABLE_KEY: return -ENOKEY; case MAC_INVALID_ADDRESS: return -ENXIO; case MAC_TRACKING_OFF: case MAC_SUPERFRAME_OVERLAP: return -EREMOTEIO; case MAC_LIMIT_REACHED: return -EDQUOT; case MAC_READ_ONLY: return -EROFS; default: return -EPROTO; } } /** * ca8210_test_int_driver_write() - Writes a message to the test interface to be * read by the userspace * @buf: Buffer containing upstream message * @len: length of message to write * @spi: SPI device of message originator * * Return: 0 or linux error code */ static int ca8210_test_int_driver_write( const u8 *buf, size_t len, void *spi ) { struct ca8210_priv *priv = spi_get_drvdata(spi); struct ca8210_test *test = &priv->test; char *fifo_buffer; int i; dev_dbg( &priv->spi->dev, "test_interface: Buffering upstream message:\n" ); for (i = 0; i < len; i++) dev_dbg(&priv->spi->dev, "%#03x\n", buf[i]); fifo_buffer = kmemdup(buf, len, GFP_KERNEL); if (!fifo_buffer) return -ENOMEM; kfifo_in(&test->up_fifo, &fifo_buffer, 4); wake_up_interruptible(&priv->test.readq); return 0; } /* SPI Operation */ static int ca8210_net_rx( struct ieee802154_hw *hw, u8 *command, size_t len ); static u8 mlme_reset_request_sync( u8 set_default_pib, void *device_ref ); static int ca8210_spi_transfer( struct spi_device *spi, const u8 *buf, size_t len ); /** * ca8210_reset_send() - Hard resets the ca8210 for a given time * @spi: Pointer to target ca8210 spi device * @ms: Milliseconds to hold the reset line low for */ static void ca8210_reset_send(struct spi_device *spi, unsigned int ms) { struct ca8210_platform_data *pdata = spi->dev.platform_data; struct ca8210_priv *priv = spi_get_drvdata(spi); long status; gpio_set_value(pdata->gpio_reset, 0); reinit_completion(&priv->ca8210_is_awake); msleep(ms); gpio_set_value(pdata->gpio_reset, 1); priv->promiscuous = false; /* Wait until wakeup indication seen */ status = wait_for_completion_interruptible_timeout( &priv->ca8210_is_awake, msecs_to_jiffies(CA8210_SYNC_TIMEOUT) ); if (status == 0) { dev_crit( &spi->dev, "Fatal: No wakeup from ca8210 after reset!\n" ); } dev_dbg(&spi->dev, "Reset the device\n"); } /** * ca8210_mlme_reset_worker() - Resets the MLME, Called when the MAC OVERFLOW * condition happens. 
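* (that is, when ca8210_rx_done() sees a data confirm carrying MAC_TRANSACTION_OVERFLOW and queues this worker).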
* @work: Pointer to work being executed */ static void ca8210_mlme_reset_worker(struct work_struct *work) { struct work_priv_container *wpc = container_of( work, struct work_priv_container, work ); struct ca8210_priv *priv = wpc->priv; mlme_reset_request_sync(0, priv->spi); kfree(wpc); } /** * ca8210_rx_done() - Calls various message dispatches responding to a received * command * @arg: Pointer to the cas_control object for the relevant spi transfer * * Presents a received SAP command from the ca8210 to the Cascoda EVBME, test * interface and network driver. */ static void ca8210_rx_done(struct cas_control *cas_ctl) { u8 *buf; unsigned int len; struct work_priv_container *mlme_reset_wpc; struct ca8210_priv *priv = cas_ctl->priv; buf = cas_ctl->tx_in_buf; len = buf[1] + 2; if (len > CA8210_SPI_BUF_SIZE) { dev_crit( &priv->spi->dev, "Received packet len (%u) erroneously long\n", len ); goto finish; } if (buf[0] & SPI_SYN) { if (priv->sync_command_response) { memcpy(priv->sync_command_response, buf, len); complete(&priv->sync_exchange_complete); } else { if (cascoda_api_upstream) cascoda_api_upstream(buf, len, priv->spi); priv->sync_up++; } } else { if (cascoda_api_upstream) cascoda_api_upstream(buf, len, priv->spi); } ca8210_net_rx(priv->hw, buf, len); if (buf[0] == SPI_MCPS_DATA_CONFIRM) { if (buf[3] == MAC_TRANSACTION_OVERFLOW) { dev_info( &priv->spi->dev, "Waiting for transaction overflow to stabilise...\n"); msleep(2000); dev_info( &priv->spi->dev, "Resetting MAC...\n"); mlme_reset_wpc = kmalloc(sizeof(*mlme_reset_wpc), GFP_KERNEL); if (!mlme_reset_wpc) goto finish; INIT_WORK( &mlme_reset_wpc->work, ca8210_mlme_reset_worker ); mlme_reset_wpc->priv = priv; queue_work(priv->mlme_workqueue, &mlme_reset_wpc->work); } } else if (buf[0] == SPI_HWME_WAKEUP_INDICATION) { dev_notice( &priv->spi->dev, "Wakeup indication received, reason:\n" ); switch (buf[2]) { case 0: dev_notice( &priv->spi->dev, "Transceiver woken up from Power Up / System Reset\n" ); break; case 1: dev_notice( &priv->spi->dev, "Watchdog Timer Time-Out\n" ); break; case 2: dev_notice( &priv->spi->dev, "Transceiver woken up from Power-Off by Sleep Timer Time-Out\n"); break; case 3: dev_notice( &priv->spi->dev, "Transceiver woken up from Power-Off by GPIO Activity\n" ); break; case 4: dev_notice( &priv->spi->dev, "Transceiver woken up from Standby by Sleep Timer Time-Out\n" ); break; case 5: dev_notice( &priv->spi->dev, "Transceiver woken up from Standby by GPIO Activity\n" ); break; case 6: dev_notice( &priv->spi->dev, "Sleep-Timer Time-Out in Active Mode\n" ); break; default: dev_warn(&priv->spi->dev, "Wakeup reason unknown\n"); break; } complete(&priv->ca8210_is_awake); } finish:; } static int ca8210_remove(struct spi_device *spi_device); /** * ca8210_spi_transfer_complete() - Called when a single spi transfer has * completed * @context: Pointer to the cas_control object for the finished transfer */ static void ca8210_spi_transfer_complete(void *context) { struct cas_control *cas_ctl = context; struct ca8210_priv *priv = cas_ctl->priv; bool duplex_rx = false; int i; u8 retry_buffer[CA8210_SPI_BUF_SIZE]; if ( cas_ctl->tx_in_buf[0] == SPI_NACK || (cas_ctl->tx_in_buf[0] == SPI_IDLE && cas_ctl->tx_in_buf[1] == SPI_NACK) ) { /* ca8210 is busy */ dev_info(&priv->spi->dev, "ca8210 was busy during attempted write\n"); if (cas_ctl->tx_buf[0] == SPI_IDLE) { dev_warn( &priv->spi->dev, "IRQ servicing NACKd, dropping transfer\n" ); kfree(cas_ctl); return; } if (priv->retries > 3) { dev_err(&priv->spi->dev, "too many retries!\n"); 
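/* Retry budget exhausted: give up, free this transfer and remove the device. */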
kfree(cas_ctl); ca8210_remove(priv->spi); return; } memcpy(retry_buffer, cas_ctl->tx_buf, CA8210_SPI_BUF_SIZE); kfree(cas_ctl); ca8210_spi_transfer( priv->spi, retry_buffer, CA8210_SPI_BUF_SIZE ); priv->retries++; dev_info(&priv->spi->dev, "retried spi write\n"); return; } else if ( cas_ctl->tx_in_buf[0] != SPI_IDLE && cas_ctl->tx_in_buf[0] != SPI_NACK ) { duplex_rx = true; } if (duplex_rx) { dev_dbg(&priv->spi->dev, "READ CMD DURING TX\n"); for (i = 0; i < cas_ctl->tx_in_buf[1] + 2; i++) dev_dbg( &priv->spi->dev, "%#03x\n", cas_ctl->tx_in_buf[i] ); ca8210_rx_done(cas_ctl); } complete(&priv->spi_transfer_complete); kfree(cas_ctl); priv->retries = 0; } /** * ca8210_spi_transfer() - Initiate duplex spi transfer with ca8210 * @spi: Pointer to spi device for transfer * @buf: Octet array to send * @len: length of the buffer being sent * * Return: 0 or linux error code */ static int ca8210_spi_transfer( struct spi_device *spi, const u8 *buf, size_t len ) { int i, status = 0; struct ca8210_priv *priv; struct cas_control *cas_ctl; if (!spi) { pr_crit("NULL spi device passed to %s\n", __func__); return -ENODEV; } priv = spi_get_drvdata(spi); reinit_completion(&priv->spi_transfer_complete); dev_dbg(&spi->dev, "%s called\n", __func__); cas_ctl = kmalloc(sizeof(*cas_ctl), GFP_ATOMIC); if (!cas_ctl) return -ENOMEM; cas_ctl->priv = priv; memset(cas_ctl->tx_buf, SPI_IDLE, CA8210_SPI_BUF_SIZE); memset(cas_ctl->tx_in_buf, SPI_IDLE, CA8210_SPI_BUF_SIZE); memcpy(cas_ctl->tx_buf, buf, len); for (i = 0; i < len; i++) dev_dbg(&spi->dev, "%#03x\n", cas_ctl->tx_buf[i]); spi_message_init(&cas_ctl->msg); cas_ctl->transfer.tx_nbits = 1; /* 1 MOSI line */ cas_ctl->transfer.rx_nbits = 1; /* 1 MISO line */ cas_ctl->transfer.speed_hz = 0; /* Use device setting */ cas_ctl->transfer.bits_per_word = 0; /* Use device setting */ cas_ctl->transfer.tx_buf = cas_ctl->tx_buf; cas_ctl->transfer.rx_buf = cas_ctl->tx_in_buf; cas_ctl->transfer.delay_usecs = 0; cas_ctl->transfer.cs_change = 0; cas_ctl->transfer.len = sizeof(struct mac_message); cas_ctl->msg.complete = ca8210_spi_transfer_complete; cas_ctl->msg.context = cas_ctl; spi_message_add_tail( &cas_ctl->transfer, &cas_ctl->msg ); status = spi_async(spi, &cas_ctl->msg); if (status < 0) { dev_crit( &spi->dev, "status %d from spi_sync in write\n", status ); } return status; } /** * ca8210_spi_exchange() - Exchange API/SAP commands with the radio * @buf: Octet array of command being sent downstream * @len: length of buf * @response: buffer for storing synchronous response * @device_ref: spi_device pointer for ca8210 * * Effectively calls ca8210_spi_transfer to write buf[] to the spi, then for * synchronous commands waits for the corresponding response to be read from * the spi before returning. The response is written to the response parameter. 
* * Return: 0 or linux error code */ static int ca8210_spi_exchange( const u8 *buf, size_t len, u8 *response, void *device_ref ) { int status = 0; struct spi_device *spi = device_ref; struct ca8210_priv *priv = spi->dev.driver_data; long wait_remaining; if ((buf[0] & SPI_SYN) && response) { /* if sync wait for confirm */ reinit_completion(&priv->sync_exchange_complete); priv->sync_command_response = response; } do { reinit_completion(&priv->spi_transfer_complete); status = ca8210_spi_transfer(priv->spi, buf, len); if (status) { dev_warn( &spi->dev, "spi write failed, returned %d\n", status ); if (status == -EBUSY) continue; if (((buf[0] & SPI_SYN) && response)) complete(&priv->sync_exchange_complete); goto cleanup; } wait_remaining = wait_for_completion_interruptible_timeout( &priv->spi_transfer_complete, msecs_to_jiffies(1000) ); if (wait_remaining == -ERESTARTSYS) { status = -ERESTARTSYS; } else if (wait_remaining == 0) { dev_err( &spi->dev, "SPI downstream transfer timed out!\n" ); status = -ETIME; goto cleanup; } } while (status < 0); if (!((buf[0] & SPI_SYN) && response)) goto cleanup; wait_remaining = wait_for_completion_interruptible_timeout( &priv->sync_exchange_complete, msecs_to_jiffies(CA8210_SYNC_TIMEOUT) ); if (wait_remaining == -ERESTARTSYS) { status = -ERESTARTSYS; } else if (wait_remaining == 0) { dev_err( &spi->dev, "Synchronous confirm timeout\n" ); status = -ETIME; } cleanup: priv->sync_command_response = NULL; return status; } /** * ca8210_interrupt_handler() - Called when an irq is received from the ca8210 * @irq: Id of the irq being handled * @dev_id: Pointer passed by the system, pointing to the ca8210's private data * * This function is called when the irq line from the ca8210 is asserted, * signifying that the ca8210 has a message to send upstream to us. Starts the * asynchronous spi read. 
* * Return: irq return code */ static irqreturn_t ca8210_interrupt_handler(int irq, void *dev_id) { struct ca8210_priv *priv = dev_id; int status; dev_dbg(&priv->spi->dev, "irq: Interrupt occurred\n"); do { status = ca8210_spi_transfer(priv->spi, NULL, 0); if (status && (status != -EBUSY)) { dev_warn( &priv->spi->dev, "spi read failed, returned %d\n", status ); } } while (status == -EBUSY); return IRQ_HANDLED; } static int (*cascoda_api_downstream)( const u8 *buf, size_t len, u8 *response, void *device_ref ) = ca8210_spi_exchange; /* Cascoda API / 15.4 SAP Primitives */ /** * tdme_setsfr_request_sync() - TDME_SETSFR_request/confirm according to API * @sfr_page: SFR Page * @sfr_address: SFR Address * @sfr_value: SFR Value * @device_ref: Nondescript pointer to target device * * Return: 802.15.4 status code of TDME-SETSFR.confirm */ static u8 tdme_setsfr_request_sync( u8 sfr_page, u8 sfr_address, u8 sfr_value, void *device_ref ) { int ret; struct mac_message command, response; struct spi_device *spi = device_ref; command.command_id = SPI_TDME_SETSFR_REQUEST; command.length = 3; command.pdata.tdme_set_sfr_req.sfr_page = sfr_page; command.pdata.tdme_set_sfr_req.sfr_address = sfr_address; command.pdata.tdme_set_sfr_req.sfr_value = sfr_value; response.command_id = SPI_IDLE; ret = cascoda_api_downstream( &command.command_id, command.length + 2, &response.command_id, device_ref ); if (ret) { dev_crit(&spi->dev, "cascoda_api_downstream returned %d", ret); return MAC_SYSTEM_ERROR; } if (response.command_id != SPI_TDME_SETSFR_CONFIRM) { dev_crit( &spi->dev, "sync response to SPI_TDME_SETSFR_REQUEST was not SPI_TDME_SETSFR_CONFIRM, it was %d\n", response.command_id ); return MAC_SYSTEM_ERROR; } return response.pdata.tdme_set_sfr_cnf.status; } /** * tdme_chipinit() - TDME Chip Register Default Initialisation Macro * @device_ref: Nondescript pointer to target device * * Return: 802.15.4 status code of API calls */ static u8 tdme_chipinit(void *device_ref) { u8 status = MAC_SUCCESS; u8 sfr_address; struct spi_device *spi = device_ref; struct preamble_cfg_sfr pre_cfg_value = { .timeout_symbols = 3, .acquisition_symbols = 3, .search_symbols = 1, }; /* LNA Gain Settings */ status = tdme_setsfr_request_sync( 1, (sfr_address = CA8210_SFR_LNAGX40), LNAGX40_DEFAULT_GAIN, device_ref); if (status) goto finish; status = tdme_setsfr_request_sync( 1, (sfr_address = CA8210_SFR_LNAGX41), LNAGX41_DEFAULT_GAIN, device_ref); if (status) goto finish; status = tdme_setsfr_request_sync( 1, (sfr_address = CA8210_SFR_LNAGX42), LNAGX42_DEFAULT_GAIN, device_ref); if (status) goto finish; status = tdme_setsfr_request_sync( 1, (sfr_address = CA8210_SFR_LNAGX43), LNAGX43_DEFAULT_GAIN, device_ref); if (status) goto finish; status = tdme_setsfr_request_sync( 1, (sfr_address = CA8210_SFR_LNAGX44), LNAGX44_DEFAULT_GAIN, device_ref); if (status) goto finish; status = tdme_setsfr_request_sync( 1, (sfr_address = CA8210_SFR_LNAGX45), LNAGX45_DEFAULT_GAIN, device_ref); if (status) goto finish; status = tdme_setsfr_request_sync( 1, (sfr_address = CA8210_SFR_LNAGX46), LNAGX46_DEFAULT_GAIN, device_ref); if (status) goto finish; status = tdme_setsfr_request_sync( 1, (sfr_address = CA8210_SFR_LNAGX47), LNAGX47_DEFAULT_GAIN, device_ref); if (status) goto finish; /* Preamble Timing Config */ status = tdme_setsfr_request_sync( 1, (sfr_address = CA8210_SFR_PRECFG), *((u8 *)&pre_cfg_value), device_ref); if (status) goto finish; /* Preamble Threshold High */ status = tdme_setsfr_request_sync( 1, (sfr_address = CA8210_SFR_PTHRH), 
PTHRH_DEFAULT_THRESHOLD, device_ref); if (status) goto finish; /* Tx Output Power 8 dBm */ status = tdme_setsfr_request_sync( 0, (sfr_address = CA8210_SFR_PACFGIB), PACFGIB_DEFAULT_CURRENT, device_ref); if (status) goto finish; finish: if (status != MAC_SUCCESS) { dev_err( &spi->dev, "failed to set sfr at %#03x, status = %#03x\n", sfr_address, status ); } return status; } /** * tdme_channelinit() - TDME Channel Register Default Initialisation Macro (Tx) * @channel: 802.15.4 channel to initialise chip for * @device_ref: Nondescript pointer to target device * * Return: 802.15.4 status code of API calls */ static u8 tdme_channelinit(u8 channel, void *device_ref) { /* Transceiver front-end local oscillator tx two-point calibration * value. Tuned for the hardware. */ u8 txcalval; if (channel >= 25) txcalval = 0xA7; else if (channel >= 23) txcalval = 0xA8; else if (channel >= 22) txcalval = 0xA9; else if (channel >= 20) txcalval = 0xAA; else if (channel >= 17) txcalval = 0xAB; else if (channel >= 16) txcalval = 0xAC; else if (channel >= 14) txcalval = 0xAD; else if (channel >= 12) txcalval = 0xAE; else txcalval = 0xAF; return tdme_setsfr_request_sync( 1, CA8210_SFR_LOTXCAL, txcalval, device_ref ); /* LO Tx Cal */ } /** * tdme_checkpibattribute() - Checks Attribute Values that are not checked in * MAC * @pib_attribute: Attribute Number * @pib_attribute_length: Attribute length * @pib_attribute_value: Pointer to Attribute Value * @device_ref: Nondescript pointer to target device * * Return: 802.15.4 status code of checks */ static u8 tdme_checkpibattribute( u8 pib_attribute, u8 pib_attribute_length, const void *pib_attribute_value ) { u8 status = MAC_SUCCESS; u8 value; value = *((u8 *)pib_attribute_value); switch (pib_attribute) { /* PHY */ case PHY_TRANSMIT_POWER: if (value > 0x3F) status = MAC_INVALID_PARAMETER; break; case PHY_CCA_MODE: if (value > 0x03) status = MAC_INVALID_PARAMETER; break; /* MAC */ case MAC_BATT_LIFE_EXT_PERIODS: if (value < 6 || value > 41) status = MAC_INVALID_PARAMETER; break; case MAC_BEACON_PAYLOAD: if (pib_attribute_length > MAX_BEACON_PAYLOAD_LENGTH) status = MAC_INVALID_PARAMETER; break; case MAC_BEACON_PAYLOAD_LENGTH: if (value > MAX_BEACON_PAYLOAD_LENGTH) status = MAC_INVALID_PARAMETER; break; case MAC_BEACON_ORDER: if (value > 15) status = MAC_INVALID_PARAMETER; break; case MAC_MAX_BE: if (value < 3 || value > 8) status = MAC_INVALID_PARAMETER; break; case MAC_MAX_CSMA_BACKOFFS: if (value > 5) status = MAC_INVALID_PARAMETER; break; case MAC_MAX_FRAME_RETRIES: if (value > 7) status = MAC_INVALID_PARAMETER; break; case MAC_MIN_BE: if (value > 8) status = MAC_INVALID_PARAMETER; break; case MAC_RESPONSE_WAIT_TIME: if (value < 2 || value > 64) status = MAC_INVALID_PARAMETER; break; case MAC_SUPERFRAME_ORDER: if (value > 15) status = MAC_INVALID_PARAMETER; break; /* boolean */ case MAC_ASSOCIATED_PAN_COORD: case MAC_ASSOCIATION_PERMIT: case MAC_AUTO_REQUEST: case MAC_BATT_LIFE_EXT: case MAC_GTS_PERMIT: case MAC_PROMISCUOUS_MODE: case MAC_RX_ON_WHEN_IDLE: case MAC_SECURITY_ENABLED: if (value > 1) status = MAC_INVALID_PARAMETER; break; /* MAC SEC */ case MAC_AUTO_REQUEST_SECURITY_LEVEL: if (value > 7) status = MAC_INVALID_PARAMETER; break; case MAC_AUTO_REQUEST_KEY_ID_MODE: if (value > 3) status = MAC_INVALID_PARAMETER; break; default: break; } return status; } /** * tdme_settxpower() - Sets the tx power for MLME_SET phyTransmitPower * @txp: Transmit Power * @device_ref: Nondescript pointer to target device * * Normalised to 802.15.4 Definition (6-bit, signed): * Bit 
7-6: not used * Bit 5-0: tx power (-32 - +31 dB) * * Return: 802.15.4 status code of api calls */ static u8 tdme_settxpower(u8 txp, void *device_ref) { u8 status; s8 txp_val; u8 txp_ext; union pa_cfg_sfr pa_cfg_val; /* extend from 6 to 8 bit */ txp_ext = 0x3F & txp; if (txp_ext & 0x20) txp_ext += 0xC0; txp_val = (s8)txp_ext; if (CA8210_MAC_MPW) { if (txp_val > 0) { /* 8 dBm: ptrim = 5, itrim = +3 => +4 dBm */ pa_cfg_val.bias_current_trim = 3; pa_cfg_val.buffer_capacitor_trim = 5; pa_cfg_val.boost = 1; } else { /* 0 dBm: ptrim = 7, itrim = +3 => -6 dBm */ pa_cfg_val.bias_current_trim = 3; pa_cfg_val.buffer_capacitor_trim = 7; pa_cfg_val.boost = 0; } /* write PACFG */ status = tdme_setsfr_request_sync( 0, CA8210_SFR_PACFG, pa_cfg_val.paib, device_ref ); } else { /* Look-Up Table for Setting Current and Frequency Trim values * for desired Output Power */ if (txp_val > 8) { pa_cfg_val.paib = 0x3F; } else if (txp_val == 8) { pa_cfg_val.paib = 0x32; } else if (txp_val == 7) { pa_cfg_val.paib = 0x22; } else if (txp_val == 6) { pa_cfg_val.paib = 0x18; } else if (txp_val == 5) { pa_cfg_val.paib = 0x10; } else if (txp_val == 4) { pa_cfg_val.paib = 0x0C; } else if (txp_val == 3) { pa_cfg_val.paib = 0x08; } else if (txp_val == 2) { pa_cfg_val.paib = 0x05; } else if (txp_val == 1) { pa_cfg_val.paib = 0x03; } else if (txp_val == 0) { pa_cfg_val.paib = 0x01; } else { /* < 0 */ pa_cfg_val.paib = 0x00; } /* write PACFGIB */ status = tdme_setsfr_request_sync( 0, CA8210_SFR_PACFGIB, pa_cfg_val.paib, device_ref ); } return status; } /** * mcps_data_request() - mcps_data_request (Send Data) according to API Spec * @src_addr_mode: Source Addressing Mode * @dst_address_mode: Destination Addressing Mode * @dst_pan_id: Destination PAN ID * @dst_addr: Pointer to Destination Address * @msdu_length: length of Data * @msdu: Pointer to Data * @msdu_handle: Handle of Data * @tx_options: Tx Options Bit Field * @security: Pointer to Security Structure or NULL * @device_ref: Nondescript pointer to target device * * Return: 802.15.4 status code of action */ static u8 mcps_data_request( u8 src_addr_mode, u8 dst_address_mode, u16 dst_pan_id, union macaddr *dst_addr, u8 msdu_length, u8 *msdu, u8 msdu_handle, u8 tx_options, struct secspec *security, void *device_ref ) { struct secspec *psec; struct mac_message command; command.command_id = SPI_MCPS_DATA_REQUEST; command.pdata.data_req.src_addr_mode = src_addr_mode; command.pdata.data_req.dst.mode = dst_address_mode; if (dst_address_mode != MAC_MODE_NO_ADDR) { command.pdata.data_req.dst.pan_id[0] = LS_BYTE(dst_pan_id); command.pdata.data_req.dst.pan_id[1] = MS_BYTE(dst_pan_id); if (dst_address_mode == MAC_MODE_SHORT_ADDR) { command.pdata.data_req.dst.address[0] = LS_BYTE( dst_addr->short_address ); command.pdata.data_req.dst.address[1] = MS_BYTE( dst_addr->short_address ); } else { /* MAC_MODE_LONG_ADDR*/ memcpy( command.pdata.data_req.dst.address, dst_addr->ieee_address, 8 ); } } command.pdata.data_req.msdu_length = msdu_length; command.pdata.data_req.msdu_handle = msdu_handle; command.pdata.data_req.tx_options = tx_options; memcpy(command.pdata.data_req.msdu, msdu, msdu_length); psec = (struct secspec *)(command.pdata.data_req.msdu + msdu_length); command.length = sizeof(struct mcps_data_request_pset) - MAX_DATA_SIZE + msdu_length; if (!security || security->security_level == 0) { psec->security_level = 0; command.length += 1; } else { *psec = *security; command.length += sizeof(struct secspec); } if (ca8210_spi_transfer(device_ref, &command.command_id, command.length + 2)) 
return MAC_SYSTEM_ERROR; return MAC_SUCCESS; } /** * mlme_reset_request_sync() - MLME_RESET_request/confirm according to API Spec * @set_default_pib: Set defaults in PIB * @device_ref: Nondescript pointer to target device * * Return: 802.15.4 status code of MLME-RESET.confirm */ static u8 mlme_reset_request_sync( u8 set_default_pib, void *device_ref ) { u8 status; struct mac_message command, response; struct spi_device *spi = device_ref; command.command_id = SPI_MLME_RESET_REQUEST; command.length = 1; command.pdata.u8param = set_default_pib; if (cascoda_api_downstream( &command.command_id, command.length + 2, &response.command_id, device_ref)) { dev_err(&spi->dev, "cascoda_api_downstream failed\n"); return MAC_SYSTEM_ERROR; } if (response.command_id != SPI_MLME_RESET_CONFIRM) return MAC_SYSTEM_ERROR; status = response.pdata.status; /* reset COORD Bit for Channel Filtering as Coordinator */ if (CA8210_MAC_WORKAROUNDS && set_default_pib && !status) { status = tdme_setsfr_request_sync( 0, CA8210_SFR_MACCON, 0, device_ref ); } return status; } /** * mlme_set_request_sync() - MLME_SET_request/confirm according to API Spec * @pib_attribute: Attribute Number * @pib_attribute_index: Index within Attribute if an Array * @pib_attribute_length: Attribute length * @pib_attribute_value: Pointer to Attribute Value * @device_ref: Nondescript pointer to target device * * Return: 802.15.4 status code of MLME-SET.confirm */ static u8 mlme_set_request_sync( u8 pib_attribute, u8 pib_attribute_index, u8 pib_attribute_length, const void *pib_attribute_value, void *device_ref ) { u8 status; struct mac_message command, response; /* pre-check the validity of pib_attribute values that are not checked * in MAC */ if (tdme_checkpibattribute( pib_attribute, pib_attribute_length, pib_attribute_value)) { return MAC_INVALID_PARAMETER; } if (pib_attribute == PHY_CURRENT_CHANNEL) { status = tdme_channelinit( *((u8 *)pib_attribute_value), device_ref ); if (status) return status; } if (pib_attribute == PHY_TRANSMIT_POWER) { return tdme_settxpower( *((u8 *)pib_attribute_value), device_ref ); } command.command_id = SPI_MLME_SET_REQUEST; command.length = sizeof(struct mlme_set_request_pset) - MAX_ATTRIBUTE_SIZE + pib_attribute_length; command.pdata.set_req.pib_attribute = pib_attribute; command.pdata.set_req.pib_attribute_index = pib_attribute_index; command.pdata.set_req.pib_attribute_length = pib_attribute_length; memcpy( command.pdata.set_req.pib_attribute_value, pib_attribute_value, pib_attribute_length ); if (cascoda_api_downstream( &command.command_id, command.length + 2, &response.command_id, device_ref)) { return MAC_SYSTEM_ERROR; } if (response.command_id != SPI_MLME_SET_CONFIRM) return MAC_SYSTEM_ERROR; return response.pdata.status; } /** * hwme_set_request_sync() - HWME_SET_request/confirm according to API Spec * @hw_attribute: Attribute Number * @hw_attribute_length: Attribute length * @hw_attribute_value: Pointer to Attribute Value * @device_ref: Nondescript pointer to target device * * Return: 802.15.4 status code of HWME-SET.confirm */ static u8 hwme_set_request_sync( u8 hw_attribute, u8 hw_attribute_length, u8 *hw_attribute_value, void *device_ref ) { struct mac_message command, response; command.command_id = SPI_HWME_SET_REQUEST; command.length = 2 + hw_attribute_length; command.pdata.hwme_set_req.hw_attribute = hw_attribute; command.pdata.hwme_set_req.hw_attribute_length = hw_attribute_length; memcpy( command.pdata.hwme_set_req.hw_attribute_value, hw_attribute_value, hw_attribute_length ); if 
(cascoda_api_downstream( &command.command_id, command.length + 2, &response.command_id, device_ref)) { return MAC_SYSTEM_ERROR; } if (response.command_id != SPI_HWME_SET_CONFIRM) return MAC_SYSTEM_ERROR; return response.pdata.hwme_set_cnf.status; } /** * hwme_get_request_sync() - HWME_GET_request/confirm according to API Spec * @hw_attribute: Attribute Number * @hw_attribute_length: Attribute length * @hw_attribute_value: Pointer to Attribute Value * @device_ref: Nondescript pointer to target device * * Return: 802.15.4 status code of HWME-GET.confirm */ static u8 hwme_get_request_sync( u8 hw_attribute, u8 *hw_attribute_length, u8 *hw_attribute_value, void *device_ref ) { struct mac_message command, response; command.command_id = SPI_HWME_GET_REQUEST; command.length = 1; command.pdata.hwme_get_req.hw_attribute = hw_attribute; if (cascoda_api_downstream( &command.command_id, command.length + 2, &response.command_id, device_ref)) { return MAC_SYSTEM_ERROR; } if (response.command_id != SPI_HWME_GET_CONFIRM) return MAC_SYSTEM_ERROR; if (response.pdata.hwme_get_cnf.status == MAC_SUCCESS) { *hw_attribute_length = response.pdata.hwme_get_cnf.hw_attribute_length; memcpy( hw_attribute_value, response.pdata.hwme_get_cnf.hw_attribute_value, *hw_attribute_length ); } return response.pdata.hwme_get_cnf.status; } /* Network driver operation */ /** * ca8210_async_xmit_complete() - Called to announce that an asynchronous * transmission has finished * @hw: ieee802154_hw of ca8210 that has finished exchange * @msduhandle: Identifier of transmission that has completed * @status: Returned 802.15.4 status code of the transmission * * Return: 0 or linux error code */ static int ca8210_async_xmit_complete( struct ieee802154_hw *hw, u8 msduhandle, u8 status) { struct ca8210_priv *priv = hw->priv; if (priv->nextmsduhandle != msduhandle) { dev_err( &priv->spi->dev, "Unexpected msdu_handle on data confirm, Expected %d, got %d\n", priv->nextmsduhandle, msduhandle ); return -EIO; } priv->async_tx_pending = false; priv->nextmsduhandle++; if (status) { dev_err( &priv->spi->dev, "Link transmission unsuccessful, status = %d\n", status ); if (status != MAC_TRANSACTION_OVERFLOW) { ieee802154_wake_queue(priv->hw); return 0; } } ieee802154_xmit_complete(priv->hw, priv->tx_skb, true); return 0; } /** * ca8210_skb_rx() - Contructs a properly framed socket buffer from a received * MCPS_DATA_indication * @hw: ieee802154_hw that MCPS_DATA_indication was received by * @len: length of MCPS_DATA_indication * @data_ind: Octet array of MCPS_DATA_indication * * Called by the spi driver whenever a SAP command is received, this function * will ascertain whether the command is of interest to the network driver and * take necessary action. 
* * Return: 0 or linux error code */ static int ca8210_skb_rx( struct ieee802154_hw *hw, size_t len, u8 *data_ind ) { struct ieee802154_hdr hdr; int msdulen; int hlen; u8 mpdulinkquality = data_ind[23]; struct sk_buff *skb; struct ca8210_priv *priv = hw->priv; /* Allocate mtu size buffer for every rx packet */ skb = dev_alloc_skb(IEEE802154_MTU + sizeof(hdr)); if (!skb) return -ENOMEM; skb_reserve(skb, sizeof(hdr)); msdulen = data_ind[22]; /* msdu_length */ if (msdulen > IEEE802154_MTU) { dev_err( &priv->spi->dev, "received erroneously large msdu length!\n" ); kfree_skb(skb); return -EMSGSIZE; } dev_dbg(&priv->spi->dev, "skb buffer length = %d\n", msdulen); if (priv->promiscuous) goto copy_payload; /* Populate hdr */ hdr.sec.level = data_ind[29 + msdulen]; dev_dbg(&priv->spi->dev, "security level: %#03x\n", hdr.sec.level); if (hdr.sec.level > 0) { hdr.sec.key_id_mode = data_ind[30 + msdulen]; memcpy(&hdr.sec.extended_src, &data_ind[31 + msdulen], 8); hdr.sec.key_id = data_ind[39 + msdulen]; } hdr.source.mode = data_ind[0]; dev_dbg(&priv->spi->dev, "srcAddrMode: %#03x\n", hdr.source.mode); hdr.source.pan_id = *(u16 *)&data_ind[1]; dev_dbg(&priv->spi->dev, "srcPanId: %#06x\n", hdr.source.pan_id); memcpy(&hdr.source.extended_addr, &data_ind[3], 8); hdr.dest.mode = data_ind[11]; dev_dbg(&priv->spi->dev, "dstAddrMode: %#03x\n", hdr.dest.mode); hdr.dest.pan_id = *(u16 *)&data_ind[12]; dev_dbg(&priv->spi->dev, "dstPanId: %#06x\n", hdr.dest.pan_id); memcpy(&hdr.dest.extended_addr, &data_ind[14], 8); /* Fill in FC implicitly */ hdr.fc.type = 1; /* Data frame */ if (hdr.sec.level) hdr.fc.security_enabled = 1; else hdr.fc.security_enabled = 0; if (data_ind[1] != data_ind[12] || data_ind[2] != data_ind[13]) hdr.fc.intra_pan = 1; else hdr.fc.intra_pan = 0; hdr.fc.dest_addr_mode = hdr.dest.mode; hdr.fc.source_addr_mode = hdr.source.mode; /* Add hdr to front of buffer */ hlen = ieee802154_hdr_push(skb, &hdr); if (hlen < 0) { dev_crit(&priv->spi->dev, "failed to push mac hdr onto skb!\n"); kfree_skb(skb); return hlen; } skb_reset_mac_header(skb); skb->mac_len = hlen; copy_payload: /* Add <msdulen> bytes of space to the back of the buffer */ /* Copy msdu to skb */ skb_put_data(skb, &data_ind[29], msdulen); ieee802154_rx_irqsafe(hw, skb, mpdulinkquality); return 0; } /** * ca8210_net_rx() - Acts upon received SAP commands relevant to the network * driver * @hw: ieee802154_hw that command was received by * @command: Octet array of received command * @len: length of the received command * * Called by the spi driver whenever a SAP command is received, this function * will ascertain whether the command is of interest to the network driver and * take necessary action. 
* * Return: 0 or linux error code */ static int ca8210_net_rx(struct ieee802154_hw *hw, u8 *command, size_t len) { struct ca8210_priv *priv = hw->priv; unsigned long flags; u8 status; dev_dbg(&priv->spi->dev, "%s: CmdID = %d\n", __func__, command[0]); if (command[0] == SPI_MCPS_DATA_INDICATION) { /* Received data */ spin_lock_irqsave(&priv->lock, flags); if (command[26] == priv->last_dsn) { dev_dbg( &priv->spi->dev, "DSN %d resend received, ignoring...\n", command[26] ); spin_unlock_irqrestore(&priv->lock, flags); return 0; } priv->last_dsn = command[26]; spin_unlock_irqrestore(&priv->lock, flags); return ca8210_skb_rx(hw, len - 2, command + 2); } else if (command[0] == SPI_MCPS_DATA_CONFIRM) { status = command[3]; if (priv->async_tx_pending) { return ca8210_async_xmit_complete( hw, command[2], status ); } } return 0; } /** * ca8210_skb_tx() - Transmits a given socket buffer using the ca8210 * @skb: Socket buffer to transmit * @msduhandle: Data identifier to pass to the 802.15.4 MAC * @priv: Pointer to private data section of target ca8210 * * Return: 0 or linux error code */ static int ca8210_skb_tx( struct sk_buff *skb, u8 msduhandle, struct ca8210_priv *priv ) { int status; struct ieee802154_hdr header = { }; struct secspec secspec; unsigned int mac_len; dev_dbg(&priv->spi->dev, "%s called\n", __func__); /* Get addressing info from skb - ieee802154 layer creates a full * packet */ mac_len = ieee802154_hdr_peek_addrs(skb, &header); secspec.security_level = header.sec.level; secspec.key_id_mode = header.sec.key_id_mode; if (secspec.key_id_mode == 2) memcpy(secspec.key_source, &header.sec.short_src, 4); else if (secspec.key_id_mode == 3) memcpy(secspec.key_source, &header.sec.extended_src, 8); secspec.key_index = header.sec.key_id; /* Pass to Cascoda API */ status = mcps_data_request( header.source.mode, header.dest.mode, header.dest.pan_id, (union macaddr *)&header.dest.extended_addr, skb->len - mac_len, &skb->data[mac_len], msduhandle, header.fc.ack_request, &secspec, priv->spi ); return link_to_linux_err(status); } /** * ca8210_start() - Starts the network driver * @hw: ieee802154_hw of ca8210 being started * * Return: 0 or linux error code */ static int ca8210_start(struct ieee802154_hw *hw) { int status; u8 rx_on_when_idle; u8 lqi_threshold = 0; struct ca8210_priv *priv = hw->priv; priv->last_dsn = -1; /* Turn receiver on when idle for now just to test rx */ rx_on_when_idle = 1; status = mlme_set_request_sync( MAC_RX_ON_WHEN_IDLE, 0, 1, &rx_on_when_idle, priv->spi ); if (status) { dev_crit( &priv->spi->dev, "Setting rx_on_when_idle failed, status = %d\n", status ); return link_to_linux_err(status); } status = hwme_set_request_sync( HWME_LQILIMIT, 1, &lqi_threshold, priv->spi ); if (status) { dev_crit( &priv->spi->dev, "Setting lqilimit failed, status = %d\n", status ); return link_to_linux_err(status); } return 0; } /** * ca8210_stop() - Stops the network driver * @hw: ieee802154_hw of ca8210 being stopped * * Return: 0 or linux error code */ static void ca8210_stop(struct ieee802154_hw *hw) { } /** * ca8210_xmit_async() - Asynchronously transmits a given socket buffer using * the ca8210 * @hw: ieee802154_hw of ca8210 to transmit from * @skb: Socket buffer to transmit * * Return: 0 or linux error code */ static int ca8210_xmit_async(struct ieee802154_hw *hw, struct sk_buff *skb) { struct ca8210_priv *priv = hw->priv; int status; dev_dbg(&priv->spi->dev, "calling %s\n", __func__); priv->tx_skb = skb; priv->async_tx_pending = true; status = ca8210_skb_tx(skb, priv->nextmsduhandle, 
priv); return status; } /** * ca8210_get_ed() - Returns the measured energy on the current channel at this * instant in time * @hw: ieee802154_hw of target ca8210 * @level: Measured Energy Detect level * * Return: 0 or linux error code */ static int ca8210_get_ed(struct ieee802154_hw *hw, u8 *level) { u8 lenvar; struct ca8210_priv *priv = hw->priv; return link_to_linux_err( hwme_get_request_sync(HWME_EDVALUE, &lenvar, level, priv->spi) ); } /** * ca8210_set_channel() - Sets the current operating 802.15.4 channel of the * ca8210 * @hw: ieee802154_hw of target ca8210 * @page: Channel page to set * @channel: Channel number to set * * Return: 0 or linux error code */ static int ca8210_set_channel( struct ieee802154_hw *hw, u8 page, u8 channel ) { u8 status; struct ca8210_priv *priv = hw->priv; status = mlme_set_request_sync( PHY_CURRENT_CHANNEL, 0, 1, &channel, priv->spi ); if (status) { dev_err( &priv->spi->dev, "error setting channel, MLME-SET.confirm status = %d\n", status ); } return link_to_linux_err(status); } /** * ca8210_set_hw_addr_filt() - Sets the address filtering parameters of the * ca8210 * @hw: ieee802154_hw of target ca8210 * @filt: Filtering parameters * @changed: Bitmap representing which parameters to change * * Effectively just sets the actual addressing information identifying this node * as all filtering is performed by the ca8210 as detailed in the IEEE 802.15.4 * 2006 specification. * * Return: 0 or linux error code */ static int ca8210_set_hw_addr_filt( struct ieee802154_hw *hw, struct ieee802154_hw_addr_filt *filt, unsigned long changed ) { u8 status = 0; struct ca8210_priv *priv = hw->priv; if (changed & IEEE802154_AFILT_PANID_CHANGED) { status = mlme_set_request_sync( MAC_PAN_ID, 0, 2, &filt->pan_id, priv->spi ); if (status) { dev_err( &priv->spi->dev, "error setting pan id, MLME-SET.confirm status = %d", status ); return link_to_linux_err(status); } } if (changed & IEEE802154_AFILT_SADDR_CHANGED) { status = mlme_set_request_sync( MAC_SHORT_ADDRESS, 0, 2, &filt->short_addr, priv->spi ); if (status) { dev_err( &priv->spi->dev, "error setting short address, MLME-SET.confirm status = %d", status ); return link_to_linux_err(status); } } if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) { status = mlme_set_request_sync( NS_IEEE_ADDRESS, 0, 8, &filt->ieee_addr, priv->spi ); if (status) { dev_err( &priv->spi->dev, "error setting ieee address, MLME-SET.confirm status = %d", status ); return link_to_linux_err(status); } } /* TODO: Should use MLME_START to set coord bit? 
*/ return 0; } /** * ca8210_set_tx_power() - Sets the transmit power of the ca8210 * @hw: ieee802154_hw of target ca8210 * @mbm: Transmit power in mBm (dBm*100) * * Return: 0 or linux error code */ static int ca8210_set_tx_power(struct ieee802154_hw *hw, s32 mbm) { struct ca8210_priv *priv = hw->priv; mbm /= 100; return link_to_linux_err( mlme_set_request_sync(PHY_TRANSMIT_POWER, 0, 1, &mbm, priv->spi) ); } /** * ca8210_set_cca_mode() - Sets the clear channel assessment mode of the ca8210 * @hw: ieee802154_hw of target ca8210 * @cca: CCA mode to set * * Return: 0 or linux error code */ static int ca8210_set_cca_mode( struct ieee802154_hw *hw, const struct wpan_phy_cca *cca ) { u8 status; u8 cca_mode; struct ca8210_priv *priv = hw->priv; cca_mode = cca->mode & 3; if (cca_mode == 3 && cca->opt == NL802154_CCA_OPT_ENERGY_CARRIER_OR) { /* cca_mode 0 == CS OR ED, 3 == CS AND ED */ cca_mode = 0; } status = mlme_set_request_sync( PHY_CCA_MODE, 0, 1, &cca_mode, priv->spi ); if (status) { dev_err( &priv->spi->dev, "error setting cca mode, MLME-SET.confirm status = %d", status ); } return link_to_linux_err(status); } /** * ca8210_set_cca_ed_level() - Sets the CCA ED level of the ca8210 * @hw: ieee802154_hw of target ca8210 * @level: ED level to set (in mbm) * * Sets the minimum threshold of measured energy above which the ca8210 will * back off and retry a transmission. * * Return: 0 or linux error code */ static int ca8210_set_cca_ed_level(struct ieee802154_hw *hw, s32 level) { u8 status; u8 ed_threshold = (level / 100) * 2 + 256; struct ca8210_priv *priv = hw->priv; status = hwme_set_request_sync( HWME_EDTHRESHOLD, 1, &ed_threshold, priv->spi ); if (status) { dev_err( &priv->spi->dev, "error setting ed threshold, HWME-SET.confirm status = %d", status ); } return link_to_linux_err(status); } /** * ca8210_set_csma_params() - Sets the CSMA parameters of the ca8210 * @hw: ieee802154_hw of target ca8210 * @min_be: Minimum backoff exponent when backing off a transmission * @max_be: Maximum backoff exponent when backing off a transmission * @retries: Number of times to retry after backing off * * Return: 0 or linux error code */ static int ca8210_set_csma_params( struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries ) { u8 status; struct ca8210_priv *priv = hw->priv; status = mlme_set_request_sync(MAC_MIN_BE, 0, 1, &min_be, priv->spi); if (status) { dev_err( &priv->spi->dev, "error setting min be, MLME-SET.confirm status = %d", status ); return link_to_linux_err(status); } status = mlme_set_request_sync(MAC_MAX_BE, 0, 1, &max_be, priv->spi); if (status) { dev_err( &priv->spi->dev, "error setting max be, MLME-SET.confirm status = %d", status ); return link_to_linux_err(status); } status = mlme_set_request_sync( MAC_MAX_CSMA_BACKOFFS, 0, 1, &retries, priv->spi ); if (status) { dev_err( &priv->spi->dev, "error setting max csma backoffs, MLME-SET.confirm status = %d", status ); } return link_to_linux_err(status); } /** * ca8210_set_frame_retries() - Sets the maximum frame retries of the ca8210 * @hw: ieee802154_hw of target ca8210 * @retries: Number of retries * * Sets the number of times to retry a transmission if no acknowledgment was * was received from the other end when one was requested. 
* * Return: 0 or linux error code */ static int ca8210_set_frame_retries(struct ieee802154_hw *hw, s8 retries) { u8 status; struct ca8210_priv *priv = hw->priv; status = mlme_set_request_sync( MAC_MAX_FRAME_RETRIES, 0, 1, &retries, priv->spi ); if (status) { dev_err( &priv->spi->dev, "error setting frame retries, MLME-SET.confirm status = %d", status ); } return link_to_linux_err(status); } static int ca8210_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on) { u8 status; struct ca8210_priv *priv = hw->priv; status = mlme_set_request_sync( MAC_PROMISCUOUS_MODE, 0, 1, (const void *)&on, priv->spi ); if (status) { dev_err( &priv->spi->dev, "error setting promiscuous mode, MLME-SET.confirm status = %d", status ); } else { priv->promiscuous = on; } return link_to_linux_err(status); } static const struct ieee802154_ops ca8210_phy_ops = { .start = ca8210_start, .stop = ca8210_stop, .xmit_async = ca8210_xmit_async, .ed = ca8210_get_ed, .set_channel = ca8210_set_channel, .set_hw_addr_filt = ca8210_set_hw_addr_filt, .set_txpower = ca8210_set_tx_power, .set_cca_mode = ca8210_set_cca_mode, .set_cca_ed_level = ca8210_set_cca_ed_level, .set_csma_params = ca8210_set_csma_params, .set_frame_retries = ca8210_set_frame_retries, .set_promiscuous_mode = ca8210_set_promiscuous_mode }; /* Test/EVBME Interface */ /** * ca8210_test_int_open() - Opens the test interface to the userspace * @inodp: inode representation of file interface * @filp: file interface * * Return: 0 or linux error code */ static int ca8210_test_int_open(struct inode *inodp, struct file *filp) { struct ca8210_priv *priv = inodp->i_private; filp->private_data = priv; return 0; } /** * ca8210_test_check_upstream() - Checks a command received from the upstream * testing interface for required action * @buf: Buffer containing command to check * @device_ref: Nondescript pointer to target device * * Return: 0 or linux error code */ static int ca8210_test_check_upstream(u8 *buf, void *device_ref) { int ret; u8 response[CA8210_SPI_BUF_SIZE]; if (buf[0] == SPI_MLME_SET_REQUEST) { ret = tdme_checkpibattribute(buf[2], buf[4], buf + 5); if (ret) { response[0] = SPI_MLME_SET_CONFIRM; response[1] = 3; response[2] = MAC_INVALID_PARAMETER; response[3] = buf[2]; response[4] = buf[3]; if (cascoda_api_upstream) cascoda_api_upstream(response, 5, device_ref); return ret; } } if (buf[0] == SPI_MLME_ASSOCIATE_REQUEST) { return tdme_channelinit(buf[2], device_ref); } else if (buf[0] == SPI_MLME_START_REQUEST) { return tdme_channelinit(buf[4], device_ref); } else if ( (buf[0] == SPI_MLME_SET_REQUEST) && (buf[2] == PHY_CURRENT_CHANNEL) ) { return tdme_channelinit(buf[5], device_ref); } else if ( (buf[0] == SPI_TDME_SET_REQUEST) && (buf[2] == TDME_CHANNEL) ) { return tdme_channelinit(buf[4], device_ref); } else if ( (CA8210_MAC_WORKAROUNDS) && (buf[0] == SPI_MLME_RESET_REQUEST) && (buf[2] == 1) ) { /* reset COORD Bit for Channel Filtering as Coordinator */ return tdme_setsfr_request_sync( 0, CA8210_SFR_MACCON, 0, device_ref ); } return 0; } /* End of EVBMECheckSerialCommand() */ /** * ca8210_test_int_user_write() - Called by a process in userspace to send a * message to the ca8210 drivers * @filp: file interface * @in_buf: Buffer containing message to write * @len: length of message * @off: file offset * * Return: 0 or linux error code */ static ssize_t ca8210_test_int_user_write( struct file *filp, const char __user *in_buf, size_t len, loff_t *off ) { int ret; struct ca8210_priv *priv = filp->private_data; u8 command[CA8210_SPI_BUF_SIZE]; memset(command, 
SPI_IDLE, 6); if (len > CA8210_SPI_BUF_SIZE || len < 2) { dev_warn( &priv->spi->dev, "userspace requested erroneous write length (%zu)\n", len ); return -EBADE; } ret = copy_from_user(command, in_buf, len); if (ret) { dev_err( &priv->spi->dev, "%d bytes could not be copied from userspace\n", ret ); return -EIO; } if (len != command[1] + 2) { dev_err( &priv->spi->dev, "write len does not match packet length field\n" ); return -EBADE; } ret = ca8210_test_check_upstream(command, priv->spi); if (ret == 0) { ret = ca8210_spi_exchange( command, command[1] + 2, NULL, priv->spi ); if (ret < 0) { /* effectively 0 bytes were written successfully */ dev_err( &priv->spi->dev, "spi exchange failed\n" ); return ret; } if (command[0] & SPI_SYN) priv->sync_down++; } return len; } /** * ca8210_test_int_user_read() - Called by a process in userspace to read a * message from the ca8210 drivers * @filp: file interface * @buf: Buffer to write message to * @len: length of message to read (ignored) * @offp: file offset * * If the O_NONBLOCK flag was set when opening the file then this function will * not block, i.e. it will return if the fifo is empty. Otherwise the function * will block, i.e. wait until new data arrives. * * Return: number of bytes read */ static ssize_t ca8210_test_int_user_read( struct file *filp, char __user *buf, size_t len, loff_t *offp ) { int i, cmdlen; struct ca8210_priv *priv = filp->private_data; unsigned char *fifo_buffer; unsigned long bytes_not_copied; if (filp->f_flags & O_NONBLOCK) { /* Non-blocking mode */ if (kfifo_is_empty(&priv->test.up_fifo)) return 0; } else { /* Blocking mode */ wait_event_interruptible( priv->test.readq, !kfifo_is_empty(&priv->test.up_fifo) ); } if (kfifo_out(&priv->test.up_fifo, &fifo_buffer, 4) != 4) { dev_err( &priv->spi->dev, "test_interface: Wrong number of elements popped from upstream fifo\n" ); return 0; } cmdlen = fifo_buffer[1]; bytes_not_copied = cmdlen + 2; bytes_not_copied = copy_to_user(buf, fifo_buffer, bytes_not_copied); if (bytes_not_copied > 0) { dev_err( &priv->spi->dev, "%lu bytes could not be copied to user space!\n", bytes_not_copied ); } dev_dbg(&priv->spi->dev, "test_interface: Cmd len = %d\n", cmdlen); dev_dbg(&priv->spi->dev, "test_interface: Read\n"); for (i = 0; i < cmdlen + 2; i++) dev_dbg(&priv->spi->dev, "%#03x\n", fifo_buffer[i]); kfree(fifo_buffer); return cmdlen + 2; } /** * ca8210_test_int_ioctl() - Called by a process in userspace to enact an * arbitrary action * @filp: file interface * @ioctl_num: which action to enact * @ioctl_param: arbitrary parameter for the action * * Return: status */ static long ca8210_test_int_ioctl( struct file *filp, unsigned int ioctl_num, unsigned long ioctl_param ) { struct ca8210_priv *priv = filp->private_data; switch (ioctl_num) { case CA8210_IOCTL_HARD_RESET: ca8210_reset_send(priv->spi, ioctl_param); break; default: break; } return 0; } /** * ca8210_test_int_poll() - Called by a process in userspace to determine which * actions are currently possible for the file * @filp: file interface * @ptable: poll table * * Return: set of poll return flags */ static __poll_t ca8210_test_int_poll( struct file *filp, struct poll_table_struct *ptable ) { __poll_t return_flags = 0; struct ca8210_priv *priv = filp->private_data; poll_wait(filp, &priv->test.readq, ptable); if (!kfifo_is_empty(&priv->test.up_fifo)) return_flags |= (EPOLLIN | EPOLLRDNORM); if (wait_event_interruptible( priv->test.readq, !kfifo_is_empty(&priv->test.up_fifo))) { return EPOLLERR; } return return_flags; } static const struct 
file_operations test_int_fops = { .read = ca8210_test_int_user_read, .write = ca8210_test_int_user_write, .open = ca8210_test_int_open, .release = NULL, .unlocked_ioctl = ca8210_test_int_ioctl, .poll = ca8210_test_int_poll }; /* Init/Deinit */ /** * ca8210_get_platform_data() - Populate a ca8210_platform_data object * @spi_device: Pointer to ca8210 spi device object to get data for * @pdata: Pointer to ca8210_platform_data object to populate * * Return: 0 or linux error code */ static int ca8210_get_platform_data( struct spi_device *spi_device, struct ca8210_platform_data *pdata ) { int ret = 0; if (!spi_device->dev.of_node) return -EINVAL; pdata->extclockenable = of_property_read_bool( spi_device->dev.of_node, "extclock-enable" ); if (pdata->extclockenable) { ret = of_property_read_u32( spi_device->dev.of_node, "extclock-freq", &pdata->extclockfreq ); if (ret < 0) return ret; ret = of_property_read_u32( spi_device->dev.of_node, "extclock-gpio", &pdata->extclockgpio ); } return ret; } /** * ca8210_config_extern_clk() - Configure the external clock provided by the * ca8210 * @pdata: Pointer to ca8210_platform_data containing clock parameters * @spi: Pointer to target ca8210 spi device * @on: True to turn the clock on, false to turn off * * The external clock is configured with a frequency and output pin taken from * the platform data. * * Return: 0 or linux error code */ static int ca8210_config_extern_clk( struct ca8210_platform_data *pdata, struct spi_device *spi, bool on ) { u8 clkparam[2]; if (on) { dev_info(&spi->dev, "Switching external clock on\n"); switch (pdata->extclockfreq) { case SIXTEEN_MHZ: clkparam[0] = 1; break; case EIGHT_MHZ: clkparam[0] = 2; break; case FOUR_MHZ: clkparam[0] = 3; break; case TWO_MHZ: clkparam[0] = 4; break; case ONE_MHZ: clkparam[0] = 5; break; default: dev_crit(&spi->dev, "Invalid extclock-freq\n"); return -EINVAL; } clkparam[1] = pdata->extclockgpio; } else { dev_info(&spi->dev, "Switching external clock off\n"); clkparam[0] = 0; /* off */ clkparam[1] = 0; } return link_to_linux_err( hwme_set_request_sync(HWME_SYSCLKOUT, 2, clkparam, spi) ); } /** * ca8210_register_ext_clock() - Register ca8210's external clock with kernel * @spi: Pointer to target ca8210 spi device * * Return: 0 or linux error code */ static int ca8210_register_ext_clock(struct spi_device *spi) { struct device_node *np = spi->dev.of_node; struct ca8210_priv *priv = spi_get_drvdata(spi); struct ca8210_platform_data *pdata = spi->dev.platform_data; int ret = 0; if (!np) return -EFAULT; priv->clk = clk_register_fixed_rate( &spi->dev, np->name, NULL, 0, pdata->extclockfreq ); if (IS_ERR(priv->clk)) { dev_crit(&spi->dev, "Failed to register external clk\n"); return PTR_ERR(priv->clk); } ret = of_clk_add_provider(np, of_clk_src_simple_get, priv->clk); if (ret) { clk_unregister(priv->clk); dev_crit( &spi->dev, "Failed to register external clock as clock provider\n" ); } else { dev_info(&spi->dev, "External clock set as clock provider\n"); } return ret; } /** * ca8210_unregister_ext_clock() - Unregister ca8210's external clock with * kernel * @spi: Pointer to target ca8210 spi device */ static void ca8210_unregister_ext_clock(struct spi_device *spi) { struct ca8210_priv *priv = spi_get_drvdata(spi); if (!priv->clk) return of_clk_del_provider(spi->dev.of_node); clk_unregister(priv->clk); dev_info(&spi->dev, "External clock unregistered\n"); } /** * ca8210_reset_init() - Initialise the reset input to the ca8210 * @spi: Pointer to target ca8210 spi device * * Return: 0 or linux error code */ 
static int ca8210_reset_init(struct spi_device *spi) { int ret; struct ca8210_platform_data *pdata = spi->dev.platform_data; pdata->gpio_reset = of_get_named_gpio( spi->dev.of_node, "reset-gpio", 0 ); ret = gpio_direction_output(pdata->gpio_reset, 1); if (ret < 0) { dev_crit( &spi->dev, "Reset GPIO %d did not set to output mode\n", pdata->gpio_reset ); } return ret; } /** * ca8210_interrupt_init() - Initialise the irq output from the ca8210 * @spi: Pointer to target ca8210 spi device * * Return: 0 or linux error code */ static int ca8210_interrupt_init(struct spi_device *spi) { int ret; struct ca8210_platform_data *pdata = spi->dev.platform_data; pdata->gpio_irq = of_get_named_gpio( spi->dev.of_node, "irq-gpio", 0 ); pdata->irq_id = gpio_to_irq(pdata->gpio_irq); if (pdata->irq_id < 0) { dev_crit( &spi->dev, "Could not get irq for gpio pin %d\n", pdata->gpio_irq ); gpio_free(pdata->gpio_irq); return pdata->irq_id; } ret = request_irq( pdata->irq_id, ca8210_interrupt_handler, IRQF_TRIGGER_FALLING, "ca8210-irq", spi_get_drvdata(spi) ); if (ret) { dev_crit(&spi->dev, "request_irq %d failed\n", pdata->irq_id); gpio_unexport(pdata->gpio_irq); gpio_free(pdata->gpio_irq); } return ret; } /** * ca8210_dev_com_init() - Initialise the spi communication component * @priv: Pointer to private data structure * * Return: 0 or linux error code */ static int ca8210_dev_com_init(struct ca8210_priv *priv) { priv->mlme_workqueue = alloc_ordered_workqueue( "MLME work queue", WQ_UNBOUND ); if (!priv->mlme_workqueue) { dev_crit(&priv->spi->dev, "alloc of mlme_workqueue failed!\n"); return -ENOMEM; } priv->irq_workqueue = alloc_ordered_workqueue( "ca8210 irq worker", WQ_UNBOUND ); if (!priv->irq_workqueue) { dev_crit(&priv->spi->dev, "alloc of irq_workqueue failed!\n"); return -ENOMEM; } return 0; } /** * ca8210_dev_com_clear() - Deinitialise the spi communication component * @priv: Pointer to private data structure */ static void ca8210_dev_com_clear(struct ca8210_priv *priv) { flush_workqueue(priv->mlme_workqueue); destroy_workqueue(priv->mlme_workqueue); flush_workqueue(priv->irq_workqueue); destroy_workqueue(priv->irq_workqueue); } #define CA8210_MAX_TX_POWERS (9) static const s32 ca8210_tx_powers[CA8210_MAX_TX_POWERS] = { 800, 700, 600, 500, 400, 300, 200, 100, 0 }; #define CA8210_MAX_ED_LEVELS (21) static const s32 ca8210_ed_levels[CA8210_MAX_ED_LEVELS] = { -10300, -10250, -10200, -10150, -10100, -10050, -10000, -9950, -9900, -9850, -9800, -9750, -9700, -9650, -9600, -9550, -9500, -9450, -9400, -9350, -9300 }; /** * ca8210_hw_setup() - Populate the ieee802154_hw phy attributes with the * ca8210's defaults * @ca8210_hw: Pointer to ieee802154_hw to populate */ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw) { /* Support channels 11-26 */ ca8210_hw->phy->supported.channels[0] = CA8210_VALID_CHANNELS; ca8210_hw->phy->supported.tx_powers_size = CA8210_MAX_TX_POWERS; ca8210_hw->phy->supported.tx_powers = ca8210_tx_powers; ca8210_hw->phy->supported.cca_ed_levels_size = CA8210_MAX_ED_LEVELS; ca8210_hw->phy->supported.cca_ed_levels = ca8210_ed_levels; ca8210_hw->phy->current_channel = 18; ca8210_hw->phy->current_page = 0; ca8210_hw->phy->transmit_power = 800; ca8210_hw->phy->cca.mode = NL802154_CCA_ENERGY_CARRIER; ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND; ca8210_hw->phy->cca_ed_level = -9800; ca8210_hw->phy->symbol_duration = 16; ca8210_hw->phy->lifs_period = 40; ca8210_hw->phy->sifs_period = 12; ca8210_hw->flags = IEEE802154_HW_AFILT | IEEE802154_HW_OMIT_CKSUM | 
IEEE802154_HW_FRAME_RETRIES | IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS; ca8210_hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL | WPAN_PHY_FLAG_CCA_MODE; } /** * ca8210_test_interface_init() - Initialise the test file interface * @priv: Pointer to private data structure * * Provided as an alternative to the standard linux network interface, the test * interface exposes a file in the filesystem (ca8210_test) that allows * 802.15.4 SAP Commands and Cascoda EVBME commands to be sent directly to * the stack. * * Return: 0 or linux error code */ static int ca8210_test_interface_init(struct ca8210_priv *priv) { struct ca8210_test *test = &priv->test; char node_name[32]; snprintf( node_name, sizeof(node_name), "ca8210@%d_%d", priv->spi->master->bus_num, priv->spi->chip_select ); test->ca8210_dfs_spi_int = debugfs_create_file( node_name, 0600, /* S_IRUSR | S_IWUSR */ NULL, priv, &test_int_fops ); if (IS_ERR(test->ca8210_dfs_spi_int)) { dev_err( &priv->spi->dev, "Error %ld when creating debugfs node\n", PTR_ERR(test->ca8210_dfs_spi_int) ); return PTR_ERR(test->ca8210_dfs_spi_int); } debugfs_create_symlink("ca8210", NULL, node_name); init_waitqueue_head(&test->readq); return kfifo_alloc( &test->up_fifo, CA8210_TEST_INT_FIFO_SIZE, GFP_KERNEL ); } /** * ca8210_test_interface_clear() - Deinitialise the test file interface * @priv: Pointer to private data structure */ static void ca8210_test_interface_clear(struct ca8210_priv *priv) { struct ca8210_test *test = &priv->test; debugfs_remove(test->ca8210_dfs_spi_int); kfifo_free(&test->up_fifo); dev_info(&priv->spi->dev, "Test interface removed\n"); } /** * ca8210_remove() - Shut down a ca8210 upon being disconnected * @priv: Pointer to private data structure * * Return: 0 or linux error code */ static int ca8210_remove(struct spi_device *spi_device) { struct ca8210_priv *priv; struct ca8210_platform_data *pdata; dev_info(&spi_device->dev, "Removing ca8210\n"); pdata = spi_device->dev.platform_data; if (pdata) { if (pdata->extclockenable) { ca8210_unregister_ext_clock(spi_device); ca8210_config_extern_clk(pdata, spi_device, 0); } free_irq(pdata->irq_id, spi_device->dev.driver_data); kfree(pdata); spi_device->dev.platform_data = NULL; } /* get spi_device private data */ priv = spi_get_drvdata(spi_device); if (priv) { dev_info( &spi_device->dev, "sync_down = %d, sync_up = %d\n", priv->sync_down, priv->sync_up ); ca8210_dev_com_clear(spi_device->dev.driver_data); if (priv->hw) { if (priv->hw_registered) ieee802154_unregister_hw(priv->hw); ieee802154_free_hw(priv->hw); priv->hw = NULL; dev_info( &spi_device->dev, "Unregistered & freed ieee802154_hw.\n" ); } if (IS_ENABLED(CONFIG_IEEE802154_CA8210_DEBUGFS)) ca8210_test_interface_clear(priv); } return 0; } /** * ca8210_probe() - Set up a connected ca8210 upon being detected by the system * @priv: Pointer to private data structure * * Return: 0 or linux error code */ static int ca8210_probe(struct spi_device *spi_device) { struct ca8210_priv *priv; struct ieee802154_hw *hw; struct ca8210_platform_data *pdata; int ret; dev_info(&spi_device->dev, "Inserting ca8210\n"); /* allocate ieee802154_hw and private data */ hw = ieee802154_alloc_hw(sizeof(struct ca8210_priv), &ca8210_phy_ops); if (!hw) { dev_crit(&spi_device->dev, "ieee802154_alloc_hw failed\n"); ret = -ENOMEM; goto error; } priv = hw->priv; priv->hw = hw; priv->spi = spi_device; hw->parent = &spi_device->dev; spin_lock_init(&priv->lock); priv->async_tx_pending = false; priv->hw_registered = false; priv->sync_up = 0; 
priv->sync_down = 0; priv->promiscuous = false; priv->retries = 0; init_completion(&priv->ca8210_is_awake); init_completion(&priv->spi_transfer_complete); init_completion(&priv->sync_exchange_complete); spi_set_drvdata(priv->spi, priv); if (IS_ENABLED(CONFIG_IEEE802154_CA8210_DEBUGFS)) { cascoda_api_upstream = ca8210_test_int_driver_write; ca8210_test_interface_init(priv); } else { cascoda_api_upstream = NULL; } ca8210_hw_setup(hw); ieee802154_random_extended_addr(&hw->phy->perm_extended_addr); pdata = kmalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) { ret = -ENOMEM; goto error; } priv->spi->dev.platform_data = pdata; ret = ca8210_get_platform_data(priv->spi, pdata); if (ret) { dev_crit(&spi_device->dev, "ca8210_get_platform_data failed\n"); goto error; } ret = ca8210_dev_com_init(priv); if (ret) { dev_crit(&spi_device->dev, "ca8210_dev_com_init failed\n"); goto error; } ret = ca8210_reset_init(priv->spi); if (ret) { dev_crit(&spi_device->dev, "ca8210_reset_init failed\n"); goto error; } ret = ca8210_interrupt_init(priv->spi); if (ret) { dev_crit(&spi_device->dev, "ca8210_interrupt_init failed\n"); goto error; } msleep(100); ca8210_reset_send(priv->spi, 1); ret = tdme_chipinit(priv->spi); if (ret) { dev_crit(&spi_device->dev, "tdme_chipinit failed\n"); goto error; } if (pdata->extclockenable) { ret = ca8210_config_extern_clk(pdata, priv->spi, 1); if (ret) { dev_crit( &spi_device->dev, "ca8210_config_extern_clk failed\n" ); goto error; } ret = ca8210_register_ext_clock(priv->spi); if (ret) { dev_crit( &spi_device->dev, "ca8210_register_ext_clock failed\n" ); goto error; } } ret = ieee802154_register_hw(hw); if (ret) { dev_crit(&spi_device->dev, "ieee802154_register_hw failed\n"); goto error; } priv->hw_registered = true; return 0; error: msleep(100); /* wait for pending spi transfers to complete */ ca8210_remove(spi_device); return link_to_linux_err(ret); } static const struct of_device_id ca8210_of_ids[] = { {.compatible = "cascoda,ca8210", }, {}, }; MODULE_DEVICE_TABLE(of, ca8210_of_ids); static struct spi_driver ca8210_spi_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = of_match_ptr(ca8210_of_ids), }, .probe = ca8210_probe, .remove = ca8210_remove }; module_spi_driver(ca8210_spi_driver); MODULE_AUTHOR("Harry Morris <h.morris@cascoda.com>"); MODULE_DESCRIPTION("CA-8210 SoftMAC driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION("1.0");
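/*
 * Illustrative sketch (not part of the driver): two of the PHY callbacks
 * above perform small unit conversions that are easy to misread.
 * ca8210_set_tx_power() receives the power in mBm (dBm * 100) and simply
 * divides by 100 before writing PHY_TRANSMIT_POWER, while
 * ca8210_set_cca_ed_level() folds a (negative) mBm energy level into the
 * unsigned byte written to HWME_EDTHRESHOLD as (level / 100) * 2 + 256.
 * The standalone userspace program below only restates that arithmetic;
 * the helper names are invented for the example, and the values used are
 * the extremes of the ca8210_tx_powers[] and ca8210_ed_levels[] tables.
 */
#include <stdint.h>
#include <stdio.h>

/* mBm (dBm * 100) -> whole dBm, as written to PHY_TRANSMIT_POWER */
static int32_t tx_power_reg_from_mbm(int32_t mbm)
{
	return mbm / 100;
}

/* mBm -> HWME_EDTHRESHOLD encoding: 2 * dBm, offset by 256 into a u8 */
static uint8_t ed_threshold_reg_from_mbm(int32_t mbm)
{
	return (uint8_t)((mbm / 100) * 2 + 256);
}

int main(void)
{
	printf("tx   +8 dBm (   800 mBm) -> %d\n",
	       (int)tx_power_reg_from_mbm(800));
	printf("tx    0 dBm (     0 mBm) -> %d\n",
	       (int)tx_power_reg_from_mbm(0));
	printf("ed -103 dBm (-10300 mBm) -> %u\n",
	       (unsigned int)ed_threshold_reg_from_mbm(-10300));
	printf("ed  -93 dBm ( -9300 mBm) -> %u\n",
	       (unsigned int)ed_threshold_reg_from_mbm(-9300));
	return 0;	/* prints 8, 0, 50 and 70 respectively */
}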
./CrossVul/dataset_final_sorted/CWE-400/c/good_1265_0
crossvul-cpp_data_bad_1273_6
/* * Copyright 2016 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include <linux/slab.h> #include "dm_services.h" #include "dc.h" #include "resource.h" #include "include/irq_service_interface.h" #include "dcn20/dcn20_resource.h" #include "dcn10/dcn10_hubp.h" #include "dcn10/dcn10_ipp.h" #include "dcn20_hubbub.h" #include "dcn20_mpc.h" #include "dcn20_hubp.h" #include "irq/dcn20/irq_service_dcn20.h" #include "dcn20_dpp.h" #include "dcn20_optc.h" #include "dcn20_hwseq.h" #include "dce110/dce110_hw_sequencer.h" #include "dcn10/dcn10_resource.h" #include "dcn20_opp.h" #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dcn20_dsc.h" #endif #include "dcn20_link_encoder.h" #include "dcn20_stream_encoder.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dml/display_mode_vba.h" #include "dcn20_dccg.h" #include "dcn20_vmid.h" #include "navi10_ip_offset.h" #include "dcn/dcn_2_0_0_offset.h" #include "dcn/dcn_2_0_0_sh_mask.h" #include "nbio/nbio_2_3_offset.h" #include "dcn20/dcn20_dwb.h" #include "dcn20/dcn20_mmhubbub.h" #include "mmhub/mmhub_2_0_0_offset.h" #include "mmhub/mmhub_2_0_0_sh_mask.h" #include "reg_helper.h" #include "dce/dce_abm.h" #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" #include "vm_helper.h" #include "amdgpu_socbb.h" /* NV12 SOC BB is currently in FW, mark SW bounding box invalid. 
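 * The dcn2_0_nv12_soc table further down is therefore left zero-initialized;
 * it is expected to be filled in from the firmware-provided bounding box at
 * driver init rather than from the static defaults in this file.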
*/ #define SOC_BOUNDING_BOX_VALID false #define DC_LOGGER_INIT(logger) struct _vcs_dpi_ip_params_st dcn2_0_ip = { .odm_capable = 1, .gpuvm_enable = 0, .hostvm_enable = 0, .gpuvm_max_page_table_levels = 4, .hostvm_max_page_table_levels = 4, .hostvm_cached_page_table_levels = 0, .pte_group_size_bytes = 2048, #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 6, #else .num_dsc = 0, #endif .rob_buffer_size_kbytes = 168, .det_buffer_size_kbytes = 164, .dpte_buffer_size_in_pte_reqs_luma = 84, .pde_proc_buffer_size_64k_reqs = 48, .dpp_output_buffer_pixels = 2560, .opp_output_buffer_lines = 1, .pixel_chunk_size_kbytes = 8, .pte_chunk_size_kbytes = 2, .meta_chunk_size_kbytes = 2, .writeback_chunk_size_kbytes = 2, .line_buffer_size_bits = 789504, .is_line_buffer_bpp_fixed = 0, .line_buffer_fixed_bpp = 0, .dcc_supported = true, .max_line_buffer_lines = 12, .writeback_luma_buffer_size_kbytes = 12, .writeback_chroma_buffer_size_kbytes = 8, .writeback_chroma_line_buffer_width_pixels = 4, .writeback_max_hscl_ratio = 1, .writeback_max_vscl_ratio = 1, .writeback_min_hscl_ratio = 1, .writeback_min_vscl_ratio = 1, .writeback_max_hscl_taps = 12, .writeback_max_vscl_taps = 12, .writeback_line_buffer_luma_buffer_size = 0, .writeback_line_buffer_chroma_buffer_size = 14643, .cursor_buffer_size = 8, .cursor_chunk_size = 2, .max_num_otg = 6, .max_num_dpp = 6, .max_num_wb = 1, .max_dchub_pscl_bw_pix_per_clk = 4, .max_pscl_lb_bw_pix_per_clk = 2, .max_lb_vscl_bw_pix_per_clk = 4, .max_vscl_hscl_bw_pix_per_clk = 4, .max_hscl_ratio = 8, .max_vscl_ratio = 8, .hscl_mults = 4, .vscl_mults = 4, .max_hscl_taps = 8, .max_vscl_taps = 8, .dispclk_ramp_margin_percent = 1, .underscan_factor = 1.10, .min_vblank_lines = 32, // .dppclk_delay_subtotal = 77, // .dppclk_delay_scl_lb_only = 16, .dppclk_delay_scl = 50, .dppclk_delay_cnvc_formatter = 8, .dppclk_delay_cnvc_cursor = 6, .dispclk_delay_subtotal = 87, // .dcfclk_cstate_latency = 10, // SRExitTime .max_inter_dcn_tile_repeaters = 8, .xfc_supported = true, .xfc_fill_bw_overhead_percent = 10.0, .xfc_fill_constant_bytes = 0, }; struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = { /* Defaults that get patched on driver load from firmware. 
*/ .clock_limits = { { .state = 0, .dcfclk_mhz = 560.0, .fabricclk_mhz = 560.0, .dispclk_mhz = 513.0, .dppclk_mhz = 513.0, .phyclk_mhz = 540.0, .socclk_mhz = 560.0, .dscclk_mhz = 171.0, .dram_speed_mts = 8960.0, }, { .state = 1, .dcfclk_mhz = 694.0, .fabricclk_mhz = 694.0, .dispclk_mhz = 642.0, .dppclk_mhz = 642.0, .phyclk_mhz = 600.0, .socclk_mhz = 694.0, .dscclk_mhz = 214.0, .dram_speed_mts = 11104.0, }, { .state = 2, .dcfclk_mhz = 875.0, .fabricclk_mhz = 875.0, .dispclk_mhz = 734.0, .dppclk_mhz = 734.0, .phyclk_mhz = 810.0, .socclk_mhz = 875.0, .dscclk_mhz = 245.0, .dram_speed_mts = 14000.0, }, { .state = 3, .dcfclk_mhz = 1000.0, .fabricclk_mhz = 1000.0, .dispclk_mhz = 1100.0, .dppclk_mhz = 1100.0, .phyclk_mhz = 810.0, .socclk_mhz = 1000.0, .dscclk_mhz = 367.0, .dram_speed_mts = 16000.0, }, { .state = 4, .dcfclk_mhz = 1200.0, .fabricclk_mhz = 1200.0, .dispclk_mhz = 1284.0, .dppclk_mhz = 1284.0, .phyclk_mhz = 810.0, .socclk_mhz = 1200.0, .dscclk_mhz = 428.0, .dram_speed_mts = 16000.0, }, /*Extra state, no dispclk ramping*/ { .state = 5, .dcfclk_mhz = 1200.0, .fabricclk_mhz = 1200.0, .dispclk_mhz = 1284.0, .dppclk_mhz = 1284.0, .phyclk_mhz = 810.0, .socclk_mhz = 1200.0, .dscclk_mhz = 428.0, .dram_speed_mts = 16000.0, }, }, .num_states = 5, .sr_exit_time_us = 8.6, .sr_enter_plus_exit_time_us = 10.9, .urgent_latency_us = 4.0, .urgent_latency_pixel_data_only_us = 4.0, .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, .urgent_latency_vm_data_only_us = 4.0, .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0, .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0, .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, .max_avg_sdp_bw_use_normal_percent = 40.0, .max_avg_dram_bw_use_normal_percent = 40.0, .writeback_latency_us = 12.0, .ideal_dram_bw_after_urgent_percent = 40.0, .max_request_size_bytes = 256, .dram_channel_width_bytes = 2, .fabric_datapath_to_dcn_data_return_bytes = 64, .dcn_downspread_percent = 0.5, .downspread_percent = 0.38, .dram_page_open_time_ns = 50.0, .dram_rw_turnaround_time_ns = 17.5, .dram_return_buffer_per_channel_bytes = 8192, .round_trip_ping_latency_dcfclk_cycles = 131, .urgent_out_of_order_return_per_channel_bytes = 256, .channel_interleave_bytes = 256, .num_banks = 8, .num_chans = 16, .vmm_page_size_bytes = 4096, .dram_clock_change_latency_us = 404.0, .dummy_pstate_latency_us = 5.0, .writeback_dram_clock_change_latency_us = 23.0, .return_bus_width_bytes = 64, .dispclk_dppclk_vco_speed_mhz = 3850, .xfc_bus_transport_time_us = 20, .xfc_xbuf_latency_tolerance_us = 4, .use_urgent_burst_bw = 0 }; struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 }; #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #endif enum dcn20_clk_src_array_id { DCN20_CLK_SRC_PLL0, 
DCN20_CLK_SRC_PLL1, DCN20_CLK_SRC_PLL2, DCN20_CLK_SRC_PLL3, DCN20_CLK_SRC_PLL4, DCN20_CLK_SRC_PLL5, DCN20_CLK_SRC_TOTAL }; /* begin ********************* * macros to expend register list macro defined in HW object header file */ /* DCN */ /* TODO awful hack. fixup dcn20_dwb.h */ #undef BASE_INNER #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg #define BASE(seg) BASE_INNER(seg) #define SR(reg_name)\ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name #define SRI(reg_name, block, id)\ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name #define SRIR(var_name, reg_name, block, id)\ .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name #define SRII(reg_name, block, id)\ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name #define DCCG_SRII(reg_name, block, id)\ .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name /* NBIO */ #define NBIO_BASE_INNER(seg) \ NBIO_BASE__INST0_SEG ## seg #define NBIO_BASE(seg) \ NBIO_BASE_INNER(seg) #define NBIO_SR(reg_name)\ .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name /* MMHUB */ #define MMHUB_BASE_INNER(seg) \ MMHUB_BASE__INST0_SEG ## seg #define MMHUB_BASE(seg) \ MMHUB_BASE_INNER(seg) #define MMHUB_SR(reg_name)\ .reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \ mmMM ## reg_name static const struct bios_registers bios_regs = { NBIO_SR(BIOS_SCRATCH_3), NBIO_SR(BIOS_SCRATCH_6) }; #define clk_src_regs(index, pllid)\ [index] = {\ CS_COMMON_REG_LIST_DCN2_0(index, pllid),\ } static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(0, A), clk_src_regs(1, B), clk_src_regs(2, C), clk_src_regs(3, D), clk_src_regs(4, E), clk_src_regs(5, F) }; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) }; static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) }; static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCN10_REG_LIST() }; static const struct dce_dmcu_shift dmcu_shift = { DMCU_MASK_SH_LIST_DCN10(__SHIFT) }; static const struct dce_dmcu_mask dmcu_mask = { DMCU_MASK_SH_LIST_DCN10(_MASK) }; static const struct dce_abm_registers abm_regs = { ABM_DCN20_REG_LIST() }; static const struct dce_abm_shift abm_shift = { ABM_MASK_SH_LIST_DCN20(__SHIFT) }; static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCN20(_MASK) }; #define audio_regs(id)\ [id] = {\ AUD_COMMON_REG_LIST(id)\ } static const struct dce_audio_registers audio_regs[] = { audio_regs(0), audio_regs(1), audio_regs(2), audio_regs(3), audio_regs(4), audio_regs(5), audio_regs(6), }; #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) static const struct dce_audio_shift audio_shift = { DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) }; static const struct dce_audio_mask audio_mask = { DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) }; #define stream_enc_regs(id)\ [id] = {\ SE_DCN2_REG_LIST(id)\ } static const struct dcn10_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), stream_enc_regs(3), stream_enc_regs(4), stream_enc_regs(5), }; static const struct dcn10_stream_encoder_shift se_shift = { 
SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT) }; static const struct dcn10_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCN20(_MASK) }; #define aux_regs(id)\ [id] = {\ DCN2_AUX_REG_LIST(id)\ } static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), aux_regs(3), aux_regs(4), aux_regs(5) }; #define hpd_regs(id)\ [id] = {\ HPD_REG_LIST(id)\ } static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), hpd_regs(3), hpd_regs(4), hpd_regs(5) }; #define link_regs(id, phyid)\ [id] = {\ LE_DCN10_REG_LIST(id), \ UNIPHY_DCN2_REG_LIST(phyid), \ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ } static const struct dcn10_link_enc_registers link_enc_regs[] = { link_regs(0, A), link_regs(1, B), link_regs(2, C), link_regs(3, D), link_regs(4, E), link_regs(5, F) }; static const struct dcn10_link_enc_shift le_shift = { LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT) }; static const struct dcn10_link_enc_mask le_mask = { LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK) }; #define ipp_regs(id)\ [id] = {\ IPP_REG_LIST_DCN20(id),\ } static const struct dcn10_ipp_registers ipp_regs[] = { ipp_regs(0), ipp_regs(1), ipp_regs(2), ipp_regs(3), ipp_regs(4), ipp_regs(5), }; static const struct dcn10_ipp_shift ipp_shift = { IPP_MASK_SH_LIST_DCN20(__SHIFT) }; static const struct dcn10_ipp_mask ipp_mask = { IPP_MASK_SH_LIST_DCN20(_MASK), }; #define opp_regs(id)\ [id] = {\ OPP_REG_LIST_DCN20(id),\ } static const struct dcn20_opp_registers opp_regs[] = { opp_regs(0), opp_regs(1), opp_regs(2), opp_regs(3), opp_regs(4), opp_regs(5), }; static const struct dcn20_opp_shift opp_shift = { OPP_MASK_SH_LIST_DCN20(__SHIFT) }; static const struct dcn20_opp_mask opp_mask = { OPP_MASK_SH_LIST_DCN20(_MASK) }; #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST0(id), \ .AUXN_IMPCAL = 0, \ .AUXP_IMPCAL = 0, \ .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ } static const struct dce110_aux_registers aux_engine_regs[] = { aux_engine_regs(0), aux_engine_regs(1), aux_engine_regs(2), aux_engine_regs(3), aux_engine_regs(4), aux_engine_regs(5) }; #define tf_regs(id)\ [id] = {\ TF_REG_LIST_DCN20(id),\ } static const struct dcn2_dpp_registers tf_regs[] = { tf_regs(0), tf_regs(1), tf_regs(2), tf_regs(3), tf_regs(4), tf_regs(5), }; static const struct dcn2_dpp_shift tf_shift = { TF_REG_LIST_SH_MASK_DCN20(__SHIFT) }; static const struct dcn2_dpp_mask tf_mask = { TF_REG_LIST_SH_MASK_DCN20(_MASK) }; #define dwbc_regs_dcn2(id)\ [id] = {\ DWBC_COMMON_REG_LIST_DCN2_0(id),\ } static const struct dcn20_dwbc_registers dwbc20_regs[] = { dwbc_regs_dcn2(0), }; static const struct dcn20_dwbc_shift dwbc20_shift = { DWBC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) }; static const struct dcn20_dwbc_mask dwbc20_mask = { DWBC_COMMON_MASK_SH_LIST_DCN2_0(_MASK) }; #define mcif_wb_regs_dcn2(id)\ [id] = {\ MCIF_WB_COMMON_REG_LIST_DCN2_0(id),\ } static const struct dcn20_mmhubbub_registers mcif_wb20_regs[] = { mcif_wb_regs_dcn2(0), }; static const struct dcn20_mmhubbub_shift mcif_wb20_shift = { MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) }; static const struct dcn20_mmhubbub_mask mcif_wb20_mask = { MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(_MASK) }; static const struct dcn20_mpc_registers mpc_regs = { MPC_REG_LIST_DCN2_0(0), MPC_REG_LIST_DCN2_0(1), MPC_REG_LIST_DCN2_0(2), MPC_REG_LIST_DCN2_0(3), MPC_REG_LIST_DCN2_0(4), MPC_REG_LIST_DCN2_0(5), MPC_OUT_MUX_REG_LIST_DCN2_0(0), MPC_OUT_MUX_REG_LIST_DCN2_0(1), MPC_OUT_MUX_REG_LIST_DCN2_0(2), MPC_OUT_MUX_REG_LIST_DCN2_0(3), 
MPC_OUT_MUX_REG_LIST_DCN2_0(4), MPC_OUT_MUX_REG_LIST_DCN2_0(5), }; static const struct dcn20_mpc_shift mpc_shift = { MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) }; static const struct dcn20_mpc_mask mpc_mask = { MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK) }; #define tg_regs(id)\ [id] = {TG_COMMON_REG_LIST_DCN2_0(id)} static const struct dcn_optc_registers tg_regs[] = { tg_regs(0), tg_regs(1), tg_regs(2), tg_regs(3), tg_regs(4), tg_regs(5) }; static const struct dcn_optc_shift tg_shift = { TG_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) }; static const struct dcn_optc_mask tg_mask = { TG_COMMON_MASK_SH_LIST_DCN2_0(_MASK) }; #define hubp_regs(id)\ [id] = {\ HUBP_REG_LIST_DCN20(id)\ } static const struct dcn_hubp2_registers hubp_regs[] = { hubp_regs(0), hubp_regs(1), hubp_regs(2), hubp_regs(3), hubp_regs(4), hubp_regs(5) }; static const struct dcn_hubp2_shift hubp_shift = { HUBP_MASK_SH_LIST_DCN20(__SHIFT) }; static const struct dcn_hubp2_mask hubp_mask = { HUBP_MASK_SH_LIST_DCN20(_MASK) }; static const struct dcn_hubbub_registers hubbub_reg = { HUBBUB_REG_LIST_DCN20(0) }; static const struct dcn_hubbub_shift hubbub_shift = { HUBBUB_MASK_SH_LIST_DCN20(__SHIFT) }; static const struct dcn_hubbub_mask hubbub_mask = { HUBBUB_MASK_SH_LIST_DCN20(_MASK) }; #define vmid_regs(id)\ [id] = {\ DCN20_VMID_REG_LIST(id)\ } static const struct dcn_vmid_registers vmid_regs[] = { vmid_regs(0), vmid_regs(1), vmid_regs(2), vmid_regs(3), vmid_regs(4), vmid_regs(5), vmid_regs(6), vmid_regs(7), vmid_regs(8), vmid_regs(9), vmid_regs(10), vmid_regs(11), vmid_regs(12), vmid_regs(13), vmid_regs(14), vmid_regs(15) }; static const struct dcn20_vmid_shift vmid_shifts = { DCN20_VMID_MASK_SH_LIST(__SHIFT) }; static const struct dcn20_vmid_mask vmid_masks = { DCN20_VMID_MASK_SH_LIST(_MASK) }; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #define dsc_regsDCN20(id)\ [id] = {\ DSC_REG_LIST_DCN20(id)\ } static const struct dcn20_dsc_registers dsc_regs[] = { dsc_regsDCN20(0), dsc_regsDCN20(1), dsc_regsDCN20(2), dsc_regsDCN20(3), dsc_regsDCN20(4), dsc_regsDCN20(5) }; static const struct dcn20_dsc_shift dsc_shift = { DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) }; static const struct dcn20_dsc_mask dsc_mask = { DSC_REG_LIST_SH_MASK_DCN20(_MASK) }; #endif static const struct dccg_registers dccg_regs = { DCCG_REG_LIST_DCN2() }; static const struct dccg_shift dccg_shift = { DCCG_MASK_SH_LIST_DCN2(__SHIFT) }; static const struct dccg_mask dccg_mask = { DCCG_MASK_SH_LIST_DCN2(_MASK) }; static const struct resource_caps res_cap_nv10 = { .num_timing_generator = 6, .num_opp = 6, .num_video_plane = 6, .num_audio = 7, .num_stream_encoder = 6, .num_pll = 6, .num_dwb = 1, .num_ddc = 6, .num_vmid = 16, #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 6, #endif }; static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCN_UNIVERSAL, .blends_with_above = true, .blends_with_below = true, .per_pixel_alpha = true, .pixel_format_support = { .argb8888 = true, .nv12 = true, .fp16 = true }, .max_upscale_factor = { .argb8888 = 16000, .nv12 = 16000, .fp16 = 1 }, .max_downscale_factor = { .argb8888 = 250, .nv12 = 250, .fp16 = 1 } }; static const struct resource_caps res_cap_nv14 = { .num_timing_generator = 5, .num_opp = 5, .num_video_plane = 5, .num_audio = 6, .num_stream_encoder = 5, .num_pll = 5, .num_dwb = 0, .num_ddc = 5, }; static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_DYNAMIC, 
.force_single_disp_pipe_split = true, .disable_dcc = DCC_ENABLE, .vsr_support = true, .performance_trace = false, .max_downscale_src_width = 5120,/*upto 5K*/ .disable_pplib_wm_range = false, .scl_reset_length10 = true, .sanity_checks = false, .disable_tri_buf = true, .underflow_assert_delay_us = 0xFFFFFFFF, }; static const struct dc_debug_options debug_defaults_diags = { .disable_dmcu = true, .force_abm_enable = false, .timing_trace = true, .clock_trace = true, .disable_dpp_power_gate = true, .disable_hubp_power_gate = true, .disable_clock_gate = true, .disable_pplib_clock_request = true, .disable_pplib_wm_range = true, .disable_stutter = true, .scl_reset_length10 = true, .underflow_assert_delay_us = 0xFFFFFFFF, }; void dcn20_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN20_DPP(*dpp)); *dpp = NULL; } struct dpp *dcn20_dpp_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_dpp *dpp = kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL); if (!dpp) return NULL; if (dpp2_construct(dpp, ctx, inst, &tf_regs[inst], &tf_shift, &tf_mask)) return &dpp->base; BREAK_TO_DEBUGGER(); kfree(dpp); return NULL; } struct input_pixel_processor *dcn20_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dcn10_ipp *ipp = kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); return NULL; } dcn20_ipp_construct(ipp, ctx, inst, &ipp_regs[inst], &ipp_shift, &ipp_mask); return &ipp->base; } struct output_pixel_processor *dcn20_opp_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_opp *opp = kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); if (!opp) { BREAK_TO_DEBUGGER(); return NULL; } dcn20_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } struct dce_aux *dcn20_aux_engine_create( struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, &aux_engine_regs[inst]); return &aux_engine->base; } #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { i2c_inst_regs(1), i2c_inst_regs(2), i2c_inst_regs(3), i2c_inst_regs(4), i2c_inst_regs(5), i2c_inst_regs(6), }; static const struct dce_i2c_shift i2c_shifts = { I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT) }; static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) }; struct dce_i2c_hw *dcn20_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); return dce_i2c_hw; } struct mpc *dcn20_mpc_create(struct dc_context *ctx) { struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc), GFP_KERNEL); if (!mpc20) return NULL; dcn20_mpc_construct(mpc20, ctx, &mpc_regs, &mpc_shift, &mpc_mask, 6); return &mpc20->base; } struct hubbub *dcn20_hubbub_create(struct dc_context *ctx) { int i; struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL); if (!hubbub) return NULL; hubbub2_construct(hubbub, ctx, &hubbub_reg, &hubbub_shift, &hubbub_mask); for (i = 0; i < res_cap_nv10.num_vmid; i++) { struct dcn20_vmid *vmid = &hubbub->vmid[i]; vmid->ctx = ctx; vmid->regs = &vmid_regs[i]; vmid->shifts = &vmid_shifts; vmid->masks = &vmid_masks; } return &hubbub->base; } struct timing_generator 
*dcn20_timing_generator_create( struct dc_context *ctx, uint32_t instance) { struct optc *tgn10 = kzalloc(sizeof(struct optc), GFP_KERNEL); if (!tgn10) return NULL; tgn10->base.inst = instance; tgn10->base.ctx = ctx; tgn10->tg_regs = &tg_regs[instance]; tgn10->tg_shift = &tg_shift; tgn10->tg_mask = &tg_mask; dcn20_timing_generator_init(tgn10); return &tgn10->base; } static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 600000, .hdmi_ycbcr420_supported = true, .dp_ycbcr420_supported = true, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, .flags.bits.IS_TPS4_CAPABLE = true }; struct link_encoder *dcn20_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); if (!enc20) return NULL; dcn20_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, &le_mask); return &enc20->enc10.base; } struct clock_source *dcn20_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) { struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; if (dcn20_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; } BREAK_TO_DEBUGGER(); return NULL; } static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) { generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); } static struct audio *dcn20_create_audio( struct dc_context *ctx, unsigned int inst) { return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); } struct stream_encoder *dcn20_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { struct dcn10_stream_encoder *enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); if (!enc1) return NULL; dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc1->base; } static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCN2_REG_LIST() }; static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCN2_MASK_SH_LIST(__SHIFT) }; static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCN2_MASK_SH_LIST(_MASK) }; struct dce_hwseq *dcn20_hwseq_create( struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; } return hws; } static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = dcn20_create_audio, .create_stream_encoder = dcn20_stream_encoder_create, .create_hwseq = dcn20_hwseq_create, }; static const struct resource_create_funcs res_create_maximus_funcs = { .read_dce_straps = NULL, .create_audio = NULL, .create_stream_encoder = NULL, .create_hwseq = dcn20_hwseq_create, }; void dcn20_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); *clk_src = NULL; } #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct display_stream_compressor *dcn20_dsc_create( struct 
dc_context *ctx, uint32_t inst) { struct dcn20_dsc *dsc = kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); if (!dsc) { BREAK_TO_DEBUGGER(); return NULL; } dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); return &dsc->base; } void dcn20_dsc_destroy(struct display_stream_compressor **dsc) { kfree(container_of(*dsc, struct dcn20_dsc, base)); *dsc = NULL; } #endif static void destruct(struct dcn20_resource_pool *pool) { unsigned int i; for (i = 0; i < pool->base.stream_enc_count; i++) { if (pool->base.stream_enc[i] != NULL) { kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); pool->base.stream_enc[i] = NULL; } } #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT for (i = 0; i < pool->base.res_cap->num_dsc; i++) { if (pool->base.dscs[i] != NULL) dcn20_dsc_destroy(&pool->base.dscs[i]); } #endif if (pool->base.mpc != NULL) { kfree(TO_DCN20_MPC(pool->base.mpc)); pool->base.mpc = NULL; } if (pool->base.hubbub != NULL) { kfree(pool->base.hubbub); pool->base.hubbub = NULL; } for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.dpps[i] != NULL) dcn20_dpp_destroy(&pool->base.dpps[i]); if (pool->base.ipps[i] != NULL) pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); if (pool->base.hubps[i] != NULL) { kfree(TO_DCN20_HUBP(pool->base.hubps[i])); pool->base.hubps[i] = NULL; } if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; } if (pool->base.sw_i2cs[i] != NULL) { kfree(pool->base.sw_i2cs[i]); pool->base.sw_i2cs[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_opp; i++) { if (pool->base.opps[i] != NULL) pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); } for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { if (pool->base.timing_generators[i] != NULL) { kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } } for (i = 0; i < pool->base.res_cap->num_dwb; i++) { if (pool->base.dwbc[i] != NULL) { kfree(TO_DCN20_DWBC(pool->base.dwbc[i])); pool->base.dwbc[i] = NULL; } if (pool->base.mcif_wb[i] != NULL) { kfree(TO_DCN20_MMHUBBUB(pool->base.mcif_wb[i])); pool->base.mcif_wb[i] = NULL; } } for (i = 0; i < pool->base.audio_count; i++) { if (pool->base.audios[i]) dce_aud_destroy(&pool->base.audios[i]); } for (i = 0; i < pool->base.clk_src_count; i++) { if (pool->base.clock_sources[i] != NULL) { dcn20_clock_source_destroy(&pool->base.clock_sources[i]); pool->base.clock_sources[i] = NULL; } } if (pool->base.dp_clock_source != NULL) { dcn20_clock_source_destroy(&pool->base.dp_clock_source); pool->base.dp_clock_source = NULL; } if (pool->base.abm != NULL) dce_abm_destroy(&pool->base.abm); if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); if (pool->base.dccg != NULL) dcn_dccg_destroy(&pool->base.dccg); if (pool->base.pp_smu != NULL) dcn20_pp_smu_destroy(&pool->base.pp_smu); } struct hubp *dcn20_hubp_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_hubp *hubp2 = kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); if (!hubp2) return NULL; if (hubp2_construct(hubp2, ctx, inst, &hubp_regs[inst], &hubp_shift, &hubp_mask)) return &hubp2->base; BREAK_TO_DEBUGGER(); kfree(hubp2); return NULL; } static void get_pixel_clock_parameters( struct pipe_ctx *pipe_ctx, struct pixel_clk_params *pixel_clk_params) { const struct dc_stream_state *stream = 
pipe_ctx->stream; struct pipe_ctx *odm_pipe; int opp_cnt = 1; for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) opp_cnt++; pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; pixel_clk_params->encoder_object_id = stream->link->link_enc->id; pixel_clk_params->signal_type = pipe_ctx->stream->signal; pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; /* TODO: un-hardcode*/ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * LINK_RATE_REF_FREQ_IN_KHZ; pixel_clk_params->flags.ENABLE_SS = 0; pixel_clk_params->color_depth = stream->timing.display_color_depth; pixel_clk_params->flags.DISPLAY_BLANKED = 1; pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding; if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) pixel_clk_params->color_depth = COLOR_DEPTH_888; if (opp_cnt == 4) pixel_clk_params->requested_pix_clk_100hz /= 4; else if (optc1_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2) pixel_clk_params->requested_pix_clk_100hz /= 2; if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) pixel_clk_params->requested_pix_clk_100hz *= 2; } static void build_clamping_params(struct dc_stream_state *stream) { stream->clamping.clamping_level = CLAMPING_FULL_RANGE; stream->clamping.c_depth = stream->timing.display_color_depth; stream->clamping.pixel_encoding = stream->timing.pixel_encoding; } static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx) { get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params); pipe_ctx->clock_source->funcs->get_pix_clk_dividers( pipe_ctx->clock_source, &pipe_ctx->stream_res.pix_clk_params, &pipe_ctx->pll_settings); pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding; resource_build_bit_depth_reduction_params(pipe_ctx->stream, &pipe_ctx->stream->bit_depth_params); build_clamping_params(pipe_ctx->stream); return DC_OK; } enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) { enum dc_status status = DC_OK; struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream); /*TODO Seems unneeded anymore */ /* if (old_context && resource_is_stream_unchanged(old_context, stream)) { if (stream != NULL && old_context->streams[i] != NULL) { todo: shouldn't have to copy missing parameter here resource_build_bit_depth_reduction_params(stream, &stream->bit_depth_params); stream->clamping.pixel_encoding = stream->timing.pixel_encoding; resource_build_bit_depth_reduction_params(stream, &stream->bit_depth_params); build_clamping_params(stream); continue; } } */ if (!pipe_ctx) return DC_ERROR_UNEXPECTED; status = build_pipe_hw_param(pipe_ctx); return status; } #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT static void acquire_dsc(struct resource_context *res_ctx, const struct resource_pool *pool, struct display_stream_compressor **dsc) { int i; ASSERT(*dsc == NULL); *dsc = NULL; /* Find first free DSC */ for (i = 0; i < pool->res_cap->num_dsc; i++) if (!res_ctx->is_dsc_acquired[i]) { *dsc = pool->dscs[i]; res_ctx->is_dsc_acquired[i] = true; break; } } static void release_dsc(struct resource_context *res_ctx, const struct resource_pool *pool, struct display_stream_compressor **dsc) { int i; for (i = 0; i < pool->res_cap->num_dsc; i++) if (pool->dscs[i] == *dsc) { res_ctx->is_dsc_acquired[i] = false; *dsc = NULL; break; } } #endif #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT static enum dc_status add_dsc_to_stream_resource(struct 
dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream) { enum dc_status result = DC_OK; int i; const struct resource_pool *pool = dc->res_pool; /* Get a DSC if required and available */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &dc_ctx->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream != dc_stream) continue; acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc); /* The number of DSCs can be less than the number of pipes */ if (!pipe_ctx->stream_res.dsc) { dm_output_to_console("No DSCs available\n"); result = DC_NO_DSC_RESOURCE; } break; } return result; } static enum dc_status remove_dsc_from_stream_resource(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) { struct pipe_ctx *pipe_ctx = NULL; int i; for (i = 0; i < MAX_PIPES; i++) { if (new_ctx->res_ctx.pipe_ctx[i].stream == dc_stream && !new_ctx->res_ctx.pipe_ctx[i].top_pipe) { pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream_res.dsc) release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc); } } if (!pipe_ctx) return DC_ERROR_UNEXPECTED; else return DC_OK; } #endif enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) { enum dc_status result = DC_ERROR_UNEXPECTED; result = resource_map_pool_resources(dc, new_ctx, dc_stream); if (result == DC_OK) result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream); #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Get a DSC if required and available */ if (result == DC_OK && dc_stream->timing.flags.DSC) result = add_dsc_to_stream_resource(dc, new_ctx, dc_stream); #endif if (result == DC_OK) result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream); return result; } enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) { enum dc_status result = DC_OK; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream); #endif return result; } static void swizzle_to_dml_params( enum swizzle_mode_values swizzle, unsigned int *sw_mode) { switch (swizzle) { case DC_SW_LINEAR: *sw_mode = dm_sw_linear; break; case DC_SW_4KB_S: *sw_mode = dm_sw_4kb_s; break; case DC_SW_4KB_S_X: *sw_mode = dm_sw_4kb_s_x; break; case DC_SW_4KB_D: *sw_mode = dm_sw_4kb_d; break; case DC_SW_4KB_D_X: *sw_mode = dm_sw_4kb_d_x; break; case DC_SW_64KB_S: *sw_mode = dm_sw_64kb_s; break; case DC_SW_64KB_S_X: *sw_mode = dm_sw_64kb_s_x; break; case DC_SW_64KB_S_T: *sw_mode = dm_sw_64kb_s_t; break; case DC_SW_64KB_D: *sw_mode = dm_sw_64kb_d; break; case DC_SW_64KB_D_X: *sw_mode = dm_sw_64kb_d_x; break; case DC_SW_64KB_D_T: *sw_mode = dm_sw_64kb_d_t; break; case DC_SW_64KB_R_X: *sw_mode = dm_sw_64kb_r_x; break; case DC_SW_VAR_S: *sw_mode = dm_sw_var_s; break; case DC_SW_VAR_S_X: *sw_mode = dm_sw_var_s_x; break; case DC_SW_VAR_D: *sw_mode = dm_sw_var_d; break; case DC_SW_VAR_D_X: *sw_mode = dm_sw_var_d_x; break; default: ASSERT(0); /* Not supported */ break; } } static bool dcn20_split_stream_for_odm( struct resource_context *res_ctx, const struct resource_pool *pool, struct pipe_ctx *prev_odm_pipe, struct pipe_ctx *next_odm_pipe) { int pipe_idx = next_odm_pipe->pipe_idx; *next_odm_pipe = *prev_odm_pipe; next_odm_pipe->pipe_idx = pipe_idx; next_odm_pipe->plane_res.mi = pool->mis[next_odm_pipe->pipe_idx]; next_odm_pipe->plane_res.hubp = pool->hubps[next_odm_pipe->pipe_idx]; next_odm_pipe->plane_res.ipp = pool->ipps[next_odm_pipe->pipe_idx]; next_odm_pipe->plane_res.xfm = 
pool->transforms[next_odm_pipe->pipe_idx]; next_odm_pipe->plane_res.dpp = pool->dpps[next_odm_pipe->pipe_idx]; next_odm_pipe->plane_res.mpcc_inst = pool->dpps[next_odm_pipe->pipe_idx]->inst; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT next_odm_pipe->stream_res.dsc = NULL; #endif if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) { ASSERT(!next_odm_pipe->next_odm_pipe); next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe; next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe; } prev_odm_pipe->next_odm_pipe = next_odm_pipe; next_odm_pipe->prev_odm_pipe = prev_odm_pipe; ASSERT(next_odm_pipe->top_pipe == NULL); if (prev_odm_pipe->plane_state) { struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data; int new_width; /* HACTIVE halved for odm combine */ sd->h_active /= 2; /* Calculate new vp and recout for left pipe */ /* Need at least 16 pixels width per side */ if (sd->recout.x + 16 >= sd->h_active) return false; new_width = sd->h_active - sd->recout.x; sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int( sd->ratios.horz, sd->recout.width - new_width)); sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int( sd->ratios.horz_c, sd->recout.width - new_width)); sd->recout.width = new_width; /* Calculate new vp and recout for right pipe */ sd = &next_odm_pipe->plane_res.scl_data; /* HACTIVE halved for odm combine */ sd->h_active /= 2; /* Need at least 16 pixels width per side */ if (new_width <= 16) return false; new_width = sd->recout.width + sd->recout.x - sd->h_active; sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int( sd->ratios.horz, sd->recout.width - new_width)); sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int( sd->ratios.horz_c, sd->recout.width - new_width)); sd->recout.width = new_width; sd->viewport.x += dc_fixpt_floor(dc_fixpt_mul_int( sd->ratios.horz, sd->h_active - sd->recout.x)); sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int( sd->ratios.horz_c, sd->h_active - sd->recout.x)); sd->recout.x = 0; } next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx]; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (next_odm_pipe->stream->timing.flags.DSC == 1) { acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc); ASSERT(next_odm_pipe->stream_res.dsc); if (next_odm_pipe->stream_res.dsc == NULL) return false; } #endif return true; } static void dcn20_split_stream_for_mpc( struct resource_context *res_ctx, const struct resource_pool *pool, struct pipe_ctx *primary_pipe, struct pipe_ctx *secondary_pipe) { int pipe_idx = secondary_pipe->pipe_idx; struct pipe_ctx *sec_bot_pipe = secondary_pipe->bottom_pipe; *secondary_pipe = *primary_pipe; secondary_pipe->bottom_pipe = sec_bot_pipe; secondary_pipe->pipe_idx = pipe_idx; secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx]; secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx]; secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx]; secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx]; secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx]; secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT secondary_pipe->stream_res.dsc = NULL; #endif if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) { ASSERT(!secondary_pipe->bottom_pipe); secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe; secondary_pipe->bottom_pipe->top_pipe = secondary_pipe; } primary_pipe->bottom_pipe = secondary_pipe; secondary_pipe->top_pipe = 
primary_pipe; ASSERT(primary_pipe->plane_state); resource_build_scaling_params(primary_pipe); resource_build_scaling_params(secondary_pipe); } void dcn20_populate_dml_writeback_from_context( struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes) { int pipe_cnt, i; for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_writeback_info *wb_info; if (!res_ctx->pipe_ctx[i].stream) continue; wb_info = &res_ctx->pipe_ctx[i].stream->writeback_info[0]; /* Set writeback information */ pipes[pipe_cnt].dout.wb_enable = (wb_info->wb_enabled == true) ? 1 : 0; pipes[pipe_cnt].dout.num_active_wb++; pipes[pipe_cnt].dout.wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_height; pipes[pipe_cnt].dout.wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_width; pipes[pipe_cnt].dout.wb.wb_dst_width = wb_info->dwb_params.dest_width; pipes[pipe_cnt].dout.wb.wb_dst_height = wb_info->dwb_params.dest_height; pipes[pipe_cnt].dout.wb.wb_htaps_luma = 1; pipes[pipe_cnt].dout.wb.wb_vtaps_luma = 1; pipes[pipe_cnt].dout.wb.wb_htaps_chroma = wb_info->dwb_params.scaler_taps.h_taps_c; pipes[pipe_cnt].dout.wb.wb_vtaps_chroma = wb_info->dwb_params.scaler_taps.v_taps_c; pipes[pipe_cnt].dout.wb.wb_hratio = 1.0; pipes[pipe_cnt].dout.wb.wb_vratio = 1.0; if (wb_info->dwb_params.out_format == dwb_scaler_mode_yuv420) { if (wb_info->dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC) pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_8; else pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_10; } else pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_444_32; pipe_cnt++; } } int dcn20_populate_dml_pipes_from_context( struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes) { int pipe_cnt, i; bool synchronized_vblank = true; for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) { if (!res_ctx->pipe_ctx[i].stream) continue; if (pipe_cnt < 0) { pipe_cnt = i; continue; } if (!resource_are_streams_timing_synchronizable( res_ctx->pipe_ctx[pipe_cnt].stream, res_ctx->pipe_ctx[i].stream)) { synchronized_vblank = false; break; } } for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; int output_bpc; if (!res_ctx->pipe_ctx[i].stream) continue; timing = &res_ctx->pipe_ctx[i].stream->timing; /* todo: pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0; pipes[pipe_cnt].pipe.src.dcc = 0; pipes[pipe_cnt].pipe.src.vm = 0;*/ #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC; /* todo: rotation?*/ pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h; #endif if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) { pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true; /* 1/2 vblank */ pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active = (timing->v_total - timing->v_addressable - timing->v_border_top - timing->v_border_bottom) / 2; /* 36 bytes dp, 32 hdmi */ pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes = dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ?
36 : 32; } pipes[pipe_cnt].pipe.src.dcc = false; pipes[pipe_cnt].pipe.src.dcc_rate = 1; pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank; pipes[pipe_cnt].pipe.dest.hblank_start = timing->h_total - timing->h_front_porch; pipes[pipe_cnt].pipe.dest.hblank_end = pipes[pipe_cnt].pipe.dest.hblank_start - timing->h_addressable - timing->h_border_left - timing->h_border_right; pipes[pipe_cnt].pipe.dest.vblank_start = timing->v_total - timing->v_front_porch; pipes[pipe_cnt].pipe.dest.vblank_end = pipes[pipe_cnt].pipe.dest.vblank_start - timing->v_addressable - timing->v_border_top - timing->v_border_bottom; pipes[pipe_cnt].pipe.dest.htotal = timing->h_total; pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total; pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable; pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable; pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE; pipes[pipe_cnt].pipe.dest.pixel_rate_mhz = timing->pix_clk_100hz/10000.0; if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) pipes[pipe_cnt].pipe.dest.pixel_rate_mhz *= 2; pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst; pipes[pipe_cnt].dout.dp_lanes = 4; pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min; pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max; pipes[pipe_cnt].pipe.dest.odm_combine = res_ctx->pipe_ctx[i].prev_odm_pipe || res_ctx->pipe_ctx[i].next_odm_pipe; pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx; if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == res_ctx->pipe_ctx[i].plane_state) pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].top_pipe->pipe_idx; else if (res_ctx->pipe_ctx[i].prev_odm_pipe) { struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].prev_odm_pipe; while (first_pipe->prev_odm_pipe) first_pipe = first_pipe->prev_odm_pipe; pipes[pipe_cnt].pipe.src.hsplit_grp = first_pipe->pipe_idx; } switch (res_ctx->pipe_ctx[i].stream->signal) { case SIGNAL_TYPE_DISPLAY_PORT_MST: case SIGNAL_TYPE_DISPLAY_PORT: pipes[pipe_cnt].dout.output_type = dm_dp; break; case SIGNAL_TYPE_EDP: pipes[pipe_cnt].dout.output_type = dm_edp; break; case SIGNAL_TYPE_HDMI_TYPE_A: case SIGNAL_TYPE_DVI_SINGLE_LINK: case SIGNAL_TYPE_DVI_DUAL_LINK: pipes[pipe_cnt].dout.output_type = dm_hdmi; break; default: /* In case there is no signal, set dp with 4 lanes to allow max config */ pipes[pipe_cnt].dout.output_type = dm_dp; pipes[pipe_cnt].dout.dp_lanes = 4; } switch (res_ctx->pipe_ctx[i].stream->timing.display_color_depth) { case COLOR_DEPTH_666: output_bpc = 6; break; case COLOR_DEPTH_888: output_bpc = 8; break; case COLOR_DEPTH_101010: output_bpc = 10; break; case COLOR_DEPTH_121212: output_bpc = 12; break; case COLOR_DEPTH_141414: output_bpc = 14; break; case COLOR_DEPTH_161616: output_bpc = 16; break; #ifdef CONFIG_DRM_AMD_DC_DCN2_0 case COLOR_DEPTH_999: output_bpc = 9; break; case COLOR_DEPTH_111111: output_bpc = 11; break; #endif default: output_bpc = 8; break; } switch (res_ctx->pipe_ctx[i].stream->timing.pixel_encoding) { case PIXEL_ENCODING_RGB: case PIXEL_ENCODING_YCBCR444: pipes[pipe_cnt].dout.output_format = dm_444; pipes[pipe_cnt].dout.output_bpp = output_bpc * 3; break; case PIXEL_ENCODING_YCBCR420: pipes[pipe_cnt].dout.output_format = dm_420; pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3) / 2; break; case PIXEL_ENCODING_YCBCR422: if (true) /* todo */ pipes[pipe_cnt].dout.output_format = dm_s422; else 
pipes[pipe_cnt].dout.output_format = dm_n422; pipes[pipe_cnt].dout.output_bpp = output_bpc * 2; break; default: pipes[pipe_cnt].dout.output_format = dm_444; pipes[pipe_cnt].dout.output_bpp = output_bpc * 3; } /* todo: default max for now, until there is logic reflecting this in dc*/ pipes[pipe_cnt].dout.output_bpc = 12; /* * Use max cursor settings for calculations to minimize * bw calculations due to cursor on/off */ pipes[pipe_cnt].pipe.src.num_cursors = 2; pipes[pipe_cnt].pipe.src.cur0_src_width = 256; pipes[pipe_cnt].pipe.src.cur0_bpp = dm_cur_32bit; pipes[pipe_cnt].pipe.src.cur1_src_width = 256; pipes[pipe_cnt].pipe.src.cur1_bpp = dm_cur_32bit; if (!res_ctx->pipe_ctx[i].plane_state) { pipes[pipe_cnt].pipe.src.source_scan = dm_horz; pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_linear; pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile; pipes[pipe_cnt].pipe.src.viewport_width = timing->h_addressable; if (pipes[pipe_cnt].pipe.src.viewport_width > 1920) pipes[pipe_cnt].pipe.src.viewport_width = 1920; pipes[pipe_cnt].pipe.src.viewport_height = timing->v_addressable; if (pipes[pipe_cnt].pipe.src.viewport_height > 1080) pipes[pipe_cnt].pipe.src.viewport_height = 1080; pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */ pipes[pipe_cnt].pipe.src.source_format = dm_444_32; pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/ pipes[pipe_cnt].pipe.dest.recout_height = pipes[pipe_cnt].pipe.src.viewport_height; /*vp_height/vratio*/ pipes[pipe_cnt].pipe.dest.full_recout_width = pipes[pipe_cnt].pipe.dest.recout_width; /*when is_hsplit != 1*/ pipes[pipe_cnt].pipe.dest.full_recout_height = pipes[pipe_cnt].pipe.dest.recout_height; /*when is_hsplit != 1*/ pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16; pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = 1.0; pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = 1.0; pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable = 0; /*Lb only or Full scl*/ pipes[pipe_cnt].pipe.scale_taps.htaps = 1; pipes[pipe_cnt].pipe.scale_taps.vtaps = 1; pipes[pipe_cnt].pipe.src.is_hsplit = 0; pipes[pipe_cnt].pipe.dest.odm_combine = 0; pipes[pipe_cnt].pipe.dest.vtotal_min = timing->v_total; pipes[pipe_cnt].pipe.dest.vtotal_max = timing->v_total; } else { struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state; struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data; pipes[pipe_cnt].pipe.src.immediate_flip = pln->flip_immediate; pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln) || (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln); pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90 || pln->rotation == ROTATION_ANGLE_270 ? 
dm_vert : dm_horz; pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y; pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y; pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport.width; pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width; pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height; pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height; if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch; pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.chroma_pitch; pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.meta_pitch; pipes[pipe_cnt].pipe.src.meta_pitch_c = pln->dcc.meta_pitch_c; } else { pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch; pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.meta_pitch; } pipes[pipe_cnt].pipe.src.dcc = pln->dcc.enable; pipes[pipe_cnt].pipe.dest.recout_width = scl->recout.width; pipes[pipe_cnt].pipe.dest.recout_height = scl->recout.height; pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width; pipes[pipe_cnt].pipe.dest.full_recout_height = scl->recout.height; if (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln) { pipes[pipe_cnt].pipe.dest.full_recout_width += res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.width; pipes[pipe_cnt].pipe.dest.full_recout_height += res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.height; } else if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln) { pipes[pipe_cnt].pipe.dest.full_recout_width += res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.width; pipes[pipe_cnt].pipe.dest.full_recout_height += res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.height; } pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16; pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32); pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32); pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32); pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32); pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable = scl->ratios.vert.value != dc_fixpt_one.value || scl->ratios.horz.value != dc_fixpt_one.value || scl->ratios.vert_c.value != dc_fixpt_one.value || scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/ || dc->debug.always_scale; /*support always scale*/ pipes[pipe_cnt].pipe.scale_taps.htaps = scl->taps.h_taps; pipes[pipe_cnt].pipe.scale_taps.htaps_c = scl->taps.h_taps_c; pipes[pipe_cnt].pipe.scale_taps.vtaps = scl->taps.v_taps; pipes[pipe_cnt].pipe.scale_taps.vtaps_c = scl->taps.v_taps_c; pipes[pipe_cnt].pipe.src.macro_tile_size = swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle); swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle, &pipes[pipe_cnt].pipe.src.sw_mode); switch (pln->format) { case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: pipes[pipe_cnt].pipe.src.source_format = dm_420_8; break; case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: pipes[pipe_cnt].pipe.src.source_format = dm_420_10; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: pipes[pipe_cnt].pipe.src.source_format = dm_444_64; break; case 
SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: case SURFACE_PIXEL_FORMAT_GRPH_RGB565: pipes[pipe_cnt].pipe.src.source_format = dm_444_16; break; case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS: pipes[pipe_cnt].pipe.src.source_format = dm_444_8; break; default: pipes[pipe_cnt].pipe.src.source_format = dm_444_32; break; } } pipe_cnt++; } /* populate writeback information */ dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes); return pipe_cnt; } unsigned int dcn20_calc_max_scaled_time( unsigned int time_per_pixel, enum mmhubbub_wbif_mode mode, unsigned int urgent_watermark) { unsigned int time_per_byte = 0; unsigned int total_y_free_entry = 0x200; /* two memory piece for luma */ unsigned int total_c_free_entry = 0x140; /* two memory piece for chroma */ unsigned int small_free_entry, max_free_entry; unsigned int buf_lh_capability; unsigned int max_scaled_time; if (mode == PACKED_444) /* packed mode */ time_per_byte = time_per_pixel/4; else if (mode == PLANAR_420_8BPC) time_per_byte = time_per_pixel; else if (mode == PLANAR_420_10BPC) /* p010 */ time_per_byte = time_per_pixel * 819/1024; if (time_per_byte == 0) time_per_byte = 1; small_free_entry = (total_y_free_entry > total_c_free_entry) ? total_c_free_entry : total_y_free_entry; max_free_entry = (mode == PACKED_444) ? total_y_free_entry + total_c_free_entry : small_free_entry; buf_lh_capability = max_free_entry*time_per_byte*32/16; /* there is 4bit fraction */ max_scaled_time = buf_lh_capability - urgent_watermark; return max_scaled_time; } void dcn20_set_mcif_arb_params( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int pipe_cnt) { enum mmhubbub_wbif_mode wbif_mode; struct mcif_arb_params *wb_arb_params; int i, j, k, dwb_pipe; /* Writeback MCIF_WB arbitration parameters */ dwb_pipe = 0; for (i = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; for (j = 0; j < MAX_DWB_PIPES; j++) { if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].wb_enabled == false) continue; //wb_arb_params = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j].mcif_arb_params; wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[dwb_pipe]; if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.out_format == dwb_scaler_mode_yuv420) { if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC) wbif_mode = PLANAR_420_8BPC; else wbif_mode = PLANAR_420_10BPC; } else wbif_mode = PACKED_444; for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) { wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; } wb_arb_params->time_per_pixel = 16.0 / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* 4 bit fraction, ms */ wb_arb_params->slice_lines = 32; wb_arb_params->arbitration_slice = 2; wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel, wbif_mode, wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */ dwb_pipe++; if (dwb_pipe >= MAX_DWB_PIPES) return; } if (dwb_pipe >= MAX_DWB_PIPES) return; } } #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) { int i; /* Validate DSC config, dsc count validation is already done */ for (i = 0; i < dc->res_pool->pipe_count; i++) { 
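/* Each pass below builds a dsc_config from the stream timing, dividing pic_width and num_slices_h across the ODM opp count, and asks the DSC block to validate it via dsc_validate_stream(); only the head pipe of a DSC-enabled stream needs checking. */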
struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i]; struct dc_stream_state *stream = pipe_ctx->stream; struct dsc_config dsc_cfg; struct pipe_ctx *odm_pipe; int opp_cnt = 1; for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) opp_cnt++; /* Only need to validate top pipe */ if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC) continue; dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg)) return false; } return true; } #endif static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, struct resource_context *res_ctx, const struct resource_pool *pool, const struct pipe_ctx *primary_pipe) { struct pipe_ctx *secondary_pipe = NULL; if (dc && primary_pipe) { int j; int preferred_pipe_idx = 0; /* first check the prev dc state: * if this primary pipe has a bottom pipe in prev. state * and if the bottom pipe is still available (which it should be), * pick that pipe as secondary * Same logic applies for ODM pipes. Since mpo is not allowed with odm * check in else case. */ if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) { preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx; if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; secondary_pipe->pipe_idx = preferred_pipe_idx; } } else if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) { preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx; if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; secondary_pipe->pipe_idx = preferred_pipe_idx; } } /* * if this primary pipe does not have a bottom pipe in prev. state * start backward and find a pipe that did not used to be a bottom pipe in * prev. dc state. This way we make sure we keep the same assignment as * last state and will not have to reprogram every pipe */ if (secondary_pipe == NULL) { for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) { if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL) { preferred_pipe_idx = j; if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; secondary_pipe->pipe_idx = preferred_pipe_idx; break; } } } } /* * We should never hit this assert unless assignments are shuffled around * if this happens we will prob. 
hit a vsync tdr */ ASSERT(secondary_pipe); /* * search backwards for the second pipe to keep pipe * assignment more consistent */ if (secondary_pipe == NULL) { for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) { preferred_pipe_idx = j; if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; secondary_pipe->pipe_idx = preferred_pipe_idx; break; } } } } return secondary_pipe; } bool dcn20_fast_validate_bw( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int *pipe_cnt_out, int *pipe_split_from, int *vlevel_out) { bool out = false; int pipe_cnt, i, pipe_idx, vlevel, vlevel_unsplit; bool odm_capable = context->bw_ctx.dml.ip.odm_capable; bool force_split = false; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool failed_non_odm_dsc = false; #endif int split_threshold = dc->res_pool->pipe_count / 2; bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC; ASSERT(pipes); if (!pipes) return false; /* merge previously split odm pipes since mode support needs to make the decision */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *odm_pipe = pipe->next_odm_pipe; if (pipe->prev_odm_pipe) continue; pipe->next_odm_pipe = NULL; while (odm_pipe) { struct pipe_ctx *next_odm_pipe = odm_pipe->next_odm_pipe; odm_pipe->plane_state = NULL; odm_pipe->stream = NULL; odm_pipe->top_pipe = NULL; odm_pipe->bottom_pipe = NULL; odm_pipe->prev_odm_pipe = NULL; odm_pipe->next_odm_pipe = NULL; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (odm_pipe->stream_res.dsc) release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc); #endif /* Clear plane_res and stream_res */ memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res)); memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res)); odm_pipe = next_odm_pipe; } if (pipe->plane_state) resource_build_scaling_params(pipe); } /* merge previously mpc split pipes since mode support needs to make the decision */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe; if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) continue; pipe->bottom_pipe = hsplit_pipe->bottom_pipe; if (hsplit_pipe->bottom_pipe) hsplit_pipe->bottom_pipe->top_pipe = pipe; hsplit_pipe->plane_state = NULL; hsplit_pipe->stream = NULL; hsplit_pipe->top_pipe = NULL; hsplit_pipe->bottom_pipe = NULL; /* Clear plane_res and stream_res */ memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res)); memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res)); if (pipe->plane_state) resource_build_scaling_params(pipe); } if (dc->res_pool->funcs->populate_dml_pipes) pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, &context->res_ctx, pipes); else pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, &context->res_ctx, pipes); *pipe_cnt_out = pipe_cnt; if (!pipe_cnt) { out = true; goto validate_out; } context->bw_ctx.dml.ip.odm_capable = 0; vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); context->bw_ctx.dml.ip.odm_capable = odm_capable; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* 1 dsc per stream dsc validation */ if (vlevel <= context->bw_ctx.dml.soc.num_states) if (!dcn20_validate_dsc(dc, context)) { failed_non_odm_dsc = true; vlevel = context->bw_ctx.dml.soc.num_states + 1; } #endif if (vlevel > context->bw_ctx.dml.soc.num_states && odm_capable) vlevel = 
dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); if (vlevel > context->bw_ctx.dml.soc.num_states) goto validate_fail; if ((context->stream_count > split_threshold && dc->current_state->stream_count <= split_threshold) || (context->stream_count <= split_threshold && dc->current_state->stream_count > split_threshold)) context->commit_hints.full_update_needed = true; /*initialize pipe_just_split_from to invalid idx*/ for (i = 0; i < MAX_PIPES; i++) pipe_split_from[i] = -1; /* Single display only conditionals get set here */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; bool exit_loop = false; if (!pipe->stream || pipe->top_pipe) continue; if (dc->debug.force_single_disp_pipe_split) { if (!force_split) force_split = true; else { force_split = false; exit_loop = true; } } if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) { if (avoid_split) avoid_split = false; else { avoid_split = true; exit_loop = true; } } if (exit_loop) break; } if (context->stream_count > split_threshold) avoid_split = true; vlevel_unsplit = vlevel; for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; for (; vlevel_unsplit <= context->bw_ctx.dml.soc.num_states; vlevel_unsplit++) if (context->bw_ctx.dml.vba.NoOfDPP[vlevel_unsplit][0][pipe_idx] == 1) break; pipe_idx++; } for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe; bool need_split = true; bool need_split3d; if (!pipe->stream || pipe_split_from[i] >= 0) continue; pipe_idx++; if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) { force_split = true; context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = true; context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true; } if (force_split && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1) context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2; if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); ASSERT(hsplit_pipe); if (!dcn20_split_stream_for_odm( &context->res_ctx, dc->res_pool, pipe, hsplit_pipe)) goto validate_fail; pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx; dcn20_build_mapped_resource(dc, context, pipe->stream); } if (!pipe->plane_state) continue; /* Skip 2nd half of already split pipe */ if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state) continue; need_split3d = ((pipe->stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE || pipe->stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) && (pipe->stream->timing.timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM || pipe->stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE)); if (avoid_split && vlevel_unsplit <= context->bw_ctx.dml.soc.num_states && !force_split && !need_split3d) { need_split = false; vlevel = vlevel_unsplit; context->bw_ctx.dml.vba.maxMpcComb = 0; } else need_split = context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 2; /* We do not support mpo + odm at the moment */ if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) goto validate_fail; if (need_split3d || need_split || force_split) { if (!hsplit_pipe || 
hsplit_pipe->plane_state != pipe->plane_state) { /* pipe not split previously needs split */ hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); ASSERT(hsplit_pipe || force_split); if (!hsplit_pipe) continue; if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { if (!dcn20_split_stream_for_odm( &context->res_ctx, dc->res_pool, pipe, hsplit_pipe)) goto validate_fail; } else dcn20_split_stream_for_mpc( &context->res_ctx, dc->res_pool, pipe, hsplit_pipe); pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx; } } else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) { /* merge should already have been done */ ASSERT(0); } } #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Actual dsc count per stream dsc validation*/ if (failed_non_odm_dsc && !dcn20_validate_dsc(dc, context)) { context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE; goto validate_fail; } #endif *vlevel_out = vlevel; out = true; goto validate_out; validate_fail: out = false; validate_out: return out; } void dcn20_calculate_wm( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int *out_pipe_cnt, int *pipe_split_from, int vlevel) { int pipe_cnt, i, pipe_idx; for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.vba.RequiredDISPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; if (pipe_split_from[i] < 0) { pipes[pipe_cnt].clks_cfg.dppclk_mhz = context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx]; if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx) pipes[pipe_cnt].pipe.dest.odm_combine = context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx]; else pipes[pipe_cnt].pipe.dest.odm_combine = 0; pipe_idx++; } else { pipes[pipe_cnt].clks_cfg.dppclk_mhz = context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]]; if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i]) pipes[pipe_cnt].pipe.dest.odm_combine = context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_split_from[i]]; else pipes[pipe_cnt].pipe.dest.odm_combine = 0; } if (dc->config.forced_clocks) { pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; pipes[pipe_cnt].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; } if (dc->debug.min_disp_clk_khz > pipes[pipe_cnt].clks_cfg.dispclk_mhz * 1000) pipes[pipe_cnt].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; if (dc->debug.min_dpp_clk_khz > pipes[pipe_cnt].clks_cfg.dppclk_mhz * 1000) pipes[pipe_cnt].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; pipe_cnt++; } if (pipe_cnt != pipe_idx) { if (dc->res_pool->funcs->populate_dml_pipes) pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, &context->res_ctx, pipes); else pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, &context->res_ctx, pipes); } *out_pipe_cnt = pipe_cnt; pipes[0].clks_cfg.voltage = vlevel; pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz; pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; /* only pipe 0 is read for voltage and dcf/soc clocks */ if (vlevel < 1) { pipes[0].clks_cfg.voltage = 1; 
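/* Watermark set B is computed with the voltage floor raised to at least level 1; sets C and D below raise the floor further, and set A at the end reuses the actual vlevel. */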
pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].dcfclk_mhz; pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].socclk_mhz; } context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; if (vlevel < 2) { pipes[0].clks_cfg.voltage = 2; pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz; pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz; } context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; if (vlevel < 3) { pipes[0].clks_cfg.voltage = 3; pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz; pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz; } context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; pipes[0].clks_cfg.voltage = vlevel; pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz; pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; } void dcn20_calculate_dlg_params( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st 
*pipes, int pipe_cnt, int vlevel) { int i, j, pipe_idx, pipe_idx_unsplit; bool visited[MAX_PIPES] = { 0 }; /* Writeback MCIF_WB arbitration parameters */ dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt); context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000; context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000; context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000; context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16; context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000; context->bw_ctx.bw.dcn.clk.fclk_khz = 0; context->bw_ctx.bw.dcn.clk.p_state_change_support = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != dm_dram_clock_change_unsupported; context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; /* * An artifact of dml pipe split/odm is that pipes get merged back together for * calculation. Therefore we need to only extract for first pipe in ascending index order * and copy into the other split half. */ for (i = 0, pipe_idx = 0, pipe_idx_unsplit = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; if (!visited[pipe_idx]) { display_pipe_source_params_st *src = &pipes[pipe_idx_unsplit].pipe.src; display_pipe_dest_params_st *dst = &pipes[pipe_idx_unsplit].pipe.dest; dst->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit]; dst->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit]; dst->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit]; dst->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit]; /* * j iterates inside pipes array, unlike i which iterates inside * pipe_ctx array */ if (src->is_hsplit) for (j = pipe_idx + 1; j < pipe_cnt; j++) { display_pipe_source_params_st *src_j = &pipes[j].pipe.src; display_pipe_dest_params_st *dst_j = &pipes[j].pipe.dest; if (src_j->is_hsplit && !visited[j] && src->hsplit_grp == src_j->hsplit_grp) { dst_j->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit]; dst_j->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit]; dst_j->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit]; dst_j->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit]; visited[j] = true; } } visited[pipe_idx] = true; pipe_idx_unsplit++; } pipe_idx++; } for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; ASSERT(visited[pipe_idx]); context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest; pipe_idx++; } /*save a original dppclock copy*/ context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz; context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz; context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz * 1000; context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000; for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { bool cstate_en = 
context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2; if (!context->res_ctx.pipe_ctx[i].stream) continue; context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml, &context->res_ctx.pipe_ctx[i].dlg_regs, &context->res_ctx.pipe_ctx[i].ttu_regs, pipes, pipe_cnt, pipe_idx, cstate_en, context->bw_ctx.bw.dcn.clk.p_state_change_support, false, false, false); context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml, &context->res_ctx.pipe_ctx[i].rq_regs, pipes[pipe_idx].pipe); pipe_idx++; } } static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context, bool fast_validate) { bool out = false; BW_VAL_TRACE_SETUP(); int vlevel = 0; int pipe_split_from[MAX_PIPES]; int pipe_cnt = 0; display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); DC_LOGGER_INIT(dc->ctx->logger); BW_VAL_TRACE_COUNT(); out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel); if (pipe_cnt == 0) goto validate_out; if (!out) goto validate_fail; BW_VAL_TRACE_END_VOLTAGE_LEVEL(); if (fast_validate) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel); dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); BW_VAL_TRACE_END_WATERMARKS(); goto validate_out; validate_fail: DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); BW_VAL_TRACE_SKIP(fail); out = false; validate_out: kfree(pipes); BW_VAL_TRACE_FINISH(); return out; } bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate) { bool voltage_supported = false; bool full_pstate_supported = false; bool dummy_pstate_supported = false; double p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us; if (fast_validate) return dcn20_validate_bandwidth_internal(dc, context, true); // Best case, we support full UCLK switch latency voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false); full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support; if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 || (voltage_supported && full_pstate_supported)) { context->bw_ctx.bw.dcn.clk.p_state_change_support = true; goto restore_dml_state; } // Fallback: Try to only support G6 temperature read latency context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us; voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false); dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support; if (voltage_supported && dummy_pstate_supported) { context->bw_ctx.bw.dcn.clk.p_state_change_support = false; goto restore_dml_state; } // ERROR: fallback is supposed to always work. 
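/* Neither the full p-state latency nor the dummy (G6 temperature read) latency passed validation, so assert and fall through to restore the original DML state below. */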
ASSERT(false); restore_dml_state: memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us; return voltage_supported; } struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer( struct dc_state *state, const struct resource_pool *pool, struct dc_stream_state *stream) { struct resource_context *res_ctx = &state->res_ctx; struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream); struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe); if (!head_pipe) ASSERT(0); if (!idle_pipe) return NULL; idle_pipe->stream = head_pipe->stream; idle_pipe->stream_res.tg = head_pipe->stream_res.tg; idle_pipe->stream_res.opp = head_pipe->stream_res.opp; idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx]; idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx]; idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx]; idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst; return idle_pipe; } bool dcn20_get_dcc_compression_cap(const struct dc *dc, const struct dc_dcc_surface_param *input, struct dc_surface_dcc_cap *output) { return dc->res_pool->hubbub->funcs->get_dcc_compression_cap( dc->res_pool->hubbub, input, output); } static void dcn20_destroy_resource_pool(struct resource_pool **pool) { struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool); destruct(dcn20_pool); kfree(dcn20_pool); *pool = NULL; } static struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn20_get_dcc_compression_cap }; enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state) { enum dc_status result = DC_OK; enum surface_pixel_format surf_pix_format = plane_state->format; unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format); enum swizzle_mode_values swizzle = DC_SW_LINEAR; if (bpp == 64) swizzle = DC_SW_64KB_D; else swizzle = DC_SW_64KB_S; plane_state->tiling_info.gfx9.swizzle = swizzle; return result; } static struct resource_funcs dcn20_res_pool_funcs = { .destroy = dcn20_destroy_resource_pool, .link_enc_create = dcn20_link_encoder_create, .validate_bandwidth = dcn20_validate_bandwidth, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, .add_stream_to_ctx = dcn20_add_stream_to_ctx, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context, .get_default_swizzle_mode = dcn20_get_default_swizzle_mode, .set_mcif_arb_params = dcn20_set_mcif_arb_params, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link }; bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t pipe_count = pool->res_cap->num_dwb; ASSERT(pipe_count > 0); for (i = 0; i < pipe_count; i++) { struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc), GFP_KERNEL); if (!dwbc20) { dm_error("DC: failed to create dwbc20!\n"); return false; } dcn20_dwbc_construct(dwbc20, ctx, &dwbc20_regs[i], &dwbc20_shift, &dwbc20_mask, i); pool->dwbc[i] = &dwbc20->base; } return true; } bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t pipe_count = pool->res_cap->num_dwb; ASSERT(pipe_count > 0); for (i = 0; i < pipe_count; i++) { struct dcn20_mmhubbub *mcif_wb20 = kzalloc(sizeof(struct dcn20_mmhubbub), GFP_KERNEL); if (!mcif_wb20) { dm_error("DC: failed to create mcif_wb20!\n"); return false; } dcn20_mmhubbub_construct(mcif_wb20, ctx, &mcif_wb20_regs[i], 
&mcif_wb20_shift, &mcif_wb20_mask, i); pool->mcif_wb[i] = &mcif_wb20->base; } return true; } struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx) { struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL); if (!pp_smu) return pp_smu; dm_pp_get_funcs(ctx, pp_smu); if (pp_smu->ctx.ver != PP_SMU_VER_NV) pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs)); return pp_smu; } void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu) { if (pp_smu && *pp_smu) { kfree(*pp_smu); *pp_smu = NULL; } } static void cap_soc_clocks( struct _vcs_dpi_soc_bounding_box_st *bb, struct pp_smu_nv_clock_table max_clocks) { int i; // First pass - cap all clocks higher than the reported max for (i = 0; i < bb->num_states; i++) { if ((bb->clock_limits[i].dcfclk_mhz > (max_clocks.dcfClockInKhz / 1000)) && max_clocks.dcfClockInKhz != 0) bb->clock_limits[i].dcfclk_mhz = (max_clocks.dcfClockInKhz / 1000); if ((bb->clock_limits[i].dram_speed_mts > (max_clocks.uClockInKhz / 1000) * 16) && max_clocks.uClockInKhz != 0) bb->clock_limits[i].dram_speed_mts = (max_clocks.uClockInKhz / 1000) * 16; if ((bb->clock_limits[i].fabricclk_mhz > (max_clocks.fabricClockInKhz / 1000)) && max_clocks.fabricClockInKhz != 0) bb->clock_limits[i].fabricclk_mhz = (max_clocks.fabricClockInKhz / 1000); if ((bb->clock_limits[i].dispclk_mhz > (max_clocks.displayClockInKhz / 1000)) && max_clocks.displayClockInKhz != 0) bb->clock_limits[i].dispclk_mhz = (max_clocks.displayClockInKhz / 1000); if ((bb->clock_limits[i].dppclk_mhz > (max_clocks.dppClockInKhz / 1000)) && max_clocks.dppClockInKhz != 0) bb->clock_limits[i].dppclk_mhz = (max_clocks.dppClockInKhz / 1000); if ((bb->clock_limits[i].phyclk_mhz > (max_clocks.phyClockInKhz / 1000)) && max_clocks.phyClockInKhz != 0) bb->clock_limits[i].phyclk_mhz = (max_clocks.phyClockInKhz / 1000); if ((bb->clock_limits[i].socclk_mhz > (max_clocks.socClockInKhz / 1000)) && max_clocks.socClockInKhz != 0) bb->clock_limits[i].socclk_mhz = (max_clocks.socClockInKhz / 1000); if ((bb->clock_limits[i].dscclk_mhz > (max_clocks.dscClockInKhz / 1000)) && max_clocks.dscClockInKhz != 0) bb->clock_limits[i].dscclk_mhz = (max_clocks.dscClockInKhz / 1000); } // Second pass - remove all duplicate clock states for (i = bb->num_states - 1; i > 1; i--) { bool duplicate = true; if (bb->clock_limits[i-1].dcfclk_mhz != bb->clock_limits[i].dcfclk_mhz) duplicate = false; if (bb->clock_limits[i-1].dispclk_mhz != bb->clock_limits[i].dispclk_mhz) duplicate = false; if (bb->clock_limits[i-1].dppclk_mhz != bb->clock_limits[i].dppclk_mhz) duplicate = false; if (bb->clock_limits[i-1].dram_speed_mts != bb->clock_limits[i].dram_speed_mts) duplicate = false; if (bb->clock_limits[i-1].dscclk_mhz != bb->clock_limits[i].dscclk_mhz) duplicate = false; if (bb->clock_limits[i-1].fabricclk_mhz != bb->clock_limits[i].fabricclk_mhz) duplicate = false; if (bb->clock_limits[i-1].phyclk_mhz != bb->clock_limits[i].phyclk_mhz) duplicate = false; if (bb->clock_limits[i-1].socclk_mhz != bb->clock_limits[i].socclk_mhz) duplicate = false; if (duplicate) bb->num_states--; } } static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb, struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states) { struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES] = {0}; int i; int num_calculated_states = 0; int min_dcfclk = 0; if (num_states == 0) return; if (dc->bb_overrides.min_dcfclk_mhz > 0) min_dcfclk = dc->bb_overrides.min_dcfclk_mhz; else // Accounting for SOC/DCF 
relationship, we can go as high as // 506Mhz in Vmin. We need to code 507 since SMU will round down to 506. min_dcfclk = 507; for (i = 0; i < num_states; i++) { int min_fclk_required_by_uclk; calculated_states[i].state = i; calculated_states[i].dram_speed_mts = uclk_states[i] * 16 / 1000; // FCLK:UCLK ratio is 1.08 min_fclk_required_by_uclk = mul_u64_u32_shr(BIT_ULL(32) * 1080 / 1000000, uclk_states[i], 32); calculated_states[i].fabricclk_mhz = (min_fclk_required_by_uclk < min_dcfclk) ? min_dcfclk : min_fclk_required_by_uclk; calculated_states[i].socclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->socClockInKhz / 1000) ? max_clocks->socClockInKhz / 1000 : calculated_states[i].fabricclk_mhz; calculated_states[i].dcfclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->dcfClockInKhz / 1000) ? max_clocks->dcfClockInKhz / 1000 : calculated_states[i].fabricclk_mhz; calculated_states[i].dispclk_mhz = max_clocks->displayClockInKhz / 1000; calculated_states[i].dppclk_mhz = max_clocks->displayClockInKhz / 1000; calculated_states[i].dscclk_mhz = max_clocks->displayClockInKhz / (1000 * 3); calculated_states[i].phyclk_mhz = max_clocks->phyClockInKhz / 1000; num_calculated_states++; } calculated_states[num_calculated_states - 1].socclk_mhz = max_clocks->socClockInKhz / 1000; calculated_states[num_calculated_states - 1].fabricclk_mhz = max_clocks->socClockInKhz / 1000; calculated_states[num_calculated_states - 1].dcfclk_mhz = max_clocks->dcfClockInKhz / 1000; memcpy(bb->clock_limits, calculated_states, sizeof(bb->clock_limits)); bb->num_states = num_calculated_states; // Duplicate the last state, DML always an extra state identical to max state to work memcpy(&bb->clock_limits[num_calculated_states], &bb->clock_limits[num_calculated_states - 1], sizeof(struct _vcs_dpi_voltage_scaling_st)); bb->clock_limits[num_calculated_states].state = bb->num_states; } static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) { kernel_fpu_begin(); if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns && dc->bb_overrides.sr_exit_time_ns) { bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; } if ((int)(bb->sr_enter_plus_exit_time_us * 1000) != dc->bb_overrides.sr_enter_plus_exit_time_ns && dc->bb_overrides.sr_enter_plus_exit_time_ns) { bb->sr_enter_plus_exit_time_us = dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; } if ((int)(bb->urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns && dc->bb_overrides.urgent_latency_ns) { bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; } if ((int)(bb->dram_clock_change_latency_us * 1000) != dc->bb_overrides.dram_clock_change_latency_ns && dc->bb_overrides.dram_clock_change_latency_ns) { bb->dram_clock_change_latency_us = dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; } kernel_fpu_end(); } static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb( uint32_t hw_internal_rev) { if (ASICREV_IS_NAVI12_P(hw_internal_rev)) return &dcn2_0_nv12_soc; return &dcn2_0_soc; } static struct _vcs_dpi_ip_params_st *get_asic_rev_ip_params( uint32_t hw_internal_rev) { /* NV12 and NV10 */ return &dcn2_0_ip; } static enum dml_project get_dml_project_version(uint32_t hw_internal_rev) { return DML_PROJECT_NAVI10v2; } #define fixed16_to_double(x) (((double) x) / ((double) (1 << 16))) #define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x)) static bool init_soc_bounding_box(struct dc *dc, struct dcn20_resource_pool *pool) { const struct gpu_info_soc_bounding_box_v1_0 
*bb = dc->soc_bounding_box;
	struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
			get_asic_rev_soc_bb(dc->ctx->asic_id.hw_internal_rev);
	struct _vcs_dpi_ip_params_st *loaded_ip =
			get_asic_rev_ip_params(dc->ctx->asic_id.hw_internal_rev);

	DC_LOGGER_INIT(dc->ctx->logger);

	if (!bb && !SOC_BOUNDING_BOX_VALID) {
		DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__);
		return false;
	}

	if (bb && !SOC_BOUNDING_BOX_VALID) {
		int i;

		dcn2_0_nv12_soc.sr_exit_time_us =
				fixed16_to_double_to_cpu(bb->sr_exit_time_us);
		dcn2_0_nv12_soc.sr_enter_plus_exit_time_us =
				fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us);
		dcn2_0_nv12_soc.urgent_latency_us =
				fixed16_to_double_to_cpu(bb->urgent_latency_us);
		dcn2_0_nv12_soc.urgent_latency_pixel_data_only_us =
				fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us);
		dcn2_0_nv12_soc.urgent_latency_pixel_mixed_with_vm_data_us =
				fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us);
		dcn2_0_nv12_soc.urgent_latency_vm_data_only_us =
				fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us);
		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes);
		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes);
		dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only);
		dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm);
		dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only);
		dcn2_0_nv12_soc.max_avg_sdp_bw_use_normal_percent =
				fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent);
		dcn2_0_nv12_soc.max_avg_dram_bw_use_normal_percent =
				fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent);
		dcn2_0_nv12_soc.writeback_latency_us =
				fixed16_to_double_to_cpu(bb->writeback_latency_us);
		dcn2_0_nv12_soc.ideal_dram_bw_after_urgent_percent =
				fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent);
		dcn2_0_nv12_soc.max_request_size_bytes =
				le32_to_cpu(bb->max_request_size_bytes);
		dcn2_0_nv12_soc.dram_channel_width_bytes =
				le32_to_cpu(bb->dram_channel_width_bytes);
		dcn2_0_nv12_soc.fabric_datapath_to_dcn_data_return_bytes =
				le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes);
		dcn2_0_nv12_soc.dcn_downspread_percent =
				fixed16_to_double_to_cpu(bb->dcn_downspread_percent);
		dcn2_0_nv12_soc.downspread_percent =
				fixed16_to_double_to_cpu(bb->downspread_percent);
		dcn2_0_nv12_soc.dram_page_open_time_ns =
				fixed16_to_double_to_cpu(bb->dram_page_open_time_ns);
		dcn2_0_nv12_soc.dram_rw_turnaround_time_ns =
				fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns);
		dcn2_0_nv12_soc.dram_return_buffer_per_channel_bytes =
				le32_to_cpu(bb->dram_return_buffer_per_channel_bytes);
		dcn2_0_nv12_soc.round_trip_ping_latency_dcfclk_cycles =
				le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles);
		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_bytes =
				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes);
		dcn2_0_nv12_soc.channel_interleave_bytes =
				le32_to_cpu(bb->channel_interleave_bytes);
		dcn2_0_nv12_soc.num_banks =
				le32_to_cpu(bb->num_banks);
		dcn2_0_nv12_soc.num_chans =
le32_to_cpu(bb->num_chans);
		dcn2_0_nv12_soc.vmm_page_size_bytes =
				le32_to_cpu(bb->vmm_page_size_bytes);
		dcn2_0_nv12_soc.dram_clock_change_latency_us =
				fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
		// HACK!! Lower uclock latency switch time so we don't switch
		dcn2_0_nv12_soc.dram_clock_change_latency_us = 10;
		dcn2_0_nv12_soc.writeback_dram_clock_change_latency_us =
				fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
		dcn2_0_nv12_soc.return_bus_width_bytes =
				le32_to_cpu(bb->return_bus_width_bytes);
		dcn2_0_nv12_soc.dispclk_dppclk_vco_speed_mhz =
				le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz);
		dcn2_0_nv12_soc.xfc_bus_transport_time_us =
				le32_to_cpu(bb->xfc_bus_transport_time_us);
		dcn2_0_nv12_soc.xfc_xbuf_latency_tolerance_us =
				le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us);
		dcn2_0_nv12_soc.use_urgent_burst_bw =
				le32_to_cpu(bb->use_urgent_burst_bw);
		dcn2_0_nv12_soc.num_states =
				le32_to_cpu(bb->num_states);

		for (i = 0; i < dcn2_0_nv12_soc.num_states; i++) {
			dcn2_0_nv12_soc.clock_limits[i].state =
					le32_to_cpu(bb->clock_limits[i].state);
			dcn2_0_nv12_soc.clock_limits[i].dcfclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz);
			dcn2_0_nv12_soc.clock_limits[i].fabricclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz);
			dcn2_0_nv12_soc.clock_limits[i].dispclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz);
			dcn2_0_nv12_soc.clock_limits[i].dppclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz);
			dcn2_0_nv12_soc.clock_limits[i].phyclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz);
			dcn2_0_nv12_soc.clock_limits[i].socclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz);
			dcn2_0_nv12_soc.clock_limits[i].dscclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz);
			dcn2_0_nv12_soc.clock_limits[i].dram_speed_mts =
					fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts);
		}
	}

	if (pool->base.pp_smu) {
		struct pp_smu_nv_clock_table max_clocks = {0};
		unsigned int uclk_states[8] = {0};
		unsigned int num_states = 0;
		enum pp_smu_status status;
		bool clock_limits_available = false;
		bool uclk_states_available = false;

		if (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states) {
			status = (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states)
					(&pool->base.pp_smu->nv_funcs.pp_smu, uclk_states, &num_states);

			uclk_states_available = (status == PP_SMU_RESULT_OK);
		}

		if (pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks) {
			status = (*pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks)
					(&pool->base.pp_smu->nv_funcs.pp_smu, &max_clocks);
			/* SMU cannot set DCF clock to anything equal to or higher than SOC clock */
			if (max_clocks.dcfClockInKhz >= max_clocks.socClockInKhz)
				max_clocks.dcfClockInKhz = max_clocks.socClockInKhz - 1000;
			clock_limits_available = (status == PP_SMU_RESULT_OK);
		}

		if (clock_limits_available && uclk_states_available && num_states)
			update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states);
		else if (clock_limits_available)
			cap_soc_clocks(loaded_bb, max_clocks);
	}

	loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
	loaded_ip->max_num_dpp = pool->base.pipe_count;
	patch_bounding_box(dc, loaded_bb);
	return true;
}

static bool construct(
	uint8_t num_virtual_links,
	struct dc *dc,
	struct dcn20_resource_pool *pool)
{
	int i;
	struct dc_context *ctx = dc->ctx;
	struct irq_service_init_data init_data;
	struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
			get_asic_rev_soc_bb(ctx->asic_id.hw_internal_rev);
	struct _vcs_dpi_ip_params_st *loaded_ip =
get_asic_rev_ip_params(ctx->asic_id.hw_internal_rev);
	enum dml_project dml_project_version =
			get_dml_project_version(ctx->asic_id.hw_internal_rev);

	ctx->dc_bios->regs = &bios_regs;
	pool->base.funcs = &dcn20_res_pool_funcs;

	if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
		pool->base.res_cap = &res_cap_nv14;
		pool->base.pipe_count = 5;
		pool->base.mpcc_count = 5;
	} else {
		pool->base.res_cap = &res_cap_nv10;
		pool->base.pipe_count = 6;
		pool->base.mpcc_count = 6;
	}

	/*************************************************
	 *  Resource + asic cap hardcoding               *
	 *************************************************/
	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;

	dc->caps.max_downscale_ratio = 200;
	dc->caps.i2c_speed_in_khz = 100;
	dc->caps.max_cursor_size = 256;
	dc->caps.dmdata_alloc_size = 2048;

	dc->caps.max_slave_planes = 1;
	dc->caps.post_blend_color_processing = true;
	dc->caps.force_dp_tps4_for_cp2520 = true;
	dc->caps.hw_3d_lut = true;

	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {
		dc->debug = debug_defaults_drv;
	} else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
		pool->base.pipe_count = 4;
		pool->base.mpcc_count = pool->base.pipe_count;
		dc->debug = debug_defaults_diags;
	} else {
		dc->debug = debug_defaults_diags;
	}
	//dcn2.0x
	dc->work_arounds.dedcn20_305_wa = true;

	// Init the vm_helper
	if (dc->vm_helper)
		vm_helper_init(dc->vm_helper, 16);

	/*************************************************
	 *  Create resources                             *
	 *************************************************/

	pool->base.clock_sources[DCN20_CLK_SRC_PLL0] = dcn20_clock_source_create(ctx,
			ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL0, &clk_src_regs[0], false);
	pool->base.clock_sources[DCN20_CLK_SRC_PLL1] = dcn20_clock_source_create(ctx,
			ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL1, &clk_src_regs[1], false);
	pool->base.clock_sources[DCN20_CLK_SRC_PLL2] = dcn20_clock_source_create(ctx,
			ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL2, &clk_src_regs[2], false);
	pool->base.clock_sources[DCN20_CLK_SRC_PLL3] = dcn20_clock_source_create(ctx,
			ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL3, &clk_src_regs[3], false);
	pool->base.clock_sources[DCN20_CLK_SRC_PLL4] = dcn20_clock_source_create(ctx,
			ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL4, &clk_src_regs[4], false);
	pool->base.clock_sources[DCN20_CLK_SRC_PLL5] = dcn20_clock_source_create(ctx,
			ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL5, &clk_src_regs[5], false);
	pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL;
	/* todo: not reuse phy_pll registers */
	pool->base.dp_clock_source = dcn20_clock_source_create(ctx,
			ctx->dc_bios, CLOCK_SOURCE_ID_DP_DTO, &clk_src_regs[0], true);

	for (i = 0; i < pool->base.clk_src_count; i++) {
		if (pool->base.clock_sources[i] == NULL) {
			dm_error("DC: failed to create clock sources!\n");
			BREAK_TO_DEBUGGER();
			goto create_fail;
		}
	}

	pool->base.dccg = dccg2_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
	if (pool->base.dccg == NULL) {
		dm_error("DC: failed to create dccg!\n");
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	pool->base.dmcu = dcn20_dmcu_create(ctx, &dmcu_regs, &dmcu_shift, &dmcu_mask);
	if (pool->base.dmcu == NULL) {
		dm_error("DC: failed to create dmcu!\n");
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask);
	if (pool->base.abm == NULL) {
		dm_error("DC: failed to create abm!\n");
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	pool->base.pp_smu = dcn20_pp_smu_create(ctx);

	if (!init_soc_bounding_box(dc, pool)) {
		dm_error("DC: failed to initialize soc bounding box!\n");
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	dml_init_instance(&dc->dml,
loaded_bb, loaded_ip, dml_project_version);

	if (!dc->debug.disable_pplib_wm_range) {
		struct pp_smu_wm_range_sets ranges = {0};
		int i = 0;

		ranges.num_reader_wm_sets = 0;

		if (loaded_bb->num_states == 1) {
			ranges.reader_wm_sets[0].wm_inst = i;
			ranges.reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
			ranges.reader_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
			ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
			ranges.reader_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;

			ranges.num_reader_wm_sets = 1;
		} else if (loaded_bb->num_states > 1) {
			for (i = 0; i < 4 && i < loaded_bb->num_states; i++) {
				ranges.reader_wm_sets[i].wm_inst = i;
				ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
				ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
				ranges.reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ?
						(loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
				ranges.reader_wm_sets[i].max_fill_clk_mhz =
						loaded_bb->clock_limits[i].dram_speed_mts / 16;

				ranges.num_reader_wm_sets = i + 1;
			}

			ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
			ranges.reader_wm_sets[ranges.num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
		}

		ranges.num_writer_wm_sets = 1;

		ranges.writer_wm_sets[0].wm_inst = 0;
		ranges.writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
		ranges.writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
		ranges.writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
		ranges.writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;

		/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
		if (pool->base.pp_smu->nv_funcs.set_wm_ranges)
			pool->base.pp_smu->nv_funcs.set_wm_ranges(&pool->base.pp_smu->nv_funcs.pp_smu, &ranges);
	}

	init_data.ctx = dc->ctx;
	pool->base.irqs = dal_irq_service_dcn20_create(&init_data);
	if (!pool->base.irqs)
		goto create_fail;

	/* mem input -> ipp -> dpp -> opp -> TG */
	for (i = 0; i < pool->base.pipe_count; i++) {
		pool->base.hubps[i] = dcn20_hubp_create(ctx, i);
		if (pool->base.hubps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create memory input!\n");
			goto create_fail;
		}

		pool->base.ipps[i] = dcn20_ipp_create(ctx, i);
		if (pool->base.ipps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create input pixel processor!\n");
			goto create_fail;
		}

		pool->base.dpps[i] = dcn20_dpp_create(ctx, i);
		if (pool->base.dpps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create dpps!\n");
			goto create_fail;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
		pool->base.engines[i] = dcn20_aux_engine_create(ctx, i);
		if (pool->base.engines[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC:failed to create aux engine!!\n");
			goto create_fail;
		}
		pool->base.hw_i2cs[i] = dcn20_i2c_hw_create(ctx, i);
		if (pool->base.hw_i2cs[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC:failed to create hw i2c!!\n");
			goto create_fail;
		}
		pool->base.sw_i2cs[i] = NULL;
	}

	for (i = 0; i < pool->base.res_cap->num_opp; i++) {
		pool->base.opps[i] = dcn20_opp_create(ctx, i);
		if (pool->base.opps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create output pixel processor!\n");
			goto create_fail;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
		pool->base.timing_generators[i] =
dcn20_timing_generator_create(
				ctx, i);
		if (pool->base.timing_generators[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create tg!\n");
			goto create_fail;
		}
	}

	pool->base.timing_generator_count = i;

	pool->base.mpc = dcn20_mpc_create(ctx);
	if (pool->base.mpc == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create mpc!\n");
		goto create_fail;
	}

	pool->base.hubbub = dcn20_hubbub_create(ctx);
	if (pool->base.hubbub == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create hubbub!\n");
		goto create_fail;
	}

#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
		pool->base.dscs[i] = dcn20_dsc_create(ctx, i);
		if (pool->base.dscs[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create display stream compressor %d!\n", i);
			goto create_fail;
		}
	}
#endif

	if (!dcn20_dwbc_create(ctx, &pool->base)) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create dwbc!\n");
		goto create_fail;
	}
	if (!dcn20_mmhubbub_create(ctx, &pool->base)) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create mcif_wb!\n");
		goto create_fail;
	}

	if (!resource_construct(num_virtual_links, dc, &pool->base,
			(!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
			&res_create_funcs : &res_create_maximus_funcs)))
		goto create_fail;

	dcn20_hw_sequencer_construct(dc);

	dc->caps.max_planes = pool->base.pipe_count;

	for (i = 0; i < dc->caps.max_planes; ++i)
		dc->caps.planes[i] = plane_cap;

	dc->cap_funcs = cap_funcs;

	return true;

create_fail:

	destruct(pool);

	return false;
}

struct resource_pool *dcn20_create_resource_pool(
		const struct dc_init_data *init_data,
		struct dc *dc)
{
	struct dcn20_resource_pool *pool =
		kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);

	if (!pool)
		return NULL;

	if (construct(init_data->num_virtual_links, dc, pool))
		return &pool->base;

	BREAK_TO_DEBUGGER();
	kfree(pool);
	return NULL;
}