type
stringclasses
5 values
content
stringlengths
9
163k
functions
/* Print a console status line for this unit: forwards the caller-supplied
 * (non-literal) printf format, with the unit's description as its argument,
 * to manager_status_printf().  The DISABLE/REENABLE macro pair suppresses
 * the -Wformat-nonliteral warning around that call. */
void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) { DISABLE_WARNING_FORMAT_NONLITERAL; manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u)); REENABLE_WARNING; }
functions
/* Decide whether a daemon reload is needed for this unit: returns true if the
 * fragment file can no longer be stat()ed or its mtime differs from the one
 * recorded at load time.
 * NOTE(review): this snippet is truncated -- braces are unbalanced and the
 * declared t/path/loaded_cnt/current_cnt locals are used in the missing tail. */
bool unit_need_daemon_reload(Unit *u) { _cleanup_strv_free_ char **t = NULL; char **path; struct stat st; unsigned loaded_cnt, current_cnt; assert(u); if (u->fragment_path) { zero(st); if (stat(u->fragment_path, &st) < 0) /* What, cannot access this anymore? */ return true; if (u->fragment_mtime > 0 && timespec_load(&st.st_mtim) != u->fragment_mtime) return true; }
functions
/* Clear the unit's "failed" state by delegating to the per-type
 * reset_failed() vtable hook, when the unit type implements one. */
void unit_reset_failed(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->reset_failed)
                UNIT_VTABLE(u)->reset_failed(u);
}
functions
/* Returns true if a stop job is currently queued for this unit.
 *
 * Only the job queue is inspected, not the unit's current state, so this
 * is safe to call from the unit's own state-change handlers where the
 * state has not been updated yet.  Contrast unit_inactive_or_pending(),
 * which checks both the state and the queued job. */
bool unit_stop_pending(Unit *u) {
        assert(u);

        if (!u->job)
                return false;

        return u->job->type == JOB_STOP;
}
functions
/* Returns true if the unit is inactive or on its way down: either its
 * current state is inactive/deactivating, or a stop job is queued. */
bool unit_inactive_or_pending(Unit *u) {
        assert(u);

        return UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)) ||
                unit_stop_pending(u);
}
functions
/* Returns true if the unit is active or on its way up: either its
 * current state is active/activating, or a start-like job is queued. */
bool unit_active_or_pending(Unit *u) {
        assert(u);

        if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
                return true;

        if (u->job &&
            IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
                return true;

        return false;
}
functions
int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) { assert(u); assert(w >= 0 && w < _KILL_WHO_MAX); assert(signo > 0); assert(signo < _NSIG); if (!UNIT_VTABLE(u)->kill) return -EOPNOTSUPP; return UNIT_VTABLE(u)->kill(u, w, signo, error); }
functions
/* Common implementation of signal delivery shared by unit types: validates
 * that a main process exists when KILL_MAIN/KILL_MAIN_FAIL is requested,
 * reporting the failure through the sd_bus_error.
 * NOTE(review): this snippet is truncated -- r and killed are initialized but
 * the code that uses them (and the function's closing brace) is missing. */
int unit_kill_common( Unit *u, KillWho who, int signo, pid_t main_pid, pid_t control_pid, sd_bus_error *error) { int r = 0; bool killed = false; if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) { if (main_pid < 0) return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type)); else if (main_pid == 0) return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill"); }
functions
/* Collect the units "following" this one via the per-type following_set()
 * hook.  Stores NULL (no set) and returns 0 when the type has no hook;
 * otherwise forwards the hook's result. */
int unit_following_set(Unit *u, Set **s) {
        assert(u);
        assert(s);

        if (!UNIT_VTABLE(u)->following_set) {
                *s = NULL;
                return 0;
        }

        return UNIT_VTABLE(u)->following_set(u, s);
}
functions
/* Lazily determine and cache the unit-file state (enabled/disabled/...) of
 * this unit's fragment, querying the system or user scope depending on how
 * the manager runs; on query failure the cached state becomes UNIT_FILE_BAD.
 * NOTE(review): this snippet is truncated -- the return statement and the
 * function's closing brace are missing. */
UnitFileState unit_get_unit_file_state(Unit *u) { int r; assert(u); if (u->unit_file_state < 0 && u->fragment_path) { r = unit_file_get_state( u->manager->running_as == MANAGER_SYSTEM ? UNIT_FILE_SYSTEM : UNIT_FILE_USER, NULL, basename(u->fragment_path), &u->unit_file_state); if (r < 0) u->unit_file_state = UNIT_FILE_BAD; }
functions
/* Lazily determine and cache whether presets enable this unit's fragment.
 * The query scope (system vs. user) follows how the manager is running.
 * Returns the cached value (negative while unknown/uncacheable). */
int unit_get_unit_file_preset(Unit *u) {
        assert(u);

        if (u->unit_file_preset < 0 && u->fragment_path)
                u->unit_file_preset =
                        unit_file_query_preset(u->manager->running_as == MANAGER_SYSTEM ?
                                               UNIT_FILE_SYSTEM : UNIT_FILE_USER,
                                               NULL,
                                               basename(u->fragment_path));

        return u->unit_file_preset;
}
functions
/* Drop the reference this UnitRef holds: unlink it from the referenced
 * unit's refs list and clear the back-pointer.  A no-op when the ref is
 * already unset. */
void unit_ref_unset(UnitRef *ref) {
        assert(ref);

        if (ref->unit) {
                LIST_REMOVE(refs, ref->unit->refs, ref);
                ref->unit = NULL;
        }
}
functions
/* Copy manager-level defaults into the unit's exec context after the unit's
 * own settings have been parsed; the visible part duplicates each manager
 * rlimit the exec context has not set itself (newdup allocates the copy).
 * NOTE(review): this snippet is truncated -- cc and r are declared but the
 * cgroup-context patching and the closing braces are missing. */
int unit_patch_contexts(Unit *u) { CGroupContext *cc; ExecContext *ec; unsigned i; int r; assert(u); /* Patch in the manager defaults into the exec and cgroup * contexts, _after_ the rest of the settings have been * initialized */ ec = unit_get_exec_context(u); if (ec) { /* This only copies in the ones that need memory */ for (i = 0; i < _RLIMIT_MAX; i++) if (u->manager->rlimit[i] && !ec->rlimit[i]) { ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1); if (!ec->rlimit[i]) return -ENOMEM; }
functions
/* Resolve the directory that drop-in snippets for this unit should be written
 * to; for a user manager that is either the persistent config home or the
 * runtime dir, depending on mode/transient.  r == 0 here means "no such dir".
 * NOTE(review): this snippet is truncated -- the MANAGER_SYSTEM branch and
 * the function's closing brace are missing. */
int unit_drop_in_dir(Unit *u, UnitSetPropertiesMode mode, bool transient, char **dir) { assert(u); if (u->manager->running_as == MANAGER_USER) { int r; if (mode == UNIT_PERSISTENT && !transient) r = user_config_home(dir); else r = user_runtime_dir(dir); if (r == 0) return -ENOENT; return r; }
functions
/* Write a drop-in snippet called name with contents data for this unit,
 * persistently or at runtime depending on mode (other modes are a no-op).
 * The created file is registered in u->dropin_paths (kept sorted and
 * deduplicated) and the drop-in mtime is refreshed.
 * Returns 0 on success, negative errno-style code on failure. */
int unit_write_drop_in(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
        _cleanup_free_ char *dir = NULL, *p = NULL, *q = NULL;
        int r;

        assert(u);

        if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
                return 0;

        r = unit_drop_in_dir(u, mode, u->transient, &dir);
        if (r < 0)
                return r;

        r = write_drop_in(dir, u->id, 50, name, data);
        if (r < 0)
                return r;

        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        /* Remember the path of the new snippet */
        r = strv_extend(&u->dropin_paths, q);
        if (r < 0)
                return r;

        strv_sort(u->dropin_paths);
        strv_uniq(u->dropin_paths);

        u->dropin_mtime = now(CLOCK_REALTIME);
        return 0;
}
functions
/* printf-style convenience wrapper around unit_write_drop_in(): formats
 * the snippet contents first, then delegates.  Returns -ENOMEM when
 * formatting fails, otherwise unit_write_drop_in()'s result. */
int unit_write_drop_in_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
        _cleanup_free_ char *text = NULL;
        va_list args;
        int n;

        assert(u);
        assert(name);
        assert(format);

        if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
                return 0;

        va_start(args, format);
        n = vasprintf(&text, format, args);
        va_end(args);

        if (n < 0)
                return -ENOMEM;

        return unit_write_drop_in(u, mode, name, text);
}
functions
/* Like unit_write_drop_in(), but prefixes data with the unit type's
 * private configuration section header ("[<section>]\n").  Fails with
 * -EINVAL when the unit type declares no private section. */
int unit_write_drop_in_private(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
        _cleanup_free_ char *prefixed = NULL;

        assert(u);
        assert(name);
        assert(data);

        if (!UNIT_VTABLE(u)->private_section)
                return -EINVAL;

        if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
                return 0;

        prefixed = strjoin("[", UNIT_VTABLE(u)->private_section, "]\n", data, NULL);
        if (!prefixed)
                return -ENOMEM;

        return unit_write_drop_in(u, mode, name, prefixed);
}
functions
/* printf-style convenience wrapper around unit_write_drop_in_private():
 * formats the snippet contents first, then delegates.  Returns -ENOMEM
 * when formatting fails, otherwise the delegate's result. */
int unit_write_drop_in_private_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
        _cleanup_free_ char *text = NULL;
        va_list args;
        int n;

        assert(u);
        assert(name);
        assert(format);

        if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
                return 0;

        va_start(args, format);
        n = vasprintf(&text, format, args);
        va_end(args);

        if (n < 0)
                return -ENOMEM;

        return unit_write_drop_in_private(u, mode, name, text);
}
functions
/* Convert the unit into a transient one: reset its load state/error,
 * mark it transient and drop any on-disk fragment path.  Refused with
 * -EOPNOTSUPP for unit types that cannot be transient. */
int unit_make_transient(Unit *u) {
        assert(u);

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        u->load_state = UNIT_STUB;
        u->load_error = 0;
        u->transient = true;
        u->fragment_path = mfree(u->fragment_path);

        return 0;
}
functions
/* Kill processes of a unit according to its KillContext; the visible part
 * maps the KillOperation to the concrete signal (SIGKILL/SIGABRT or the
 * configured kill signal) and bails out early for KillMode=none.
 * NOTE(review): this snippet is truncated -- wait_for_exit/r are declared
 * but the actual signal delivery code and closing brace are missing. */
int unit_kill_context( Unit *u, KillContext *c, KillOperation k, pid_t main_pid, pid_t control_pid, bool main_pid_alien) { bool wait_for_exit = false; int sig, r; assert(u); assert(c); if (c->kill_mode == KILL_NONE) return 0; switch (k) { case KILL_KILL: sig = SIGKILL; break; case KILL_ABORT: sig = SIGABRT; break; case KILL_TERMINATE: sig = c->kill_signal; break; default: assert_not_reached("KillOperation unknown"); }
functions
/* NOTE(review): orphaned mid-function fragment (presumably from the tail of
 * unit_kill_context -- TODO confirm): after a successful recursive cgroup
 * kill it decides whether cgroup-empty notification can be trusted (only on
 * the unified hierarchy, or outside containers for non-delegated units) and
 * optionally sends SIGHUP to the rebuilt pid set.  Braces are unbalanced. */
else if (r > 0) { /* FIXME: For now, on the legacy hierarchy, we * will not wait for the cgroup members to die * if we are running in a container or if this * is a delegation unit, simply because cgroup * notification is unreliable in these * cases. It doesn't work at all in * containers, and outside of containers it * can be confused easily by left-over * directories in the cgroup -- which however * should not exist in non-delegated units. On * the unified hierarchy that's different, * there we get proper events. Hence rely on * them.*/ if (cg_unified() > 0 || (detect_container() == 0 && !unit_cgroup_delegate(u))) wait_for_exit = true; if (c->send_sighup && k != KILL_KILL) { set_free(pid_set); pid_set = unit_pid_set(main_pid, control_pid); if (!pid_set) return -ENOMEM; cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, SIGHUP, false, true, false, pid_set); }
functions
/* Register path (and, per the comment, eventually all its prefixes) as a
 * mount requirement of this unit; the visible part validates that the path
 * is absolute, canonicalizes slashes and rejects unsafe paths with -EPERM.
 * NOTE(review): this snippet is truncated -- the prefix-table registration
 * and the function's closing brace are missing; p leaks only on the visible
 * -EPERM path where it is explicitly freed first. */
int unit_require_mounts_for(Unit *u, const char *path) { char prefix[strlen(path) + 1], *p; int r; assert(u); assert(path); /* Registers a unit for requiring a certain path and all its * prefixes. We keep a simple array of these paths in the * unit, since its usually short. However, we build a prefix * table for all possible prefixes so that new appearing mount * units can easily determine which units to make themselves a * dependency of. */ if (!path_is_absolute(path)) return -EINVAL; p = strdup(path); if (!p) return -ENOMEM; path_kill_slashes(p); if (!path_is_safe(p)) { free(p); return -EPERM; }
functions
/* Ensure the unit has an ExecRuntime: reuse an existing one, or share the
 * runtime of a unit it joins namespaces with (taking a reference).
 * NOTE(review): this snippet is truncated -- the fallback that allocates a
 * fresh runtime and the closing braces are missing. */
int unit_setup_exec_runtime(Unit *u) { ExecRuntime **rt; size_t offset; Iterator i; Unit *other; offset = UNIT_VTABLE(u)->exec_runtime_offset; assert(offset > 0); /* Check if there already is an ExecRuntime for this unit? */ rt = (ExecRuntime**) ((uint8_t*) u + offset); if (*rt) return 0; /* Try to get it from somebody else */ SET_FOREACH(other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) { *rt = unit_get_exec_runtime(other); if (*rt) { exec_runtime_ref(*rt); return 0; }
functions
/* Returns whether unit type t is usable on this system.  Out-of-range
 * values are rejected; types without a supported() hook are assumed to
 * be always available. */
bool unit_type_supported(UnitType t) {
        if (_unlikely_(t < 0))
                return false;
        if (_unlikely_(t >= _UNIT_TYPE_MAX))
                return false;

        if (!unit_vtable[t]->supported)
                return true;

        return unit_vtable[t]->supported();
}
functions
/* Check whether the directory at where is empty; returns silently if so,
 * logs a warning if the check itself failed.
 * NOTE(review): this snippet is truncated -- the code handling the
 * "directory is non-empty" case and the closing brace are missing. */
void unit_warn_if_dir_nonempty(Unit *u, const char* where) { int r; assert(u); assert(where); r = dir_is_empty(where); if (r > 0) return; if (r < 0) { log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where); return; }
functions
/* Check whether where is a symlink; a failed check is logged at debug level
 * and treated as "not a symlink" (returns 0).
 * NOTE(review): this snippet is truncated -- the handling of the positive
 * "is a symlink" case and the closing brace are missing. */
int unit_fail_if_symlink(Unit *u, const char* where) { int r; assert(u); assert(where); r = is_symlink(where); if (r < 0) { log_unit_debug_errno(u, r, "Failed to check symlink %s, ignoring: %m", where); return 0; }
includes
#include <linux/kernel.h>
includes
#include <linux/init.h>
includes
#include <linux/module.h>
includes
#include <linux/interrupt.h>
includes
#include <linux/gpio.h>
includes
#include <linux/slab.h>
includes
#include <linux/of_gpio.h>
includes
#include <linux/platform_device.h>
includes
#include <linux/irq.h>
includes
#include <media/rc-core.h>
includes
#include <media/gpio-ir-recv.h>
defines
#define GPIO_IR_DRIVER_NAME "gpio-rc-recv"
defines
#define GPIO_IR_DEVICE_NAME "gpio_ir_recv"
defines
#define gpio_ir_recv_get_devtree_pdata(dev, pdata) (-ENOSYS)
structs
/* Per-device state for the GPIO IR receiver:
 *   rcdev      - rc-core device that raw edge events are fed into
 *   gpio_nr    - number of the GPIO line the IR demodulator drives
 *   active_low - if true, the sampled line value is inverted
 *   can_sleep  - nonzero when the GPIO may sleep, selecting the
 *                *_cansleep accessor in the IRQ path */
struct gpio_rc_dev { struct rc_dev *rcdev; unsigned int gpio_nr; bool active_low; int can_sleep; };
functions
/* Fill platform data from the device tree: reads the first GPIO (plus its
 * flags) from the node; probe deferral is propagated silently, other errors
 * are logged.
 * NOTE(review): this snippet is truncated -- the code that stores gpio/flags
 * into pdata and the closing brace are missing.  Also note the earlier
 * "#define gpio_ir_recv_get_devtree_pdata(dev, pdata) (-ENOSYS)" line, which
 * in the original is the !CONFIG_OF stub for this function. */
int gpio_ir_recv_get_devtree_pdata(struct device *dev, struct gpio_ir_recv_platform_data *pdata) { struct device_node *np = dev->of_node; enum of_gpio_flags flags; int gpio; gpio = of_get_gpio_flags(np, 0, &flags); if (gpio < 0) { if (gpio != -EPROBE_DEFER) dev_err(dev, "Failed to get gpio flags (%d)\n", gpio); return gpio; }
functions
irqreturn_t gpio_ir_recv_irq(int irq, void *dev_id) { struct gpio_rc_dev *gpio_dev = dev_id; unsigned int gval; int rc = 0; enum raw_event_type type = IR_SPACE; if (gpio_dev->can_sleep) gval = gpio_get_value_cansleep(gpio_dev->gpio_nr); else gval = gpio_get_value(gpio_dev->gpio_nr); if (gval < 0) goto err_get_value; if (gpio_dev->active_low) gval = !gval; if (gval == 1) type = IR_PULSE; rc = ir_raw_event_store_edge(gpio_dev->rcdev, type); if (rc < 0) goto err_get_value; ir_raw_event_handle(gpio_dev->rcdev); err_get_value: return IRQ_HANDLED; }
functions
/* Probe the platform device: when a device-tree node is present, allocate
 * platform data with devm_kzalloc() and fill it from the DT, propagating
 * any error (including -EPROBE_DEFER).
 * NOTE(review): this snippet is truncated -- gpio_dev/rcdev allocation,
 * registration and the closing brace are missing. */
int gpio_ir_recv_probe(struct platform_device *pdev) { struct gpio_rc_dev *gpio_dev; struct rc_dev *rcdev; const struct gpio_ir_recv_platform_data *pdata = pdev->dev.platform_data; int rc; if (pdev->dev.of_node) { struct gpio_ir_recv_platform_data *dtpdata = devm_kzalloc(&pdev->dev, sizeof(*dtpdata), GFP_KERNEL); if (!dtpdata) return -ENOMEM; rc = gpio_ir_recv_get_devtree_pdata(&pdev->dev, dtpdata); if (rc) return rc; pdata = dtpdata; }
functions
/* Tear down the IR receiver: release the IRQ first so no handler can run,
 * then unregister the rc-core device and free the GPIO line and the
 * per-device state.  Always succeeds. */
int gpio_ir_recv_remove(struct platform_device *pdev)
{
        struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev);

        free_irq(gpio_to_irq(gpio_dev->gpio_nr), gpio_dev);
        platform_set_drvdata(pdev, NULL);
        rc_unregister_device(gpio_dev->rcdev);
        gpio_free(gpio_dev->gpio_nr);
        kfree(gpio_dev);
        return 0;
}
functions
/* PM suspend hook: arm the receiver IRQ as a wakeup source when the
 * device may wake the system, otherwise mask it for the duration of
 * the suspend. */
int gpio_ir_recv_suspend(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev);
        int irq = gpio_to_irq(gpio_dev->gpio_nr);

        if (device_may_wakeup(dev))
                enable_irq_wake(irq);
        else
                disable_irq(irq);

        return 0;
}
functions
/* PM resume hook: mirror of gpio_ir_recv_suspend() -- disarm the wakeup
 * source when wakeup was enabled, otherwise re-enable the masked IRQ. */
int gpio_ir_recv_resume(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev);
        int irq = gpio_to_irq(gpio_dev->gpio_nr);

        if (device_may_wakeup(dev))
                disable_irq_wake(irq);
        else
                enable_irq(irq);

        return 0;
}
includes
#include <linux/major.h>
includes
#include <linux/blkdev.h>
includes
#include <linux/module.h>
includes
#include <linux/init.h>
includes
#include <linux/sched.h>
includes
#include <linux/fs.h>
includes
#include <linux/bio.h>
includes
#include <linux/stat.h>
includes
#include <linux/errno.h>
includes
#include <linux/file.h>
includes
#include <linux/ioctl.h>
includes
#include <linux/mutex.h>
includes
#include <linux/compiler.h>
includes
#include <linux/err.h>
includes
#include <linux/kernel.h>
includes
#include <linux/slab.h>
includes
#include <net/sock.h>
includes
#include <linux/net.h>
includes
#include <linux/kthread.h>
includes
#include <asm/uaccess.h>
includes
#include <asm/types.h>
includes
#include <linux/nbd.h>
defines
#define NBD_MAGIC 0x68797548
defines
#define dprintk(flags, fmt...)
defines
#define dprintk(flags, fmt...) do { \
defines
#define DBG_IOCTL 0x0004
defines
#define DBG_INIT 0x0010
defines
#define DBG_EXIT 0x0020
defines
#define DBG_BLKDEV 0x0100
defines
#define DBG_RX 0x0200
defines
#define DBG_TX 0x0400
functions
void nbd_end_request(struct request *req) { int error = req->errors ? -EIO : 0; struct request_queue *q = req->q; unsigned long flags; dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name, req, error ? "failed" : "done"); spin_lock_irqsave(q->queue_lock, flags); __blk_end_request_all(req, error); spin_unlock_irqrestore(q->queue_lock, flags); }
functions
/* Forcibly shut down the nbd socket so every listener errors out,
 * optionally taking the tx_lock first; the socket pointer is cleared.
 * NOTE(review): this snippet is truncated -- the matching mutex_unlock()
 * for the "lock" case and the function's closing brace are missing. */
void sock_shutdown(struct nbd_device *nbd, int lock) { /* Forcibly shutdown the socket causing all listeners * to error * * FIXME: This code is duplicated from sys_shutdown, but * there should be a more generic interface rather than * calling socket ops directly here */ if (lock) mutex_lock(&nbd->tx_lock); if (nbd->sock) { dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n"); kernel_sock_shutdown(nbd->sock, SHUT_RDWR); nbd->sock = NULL; }
functions
/* Timer callback for a hung nbd transmit: log the offender and deliver
 * SIGKILL to the task that was performing the I/O (passed via arg). */
void nbd_xmit_timeout(unsigned long arg)
{
        struct task_struct *task = (struct task_struct *)arg;

        printk(KERN_WARNING "nbd: killing hung xmit (%s, pid: %d)\n",
               task->comm, task->pid);
        force_sig(SIGKILL, task);
}
functions
/* Send or receive (send != 0 / == 0) size bytes at buf over the nbd
 * socket; the visible part rejects a closed socket with -EINVAL.
 * NOTE(review): this snippet is truncated -- the msghdr/kvec setup, the
 * signal-blocking logic the declared locals are for, and the closing
 * brace are missing. */
int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size, int msg_flags) { struct socket *sock = nbd->sock; int result; struct msghdr msg; struct kvec iov; sigset_t blocked, oldset; unsigned long pflags = current->flags; if (unlikely(!sock)) { dev_err(disk_to_dev(nbd->disk), "Attempted %s on closed socket in sock_xmit\n", (send ? "send" : "recv")); return -EINVAL; }
functions
/* Transmit one bio_vec's payload over the nbd socket: kmap the page,
 * send from the vec's offset/length, then unmap.  Returns sock_xmit()'s
 * result. */
int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec, int flags)
{
        void *mapped = kmap(bvec->bv_page);
        int result = sock_xmit(nbd, 1, mapped + bvec->bv_offset,
                               bvec->bv_len, flags);

        kunmap(bvec->bv_page);
        return result;
}
functions
/* Build and transmit the nbd wire request for req; the visible part fills
 * in the magic and command type, zeroing from/len for FLUSH since those
 * fields are reserved for that command.
 * NOTE(review): this snippet is truncated -- the non-FLUSH field setup,
 * the actual sock_xmit() calls and the closing brace are missing. */
int nbd_send_req(struct nbd_device *nbd, struct request *req) { int result, flags; struct nbd_request request; unsigned long size = blk_rq_bytes(req); request.magic = htonl(NBD_REQUEST_MAGIC); request.type = htonl(nbd_cmd(req)); if (nbd_cmd(req) == NBD_CMD_FLUSH) { /* Other values are reserved for FLUSH requests. */ request.from = 0; request.len = 0; }
functions
/* Receive one bio_vec's payload from the nbd socket: kmap the page,
 * read the full vec (MSG_WAITALL) into its offset, then unmap.
 * Returns sock_xmit()'s result. */
int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
{
        void *mapped = kmap(bvec->bv_page);
        int result = sock_xmit(nbd, 0, mapped + bvec->bv_offset,
                               bvec->bv_len, MSG_WAITALL);

        kunmap(bvec->bv_page);
        return result;
}
functions
ssize_t pid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); return sprintf(buf, "%ld\n", (long) ((struct nbd_device *)disk->private_data)->pid); }
functions
/* Main receive loop setup for the nbd client task: mark the socket
 * memalloc-capable, record our pid and expose it via the sysfs pid
 * attribute (undone on failure).
 * NOTE(review): this snippet is truncated -- the receive loop that uses
 * the declared req/ret and the closing brace are missing. */
int nbd_do_it(struct nbd_device *nbd) { struct request *req; int ret; BUG_ON(nbd->magic != NBD_MAGIC); sk_set_memalloc(nbd->sock->sk); nbd->pid = task_pid_nr(current); ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr); if (ret) { dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n"); nbd->pid = 0; return ret; }
functions
/* Fail every request still sitting on the nbd queue: with the socket and
 * active_req guaranteed gone (asserted), the list can be purged without
 * the spinlock; each request is marked errored and completed.
 * NOTE(review): this snippet is truncated -- the function's closing brace
 * (after the while loop) is missing. */
void nbd_clear_que(struct nbd_device *nbd) { struct request *req; BUG_ON(nbd->magic != NBD_MAGIC); /* * Because we have set nbd->sock to NULL under the tx_lock, all * modifications to the list must have completed by now. For * the same reason, the active_req must be NULL. * * As a consequence, we don't need to take the spin lock while * purging the list here. */ BUG_ON(nbd->sock); BUG_ON(nbd->active_req); while (!list_empty(&nbd->queue_head)) { req = list_entry(nbd->queue_head.next, struct request, queuelist); list_del_init(&req->queuelist); req->errors++; nbd_end_request(req); }
functions
/* Translate a block-layer request into an nbd command: non-FS requests are
 * rejected, reads become NBD_CMD_READ, and discard writes become
 * NBD_CMD_TRIM (warning if the server never advertised trim support).
 * NOTE(review): this snippet is truncated -- the plain-write path, the
 * error_out label and the closing braces are missing. */
void nbd_handle_req(struct nbd_device *nbd, struct request *req) { if (req->cmd_type != REQ_TYPE_FS) goto error_out; nbd_cmd(req) = NBD_CMD_READ; if (rq_data_dir(req) == WRITE) { if ((req->cmd_flags & REQ_DISCARD)) { WARN_ON(!(nbd->flags & NBD_FLAG_SEND_TRIM)); nbd_cmd(req) = NBD_CMD_TRIM; }
functions
/* Kernel thread that drains nbd->waiting_queue: sleeps until work arrives
 * or the thread should stop, then pops one request under queue_lock and
 * hands it to nbd_handle_req().
 * NOTE(review): this snippet is truncated -- the function's return and
 * closing brace (after the while loop) are missing. */
int nbd_thread(void *data) { struct nbd_device *nbd = data; struct request *req; set_user_nice(current, MIN_NICE); while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) { /* wait for something to do */ wait_event_interruptible(nbd->waiting_wq, kthread_should_stop() || !list_empty(&nbd->waiting_queue)); /* extract request */ if (list_empty(&nbd->waiting_queue)) continue; spin_lock_irq(&nbd->queue_lock); req = list_entry(nbd->waiting_queue.next, struct request, queuelist); list_del_init(&req->queuelist); spin_unlock_irq(&nbd->queue_lock); /* handle request */ nbd_handle_req(nbd, req); }
functions
/* ioctl dispatcher (called with tx_lock held): the visible NBD_DISCONNECT
 * case syncs the bdev (temporarily dropping tx_lock), re-validates the
 * socket after reacquiring the lock, and sends a NBD_CMD_DISC request.
 * NOTE(review): this snippet is truncated -- the remaining ioctl cases and
 * the closing braces of the switch/function are missing. */
int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, unsigned int cmd, unsigned long arg) { switch (cmd) { case NBD_DISCONNECT: { struct request sreq; dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); if (!nbd->sock) return -EINVAL; mutex_unlock(&nbd->tx_lock); fsync_bdev(bdev); mutex_lock(&nbd->tx_lock); blk_rq_init(NULL, &sreq); sreq.cmd_type = REQ_TYPE_SPECIAL; nbd_cmd(&sreq) = NBD_CMD_DISC; /* Check again after getting mutex back. */ if (!nbd->sock) return -EINVAL; nbd->disconnect = 1; nbd_send_req(nbd, &sreq); return 0; }
functions
/* Block-device ioctl entry point: requires CAP_SYS_ADMIN, validates the
 * device magic, then dispatches to __nbd_ioctl() with the transmit lock
 * held.  Returns the dispatcher's result. */
int nbd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
        struct nbd_device *nbd = bdev->bd_disk->private_data;
        int error;

        /* Anyone capable of this syscall can do *real bad* things */
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        BUG_ON(nbd->magic != NBD_MAGIC);

        dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
                        nbd->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);

        mutex_lock(&nbd->tx_lock);
        error = __nbd_ioctl(bdev, nbd, cmd, arg);
        mutex_unlock(&nbd->tx_lock);

        return error;
}
functions
/* Module init: the visible part statically asserts the on-wire request size
 * (28 bytes) and rejects a negative max_part module parameter.
 * NOTE(review): this snippet is truncated -- the device/gendisk allocation
 * loop the declared locals are for, and the closing brace, are missing; the
 * return type (int) also appears cut off before __init. */
__init nbd_init(void) { int err = -ENOMEM; int i; int part_shift; BUILD_BUG_ON(sizeof(struct nbd_request) != 28); if (max_part < 0) { printk(KERN_ERR "nbd: max_part must be >= 0\n"); return -EINVAL; }
functions
/* Module exit: for each configured nbd device, clear its magic and, when a
 * gendisk exists, unregister it and release its queue and disk reference.
 * NOTE(review): this snippet is truncated -- the unregister_blkdev()/kfree
 * tail and the closing braces are missing; the return type (void) also
 * appears cut off before __exit. */
__exit nbd_cleanup(void) { int i; for (i = 0; i < nbds_max; i++) { struct gendisk *disk = nbd_dev[i].disk; nbd_dev[i].magic = 0; if (disk) { del_gendisk(disk); blk_cleanup_queue(disk->queue); put_disk(disk); }
includes
#include <linux/delay.h>
includes
#include <linux/init.h>
includes
#include <linux/module.h>
includes
#include <linux/i2c.h>
includes
#include <linux/input.h>
includes
#include <linux/slab.h>