/*
 * Dataset-export artifact (not part of the original C source):
 * column header "answer" and string-length stats "stringlengths 15 1.25M"
 * were prepended by the extraction pipeline. Preserved here as a comment.
 */
#include <linux/module.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <media/v4l2-ioctl.h> #include "vpif.h" #include "vpif_capture.h" MODULE_DESCRIPTION("TI DaVinci VPIF Capture driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(<API key>); #define vpif_err(fmt, arg...) v4l2_err(&vpif_obj.v4l2_dev, fmt, ## arg) #define vpif_dbg(level, debug, fmt, arg...) \ v4l2_dbg(level, debug, &vpif_obj.v4l2_dev, fmt, ## arg) static int debug = 1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level 0-1"); #define VPIF_DRIVER_NAME "vpif_capture" /* global variables */ static struct vpif_device vpif_obj = { {NULL} }; static struct device *vpif_dev; static void <API key>(struct channel_obj *ch); static void vpif_config_addr(struct channel_obj *ch, int muxmode); static u8 channel_first_int[<API key>][2] = { {1, 1} }; /* Is set to 1 in case of SDTV formats, 2 in case of HDTV formats. */ static int ycmux_mode; static inline struct vpif_cap_buffer *to_vpif_buffer(struct vb2_v4l2_buffer *vb) { return container_of(vb, struct vpif_cap_buffer, vb); } /** * vpif_buffer_prepare : callback function for buffer prepare * @vb: ptr to vb2_buffer * * This is the callback function for buffer prepare when vb2_qbuf() * function is called. 
The buffer is prepared and user space virtual address * or user address is converted into physical address */ static int vpif_buffer_prepare(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vb2_queue *q = vb->vb2_queue; struct channel_obj *ch = vb2_get_drv_priv(q); struct common_obj *common; unsigned long addr; vpif_dbg(2, debug, "vpif_buffer_prepare\n"); common = &ch->common[VPIF_VIDEO_INDEX]; <API key>(vb, 0, common->fmt.fmt.pix.sizeimage); if (<API key>(vb, 0) > vb2_plane_size(vb, 0)) return -EINVAL; vbuf->field = common->fmt.fmt.pix.field; addr = <API key>(vb, 0); if (!IS_ALIGNED((addr + common->ytop_off), 8) || !IS_ALIGNED((addr + common->ybtm_off), 8) || !IS_ALIGNED((addr + common->ctop_off), 8) || !IS_ALIGNED((addr + common->cbtm_off), 8)) { vpif_dbg(1, debug, "offset is not aligned\n"); return -EINVAL; } return 0; } /** * <API key> : Callback function for buffer setup. * @vq: vb2_queue ptr * @nbuffers: ptr to number of buffers requested by application * @nplanes:: contains number of distinct video planes needed to hold a frame * @sizes[]: contains the size (in bytes) of each plane. 
* @alloc_devs: ptr to allocation context * * This callback function is called when reqbuf() is called to adjust * the buffer count and buffer size */ static int <API key>(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { struct channel_obj *ch = vb2_get_drv_priv(vq); struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; unsigned size = common->fmt.fmt.pix.sizeimage; vpif_dbg(2, debug, "vpif_buffer_setup\n"); if (*nplanes) { if (sizes[0] < size) return -EINVAL; size = sizes[0]; } if (vq->num_buffers + *nbuffers < 3) *nbuffers = 3 - vq->num_buffers; *nplanes = 1; sizes[0] = size; /* Calculate the offset for Y and C data in the buffer */ <API key>(ch); return 0; } /** * vpif_buffer_queue : Callback function to add buffer to DMA queue * @vb: ptr to vb2_buffer */ static void vpif_buffer_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue); struct vpif_cap_buffer *buf = to_vpif_buffer(vbuf); struct common_obj *common; unsigned long flags; common = &ch->common[VPIF_VIDEO_INDEX]; vpif_dbg(2, debug, "vpif_buffer_queue\n"); spin_lock_irqsave(&common->irqlock, flags); /* add the buffer to the DMA queue */ list_add_tail(&buf->list, &common->dma_queue); <API key>(&common->irqlock, flags); } /** * <API key> : Starts the DMA engine for streaming * @vb: ptr to vb2_buffer * @count: number of buffers */ static int <API key>(struct vb2_queue *vq, unsigned int count) { struct vpif_capture_config *vpif_config_data = vpif_dev->platform_data; struct channel_obj *ch = vb2_get_drv_priv(vq); struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct vpif_params *vpif = &ch->vpifparams; struct vpif_cap_buffer *buf, *tmp; unsigned long addr, flags; int ret; spin_lock_irqsave(&common->irqlock, flags); /* Initialize field_id */ ch->field_id = 0; /* configure 1 or 2 channel mode */ if (vpif_config_data-><API key>) { ret = 
vpif_config_data-> <API key>(vpif->std_info.ycmux_mode); if (ret < 0) { vpif_dbg(1, debug, "can't set vpif channel mode\n"); goto err; } } ret = v4l2_subdev_call(ch->sd, video, s_stream, 1); if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) { vpif_dbg(1, debug, "stream on failed in subdev\n"); goto err; } /* Call vpif_set_params function to set the parameters and addresses */ ret = <API key>(vpif, ch->channel_id); if (ret < 0) { vpif_dbg(1, debug, "can't set video params\n"); goto err; } ycmux_mode = ret; vpif_config_addr(ch, ret); /* Get the next frame from the buffer queue */ common->cur_frm = common->next_frm = list_entry(common->dma_queue.next, struct vpif_cap_buffer, list); /* Remove buffer from the buffer queue */ list_del(&common->cur_frm->list); <API key>(&common->irqlock, flags); addr = <API key>(&common->cur_frm->vb.vb2_buf, 0); common->set_addr(addr + common->ytop_off, addr + common->ybtm_off, addr + common->ctop_off, addr + common->cbtm_off); /** * Set interrupt for both the fields in VPIF Register enable channel in * VPIF register */ channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1; if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { <API key>(); <API key>(1); enable_channel0(1); } if (VPIF_CHANNEL1_VIDEO == ch->channel_id || ycmux_mode == 2) { <API key>(); <API key>(1); enable_channel1(1); } return 0; err: <API key>(buf, tmp, &common->dma_queue, list) { list_del(&buf->list); vb2_buffer_done(&buf->vb.vb2_buf, <API key>); } <API key>(&common->irqlock, flags); return ret; } /** * vpif_stop_streaming : Stop the DMA engine * @vq: ptr to vb2_queue * * This callback stops the DMA engine and any remaining buffers * in the DMA queue are released. 
*/ static void vpif_stop_streaming(struct vb2_queue *vq) { struct channel_obj *ch = vb2_get_drv_priv(vq); struct common_obj *common; unsigned long flags; int ret; common = &ch->common[VPIF_VIDEO_INDEX]; /* Disable channel as per its device type and channel id */ if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { enable_channel0(0); <API key>(0); } if (VPIF_CHANNEL1_VIDEO == ch->channel_id || ycmux_mode == 2) { enable_channel1(0); <API key>(0); } ycmux_mode = 0; ret = v4l2_subdev_call(ch->sd, video, s_stream, 0); if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) vpif_dbg(1, debug, "stream off failed in subdev\n"); /* release all active buffers */ spin_lock_irqsave(&common->irqlock, flags); if (common->cur_frm == common->next_frm) { vb2_buffer_done(&common->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR); } else { if (common->cur_frm) vb2_buffer_done(&common->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR); if (common->next_frm) vb2_buffer_done(&common->next_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR); } while (!list_empty(&common->dma_queue)) { common->next_frm = list_entry(common->dma_queue.next, struct vpif_cap_buffer, list); list_del(&common->next_frm->list); vb2_buffer_done(&common->next_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR); } <API key>(&common->irqlock, flags); } static struct vb2_ops video_qops = { .queue_setup = <API key>, .buf_prepare = vpif_buffer_prepare, .start_streaming = <API key>, .stop_streaming = vpif_stop_streaming, .buf_queue = vpif_buffer_queue, .wait_prepare = <API key>, .wait_finish = vb2_ops_wait_finish, }; /** * <API key>: process a completed buffer * @common: ptr to common channel object * * This function time stamp the buffer and mark it as DONE. 
It also * wake up any process waiting on the QUEUE and set the next buffer * as current */ static void <API key>(struct common_obj *common) { common->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns(); vb2_buffer_done(&common->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE); /* Make curFrm pointing to nextFrm */ common->cur_frm = common->next_frm; } /** * <API key>: set next buffer address for capture * @common : ptr to common channel object * * This function will get next buffer from the dma queue and * set the buffer address in the vpif register for capture. * the buffer is marked active */ static void <API key>(struct common_obj *common) { unsigned long addr = 0; spin_lock(&common->irqlock); common->next_frm = list_entry(common->dma_queue.next, struct vpif_cap_buffer, list); /* Remove that buffer from the buffer queue */ list_del(&common->next_frm->list); spin_unlock(&common->irqlock); addr = <API key>(&common->next_frm->vb.vb2_buf, 0); /* Set top and bottom field addresses in VPIF registers */ common->set_addr(addr + common->ytop_off, addr + common->ybtm_off, addr + common->ctop_off, addr + common->cbtm_off); } /** * vpif_channel_isr : ISR handler for vpif capture * @irq: irq number * @dev_id: dev_id ptr * * It changes status of the captured buffer, takes next buffer from the queue * and sets its address in VPIF registers */ static irqreturn_t vpif_channel_isr(int irq, void *dev_id) { struct vpif_device *dev = &vpif_obj; struct common_obj *common; struct channel_obj *ch; int channel_id; int fid = -1, i; channel_id = *(int *)(dev_id); if (!vpif_intr_status(channel_id)) return IRQ_NONE; ch = dev->dev[channel_id]; for (i = 0; i < <API key>; i++) { common = &ch->common[i]; /* skip If streaming is not started in this channel */ /* Check the field format */ if (1 == ch->vpifparams.std_info.frm_fmt) { /* Progressive mode */ spin_lock(&common->irqlock); if (list_empty(&common->dma_queue)) { spin_unlock(&common->irqlock); continue; } spin_unlock(&common->irqlock); if 
(!channel_first_int[i][channel_id]) <API key>(common); channel_first_int[i][channel_id] = 0; <API key>(common); channel_first_int[i][channel_id] = 0; } else { /** * Interlaced mode. If it is first interrupt, ignore * it */ if (channel_first_int[i][channel_id]) { channel_first_int[i][channel_id] = 0; continue; } if (0 == i) { ch->field_id ^= 1; /* Get field id from VPIF registers */ fid = vpif_channel_getfid(ch->channel_id); if (fid != ch->field_id) { /** * If field id does not match stored * field id, make them in sync */ if (0 == fid) ch->field_id = fid; return IRQ_HANDLED; } } /* device field id and local field id are in sync */ if (0 == fid) { /* this is even field */ if (common->cur_frm == common->next_frm) continue; /* mark the current buffer as done */ <API key>(common); } else if (1 == fid) { /* odd field */ spin_lock(&common->irqlock); if (list_empty(&common->dma_queue) || (common->cur_frm != common->next_frm)) { spin_unlock(&common->irqlock); continue; } spin_unlock(&common->irqlock); <API key>(common); } } } return IRQ_HANDLED; } /** * <API key>() - update standard related info * @ch: ptr to channel object * * For a given standard selected by application, update values * in the device data structures */ static int <API key>(struct channel_obj *ch) { struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct vpif_params *vpifparams = &ch->vpifparams; const struct <API key> *config; struct <API key> *std_info = &vpifparams->std_info; struct video_obj *vid_ch = &ch->video; int index; vpif_dbg(2, debug, "<API key>\n"); for (index = 0; index < <API key>; index++) { config = &vpif_ch_params[index]; if (config->hd_sd == 0) { vpif_dbg(2, debug, "SD format\n"); if (config->stdid & vid_ch->stdid) { memcpy(std_info, config, sizeof(*config)); break; } } else { vpif_dbg(2, debug, "HD format\n"); if (!memcmp(&config->dv_timings, &vid_ch->dv_timings, sizeof(vid_ch->dv_timings))) { memcpy(std_info, config, sizeof(*config)); break; } } } /* standard not found */ if 
(index == <API key>) return -EINVAL; common->fmt.fmt.pix.width = std_info->width; common->width = std_info->width; common->fmt.fmt.pix.height = std_info->height; common->height = std_info->height; common->fmt.fmt.pix.sizeimage = common->height * common->width * 2; common->fmt.fmt.pix.bytesperline = std_info->width; vpifparams->video_params.hpitch = std_info->width; vpifparams->video_params.storage_mode = std_info->frm_fmt; if (vid_ch->stdid) common->fmt.fmt.pix.colorspace = <API key>; else common->fmt.fmt.pix.colorspace = <API key>; if (ch->vpifparams.std_info.frm_fmt) common->fmt.fmt.pix.field = V4L2_FIELD_NONE; else common->fmt.fmt.pix.field = <API key>; if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER) common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8; else common->fmt.fmt.pix.pixelformat = <API key>; common->fmt.type = <API key>; return 0; } /** * <API key> : This function calculates buffers offsets * @ch : ptr to channel object * * This function calculates buffer offsets for Y and C in the top and * bottom field */ static void <API key>(struct channel_obj *ch) { unsigned int hpitch, sizeimage; struct video_obj *vid_ch = &(ch->video); struct vpif_params *vpifparams = &ch->vpifparams; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; enum v4l2_field field = common->fmt.fmt.pix.field; vpif_dbg(2, debug, "<API key>\n"); if (V4L2_FIELD_ANY == field) { if (vpifparams->std_info.frm_fmt) vid_ch->buf_field = V4L2_FIELD_NONE; else vid_ch->buf_field = <API key>; } else vid_ch->buf_field = common->fmt.fmt.pix.field; sizeimage = common->fmt.fmt.pix.sizeimage; hpitch = common->fmt.fmt.pix.bytesperline; if ((V4L2_FIELD_NONE == vid_ch->buf_field) || (<API key> == vid_ch->buf_field)) { /* Calculate offsets for Y top, Y Bottom, C top and C Bottom */ common->ytop_off = 0; common->ybtm_off = hpitch; common->ctop_off = sizeimage / 2; common->cbtm_off = sizeimage / 2 + hpitch; } else if (V4L2_FIELD_SEQ_TB == vid_ch->buf_field) { /* Calculate offsets for Y top, Y 
Bottom, C top and C Bottom */ common->ytop_off = 0; common->ybtm_off = sizeimage / 4; common->ctop_off = sizeimage / 2; common->cbtm_off = common->ctop_off + sizeimage / 4; } else if (V4L2_FIELD_SEQ_BT == vid_ch->buf_field) { /* Calculate offsets for Y top, Y Bottom, C top and C Bottom */ common->ybtm_off = 0; common->ytop_off = sizeimage / 4; common->cbtm_off = sizeimage / 2; common->ctop_off = common->cbtm_off + sizeimage / 4; } if ((V4L2_FIELD_NONE == vid_ch->buf_field) || (<API key> == vid_ch->buf_field)) vpifparams->video_params.storage_mode = 1; else vpifparams->video_params.storage_mode = 0; if (1 == vpifparams->std_info.frm_fmt) vpifparams->video_params.hpitch = common->fmt.fmt.pix.bytesperline; else { if ((field == V4L2_FIELD_ANY) || (field == <API key>)) vpifparams->video_params.hpitch = common->fmt.fmt.pix.bytesperline * 2; else vpifparams->video_params.hpitch = common->fmt.fmt.pix.bytesperline; } ch->vpifparams.video_params.stdid = vpifparams->std_info.stdid; } /** * <API key>() - Get default field type based on interface * @vpif_params - ptr to vpif params */ static inline enum v4l2_field <API key>( struct vpif_interface *iface) { return (iface->if_type == VPIF_IF_RAW_BAYER) ? V4L2_FIELD_NONE : <API key>; } /** * vpif_config_addr() - function to configure buffer address in vpif * @ch - channel ptr * @muxmode - channel mux mode */ static void vpif_config_addr(struct channel_obj *ch, int muxmode) { struct common_obj *common; vpif_dbg(2, debug, "vpif_config_addr\n"); common = &(ch->common[VPIF_VIDEO_INDEX]); if (VPIF_CHANNEL1_VIDEO == ch->channel_id) common->set_addr = <API key>; else if (2 == muxmode) common->set_addr = <API key>; else common->set_addr = <API key>; } /** * <API key>() - Maps input to sub device * @vpif_cfg - global config ptr * @chan_cfg - channel config ptr * @input_index - Given input index from application * * lookup the sub device information for a given input index. * we report all the inputs to application. 
inputs table also * has sub device name for the each input */ static int <API key>( struct vpif_capture_config *vpif_cfg, struct <API key> *chan_cfg, int input_index) { struct vpif_subdev_info *subdev_info; const char *subdev_name; int i; vpif_dbg(2, debug, "<API key>\n"); subdev_name = chan_cfg->inputs[input_index].subdev_name; if (!subdev_name) return -1; /* loop through the sub device list to get the sub device info */ for (i = 0; i < vpif_cfg->subdev_count; i++) { subdev_info = &vpif_cfg->subdev_info[i]; if (!strcmp(subdev_info->name, subdev_name)) return i; } return -1; } /** * vpif_set_input() - Select an input * @vpif_cfg - global config ptr * @ch - channel * @_index - Given input index from application * * Select the given input. */ static int vpif_set_input( struct vpif_capture_config *vpif_cfg, struct channel_obj *ch, int index) { struct <API key> *chan_cfg = &vpif_cfg->chan_config[ch->channel_id]; struct vpif_subdev_info *subdev_info = NULL; struct v4l2_subdev *sd = NULL; u32 input = 0, output = 0; int sd_index; int ret; sd_index = <API key>(vpif_cfg, chan_cfg, index); if (sd_index >= 0) { sd = vpif_obj.sd[sd_index]; subdev_info = &vpif_cfg->subdev_info[sd_index]; } /* first setup input path from sub device to vpif */ if (sd && vpif_cfg->setup_input_path) { ret = vpif_cfg->setup_input_path(ch->channel_id, subdev_info->name); if (ret < 0) { vpif_dbg(1, debug, "couldn't setup input path for the" \ " sub device %s, for input index %d\n", subdev_info->name, index); return ret; } } if (sd) { input = chan_cfg->inputs[index].input_route; output = chan_cfg->inputs[index].output_route; ret = v4l2_subdev_call(sd, video, s_routing, input, output, 0); if (ret < 0 && ret != -ENOIOCTLCMD) { vpif_dbg(1, debug, "Failed to set input\n"); return ret; } } ch->input_idx = index; ch->sd = sd; /* copy interface parameters to vpif */ ch->vpifparams.iface = chan_cfg->vpif_if; /* update tvnorms from the sub device input info */ ch->video_dev.tvnorms = 
chan_cfg->inputs[index].input.std; return 0; } /** * vpif_querystd() - querystd handler * @file: file ptr * @priv: file handle * @std_id: ptr to std id * * This function is called to detect standard at the selected input */ static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id) { struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); int ret; vpif_dbg(2, debug, "vpif_querystd\n"); /* Call querystd function of decoder device */ ret = v4l2_subdev_call(ch->sd, video, querystd, std_id); if (ret == -ENOIOCTLCMD || ret == -ENODEV) return -ENODATA; if (ret) { vpif_dbg(1, debug, "Failed to query standard for sub devices\n"); return ret; } return 0; } /** * vpif_g_std() - get STD handler * @file: file ptr * @priv: file handle * @std_id: ptr to std id */ static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std) { struct vpif_capture_config *config = vpif_dev->platform_data; struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct <API key> *chan_cfg; struct v4l2_input input; vpif_dbg(2, debug, "vpif_g_std\n"); if (!config->chan_config[ch->channel_id].inputs) return -ENODATA; chan_cfg = &config->chan_config[ch->channel_id]; input = chan_cfg->inputs[ch->input_idx].input; if (input.capabilities != V4L2_IN_CAP_STD) return -ENODATA; *std = ch->video.stdid; return 0; } /** * vpif_s_std() - set STD handler * @file: file ptr * @priv: file handle * @std_id: ptr to std id */ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id std_id) { struct vpif_capture_config *config = vpif_dev->platform_data; struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct <API key> *chan_cfg; struct v4l2_input input; int ret; vpif_dbg(2, debug, "vpif_s_std\n"); if (!config->chan_config[ch->channel_id].inputs) return -ENODATA; chan_cfg = 
&config->chan_config[ch->channel_id]; input = chan_cfg->inputs[ch->input_idx].input; if (input.capabilities != V4L2_IN_CAP_STD) return -ENODATA; if (vb2_is_busy(&common->buffer_queue)) return -EBUSY; /* Call encoder subdevice function to set the standard */ ch->video.stdid = std_id; memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings)); /* Get the information about the standard */ if (<API key>(ch)) { vpif_err("Error getting the standard info\n"); return -EINVAL; } /* set standard in the sub device */ ret = v4l2_subdev_call(ch->sd, video, s_std, std_id); if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) { vpif_dbg(1, debug, "Failed to set standard for sub devices\n"); return ret; } return 0; } /** * vpif_enum_input() - ENUMINPUT handler * @file: file ptr * @priv: file handle * @input: ptr to input structure */ static int vpif_enum_input(struct file *file, void *priv, struct v4l2_input *input) { struct vpif_capture_config *config = vpif_dev->platform_data; struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct <API key> *chan_cfg; chan_cfg = &config->chan_config[ch->channel_id]; if (input->index >= chan_cfg->input_count) return -EINVAL; memcpy(input, &chan_cfg->inputs[input->index].input, sizeof(*input)); return 0; } /** * vpif_g_input() - Get INPUT handler * @file: file ptr * @priv: file handle * @index: ptr to input index */ static int vpif_g_input(struct file *file, void *priv, unsigned int *index) { struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); *index = ch->input_idx; return 0; } /** * vpif_s_input() - Set INPUT handler * @file: file ptr * @priv: file handle * @index: input index */ static int vpif_s_input(struct file *file, void *priv, unsigned int index) { struct vpif_capture_config *config = vpif_dev->platform_data; struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct common_obj *common = 
&ch->common[VPIF_VIDEO_INDEX]; struct <API key> *chan_cfg; chan_cfg = &config->chan_config[ch->channel_id]; if (index >= chan_cfg->input_count) return -EINVAL; if (vb2_is_busy(&common->buffer_queue)) return -EBUSY; return vpif_set_input(config, ch, index); } /** * <API key>() - ENUM_FMT handler * @file: file ptr * @priv: file handle * @index: input index */ static int <API key>(struct file *file, void *priv, struct v4l2_fmtdesc *fmt) { struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); if (fmt->index != 0) { vpif_dbg(1, debug, "Invalid format index\n"); return -EINVAL; } /* Fill in the information about format */ if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER) { fmt->type = <API key>; strcpy(fmt->description, "Raw Mode -Bayer Pattern GrRBGb"); fmt->pixelformat = V4L2_PIX_FMT_SBGGR8; } else { fmt->type = <API key>; strcpy(fmt->description, "YCbCr4:2:2 YC Planar"); fmt->pixelformat = <API key>; } return 0; } /** * <API key>() - TRY_FMT handler * @file: file ptr * @priv: file handle * @fmt: ptr to v4l2 format structure */ static int <API key>(struct file *file, void *priv, struct v4l2_format *fmt) { struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct v4l2_pix_format *pixfmt = &fmt->fmt.pix; struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]); struct vpif_params *vpif_params = &ch->vpifparams; /* * to supress v4l-compliance warnings silently correct * the pixelformat */ if (vpif_params->iface.if_type == VPIF_IF_RAW_BAYER) { if (pixfmt->pixelformat != V4L2_PIX_FMT_SBGGR8) pixfmt->pixelformat = V4L2_PIX_FMT_SBGGR8; } else { if (pixfmt->pixelformat != <API key>) pixfmt->pixelformat = <API key>; } common->fmt.fmt.pix.pixelformat = pixfmt->pixelformat; <API key>(ch); pixfmt->field = common->fmt.fmt.pix.field; pixfmt->colorspace = common->fmt.fmt.pix.colorspace; pixfmt->bytesperline = common->fmt.fmt.pix.width; pixfmt->width = common->fmt.fmt.pix.width; 
pixfmt->height = common->fmt.fmt.pix.height; pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height * 2; pixfmt->priv = 0; return 0; } /** * vpif_g_fmt_vid_cap() - Set INPUT handler * @file: file ptr * @priv: file handle * @fmt: ptr to v4l2 format structure */ static int vpif_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; /* Check the validity of the buffer type */ if (common->fmt.type != fmt->type) return -EINVAL; /* Fill in the information about format */ *fmt = common->fmt; return 0; } /** * vpif_s_fmt_vid_cap() - Set FMT handler * @file: file ptr * @priv: file handle * @fmt: ptr to v4l2 format structure */ static int vpif_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; int ret; vpif_dbg(2, debug, "%s\n", __func__); if (vb2_is_busy(&common->buffer_queue)) return -EBUSY; ret = <API key>(file, priv, fmt); if (ret) return ret; /* store the format in the channel object */ common->fmt = *fmt; return 0; } /** * vpif_querycap() - QUERYCAP handler * @file: file ptr * @priv: file handle * @cap: ptr to v4l2_capability structure */ static int vpif_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct vpif_capture_config *config = vpif_dev->platform_data; cap->device_caps = <API key> | V4L2_CAP_STREAMING; cap->capabilities = cap->device_caps | <API key>; strlcpy(cap->driver, VPIF_DRIVER_NAME, sizeof(cap->driver)); snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", dev_name(vpif_dev)); strlcpy(cap->card, config->card_name, sizeof(cap->card)); return 0; } /** * <API key>() - ENUM_DV_TIMINGS handler * @file: file ptr * @priv: file handle * @timings: input timings */ static int <API 
key>(struct file *file, void *priv, struct <API key> *timings) { struct vpif_capture_config *config = vpif_dev->platform_data; struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct <API key> *chan_cfg; struct v4l2_input input; int ret; if (!config->chan_config[ch->channel_id].inputs) return -ENODATA; chan_cfg = &config->chan_config[ch->channel_id]; input = chan_cfg->inputs[ch->input_idx].input; if (input.capabilities != <API key>) return -ENODATA; timings->pad = 0; ret = v4l2_subdev_call(ch->sd, pad, enum_dv_timings, timings); if (ret == -ENOIOCTLCMD || ret == -ENODEV) return -EINVAL; return ret; } /** * <API key>() - QUERY_DV_TIMINGS handler * @file: file ptr * @priv: file handle * @timings: input timings */ static int <API key>(struct file *file, void *priv, struct v4l2_dv_timings *timings) { struct vpif_capture_config *config = vpif_dev->platform_data; struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct <API key> *chan_cfg; struct v4l2_input input; int ret; if (!config->chan_config[ch->channel_id].inputs) return -ENODATA; chan_cfg = &config->chan_config[ch->channel_id]; input = chan_cfg->inputs[ch->input_idx].input; if (input.capabilities != <API key>) return -ENODATA; ret = v4l2_subdev_call(ch->sd, video, query_dv_timings, timings); if (ret == -ENOIOCTLCMD || ret == -ENODEV) return -ENODATA; return ret; } /** * vpif_s_dv_timings() - S_DV_TIMINGS handler * @file: file ptr * @priv: file handle * @timings: digital video timings */ static int vpif_s_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings) { struct vpif_capture_config *config = vpif_dev->platform_data; struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct vpif_params *vpifparams = &ch->vpifparams; struct <API key> *std_info = &vpifparams->std_info; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct video_obj 
*vid_ch = &ch->video; struct v4l2_bt_timings *bt = &vid_ch->dv_timings.bt; struct <API key> *chan_cfg; struct v4l2_input input; int ret; if (!config->chan_config[ch->channel_id].inputs) return -ENODATA; chan_cfg = &config->chan_config[ch->channel_id]; input = chan_cfg->inputs[ch->input_idx].input; if (input.capabilities != <API key>) return -ENODATA; if (timings->type != V4L2_DV_BT_656_1120) { vpif_dbg(2, debug, "Timing type not defined\n"); return -EINVAL; } if (vb2_is_busy(&common->buffer_queue)) return -EBUSY; /* Configure subdevice timings, if any */ ret = v4l2_subdev_call(ch->sd, video, s_dv_timings, timings); if (ret == -ENOIOCTLCMD || ret == -ENODEV) ret = 0; if (ret < 0) { vpif_dbg(2, debug, "Error setting custom DV timings\n"); return ret; } if (!(timings->bt.width && timings->bt.height && (timings->bt.hbackporch || timings->bt.hfrontporch || timings->bt.hsync) && timings->bt.vfrontporch && (timings->bt.vbackporch || timings->bt.vsync))) { vpif_dbg(2, debug, "Timings for width, height, horizontal back porch, horizontal sync, horizontal front porch, vertical back porch, vertical sync and vertical back porch must be defined\n"); return -EINVAL; } vid_ch->dv_timings = *timings; /* Configure video port timings */ std_info->eav2sav = <API key>(bt) - 8; std_info->sav2eav = bt->width; std_info->l1 = 1; std_info->l3 = bt->vsync + bt->vbackporch + 1; std_info->vsize = <API key>(bt); if (bt->interlaced) { if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) { std_info->l5 = std_info->vsize/2 - (bt->vfrontporch - 1); std_info->l7 = std_info->vsize/2 + 1; std_info->l9 = std_info->l7 + bt->il_vsync + bt->il_vbackporch + 1; std_info->l11 = std_info->vsize - (bt->il_vfrontporch - 1); } else { vpif_dbg(2, debug, "Required timing values for interlaced BT format missing\n"); return -EINVAL; } } else { std_info->l5 = std_info->vsize - (bt->vfrontporch - 1); } strncpy(std_info->name, "Custom timings BT656/1120", VPIF_MAX_NAME); std_info->width = bt->width; 
std_info->height = bt->height; std_info->frm_fmt = bt->interlaced ? 0 : 1; std_info->ycmux_mode = 0; std_info->capture_format = 0; std_info->vbi_supported = 0; std_info->hd_sd = 1; std_info->stdid = 0; vid_ch->stdid = 0; return 0; } /** * vpif_g_dv_timings() - G_DV_TIMINGS handler * @file: file ptr * @priv: file handle * @timings: digital video timings */ static int vpif_g_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings) { struct vpif_capture_config *config = vpif_dev->platform_data; struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct video_obj *vid_ch = &ch->video; struct <API key> *chan_cfg; struct v4l2_input input; if (!config->chan_config[ch->channel_id].inputs) return -ENODATA; chan_cfg = &config->chan_config[ch->channel_id]; input = chan_cfg->inputs[ch->input_idx].input; if (input.capabilities != <API key>) return -ENODATA; *timings = vid_ch->dv_timings; return 0; } /* * vpif_log_status() - Status information * @file: file ptr * @priv: file handle * * Returns zero. 
*/ static int vpif_log_status(struct file *filep, void *priv) { /* status for sub devices */ <API key>(&vpif_obj.v4l2_dev, 0, core, log_status); return 0; } /* vpif capture ioctl operations */ static const struct v4l2_ioctl_ops vpif_ioctl_ops = { .vidioc_querycap = vpif_querycap, .<API key> = <API key>, .<API key> = vpif_g_fmt_vid_cap, .<API key> = vpif_s_fmt_vid_cap, .<API key> = <API key>, .vidioc_enum_input = vpif_enum_input, .vidioc_s_input = vpif_s_input, .vidioc_g_input = vpif_g_input, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_create_bufs = <API key>, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_querystd = vpif_querystd, .vidioc_s_std = vpif_s_std, .vidioc_g_std = vpif_g_std, .<API key> = <API key>, .<API key> = <API key>, .vidioc_s_dv_timings = vpif_s_dv_timings, .vidioc_g_dv_timings = vpif_g_dv_timings, .vidioc_log_status = vpif_log_status, }; /* vpif file operations */ static struct <API key> vpif_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = vb2_fop_release, .unlocked_ioctl = video_ioctl2, .mmap = vb2_fop_mmap, .poll = vb2_fop_poll }; /** * initialize_vpif() - Initialize vpif data structures * * Allocate memory for data structures and initialize them */ static int initialize_vpif(void) { int err, i, j; int <API key>; /* Allocate memory for six channel objects */ for (i = 0; i < <API key>; i++) { vpif_obj.dev[i] = kzalloc(sizeof(*vpif_obj.dev[i]), GFP_KERNEL); /* If memory allocation fails, return error */ if (!vpif_obj.dev[i]) { <API key> = i; err = -ENOMEM; goto <API key>; } } return 0; <API key>: for (j = 0; j < <API key>; j++) kfree(vpif_obj.dev[j]); return err; } static int vpif_async_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_subdev *asd) { int i; for (i = 0; i < vpif_obj.config->subdev_count; i++) if 
(!strcmp(vpif_obj.config->subdev_info[i].name, subdev->name)) { vpif_obj.sd[i] = subdev; return 0; } return -EINVAL; } static int vpif_probe_complete(void) { struct common_obj *common; struct video_device *vdev; struct channel_obj *ch; struct vb2_queue *q; int j, err, k; for (j = 0; j < <API key>; j++) { ch = vpif_obj.dev[j]; ch->channel_id = j; common = &(ch->common[VPIF_VIDEO_INDEX]); spin_lock_init(&common->irqlock); mutex_init(&common->lock); /* select input 0 */ err = vpif_set_input(vpif_obj.config, ch, 0); if (err) goto probe_out; /* set initial format */ ch->video.stdid = V4L2_STD_525_60; memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings)); <API key>(ch); /* Initialize vb2 queue */ q = &common->buffer_queue; q->type = <API key>; q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; q->drv_priv = ch; q->ops = &video_qops; q->mem_ops = &<API key>; q->buf_struct_size = sizeof(struct vpif_cap_buffer); q->timestamp_flags = <API key>; q->min_buffers_needed = 1; q->lock = &common->lock; q->dev = vpif_dev; err = vb2_queue_init(q); if (err) { vpif_err("vpif_capture: vb2_queue_init() failed\n"); goto probe_out; } INIT_LIST_HEAD(&common->dma_queue); /* Initialize the video_device structure */ vdev = &ch->video_dev; strlcpy(vdev->name, VPIF_DRIVER_NAME, sizeof(vdev->name)); vdev->release = <API key>; vdev->fops = &vpif_fops; vdev->ioctl_ops = &vpif_ioctl_ops; vdev->v4l2_dev = &vpif_obj.v4l2_dev; vdev->vfl_dir = VFL_DIR_RX; vdev->queue = q; vdev->lock = &common->lock; video_set_drvdata(&ch->video_dev, ch); err = <API key>(vdev, VFL_TYPE_GRABBER, (j ? 
1 : 0)); if (err) goto probe_out; } v4l2_info(&vpif_obj.v4l2_dev, "VPIF capture driver initialized\n"); return 0; probe_out: for (k = 0; k < j; k++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[k]; common = &ch->common[k]; /* Unregister video device */ <API key>(&ch->video_dev); } kfree(vpif_obj.sd); <API key>(&vpif_obj.v4l2_dev); return err; } static int vpif_async_complete(struct v4l2_async_notifier *notifier) { return vpif_probe_complete(); } /** * vpif_probe : This function probes the vpif capture driver * @pdev: platform device pointer * * This creates device entries by register itself to the V4L2 driver and * initializes fields of each channel objects */ static __init int vpif_probe(struct platform_device *pdev) { struct vpif_subdev_info *subdevdata; struct i2c_adapter *i2c_adap; struct resource *res; int subdev_count; int res_idx = 0; int i, err; vpif_dev = &pdev->dev; err = initialize_vpif(); if (err) { v4l2_err(vpif_dev->driver, "Error initializing vpif\n"); return err; } err = <API key>(vpif_dev, &vpif_obj.v4l2_dev); if (err) { v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n"); return err; } while ((res = <API key>(pdev, IORESOURCE_IRQ, res_idx))) { err = devm_request_irq(&pdev->dev, res->start, vpif_channel_isr, IRQF_SHARED, VPIF_DRIVER_NAME, (void *)(&vpif_obj.dev[res_idx]-> channel_id)); if (err) { err = -EINVAL; goto vpif_unregister; } res_idx++; } vpif_obj.config = pdev->dev.platform_data; subdev_count = vpif_obj.config->subdev_count; vpif_obj.sd = kcalloc(subdev_count, sizeof(*vpif_obj.sd), GFP_KERNEL); if (!vpif_obj.sd) { err = -ENOMEM; goto vpif_unregister; } if (!vpif_obj.config->asd_sizes) { i2c_adap = i2c_get_adapter(1); for (i = 0; i < subdev_count; i++) { subdevdata = &vpif_obj.config->subdev_info[i]; vpif_obj.sd[i] = <API key>(&vpif_obj.v4l2_dev, i2c_adap, &subdevdata-> board_info, NULL); if (!vpif_obj.sd[i]) { vpif_err("Error registering v4l2 subdevice\n"); err = -ENODEV; goto probe_subdev_out; } 
v4l2_info(&vpif_obj.v4l2_dev, "registered sub device %s\n", subdevdata->name); } vpif_probe_complete(); } else { vpif_obj.notifier.subdevs = vpif_obj.config->asd; vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0]; vpif_obj.notifier.bound = vpif_async_bound; vpif_obj.notifier.complete = vpif_async_complete; err = <API key>(&vpif_obj.v4l2_dev, &vpif_obj.notifier); if (err) { vpif_err("Error registering async notifier\n"); err = -EINVAL; goto probe_subdev_out; } } return 0; probe_subdev_out: /* free sub devices memory */ kfree(vpif_obj.sd); vpif_unregister: <API key>(&vpif_obj.v4l2_dev); return err; } /** * vpif_remove() - driver remove handler * @device: ptr to platform device structure * * The vidoe device is unregistered */ static int vpif_remove(struct platform_device *device) { struct common_obj *common; struct channel_obj *ch; int i; <API key>(&vpif_obj.v4l2_dev); kfree(vpif_obj.sd); /* un-register device */ for (i = 0; i < <API key>; i++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[i]; common = &ch->common[VPIF_VIDEO_INDEX]; /* Unregister video device */ <API key>(&ch->video_dev); kfree(vpif_obj.dev[i]); } return 0; } #ifdef CONFIG_PM_SLEEP /** * vpif_suspend: vpif device suspend */ static int vpif_suspend(struct device *dev) { struct common_obj *common; struct channel_obj *ch; int i; for (i = 0; i < <API key>; i++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[i]; common = &ch->common[VPIF_VIDEO_INDEX]; if (!<API key>(&common->buffer_queue)) continue; mutex_lock(&common->lock); /* Disable channel */ if (ch->channel_id == VPIF_CHANNEL0_VIDEO) { enable_channel0(0); <API key>(0); } if (ch->channel_id == VPIF_CHANNEL1_VIDEO || ycmux_mode == 2) { enable_channel1(0); <API key>(0); } mutex_unlock(&common->lock); } return 0; } /* * vpif_resume: vpif device suspend */ static int vpif_resume(struct device *dev) { struct common_obj *common; struct channel_obj *ch; int i; for (i = 0; i < <API key>; i++) { /* Get the 
pointer to the channel object */ ch = vpif_obj.dev[i]; common = &ch->common[VPIF_VIDEO_INDEX]; if (!<API key>(&common->buffer_queue)) continue; mutex_lock(&common->lock); /* Enable channel */ if (ch->channel_id == VPIF_CHANNEL0_VIDEO) { enable_channel0(1); <API key>(1); } if (ch->channel_id == VPIF_CHANNEL1_VIDEO || ycmux_mode == 2) { enable_channel1(1); <API key>(1); } mutex_unlock(&common->lock); } return 0; } #endif static SIMPLE_DEV_PM_OPS(vpif_pm_ops, vpif_suspend, vpif_resume); static __refdata struct platform_driver vpif_driver = { .driver = { .name = VPIF_DRIVER_NAME, .pm = &vpif_pm_ops, }, .probe = vpif_probe, .remove = vpif_remove, }; <API key>(vpif_driver);
// This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the // Free Software Foundation; either version 3, or (at your option) // any later version. // This library is distributed in the hope that it will be useful, // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // with this library; see the file COPYING3. If not see // { dg-do compile } #include <map> struct Key { Key() { } Key(const Key&) { } template<typename T> Key(const T&) { } bool operator<(const Key&) const; }; #if __cplusplus < 201103L // libstdc++/47628 void f() { typedef std::multimap<Key, int> MMap; MMap mm; mm.insert(MMap::value_type()); MMap::iterator i = mm.begin(); mm.erase(i); } #endif
#include <linux/init.h> #include <linux/platform_device.h> #include <linux/sysdev.h> #include <linux/amba/bus.h> #include <linux/amba/pl061.h> #include <linux/amba/mmci.h> #include <linux/amba/pl022.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/leds.h> #include <asm/mach-types.h> #include <asm/pmu.h> #include <asm/pgtable.h> #include <asm/hardware/gic.h> #include <asm/hardware/cache-l2x0.h> #include <asm/localtimer.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/time.h> #include <mach/board-eb.h> #include <mach/irqs.h> #include "core.h" static struct map_desc realview_eb_io_desc[] __initdata = { { .virtual = IO_ADDRESS(REALVIEW_SYS_BASE), .pfn = __phys_to_pfn(REALVIEW_SYS_BASE), .length = SZ_4K, .type = MT_DEVICE, }, { .virtual = IO_ADDRESS(<API key>), .pfn = __phys_to_pfn(<API key>), .length = SZ_4K, .type = MT_DEVICE, }, { .virtual = IO_ADDRESS(<API key>), .pfn = __phys_to_pfn(<API key>), .length = SZ_4K, .type = MT_DEVICE, }, { .virtual = IO_ADDRESS(REALVIEW_SCTL_BASE), .pfn = __phys_to_pfn(REALVIEW_SCTL_BASE), .length = SZ_4K, .type = MT_DEVICE, }, { .virtual = IO_ADDRESS(<API key>), .pfn = __phys_to_pfn(<API key>), .length = SZ_4K, .type = MT_DEVICE, }, { .virtual = IO_ADDRESS(<API key>), .pfn = __phys_to_pfn(<API key>), .length = SZ_4K, .type = MT_DEVICE, }, #ifdef CONFIG_DEBUG_LL { .virtual = IO_ADDRESS(<API key>), .pfn = __phys_to_pfn(<API key>), .length = SZ_4K, .type = MT_DEVICE, } #endif }; static struct map_desc <API key>[] __initdata = { { .virtual = IO_ADDRESS(<API key>), .pfn = __phys_to_pfn(<API key>), .length = SZ_4K, .type = MT_DEVICE, }, { .virtual = IO_ADDRESS(<API key>), .pfn = __phys_to_pfn(<API key>), .length = SZ_4K, .type = MT_DEVICE, }, { .virtual = IO_ADDRESS(<API key>), .pfn = __phys_to_pfn(<API key>), .length = SZ_8K, .type = MT_DEVICE, } }; static void __init realview_eb_map_io(void) { iotable_init(realview_eb_io_desc, ARRAY_SIZE(realview_eb_io_desc)); if 
(core_tile_eb11mp() || core_tile_a9mp()) iotable_init(<API key>, ARRAY_SIZE(<API key>)); } static struct pl061_platform_data gpio0_plat_data = { .gpio_base = 0, .irq_base = -1, }; static struct pl061_platform_data gpio1_plat_data = { .gpio_base = 8, .irq_base = -1, }; static struct pl061_platform_data gpio2_plat_data = { .gpio_base = 16, .irq_base = -1, }; static struct <API key> ssp0_plat_data = { .bus_id = 0, .enable_dma = 0, .num_chipselect = 1, }; /* * RealView EB AMBA devices */ /* * These devices are connected via the core APB bridge */ #define GPIO2_IRQ { IRQ_EB_GPIO2, NO_IRQ } #define GPIO3_IRQ { IRQ_EB_GPIO3, NO_IRQ } #define AACI_IRQ { IRQ_EB_AACI, NO_IRQ } #define MMCI0_IRQ { IRQ_EB_MMCI0A, IRQ_EB_MMCI0B } #define KMI0_IRQ { IRQ_EB_KMI0, NO_IRQ } #define KMI1_IRQ { IRQ_EB_KMI1, NO_IRQ } /* * These devices are connected directly to the multi-layer AHB switch */ #define EB_SMC_IRQ { NO_IRQ, NO_IRQ } #define MPMC_IRQ { NO_IRQ, NO_IRQ } #define EB_CLCD_IRQ { IRQ_EB_CLCD, NO_IRQ } #define DMAC_IRQ { IRQ_EB_DMA, NO_IRQ } /* * These devices are connected via the core APB bridge */ #define SCTL_IRQ { NO_IRQ, NO_IRQ } #define EB_WATCHDOG_IRQ { IRQ_EB_WDOG, NO_IRQ } #define EB_GPIO0_IRQ { IRQ_EB_GPIO0, NO_IRQ } #define GPIO1_IRQ { IRQ_EB_GPIO1, NO_IRQ } #define EB_RTC_IRQ { IRQ_EB_RTC, NO_IRQ } /* * These devices are connected via the DMA APB bridge */ #define SCI_IRQ { IRQ_EB_SCI, NO_IRQ } #define EB_UART0_IRQ { IRQ_EB_UART0, NO_IRQ } #define EB_UART1_IRQ { IRQ_EB_UART1, NO_IRQ } #define EB_UART2_IRQ { IRQ_EB_UART2, NO_IRQ } #define EB_UART3_IRQ { IRQ_EB_UART3, NO_IRQ } #define EB_SSP_IRQ { IRQ_EB_SSP, NO_IRQ } /* FPGA Primecells */ AMBA_DEVICE(aaci, "fpga:aaci", AACI, NULL); AMBA_DEVICE(mmc0, "fpga:mmc0", MMCI0, &<API key>); AMBA_DEVICE(kmi0, "fpga:kmi0", KMI0, NULL); AMBA_DEVICE(kmi1, "fpga:kmi1", KMI1, NULL); AMBA_DEVICE(uart3, "fpga:uart3", EB_UART3, NULL); /* DevChip Primecells */ AMBA_DEVICE(smc, "dev:smc", EB_SMC, NULL); AMBA_DEVICE(clcd, "dev:clcd", 
EB_CLCD, &clcd_plat_data); AMBA_DEVICE(dmac, "dev:dmac", DMAC, NULL); AMBA_DEVICE(sctl, "dev:sctl", SCTL, NULL); AMBA_DEVICE(wdog, "dev:wdog", EB_WATCHDOG, NULL); AMBA_DEVICE(gpio0, "dev:gpio0", EB_GPIO0, &gpio0_plat_data); AMBA_DEVICE(gpio1, "dev:gpio1", GPIO1, &gpio1_plat_data); AMBA_DEVICE(gpio2, "dev:gpio2", GPIO2, &gpio2_plat_data); AMBA_DEVICE(rtc, "dev:rtc", EB_RTC, NULL); AMBA_DEVICE(sci0, "dev:sci0", SCI, NULL); AMBA_DEVICE(uart0, "dev:uart0", EB_UART0, NULL); AMBA_DEVICE(uart1, "dev:uart1", EB_UART1, NULL); AMBA_DEVICE(uart2, "dev:uart2", EB_UART2, NULL); AMBA_DEVICE(ssp0, "dev:ssp0", EB_SSP, &ssp0_plat_data); static struct amba_device *amba_devs[] __initdata = { &dmac_device, &uart0_device, &uart1_device, &uart2_device, &uart3_device, &smc_device, &clcd_device, &sctl_device, &wdog_device, &gpio0_device, &gpio1_device, &gpio2_device, &rtc_device, &sci0_device, &ssp0_device, &aaci_device, &mmc0_device, &kmi0_device, &kmi1_device, }; /* * RealView EB platform devices */ static struct resource <API key> = { .start = <API key>, .end = <API key> + <API key> - 1, .flags = IORESOURCE_MEM, }; static struct resource <API key>[] = { [0] = { .start = <API key>, .end = <API key> + SZ_64K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_EB_ETH, .end = IRQ_EB_ETH, .flags = IORESOURCE_IRQ, }, }; /* * Detect and register the correct Ethernet device. 
RealView/EB rev D * platforms use the newer SMSC LAN9118 Ethernet chip */ static int eth_device_register(void) { void __iomem *eth_addr = ioremap(<API key>, SZ_4K); const char *name = NULL; u32 idrev; if (!eth_addr) return -ENOMEM; idrev = readl(eth_addr + 0x50); if ((idrev & 0xFFFF0000) != 0x01180000) /* SMSC LAN9118 not present, use LAN91C111 instead */ name = "smc91x"; iounmap(eth_addr); return <API key>(name, <API key>); } static struct resource <API key>[] = { [0] = { .start = <API key>, .end = <API key> + SZ_128K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_EB_USB, .end = IRQ_EB_USB, .flags = IORESOURCE_IRQ, }, }; static struct resource pmu_resources[] = { [0] = { .start = IRQ_EB11MP_PMU_CPU0, .end = IRQ_EB11MP_PMU_CPU0, .flags = IORESOURCE_IRQ, }, [1] = { .start = IRQ_EB11MP_PMU_CPU1, .end = IRQ_EB11MP_PMU_CPU1, .flags = IORESOURCE_IRQ, }, [2] = { .start = IRQ_EB11MP_PMU_CPU2, .end = IRQ_EB11MP_PMU_CPU2, .flags = IORESOURCE_IRQ, }, [3] = { .start = IRQ_EB11MP_PMU_CPU3, .end = IRQ_EB11MP_PMU_CPU3, .flags = IORESOURCE_IRQ, }, }; static struct platform_device pmu_device = { .name = "arm-pmu", .id = ARM_PMU_DEVICE_CPU, .num_resources = ARRAY_SIZE(pmu_resources), .resource = pmu_resources, }; static struct resource char_lcd_resources[] = { { .start = <API key>, .end = (<API key> + SZ_4K - 1), .flags = IORESOURCE_MEM, }, { .start = IRQ_EB_CHARLCD, .end = IRQ_EB_CHARLCD, .flags = IORESOURCE_IRQ, }, }; static struct platform_device char_lcd_device = { .name = "arm-charlcd", .id = -1, .num_resources = ARRAY_SIZE(char_lcd_resources), .resource = char_lcd_resources, }; static void __init gic_init_irq(void) { if (core_tile_eb11mp() || core_tile_a9mp()) { unsigned int pldctrl; /* new irq mode */ writel(0x0000a05f, __io_address(REALVIEW_SYS_LOCK)); pldctrl = readl(__io_address(REALVIEW_SYS_BASE) + <API key>); pldctrl |= 0x00800000; writel(pldctrl, __io_address(REALVIEW_SYS_BASE) + <API key>); writel(0x00000000, __io_address(REALVIEW_SYS_LOCK)); /* core tile GIC, 
primary */ gic_init(0, 29, __io_address(<API key>), __io_address(<API key>)); #ifndef <API key> /* board GIC, secondary */ gic_init(1, 96, __io_address(<API key>), __io_address(<API key>)); gic_cascade_irq(1, IRQ_EB11MP_EB_IRQ1); #endif } else { /* board GIC, primary */ gic_init(0, 29, __io_address(<API key>), __io_address(<API key>)); } } /* * Fix up the IRQ numbers for the RealView EB/ARM11MPCore tile */ static void <API key>(void) { /* AMBA devices */ dmac_device.irq[0] = IRQ_EB11MP_DMA; uart0_device.irq[0] = IRQ_EB11MP_UART0; uart1_device.irq[0] = IRQ_EB11MP_UART1; uart2_device.irq[0] = IRQ_EB11MP_UART2; uart3_device.irq[0] = IRQ_EB11MP_UART3; clcd_device.irq[0] = IRQ_EB11MP_CLCD; wdog_device.irq[0] = IRQ_EB11MP_WDOG; gpio0_device.irq[0] = IRQ_EB11MP_GPIO0; gpio1_device.irq[0] = IRQ_EB11MP_GPIO1; gpio2_device.irq[0] = IRQ_EB11MP_GPIO2; rtc_device.irq[0] = IRQ_EB11MP_RTC; sci0_device.irq[0] = IRQ_EB11MP_SCI; ssp0_device.irq[0] = IRQ_EB11MP_SSP; aaci_device.irq[0] = IRQ_EB11MP_AACI; mmc0_device.irq[0] = IRQ_EB11MP_MMCI0A; mmc0_device.irq[1] = IRQ_EB11MP_MMCI0B; kmi0_device.irq[0] = IRQ_EB11MP_KMI0; kmi1_device.irq[0] = IRQ_EB11MP_KMI1; /* platform devices */ <API key>[1].start = IRQ_EB11MP_ETH; <API key>[1].end = IRQ_EB11MP_ETH; <API key>[1].start = IRQ_EB11MP_USB; <API key>[1].end = IRQ_EB11MP_USB; } static void __init <API key>(void) { unsigned int timer_irq; timer0_va_base = __io_address(<API key>); timer1_va_base = __io_address(<API key>) + 0x20; timer2_va_base = __io_address(<API key>); timer3_va_base = __io_address(<API key>) + 0x20; if (core_tile_eb11mp() || core_tile_a9mp()) { #ifdef CONFIG_LOCAL_TIMERS twd_base = __io_address(<API key>); #endif timer_irq = IRQ_EB11MP_TIMER0_1; } else timer_irq = IRQ_EB_TIMER0_1; realview_timer_init(timer_irq); } static struct sys_timer realview_eb_timer = { .init = <API key>, }; static void realview_eb_reset(char mode) { void __iomem *reset_ctrl = __io_address(<API key>); void __iomem *lock_ctrl = 
__io_address(REALVIEW_SYS_LOCK); /* * To reset, we hit the on-board reset register * in the system FPGA */ __raw_writel(<API key>, lock_ctrl); if (core_tile_eb11mp()) __raw_writel(0x0008, reset_ctrl); } static void __init realview_eb_init(void) { int i; if (core_tile_eb11mp() || core_tile_a9mp()) { <API key>(); #ifdef CONFIG_CACHE_L2X0 /* 1MB (128KB/way), 8-way associativity, evmon/parity/share enabled * Bits: .... ...0 0111 1001 0000 .... .... .... */ l2x0_init(__io_address(<API key>), 0x00790000, 0xfe000fff); #endif <API key>(&pmu_device); } <API key>(&<API key>, 1); <API key>(&realview_i2c_device); <API key>(&char_lcd_device); eth_device_register(); <API key>(<API key>); for (i = 0; i < ARRAY_SIZE(amba_devs); i++) { struct amba_device *d = amba_devs[i]; <API key>(d, &iomem_resource); } #ifdef CONFIG_LEDS leds_event = realview_leds_event; #endif realview_reset = realview_eb_reset; } MACHINE_START(REALVIEW_EB, "ARM-RealView EB") /* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */ .atag_offset = 0x100, .fixup = realview_fixup, .map_io = realview_eb_map_io, .init_early = realview_init_early, .init_irq = gic_init_irq, .timer = &realview_eb_timer, .handle_irq = gic_handle_irq, .init_machine = realview_eb_init, #ifdef CONFIG_ZONE_DMA .dma_zone_size = SZ_256M, #endif MACHINE_END
#ifndef __MFC_BUF_H_ #define __MFC_BUF_H_ __FILE__ #include <linux/list.h> #include "mfc.h" #include "mfc_inst.h" #include "mfc_interface.h" /* FIXME */ #define ALIGN_4B (1 << 2) #define ALIGN_2KB (1 << 11) #define ALIGN_4KB (1 << 12) #define ALIGN_8KB (1 << 13) #define ALIGN_64KB (1 << 16) #define ALIGN_128KB (1 << 17) #define ALIGN_W 128 #define ALIGN_H 32 /* System */ /* Size, Port, Align */ #define MFC_FW_SYSTEM_SIZE (0x80000) /* 512KB, A, N(4KB for VMEM) */ /* Instance */ #define MFC_CTX_SIZE_L (0x96000) /* 600KB, N, 2KB, H.264 Decoding only */ #define MFC_CTX_SIZE (0x2800) /* 10KB, N, 2KB */ #define MFC_SHM_SIZE (0x400) /* 1KB, N, 4B */ /* Decoding */ #define MFC_CPB_SIZE (0x400000) /* Max.4MB, A, 2KB */ #define MFC_DESC_SIZE (0x20000) /* Max.128KB, A, 2KB */ #define MFC_DEC_NBMV_SIZE (0x4000) /* 16KB, A, 2KB */ #define MFC_DEC_NBIP_SIZE (0x8000) /* 32KB, A, 2KB */ #define MFC_DEC_NBDCAC_SIZE (0x4000) /* 16KB, A, 2KB */ #define MFC_DEC_UPNBMV_SIZE (0x11000) /* 68KB, A, 2KB */ #define MFC_DEC_SAMV_SIZE (0x40000) /* 256KB, A, 2KB */ #define MFC_DEC_OTLINE_SIZE (0x8000) /* 32KB, A, 2KB */ #define MFC_DEC_SYNPAR_SIZE (0x11000) /* 68KB, A, 2KB */ #define <API key> (0x800) /* 2KB, A, 2KB */ /* Encoding */ #define MFC_STRM_SIZE (0x300000) /* 3MB, A, 2KB (multi. 
4KB) */ /* FIXME: variable size */ #define MFC_ENC_UPMV_SIZE (0x10000) /* Var, A, 2KB */ #define MFC_ENC_COLFLG_SIZE (0x10000) /* Var, A, 2KB */ #define <API key> (0x10000) /* Var, A, 2KB */ #define <API key> (0x4000) /* 16KB, A, 2KB */ #define <API key> (0x10000) /* Var, A, 2KB */ #define <API key> (0x10000) /* Var, A, 2KB */ #define MFC_LUMA_ALIGN ALIGN_8KB #define MFC_CHROMA_ALIGN ALIGN_8KB #define MFC_MV_ALIGN ALIGN_8KB /* H.264 Decoding only */ #define PORT_A 0 #define PORT_B 1 /* FIXME: MFC Buffer Type add as allocation parameter */ /* #define MBT_ACCESS_MASK (0xFF << 24) #define MBT_SYSMMU (0x01 << 24) */ #define MBT_KERNEL (0x02 << 24) #define MBT_USER (0x04 << 24) #define MBT_OTHER (0x08 << 24) #if 0 #define MBT_TYPE_MASK (0xFF << 16) #define MBT_CTX (MBT_SYSMMU | MBT_KERNEL | (0x01 << 16))/* S, K */ #define MBT_DESC (MBT_SYSMMU | (0x02 << 16)) #define MBT_CODEC (MBT_SYSMMU | (0x04 << 16)) #define MBT_SHM (MBT_SYSMMU | MBT_KERNEL | (0x08 << 16))/* S, K */ #define MBT_CPB (MBT_SYSMMU | MBT_USER | (0x10 << 16))/* D: S, [K], U E: */ #define MBT_DPB (MBT_SYSMMU | MBT_USER | (0x20 << 16))/* D: S, [K], U E: */ #endif #define MBT_CTX (MBT_KERNEL | (0x01 << 16)) /* S, K */ #define MBT_DESC (0x02 << 16) #define MBT_CODEC (0x04 << 16) #define MBT_SHM (MBT_KERNEL | (0x08 << 16)) /* S, K */ #if 0 #define MBT_CPB (MBT_USER | (0x10 << 16)) /* D: S, [K], U E: */ #define MBT_DPB (MBT_USER | (0x20 << 16)) /* D: S, [K], U E: */ #endif #define MBT_CPB (MBT_KERNEL | MBT_USER | (0x10 << 16)) /* D: S, [K], U E: */ #define MBT_DPB (MBT_KERNEL | MBT_USER | (0x20 << 16)) /* D: S, [K], U E: */ enum <API key> { MBS_BEST_FIT = 0, MBS_FIRST_FIT = 1, }; /* Remove before Release */ #if 0 #define CPB_BUF_SIZE (0x400000) /* 3MB : 3x1024x1024 for decoder */ #define DESC_BUF_SIZE (0x20000) /* 128KB : 128x1024 */ #define SHARED_BUF_SIZE (0x10000) /* 64KB : 64x1024 */ #define PRED_BUF_SIZE (0x10000) /* 64KB : 64x1024 */ #define DEC_CODEC_BUF_SIZE (0x80000) /* 512KB : 512x1024 size per 
instance */ #define ENC_CODEC_BUF_SIZE (0x50000) /* 320KB : 512x1024 size per instance */ #define STREAM_BUF_SIZE (0x200000) /* 2MB : 2x1024x1024 for encoder */ #define MV_BUF_SIZE (0x10000) /* 64KB : 64x1024 for encoder */ #define MFC_CONTEXT_SIZE_L (640 * 1024) /* 600KB -> 640KB for alignment */ #define VC1DEC_CONTEXT_SIZE (64 * 1024) /* 10KB -> 64KB for alignment */ #define <API key> (64 * 1024) /* 10KB -> 64KB for alignment */ #define <API key> (64 * 1024) /* 10KB -> 64KB for alignment */ #define <API key> (64 * 1024) /* 10KB -> 64KB for alignment */ #define <API key> (64 * 1024) /* 10KB -> 64KB for alignment */ #define <API key> (64 * 1024) /* 10KB -> 64KB for alignment */ #define <API key> (64 * 1024) /* 10KB -> 64KB for alignment */ #define DESC_BUF_SIZE (0x20000) /* 128KB : 128x1024 */ #define SHARED_MEM_SIZE (0x1000) /* 4KB : 4x1024 size */ #define CPB_BUF_SIZE (0x400000) /* 4MB : 4x1024x1024 for decoder */ #define STREAM_BUF_SIZE (0x200000) /* 2MB : 2x1024x1024 for encoder */ #define <API key> (0x10000) /* 64KB : 64x1024 for encoder */ #endif struct mfc_alloc_buffer { struct list_head list; unsigned long real; /* phys. or virt. addr for MFC */ unsigned int size; /* allocation size */ unsigned char *addr; /* kernel virtual address space */ unsigned int type; /* buffer type */ int owner; /* instance context id */ #if defined(<API key>) struct vcm_mmu_res *vcm_s; struct vcm_res *vcm_k; unsigned long vcm_addr; size_t vcm_size; void *ump_handle; #elif defined(CONFIG_S5P_VMEM) unsigned int vmem_cookie; unsigned long vmem_addr; size_t vmem_size; #else unsigned int ofs; /* * offset phys. or virt. contiguous memory * phys.[bootmem, memblock] virt.[vmalloc] * when user use mmap, * user can access whole of memory by offset. */ #endif }; struct mfc_free_buffer { struct list_head list; unsigned long real; /* phys. or virt. 
addr for MFC */ unsigned int size; }; void mfc_print_buf(void); int mfc_init_buf(void); void mfc_final_buf(void); void <API key>(enum <API key> scheme); void mfc_merge_buf(void); struct mfc_alloc_buffer *_mfc_alloc_buf( struct mfc_inst_ctx *ctx, unsigned int size, int align, int flag); int mfc_alloc_buf( struct mfc_inst_ctx *ctx, struct mfc_buf_alloc_arg* args, int flag); int _mfc_free_buf(unsigned long real); int mfc_free_buf(struct mfc_inst_ctx *ctx, unsigned int key); void mfc_free_buf_type(int owner, int type); void mfc_free_buf_inst(int owner); unsigned long mfc_get_buf_real(int owner, unsigned int key); /* unsigned char *mfc_get_buf_addr(int owner, unsigned char *user); unsigned char *_mfc_get_buf_addr(int owner, unsigned char *user); */ #ifdef <API key> unsigned int <API key>(struct mfc_inst_ctx *ctx, struct mfc_buf_alloc_arg *args, int flag); void *<API key>(unsigned long real); #endif #endif /* __MFC_BUF_H_ */
/* A Bison parser, made by GNU Bison 1.875. */ /* As a special exception, when this file is copied by Bison into a Bison output file, you may use that output file without restriction. This special exception was added by the Free Software Foundation in version 1.24 of Bison. */ /* Tokens. */ #ifndef YYTOKENTYPE # define YYTOKENTYPE /* Put the tokens into the symbol table, so that GDB and other debuggers know about them. */ enum yytokentype { TOK_IDENT = 258, TOK_ATIDENT = 259, TOK_CONST_INT = 260, TOK_CONST_FLOAT = 261, TOK_CONST_MVA = 262, TOK_QUOTED_STRING = 263, TOK_USERVAR = 264, TOK_SYSVAR = 265, TOK_CONST_STRINGS = 266, TOK_BAD_NUMERIC = 267, TOK_SUBKEY = 268, TOK_DOT_NUMBER = 269, TOK_ADD = 270, TOK_AGENT = 271, TOK_ALTER = 272, TOK_AS = 273, TOK_ASC = 274, TOK_ATTACH = 275, TOK_ATTRIBUTES = 276, TOK_AVG = 277, TOK_BEGIN = 278, TOK_BETWEEN = 279, TOK_BIGINT = 280, TOK_BOOL = 281, TOK_BY = 282, TOK_CALL = 283, TOK_CHARACTER = 284, TOK_CHUNK = 285, TOK_COLLATION = 286, TOK_COLUMN = 287, TOK_COMMIT = 288, TOK_COMMITTED = 289, TOK_COUNT = 290, TOK_CREATE = 291, TOK_DATABASES = 292, TOK_DELETE = 293, TOK_DESC = 294, TOK_DESCRIBE = 295, TOK_DISTINCT = 296, TOK_DIV = 297, TOK_DOUBLE = 298, TOK_DROP = 299, TOK_FACET = 300, TOK_FALSE = 301, TOK_FLOAT = 302, TOK_FLUSH = 303, TOK_FOR = 304, TOK_FROM = 305, TOK_FUNCTION = 306, TOK_GLOBAL = 307, TOK_GROUP = 308, TOK_GROUPBY = 309, TOK_GROUP_CONCAT = 310, TOK_HAVING = 311, TOK_ID = 312, TOK_IN = 313, TOK_INDEX = 314, TOK_INSERT = 315, TOK_INT = 316, TOK_INTEGER = 317, TOK_INTO = 318, TOK_IS = 319, TOK_ISOLATION = 320, TOK_JSON = 321, TOK_LEVEL = 322, TOK_LIKE = 323, TOK_LIMIT = 324, TOK_MATCH = 325, TOK_MAX = 326, TOK_META = 327, TOK_MIN = 328, TOK_MOD = 329, TOK_MULTI = 330, TOK_MULTI64 = 331, TOK_NAMES = 332, TOK_NULL = 333, TOK_OPTION = 334, TOK_ORDER = 335, TOK_OPTIMIZE = 336, TOK_PLAN = 337, TOK_PLUGIN = 338, TOK_PLUGINS = 339, TOK_PROFILE = 340, TOK_RAND = 341, TOK_RAMCHUNK = 342, TOK_READ = 343, TOK_RECONFIGURE = 
344, TOK_RELOAD = 345, TOK_REPEATABLE = 346, TOK_REPLACE = 347, TOK_REMAP = 348, TOK_RETURNS = 349, TOK_ROLLBACK = 350, TOK_RTINDEX = 351, TOK_SELECT = 352, TOK_SERIALIZABLE = 353, TOK_SET = 354, TOK_SETTINGS = 355, TOK_SESSION = 356, TOK_SHOW = 357, TOK_SONAME = 358, TOK_START = 359, TOK_STATUS = 360, TOK_STRING = 361, TOK_SUM = 362, TOK_TABLE = 363, TOK_TABLES = 364, TOK_THREADS = 365, TOK_TO = 366, TOK_TRANSACTION = 367, TOK_TRUE = 368, TOK_TRUNCATE = 369, TOK_TYPE = 370, TOK_UNCOMMITTED = 371, TOK_UPDATE = 372, TOK_VALUES = 373, TOK_VARIABLES = 374, TOK_WARNINGS = 375, TOK_WEIGHT = 376, TOK_WHERE = 377, TOK_WITHIN = 378, TOK_OR = 379, TOK_AND = 380, TOK_NE = 381, TOK_GTE = 382, TOK_LTE = 383, TOK_NOT = 384, TOK_NEG = 385 }; #endif #define TOK_IDENT 258 #define TOK_ATIDENT 259 #define TOK_CONST_INT 260 #define TOK_CONST_FLOAT 261 #define TOK_CONST_MVA 262 #define TOK_QUOTED_STRING 263 #define TOK_USERVAR 264 #define TOK_SYSVAR 265 #define TOK_CONST_STRINGS 266 #define TOK_BAD_NUMERIC 267 #define TOK_SUBKEY 268 #define TOK_DOT_NUMBER 269 #define TOK_ADD 270 #define TOK_AGENT 271 #define TOK_ALTER 272 #define TOK_AS 273 #define TOK_ASC 274 #define TOK_ATTACH 275 #define TOK_ATTRIBUTES 276 #define TOK_AVG 277 #define TOK_BEGIN 278 #define TOK_BETWEEN 279 #define TOK_BIGINT 280 #define TOK_BOOL 281 #define TOK_BY 282 #define TOK_CALL 283 #define TOK_CHARACTER 284 #define TOK_CHUNK 285 #define TOK_COLLATION 286 #define TOK_COLUMN 287 #define TOK_COMMIT 288 #define TOK_COMMITTED 289 #define TOK_COUNT 290 #define TOK_CREATE 291 #define TOK_DATABASES 292 #define TOK_DELETE 293 #define TOK_DESC 294 #define TOK_DESCRIBE 295 #define TOK_DISTINCT 296 #define TOK_DIV 297 #define TOK_DOUBLE 298 #define TOK_DROP 299 #define TOK_FACET 300 #define TOK_FALSE 301 #define TOK_FLOAT 302 #define TOK_FLUSH 303 #define TOK_FOR 304 #define TOK_FROM 305 #define TOK_FUNCTION 306 #define TOK_GLOBAL 307 #define TOK_GROUP 308 #define TOK_GROUPBY 309 #define TOK_GROUP_CONCAT 310 #define 
TOK_HAVING 311 #define TOK_ID 312 #define TOK_IN 313 #define TOK_INDEX 314 #define TOK_INSERT 315 #define TOK_INT 316 #define TOK_INTEGER 317 #define TOK_INTO 318 #define TOK_IS 319 #define TOK_ISOLATION 320 #define TOK_JSON 321 #define TOK_LEVEL 322 #define TOK_LIKE 323 #define TOK_LIMIT 324 #define TOK_MATCH 325 #define TOK_MAX 326 #define TOK_META 327 #define TOK_MIN 328 #define TOK_MOD 329 #define TOK_MULTI 330 #define TOK_MULTI64 331 #define TOK_NAMES 332 #define TOK_NULL 333 #define TOK_OPTION 334 #define TOK_ORDER 335 #define TOK_OPTIMIZE 336 #define TOK_PLAN 337 #define TOK_PLUGIN 338 #define TOK_PLUGINS 339 #define TOK_PROFILE 340 #define TOK_RAND 341 #define TOK_RAMCHUNK 342 #define TOK_READ 343 #define TOK_RECONFIGURE 344 #define TOK_RELOAD 345 #define TOK_REPEATABLE 346 #define TOK_REPLACE 347 #define TOK_REMAP 348 #define TOK_RETURNS 349 #define TOK_ROLLBACK 350 #define TOK_RTINDEX 351 #define TOK_SELECT 352 #define TOK_SERIALIZABLE 353 #define TOK_SET 354 #define TOK_SETTINGS 355 #define TOK_SESSION 356 #define TOK_SHOW 357 #define TOK_SONAME 358 #define TOK_START 359 #define TOK_STATUS 360 #define TOK_STRING 361 #define TOK_SUM 362 #define TOK_TABLE 363 #define TOK_TABLES 364 #define TOK_THREADS 365 #define TOK_TO 366 #define TOK_TRANSACTION 367 #define TOK_TRUE 368 #define TOK_TRUNCATE 369 #define TOK_TYPE 370 #define TOK_UNCOMMITTED 371 #define TOK_UPDATE 372 #define TOK_VALUES 373 #define TOK_VARIABLES 374 #define TOK_WARNINGS 375 #define TOK_WEIGHT 376 #define TOK_WHERE 377 #define TOK_WITHIN 378 #define TOK_OR 379 #define TOK_AND 380 #define TOK_NE 381 #define TOK_GTE 382 #define TOK_LTE 383 #define TOK_NOT 384 #define TOK_NEG 385 #if ! defined (YYSTYPE) && ! defined (YYSTYPE_IS_DECLARED) typedef int YYSTYPE; # define yystype YYSTYPE /* obsolescent; will be withdrawn */ # define YYSTYPE_IS_DECLARED 1 # define YYSTYPE_IS_TRIVIAL 1 #endif
/* * The ISP1362 chip requires a large delay (300ns and 462ns) between * accesses to the address and data register. * The following timing options exist: * * 1. Configure your memory controller to add such delays if it can (the best) * 2. Implement platform-specific delay function possibly * combined with configuring the memory controller; see * include/linux/usb_isp1362.h for more info. * 3. Use ndelay (easiest, poorest). * * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the * platform specific section of isp1362.h to select the appropriate variant. * * Also note that according to the Philips "ISP1362 Errata" document * Rev 1.00 from 27 May data corruption may occur when the #WR signal * is reasserted (even with #CS deasserted) within 132ns after a * write cycle to any controller register. If the hardware doesn't * implement the recommended fix (gating the #WR with #CS) software * must ensure that no further write cycle (not necessarily to the chip!) * is issued by the CPU within this interval. * For PXA25x this can be ensured by using VLIO with the maximum * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz. */ #ifdef CONFIG_USB_DEBUG # define ISP1362_DEBUG #else # undef ISP1362_DEBUG #endif /* * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and * GET_INTERFACE requests correctly when the SETUP and DATA stages of the * requests are carried out in separate frames. This will delay any SETUP * packets until the start of the next frame so that this situation is * unlikely to occur (and makes usbtest happy running with a PXA255 target * device). */ #undef <API key> #undef PTD_TRACE #undef URB_TRACE #undef VERBOSE #undef REGISTERS /* This enables a memory test on the ISP1362 chip memory to make sure the * chip access timing is correct. 
*/ #undef CHIP_BUFFER_TEST #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/smp_lock.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/usb.h> #include <linux/usb/isp1362.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/io.h> #include <linux/bitmap.h> #include <asm/irq.h> #include <asm/system.h> #include <asm/byteorder.h> #include <asm/unaligned.h> static int dbg_level; #ifdef ISP1362_DEBUG module_param(dbg_level, int, 0644); #else module_param(dbg_level, int, 0); #define STUB_DEBUG_FILE #endif #include "../core/hcd.h" #include "../core/usb.h" #include "isp1362.h" #define DRIVER_VERSION "2005-04-04" #define DRIVER_DESC "ISP1362 USB Host Controller Driver" MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); static const char hcd_name[] = "isp1362-hcd"; static void isp1362_hc_stop(struct usb_hcd *hcd); static int isp1362_hc_start(struct usb_hcd *hcd); /* * When called from the interrupthandler only isp1362_hcd->irqenb is modified, * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon * completion. * We don't need a 'disable' counterpart, since interrupts will be disabled * only by the interrupt handler. 
*/ static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask) { if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb) return; if (mask & ~isp1362_hcd->irqenb) isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb); isp1362_hcd->irqenb |= mask; if (isp1362_hcd->irq_active) return; isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb); } static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd, u16 offset) { struct isp1362_ep_queue *epq = NULL; if (offset < isp1362_hcd->istl_queue[1].buf_start) epq = &isp1362_hcd->istl_queue[0]; else if (offset < isp1362_hcd->intl_queue.buf_start) epq = &isp1362_hcd->istl_queue[1]; else if (offset < isp1362_hcd->atl_queue.buf_start) epq = &isp1362_hcd->intl_queue; else if (offset < isp1362_hcd->atl_queue.buf_start + isp1362_hcd->atl_queue.buf_size) epq = &isp1362_hcd->atl_queue; if (epq) DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name); else pr_warning("%s: invalid PTD $%04x\n", __func__, offset); return epq; } static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index) { int offset; if (index * epq->blk_size > epq->buf_size) { pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index, epq->buf_size / epq->blk_size); return -EINVAL; } offset = epq->buf_start + index * epq->blk_size; DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset); return offset; } static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size, int mps) { u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size); xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE); if (xfer_size < size && xfer_size % mps) xfer_size -= xfer_size % mps; return xfer_size; } static int claim_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep, u16 len) { int ptd_offset = -EINVAL; int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1; int found; BUG_ON(len > epq->buf_size); if 
(!epq->buf_avail) return -ENOMEM; if (ep->num_ptds) pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__, epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map); BUG_ON(ep->num_ptds != 0); found = <API key>(&epq->buf_map, epq->buf_count, 0, num_ptds, 0); if (found >= epq->buf_count) return -EOVERFLOW; DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__, num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE)); ptd_offset = get_ptd_offset(epq, found); WARN_ON(ptd_offset < 0); ep->ptd_offset = ptd_offset; ep->num_ptds += num_ptds; epq->buf_avail -= num_ptds; BUG_ON(epq->buf_avail > epq->buf_count); ep->ptd_index = found; bitmap_set(&epq->buf_map, found, num_ptds); DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n", __func__, epq->name, ep->ptd_index, ep->ptd_offset, epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map); return found; } static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep) { int index = ep->ptd_index; int last = ep->ptd_index + ep->num_ptds; if (last > epq->buf_count) pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n", __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index, ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail, epq->buf_map, epq->skip_map); BUG_ON(last > epq->buf_count); for (; index < last; index++) { __clear_bit(index, &epq->buf_map); __set_bit(index, &epq->skip_map); } epq->buf_avail += ep->num_ptds; epq->ptd_count BUG_ON(epq->buf_avail > epq->buf_count); BUG_ON(epq->ptd_count > epq->buf_count); DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n", __func__, epq->name, ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count); DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__, epq->buf_map, epq->skip_map); ep->num_ptds = 0; ep->ptd_offset = -EINVAL; ep->ptd_index = -EINVAL; } /* Set up PTD's. 
*/ static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb, struct isp1362_ep *ep, struct isp1362_ep_queue *epq, u16 fno) { struct ptd *ptd; int toggle; int dir; u16 len; size_t buf_len = urb-><API key> - urb->actual_length; DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep); ptd = &ep->ptd; ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length; switch (ep->nextpid) { case USB_PID_IN: toggle = usb_gettoggle(urb->dev, ep->epnum, 0); dir = PTD_DIR_IN; if (usb_pipecontrol(urb->pipe)) { len = min_t(size_t, ep->maxpacket, buf_len); } else if (usb_pipeisoc(urb->pipe)) { len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE); ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset; } else len = max_transfer_size(epq, buf_len, ep->maxpacket); DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__, len, ep->maxpacket, (int)buf_len); break; case USB_PID_OUT: toggle = usb_gettoggle(urb->dev, ep->epnum, 1); dir = PTD_DIR_OUT; if (usb_pipecontrol(urb->pipe)) len = min_t(size_t, ep->maxpacket, buf_len); else if (usb_pipeisoc(urb->pipe)) len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE); else len = max_transfer_size(epq, buf_len, ep->maxpacket); if (len == 0) pr_info("%s: Sending ZERO packet: %d\n", __func__, urb->transfer_flags & URB_ZERO_PACKET); DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__, len, ep->maxpacket, (int)buf_len); break; case USB_PID_SETUP: toggle = 0; dir = PTD_DIR_SETUP; len = sizeof(struct usb_ctrlrequest); DBG(1, "%s: SETUP len %d\n", __func__, len); ep->data = urb->setup_packet; break; case USB_PID_ACK: toggle = 1; len = 0; dir = (urb-><API key> && usb_pipein(urb->pipe)) ? 
PTD_DIR_OUT : PTD_DIR_IN; DBG(1, "%s: ACK len %d\n", __func__, len); break; default: toggle = dir = len = 0; pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid); BUG_ON(1); } ep->length = len; if (!len) ep->data = NULL; ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle); ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) | PTD_EP(ep->epnum); ptd->len = PTD_LEN(len) | PTD_DIR(dir); ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe)); if (usb_pipeint(urb->pipe)) { ptd->faddr |= PTD_SF_INT(ep->branch); ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0); } if (usb_pipeisoc(urb->pipe)) ptd->faddr |= PTD_SF_ISO(fno); DBG(1, "%s: Finished\n", __func__); } static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep, struct isp1362_ep_queue *epq) { struct ptd *ptd = &ep->ptd; int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length; _BUG_ON(ep->ptd_offset < 0); prefetch(ptd); <API key>(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE); if (len) <API key>(isp1362_hcd, ep->data, ep->ptd_offset + PTD_HEADER_SIZE, len); dump_ptd(ptd); dump_ptd_out_data(ptd, ep->data); } static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep, struct isp1362_ep_queue *epq) { struct ptd *ptd = &ep->ptd; int act_len; WARN_ON(list_empty(&ep->active)); BUG_ON(ep->ptd_offset < 0); list_del_init(&ep->active); DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active); prefetchw(ptd); isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE); dump_ptd(ptd); act_len = PTD_GET_COUNT(ptd); if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0) return; if (act_len > ep->length) pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep, ep->ptd_offset, act_len, ep->length); BUG_ON(act_len > ep->length); /* Only transfer the amount of data that has actually been overwritten * in the chip buffer. 
We don't want any data that doesn't belong to the * transfer to leak out of the chip to the callers transfer buffer! */ prefetchw(ep->data); isp1362_read_buffer(isp1362_hcd, ep->data, ep->ptd_offset + PTD_HEADER_SIZE, act_len); dump_ptd_in_data(ptd, ep->data); } /* * INT PTDs will stay in the chip until data is available. * This function will remove a PTD from the chip when the URB is dequeued. * Must be called with the spinlock held and IRQs disabled */ static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep) { int index; struct isp1362_ep_queue *epq; DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset); BUG_ON(ep->ptd_offset < 0); epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset); BUG_ON(!epq); /* put ep in remove_list for cleanup */ WARN_ON(!list_empty(&ep->remove_list)); list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list); /* let SOF interrupt handle the cleanup */ isp1362_enable_int(isp1362_hcd, HCuPINT_SOF); index = ep->ptd_index; if (index < 0) /* ISO queues don't have SKIP registers */ return; DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__, index, ep->ptd_offset, epq->skip_map, 1 << index); /* prevent further processing of PTD (will be effective after next SOF) */ epq->skip_map |= 1 << index; if (epq == &isp1362_hcd->atl_queue) { DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__, isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map); isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map); if (~epq->skip_map == 0) isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, <API key>); } else if (epq == &isp1362_hcd->intl_queue) { DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__, isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map); isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map); if (~epq->skip_map == 0) isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, <API key>); } } /* Take done or failed requests out of schedule. Give back processed urbs. 
*/ static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep, struct urb *urb, int status) __releases(isp1362_hcd->lock) __acquires(isp1362_hcd->lock) { urb->hcpriv = NULL; ep->error_count = 0; if (usb_pipecontrol(urb->pipe)) ep->nextpid = USB_PID_SETUP; URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__, ep->num_req, usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe), !usb_pipein(urb->pipe) ? "out" : "in", usb_pipecontrol(urb->pipe) ? "ctrl" : usb_pipeint(urb->pipe) ? "int" : usb_pipebulk(urb->pipe) ? "bulk" : "iso", urb->actual_length, urb-><API key>, !(urb->transfer_flags & URB_SHORT_NOT_OK) ? "short_ok" : "", urb->status); <API key>(isp1362_hcd_to_hcd(isp1362_hcd), urb); spin_unlock(&isp1362_hcd->lock); <API key>(isp1362_hcd_to_hcd(isp1362_hcd), urb, status); spin_lock(&isp1362_hcd->lock); /* take idle endpoints out of the schedule right away */ if (!list_empty(&ep->hep->urb_list)) return; /* async deschedule */ if (!list_empty(&ep->schedule)) { list_del_init(&ep->schedule); return; } if (ep->interval) { /* periodic deschedule */ DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval, ep, ep->branch, ep->load, isp1362_hcd->load[ep->branch], isp1362_hcd->load[ep->branch] - ep->load); isp1362_hcd->load[ep->branch] -= ep->load; ep->branch = PERIODIC_SIZE; } } /* * Analyze transfer results, handle partial transfers and errors */ static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep) { struct urb *urb = get_urb(ep); struct usb_device *udev; struct ptd *ptd; int short_ok; u16 len; int urbstat = -EINPROGRESS; u8 cc; DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req); udev = urb->dev; ptd = &ep->ptd; cc = PTD_GET_CC(ptd); if (cc == PTD_NOTACCESSED) { pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__, ep->num_req, ptd); cc = PTD_DEVNOTRESP; } short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK); len = urb-><API key> - urb->actual_length; /* Data underrun is 
special. For allowed underrun we clear the error and continue as normal. For forbidden underrun we finish the DATA stage immediately while for control transfer, we do a STATUS stage. */ if (cc == PTD_DATAUNDERRUN) { if (short_ok) { DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n", __func__, ep->num_req, short_ok ? "" : "not_", PTD_GET_COUNT(ptd), ep->maxpacket, len); cc = PTD_CC_NOERROR; urbstat = 0; } else { DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n", __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid, short_ok ? "" : "not_", PTD_GET_COUNT(ptd), ep->maxpacket, len); if (usb_pipecontrol(urb->pipe)) { ep->nextpid = USB_PID_ACK; /* save the data underrun error code for later and * procede with the status stage */ urb->actual_length += PTD_GET_COUNT(ptd); BUG_ON(urb->actual_length > urb-><API key>); if (urb->status == -EINPROGRESS) urb->status = cc_to_error[PTD_DATAUNDERRUN]; } else { usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT, PTD_GET_TOGGLE(ptd)); urbstat = cc_to_error[PTD_DATAUNDERRUN]; } goto out; } } if (cc != PTD_CC_NOERROR) { if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) { urbstat = cc_to_error[cc]; DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n", __func__, ep->num_req, ep->nextpid, urbstat, cc, ep->error_count); } goto out; } switch (ep->nextpid) { case USB_PID_OUT: if (PTD_GET_COUNT(ptd) != ep->length) pr_err("%s: count=%d len=%d\n", __func__, PTD_GET_COUNT(ptd), ep->length); BUG_ON(PTD_GET_COUNT(ptd) != ep->length); urb->actual_length += ep->length; BUG_ON(urb->actual_length > urb-><API key>); usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd)); if (urb->actual_length == urb-><API key>) { DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__, ep->num_req, len, ep->maxpacket, urbstat); if (usb_pipecontrol(urb->pipe)) { DBG(3, "%s: req %d %s Wait for ACK\n", __func__, ep->num_req, usb_pipein(urb->pipe) 
? "IN" : "OUT"); ep->nextpid = USB_PID_ACK; } else { if (len % ep->maxpacket || !(urb->transfer_flags & URB_ZERO_PACKET)) { urbstat = 0; DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n", __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT", urbstat, len, ep->maxpacket, urb->actual_length); } } } break; case USB_PID_IN: len = PTD_GET_COUNT(ptd); BUG_ON(len > ep->length); urb->actual_length += len; BUG_ON(urb->actual_length > urb-><API key>); usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd)); /* if transfer completed or (allowed) data underrun */ if ((urb-><API key> == urb->actual_length) || len % ep->maxpacket) { DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__, ep->num_req, len, ep->maxpacket, urbstat); if (usb_pipecontrol(urb->pipe)) { DBG(3, "%s: req %d %s Wait for ACK\n", __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT"); ep->nextpid = USB_PID_ACK; } else { urbstat = 0; DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n", __func__, ep->num_req, usb_pipein(urb->pipe) ? 
"IN" : "OUT", urbstat, len, ep->maxpacket, urb->actual_length); } } break; case USB_PID_SETUP: if (urb-><API key> == urb->actual_length) { ep->nextpid = USB_PID_ACK; } else if (usb_pipeout(urb->pipe)) { usb_settoggle(udev, 0, 1, 1); ep->nextpid = USB_PID_OUT; } else { usb_settoggle(udev, 0, 0, 1); ep->nextpid = USB_PID_IN; } break; case USB_PID_ACK: DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req, urbstat); WARN_ON(urbstat != -EINPROGRESS); urbstat = 0; ep->nextpid = 0; break; default: BUG_ON(1); } out: if (urbstat != -EINPROGRESS) { DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__, ep, ep->num_req, urb, urbstat); finish_request(isp1362_hcd, ep, urb, urbstat); } } static void finish_unlinks(struct isp1362_hcd *isp1362_hcd) { struct isp1362_ep *ep; struct isp1362_ep *tmp; <API key>(ep, tmp, &isp1362_hcd->remove_list, remove_list) { struct isp1362_ep_queue *epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset); int index = ep->ptd_index; BUG_ON(epq == NULL); if (index >= 0) { DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset); BUG_ON(ep->num_ptds == 0); release_ptd_buffers(epq, ep); } if (!list_empty(&ep->hep->urb_list)) { struct urb *urb = get_urb(ep); DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__, ep->num_req, ep); finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN); } WARN_ON(list_empty(&ep->active)); if (!list_empty(&ep->active)) { list_del_init(&ep->active); DBG(1, "%s: ep %p removed from active list\n", __func__, ep); } list_del_init(&ep->remove_list); DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep); } DBG(1, "%s: Done\n", __func__); } static inline void <API key>(struct isp1362_hcd *isp1362_hcd, int count) { if (count > 0) { if (count < isp1362_hcd->atl_queue.ptd_count) isp1362_write_reg16(isp1362_hcd, HCATLDTC, count); isp1362_enable_int(isp1362_hcd, HCuPINT_ATL); isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map); isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, <API 
key>); } else isp1362_enable_int(isp1362_hcd, HCuPINT_SOF); } static inline void <API key>(struct isp1362_hcd *isp1362_hcd) { isp1362_enable_int(isp1362_hcd, HCuPINT_INTL); isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, <API key>); isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map); } static inline void <API key>(struct isp1362_hcd *isp1362_hcd, int flip) { isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0); isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ? <API key> : <API key>); } static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb, struct isp1362_ep *ep, struct isp1362_ep_queue *epq) { int index = epq->free_ptd; prepare_ptd(isp1362_hcd, urb, ep, epq, 0); index = claim_ptd_buffers(epq, ep, ep->length); if (index == -ENOMEM) { DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__, ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map); return index; } else if (index == -EOVERFLOW) { DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n", __func__, ep->num_req, ep->length, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map); return index; } else BUG_ON(index < 0); list_add_tail(&ep->active, &epq->active); DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__, ep, ep->num_req, ep->length, &epq->active); DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name, ep->ptd_offset, ep, ep->num_req); isp1362_write_ptd(isp1362_hcd, ep, epq); __clear_bit(ep->ptd_index, &epq->skip_map); return 0; } static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd) { int ptd_count = 0; struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue; struct isp1362_ep *ep; int defer = 0; if (atomic_read(&epq->finishing)) { DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name); return; } list_for_each_entry(ep, &isp1362_hcd->async, schedule) { struct urb *urb = get_urb(ep); int ret; if (!list_empty(&ep->active)) { DBG(2, "%s: 
Skipping active %s ep %p\n", __func__, epq->name, ep); continue; } DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name, ep, ep->num_req); ret = submit_req(isp1362_hcd, urb, ep, epq); if (ret == -ENOMEM) { defer = 1; break; } else if (ret == -EOVERFLOW) { defer = 1; continue; } #ifdef <API key> defer = ep->nextpid == USB_PID_SETUP; #endif ptd_count++; } /* Avoid starving of endpoints */ if (isp1362_hcd->async.next != isp1362_hcd->async.prev) { DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count); list_move(&isp1362_hcd->async, isp1362_hcd->async.next); } if (ptd_count || defer) <API key>(isp1362_hcd, defer ? 0 : ptd_count); epq->ptd_count += ptd_count; if (epq->ptd_count > epq->stat_maxptds) { epq->stat_maxptds = epq->ptd_count; DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds); } } static void <API key>(struct isp1362_hcd *isp1362_hcd) { int ptd_count = 0; struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue; struct isp1362_ep *ep; if (atomic_read(&epq->finishing)) { DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name); return; } list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) { struct urb *urb = get_urb(ep); int ret; if (!list_empty(&ep->active)) { DBG(1, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep); continue; } DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name, ep, ep->num_req); ret = submit_req(isp1362_hcd, urb, ep, epq); if (ret == -ENOMEM) break; else if (ret == -EOVERFLOW) continue; ptd_count++; } if (ptd_count) { static int last_count; if (ptd_count != last_count) { DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count); last_count = ptd_count; } <API key>(isp1362_hcd); } epq->ptd_count += ptd_count; if (epq->ptd_count > epq->stat_maxptds) epq->stat_maxptds = epq->ptd_count; } static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep) { u16 ptd_offset = ep->ptd_offset; int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size; 
DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset, ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size); ptd_offset += num_ptds * epq->blk_size; if (ptd_offset < epq->buf_start + epq->buf_size) return ptd_offset; else return -ENOMEM; } static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd) { int ptd_count = 0; int flip = isp1362_hcd->istl_flip; struct isp1362_ep_queue *epq; int ptd_offset; struct isp1362_ep *ep; struct isp1362_ep *tmp; u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM); fill2: epq = &isp1362_hcd->istl_queue[flip]; if (atomic_read(&epq->finishing)) { DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name); return; } if (!list_empty(&epq->active)) return; ptd_offset = epq->buf_start; <API key>(ep, tmp, &isp1362_hcd->isoc, schedule) { struct urb *urb = get_urb(ep); s16 diff = fno - (u16)urb->start_frame; DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep); if (diff > urb->number_of_packets) { /* time frame for this URB has elapsed */ finish_request(isp1362_hcd, ep, urb, -EOVERFLOW); continue; } else if (diff < -1) { /* URB is not due in this frame or the next one. 
* Comparing with '-1' instead of '0' accounts for double * buffering in the ISP1362 which enables us to queue the PTD * one frame ahead of time */ } else if (diff == -1) { /* submit PTD's that are due in the next frame */ prepare_ptd(isp1362_hcd, urb, ep, epq, fno); if (ptd_offset + PTD_HEADER_SIZE + ep->length > epq->buf_start + epq->buf_size) { pr_err("%s: Not enough ISO buffer space for %d byte PTD\n", __func__, ep->length); continue; } ep->ptd_offset = ptd_offset; list_add_tail(&ep->active, &epq->active); ptd_offset = next_ptd(epq, ep); if (ptd_offset < 0) { pr_warning("%s: req %d No more %s PTD buffers available\n", __func__, ep->num_req, epq->name); break; } } } list_for_each_entry(ep, &epq->active, active) { if (epq->active.next == &ep->active) ep->ptd.mps |= PTD_LAST_MSK; isp1362_write_ptd(isp1362_hcd, ep, epq); ptd_count++; } if (ptd_count) <API key>(isp1362_hcd, flip); epq->ptd_count += ptd_count; if (epq->ptd_count > epq->stat_maxptds) epq->stat_maxptds = epq->ptd_count; /* check, whether the second ISTL buffer may also be filled */ if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & (flip ? 
<API key> : <API key>))) { fno++; ptd_count = 0; flip = 1 - flip; goto fill2; } } static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map, struct isp1362_ep_queue *epq) { struct isp1362_ep *ep; struct isp1362_ep *tmp; if (list_empty(&epq->active)) { DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name); return; } DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map); atomic_inc(&epq->finishing); <API key>(ep, tmp, &epq->active, active) { int index = ep->ptd_index; DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name, index, ep->ptd_offset); BUG_ON(index < 0); if (<API key>(index, &done_map)) { isp1362_read_ptd(isp1362_hcd, ep, epq); epq->free_ptd = index; BUG_ON(ep->num_ptds == 0); release_ptd_buffers(epq, ep); DBG(1, "%s: ep %p req %d removed from active list\n", __func__, ep, ep->num_req); if (!list_empty(&ep->remove_list)) { list_del_init(&ep->remove_list); DBG(1, "%s: ep %p removed from remove list\n", __func__, ep); } DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name, ep, ep->num_req); postproc_ep(isp1362_hcd, ep); } if (!done_map) break; } if (done_map) pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map, epq->skip_map); atomic_dec(&epq->finishing); } static void <API key>(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq) { struct isp1362_ep *ep; struct isp1362_ep *tmp; if (list_empty(&epq->active)) { DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name); return; } DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name); atomic_inc(&epq->finishing); <API key>(ep, tmp, &epq->active, active) { DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset); isp1362_read_ptd(isp1362_hcd, ep, epq); DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep); postproc_ep(isp1362_hcd, ep); } WARN_ON(epq->blk_size != 0); atomic_dec(&epq->finishing); } static irqreturn_t isp1362_irq(struct usb_hcd *hcd) { int handled = 0; struct 
isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); u16 irqstat; u16 svc_mask; spin_lock(&isp1362_hcd->lock); BUG_ON(isp1362_hcd->irq_active++); isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0); irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT); DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb); /* only handle interrupts that are currently enabled */ irqstat &= isp1362_hcd->irqenb; isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat); svc_mask = irqstat; if (irqstat & HCuPINT_SOF) { isp1362_hcd->irqenb &= ~HCuPINT_SOF; isp1362_hcd->irq_stat[ISP1362_INT_SOF]++; handled = 1; svc_mask &= ~HCuPINT_SOF; DBG(3, "%s: SOF\n", __func__); isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM); if (!list_empty(&isp1362_hcd->remove_list)) finish_unlinks(isp1362_hcd); if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) { if (list_empty(&isp1362_hcd->atl_queue.active)) { start_atl_transfers(isp1362_hcd); } else { isp1362_enable_int(isp1362_hcd, HCuPINT_ATL); isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map); isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, <API key>); } } } if (irqstat & HCuPINT_ISTL0) { isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++; handled = 1; svc_mask &= ~HCuPINT_ISTL0; isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, <API key>); DBG(1, "%s: ISTL0\n", __func__); WARN_ON((int)!!isp1362_hcd->istl_flip); WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & <API key>); WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & <API key>)); isp1362_hcd->irqenb &= ~HCuPINT_ISTL0; } if (irqstat & HCuPINT_ISTL1) { isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++; handled = 1; svc_mask &= ~HCuPINT_ISTL1; isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, <API key>); DBG(1, "%s: ISTL1\n", __func__); WARN_ON(!(int)isp1362_hcd->istl_flip); WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & <API key>); WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & <API key>)); isp1362_hcd->irqenb &= ~HCuPINT_ISTL1; } if (irqstat & 
(HCuPINT_ISTL0 | HCuPINT_ISTL1)) { WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) == (HCuPINT_ISTL0 | HCuPINT_ISTL1)); <API key>(isp1362_hcd, &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]); start_iso_transfers(isp1362_hcd); isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip; } if (irqstat & HCuPINT_INTL) { u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE); u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP); isp1362_hcd->irq_stat[ISP1362_INT_INTL]++; DBG(2, "%s: INTL\n", __func__); svc_mask &= ~HCuPINT_INTL; isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map); if (~(done_map | skip_map) == 0) /* All PTDs are finished, disable INTL processing entirely */ isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, <API key>); handled = 1; WARN_ON(!done_map); if (done_map) { DBG(3, "%s: INTL done_map %08x\n", __func__, done_map); finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue); <API key>(isp1362_hcd); } } if (irqstat & HCuPINT_ATL) { u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE); u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP); isp1362_hcd->irq_stat[ISP1362_INT_ATL]++; DBG(2, "%s: ATL\n", __func__); svc_mask &= ~HCuPINT_ATL; isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map); if (~(done_map | skip_map) == 0) isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, <API key>); if (done_map) { DBG(3, "%s: ATL done_map %08x\n", __func__, done_map); finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue); start_atl_transfers(isp1362_hcd); } handled = 1; } if (irqstat & HCuPINT_OPR) { u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT); isp1362_hcd->irq_stat[ISP1362_INT_OPR]++; svc_mask &= ~HCuPINT_OPR; DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb); intstat &= isp1362_hcd->intenb; if (intstat & OHCI_INTR_UE) { pr_err("Unrecoverable error\n"); /* FIXME: do here reset or cleanup or whatever */ } if (intstat & OHCI_INTR_RHSC) { isp1362_hcd->rhstatus = 
isp1362_read_reg32(isp1362_hcd, HCRHSTATUS); isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1); isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2); } if (intstat & OHCI_INTR_RD) { pr_info("%s: RESUME DETECTED\n", __func__); isp1362_show_reg(isp1362_hcd, HCCONTROL); <API key>(hcd); } isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat); irqstat &= ~HCuPINT_OPR; handled = 1; } if (irqstat & HCuPINT_SUSP) { isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++; handled = 1; svc_mask &= ~HCuPINT_SUSP; pr_info("%s: SUSPEND IRQ\n", __func__); } if (irqstat & HCuPINT_CLKRDY) { isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++; handled = 1; isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY; svc_mask &= ~HCuPINT_CLKRDY; pr_info("%s: CLKRDY IRQ\n", __func__); } if (svc_mask) pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask); isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb); isp1362_hcd->irq_active spin_unlock(&isp1362_hcd->lock); return IRQ_RETVAL(handled); } #define MAX_PERIODIC_LOAD 900 /* out of 1000 usec */ static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load) { int i, branch = -ENOSPC; /* search for the least loaded schedule branch of that interval * which has enough bandwidth left unreserved. */ for (i = 0; i < interval; i++) { if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) { int j; for (j = i; j < PERIODIC_SIZE; j += interval) { if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) { pr_err("%s: new load %d load[%02x] %d max %d\n", __func__, load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD); break; } } if (j < PERIODIC_SIZE) continue; branch = i; } } return branch; } /* NB! 
ALL the code above this point runs with isp1362_hcd->lock held, irqs off */ static int isp1362_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) { struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); struct usb_device *udev = urb->dev; unsigned int pipe = urb->pipe; int is_out = !usb_pipein(pipe); int type = usb_pipetype(pipe); int epnum = usb_pipeendpoint(pipe); struct usb_host_endpoint *hep = urb->ep; struct isp1362_ep *ep = NULL; unsigned long flags; int retval = 0; DBG(3, "%s: urb %p\n", __func__, urb); if (type == PIPE_ISOCHRONOUS) { pr_err("Isochronous transfers not supported\n"); return -ENOSPC; } URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__, usb_pipedevice(pipe), epnum, is_out ? "out" : "in", usb_pipecontrol(pipe) ? "ctrl" : usb_pipeint(pipe) ? "int" : usb_pipebulk(pipe) ? "bulk" : "iso", urb-><API key>, (urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "", !(urb->transfer_flags & URB_SHORT_NOT_OK) ? "short_ok" : ""); /* avoid all allocations within spinlocks: request or endpoint */ if (!hep->hcpriv) { ep = kcalloc(1, sizeof *ep, mem_flags); if (!ep) return -ENOMEM; } spin_lock_irqsave(&isp1362_hcd->lock, flags); /* don't submit to a dead or disabled port */ if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) & (1 << <API key>)) || !HC_IS_RUNNING(hcd->state)) { kfree(ep); retval = -ENODEV; goto fail_not_linked; } retval = <API key>(hcd, urb); if (retval) { kfree(ep); goto fail_not_linked; } if (hep->hcpriv) { ep = hep->hcpriv; } else { INIT_LIST_HEAD(&ep->schedule); INIT_LIST_HEAD(&ep->active); INIT_LIST_HEAD(&ep->remove_list); ep->udev = usb_get_dev(udev); ep->hep = hep; ep->epnum = epnum; ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out); ep->ptd_offset = -EINVAL; ep->ptd_index = -EINVAL; usb_settoggle(udev, epnum, is_out, 0); if (type == PIPE_CONTROL) ep->nextpid = USB_PID_SETUP; else if (is_out) ep->nextpid = USB_PID_OUT; else ep->nextpid = USB_PID_IN; switch (type) { case PIPE_ISOCHRONOUS: case 
PIPE_INTERRUPT: if (urb->interval > PERIODIC_SIZE) urb->interval = PERIODIC_SIZE; ep->interval = urb->interval; ep->branch = PERIODIC_SIZE; ep->load = usb_calc_bus_time(udev->speed, !is_out, (type == PIPE_ISOCHRONOUS), usb_maxpacket(udev, pipe, is_out)) / 1000; break; } hep->hcpriv = ep; } ep->num_req = isp1362_hcd->req_serial++; /* maybe put endpoint into schedule */ switch (type) { case PIPE_CONTROL: case PIPE_BULK: if (list_empty(&ep->schedule)) { DBG(1, "%s: Adding ep %p req %d to async schedule\n", __func__, ep, ep->num_req); list_add_tail(&ep->schedule, &isp1362_hcd->async); } break; case PIPE_ISOCHRONOUS: case PIPE_INTERRUPT: urb->interval = ep->interval; /* urb submitted for already existing EP */ if (ep->branch < PERIODIC_SIZE) break; retval = balance(isp1362_hcd, ep->interval, ep->load); if (retval < 0) { pr_err("%s: balance returned %d\n", __func__, retval); goto fail; } ep->branch = retval; retval = 0; isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM); DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n", __func__, isp1362_hcd->fmindex, ep->branch, ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) & ~(PERIODIC_SIZE - 1)) + ep->branch, (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch); if (list_empty(&ep->schedule)) { if (type == PIPE_ISOCHRONOUS) { u16 frame = isp1362_hcd->fmindex; frame += max_t(u16, 8, ep->interval); frame &= ~(ep->interval - 1); frame |= ep->branch; if (frame_before(frame, isp1362_hcd->fmindex)) frame += ep->interval; urb->start_frame = frame; DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep); list_add_tail(&ep->schedule, &isp1362_hcd->isoc); } else { DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep); list_add_tail(&ep->schedule, &isp1362_hcd->periodic); } } else DBG(1, "%s: ep %p already scheduled\n", __func__, ep); DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__, ep->load / ep->interval, isp1362_hcd->load[ep->branch], isp1362_hcd->load[ep->branch] + ep->load); 
isp1362_hcd->load[ep->branch] += ep->load; } urb->hcpriv = hep; ALIGNSTAT(isp1362_hcd, urb->transfer_buffer); switch (type) { case PIPE_CONTROL: case PIPE_BULK: start_atl_transfers(isp1362_hcd); break; case PIPE_INTERRUPT: <API key>(isp1362_hcd); break; case PIPE_ISOCHRONOUS: start_iso_transfers(isp1362_hcd); break; default: BUG(); } fail: if (retval) <API key>(hcd, urb); fail_not_linked: <API key>(&isp1362_hcd->lock, flags); if (retval) DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval); return retval; } static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) { struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); struct usb_host_endpoint *hep; unsigned long flags; struct isp1362_ep *ep; int retval = 0; DBG(3, "%s: urb %p\n", __func__, urb); spin_lock_irqsave(&isp1362_hcd->lock, flags); retval = <API key>(hcd, urb, status); if (retval) goto done; hep = urb->hcpriv; if (!hep) { <API key>(&isp1362_hcd->lock, flags); return -EIDRM; } ep = hep->hcpriv; if (ep) { /* In front of queue? 
*/ if (ep->hep->urb_list.next == &urb->urb_list) { if (!list_empty(&ep->active)) { DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__, urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset); /* disable processing and queue PTD for removal */ remove_ptd(isp1362_hcd, ep); urb = NULL; } } if (urb) { DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep, ep->num_req); finish_request(isp1362_hcd, ep, urb, status); } else DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb); } else { pr_warning("%s: No EP in URB %p\n", __func__, urb); retval = -EINVAL; } done: <API key>(&isp1362_hcd->lock, flags); DBG(3, "%s: exit\n", __func__); return retval; } static void <API key>(struct usb_hcd *hcd, struct usb_host_endpoint *hep) { struct isp1362_ep *ep = hep->hcpriv; struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); unsigned long flags; DBG(1, "%s: ep %p\n", __func__, ep); if (!ep) return; spin_lock_irqsave(&isp1362_hcd->lock, flags); if (!list_empty(&hep->urb_list)) { if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) { DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__, ep, ep->num_req, ep->ptd_index, ep->ptd_offset); remove_ptd(isp1362_hcd, ep); pr_info("%s: Waiting for Interrupt to clean up\n", __func__); } } <API key>(&isp1362_hcd->lock, flags); /* Wait for interrupt to clear out active list */ while (!list_empty(&ep->active)) msleep(1); DBG(1, "%s: Freeing EP %p\n", __func__, ep); usb_put_dev(ep->udev); kfree(ep); hep->hcpriv = NULL; } static int isp1362_get_frame(struct usb_hcd *hcd) { struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); u32 fmnum; unsigned long flags; spin_lock_irqsave(&isp1362_hcd->lock, flags); fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM); <API key>(&isp1362_hcd->lock, flags); return (int)fmnum; } /* Adapted from ohci-hub.c */ static int <API key>(struct usb_hcd *hcd, char *buf) { struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); int ports, i, changed = 0; unsigned long flags; if 
(!HC_IS_RUNNING(hcd->state)) return -ESHUTDOWN; /* Report no status change now, if we are scheduled to be called later */ if (timer_pending(&hcd->rh_timer)) return 0; ports = isp1362_hcd->rhdesca & RH_A_NDP; BUG_ON(ports > 2); spin_lock_irqsave(&isp1362_hcd->lock, flags); /* init status */ if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC)) buf[0] = changed = 1; else buf[0] = 0; for (i = 0; i < ports; i++) { u32 status = isp1362_hcd->rhport[i]; if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC | RH_PS_OCIC | RH_PS_PRSC)) { changed = 1; buf[0] |= 1 << (i + 1); continue; } if (!(status & RH_PS_CCS)) continue; } <API key>(&isp1362_hcd->lock, flags); return changed; } static void <API key>(struct isp1362_hcd *isp1362_hcd, struct usb_hub_descriptor *desc) { u32 reg = isp1362_hcd->rhdesca; DBG(3, "%s: enter\n", __func__); desc->bDescriptorType = 0x29; desc->bDescLength = 9; desc->bHubContrCurrent = 0; desc->bNbrPorts = reg & 0x3; /* Power switching, device type, overcurrent. */ desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & 0x1f); DBG(0, "%s: hubcharacteristics = %02x\n", __func__, cpu_to_le16((reg >> 8) & 0x1f)); desc->bPwrOn2PwrGood = (reg >> 24) & 0xff; /* two bitmaps: ports removable, and legacy PortPwrCtrlMask */ desc->bitmap[0] = desc->bNbrPorts == 1 ? 
1 << 1 : 3 << 1; desc->bitmap[1] = ~0; DBG(3, "%s: exit\n", __func__); } /* Adapted from ohci-hub.c */ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength) { struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); int retval = 0; unsigned long flags; unsigned long t1; int ports = isp1362_hcd->rhdesca & RH_A_NDP; u32 tmp = 0; switch (typeReq) { case ClearHubFeature: DBG(0, "ClearHubFeature: "); switch (wValue) { case C_HUB_OVER_CURRENT: _DBG(0, "C_HUB_OVER_CURRENT\n"); spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC); <API key>(&isp1362_hcd->lock, flags); case C_HUB_LOCAL_POWER: _DBG(0, "C_HUB_LOCAL_POWER\n"); break; default: goto error; } break; case SetHubFeature: DBG(0, "SetHubFeature: "); switch (wValue) { case C_HUB_OVER_CURRENT: case C_HUB_LOCAL_POWER: _DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n"); break; default: goto error; } break; case GetHubDescriptor: DBG(0, "GetHubDescriptor\n"); <API key>(isp1362_hcd, (struct usb_hub_descriptor *)buf); break; case GetHubStatus: DBG(0, "GetHubStatus\n"); put_unaligned(cpu_to_le32(0), (__le32 *) buf); break; case GetPortStatus: #ifndef VERBOSE DBG(0, "GetPortStatus\n"); #endif if (!wIndex || wIndex > ports) goto error; tmp = isp1362_hcd->rhport[--wIndex]; put_unaligned(cpu_to_le32(tmp), (__le32 *) buf); break; case ClearPortFeature: DBG(0, "ClearPortFeature: "); if (!wIndex || wIndex > ports) goto error; wIndex switch (wValue) { case <API key>: _DBG(0, "<API key>\n"); tmp = RH_PS_CCS; break; case <API key>: _DBG(0, "<API key>\n"); tmp = RH_PS_PESC; break; case <API key>: _DBG(0, "<API key>\n"); tmp = RH_PS_POCI; break; case <API key>: _DBG(0, "<API key>\n"); tmp = RH_PS_PSSC; break; case USB_PORT_FEAT_POWER: _DBG(0, "USB_PORT_FEAT_POWER\n"); tmp = RH_PS_LSDA; break; case <API key>: _DBG(0, "<API key>\n"); tmp = RH_PS_CSC; break; case <API key>: _DBG(0, "<API key>\n"); tmp = RH_PS_OCIC; break; case 
<API key>: _DBG(0, "<API key>\n"); tmp = RH_PS_PRSC; break; default: goto error; } spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp); isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex); <API key>(&isp1362_hcd->lock, flags); break; case SetPortFeature: DBG(0, "SetPortFeature: "); if (!wIndex || wIndex > ports) goto error; wIndex switch (wValue) { case <API key>: _DBG(0, "<API key>\n"); #ifdef CONFIG_USB_OTG if (ohci->hcd.self.otg_port == (wIndex + 1) && ohci->hcd.self.b_hnp_enable) { start_hnp(ohci); break; } #endif spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS); isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex); <API key>(&isp1362_hcd->lock, flags); break; case USB_PORT_FEAT_POWER: _DBG(0, "USB_PORT_FEAT_POWER\n"); spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS); isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex); <API key>(&isp1362_hcd->lock, flags); break; case USB_PORT_FEAT_RESET: _DBG(0, "USB_PORT_FEAT_RESET\n"); spin_lock_irqsave(&isp1362_hcd->lock, flags); t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH); while (time_before(jiffies, t1)) { /* spin until any current reset finishes */ for (;;) { tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex); if (!(tmp & RH_PS_PRS)) break; udelay(500); } if (!(tmp & RH_PS_CCS)) break; /* Reset lasts 10ms (claims datasheet) */ isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS)); <API key>(&isp1362_hcd->lock, flags); msleep(10); spin_lock_irqsave(&isp1362_hcd->lock, flags); } isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex); <API key>(&isp1362_hcd->lock, flags); break; default: goto error; } break; default: error: /* "protocol stall" on error */ _DBG(0, "PROTOCOL STALL\n"); retval = -EPIPE; } return 
retval; } #ifdef CONFIG_PM static int isp1362_bus_suspend(struct usb_hcd *hcd) { int status = 0; struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); unsigned long flags; if (time_before(jiffies, isp1362_hcd->next_statechange)) msleep(5); spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL); switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) { case OHCI_USB_RESUME: DBG(0, "%s: resume/suspend?\n", __func__); isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS; isp1362_hcd->hc_control |= OHCI_USB_RESET; isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control); /* FALL THROUGH */ case OHCI_USB_RESET: status = -EBUSY; pr_warning("%s: needs reinit!\n", __func__); goto done; case OHCI_USB_SUSPEND: pr_warning("%s: already suspended?\n", __func__); goto done; } DBG(0, "%s: suspend root hub\n", __func__); /* First stop any processing */ hcd->state = HC_STATE_QUIESCING; if (!list_empty(&isp1362_hcd->atl_queue.active) || !list_empty(&isp1362_hcd->intl_queue.active) || !list_empty(&isp1362_hcd->istl_queue[0] .active) || !list_empty(&isp1362_hcd->istl_queue[1] .active)) { int limit; isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0); isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0); isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0); isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0); isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF); DBG(0, "%s: stopping schedules ...\n", __func__); limit = 2000; while (limit > 0) { udelay(250); limit -= 250; if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF) break; } mdelay(7); if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) { u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE); finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue); } if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) { u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE); finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue); } if 
(isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0) <API key>(isp1362_hcd, &isp1362_hcd->istl_queue[0]); if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1) <API key>(isp1362_hcd, &isp1362_hcd->istl_queue[1]); } DBG(0, "%s: HCINTSTAT: %08x\n", __func__, isp1362_read_reg32(isp1362_hcd, HCINTSTAT)); isp1362_write_reg32(isp1362_hcd, HCINTSTAT, isp1362_read_reg32(isp1362_hcd, HCINTSTAT)); /* Suspend hub */ isp1362_hcd->hc_control = OHCI_USB_SUSPEND; isp1362_show_reg(isp1362_hcd, HCCONTROL); isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control); isp1362_show_reg(isp1362_hcd, HCCONTROL); #if 1 isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL); if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) { pr_err("%s: controller won't suspend %08x\n", __func__, isp1362_hcd->hc_control); status = -EBUSY; } else #endif { /* no resumes until devices finish suspending */ isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5); } done: if (status == 0) { hcd->state = HC_STATE_SUSPENDED; DBG(0, "%s: HCD suspended: %08x\n", __func__, isp1362_read_reg32(isp1362_hcd, HCCONTROL)); } <API key>(&isp1362_hcd->lock, flags); return status; } static int isp1362_bus_resume(struct usb_hcd *hcd) { struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); u32 port; unsigned long flags; int status = -EINPROGRESS; if (time_before(jiffies, isp1362_hcd->next_statechange)) msleep(5); spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL); pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control); if (hcd->state == HC_STATE_RESUMING) { pr_warning("%s: duplicate resume\n", __func__); status = 0; } else switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) { case OHCI_USB_SUSPEND: DBG(0, "%s: resume root hub\n", __func__); isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS; isp1362_hcd->hc_control |= OHCI_USB_RESUME; isp1362_write_reg32(isp1362_hcd, HCCONTROL, 
isp1362_hcd->hc_control); break; case OHCI_USB_RESUME: /* HCFS changes sometime after INTR_RD */ DBG(0, "%s: remote wakeup\n", __func__); break; case OHCI_USB_OPER: DBG(0, "%s: odd resume\n", __func__); status = 0; hcd->self.root_hub->dev.power.power_state = PMSG_ON; break; default: /* RESET, we lost power */ DBG(0, "%s: root hub hardware reset\n", __func__); status = -EBUSY; } <API key>(&isp1362_hcd->lock, flags); if (status == -EBUSY) { DBG(0, "%s: Restarting HC\n", __func__); isp1362_hc_stop(hcd); return isp1362_hc_start(hcd); } if (status != -EINPROGRESS) return status; spin_lock_irqsave(&isp1362_hcd->lock, flags); port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP; while (port u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port); /* force global, not selective, resume */ if (!(stat & RH_PS_PSS)) { DBG(0, "%s: Not Resuming RH port %d\n", __func__, port); continue; } DBG(0, "%s: Resuming RH port %d\n", __func__, port); isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI); } <API key>(&isp1362_hcd->lock, flags); /* Some controllers (lucent) need extra-long delays */ hcd->state = HC_STATE_RESUMING; mdelay(20 /* usb 11.5.1.10 */ + 15); isp1362_hcd->hc_control = OHCI_USB_OPER; spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_show_reg(isp1362_hcd, HCCONTROL); isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control); <API key>(&isp1362_hcd->lock, flags); /* TRSMRCY */ msleep(10); /* keep it alive for ~5x suspend + resume costs */ isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250); hcd->self.root_hub->dev.power.power_state = PMSG_ON; hcd->state = HC_STATE_RUNNING; return 0; } #else #define isp1362_bus_suspend NULL #define isp1362_bus_resume NULL #endif #ifdef STUB_DEBUG_FILE static inline void create_debug_file(struct isp1362_hcd *isp1362_hcd) { } static inline void remove_debug_file(struct isp1362_hcd *isp1362_hcd) { } #else #include <linux/proc_fs.h> #include <linux/seq_file.h> static void dump_irq(struct 
seq_file *s, char *label, u16 mask) { seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask, mask & HCuPINT_CLKRDY ? " clkrdy" : "", mask & HCuPINT_SUSP ? " susp" : "", mask & HCuPINT_OPR ? " opr" : "", mask & HCuPINT_EOT ? " eot" : "", mask & HCuPINT_ATL ? " atl" : "", mask & HCuPINT_SOF ? " sof" : ""); } static void dump_int(struct seq_file *s, char *label, u32 mask) { seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask, mask & OHCI_INTR_MIE ? " MIE" : "", mask & OHCI_INTR_RHSC ? " rhsc" : "", mask & OHCI_INTR_FNO ? " fno" : "", mask & OHCI_INTR_UE ? " ue" : "", mask & OHCI_INTR_RD ? " rd" : "", mask & OHCI_INTR_SF ? " sof" : "", mask & OHCI_INTR_SO ? " so" : ""); } static void dump_ctrl(struct seq_file *s, char *label, u32 mask) { seq_printf(s, "%-15s %08x%s%s%s\n", label, mask, mask & OHCI_CTRL_RWC ? " rwc" : "", mask & OHCI_CTRL_RWE ? " rwe" : "", ({ char *hcfs; switch (mask & OHCI_CTRL_HCFS) { case OHCI_USB_OPER: hcfs = " oper"; break; case OHCI_USB_RESET: hcfs = " reset"; break; case OHCI_USB_RESUME: hcfs = " resume"; break; case OHCI_USB_SUSPEND: hcfs = " suspend"; break; default: hcfs = " ?"; } hcfs; })); } static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd) { seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg32(isp1362_hcd, HCREVISION)); seq_printf(s, "HCCONTROL [%02x] %08x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg32(isp1362_hcd, HCCONTROL)); seq_printf(s, "HCCMDSTAT [%02x] %08x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg32(isp1362_hcd, HCCMDSTAT)); seq_printf(s, "HCINTSTAT [%02x] %08x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg32(isp1362_hcd, HCINTSTAT)); seq_printf(s, "HCINTENB [%02x] %08x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg32(isp1362_hcd, HCINTENB)); seq_printf(s, "HCFMINTVL [%02x] %08x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg32(isp1362_hcd, HCFMINTVL)); seq_printf(s, "HCFMREM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM), 
isp1362_read_reg32(isp1362_hcd, HCFMREM)); seq_printf(s, "HCFMNUM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM), isp1362_read_reg32(isp1362_hcd, HCFMNUM)); seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg32(isp1362_hcd, HCLSTHRESH)); seq_printf(s, "HCRHDESCA [%02x] %08x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg32(isp1362_hcd, HCRHDESCA)); seq_printf(s, "HCRHDESCB [%02x] %08x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg32(isp1362_hcd, HCRHDESCB)); seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg32(isp1362_hcd, HCRHSTATUS)); seq_printf(s, "HCRHPORT1 [%02x] %08x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg32(isp1362_hcd, HCRHPORT1)); seq_printf(s, "HCRHPORT2 [%02x] %08x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg32(isp1362_hcd, HCRHPORT2)); seq_printf(s, "\n"); seq_printf(s, "HCHWCFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG), isp1362_read_reg16(isp1362_hcd, HCHWCFG)); seq_printf(s, "HCDMACFG [%02x] %04x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg16(isp1362_hcd, HCDMACFG)); seq_printf(s, "HCXFERCTR [%02x] %04x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg16(isp1362_hcd, HCXFERCTR)); seq_printf(s, "HCuPINT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT), isp1362_read_reg16(isp1362_hcd, HCuPINT)); seq_printf(s, "HCuPINTENB [%02x] %04x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg16(isp1362_hcd, HCuPINTENB)); seq_printf(s, "HCCHIPID [%02x] %04x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg16(isp1362_hcd, HCCHIPID)); seq_printf(s, "HCSCRATCH [%02x] %04x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg16(isp1362_hcd, HCSCRATCH)); seq_printf(s, "HCBUFSTAT [%02x] %04x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg16(isp1362_hcd, HCBUFSTAT)); seq_printf(s, "HCDIRADDR [%02x] %08x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg32(isp1362_hcd, HCDIRADDR)); #if 0 seq_printf(s, "HCDIRDATA [%02x] %04x\n", ISP1362_REG_NO(HCDIRDATA), isp1362_read_reg16(isp1362_hcd, 
HCDIRDATA)); #endif seq_printf(s, "HCISTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ)); seq_printf(s, "HCISTLRATE [%02x] %04x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg16(isp1362_hcd, HCISTLRATE)); seq_printf(s, "\n"); seq_printf(s, "HCINTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ)); seq_printf(s, "HCINTLBLKSZ[%02x] %04x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ)); seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg32(isp1362_hcd, HCINTLDONE)); seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg32(isp1362_hcd, HCINTLSKIP)); seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg32(isp1362_hcd, HCINTLLAST)); seq_printf(s, "HCINTLCURR [%02x] %04x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg16(isp1362_hcd, HCINTLCURR)); seq_printf(s, "\n"); seq_printf(s, "HCATLBUFSZ [%02x] %04x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ)); seq_printf(s, "HCATLBLKSZ [%02x] %04x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ)); #if 0 seq_printf(s, "HCATLDONE [%02x] %08x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg32(isp1362_hcd, HCATLDONE)); #endif seq_printf(s, "HCATLSKIP [%02x] %08x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg32(isp1362_hcd, HCATLSKIP)); seq_printf(s, "HCATLLAST [%02x] %08x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg32(isp1362_hcd, HCATLLAST)); seq_printf(s, "HCATLCURR [%02x] %04x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg16(isp1362_hcd, HCATLCURR)); seq_printf(s, "\n"); seq_printf(s, "HCATLDTC [%02x] %04x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg16(isp1362_hcd, HCATLDTC)); seq_printf(s, "HCATLDTCTO [%02x] %04x\n", ISP1362_REG_NO(<API key>), isp1362_read_reg16(isp1362_hcd, HCATLDTCTO)); } static int proc_isp1362_show(struct seq_file *s, void *unused) { 
struct isp1362_hcd *isp1362_hcd = s->private; struct isp1362_ep *ep; int i; seq_printf(s, "%s\n%s version %s\n", isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION); /* collect statistics to help estimate potential win for * DMA engines that care about alignment (PXA) */ seq_printf(s, "alignment: 16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n", isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4, isp1362_hcd->stat2, isp1362_hcd->stat1); seq_printf(s, "max # ptds in ATL fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds); seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds); seq_printf(s, "max # ptds in ISTL fifo: %d\n", max(isp1362_hcd->istl_queue[0] .stat_maxptds, isp1362_hcd->istl_queue[1] .stat_maxptds)); /* FIXME: don't show the following in suspended state */ spin_lock_irq(&isp1362_hcd->lock); dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB)); dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT)); dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB)); dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT)); dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL)); for (i = 0; i < NUM_ISP1362_IRQS; i++) if (isp1362_hcd->irq_stat[i]) seq_printf(s, "%-15s: %d\n", ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]); dump_regs(s, isp1362_hcd); list_for_each_entry(ep, &isp1362_hcd->async, schedule) { struct urb *urb; seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum, ({ char *s; switch (ep->nextpid) { case USB_PID_IN: s = "in"; break; case USB_PID_OUT: s = "out"; break; case USB_PID_SETUP: s = "setup"; break; case USB_PID_ACK: s = "status"; break; default: s = "?"; break; }; s;}), ep->maxpacket) ; list_for_each_entry(urb, &ep->hep->urb_list, urb_list) { seq_printf(s, " urb%p, %d/%d\n", urb, urb->actual_length, urb-><API key>); } } if (!list_empty(&isp1362_hcd->async)) seq_printf(s, "\n"); 
dump_ptd_queue(&isp1362_hcd->atl_queue); seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE); list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) { seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch, isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset); seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n", ep->interval, ep, (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ", ep->udev->devnum, ep->epnum, (ep->epnum == 0) ? "" : ((ep->nextpid == USB_PID_IN) ? "in" : "out"), ep->maxpacket); } dump_ptd_queue(&isp1362_hcd->intl_queue); seq_printf(s, "ISO:\n"); list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) { seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n", ep->interval, ep, (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ", ep->udev->devnum, ep->epnum, (ep->epnum == 0) ? "" : ((ep->nextpid == USB_PID_IN) ? "in" : "out"), ep->maxpacket); } spin_unlock_irq(&isp1362_hcd->lock); seq_printf(s, "\n"); return 0; } static int proc_isp1362_open(struct inode *inode, struct file *file) { return single_open(file, proc_isp1362_show, PDE(inode)->data); } static const struct file_operations proc_ops = { .open = proc_isp1362_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /* expect just one isp1362_hcd per system */ static const char proc_filename[] = "driver/isp1362"; static void create_debug_file(struct isp1362_hcd *isp1362_hcd) { struct proc_dir_entry *pde; pde = create_proc_entry(proc_filename, 0, NULL); if (pde == NULL) { pr_warning("%s: Failed to create debug file '%s'\n", __func__, proc_filename); return; } pde->proc_fops = &proc_ops; pde->data = isp1362_hcd; isp1362_hcd->pde = pde; } static void remove_debug_file(struct isp1362_hcd *isp1362_hcd) { if (isp1362_hcd->pde) remove_proc_entry(proc_filename, 0); } #endif static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd) { int tmp = 20; unsigned long flags; spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC); 
isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR); while (--tmp) { mdelay(1); if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR)) break; } if (!tmp) pr_err("Software reset timeout\n"); <API key>(&isp1362_hcd->lock, flags); } static int isp1362_mem_config(struct usb_hcd *hcd) { struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); unsigned long flags; u32 total; u16 istl_size = <API key>; u16 intl_blksize = <API key> + PTD_HEADER_SIZE; u16 intl_size = <API key> * intl_blksize; u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE; u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize; u16 atl_size; int i; WARN_ON(istl_size & 3); WARN_ON(atl_blksize & 3); WARN_ON(intl_blksize & 3); WARN_ON(atl_blksize < PTD_HEADER_SIZE); WARN_ON(intl_blksize < PTD_HEADER_SIZE); BUG_ON((unsigned)<API key> > 32); if (atl_buffers > 32) atl_buffers = 32; atl_size = atl_buffers * atl_blksize; total = atl_size + intl_size + istl_size; dev_info(hcd->self.controller, "ISP1362 Memory usage:\n"); dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n", istl_size / 2, istl_size, 0, istl_size / 2); dev_info(hcd->self.controller, " INTL: %4d * (%3zu+8): %4d @ $%04x\n", <API key>, intl_blksize - PTD_HEADER_SIZE, intl_size, istl_size); dev_info(hcd->self.controller, " ATL : %4d * (%3zu+8): %4d @ $%04x\n", atl_buffers, atl_blksize - PTD_HEADER_SIZE, atl_size, istl_size + intl_size); dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total, ISP1362_BUF_SIZE - total); if (total > ISP1362_BUF_SIZE) { dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n", __func__, total, ISP1362_BUF_SIZE); return -ENOMEM; } total = istl_size + intl_size + atl_size; spin_lock_irqsave(&isp1362_hcd->lock, flags); for (i = 0; i < 2; i++) { isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2, isp1362_hcd->istl_queue[i].buf_size = istl_size / 2; isp1362_hcd->istl_queue[i].blk_size = 4; INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active); 
snprintf(isp1362_hcd->istl_queue[i].name, sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i); DBG(3, "%s: %5s buf $%04x %d\n", __func__, isp1362_hcd->istl_queue[i].name, isp1362_hcd->istl_queue[i].buf_start, isp1362_hcd->istl_queue[i].buf_size); } isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2); isp1362_hcd->intl_queue.buf_start = istl_size; isp1362_hcd->intl_queue.buf_size = intl_size; isp1362_hcd->intl_queue.buf_count = <API key>; isp1362_hcd->intl_queue.blk_size = intl_blksize; isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count; isp1362_hcd->intl_queue.skip_map = ~0; INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active); isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ, isp1362_hcd->intl_queue.buf_size); isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ, isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE); isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0); isp1362_write_reg32(isp1362_hcd, HCINTLLAST, 1 << (<API key> - 1)); isp1362_hcd->atl_queue.buf_start = istl_size + intl_size; isp1362_hcd->atl_queue.buf_size = atl_size; isp1362_hcd->atl_queue.buf_count = atl_buffers; isp1362_hcd->atl_queue.blk_size = atl_blksize; isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count; isp1362_hcd->atl_queue.skip_map = ~0; INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active); isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ, isp1362_hcd->atl_queue.buf_size); isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ, isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE); isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0); isp1362_write_reg32(isp1362_hcd, HCATLLAST, 1 << (atl_buffers - 1)); snprintf(isp1362_hcd->atl_queue.name, sizeof(isp1362_hcd->atl_queue.name), "ATL"); snprintf(isp1362_hcd->intl_queue.name, sizeof(isp1362_hcd->intl_queue.name), "INTL"); DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__, isp1362_hcd->intl_queue.name, isp1362_hcd->intl_queue.buf_start, <API key>, isp1362_hcd->intl_queue.blk_size, isp1362_hcd->intl_queue.buf_size); DBG(3, "%s: %5s buf 
$%04x %2d * %4d = %4d\n", __func__, isp1362_hcd->atl_queue.name, isp1362_hcd->atl_queue.buf_start, atl_buffers, isp1362_hcd->atl_queue.blk_size, isp1362_hcd->atl_queue.buf_size); <API key>(&isp1362_hcd->lock, flags); return 0; } static int isp1362_hc_reset(struct usb_hcd *hcd) { int ret = 0; struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); unsigned long t; unsigned long timeout = 100; unsigned long flags; int clkrdy = 0; pr_info("%s:\n", __func__); if (isp1362_hcd->board && isp1362_hcd->board->reset) { isp1362_hcd->board->reset(hcd->self.controller, 1); msleep(20); if (isp1362_hcd->board->clock) isp1362_hcd->board->clock(hcd->self.controller, 1); isp1362_hcd->board->reset(hcd->self.controller, 0); } else isp1362_sw_reset(isp1362_hcd); /* chip has been reset. First we need to see a clock */ t = jiffies + msecs_to_jiffies(timeout); while (!clkrdy && time_before_eq(jiffies, t)) { spin_lock_irqsave(&isp1362_hcd->lock, flags); clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY; <API key>(&isp1362_hcd->lock, flags); if (!clkrdy) msleep(4); } spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY); <API key>(&isp1362_hcd->lock, flags); if (!clkrdy) { pr_err("Clock not ready after %lums\n", timeout); ret = -ENODEV; } return ret; } static void isp1362_hc_stop(struct usb_hcd *hcd) { struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); unsigned long flags; u32 tmp; pr_info("%s:\n", __func__); del_timer_sync(&hcd->rh_timer); spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0); /* Switch off power for all ports */ tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA); tmp &= ~(RH_A_NPS | RH_A_PSM); isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp); isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS); /* Reset the chip */ if (isp1362_hcd->board && isp1362_hcd->board->reset) isp1362_hcd->board->reset(hcd->self.controller, 1); else isp1362_sw_reset(isp1362_hcd); 
if (isp1362_hcd->board && isp1362_hcd->board->clock) isp1362_hcd->board->clock(hcd->self.controller, 0); <API key>(&isp1362_hcd->lock, flags); } #ifdef CHIP_BUFFER_TEST static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd) { int ret = 0; u16 *ref; unsigned long flags; ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL); if (ref) { int offset; u16 *tst = &ref[ISP1362_BUF_SIZE / 2]; for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) { ref[offset] = ~offset; tst[offset] = offset; } for (offset = 0; offset < 4; offset++) { int j; for (j = 0; j < 8; j++) { spin_lock_irqsave(&isp1362_hcd->lock, flags); <API key>(isp1362_hcd, (u8 *)ref + offset, 0, j); isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j); <API key>(&isp1362_hcd->lock, flags); if (memcmp(ref, tst, j)) { ret = -ENODEV; pr_err("%s: memory check with %d byte offset %d failed\n", __func__, j, offset); dump_data((u8 *)ref + offset, j); dump_data((u8 *)tst + offset, j); } } } spin_lock_irqsave(&isp1362_hcd->lock, flags); <API key>(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE); isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE); <API key>(&isp1362_hcd->lock, flags); if (memcmp(ref, tst, ISP1362_BUF_SIZE)) { ret = -ENODEV; pr_err("%s: memory check failed\n", __func__); dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2); } for (offset = 0; offset < 256; offset++) { int test_size = 0; yield(); memset(tst, 0, ISP1362_BUF_SIZE); spin_lock_irqsave(&isp1362_hcd->lock, flags); <API key>(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE); isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE); <API key>(&isp1362_hcd->lock, flags); if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))), ISP1362_BUF_SIZE / 2)) { pr_err("%s: Failed to clear buffer\n", __func__); dump_data((u8 *)tst, ISP1362_BUF_SIZE); break; } spin_lock_irqsave(&isp1362_hcd->lock, flags); <API key>(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE); <API key>(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref), offset * 2 + PTD_HEADER_SIZE, test_size); 
isp1362_read_buffer(isp1362_hcd, tst, offset * 2, PTD_HEADER_SIZE + test_size); <API key>(&isp1362_hcd->lock, flags); if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) { dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size); dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size); spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_read_buffer(isp1362_hcd, tst, offset * 2, PTD_HEADER_SIZE + test_size); <API key>(&isp1362_hcd->lock, flags); if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) { ret = -ENODEV; pr_err("%s: memory check with offset %02x failed\n", __func__, offset); break; } pr_warning("%s: memory check with offset %02x ok after second read\n", __func__, offset); } } kfree(ref); } return ret; } #endif static int isp1362_hc_start(struct usb_hcd *hcd) { int ret; struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); struct <API key> *board = isp1362_hcd->board; u16 hwcfg; u16 chipid; unsigned long flags; pr_info("%s:\n", __func__); spin_lock_irqsave(&isp1362_hcd->lock, flags); chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID); <API key>(&isp1362_hcd->lock, flags); if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) { pr_err("%s: Invalid chip ID %04x\n", __func__, chipid); return -ENODEV; } #ifdef CHIP_BUFFER_TEST ret = isp1362_chip_test(isp1362_hcd); if (ret) return -ENODEV; #endif spin_lock_irqsave(&isp1362_hcd->lock, flags); /* clear interrupt status and disable all interrupt sources */ isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff); isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0); /* HW conf */ hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1); if (board->sel15Kres) hwcfg |= <API key> | ((MAX_ROOT_PORTS > 1) ? 
<API key> : 0); if (board->clknotstop) hwcfg |= HCHWCFG_CLKNOTSTOP; if (board->oc_enable) hwcfg |= HCHWCFG_ANALOG_OC; if (board->int_act_high) hwcfg |= HCHWCFG_INT_POL; if (board->int_edge_triggered) hwcfg |= HCHWCFG_INT_TRIGGER; if (board->dreq_act_high) hwcfg |= HCHWCFG_DREQ_POL; if (board->dack_act_high) hwcfg |= HCHWCFG_DACK_POL; isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg); isp1362_show_reg(isp1362_hcd, HCHWCFG); isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0); <API key>(&isp1362_hcd->lock, flags); ret = isp1362_mem_config(hcd); if (ret) return ret; spin_lock_irqsave(&isp1362_hcd->lock, flags); /* Root hub conf */ isp1362_hcd->rhdesca = 0; if (board->no_power_switching) isp1362_hcd->rhdesca |= RH_A_NPS; if (board-><API key>) isp1362_hcd->rhdesca |= RH_A_PSM; if (board->potpg) isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT; else isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT; isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM); isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM); isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA); isp1362_hcd->rhdescb = RH_B_PPCM; isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb); isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB); isp1362_read_reg32(isp1362_hcd, HCFMINTVL); isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI); isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH); <API key>(&isp1362_hcd->lock, flags); isp1362_hcd->hc_control = OHCI_USB_OPER; hcd->state = HC_STATE_RUNNING; spin_lock_irqsave(&isp1362_hcd->lock, flags); /* Set up interrupts */ isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE; isp1362_hcd->intenb |= OHCI_INTR_RD; isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP; isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb); isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb); /* Go operational */ isp1362_write_reg32(isp1362_hcd, 
HCCONTROL, isp1362_hcd->hc_control); /* enable global power */ isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE); <API key>(&isp1362_hcd->lock, flags); return 0; } static struct hc_driver isp1362_hc_driver = { .description = hcd_name, .product_desc = "ISP1362 Host Controller", .hcd_priv_size = sizeof(struct isp1362_hcd), .irq = isp1362_irq, .flags = HCD_USB11 | HCD_MEMORY, .reset = isp1362_hc_reset, .start = isp1362_hc_start, .stop = isp1362_hc_stop, .urb_enqueue = isp1362_urb_enqueue, .urb_dequeue = isp1362_urb_dequeue, .endpoint_disable = <API key>, .get_frame_number = isp1362_get_frame, .hub_status_data = <API key>, .hub_control = isp1362_hub_control, .bus_suspend = isp1362_bus_suspend, .bus_resume = isp1362_bus_resume, }; #define resource_len(r) (((r)->end - (r)->start) + 1) static int __devexit isp1362_remove(struct platform_device *pdev) { struct usb_hcd *hcd = <API key>(pdev); struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); struct resource *res; remove_debug_file(isp1362_hcd); DBG(0, "%s: Removing HCD\n", __func__); usb_remove_hcd(hcd); DBG(0, "%s: Unmapping data_reg @ %p\n", __func__, isp1362_hcd->data_reg); iounmap(isp1362_hcd->data_reg); DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__, isp1362_hcd->addr_reg); iounmap(isp1362_hcd->addr_reg); res = <API key>(pdev, IORESOURCE_MEM, 1); DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start); if (res) release_mem_region(res->start, resource_len(res)); res = <API key>(pdev, IORESOURCE_MEM, 0); DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start); if (res) release_mem_region(res->start, resource_len(res)); DBG(0, "%s: put_hcd\n", __func__); usb_put_hcd(hcd); DBG(0, "%s: Done\n", __func__); return 0; } static int __init isp1362_probe(struct platform_device *pdev) { struct usb_hcd *hcd; struct isp1362_hcd *isp1362_hcd; struct resource *addr, *data; void __iomem *addr_reg; void __iomem *data_reg; int irq; int retval = 0; struct 
resource *irq_res; unsigned int irq_flags = 0; /* basic sanity checks first. board-specific init logic should * have initialized this the three resources and probably board * specific platform_data. we don't probe for IRQs, and do only * minimal sanity checking. */ if (pdev->num_resources < 3) { retval = -ENODEV; goto err1; } data = <API key>(pdev, IORESOURCE_MEM, 0); addr = <API key>(pdev, IORESOURCE_MEM, 1); irq_res = <API key>(pdev, IORESOURCE_IRQ, 0); if (!addr || !data || !irq_res) { retval = -ENODEV; goto err1; } irq = irq_res->start; #ifdef CONFIG_USB_HCD_DMA if (pdev->dev.dma_mask) { struct resource *dma_res = <API key>(pdev, IORESOURCE_MEM, 2); if (!dma_res) { retval = -ENODEV; goto err1; } isp1362_hcd->data_dma = dma_res->start; isp1362_hcd->max_dma_size = resource_len(dma_res); } #else if (pdev->dev.dma_mask) { DBG(1, "won't do DMA"); retval = -ENODEV; goto err1; } #endif if (!request_mem_region(addr->start, resource_len(addr), hcd_name)) { retval = -EBUSY; goto err1; } addr_reg = ioremap(addr->start, resource_len(addr)); if (addr_reg == NULL) { retval = -ENOMEM; goto err2; } if (!request_mem_region(data->start, resource_len(data), hcd_name)) { retval = -EBUSY; goto err3; } data_reg = ioremap(data->start, resource_len(data)); if (data_reg == NULL) { retval = -ENOMEM; goto err4; } /* allocate and initialize hcd */ hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) { retval = -ENOMEM; goto err5; } hcd->rsrc_start = data->start; isp1362_hcd = hcd_to_isp1362_hcd(hcd); isp1362_hcd->data_reg = data_reg; isp1362_hcd->addr_reg = addr_reg; isp1362_hcd->next_statechange = jiffies; spin_lock_init(&isp1362_hcd->lock); INIT_LIST_HEAD(&isp1362_hcd->async); INIT_LIST_HEAD(&isp1362_hcd->periodic); INIT_LIST_HEAD(&isp1362_hcd->isoc); INIT_LIST_HEAD(&isp1362_hcd->remove_list); isp1362_hcd->board = pdev->dev.platform_data; #if USE_PLATFORM_DELAY if (!isp1362_hcd->board->delay) { dev_err(hcd->self.controller, "No platform delay function 
given\n"); retval = -ENODEV; goto err6; } #endif if (irq_res->flags & <API key>) irq_flags |= IRQF_TRIGGER_RISING; if (irq_res->flags & <API key>) irq_flags |= <API key>; if (irq_res->flags & <API key>) irq_flags |= IRQF_TRIGGER_HIGH; if (irq_res->flags & <API key>) irq_flags |= IRQF_TRIGGER_LOW; retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_DISABLED | IRQF_SHARED); if (retval != 0) goto err6; pr_info("%s, irq %d\n", hcd->product_desc, irq); create_debug_file(isp1362_hcd); return 0; err6: DBG(0, "%s: Freeing dev %p\n", __func__, isp1362_hcd); usb_put_hcd(hcd); err5: DBG(0, "%s: Unmapping data_reg @ %p\n", __func__, data_reg); iounmap(data_reg); err4: DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)data->start); release_mem_region(data->start, resource_len(data)); err3: DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__, addr_reg); iounmap(addr_reg); err2: DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)addr->start); release_mem_region(addr->start, resource_len(addr)); err1: pr_err("%s: init error, %d\n", __func__, retval); return retval; } #ifdef CONFIG_PM static int isp1362_suspend(struct platform_device *pdev, pm_message_t state) { struct usb_hcd *hcd = <API key>(pdev); struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); unsigned long flags; int retval = 0; DBG(0, "%s: Suspending device\n", __func__); if (state.event == PM_EVENT_FREEZE) { DBG(0, "%s: Suspending root hub\n", __func__); retval = isp1362_bus_suspend(hcd); } else { DBG(0, "%s: Suspending RH ports\n", __func__); spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS); <API key>(&isp1362_hcd->lock, flags); } if (retval == 0) pdev->dev.power.power_state = state; return retval; } static int isp1362_resume(struct platform_device *pdev) { struct usb_hcd *hcd = <API key>(pdev); struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); unsigned long flags; DBG(0, "%s: Resuming\n", __func__); if 
(pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { DBG(0, "%s: Resume RH ports\n", __func__); spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC); <API key>(&isp1362_hcd->lock, flags); return 0; } pdev->dev.power.power_state = PMSG_ON; return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd)); } #else #define isp1362_suspend NULL #define isp1362_resume NULL #endif static struct platform_driver isp1362_driver = { .probe = isp1362_probe, .remove = __devexit_p(isp1362_remove), .suspend = isp1362_suspend, .resume = isp1362_resume, .driver = { .name = (char *)hcd_name, .owner = THIS_MODULE, }, }; static int __init isp1362_init(void) { if (usb_disabled()) return -ENODEV; pr_info("driver %s, %s\n", hcd_name, DRIVER_VERSION); return <API key>(&isp1362_driver); } module_init(isp1362_init); static void __exit isp1362_cleanup(void) { <API key>(&isp1362_driver); } module_exit(isp1362_cleanup);
/*
 * S3C24XX on-chip I2C (IIC) bus controller driver (s3c2410/s3c2440).
 *
 * NOTE(review): many identifiers in this chunk were replaced with the
 * literal token "<API key>" during extraction, and the whole file was
 * flattened onto single physical lines.  Code below is token-identical
 * to the source; only comments/formatting were added.  TODO: restore
 * the redacted identifiers before attempting to compile.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <asm/hardware.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/arch/regs-gpio.h>
#include <asm/arch/regs-iic.h>
#include <asm/arch/iic.h>

/* i2c controller state */
enum s3c24xx_i2c_state {
	STATE_IDLE,
	STATE_START,
	STATE_READ,
	STATE_WRITE,
	STATE_STOP
};

/* Per-controller driver state; one transfer (set of i2c_msg) at a time. */
struct s3c24xx_i2c {
	spinlock_t		lock;		/* protects msg_* fields against the IRQ handler */
	wait_queue_head_t	wait;		/* woken when msg_num drops to 0 (xfer complete) */
	struct i2c_msg		*msg;		/* current message in the set */
	unsigned int		msg_num;	/* number of messages in the current set */
	unsigned int		msg_idx;	/* index of current message (or error code on完 — see master_complete) */
	unsigned int		msg_ptr;	/* byte offset within current message */
	unsigned int		tx_setup;	/* data setup delay in ns after writing IICDS */
	enum s3c24xx_i2c_state	state;
	void __iomem		*regs;		/* mapped controller registers */
	struct clk		*clk;
	struct device		*dev;
	struct resource		*irq;
	struct resource		*ioarea;
	struct i2c_adapter	adap;
};

/* default platform data to use if not supplied in the platform_device */
static struct <API key> <API key> = {
	.flags		= 0,
	.slave_addr	= 0x10,
	.bus_freq	= 100*1000,	/* Hz */
	.max_freq	= 400*1000,	/* Hz */
	.sda_delay	= <API key> | <API key>,
};

/* s3c24xx_i2c_is2440()
 *
 * return true is this is an s3c2440
 */
static inline int s3c24xx_i2c_is2440(struct s3c24xx_i2c *i2c)
{
	struct platform_device *pdev = to_platform_device(i2c->dev);

	/* the 2440 variant is identified purely by its device name */
	return !strcmp(pdev->name, "s3c2440-i2c");
}

/* <API key>
 *
 * get the platform data associated with the given device, or return
 * the default if there is none
 */
static inline struct <API key> *<API key>(struct device *dev)
{
	if (dev->platform_data != NULL)
		return (struct <API key> *)dev->platform_data;

	return &<API key>;
}

/* <API key>
 *
 * complete the message and wake up the caller, using the given return code,
 * or zero to mean ok.
 */
static inline void <API key>(struct s3c24xx_i2c *i2c, int ret)
{
	dev_dbg(i2c->dev, "master_complete %d\n", ret);

	i2c->msg_ptr = 0;
	i2c->msg = NULL;
	i2c->msg_idx ++;
	/* msg_num == 0 is the wait_event_timeout() wake-up condition */
	i2c->msg_num = 0;
	if (ret)
		i2c->msg_idx = ret;	/* error code is passed back via msg_idx */

	wake_up(&i2c->wait);
}

/* clear the ACK-generation bit in IICCON (NAK the next received byte) */
static inline void <API key>(struct s3c24xx_i2c *i2c)
{
	unsigned long tmp;

	tmp = readl(i2c->regs + S3C2410_IICCON);
	writel(tmp & ~<API key>, i2c->regs + S3C2410_IICCON);
}

/* set the ACK-generation bit in IICCON */
static inline void <API key>(struct s3c24xx_i2c *i2c)
{
	unsigned long tmp;

	tmp = readl(i2c->regs + S3C2410_IICCON);
	writel(tmp | <API key>, i2c->regs + S3C2410_IICCON);
}

/* irq enable/disable functions */
static inline void <API key>(struct s3c24xx_i2c *i2c)
{
	unsigned long tmp;

	tmp = readl(i2c->regs + S3C2410_IICCON);
	writel(tmp & ~<API key>, i2c->regs + S3C2410_IICCON);
}

static inline void <API key>(struct s3c24xx_i2c *i2c)
{
	unsigned long tmp;

	tmp = readl(i2c->regs + S3C2410_IICCON);
	writel(tmp | <API key>, i2c->regs + S3C2410_IICCON);
}

/* <API key>
 *
 * put the start of a message onto the bus
 */
static void <API key>(struct s3c24xx_i2c *i2c, struct i2c_msg *msg)
{
	unsigned int addr = (msg->addr & 0x7f) << 1;	/* 7-bit addr, shifted; bit0 = R/W */
	unsigned long stat;
	unsigned long iiccon;

	stat = 0;
	stat |= <API key>;

	if (msg->flags & I2C_M_RD) {
		stat |= <API key>;
		addr |= 1;	/* read transactions set the R/W bit */
	} else
		stat |= <API key>;

	if (msg->flags & I2C_M_REV_DIR_ADDR)
		addr ^= 1;

	// todo - check for wether ack wanted or not
	<API key>(i2c);

	iiccon = readl(i2c->regs + S3C2410_IICCON);
	writel(stat, i2c->regs + S3C2410_IICSTAT);

	dev_dbg(i2c->dev, "START: %08lx to IICSTAT, %02x to DS\n", stat, addr);
	writeb(addr, i2c->regs + S3C2410_IICDS);

	/* delay here to ensure the data byte has gotten onto the bus
	 * before the transaction is started */

	ndelay(i2c->tx_setup);

	dev_dbg(i2c->dev, "iiccon, %08lx\n", iiccon);
	writel(iiccon, i2c->regs + S3C2410_IICCON);

	/* setting the START bit in IICSTAT actually begins the transfer */
	stat |= <API key>;
	writel(stat, i2c->regs + S3C2410_IICSTAT);
}

static inline void s3c24xx_i2c_stop(struct s3c24xx_i2c *i2c, int ret)
{
	unsigned long iicstat = readl(i2c->regs + S3C2410_IICSTAT);

	dev_dbg(i2c->dev, "STOP\n");

	/* stop the transfer */
	iicstat &= ~ <API key>;
	writel(iicstat, i2c->regs + S3C2410_IICSTAT);

	i2c->state = STATE_STOP;

	<API key>(i2c, ret);	/* complete and wake the caller */
	<API key>(i2c);		/* no further IRQs wanted */
}

/* helper functions to determine the current state in the set of
 * messages we are sending */

/* is_lastmsg()
 *
 * returns TRUE if the current message is the last in the set
 */
static inline int is_lastmsg(struct s3c24xx_i2c *i2c)
{
	return i2c->msg_idx >= (i2c->msg_num - 1);
}

/* is_msglast
 *
 * returns TRUE if we this is the last byte in the current message
 */
static inline int is_msglast(struct s3c24xx_i2c *i2c)
{
	return i2c->msg_ptr == i2c->msg->len-1;
}

/* is_msgend
 *
 * returns TRUE if we reached the end of the current message
 */
static inline int is_msgend(struct s3c24xx_i2c *i2c)
{
	return i2c->msg_ptr >= i2c->msg->len;
}

/* <API key>
 *
 * process an interrupt and work out what to do
 */
static int <API key>(struct s3c24xx_i2c *i2c, unsigned long iicstat)
{
	unsigned long tmp;
	unsigned char byte;
	int ret = 0;

	switch (i2c->state) {

	case STATE_IDLE:
		dev_err(i2c->dev, "%s: called in STATE_IDLE\n", __FUNCTION__);
		goto out;
		break;

	case STATE_STOP:
		dev_err(i2c->dev, "%s: called in STATE_STOP\n", __FUNCTION__);
		<API key>(i2c);
		goto out_ack;

	case STATE_START:
		/* last thing we did was send a start condition on the
		 * bus, or started a new i2c message
		 */

		if (iicstat & <API key> &&
		    !(i2c->msg->flags & I2C_M_IGNORE_NAK)) {
			/* ack was not received... */

			dev_dbg(i2c->dev, "ack was not received\n");
			s3c24xx_i2c_stop(i2c, -EREMOTEIO);
			goto out_ack;
		}

		if (i2c->msg->flags & I2C_M_RD)
			i2c->state = STATE_READ;
		else
			i2c->state = STATE_WRITE;

		/* terminate the transfer if there is nothing to do
		 * (used by the i2c probe to find devices */

		if (is_lastmsg(i2c) && i2c->msg->len == 0) {
			s3c24xx_i2c_stop(i2c, 0);
			goto out_ack;
		}

		if (i2c->state == STATE_READ)
			goto prepare_read;

		/* fall through to the write state, as we will need to
		 * send a byte as well */

	case STATE_WRITE:
		/* we are writing data to the device... check for the
		 * end of the message, and if so, work out what to do
		 */

 retry_write:
		if (!is_msgend(i2c)) {
			byte = i2c->msg->buf[i2c->msg_ptr++];
			writeb(byte, i2c->regs + S3C2410_IICDS);

			/* delay after writing the byte to allow the
			 * data setup time on the bus, as writing the
			 * data to the register causes the first bit
			 * to appear on SDA, and SCL will change as
			 * soon as the interrupt is acknowledged */

			ndelay(i2c->tx_setup);

		} else if (!is_lastmsg(i2c)) {
			/* we need to go to the next i2c message */

			dev_dbg(i2c->dev, "WRITE: Next Message\n");

			i2c->msg_ptr = 0;
			i2c->msg_idx ++;
			i2c->msg++;

			/* check to see if we need to do another message */
			if (i2c->msg->flags & I2C_M_NOSTART) {

				if (i2c->msg->flags & I2C_M_RD) {
					/* cannot do this, the controller
					 * forces us to send a new START
					 * when we change direction */

					s3c24xx_i2c_stop(i2c, -EINVAL);
				}

				goto retry_write;
			} else {
				/* send the new start */
				<API key>(i2c, i2c->msg);
				i2c->state = STATE_START;
			}

		} else {
			/* send stop */

			s3c24xx_i2c_stop(i2c, 0);
		}
		break;

	case STATE_READ:
		/* we have a byte of data in the data register, do
		 * something with it, and then work out wether we are
		 * going to do any more read/write
		 */

		if (!(i2c->msg->flags & I2C_M_IGNORE_NAK) &&
		    !(is_msglast(i2c) && is_lastmsg(i2c))) {

			if (iicstat & <API key>) {
				dev_dbg(i2c->dev, "READ: No Ack\n");

				s3c24xx_i2c_stop(i2c, -ECONNREFUSED);
				goto out_ack;
			}
		}

		byte = readb(i2c->regs + S3C2410_IICDS);
		i2c->msg->buf[i2c->msg_ptr++] = byte;

 prepare_read:
		if (is_msglast(i2c)) {
			/* last byte of buffer */

			if (is_lastmsg(i2c))
				<API key>(i2c);	/* NAK the final byte */

		} else if (is_msgend(i2c)) {
			/* ok, we've read the entire buffer, see if there
			 * is anything else we need to do */

			if (is_lastmsg(i2c)) {
				/* last message, send stop and complete */
				dev_dbg(i2c->dev, "READ: Send Stop\n");

				s3c24xx_i2c_stop(i2c, 0);
			} else {
				/* go to the next transfer */
				dev_dbg(i2c->dev, "READ: Next Transfer\n");

				i2c->msg_ptr = 0;
				i2c->msg_idx++;
				i2c->msg++;
			}
		}

		break;
	}

	/* acknowlegde the IRQ and get back on with the work */

 out_ack:
	tmp = readl(i2c->regs + S3C2410_IICCON);
	tmp &= ~<API key>;
	writel(tmp, i2c->regs + S3C2410_IICCON);
 out:
	return ret;
}

/* s3c24xx_i2c_irq
 *
 * top level IRQ servicing routine
 */
static irqreturn_t s3c24xx_i2c_irq(int irqno, void *dev_id)
{
	struct s3c24xx_i2c *i2c = dev_id;
	unsigned long status;
	unsigned long tmp;

	status = readl(i2c->regs + S3C2410_IICSTAT);

	if (status & <API key>) {
		// deal with arbitration loss
		dev_err(i2c->dev, "deal with arbitration loss\n");
	}

	if (i2c->state == STATE_IDLE) {
		dev_dbg(i2c->dev, "IRQ: error i2c->state == IDLE\n");

		/* spurious IRQ: just ack the pending-interrupt bit */
		tmp = readl(i2c->regs + S3C2410_IICCON);
		tmp &= ~<API key>;
		writel(tmp, i2c->regs +  S3C2410_IICCON);
		goto out;
	}

	/* pretty much this leaves us with the fact that we've
	 * transmitted or received whatever byte we last sent */

	<API key>(i2c, status);

 out:
	return IRQ_HANDLED;
}

/* <API key>
 *
 * get the i2c bus for a master transaction
 */
static int <API key>(struct s3c24xx_i2c *i2c)
{
	unsigned long iicstat;
	int timeout = 400;

	/* NOTE(review): the loop header below is garbled — the original
	 * source almost certainly read "while (timeout--) {".  Code is
	 * reproduced verbatim; fix when restoring the redacted source. */
	while (timeout iicstat = readl(i2c->regs + S3C2410_IICSTAT);

		if (!(iicstat & <API key>))
			return 0;

		msleep(1);
	}

	dev_dbg(i2c->dev, "timeout: GPEDAT is %08x\n",
		__raw_readl(S3C2410_GPEDAT));

	return -ETIMEDOUT;
}

/* s3c24xx_i2c_doxfer
 *
 * this starts an i2c transfer
 */
static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c, struct i2c_msg *msgs, int num)
{
	unsigned long timeout;
	int ret;

	ret = <API key>(i2c);	/* wait for the bus to become free */
	if (ret != 0) {
		dev_err(i2c->dev, "cannot get bus (error %d)\n", ret);
		ret = -EAGAIN;
		goto out;
	}

	spin_lock_irq(&i2c->lock);

	i2c->msg     = msgs;
	i2c->msg_num = num;
	i2c->msg_ptr = 0;
	i2c->msg_idx = 0;
	i2c->state   = STATE_START;

	<API key>(i2c);
	<API key>(i2c, msgs);	/* kick off; rest runs from the IRQ handler */
	spin_unlock_irq(&i2c->lock);

	timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);

	ret = i2c->msg_idx;	/* number of msgs done, or negative error */

	/* having these next two as dev_err() makes life very
	 * noisy when doing an i2cdetect */

	if (timeout == 0)
		dev_dbg(i2c->dev, "timeout\n");
	else if (ret != num)
		dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret);

	/* ensure the stop has been through the bus */

	msleep(1);

 out:
	return ret;
}

/* s3c24xx_i2c_xfer
 *
 * first port of call from the i2c bus code when an message needs
 * transferring across the i2c bus.
 */
static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
			struct i2c_msg *msgs, int num)
{
	struct s3c24xx_i2c *i2c = (struct s3c24xx_i2c *)adap->algo_data;
	int retry;
	int ret;

	/* -EAGAIN (bus busy) is the only retried error */
	for (retry = 0; retry < adap->retries; retry++) {

		ret = s3c24xx_i2c_doxfer(i2c, msgs, num);

		if (ret != -EAGAIN)
			return ret;

		dev_dbg(i2c->dev, "Retrying transmission (%d)\n", retry);

		udelay(100);
	}

	return -EREMOTEIO;
}

/* declare our i2c functionality */
static u32 s3c24xx_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | <API key>;
}

/* i2c bus registration info */

static const struct i2c_algorithm <API key> = {
	.master_xfer	= s3c24xx_i2c_xfer,
	.functionality	= s3c24xx_i2c_func,
};

/* single static controller instance; probed device binds to this */
static struct s3c24xx_i2c s3c24xx_i2c = {
	.lock		= <API key>(s3c24xx_i2c.lock),
	.wait		= <API key>(s3c24xx_i2c.wait),
	.tx_setup	= 50,	/* ns */
	.adap		= {
		.name		= "s3c2410-i2c",
		.owner		= THIS_MODULE,
		.algo		= &<API key>,
		.retries	= 2,
		.class		= I2C_CLASS_HWMON,
	},
};

/* <API key>
 *
 * return the divisor settings for a given frequency
 */
static int <API key>(unsigned long clkin, unsigned int wanted,
		   unsigned int *div1, unsigned int *divs)
{
	unsigned int calc_divs = clkin / wanted;
	unsigned int calc_div1;

	/* prescaler is either /16 or /512; secondary divisor is 1..17 */
	if (calc_divs > (16*16))
		calc_div1 = 512;
	else
		calc_div1 = 16;

	calc_divs += calc_div1-1;	/* round up */
	calc_divs /= calc_div1;

	if (calc_divs == 0)
		calc_divs = 1;
	if (calc_divs > 17)
		calc_divs = 17;

	*divs = calc_divs;
	*div1 = calc_div1;

	return clkin / (calc_divs * calc_div1);	/* achieved frequency */
}

/* freq_acceptable
 *
 * test wether a frequency is within the acceptable range of error
 */
static inline int freq_acceptable(unsigned int freq, unsigned int wanted)
{
	int diff = freq - wanted;

	return (diff >= -2 && diff <= 2);	/* within +/-2 KHz */
}

/* <API key>
 *
 * work out a divisor for the user requested frequency setting,
 * either by the requested frequency, or scanning the acceptable
 * range of frequencies until something is found
 */
static int <API key>(struct s3c24xx_i2c *i2c,
				   struct <API key> *pdata,
				   unsigned long *iicon,
				   unsigned int *got)
{
	unsigned long clkin = clk_get_rate(i2c->clk);
	unsigned int divs, div1;
	int freq;
	int start, end;

	clkin /= 1000;		/* clkin now in KHz */

	dev_dbg(i2c->dev,  "pdata %p, freq %lu %lu..%lu\n",
		 pdata, pdata->bus_freq, pdata->min_freq, pdata->max_freq);

	if (pdata->bus_freq != 0) {
		freq = <API key>(clkin, pdata->bus_freq/1000,
				&div1, &divs);
		if (freq_acceptable(freq, pdata->bus_freq/1000))
			goto found;
	}

	/* ok, we may have to search for something suitable... */

	start = (pdata->max_freq == 0) ? pdata->bus_freq : pdata->max_freq;
	end = pdata->min_freq;

	start /= 1000;
	end /= 1000;

	/* search loop... */

	/* NOTE(review): the loop header below is garbled — the original
	 * almost certainly read "for (; start > end; start--) {".
	 * Reproduced verbatim; fix when restoring the source. */
	for (; start > end; start freq = <API key>(clkin, start, &div1, &divs);
		if (freq_acceptable(freq, start))
			goto found;
	}

	/* cannot find frequency spec */

	return -EINVAL;

 found:
	*got = freq;
	*iicon |= (divs-1);			/* IICCON[3:0]: tx clock divider - 1 */
	*iicon |= (div1 == 512) ? <API key> : 0;	/* prescaler select bit */
	return 0;
}

/* s3c24xx_i2c_init
 *
 * initialise the controller, set the IO lines and frequency
 */
static int s3c24xx_i2c_init(struct s3c24xx_i2c *i2c)
{
	unsigned long iicon = <API key> | <API key>;
	struct <API key> *pdata;
	unsigned int freq;

	/* get the plafrom data */

	pdata = <API key>(i2c->adap.dev.parent);

	/* inititalise the gpio */

	s3c2410_gpio_cfgpin(S3C2410_GPE15, <API key>);
	s3c2410_gpio_cfgpin(S3C2410_GPE14, <API key>);

	/* write slave address */

	writeb(pdata->slave_addr, i2c->regs + S3C2410_IICADD);

	dev_info(i2c->dev, "slave address 0x%02x\n", pdata->slave_addr);

	/* we need to work out the divisors for the clock... */

	if (<API key>(i2c, pdata, &iicon, &freq) != 0) {
		dev_err(i2c->dev, "cannot meet bus frequency required\n");
		return -EINVAL;
	}

	/* todo - check that the i2c lines aren't being dragged anywhere */

	dev_info(i2c->dev, "bus frequency set to %d KHz\n", freq);
	dev_dbg(i2c->dev, "S3C2410_IICCON=0x%02lx\n", iicon);

	writel(iicon, i2c->regs + S3C2410_IICCON);

	/* check for s3c2440 i2c controller  */

	if (s3c24xx_i2c_is2440(i2c)) {
		dev_dbg(i2c->dev, "S3C2440_IICLC=%08x\n", pdata->sda_delay);

		writel(pdata->sda_delay, i2c->regs + S3C2440_IICLC);
	}

	return 0;
}

/* s3c24xx_i2c_probe
 *
 * called by the bus driver when a suitable device is found
 */
static int s3c24xx_i2c_probe(struct platform_device *pdev)
{
	struct s3c24xx_i2c *i2c = &s3c24xx_i2c;
	struct resource *res;
	int ret;

	/* find the clock and enable it */

	i2c->dev = &pdev->dev;
	i2c->clk = clk_get(&pdev->dev, "i2c");
	if (IS_ERR(i2c->clk)) {
		dev_err(&pdev->dev, "cannot get clock\n");
		ret = -ENOENT;
		goto err_noclk;
	}

	dev_dbg(&pdev->dev, "clock source %p\n", i2c->clk);

	clk_enable(i2c->clk);

	/* map the registers */

	res = <API key>(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "cannot find IO resource\n");
		ret = -ENOENT;
		goto err_clk;
	}

	i2c->ioarea = request_mem_region(res->start, (res->end-res->start)+1,
					 pdev->name);

	if (i2c->ioarea == NULL) {
		dev_err(&pdev->dev, "cannot request IO\n");
		ret = -ENXIO;
		goto err_clk;
	}

	i2c->regs = ioremap(res->start, (res->end-res->start)+1);

	if (i2c->regs == NULL) {
		dev_err(&pdev->dev, "cannot map IO\n");
		ret = -ENXIO;
		goto err_ioarea;
	}

	dev_dbg(&pdev->dev, "registers %p (%p, %p)\n", i2c->regs, i2c->ioarea, res);

	/* setup info block for the i2c core */

	i2c->adap.algo_data = i2c;
	i2c->adap.dev.parent = &pdev->dev;

	/* initialise the i2c controller */

	ret = s3c24xx_i2c_init(i2c);
	if (ret != 0)
		goto err_iomap;

	/* find the IRQ for this unit (note, this relies on the init call to
	 * ensure no current IRQs pending
	 */

	res = <API key>(pdev, IORESOURCE_IRQ, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "cannot find IRQ\n");
		ret = -ENOENT;
		goto err_iomap;
	}

	ret = request_irq(res->start, s3c24xx_i2c_irq, IRQF_DISABLED,
			  pdev->name, i2c);

	if (ret != 0) {
		dev_err(&pdev->dev, "cannot claim IRQ\n");
		goto err_iomap;
	}

	i2c->irq = res;

	dev_dbg(&pdev->dev, "irq resource %p (%lu)\n", res,
		(unsigned long)res->start);

	ret = i2c_add_adapter(&i2c->adap);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to add bus to i2c core\n");
		goto err_irq;
	}

	<API key>(pdev, i2c);

	dev_info(&pdev->dev, "%s: S3C I2C adapter\n", i2c->adap.dev.bus_id);
	return 0;

	/* goto-chain unwinds acquisitions in reverse order */
 err_irq:
	free_irq(i2c->irq->start, i2c);

 err_iomap:
	iounmap(i2c->regs);

 err_ioarea:
	release_resource(i2c->ioarea);
	kfree(i2c->ioarea);

 err_clk:
	clk_disable(i2c->clk);
	clk_put(i2c->clk);

 err_noclk:
	return ret;
}

/* s3c24xx_i2c_remove
 *
 * called when device is removed from the bus
 */
static int s3c24xx_i2c_remove(struct platform_device *pdev)
{
	struct s3c24xx_i2c *i2c = <API key>(pdev);

	i2c_del_adapter(&i2c->adap);
	free_irq(i2c->irq->start, i2c);

	clk_disable(i2c->clk);
	clk_put(i2c->clk);

	iounmap(i2c->regs);

	release_resource(i2c->ioarea);
	kfree(i2c->ioarea);

	return 0;
}

#ifdef CONFIG_PM
static int s3c24xx_i2c_resume(struct platform_device *dev)
{
	struct s3c24xx_i2c *i2c = <API key>(dev);

	/* re-init fully; controller state is lost over suspend */
	if (i2c != NULL)
		s3c24xx_i2c_init(i2c);

	return 0;
}

#else
#define s3c24xx_i2c_resume NULL
#endif

/* device driver for platform bus bits */

static struct platform_driver s3c2410_i2c_driver = {
	.probe		= s3c24xx_i2c_probe,
	.remove		= s3c24xx_i2c_remove,
	.resume		= s3c24xx_i2c_resume,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "s3c2410-i2c",
	},
};

static struct platform_driver s3c2440_i2c_driver = {
	.probe		= s3c24xx_i2c_probe,
	.remove		= s3c24xx_i2c_remove,
	.resume		= s3c24xx_i2c_resume,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "s3c2440-i2c",
	},
};

static int __init i2c_adap_s3c_init(void)
{
	int ret;

	ret = <API key>(&s3c2410_i2c_driver);
	if (ret == 0) {
		ret = <API key>(&s3c2440_i2c_driver);
		if (ret)
			<API key>(&s3c2410_i2c_driver);	/* unwind first registration */
	}

	return ret;
}

static void __exit i2c_adap_s3c_exit(void)
{
	<API key>(&s3c2410_i2c_driver);
	<API key>(&s3c2440_i2c_driver);
}

module_init(i2c_adap_s3c_init);
module_exit(i2c_adap_s3c_exit);

MODULE_DESCRIPTION("S3C24XX I2C Bus driver");
MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
MODULE_LICENSE("GPL");
#ifndef __MSM_MEMORY_DUMP_H
#define __MSM_MEMORY_DUMP_H

#include <linux/types.h>

/* Hardware/firmware blocks whose state can be captured into a memory dump. */
enum dump_client_type {
	MSM_CPU_CTXT = 0,
	MSM_L1_CACHE,
	MSM_L2_CACHE,
	MSM_OCMEM,
	MSM_TMC_ETFETB,
	MSM_ETM0_REG,
	MSM_ETM1_REG,
	MSM_ETM2_REG,
	MSM_ETM3_REG,
	MSM_TMC0_REG, /* TMC_ETR */
	MSM_TMC1_REG, /* TMC_ETF */
	MSM_LOG_BUF,
	<API key>,
	MAX_NUM_CLIENTS,
};

/* One legacy (v1) dump registration: client id plus its address window. */
struct msm_client_dump {
	enum dump_client_type id;
	unsigned long start_addr;
	unsigned long end_addr;
};

#ifdef <API key>
extern int <API key>(struct msm_client_dump *client_entry);
#else
/* stub when the dump driver is compiled out */
static inline int <API key>(struct msm_client_dump *entry)
{
	return -EIO;
}
#endif

#if defined(<API key>) || defined(<API key>)
extern uint32_t <API key>(void);
#else
static inline uint32_t <API key>(void)
{
	return 0;
}
#endif

/* Version word layout: major in the top 12 bits, minor in the low 20. */
#define <API key>(ma, mi) ((ma << 20) | mi)
#define MSM_DUMP_MAJOR(val) (val >> 20)
#define MSM_DUMP_MINOR(val) (val & 0xFFFFF)

#define MAX_NUM_ENTRIES 0x120

/* v2 dump-table entry ids; gaps leave room for per-CPU instances. */
enum msm_dump_data_ids {
	<API key> = 0x00,
	<API key> = 0x60,
	<API key> = 0x80,
	<API key> = 0xA0,
	<API key> = 0xC0,
	<API key> = 0xD0,
	MSM_DUMP_DATA_OCMEM = 0xE0,
	MSM_DUMP_DATA_MISC = 0xE8,
	<API key> = 0xF0,
	<API key> = 0x100,
	<API key> = 0x110,
	<API key> = 0x111,
	MSM_DUMP_DATA_MAX = MAX_NUM_ENTRIES,
};

enum msm_dump_table_ids {
	MSM_DUMP_TABLE_APPS,
	MSM_DUMP_TABLE_MAX = MAX_NUM_ENTRIES,
};

enum msm_dump_type {
	MSM_DUMP_TYPE_DATA,
	MSM_DUMP_TYPE_TABLE,
};

/* Descriptor shared with the dumping firmware/tooling; layout is ABI —
 * do not reorder fields. */
struct msm_dump_data {
	uint32_t version;
	uint32_t magic;
	char name[32];
	uint64_t addr;
	uint64_t len;
	uint32_t reserved;
};

struct msm_dump_entry {
	uint32_t id;
	char name[32];
	uint32_t type;
	uint64_t addr;
};

#ifdef <API key>
extern int <API key>(enum msm_dump_table_ids id, struct msm_dump_entry *entry);
#else
static inline int <API key>(enum msm_dump_table_ids id, struct msm_dump_entry *entry)
{
	return -ENOSYS;
}
#endif

#endif
/* { dg-do compile } */ /* { dg-options "-mavx512f -O2" } */ /* { dg-final { <API key> "vpmovzxbq\[ \\t\]+\[^\n\]*%xmm\[0-9\]\[^\n\]*%zmm\[0-9\]\[^\{\]" 1 } } */ /* { dg-final { <API key> "vpmovzxbq\[ \\t\]+\[^\n\]*%xmm\[0-9\]\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\[^\{\]" 1 } } */ /* { dg-final { <API key> "vpmovzxbq\[ \\t\]+\[^\n\]*%xmm\[0-9\]\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\{z\}" 1 } } */ #include <immintrin.h> volatile __m128i s; volatile __m512i res; volatile __mmask8 m; void extern avx512f_test (void) { res = <API key> (s); res = <API key> (res, m, s); res = <API key> (m, s); }
using System;
using System.Text;

namespace Server.Ethics.Evil
{
    /// <summary>
    /// "Unholy Sense" evil-ethic power: counts nearby Hero-aligned players
    /// and reports the compass direction of the strongest one.
    /// </summary>
    public sealed class UnholySense : Power
    {
        public UnholySense()
        {
            this.m_Definition = new PowerDefinition(
                0,
                "Unholy Sense",
                "Drewrok Velgo",
                "");
        }

        public override void BeginInvoke(Player from)
        {
            Ethic opposition = Ethic.Hero;

            int enemyCount = 0;

            // Detection radius grows with the invoker's power...
            int maxRange = 18 + from.Power;

            Player primary = null;

            foreach (Player pl in opposition.Players)
            {
                Mobile mob = pl.Mobile;

                if (mob == null || mob.Map != from.Mobile.Map || !mob.Alive)
                    continue;

                // ...and shrinks against powerful enemies, never below 18 tiles.
                if (!mob.InRange(from.Mobile, Math.Max(18, maxRange - pl.Power)))
                    continue;

                // track the highest-power enemy seen so far
                if (primary == null || pl.Power > primary.Power)
                    primary = pl;

                ++enemyCount;
            }

            StringBuilder sb = new StringBuilder();

            sb.Append("You sense ");
            sb.Append(enemyCount == 0 ? "no" : enemyCount.ToString());
            sb.Append(enemyCount == 1 ? " enemy" : " enemies");

            if (primary != null)
            {
                // NOTE(review): "presense" typo ships in the player-visible string;
                // left untouched here since this is a behavior-preserving pass.
                sb.Append(", and a strong presense");

                // Direction.Up/Down/Left/Right are the diagonal compass points.
                switch ( from.Mobile.GetDirectionTo(primary.Mobile) )
                {
                    case Direction.West: sb.Append(" to the west."); break;
                    case Direction.East: sb.Append(" to the east."); break;
                    case Direction.North: sb.Append(" to the north."); break;
                    case Direction.South: sb.Append(" to the south."); break;
                    case Direction.Up: sb.Append(" to the north-west."); break;
                    case Direction.Down: sb.Append(" to the south-east."); break;
                    case Direction.Left: sb.Append(" to the south-west."); break;
                    case Direction.Right: sb.Append(" to the north-east."); break;
                }
            }
            else
            {
                sb.Append('.');
            }

            from.Mobile.<API key>(Server.Network.MessageType.Regular, 0x59, false, sb.ToString());

            this.FinishInvoke(from);
        }
    }
}
#!/bin/bash export PATH=$HOME/.local/bin:/usr/local/bin:$HOME/prefix/bin:$HOME/APM/px4/<API key>/bin:$PATH export PYTHONUNBUFFERED=1 export PYTHONPATH=$HOME/APM cd $HOME/APM || exit 1 test -n "$FORCEBUILD" || { (cd APM && git fetch > /dev/null 2>&1) newtags=$(cd APM && git fetch --tags | wc -l) oldhash=$(cd APM && git rev-parse origin/master) newhash=$(cd APM && git rev-parse HEAD) newtagspx4=$(cd PX4Firmware && git fetch --tags | wc -l) oldhashpx4=$(cd PX4Firmware && git rev-parse origin/master) newhashpx4=$(cd PX4Firmware && git rev-parse HEAD) newtagsnuttx=$(cd PX4NuttX && git fetch --tags | wc -l) oldhashnuttx=$(cd PX4NuttX && git rev-parse origin/master) newhashnuttx=$(cd PX4NuttX && git rev-parse HEAD) newtagsuavcan=$(cd uavcan && git fetch --tags | wc -l) oldhashuavcan=$(cd uavcan && git rev-parse origin/master) newhashuavcan=$(cd uavcan && git rev-parse HEAD) if [ "$oldhash" = "$newhash" -a "$newtags" = "0" -a "$oldhashpx4" = "$newhashpx4" -a "$newtagspx4" = "0" -a "$oldhashnuttx" = "$newhashnuttx" -a "$newtagsnuttx" = "0" -a "$oldhashuavcan" = "$newhashuavcan" -a "$newtagsuavcan" = "0" ]; then echo "no change $oldhash $newhash `date`" >> build.log exit 0 fi } # grab a lock file. 
Not atomic, but close :)
# tries to cope with NFS

# lock_file <path>: cooperative PID lock file.
# Returns 0 after writing our PID into <path>; returns 1 if another live
# process already holds it (stale locks of dead processes are removed).
lock_file() {
        lck="$1"
        pid=`cat "$lck" 2> /dev/null`

        if test -f "$lck" && kill -0 $pid 2> /dev/null; then
            # FIX: age-check the lock file we were actually given; the old
            # code stat'ed a hard-coded "build.lck" regardless of $lck.
            LOCKAGE=$(($(date +%s) - $(stat -c '%Y' "$lck")))
            test $LOCKAGE -gt 7200 && {
                echo "old lock file $lck is valid for $pid with age $LOCKAGE seconds"
            }
            return 1
        fi
        # holder is dead (or file missing): take the lock ourselves
        /bin/rm -f "$lck"
        echo "$$" > "$lck"
        return 0
}

lock_file build.lck || {
    exit 1
}

#ulimit -m 500000
#ulimit -s 500000
#ulimit -t 1800
#ulimit -v 500000

# everything below runs in one subshell whose combined output is appended
# to build.log (see the trailing redirection)
(
date

# report <dir> <oldhash> <newhash>: mail a build-failure notice including
# the commit range since the last attempted build.
report() {
    d="$1"
    old="$2"
    new="$3"
    cat <<EOF | mail -s 'build failed' drones-discuss@googlegroups.com
A build of $d failed at `date`

You can view the build logs at http://autotest.diydrones.com/

A log of the commits since the last attempted build is below

`git log $old..$new`
EOF
}
# FIX (above): "git log $old $new" listed both histories independently;
# the range syntax $old..$new gives the commits *between* the two, which
# is what the message promises.

report_pull_failure() {
    d="$1"
    git show origin/master | mail -s 'APM pull failed' drones-discuss@googlegroups.com
    exit 1
}

oldhash=$(cd APM && git rev-parse HEAD)

# bring the main tree up to date and tag this autotest run
pushd APM
git checkout -f master
git fetch origin
git reset --hard origin/master
git pull || report_pull_failure
git clean -f -f -x -d -d
git tag autotest-$(date '+%Y-%m-%d-%H%M%S') -m "test tag `date`"
cp ../config.mk .
popd

rsync -a APM/Tools/autotest/web-firmware/ buildlogs/binaries/

# refresh the firmware sub-trees, dropping any stale release tags first so
# the subsequent tag fetch cannot conflict
pushd PX4Firmware
git fetch origin
git reset --hard origin/master
for v in ArduPlane ArduCopter APMrover2; do
    git tag -d $v-beta || true
    git tag -d $v-stable || true
done
git fetch origin --tags
git show
popd

pushd PX4NuttX
git fetch origin
git reset --hard origin/master
for v in ArduPlane ArduCopter APMrover2; do
    git tag -d $v-beta || true
    git tag -d $v-stable || true
done
git fetch origin --tags
git show
popd

pushd uavcan
git fetch origin
git reset --hard origin/master
for v in ArduPlane ArduCopter APMrover2; do
    git tag -d $v-beta || true
    git tag -d $v-stable || true
done
git fetch origin --tags
git show
popd

echo "Updating pymavlink"
pushd mavlink/pymavlink
git fetch origin
git reset --hard origin/master
git show
python setup.py build install --user
popd

echo "Updating MAVProxy"
pushd MAVProxy
git fetch origin
git reset --hard origin/master
git show
python setup.py build install --user
popd

githash=$(cd APM && git rev-parse HEAD)
# FIX: %M is minutes; the old "%H:%m" stamped the hour with the *month*.
hdate=$(date +"%Y-%m-%d-%H:%M")

# build each vehicle and collect logs/size reports
for d in ArduPlane ArduCopter APMrover2 AntennaTracker; do
    pushd APM/$d
    rm -rf ../../buildlogs/$d.build
    (date && TMPDIR=../../buildlogs make) > ../../buildlogs/$d.txt 2>&1
    status=$?
    if [ $status != 0 ]; then
        # NOTE(review): $newhash is only set on the non-FORCEBUILD path of
        # the header above; under FORCEBUILD this reports an empty range.
        report $d $oldhash $newhash
    fi
    popd
    APM/Tools/scripts/frame_sizes.py buildlogs/$d.build > buildlogs/$d.framesizes.txt
    (
        avr-size buildlogs/$d.build/$d.elf
        avr-nm --size-sort --print-size -C buildlogs/$d.build/$d.elf
    ) > buildlogs/$d.sizes.txt
done

# archive this run's artefacts under a timestamped history directory
mkdir -p "buildlogs/history/$hdate"
(cd buildlogs && cp -f *.txt *.flashlog *.tlog *.km[lz] *.gpx *.html *.png *.bin *.BIN *.elf "history/$hdate/")
echo $githash > "buildlogs/history/$hdate/githash.txt"

(cd APM && Tools/scripts/build_parameters.sh)

(cd APM && Tools/scripts/build_docs.sh)

# kill any simulator left over from a previous aborted run
killall -9 JSBSim || /bin/true

# raise core limit
ulimit -c 10000000

timelimit 12000 APM/Tools/autotest/autotest.py --timeout=11500 > buildlogs/autotest-output.txt 2>&1
) >> build.log 2>&1
package org.appcelerator.kroll;

/**
 * Value object describing a single proxy property change: the property's
 * name together with its value before and after the change.
 */
public class KrollPropertyChange
{
	protected String name;
	protected Object oldValue, newValue;

	/**
	 * @param name     name of the property that changed
	 * @param oldValue value before the change (may be null)
	 * @param newValue value after the change (may be null)
	 */
	public KrollPropertyChange(String name, Object oldValue, Object newValue)
	{
		this.name = name;
		this.oldValue = oldValue;
		this.newValue = newValue;
	}

	/**
	 * Notifies the given listener of this change; a null listener is
	 * silently ignored.
	 */
	public void fireEvent(KrollProxy proxy, KrollProxyListener listener)
	{
		if (listener == null) {
			return;
		}
		listener.propertyChanged(name, oldValue, newValue, proxy);
	}

	/** @return the name of the changed property */
	public String getName()
	{
		return name;
	}

	/** @return the property value before the change */
	public Object getOldValue()
	{
		return oldValue;
	}

	/** @return the property value after the change */
	public Object getNewValue()
	{
		return newValue;
	}
}
/** * Supplies a set of utility methods for building Geometry objects from lists * of Coordinates. * * Note that the factory constructor methods do <b>not</b> change the input * coordinates in any way. * * In particular, they are not rounded to the supplied <tt>PrecisionModel</tt>. * It is assumed that input Coordinates meet the given precision. */ /** * @requires jsts/geom/PrecisionModel.js */ /** * Constructs a GeometryFactory that generates Geometries having a floating * PrecisionModel and a spatial-reference ID of 0. * * @constructor */ jsts.geom.GeometryFactory = function(precisionModel) { this.precisionModel = precisionModel || new jsts.geom.PrecisionModel(); }; jsts.geom.GeometryFactory.prototype.precisionModel = null; jsts.geom.GeometryFactory.prototype.getPrecisionModel = function() { return this.precisionModel; }; /** * Creates a Point using the given Coordinate; a null Coordinate will create an * empty Geometry. * * @param {Coordinate} * coordinate Coordinate to base this Point on. * @return {Point} A new Point. */ jsts.geom.GeometryFactory.prototype.createPoint = function(coordinate) { var point = new jsts.geom.Point(coordinate, this); return point; }; /** * Creates a LineString using the given Coordinates; a null or empty array will * create an empty LineString. Consecutive points must not be equal. * * @param {Coordinate[]} * coordinates an array without null elements, or an empty array, or * null. * @return {LineString} A new LineString. */ jsts.geom.GeometryFactory.prototype.createLineString = function(coordinates) { var lineString = new jsts.geom.LineString(coordinates, this); return lineString; }; /** * Creates a LinearRing using the given Coordinates; a null or empty array will * create an empty LinearRing. The points must form a closed and simple * linestring. Consecutive points must not be equal. * * @param {Coordinate[]} * coordinates an array without null elements, or an empty array, or * null. * @return {LinearRing} A new LinearRing. 
 */
jsts.geom.GeometryFactory.prototype.createLinearRing = function(coordinates) {
  var linearRing = new jsts.geom.LinearRing(coordinates, this);
  return linearRing;
};

/**
 * Constructs a <code>Polygon</code> with the given exterior boundary and
 * interior boundaries.
 *
 * @param {LinearRing}
 *          shell the outer boundary of the new <code>Polygon</code>, or
 *          <code>null</code> or an empty <code>LinearRing</code> if the
 *          empty geometry is to be created.
 * @param {LinearRing[]}
 *          holes the inner boundaries of the new <code>Polygon</code>, or
 *          <code>null</code> or empty <code>LinearRing</code> s if the
 *          empty geometry is to be created.
 * @return {Polygon} A new Polygon.
 */
jsts.geom.GeometryFactory.prototype.createPolygon = function(shell, holes) {
  var polygon = new jsts.geom.Polygon(shell, holes, this);
  return polygon;
};

/**
 * Creates a MultiPoint. Accepts either an array of Points, or an array of
 * Coordinates (which are first converted to Points).
 */
jsts.geom.GeometryFactory.prototype.createMultiPoint = function(points) {
  if (points && points[0] instanceof jsts.geom.Coordinate) {
    var converted = [];
    var i;
    for (i = 0; i < points.length; i++) {
      converted.push(this.createPoint(points[i]));
    }
    points = converted;
  }

  return new jsts.geom.MultiPoint(points, this);
};

/**
 * Creates a MultiLineString from an array of LineStrings.
 */
jsts.geom.GeometryFactory.prototype.<API key> = function(
    lineStrings) {
  return new jsts.geom.MultiLineString(lineStrings, this);
};

/**
 * Creates a MultiPolygon from an array of Polygons.
 */
jsts.geom.GeometryFactory.prototype.createMultiPolygon = function(polygons) {
  return new jsts.geom.MultiPolygon(polygons, this);
};

/**
 * Build an appropriate <code>Geometry</code>, <code>MultiGeometry</code>,
 * or <code>GeometryCollection</code> to contain the <code>Geometry</code>s
 * in it. For example:<br>
 *
 * <ul>
 * <li> If <code>geomList</code> contains a single <code>Polygon</code>,
 * the <code>Polygon</code> is returned.
 * <li> If <code>geomList</code> contains several <code>Polygon</code>s, a
 * <code>MultiPolygon</code> is returned.
 * <li> If <code>geomList</code> contains some <code>Polygon</code>s and
 * some <code>LineString</code>s, a <code>GeometryCollection</code> is
 * returned. 
 * <li> If <code>geomList</code> is empty, an empty
 * <code>GeometryCollection</code> is returned
 * </ul>
 *
 * Note that this method does not "flatten" Geometries in the input, and hence
 * if any MultiGeometries are contained in the input a GeometryCollection
 * containing them will be returned.
 *
 * @param geomList
 *          the <code>Geometry</code>s to combine.
 * @return {Geometry} a <code>Geometry</code> of the "smallest", "most
 *         type-specific" class that can contain the elements of
 *         <code>geomList</code> .
 */
jsts.geom.GeometryFactory.prototype.buildGeometry = function(geomList) {

  /**
   * Determine some facts about the geometries in the list
   */
  var geomClass = null;
  var isHeterogeneous = false;
  var <API key> = false;
  // single pass over the (java-style iterable) list to classify its contents
  for (var i = geomList.iterator(); i.hasNext();) {
    var geom = i.next();

    var partClass = geom.CLASS_NAME;

    if (geomClass === null) {
      geomClass = partClass;
    }
    if (!(partClass === geomClass)) {
      isHeterogeneous = true;
    }
    if (geom.<API key>())
      <API key> = true;
  }

  /**
   * Now construct an appropriate geometry to return
   */
  // for the empty geometry, return an empty GeometryCollection
  if (geomClass === null) {
    return this.<API key>(null);
  }
  if (isHeterogeneous || <API key>) {
    return this.<API key>(geomList.toArray());
  }
  // at this point we know the collection is homogeneous. 
// Determine the type of the result from the first Geometry in the list // this should always return a geometry, since otherwise an empty collection // would have already been returned var geom0 = geomList.get(0); var isCollection = geomList.size() > 1; if (isCollection) { if (geom0 instanceof jsts.geom.Polygon) { return this.createMultiPolygon(geomList.toArray()); } else if (geom0 instanceof jsts.geom.LineString) { return this.<API key>(geomList.toArray()); } else if (geom0 instanceof jsts.geom.Point) { return this.createMultiPoint(geomList.toArray()); } jsts.util.Assert.<API key>('Unhandled class: ' + geom0); } return geom0; }; jsts.geom.GeometryFactory.prototype.<API key> = function( geometries) { return new jsts.geom.GeometryCollection(geometries, this); }; /** * Creates a {@link Geometry} with the same extent as the given envelope. The * Geometry returned is guaranteed to be valid. To provide this behaviour, the * following cases occur: * <p> * If the <code>Envelope</code> is: * <ul> * <li>null : returns an empty {@link Point} * <li>a point : returns a non-empty {@link Point} * <li>a line : returns a two-point {@link LineString} * <li>a rectangle : returns a {@link Polygon}> whose points are (minx, miny), * (minx, maxy), (maxx, maxy), (maxx, miny), (minx, miny). * </ul> * * @param {jsts.geom.Envelope} * envelope the <code>Envelope</code> to convert. * @return {jsts.geom.Geometry} an empty <code>Point</code> (for null * <code>Envelope</code>s), a <code>Point</code> (when min x = max * x and min y = max y) or a <code>Polygon</code> (in all other cases). */ jsts.geom.GeometryFactory.prototype.toGeometry = function(envelope) { // null envelope - return empty point geometry if (envelope.isNull()) { return this.createPoint(null); } // point? if (envelope.getMinX() === envelope.getMaxX() && envelope.getMinY() === envelope.getMaxY()) { return this.createPoint(new jsts.geom.Coordinate(envelope.getMinX(), envelope.getMinY())); } // vertical or horizontal line? 
  if (envelope.getMinX() === envelope.getMaxX() ||
      envelope.getMinY() === envelope.getMaxY()) {
    // degenerate envelope: represent it as a two-point line
    return this.createLineString([
        new jsts.geom.Coordinate(envelope.getMinX(), envelope.getMinY()),
        new jsts.geom.Coordinate(envelope.getMaxX(), envelope.getMaxY())]);
  }

  // create a CW ring for the polygon
  return this.createPolygon(this.createLinearRing([
      new jsts.geom.Coordinate(envelope.getMinX(), envelope.getMinY()),
      new jsts.geom.Coordinate(envelope.getMinX(), envelope.getMaxY()),
      new jsts.geom.Coordinate(envelope.getMaxX(), envelope.getMaxY()),
      new jsts.geom.Coordinate(envelope.getMaxX(), envelope.getMinY()),
      new jsts.geom.Coordinate(envelope.getMinX(), envelope.getMinY())]), null);
};
# Homebrew formula for ipcalc, an IPv4 address/netmask calculator.
class Ipcalc < Formula
  homepage "http://jodies.de/ipcalc"
  url "http://jodies.de/ipcalc-archive/ipcalc-0.41.tar.gz"
  sha256 "<SHA256-like>"

  def install
    # upstream ships a single executable script; no build step required
    bin.install "ipcalc"
  end

  test do
    # smoke test: calculate a /24 network, suppressing binary output
    system "#{bin}/ipcalc", "--nobinary", "192.168.0.1/24"
  end
end
#ifndef <API key>
#define <API key>

#include <signal.h>
#include <sys/types.h>
#ifndef QT_NO_SOCKET_H
# include <sys/socket.h>
#endif
#include <sys/stat.h>

/* Large-file support: when the explicit 64-bit ("transitional LFS") API is
 * available, map the QT_* wrappers onto the *64 names; otherwise fall back
 * to the plain C89/POSIX names. */
#if defined(<API key>) && defined(<API key>)

#define QT_STATBUF struct stat64
#define QT_FPOS_T fpos64_t
#define QT_OFF_T off64_t

#define QT_STAT ::stat64
#define QT_LSTAT ::lstat64
#define QT_TRUNCATE ::truncate64

// File I/O
#define QT_OPEN ::open64
#define QT_LSEEK ::lseek64
#define QT_FSTAT ::fstat64
#define QT_FTRUNCATE ::ftruncate64

// Standard C89
#define QT_FOPEN ::fopen64
#define QT_FSEEK ::fseeko64
#define QT_FTELL ::ftello64
#define QT_FGETPOS ::fgetpos64
#define QT_FSETPOS ::fsetpos64

#define QT_MMAP ::mmap64

#else // !defined(<API key>) || !defined(<API key>)

#include "../c89/qplatformdefs.h"

#define QT_STATBUF struct stat

#define QT_STAT ::stat
#define QT_LSTAT ::lstat
#define QT_TRUNCATE ::truncate

// File I/O
#define QT_OPEN ::open
#define QT_LSEEK ::lseek
#define QT_FSTAT ::fstat
#define QT_FTRUNCATE ::ftruncate

// Posix extensions to C89
#if !defined(<API key>) && !defined(QT_NO_USE_FSEEKO)
#undef QT_OFF_T
#undef QT_FSEEK
#undef QT_FTELL

#define QT_OFF_T off_t

#define QT_FSEEK ::fseeko
#define QT_FTELL ::ftello
#endif

#define QT_MMAP ::mmap

#endif // !defined (<API key>) || !defined(<API key>)

#define QT_STAT_MASK S_IFMT
#define QT_STAT_REG S_IFREG
#define QT_STAT_DIR S_IFDIR
#define QT_STAT_LNK S_IFLNK

#define QT_ACCESS ::access
#define QT_GETCWD ::getcwd
#define QT_CHDIR ::chdir
#define QT_MKDIR ::mkdir
#define QT_RMDIR ::rmdir

// File I/O
#define QT_CLOSE ::close
#define QT_READ ::read
#define QT_WRITE ::write

#define QT_OPEN_LARGEFILE O_LARGEFILE
#define QT_OPEN_RDONLY O_RDONLY
#define QT_OPEN_WRONLY O_WRONLY
#define QT_OPEN_RDWR O_RDWR
#define QT_OPEN_CREAT O_CREAT
#define QT_OPEN_TRUNC O_TRUNC
#define QT_OPEN_APPEND O_APPEND

// Posix extensions to C89
#define QT_FILENO fileno

// Directory iteration
#define QT_DIR DIR

#define QT_OPENDIR ::opendir
#define QT_CLOSEDIR ::closedir

#if 
defined(<API key>) \
    && defined(<API key>) \
    && !defined(QT_NO_READDIR64)
// use the 64-bit readdir variants where the transitional LFS API has them
#define QT_DIRENT struct dirent64
#define QT_READDIR ::readdir64
#define QT_READDIR_R ::readdir64_r
#else
#define QT_DIRENT struct dirent
#define QT_READDIR ::readdir
#define QT_READDIR_R ::readdir_r
#endif

#define QT_SOCKLEN_T socklen_t

#define QT_SOCKET_CONNECT ::connect
#define QT_SOCKET_BIND ::bind

#define QT_SIGNAL_RETTYPE void
#define QT_SIGNAL_ARGS int
#define QT_SIGNAL_IGNORE SIG_IGN

#endif // include guard
#!/usr/bin/env python
"""Sample Input Reader for map job."""

import random
import string
import time

from mapreduce import context
from mapreduce import errors
from mapreduce import operation
from mapreduce.api import map_job

# pylint: disable=invalid-name

# Counter name for number of bytes read.
<API key> = "io-read-bytes"
# Counter name for milliseconds spent reading data.
<API key> = "io-read-msec"


class SampleInputReader(map_job.InputReader):
  """A sample InputReader that generates random strings as output.

  Primary usage is to as an example InputReader that can be use
  for test purposes.
  """

  # Total number of entries this reader should generate.
  COUNT = "count"
  # Length of the generated strings.
  STRING_LENGTH = "string_length"
  # The default string length if one is not specified.
  <API key> = 10

  def __init__(self, count, string_length):
    """Initialize input reader.

    Args:
      count: number of entries this shard should generate.
      string_length: the length of generated random strings.
    """
    self._count = count
    self._string_length = string_length

  def __iter__(self):
    """Yield `self._count` random lowercase strings, one at a time.

    Decrements the remaining count as it goes, so iteration is single-use;
    updates the io read-bytes / read-msec counters when a mapreduce context
    is active.
    """
    ctx = context.get()

    while self._count:
      self._count -= 1
      start_time = time.time()
      content = "".join(random.choice(string.ascii_lowercase)
                        for _ in range(self._string_length))
      if ctx:
        operation.counters.Increment(
            <API key>, int((time.time() - start_time) * 1000))(ctx)
        operation.counters.Increment(<API key>, len(content))(ctx)
      yield content

  @classmethod
  def from_json(cls, state):
    """Inherit docs."""
    return cls(state[cls.COUNT], state[cls.STRING_LENGTH])

  def to_json(self):
    """Inherit docs."""
    return {self.COUNT: self._count, self.STRING_LENGTH: self._string_length}

  @classmethod
  def split_input(cls, job_config):
    """Inherit docs."""
    params = job_config.input_reader_params
    count = params[cls.COUNT]
    string_length = params.get(cls.STRING_LENGTH, cls.<API key>)

    shard_count = job_config.shard_count
    count_per_shard = count // shard_count

    mr_input_readers = [
        cls(count_per_shard, string_length) for _ in range(shard_count)]

    # remainder that didn't divide evenly goes into one extra reader
    left = count - count_per_shard*shard_count
    if left > 0:
      mr_input_readers.append(cls(left, string_length))

    return mr_input_readers

  @classmethod
  def validate(cls, job_config):
    """Inherit docs."""
    super(SampleInputReader, cls).validate(job_config)
    params = job_config.input_reader_params
    # Validate count.
    if cls.COUNT not in params:
      raise errors.<API key>("Must specify %s" % cls.COUNT)
    if not isinstance(params[cls.COUNT], int):
      raise errors.<API key>("%s should be an int but is %s" %
                             (cls.COUNT, type(params[cls.COUNT])))
    if params[cls.COUNT] <= 0:
      # NOTE(review): this message is missing its "% cls.COUNT" argument,
      # so the literal "%s" is raised verbatim — worth fixing upstream.
      raise errors.<API key>("%s should be a positive int")
    # Validate string length.
    if cls.STRING_LENGTH in params and not (
        isinstance(params[cls.STRING_LENGTH], int) and
        params[cls.STRING_LENGTH] > 0):
      raise errors.<API key>("%s should be a positive int "
                             "but is %s" %
                             (cls.STRING_LENGTH, params[cls.STRING_LENGTH]))
<!DOCTYPE html>
<!--
Copyright (c) 2003-2015, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
-->
<html>
<head>
	<meta charset="utf-8">
	<title>Append To Page Element Using JavaScript Code &mdash; CKEditor Sample</title>
	<script src="../../ckeditor.js"></script>
	<link rel="stylesheet" href="sample.css">
</head>
<body>
	<h1 class="samples">
		<a href="index.html">CKEditor Samples</a> &raquo; Append To Page Element Using JavaScript Code
	</h1>
	<div class="warning deprecated">
		This sample is not maintained anymore. Check out the <a href="http://sdk.ckeditor.com/">brand new samples in CKEditor SDK</a>.
	</div>
	<div id="section1">
		<div class="description">
			<p>
				The <code><a class="samples" href="http://docs.ckeditor.com/#!/api/CKEDITOR-method-appendTo">CKEDITOR.appendTo()</a></code> method creates an editor inside an existing element, so a target container to be replaced is no longer necessary. A new editor
				instance is inserted directly wherever it is desired.
			</p>
			<pre class="samples">CKEDITOR.appendTo( '<em>container_id</em>',
	{ /* Configuration options to be used. */ },
	'Editor content to be used.'
);</pre>
		</div>
		<script>

			// This call can be placed at any point after the
			// DOM element to append CKEditor to or inside the <head><script>
			// in a window.onload event handler.

			// Append a CKEditor instance using the default configuration and the
			// provided content to the <div> element of ID "section1".
			CKEDITOR.appendTo( 'section1',
				null,
				'<p>This is some <strong>sample text</strong>. You are using <a href="http://ckeditor.com/">CKEditor</a>.</p>'
			);

		</script>
	</div>
	<br>
	<div id="footer">
		<hr>
		<p>
			CKEditor - The text editor for the Internet - <a class="samples" href="http://ckeditor.com/">http://ckeditor.com</a>
		</p>
		<p id="copy">
			Copyright &copy; 2003-2015, <a class="samples" href="http://cksource.com/">CKSource</a> - Frederico
			Knabben. All rights reserved.
		</p>
	</div>
</body>
</html>
'use strict';

/**
 * $dateFormatter — locale-aware helpers for working with date/time format
 * strings, backed by Angular's $locale data and the built-in date filter.
 */
angular.module('mgcrea.ngStrap.helpers.dateFormatter', [])

  .service('$dateFormatter', ['$locale', 'dateFilter', function($locale, dateFilter) {

    // Split a time format such as "h:mm a" into its captured parts:
    // [hours, separator, minutes, meridian marker].
    function splitTimeFormat(format) {
      return /(h+)([:\.])?(m+)[ ]?(a?)/i.exec(format).slice(1);
    }

    // Identifier of the application's configured locale.
    this.getDefaultLocale = function() {
      return $locale.id;
    };

    // Resolve a named locale format (e.g. "shortDate") to its pattern;
    // unknown names are treated as literal format strings.
    this.getDatetimeFormat = function(format) {
      return $locale.DATETIME_FORMATS[format] || format;
    };

    // Abbreviated weekday names for the current locale.
    this.weekdaysShort = function() {
      return $locale.DATETIME_FORMATS.SHORTDAY;
    };

    // Hours portion of a time format (e.g. "hh").
    this.hoursFormat = function(timeFormat) {
      return splitTimeFormat(timeFormat)[0];
    };

    // Minutes portion of a time format (e.g. "mm").
    this.minutesFormat = function(timeFormat) {
      return splitTimeFormat(timeFormat)[2];
    };

    // Separator between hours and minutes (":" or "."), if any.
    this.timeSeparator = function(timeFormat) {
      return splitTimeFormat(timeFormat)[1];
    };

    // Whether the format carries an AM/PM marker.
    this.showAM = function(timeFormat) {
      return !!splitTimeFormat(timeFormat)[3];
    };

    // Format a date via Angular's date filter.
    this.formatDate = function(date, format) {
      return dateFilter(date, format);
    };

  }]);
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/parser.h>
#include <linux/statfs.h>
#include <linux/sched.h>
#include "internal.h"

#define AFS_FS_MAGIC 0x6B414653 /* 'kAFS' */

static void afs_i_init_once(void *foo);
static int afs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt);
static struct inode *afs_alloc_inode(struct super_block *sb);
static void afs_put_super(struct super_block *sb);
static void afs_destroy_inode(struct inode *inode);
static int afs_statfs(struct dentry *dentry, struct kstatfs *buf);

struct file_system_type afs_fs_type = {
	.owner = THIS_MODULE,
	.name = "afs",
	.get_sb = afs_get_sb,
	.kill_sb = kill_anon_super,
	.fs_flags = 0,
};

static const struct super_operations afs_super_ops = {
	.statfs = afs_statfs,
	.alloc_inode = afs_alloc_inode,
	.destroy_inode = afs_destroy_inode,
	.clear_inode = afs_clear_inode,
	.put_super = afs_put_super,
	.show_options = <API key>,
};

static struct kmem_cache *afs_inode_cachep;
static atomic_t <API key>;

/* mount option tokens */
enum {
	afs_no_opt,
	afs_opt_cell,
	afs_opt_rwpath,
	afs_opt_vol,
};

static const match_table_t afs_options_list = {
	{ afs_opt_cell, "cell=%s" },
	{ afs_opt_rwpath, "rwpath" },
	{ afs_opt_vol, "vol=%s" },
	{ afs_no_opt, NULL },
};

/*
 * initialise the filesystem
 */
int __init afs_fs_init(void)
{
	int ret;

	_enter("");

	/* create ourselves an inode cache */
	atomic_set(&<API key>, 0);
	ret = -ENOMEM;
	afs_inode_cachep = kmem_cache_create("afs_inode_cache", sizeof(struct afs_vnode), 0, SLAB_HWCACHE_ALIGN, afs_i_init_once);
	if (!afs_inode_cachep) {
		printk(KERN_NOTICE "kAFS: Failed to allocate inode cache\n");
		return ret;
	}

	/* now export our filesystem to lesser mortals */
	ret = register_filesystem(&afs_fs_type);
	if (ret < 0) {
		kmem_cache_destroy(afs_inode_cachep);
		_leave(" = %d", ret);
		return ret;
	}

	_leave(" = 0");
	return 0;
}

/*
 * clean up the filesystem
 */
void __exit afs_fs_exit(void)
{
	_enter("");

	<API key>();
	<API key>(&afs_fs_type);

	/* all vnodes must have been released by now or the cache is corrupt */
	if (atomic_read(&<API key>) != 0) {
		printk("kAFS: %d active inode objects still present\n", atomic_read(&<API key>));
		BUG();
	}

	kmem_cache_destroy(afs_inode_cachep);
	_leave("");
}

/*
 * parse the mount options
 * - this function has been shamelessly adapted from the ext3 fs which
 *   shamelessly adapted it from the msdos fs
 */
static int afs_parse_options(struct afs_mount_params *params, char *options, const char **devname)
{
	struct afs_cell *cell;
	substring_t args[MAX_OPT_ARGS];
	char *p;
	int token;

	_enter("%s", options);

	options[PAGE_SIZE - 1] = 0;

	while ((p = strsep(&options, ","))) {
		if (!*p)
			continue;

		token = match_token(p, afs_options_list, args);
		switch (token) {
		case afs_opt_cell:
			cell = afs_cell_lookup(args[0].from, args[0].to - args[0].from);
			if (IS_ERR(cell))
				return PTR_ERR(cell);
			/* swap in the newly looked-up cell */
			afs_put_cell(params->cell);
			params->cell = cell;
			break;

		case afs_opt_rwpath:
			params->rwpath = 1;
			break;

		case afs_opt_vol:
			*devname = args[0].from;
			break;

		default:
			printk(KERN_ERR "kAFS:" " Unknown or invalid mount option: '%s'\n", p);
			return -EINVAL;
		}
	}

	_leave(" = 0");
	return 0;
}

/*
 * parse a device name to get cell name, volume name, volume type and R/W
 * selector
 * - this can be one of the following:
 *	"%[cell:]volume[.]"		R/W volume
 *	"#[cell:]volume[.]"		R/O or R/W volume (rwpath=0),
 *					 or R/W (rwpath=1) volume
 *	"%[cell:]volume.readonly"	R/O volume
 *	"#[cell:]volume.readonly"	R/O volume
 *	"%[cell:]volume.backup"		Backup volume
 *	"#[cell:]volume.backup"		Backup volume
 */
static int <API key>(struct afs_mount_params *params, const char *name)
{
	struct afs_cell *cell;
	const char *cellname, *suffix;
	int cellnamesz;

	_enter(",%s", name);

	if (!name) {
		printk(KERN_ERR "kAFS: no volume name specified\n");
		return -EINVAL;
	}

	if ((name[0] != '%' && name[0] != '#') || !name[1]) {
		printk(KERN_ERR "kAFS: unparsable volume name\n");
		return -EINVAL;
	}

	/* determine the type of volume we're looking for */
	params->type = AFSVL_ROVOL;
	params->force = false;
	if (params->rwpath || name[0] == '%') {
		params->type = AFSVL_RWVOL;
		params->force = true;
	}
	name++;

	/* split the cell name out if there is one */
	params->volname = strchr(name, ':');
	if (params->volname) {
		cellname = name;
		cellnamesz = params->volname - name;
		params->volname++;
	} else {
		params->volname = name;
		cellname = NULL;
		cellnamesz = 0;
	}

	/* the volume type is further affected by a possible suffix */
	suffix = strrchr(params->volname, '.');
	if (suffix) {
		if (strcmp(suffix, ".readonly") == 0) {
			params->type = AFSVL_ROVOL;
			params->force = true;
		} else if (strcmp(suffix, ".backup") == 0) {
			params->type = AFSVL_BACKVOL;
			params->force = true;
		} else if (suffix[1] == 0) {
			/* a bare trailing dot: strip it but keep the type */
		} else {
			/* unrecognised suffix: treat it as part of the name */
			suffix = NULL;
		}
	}

	params->volnamesz = suffix ? suffix - params->volname : strlen(params->volname);

	_debug("cell %*.*s [%p]", cellnamesz, cellnamesz, cellname ?: "", params->cell);

	/* lookup the cell record */
	if (cellname || !params->cell) {
		cell = afs_cell_lookup(cellname, cellnamesz);
		if (IS_ERR(cell)) {
			printk(KERN_ERR "kAFS: unable to lookup cell '%s'\n", cellname ?: "");
			return PTR_ERR(cell);
		}
		afs_put_cell(params->cell);
		params->cell = cell;
	}

	_debug("CELL:%s [%p] VOLUME:%*.*s SUFFIX:%s TYPE:%d%s", params->cell->name, params->cell, params->volnamesz, params->volnamesz, params->volname, suffix ?: "-", params->type, params->force ? " FORCE" : "");

	return 0;
}

/*
 * check a superblock to see if it's the one we're looking for
 */
static int afs_test_super(struct super_block *sb, void *data)
{
	struct afs_mount_params *params = data;
	struct afs_super_info *as = sb->s_fs_info;

	return as->volume == params->volume;
}

/*
 * fill in the superblock
 */
static int afs_fill_super(struct super_block *sb, void *data)
{
	struct afs_mount_params *params = data;
	struct afs_super_info *as = NULL;
	struct afs_fid fid;
	struct dentry *root = NULL;
	struct inode *inode = NULL;
	int ret;

	_enter("");

	/* allocate a superblock info record */
	as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
	if (!as) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	afs_get_volume(params->volume);
	as->volume = params->volume;

	/* fill in the superblock */
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = AFS_FS_MAGIC;
	sb->s_op = &afs_super_ops;
	sb->s_fs_info = as;
	sb->s_bdi = &as->volume->bdi;

	/* allocate the root inode and dentry; vnode 1/unique 1 is the
	 * conventional root of an AFS volume */
	fid.vid = as->volume->vid;
	fid.vnode = 1;
	fid.unique = 1;
	inode = afs_iget(sb, params->key, &fid, NULL, NULL);
	if (IS_ERR(inode))
		goto error_inode;

	ret = -ENOMEM;
	root = d_alloc_root(inode);
	if (!root)
		goto error;

	sb->s_root = root;

	_leave(" = 0");
	return 0;

error_inode:
	ret = PTR_ERR(inode);
	inode = NULL;
error:
	iput(inode);
	afs_put_volume(as->volume);
	kfree(as);

	sb->s_fs_info = NULL;

	_leave(" = %d", ret);
	return ret;
}

/*
 * get an AFS superblock
 */
static int afs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *options, struct vfsmount *mnt)
{
	struct afs_mount_params params;
	struct super_block *sb;
	struct afs_volume *vol;
	struct key *key;
	char *new_opts = kstrdup(options, GFP_KERNEL);
	int ret;

	_enter(",,%s,%p", dev_name, options);

	memset(&params, 0, sizeof(params));

	/* parse the options and device name */
	if (options) {
		ret = afs_parse_options(&params, options, &dev_name);
		if (ret < 0)
			goto error;
	}

	ret = <API key>(&params, dev_name);
	if (ret < 0)
		goto error;

	/* try and do the mount securely */
	key = afs_request_key(params.cell);
	if (IS_ERR(key)) {
		_leave(" = %ld [key]", PTR_ERR(key));
		ret = PTR_ERR(key);
		goto error;
	}
	params.key = key;

	/* parse the device name */
	vol = afs_volume_lookup(&params);
	if (IS_ERR(vol)) {
		ret = PTR_ERR(vol);
		goto error;
	}
	params.volume = vol;

	/* allocate a deviceless superblock; afs_test_super matches by volume
	 * so remounting the same volume reuses the existing superblock */
	sb = sget(fs_type, afs_test_super, set_anon_super, &params);
	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		goto error;
	}

	if (!sb->s_root) {
		/* initial superblock/root creation */
		_debug("create");
		sb->s_flags = flags;
		ret = afs_fill_super(sb, &params);
		if (ret < 0) {
			<API key>(sb);
			goto error;
		}
		save_mount_options(sb, new_opts);
		sb->s_flags |= MS_ACTIVE;
	} else {
		_debug("reuse");
		ASSERTCMP(sb->s_flags, &, MS_ACTIVE);
	}

	simple_set_mnt(mnt, sb);
	afs_put_volume(params.volume);
	afs_put_cell(params.cell);
	kfree(new_opts);
	_leave(" = 0 [%p]", sb);
	return 0;

error:
	afs_put_volume(params.volume);
	afs_put_cell(params.cell);
	key_put(params.key);
	kfree(new_opts);
	_leave(" = %d", ret);
	return ret;
}

/*
 * finish the unmounting process on the superblock
 */
static void afs_put_super(struct super_block *sb)
{
	struct afs_super_info *as = sb->s_fs_info;

	_enter("");

	lock_kernel();
	afs_put_volume(as->volume);
	unlock_kernel();

	_leave("");
}

/*
 * initialise an inode cache slab element prior to any use
 */
static void afs_i_init_once(void *_vnode)
{
	struct afs_vnode *vnode = _vnode;

	memset(vnode, 0, sizeof(*vnode));
	inode_init_once(&vnode->vfs_inode);
	init_waitqueue_head(&vnode->update_waitq);
	mutex_init(&vnode->permits_lock);
	mutex_init(&vnode->validate_lock);
	spin_lock_init(&vnode->writeback_lock);
	spin_lock_init(&vnode->lock);
	INIT_LIST_HEAD(&vnode->writebacks);
	INIT_LIST_HEAD(&vnode->pending_locks);
	INIT_LIST_HEAD(&vnode->granted_locks);
	INIT_DELAYED_WORK(&vnode->lock_work, afs_lock_work);
	INIT_WORK(&vnode->cb_broken_work, <API key>);
}

/*
 * allocate an AFS inode struct from our slab cache
 */
static struct inode *afs_alloc_inode(struct super_block *sb)
{
	struct afs_vnode *vnode;

	vnode = kmem_cache_alloc(afs_inode_cachep, GFP_KERNEL);
	if (!vnode)
		return NULL;

	atomic_inc(&<API key>);

	/* reset per-inode state; the slab constructor only runs on first
	 * allocation, not on reuse */
	memset(&vnode->fid, 0, sizeof(vnode->fid));
	memset(&vnode->status, 0, sizeof(vnode->status));

	vnode->volume = NULL;
	vnode->update_cnt = 0;
	vnode->flags = 1 << AFS_VNODE_UNSET;
	vnode->cb_promised = false;

	_leave(" = %p", &vnode->vfs_inode);
	return &vnode->vfs_inode;
}

/*
 * destroy an AFS inode struct
 */
static void afs_destroy_inode(struct inode *inode)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("%p{%x:%u}", inode, vnode->fid.vid, vnode->fid.vnode);

	_debug("DESTROY INODE %p", inode);

	ASSERTCMP(vnode->server, ==, NULL);

	kmem_cache_free(afs_inode_cachep, vnode);
	atomic_dec(&<API key>);
}

/*
 * return information about an AFS volume
 */
static int afs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct afs_volume_status vs;
	struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
	struct key *key;
	int ret;

	key = afs_request_key(vnode->volume->cell);
	if (IS_ERR(key))
		return PTR_ERR(key);

	ret = <API key>(vnode, key, &vs);
	key_put(key);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	buf->f_type = dentry->d_sb->s_magic;
	buf->f_bsize = AFS_BLOCK_SIZE;
	buf->f_namelen = AFSNAMEMAX - 1;

	/* an unquota'd volume reports the partition capacity instead */
	if (vs.max_quota == 0)
		buf->f_blocks = vs.part_max_blocks;
	else
		buf->f_blocks = vs.max_quota;
	buf->f_bavail = buf->f_bfree = buf->f_blocks - vs.blocks_in_use;
	return 0;
}
/**
 * An Image is a light-weight object you can use to display anything that doesn't need physics or animation.
 * It can still rotate, scale, crop and receive input events. This makes it perfect for logos, backgrounds, simple buttons and other non-Sprite graphics.
 *
 * @class Phaser.Image
 * @extends PIXI.Sprite
 * @extends Phaser.Component.Core
 * @extends Phaser.Component.Angle
 * @extends Phaser.Component.Animation
 * @extends Phaser.Component.AutoCull
 * @extends Phaser.Component.Bounds
 * @extends Phaser.Component.BringToTop
 * @extends Phaser.Component.Crop
 * @extends Phaser.Component.Destroy
 * @extends Phaser.Component.FixedToCamera
 * @extends Phaser.Component.InputEnabled
 * @extends Phaser.Component.LifeSpan
 * @extends Phaser.Component.LoadTexture
 * @extends Phaser.Component.Overlap
 * @extends Phaser.Component.Reset
 * @extends Phaser.Component.ScaleMinMax
 * @extends Phaser.Component.Smoothed
 * @constructor
 * @param {Phaser.Game} game - A reference to the currently running game.
 * @param {number} [x=0] - The x coordinate of the Image. The coordinate is relative to any parent container this Image may be in.
 * @param {number} [y=0] - The y coordinate of the Image. The coordinate is relative to any parent container this Image may be in.
 * @param {string|Phaser.RenderTexture|Phaser.BitmapData|PIXI.Texture} [key] - The texture used by the Image during rendering. It can be a string which is a reference to the Cache entry, or an instance of a RenderTexture, BitmapData or PIXI.Texture.
 * @param {string|number} [frame] - If this Image is using part of a sprite sheet or texture atlas you can specify the exact frame to use by giving a string or numeric index.
 */
Phaser.Image = function (game, x, y, key, frame) {

    // Normalise optional arguments to their documented defaults.
    x = x || 0;
    y = y || 0;
    key = key || null;
    frame = frame || null;

    /**
     * @property {number} type - The const type of this object.
     * @readonly
     */
    this.type = Phaser.IMAGE;

    // Construct the underlying PIXI sprite with a placeholder texture; the
    // real texture is resolved from `key`/`frame` by Core.init below.
    PIXI.Sprite.call(this, Phaser.Cache.DEFAULT);

    Phaser.Component.Core.init.call(this, game, x, y, key, frame);

};

Phaser.Image.prototype = Object.create(PIXI.Sprite.prototype);
Phaser.Image.prototype.constructor = Phaser.Image;

// Mix the shared component behaviours into the Image prototype.
Phaser.Component.Core.install.call(Phaser.Image.prototype, [
    'Angle',
    'Animation',
    'AutoCull',
    'Bounds',
    'BringToTop',
    'Crop',
    'Destroy',
    'FixedToCamera',
    'InputEnabled',
    'LifeSpan',
    'LoadTexture',
    'Overlap',
    'Reset',
    'ScaleMinMax',
    'Smoothed'
]);

// Keep direct references to the component preUpdate steps so the combined
// preUpdate below can call them in order.
Phaser.Image.prototype.preUpdateInWorld = Phaser.Component.InWorld.preUpdate;
Phaser.Image.prototype.preUpdateCore = Phaser.Component.Core.preUpdate;

/**
 * Automatically called by World.preUpdate.
 *
 * Runs the InWorld check first; if the Image fails it (e.g. culled), the
 * core pre-update is skipped for this frame.
 *
 * @method Phaser.Image#preUpdate
 * @memberof Phaser.Image
 */
Phaser.Image.prototype.preUpdate = function() {

    if (!this.preUpdateInWorld())
    {
        return false;
    }

    return this.preUpdateCore();

};
#ifndef <API key>
#define <API key>

#include <linux/const.h>
#include <asm/pgtable_64_types.h>

#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>

/* Kernel page tables set up by head_64.S / early boot. */
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512];
extern pgd_t init_level4_pgt[];

#define swapper_pg_dir init_level4_pgt

extern void paging_init(void);

/* Diagnostics for corrupt page-table entries at each level. */
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %p(%016lx)\n", \
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %p(%016lx)\n", \
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %p(%016lx)\n", \
	       __FILE__, __LINE__, &(e), pud_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %p(%016lx)\n", \
	       __FILE__, __LINE__, &(e), pgd_val(e))

struct mm_struct;

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);

/* Native (non-paravirt) page-table entry accessors.  On 64-bit, entries are
 * a single word so plain stores are inherently atomic. */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	*ptep = native_make_pte(0);
}

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void <API key>(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}

/* Atomically fetch-and-zero a PTE; needed on SMP so a concurrent hardware
 * A/D-bit update cannot be lost between the read and the clear. */
static inline pte_t <API key>(pte_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/* <API key>, but duplicated because of cyclic dependency */
	pte_t ret = *xp;
	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}

/* Same fetch-and-zero idiom for PMD entries. */
static inline pmd_t <API key>(pmd_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pmd(xchg(&xp->pmd, 0));
#else
	/* <API key>, but duplicated because of cyclic dependency */
	pmd_t ret = *xp;
	native_pmd_clear(xp);
	return ret;
#endif
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
}

static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
}

static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}

extern void sync_global_pgds(unsigned long start, unsigned long end);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/*
 * Level 4 access.
 */
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)

/* PUD - Level3 access */

/* PMD - Level 2 access */

/* PTE - Level 1 access. */

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))/* NOP */

/*
 * Encode and de-code a swap entry
 *
 * |     ...            | 11| 10|  9|8|7|6|5| 4| 3|2|1|0| <- bit number
 * |     ...            |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names
 * | OFFSET (14->63) | TYPE (9-13)  |0|X|X|X| X| X|X|X|0| <- swp entry
 *
 * G (8) is aliased and used as a PROT_NONE indicator for
 * !present ptes.  We need to start storing swap entries above
 * there.  We also need to avoid using A and D because of an
 * erratum where they can be incorrectly set by hardware on
 * non-present PTEs.
 */
#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
#define SWP_TYPE_BITS 5
/* Place the offset above the type: */
#define <API key> (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x)			(((x).val >> (SWP_TYPE_FIRST_BIT)) \
					 & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)			((x).val >> <API key>)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					 ((type) << (SWP_TYPE_FIRST_BIT)) \
					 | ((offset) << <API key>) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })

extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

#define <API key>
#define <API key>

#define pgtable_cache_init()   do { } while (0)
#define check_pgt_cache()      do { } while (0)

#define PAGE_AGP    PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
#define	kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define	kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)

#define <API key>
#define vmemmap ((struct page *)VMEMMAP_START)

extern void <API key>(unsigned long phys, unsigned long size);
extern void <API key>(unsigned long phys, unsigned long size);

#endif /* !__ASSEMBLY__ */

#endif /* <API key> */
<?php
// Moodle is free software: you can redistribute it and/or modify
// (at your option) any later version.
// Moodle is distributed in the hope that it will be useful,
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the

namespace core\external;
defined('MOODLE_INTERNAL') || die();

require_once($CFG->libdir . '/externallib.php');

use stdClass;
use renderer_base;
use context;
use context_system;
use coding_exception;
use <API key>;
use <API key>;
use external_value;
use <API key>;

/**
 * Generic base class for exporting data to external functions and templates.
 *
 * Subclasses declare their properties via define_properties() (and friends)
 * and their preloaded related objects via define_related(); this class then
 * validates the related data, formats text fields, and builds the external
 * create/read/update structures.
 */
abstract class exporter {

    /** @var array $related List of related objects used to avoid DB queries. */
    protected $related = array();

    /** @var stdClass|array The data of this exporter. */
    protected $data = null;

    /**
     * Constructor - saves the persistent object, and the related objects.
     *
     * @param mixed $data - Either an stdClass or an array of values.
     * @param array $related - An optional list of pre-loaded objects related to this object.
     */
    public function __construct($data, $related = array()) {
        $this->data = $data;
        // Cache the valid related objects.
        foreach (static::define_related() as $key => $classname) {
            $isarray = false;
            $nullallowed = false;

            // Allow ? to mean null is allowed.
            if (substr($classname, -1) === '?') {
                $classname = substr($classname, 0, -1);
                $nullallowed = true;
            }

            // Allow [] to mean an array of values.
            if (substr($classname, -2) === '[]') {
                $classname = substr($classname, 0, -2);
                $isarray = true;
            }

            $missingdataerr = 'Exporter class is missing required related data: (' . get_called_class() . ') ';
            // Scalars are validated with is_string()/is_int()/etc. rather than instanceof.
            $scalartypes = ['string', 'int', 'bool', 'float'];
            $scalarcheck = 'is_' . $classname;

            if ($nullallowed && (!array_key_exists($key, $related) || $related[$key] === null)) {
                $this->related[$key] = null;

            } else if ($isarray) {
                // Every element of the array must match the declared type.
                if (array_key_exists($key, $related) && is_array($related[$key])) {
                    foreach ($related[$key] as $index => $value) {
                        if (!$value instanceof $classname && !$scalarcheck($value)) {
                            throw new coding_exception($missingdataerr . $key . ' => ' . $classname . '[]');
                        }
                    }
                    $this->related[$key] = $related[$key];
                } else {
                    throw new coding_exception($missingdataerr . $key . ' => ' . $classname . '[]');
                }

            } else {
                if (array_key_exists($key, $related) &&
                        ((in_array($classname, $scalartypes) && $scalarcheck($related[$key])) ||
                        ($related[$key] instanceof $classname))) {
                    $this->related[$key] = $related[$key];
                } else {
                    throw new coding_exception($missingdataerr . $key . ' => ' . $classname);
                }
            }
        }
    }

    /**
     * Function to export the renderer data in a format that is suitable for a
     * mustache template. This means raw records are generated as in to_record,
     * but all strings are correctly passed through <API key> (or <API key>).
     *
     * @param renderer_base $output Used to do a final render of any components that need to be rendered for export.
     * @return stdClass
     */
    final public function export(renderer_base $output) {
        $data = new stdClass();
        $properties = self::<API key>();
        $values = (array) $this->data;

        $othervalues = $this->get_other_values($output);
        if (array_intersect_key($values, $othervalues)) {
            // Attempt to replace a standard property.
            throw new coding_exception('Cannot override a standard property value.');
        }
        $values += $othervalues;
        $record = (object) $values;

        foreach ($properties as $property => $definition) {
            if (isset($data->$property)) {
                // This happens when we have already defined the format properties.
                continue;
            } else if (!property_exists($record, $property) && array_key_exists('default', $definition)) {
                // We have a default value for this property.
                $record->$property = $definition['default'];
            } else if (!property_exists($record, $property) && !empty($definition['optional'])) {
                // Fine, this property can be omitted.
                continue;
            } else if (!property_exists($record, $property)) {
                // Whoops, we got something that wasn't defined.
                throw new coding_exception('Unexpected property ' . $property);
            }

            $data->$property = $record->$property;

            // If the field is PARAM_RAW and has a format field.
            if ($propertyformat = self::get_format_field($properties, $property)) {
                if (!property_exists($record, $propertyformat)) {
                    // Whoops, we got something that wasn't defined.
                    throw new coding_exception('Unexpected property ' . $propertyformat);
                }

                $formatparams = $this-><API key>($property);
                $format = $record->$propertyformat;

                list($text, $format) = <API key>($data->$property, $format, $formatparams['context'], $formatparams['component'],
                    $formatparams['filearea'], $formatparams['itemid'], $formatparams['options']);
                $data->$property = $text;
                $data->$propertyformat = $format;

            } else if ($definition['type'] === PARAM_TEXT) {
                $formatparams = $this-><API key>($property);

                if (!empty($definition['multiple'])) {
                    foreach ($data->$property as $key => $value) {
                        $data->{$property}[$key] = <API key>($value, $formatparams['context'], $formatparams['striplinks'],
                            $formatparams['options']);
                    }
                } else {
                    $data->$property = <API key>($data->$property, $formatparams['context'], $formatparams['striplinks'],
                        $formatparams['options']);
                }
            }
        }

        return $data;
    }

    /**
     * Get the format parameters.
     *
     * This method returns the parameters to use with the functions <API key>(), and
     * <API key>(). To override the default parameters, you can define a protected method
     * called '<API key><propertyName>'. For example, '<API key>',
     * if your property is 'description'.
     *
     * Your method must return an array containing any of the following keys:
     * - context: The context to use. Defaults to $this->related['context'] if defined, else throws an exception.
     * - component: The component to use with <API key>(). Defaults to null.
     * - filearea: The filearea to use with <API key>(). Defaults to null.
     * - itemid: The itemid to use with <API key>(). Defaults to null.
     * - options: An array of options accepted by <API key>() or <API key>(). Defaults to [].
     * - striplinks: Whether to strip the links with <API key>(). Defaults to true.
     *
     * @param string $property The property to get the parameters for.
     * @return array
     */
    final protected function <API key>($property) {
        $parameters = [
            'component' => null,
            'filearea' => null,
            'itemid' => null,
            'options' => [],
            'striplinks' => true,
        ];

        $candidate = '<API key>' . $property;
        if (method_exists($this, $candidate)) {
            $parameters = array_merge($parameters, $this->{$candidate}());
        }

        if (!isset($parameters['context'])) {
            if (!isset($this->related['context']) || !($this->related['context'] instanceof context)) {
                throw new coding_exception("Unknown context to use for formatting the property '$property' in the " .
                    "exporter '" . get_class($this) . "'. You either need to add 'context' to your related objects, " .
                    "or create the method '$candidate' and return the context from there.");
            }
            $parameters['context'] = $this->related['context'];
        } else if (!($parameters['context'] instanceof context)) {
            throw new coding_exception("The context given to format the property '$property' in the exporter '" .
                get_class($this) . "' is invalid.");
        }

        return $parameters;
    }

    /**
     * Get the additional values to inject while exporting.
     *
     * These are additional generated values that are not passed in through $data
     * to the exporter. For a persistent exporter - these are generated values that
     * do not exist in the persistent class. For your convenience the format_text or
     * format_string functions do not need to be applied to PARAM_TEXT fields,
     * it will be done automatically during export.
     *
     * These values are only used when returning data via {@link self::export()},
     * they are not used when generating any of the different external structures.
     *
     * Note: These must be defined in {@link self::<API key>()}.
     *
     * @param renderer_base $output The renderer.
     * @return array Keys are the property names, values are their values.
     */
    protected function get_other_values(renderer_base $output) {
        return array();
    }

    /**
     * Get the read properties definition of this exporter. Read properties combines the
     * default properties from the model (persistent or stdClass) with the properties defined
     * by {@link self::<API key>()}.
     *
     * @return array Keys are the property names, and value their definition.
     */
    final public static function <API key>() {
        $properties = static::<API key>();
        $customprops = static::<API key>();
        $customprops = static::format_properties($customprops);
        $properties += $customprops;
        return $properties;
    }

    /**
     * Recursively formats a given property definition with the default fields required.
     *
     * @param array $properties List of properties to format
     * @return array Formatted array
     */
    final public static function format_properties($properties) {
        foreach ($properties as $property => $definition) {
            // Ensures that null is set to its default.
            if (!isset($definition['null'])) {
                $properties[$property]['null'] = NULL_NOT_ALLOWED;
            }
            if (!isset($definition['description'])) {
                $properties[$property]['description'] = $property;
            }
            // If an array is provided, it may be a nested array that is unformatted so rinse and repeat.
            if (is_array($definition['type'])) {
                $properties[$property]['type'] = static::format_properties($definition['type']);
            }
        }
        return $properties;
    }

    /**
     * Get the properties definition of this exporter used for create, and update structures.
     * The read structures are returned by: {@link self::<API key>()}.
     *
     * @return array Keys are the property names, and value their definition.
     */
    final public static function <API key>() {
        $properties = static::define_properties();
        foreach ($properties as $property => $definition) {
            // Ensures that null is set to its default.
            if (!isset($definition['null'])) {
                $properties[$property]['null'] = NULL_NOT_ALLOWED;
            }
            if (!isset($definition['description'])) {
                $properties[$property]['description'] = $property;
            }
        }
        return $properties;
    }

    /**
     * Return the list of additional properties used only for display.
     *
     * Additional properties are only ever used for the read structure, and during
     * export of the persistent data.
     *
     * The format of the array returned by this method has to match the structure
     * defined in {@link \core\persistent::define_properties()}. The display properties
     * can however do some more fancy things. They can define 'multiple' => true to wrap
     * values in an <API key> automatically - or they can define the
     * type as a nested array of more properties in order to generate a nested
     * <API key>.
     *
     * You can specify an array of values by including a 'multiple' => true array value. This
     * will result in a nested <API key>.
     * E.g.
     *
     *   'arrayofbools' => array(
     *       'type' => PARAM_BOOL,
     *       'multiple' => true
     *   ),
     *
     * You can return a nested array in the type field, which will result in a nested <API key>.
     * E.g.
     *   'competency' => array(
     *       'type' => competency_exporter::<API key>()
     *   ),
     *
     * Other properties can be specifically marked as optional, in which case they do not need
     * to be included in the export in {@link self::get_other_values()}. This is useful when exporting
     * a substructure which cannot be set as null due to webservices protocol constraints.
     * E.g.
     *   'competency' => array(
     *       'type' => competency_exporter::<API key>(),
     *       'optional' => true
     *   ),
     *
     * @return array
     */
    protected static function <API key>() {
        return array();
    }

    /**
     * Return the list of properties.
     *
     * The format of the array returned by this method has to match the structure
     * defined in {@link \core\persistent::define_properties()}. Howewer you can
     * add a new attribute "description" to describe the parameter for documenting the API.
     *
     * Note that the type PARAM_TEXT should ONLY be used for strings which need to
     * go through filters (multilang, etc...) and do not have a FORMAT_* associated
     * to them. Typically strings passed through to format_string().
     *
     * Other filtered strings which use a FORMAT_* constant (hear used with format_text)
     * must be defined as PARAM_RAW.
     *
     * @return array
     */
    protected static function define_properties() {
        return array();
    }

    /**
     * Returns a list of objects that are related to this persistent.
     *
     * Only objects listed here can be cached in this object.
     *
     * The class name can be suffixed:
     * - with [] to indicate an array of values.
     * - with ? to indicate that 'null' is allowed.
     *
     * @return array of 'propertyname' => array('type' => classname, 'required' => true)
     */
    protected static function define_related() {
        return array();
    }

    /**
     * Get the context structure.
     *
     * @return <API key>
     */
    final protected static function <API key>() {
        return array(
            'contextid' => new external_value(PARAM_INT, 'The context id', VALUE_OPTIONAL),
            'contextlevel' => new external_value(PARAM_ALPHA, 'The context level', VALUE_OPTIONAL),
            'instanceid' => new external_value(PARAM_INT, 'The Instance id', VALUE_OPTIONAL),
        );
    }

    /**
     * Get the format field name.
     *
     * @param array $definitions List of properties definitions.
     * @param string $property The name of the property that may have a format field.
     * @return bool|string False, or the name of the format property.
     */
    final protected static function get_format_field($definitions, $property) {
        $formatproperty = $property . 'format';
        if (($definitions[$property]['type'] == PARAM_RAW || $definitions[$property]['type'] == PARAM_CLEANHTML)
                && isset($definitions[$formatproperty])
                && $definitions[$formatproperty]['type'] == PARAM_INT) {
            return $formatproperty;
        }
        return false;
    }

    /**
     * Get the format structure.
     *
     * @param string $property The name of the property on which the format applies.
     * @param array $definition The definition of the format property.
     * @param int $required Constant VALUE_*.
     * @return <API key>
     */
    final protected static function <API key>($property, $definition, $required = VALUE_REQUIRED) {
        if (array_key_exists('default', $definition)) {
            $required = VALUE_DEFAULT;
        }
        return new <API key>($property, $required);
    }

    /**
     * Returns the create structure.
     *
     * @return <API key>
     */
    final public static function <API key>() {
        $properties = self::<API key>();
        $returns = array();

        foreach ($properties as $property => $definition) {
            if ($property == 'id') {
                // The can not be set on create.
                continue;

            } else if (isset($returns[$property]) && substr($property, -6) === 'format') {
                // We've already treated the format.
                continue;
            }

            $required = VALUE_REQUIRED;
            $default = null;

            // We cannot use isset here because we want to detect nulls.
            if (array_key_exists('default', $definition)) {
                $required = VALUE_DEFAULT;
                $default = $definition['default'];
            }

            // Magically treat the contextid fields.
            if ($property == 'contextid') {
                if (isset($properties['context'])) {
                    throw new coding_exception('There cannot be a context and a contextid column');
                }
                $returns += self::<API key>();

            } else {
                $returns[$property] = new external_value($definition['type'], $definition['description'], $required, $default,
                    $definition['null']);

                // Magically treat the format properties.
                if ($formatproperty = self::get_format_field($properties, $property)) {
                    if (isset($returns[$formatproperty])) {
                        throw new coding_exception('The format for \'' . $property . '\' is already defined.');
                    }
                    $returns[$formatproperty] = self::<API key>($property,
                        $properties[$formatproperty], VALUE_REQUIRED);
                }
            }
        }

        return new <API key>($returns);
    }

    /**
     * Returns the read structure.
     *
     * @return <API key>
     */
    final public static function get_read_structure() {
        $properties = self::<API key>();
        return self::<API key>($properties);
    }

    /**
     * Returns the read structure from a set of properties (recursive).
     *
     * @param array $properties The properties.
     * @param int $required Whether is required.
     * @param mixed $default The default value.
     * @return <API key>
     */
    final protected static function <API key>($properties, $required = VALUE_REQUIRED,
            $default = null) {
        $returns = array();
        foreach ($properties as $property => $definition) {
            if (isset($returns[$property]) && substr($property, -6) === 'format') {
                // We've already treated the format.
                continue;
            }
            $thisvalue = null;

            $type = $definition['type'];
            $proprequired = VALUE_REQUIRED;
            $propdefault = null;

            if (array_key_exists('default', $definition)) {
                $propdefault = $definition['default'];
            }

            if (array_key_exists('optional', $definition)) {
                // Mark as optional. Note that this should only apply to "reading" "other" properties.
                $proprequired = VALUE_OPTIONAL;
            }

            if (is_array($type)) {
                // This is a nested array of more properties.
                $thisvalue = self::<API key>($type, $proprequired, $propdefault);
            } else {
                if ($definition['type'] == PARAM_TEXT || $definition['type'] == PARAM_CLEANHTML) {
                    // PARAM_TEXT always becomes PARAM_RAW because filters may be applied.
                    $type = PARAM_RAW;
                }
                $thisvalue = new external_value($type, $definition['description'], $proprequired, $propdefault,
                    $definition['null']);
            }

            if (!empty($definition['multiple'])) {
                $returns[$property] = new <API key>($thisvalue, $definition['description'], $proprequired,
                    $propdefault);
            } else {
                $returns[$property] = $thisvalue;

                // Magically treat the format properties (not possible for arrays).
                if ($formatproperty = self::get_format_field($properties, $property)) {
                    if (isset($returns[$formatproperty])) {
                        throw new coding_exception('The format for \'' . $property . '\' is already defined.');
                    }
                    $returns[$formatproperty] = self::<API key>($property, $properties[$formatproperty]);
                }
            }
        }

        return new <API key>($returns, '', $required, $default);
    }

    /**
     * Returns the update structure.
     *
     * This structure can never be included at the top level for an external function signature
     * because it contains optional parameters.
     *
     * @return <API key>
     */
    final public static function <API key>() {
        $properties = self::<API key>();
        $returns = array();

        foreach ($properties as $property => $definition) {
            if (isset($returns[$property]) && substr($property, -6) === 'format') {
                // We've already treated the format.
                continue;
            }

            $default = null;
            $required = VALUE_OPTIONAL;
            if ($property == 'id') {
                $required = VALUE_REQUIRED;
            }

            // Magically treat the contextid fields.
            if ($property == 'contextid') {
                if (isset($properties['context'])) {
                    throw new coding_exception('There cannot be a context and a contextid column');
                }
                $returns += self::<API key>();

            } else {
                $returns[$property] = new external_value($definition['type'], $definition['description'], $required, $default,
                    $definition['null']);

                // Magically treat the format properties.
                if ($formatproperty = self::get_format_field($properties, $property)) {
                    if (isset($returns[$formatproperty])) {
                        throw new coding_exception('The format for \'' . $property . '\' is already defined.');
                    }
                    $returns[$formatproperty] = self::<API key>($property,
                        $properties[$formatproperty], VALUE_OPTIONAL);
                }
            }
        }

        return new <API key>($returns);
    }
}
class Foo { public void foo() { int i; } }
// Use of this source code is governed by a BSD-style // +build windows package windows_test import ( "fmt" "internal/syscall/windows" "os" "os/exec" "syscall" "testing" "unsafe" ) func <API key>(t *testing.T) { if os.Getenv("<API key>") == "1" { wil, err := <API key>() if err != nil { fmt.Fprintf(os.Stderr, "error: %s\n", err.Error()) os.Exit(9) return } fmt.Printf("%s", wil) os.Exit(0) return } cmd := exec.Command(os.Args[0], "-test.run=<API key>", " cmd.Env = []string{"<API key>=1"} token, err := <API key>(sidWilLow) if err != nil { t.Fatal(err) } defer token.Close() cmd.SysProcAttr = &syscall.SysProcAttr{ Token: token, } out, err := cmd.CombinedOutput() if err != nil { t.Fatal(err) } if string(out) != sidWilLow { t.Fatalf("Child process did not run as low integrity level: %s", string(out)) } } const ( sidWilLow = `S-1-16-4096` ) func <API key>() (string, error) { procToken, err := syscall.<API key>() if err != nil { return "", err } defer procToken.Close() p, err := tokenGetInfo(procToken, syscall.TokenIntegrityLevel, 64) if err != nil { return "", err } tml := (*windows.<API key>)(p) sid := (*syscall.SID)(unsafe.Pointer(tml.Label.Sid)) return sid.String() } func tokenGetInfo(t syscall.Token, class uint32, initSize int) (unsafe.Pointer, error) { n := uint32(initSize) for { b := make([]byte, n) e := syscall.GetTokenInformation(t, class, &b[0], uint32(len(b)), &n) if e == nil { return unsafe.Pointer(&b[0]), nil } if e != syscall.<API key> { return nil, e } if n <= uint32(len(b)) { return nil, e } } } func <API key>(wns string) (syscall.Token, error) { var procToken, token syscall.Token proc, err := syscall.GetCurrentProcess() if err != nil { return 0, err } defer syscall.CloseHandle(proc) err = syscall.OpenProcessToken(proc, syscall.TOKEN_DUPLICATE| syscall.<API key>| syscall.TOKEN_QUERY| syscall.<API key>, &procToken) if err != nil { return 0, err } defer procToken.Close() sid, err := syscall.StringToSid(wns) if err != nil { return 0, err } tml := &windows.<API 
key>{} tml.Label.Attributes = windows.SE_GROUP_INTEGRITY tml.Label.Sid = sid err = windows.DuplicateTokenEx(procToken, 0, nil, windows.<API key>, windows.TokenPrimary, &token) if err != nil { return 0, err } err = windows.SetTokenInformation(token, syscall.TokenIntegrityLevel, uintptr(unsafe.Pointer(tml)), tml.Size()) if err != nil { token.Close() return 0, err } return token, nil }
<head> <meta charset="utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0"> <title>{% if page.title %}{{ page.title }}{% else %}{{ site.title }}{% endif %}</title> <meta name="author" content="{{ site.author.name }}" /> {% if page.subtitle %} <meta name="description" content="{{ page.subtitle }}"> {% endif %} <link rel="alternate" type="application/rss+xml" title="{{ site.title }} - {{ site.description }}" href="{{ site.baseurl }}/feed.xml" /> {% if layout.common-ext-css %} {% for css in layout.common-ext-css %} <link rel="stylesheet" href="{{ css }}" /> {% endfor %} {% endif %} {% if layout.common-css %} {% for css in layout.common-css %} <link rel="stylesheet" href="{{ css | prepend: site.baseurl | replace: ' {% endfor %} {% endif %} {% if layout.common-googlefonts %} {% for font in layout.common-googlefonts %} <link rel="stylesheet" href="//fonts.googleapis.com/css?family={{ font }}" /> {% endfor %} {% endif %} {% if page.ext-css %} {% for css in page.ext-css %} <link rel="stylesheet" href="{{ css }}" /> {% endfor %} {% endif %} {% if page.css %} {% for css in page.css %} <link rel="stylesheet" href="{{ css | prepend: site.baseurl | replace: ' {% endfor %} {% endif %} {% if page.googlefonts %} {% for font in page.googlefonts %} <link rel="stylesheet" href="//fonts.googleapis.com/css?family={{ font }}" /> {% endfor %} {% endif %} <!-- Facebook OpenGraph tags --> <meta property="og:title" content="{% if page.title %}{{ page.title }}{% else %}{{ site.title }}{% endif %}" /> <meta property="og:type" content="website" /> {% if page.id %} <meta property="og:url" content="{{ site.url }}{{ page.url }}/" /> {% else %} <meta property="og:url" content="{{ site.url }}{{ page.url | remove: '/index.html' | remove: '.html' }}" /> {% endif %} {% if page.fb-img %} <meta property="og:image" content="{{ page.fb-img }}" /> {% elsif site.avatar %} <meta property="og:image" content="{{ 
site.url }}{{ site.avatar }}" /> {% else %} <meta property="og:image" content="" /> {% endif %} </head>
/* Filter-form table cells: suppress borders inside the filter tables.
   NOTE(review): first and third selectors are redacted in this copy of the
   file (`<API key>`) — confirm the real IDs against the deployed stylesheet. */
#<API key> td, #summary-filter-form td, #<API key> td { border: none; }

/* Container for the filter buttons; clears floated controls above it. */
.filter-controls { clear: both; }

/*button.btn.btn-primary.btn-filter,*/
/* Primary filter submit button: slightly narrower than its cell. */
input.btn.btn-primary.filter-submit { width: 90%; padding-right: 5px; padding-left: 5px; }

/* AJAX variant of the submit button: full width, nudged to line up with
   the adjacent input (negative margins compensate for borders/spacing). */
input.btn.btn-primary.filter-submit.filter-ajax { width: 100%; margin-top: 15px; margin-bottom: -15px; margin-left: -1px; }

/* Responsive styling */
/* Default (base) width of the free-text search box. */
input.filter-search{ width: 50%; text-align: left; }

/* Large desktop */
@media (min-width: 1200px) { input.filter-search{ width: 66%; } }

/* Portrait tablet to landscape and desktop */
@media (min-width: 768px) and (max-width: 979px) { input.filter-search{ width: 33%; } input.btn.btn-primary.filter-submit.filter-ajax { width: 100%; margin-left: -1px; } }

/* Landscape phone to portrait tablet */
/* !important needed to override the base 50% width on narrow screens. */
@media (max-width: 767px) { input.filter-search{ width: 100% !important; } input.btn.btn-primary.filter-submit.filter-ajax { width: 100%; margin-left: -1px; } }

/* Landscape phones and down */
@media (max-width: 480px) { input.filter-search{ width: 100% !important; } }
<script>
  // HACK: This is not an ideal way to deliver chrome messages to an
  // inner-frame's content, but it seems to be the only way that makes
  // `event.source` be this (outer frame) window from the frame's view.
  //
  // Relay: messages posted by chrome arrive here with `event.source === null`
  // (that null check is how chrome-originated messages are recognized);
  // forward those, and only those, into the embedded iframe.
  window.onmessage = function(event) { var frame = document.querySelector("iframe"); var content = frame.contentWindow; // If message is posted from chrome it has no `event.source`.
 if (event.source === null) content.postMessage(event.data, "*"); };
  // HACK: Ideally we would have used srcdoc on the iframe, but in that case
  // the origin of the document is either "content" (unable to load add-on
  // resources) or "chrome" (to which add-on resources cannot send messages
  // back). Instead, the document body is passed base64-encoded in the URL
  // fragment and decoded into the live document here.
  document.documentElement.style.overflow = "hidden";
  document.documentElement.innerHTML = atob(location.hash.substr(1));
</script>
/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/<API key>.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

/* Convert a list_head on an inode's private_list back to its buffer_head. */
#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

/*
 * Initialise a buffer_head's end-of-I/O callback and its private cookie.
 * Only these two fields are touched; all other state is left as-is.
 */
inline void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}
EXPORT_SYMBOL(init_buffer);

/*
 * Wait-bit action routine: yield the CPU to I/O scheduling while the
 * awaited buffer bit is set.  Always returns 0 (i.e. "keep waiting").
 */
static int sleep_on_buffer(void *word)
{
	io_schedule();
	return 0;
}

/*
 * Sleep until the buffer's BH_Lock bit can be acquired.
 * NOTE(review): the final argument is redacted in this copy of the file
 * (`<API key>`); upstream passes TASK_UNINTERRUPTIBLE here -- confirm.
 */
void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer, <API key>);
}
EXPORT_SYMBOL(__lock_buffer);

/*
 * Release BH_Lock and wake any waiters.  The redacted call between the
 * clear and the wakeup (`<API key>()`) is presumably a memory barrier
 * ordering the bit-clear before the wake -- confirm against upstream.
 */
void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	<API key>();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
*/ void __wait_on_buffer(struct buffer_head * bh) { wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, <API key>); } EXPORT_SYMBOL(__wait_on_buffer); static void <API key>(struct page *page) { ClearPagePrivate(page); set_page_private(page, 0); page_cache_release(page); } static int quiet_error(struct buffer_head *bh) { if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit()) return 0; return 1; } static void buffer_io_error(struct buffer_head *bh) { char b[BDEVNAME_SIZE]; printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n", bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr); } /* * End-of-IO handler helper function which does not touch the bh after * unlocking it. * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but * a race there is benign: unlock_buffer() only use the bh's address for * hashing after unlocking the buffer, so it doesn't actually touch the bh * itself. */ static void <API key>(struct buffer_head *bh, int uptodate) { if (uptodate) { set_buffer_uptodate(bh); } else { /* This happens, due to failed READA attempts. */ <API key>(bh); } unlock_buffer(bh); } /* * Default synchronous end-of-IO handler.. Just mark it up-to-date and * unlock the buffer. This is what ll_rw_block uses too. */ void <API key>(struct buffer_head *bh, int uptodate) { <API key>(bh, uptodate); put_bh(bh); } EXPORT_SYMBOL(<API key>); void <API key>(struct buffer_head *bh, int uptodate) { char b[BDEVNAME_SIZE]; if (uptodate) { set_buffer_uptodate(bh); } else { if (!quiet_error(bh)) { buffer_io_error(bh); printk(KERN_WARNING "lost page write due to " "I/O error on %s\n", bdevname(bh->b_bdev, b)); } <API key>(bh); <API key>(bh); } unlock_buffer(bh); put_bh(bh); } EXPORT_SYMBOL(<API key>); /* * Various filesystems appear to want __find_get_block to be non-blocking. * But it's the page lock which protects the buffers. To get around this, * we get exclusion from try_to_free_buffers with the blockdev mapping's * private_lock. 
* * Hack idea: for the blockdev mapping, i_bufferlist_lock contention * may be quite high. This code could TryLock the page, and if that * succeeds, there is no need to take private_lock. (But if * private_lock is contended then so is mapping->tree_lock). */ static struct buffer_head * <API key>(struct block_device *bdev, sector_t block) { struct inode *bd_inode = bdev->bd_inode; struct address_space *bd_mapping = bd_inode->i_mapping; struct buffer_head *ret = NULL; pgoff_t index; struct buffer_head *bh; struct buffer_head *head; struct page *page; int all_mapped = 1; index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits); page = find_get_page(bd_mapping, index); if (!page) goto out; spin_lock(&bd_mapping->private_lock); if (!page_has_buffers(page)) goto out_unlock; head = page_buffers(page); bh = head; do { if (!buffer_mapped(bh)) all_mapped = 0; else if (bh->b_blocknr == block) { ret = bh; get_bh(bh); goto out_unlock; } bh = bh->b_this_page; } while (bh != head); /* we might be here because some of the buffers on this page are * not mapped. This is due to various races between * file io on the block device and getblk. It gets dealt with * elsewhere, don't buffer_error if we had some unmapped buffers */ if (all_mapped) { char b[BDEVNAME_SIZE]; printk("<API key>() failed. " "block=%llu, b_blocknr=%llu\n", (unsigned long long)block, (unsigned long long)bh->b_blocknr); printk("b_state=0x%08lx, b_size=%zu\n", bh->b_state, bh->b_size); printk("device %s blocksize: %d\n", bdevname(bdev, b), 1 << bd_inode->i_blkbits); } out_unlock: spin_unlock(&bd_mapping->private_lock); page_cache_release(page); out: return ret; } /* * Kick the writeback threads then try to free up some ZONE_NORMAL memory. 
*/ static void free_more_memory(void) { struct zone *zone; int nid; <API key>(1024, <API key>); yield(); <API key>(nid) { (void)<API key>(node_zonelist(nid, GFP_NOFS), gfp_zone(GFP_NOFS), NULL, &zone); if (zone) try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0, GFP_NOFS, NULL); } } /* * I/O completion handler for <API key>() - pages * which come unlocked at the end of I/O. */ static void <API key>(struct buffer_head *bh, int uptodate) { unsigned long flags; struct buffer_head *first; struct buffer_head *tmp; struct page *page; int page_uptodate = 1; BUG_ON(!buffer_async_read(bh)); page = bh->b_page; if (uptodate) { set_buffer_uptodate(bh); } else { <API key>(bh); if (!quiet_error(bh)) buffer_io_error(bh); SetPageError(page); } /* * Be _very_ careful from here on. Bad things can happen if * two buffer heads end IO at almost the same time and both * decide that the page is now completely done. */ first = page_buffers(page); local_irq_save(flags); bit_spin_lock(BH_Uptodate_Lock, &first->b_state); <API key>(bh); unlock_buffer(bh); tmp = bh; do { if (!buffer_uptodate(tmp)) page_uptodate = 0; if (buffer_async_read(tmp)) { BUG_ON(!buffer_locked(tmp)); goto still_busy; } tmp = tmp->b_this_page; } while (tmp != bh); bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); local_irq_restore(flags); /* * If none of the buffers had errors and they are all * uptodate then we can set the page uptodate. */ if (page_uptodate && !PageError(page)) SetPageUptodate(page); unlock_page(page); return; still_busy: bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); local_irq_restore(flags); return; } /* * Completion handler for <API key>() - pages which are unlocked * during I/O, and which have PageWriteback cleared upon I/O completion. 
*/ void <API key>(struct buffer_head *bh, int uptodate) { char b[BDEVNAME_SIZE]; unsigned long flags; struct buffer_head *first; struct buffer_head *tmp; struct page *page; BUG_ON(!buffer_async_write(bh)); page = bh->b_page; if (uptodate) { set_buffer_uptodate(bh); } else { if (!quiet_error(bh)) { buffer_io_error(bh); printk(KERN_WARNING "lost page write due to " "I/O error on %s\n", bdevname(bh->b_bdev, b)); } set_bit(AS_EIO, &page->mapping->flags); <API key>(bh); <API key>(bh); SetPageError(page); } first = page_buffers(page); local_irq_save(flags); bit_spin_lock(BH_Uptodate_Lock, &first->b_state); <API key>(bh); unlock_buffer(bh); tmp = bh->b_this_page; while (tmp != bh) { if (buffer_async_write(tmp)) { BUG_ON(!buffer_locked(tmp)); goto still_busy; } tmp = tmp->b_this_page; } bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); local_irq_restore(flags); end_page_writeback(page); return; still_busy: bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); local_irq_restore(flags); return; } EXPORT_SYMBOL(<API key>); /* * If a page's buffers are under async readin (<API key> * completion) then there is a possibility that another thread of * control could lock one of the buffers after it has completed * but while some of the other buffers have not completed. This * locked buffer would confuse <API key>() into not unlocking * the page. So the absence of BH_Async_Read tells <API key>() * that this buffer is not under async I/O. * * The page comes unlocked when it has no locked buffer_async buffers * left. * * PageLocked prevents anyone starting new async I/O reads any of * the buffers. * * PageWriteback is used to prevent simultaneous writeout of the same * page. * * PageLocked prevents anyone from starting writeback of a page which is * under read I/O (PageWriteback is only ever set against a locked page). 
*/ static void <API key>(struct buffer_head *bh) { bh->b_end_io = <API key>; <API key>(bh); } static void <API key>(struct buffer_head *bh, bh_end_io_t *handler) { bh->b_end_io = handler; <API key>(bh); } void <API key>(struct buffer_head *bh) { <API key>(bh, <API key>); } EXPORT_SYMBOL(<API key>); /* * fs/buffer.c contains helper functions for buffer-backed address space's * fsync functions. A common requirement for buffer-based filesystems is * that certain data from the backing blockdev needs to be written out for * a successful fsync(). For example, ext2 indirect blocks need to be * written back and waited upon before fsync() returns. * * The functions <API key>(), fsync_inode_buffers(), * inode_has_buffers() and <API key>() are provided for the * management of a list of dependent buffers at ->i_mapping->private_list. * * Locking is a little subtle: try_to_free_buffers() will remove buffers * from their controlling inode's queue when they are being freed. But * try_to_free_buffers() will be operating against the *blockdev* mapping * at the time, not against the S_ISREG file which depends on those buffers. * So the locking for private_list is via the private_lock in the address_space * which backs the buffers. Which is different from the address_space * against which the buffers are listed. So for a particular address_space, * mapping->private_lock does *not* protect mapping->private_list! In fact, * mapping->private_list will always be protected by the backing blockdev's * ->private_lock. * * Which introduces a requirement: all buffers on an address_space's * ->private_list must be from the same address_space: the blockdev's. * * address_spaces which do not place buffers at ->private_list via these * utility functions are free to use private_lock and private_list for * whatever they want. The only requirement is that list_empty(private_list) * be true at clear_inode() time. * * FIXME: clear_inode should not call <API key>(). The * filesystems should do that. 
<API key>() should just go * BUG_ON(!list_empty). * * FIXME: <API key>() is a data-plane operation. It should * take an address_space, not an inode. And it should be called * <API key>() to clearly define why those buffers are being * queued up. * * FIXME: <API key>() doesn't need to add the buffer to the * list if it is already on a list. Because if the buffer is on a list, * it *must* already be on the right one. If not, the filesystem is being * silly. This will save a ton of locking. But first we have to ensure * that buffers are taken *off* the old inode's list when they are freed * (presumably in truncate). That requires careful auditing of all * filesystems (do it inside bforget()). It could also be done by bringing * b_inode back. */ /* * The buffer's backing address_space's private_lock must be held */ static void <API key>(struct buffer_head *bh) { list_del_init(&bh->b_assoc_buffers); WARN_ON(!bh->b_assoc_map); if (<API key>(bh)) set_bit(AS_EIO, &bh->b_assoc_map->flags); bh->b_assoc_map = NULL; } int inode_has_buffers(struct inode *inode) { return !list_empty(&inode->i_data.private_list); } /* * osync is designed to support O_SYNC io. It waits synchronously for * all already-submitted IO to complete, but does not queue any new * writes to the disk. * * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as * you dirty the buffers, and then use osync_inode_buffers to wait for * completion. Any other dirty buffers which are not yet queued for * write will not be flushed to disk by the osync. 
 */
/*
 * Walk @list backwards waiting for every currently-locked buffer's I/O to
 * finish.  The lock is dropped around each wait, so the walk restarts from
 * the tail (goto repeat) after every sleep; returns -EIO if any waited-on
 * buffer came back not uptodate, else 0.  No new writes are queued here.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			/* Pin the bh, drop the lock, sleep, then rescan. */
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

/*
 * Thaw a single superblock's bdev, retrying (and warning) until thaw_bdev
 * reports the device is no longer frozen.  iterate_supers() callback.
 */
static void do_thaw_one(struct super_block *sb, void *unused)
{
	char b[BDEVNAME_SIZE];
	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
		printk(KERN_WARNING "Emergency Thaw on %s\n",
		       bdevname(sb->s_bdev, b));
}

/* Workqueue body for emergency_thaw_all(); frees its own work item. */
static void do_thaw_all(struct work_struct *work)
{
	iterate_supers(do_thaw_one, NULL);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	/* GFP_ATOMIC: may be called from SysRq (atomic) context.
	 * Allocation failure is silently tolerated: no thaw happens. */
	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

/**
 * <API key> - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 *
 * NOTE(review): the function name is redacted in this copy; upstream this
 * is sync_mapping_buffers() -- confirm.
 */
int <API key>(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	/* Nothing associated, or nothing queued: trivially in sync. */
	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(<API key>);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.
So that indirects merge nicely with their data. */ void <API key>(struct block_device *bdev, sector_t bblock, unsigned blocksize) { struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); if (bh) { if (buffer_dirty(bh)) ll_rw_block(WRITE, 1, &bh); put_bh(bh); } } void <API key>(struct buffer_head *bh, struct inode *inode) { struct address_space *mapping = inode->i_mapping; struct address_space *buffer_mapping = bh->b_page->mapping; mark_buffer_dirty(bh); if (!mapping->assoc_mapping) { mapping->assoc_mapping = buffer_mapping; } else { BUG_ON(mapping->assoc_mapping != buffer_mapping); } if (!bh->b_assoc_map) { spin_lock(&buffer_mapping->private_lock); list_move_tail(&bh->b_assoc_buffers, &mapping->private_list); bh->b_assoc_map = mapping; spin_unlock(&buffer_mapping->private_lock); } } EXPORT_SYMBOL(<API key>); /* * Mark the page dirty, and set it dirty in the radix tree, and mark the inode * dirty. * * If warn is true, then emit a warning if the page is not uptodate and has * not been truncated. */ static void __set_page_dirty(struct page *page, struct address_space *mapping, int warn) { spin_lock_irq(&mapping->tree_lock); if (page->mapping) { /* Race with truncate? */ WARN_ON_ONCE(warn && !PageUptodate(page)); <API key>(page, mapping); radix_tree_tag_set(&mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); } spin_unlock_irq(&mapping->tree_lock); __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); } /* * Add a page to the dirty page list. * * It is a sad fact of life that this function is called from several places * deeply under spinlocking. It may not sleep. * * If the page has buffers, the uptodate buffers are set dirty, to preserve * dirty-state coherency between the page and the buffers. It the page does * not have buffers then when they are later attached they will all be set * dirty. * * The buffers are dirtied before the page is dirtied. 
There's a small race * window in which a writepage caller may see the page cleanness but not the * buffer dirtiness. That's fine. If this code were to set the page dirty * before the buffers, a concurrent writepage caller could clear the page dirty * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean * page on the dirty page list. * * We use private_lock to lock against try_to_free_buffers while using the * page's buffer list. Also use this to protect against clean buffers being * added to the page after it was set dirty. * * FIXME: may need to call ->reservepage here as well. That's rather up to the * address_space though. */ int <API key>(struct page *page) { int newly_dirty; struct address_space *mapping = page_mapping(page); if (unlikely(!mapping)) return !TestSetPageDirty(page); spin_lock(&mapping->private_lock); if (page_has_buffers(page)) { struct buffer_head *head = page_buffers(page); struct buffer_head *bh = head; do { set_buffer_dirty(bh); bh = bh->b_this_page; } while (bh != head); } newly_dirty = !TestSetPageDirty(page); spin_unlock(&mapping->private_lock); if (newly_dirty) __set_page_dirty(page, mapping, 1); return newly_dirty; } EXPORT_SYMBOL(<API key>); /* * Write out and wait upon a list of buffers. * * We have conflicting pressures: we want to make sure that all * initially dirty buffers get waited on, but that any subsequently * dirtied buffers don't. After all, we don't want fsync to last * forever if somebody is actively writing to the file. * * Do this in two main stages: first we copy dirty buffers to a * temporary inode list, queueing the writes as we go. Then we clean * up, waiting for those writes to complete. * * During this second stage, any subsequent updates to the file may end * up refiling the buffer on the original inode's dirty list again, so * there is a chance we will end up with a buffer queued for write but * not yet completed on that list. 
So, as a final cleanup we go through * the osync code to catch these locked, dirty buffers without requeuing * any newly dirty buffers for write. */ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list) { struct buffer_head *bh; struct list_head tmp; struct address_space *mapping; int err = 0, err2; struct blk_plug plug; INIT_LIST_HEAD(&tmp); blk_start_plug(&plug); spin_lock(lock); while (!list_empty(list)) { bh = BH_ENTRY(list->next); mapping = bh->b_assoc_map; <API key>(bh); /* Avoid race with <API key>() which does * a lockless check and we rely on seeing the dirty bit */ smp_mb(); if (buffer_dirty(bh) || buffer_locked(bh)) { list_add(&bh->b_assoc_buffers, &tmp); bh->b_assoc_map = mapping; if (buffer_dirty(bh)) { get_bh(bh); spin_unlock(lock); /* * Ensure any pending I/O completes so that * write_dirty_buffer() actually writes the * current contents - it is a noop if I/O is * still in flight on potentially older * contents. */ write_dirty_buffer(bh, WRITE_SYNC); /* * Kick off IO for the previous mapping. Note * that we will not run the very last mapping, * wait_on_buffer() will do that for us * through sync_buffer(). */ brelse(bh); spin_lock(lock); } } } spin_unlock(lock); blk_finish_plug(&plug); spin_lock(lock); while (!list_empty(&tmp)) { bh = BH_ENTRY(tmp.prev); get_bh(bh); mapping = bh->b_assoc_map; <API key>(bh); /* Avoid race with <API key>() which does * a lockless check and we rely on seeing the dirty bit */ smp_mb(); if (buffer_dirty(bh)) { list_add(&bh->b_assoc_buffers, &mapping->private_list); bh->b_assoc_map = mapping; } spin_unlock(lock); wait_on_buffer(bh); if (!buffer_uptodate(bh)) err = -EIO; brelse(bh); spin_lock(lock); } spin_unlock(lock); err2 = osync_buffers_list(lock, list); if (err) return err; else return err2; } /* * Invalidate any and all dirty buffers on a given inode. We are * probably unmounting the fs, but that doesn't mean we have already * done a sync(). Just drop the buffers from the inode list. 
* * NOTE: we take the inode's blockdev's mapping's private_lock. Which * assumes that all the buffers are against the blockdev. Not true * for reiserfs. */ void <API key>(struct inode *inode) { if (inode_has_buffers(inode)) { struct address_space *mapping = &inode->i_data; struct list_head *list = &mapping->private_list; struct address_space *buffer_mapping = mapping->assoc_mapping; spin_lock(&buffer_mapping->private_lock); while (!list_empty(list)) <API key>(BH_ENTRY(list->next)); spin_unlock(&buffer_mapping->private_lock); } } EXPORT_SYMBOL(<API key>); /* * Remove any clean buffers from the inode's buffer list. This is called * when we're trying to free the inode itself. Those buffers can pin it. * * Returns true if all buffers were removed. */ int <API key>(struct inode *inode) { int ret = 1; if (inode_has_buffers(inode)) { struct address_space *mapping = &inode->i_data; struct list_head *list = &mapping->private_list; struct address_space *buffer_mapping = mapping->assoc_mapping; spin_lock(&buffer_mapping->private_lock); while (!list_empty(list)) { struct buffer_head *bh = BH_ENTRY(list->next); if (buffer_dirty(bh)) { ret = 0; break; } <API key>(bh); } spin_unlock(&buffer_mapping->private_lock); } return ret; } /* * Create the appropriate buffers when given a page for data area and * the size of each buffer.. Use the bh->b_this_page linked list to * follow the buffers created. Return NULL if unable to create more * buffers. * * The retry flag is used to differentiate async IO (paging, swapping) * which may not fail from ordinary buffer allocations. 
*/ struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size, int retry) { struct buffer_head *bh, *head; long offset; try_again: head = NULL; offset = PAGE_SIZE; while ((offset -= size) >= 0) { bh = alloc_buffer_head(GFP_NOFS); if (!bh) goto no_grow; bh->b_bdev = NULL; bh->b_this_page = head; bh->b_blocknr = -1; head = bh; bh->b_state = 0; atomic_set(&bh->b_count, 0); bh->b_size = size; /* Link the buffer to its page */ set_bh_page(bh, page, offset); init_buffer(bh, NULL, NULL); } return head; /* * In case anything failed, we just free everything we got. */ no_grow: if (head) { do { bh = head; head = head->b_this_page; free_buffer_head(bh); } while (head); } /* * Return failure for non-async IO requests. Async IO requests * are not allowed to fail, so we have to wait until buffer heads * become available. But we don't want tasks sleeping with * partially complete buffers, so all were released above. */ if (!retry) return NULL; /* We're _really_ low on memory. Now we just * wait for old buffer heads to become free due to * finishing IO. Since this is an async request and * the reserve list is empty, we're sure there are * async buffer heads in use. */ free_more_memory(); goto try_again; } EXPORT_SYMBOL_GPL(alloc_page_buffers); static inline void link_dev_buffers(struct page *page, struct buffer_head *head) { struct buffer_head *bh, *tail; bh = head; do { tail = bh; bh = bh->b_this_page; } while (bh); tail->b_this_page = head; attach_page_buffers(page, head); } /* * Initialise the state of a blockdev page's buffers. 
*/ static sector_t init_page_buffers(struct page *page, struct block_device *bdev, sector_t block, int size) { struct buffer_head *head = page_buffers(page); struct buffer_head *bh = head; int uptodate = PageUptodate(page); sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode)); do { if (!buffer_mapped(bh)) { init_buffer(bh, NULL, NULL); bh->b_bdev = bdev; bh->b_blocknr = block; if (uptodate) set_buffer_uptodate(bh); if (block < end_block) set_buffer_mapped(bh); } block++; bh = bh->b_this_page; } while (bh != head); /* * Caller needs to validate requested block against end of device. */ return end_block; } /* * Create the page-cache page that contains the requested block. * * This is used purely for blockdev mappings. */ static int grow_dev_page(struct block_device *bdev, sector_t block, pgoff_t index, int size, int sizebits) { struct inode *inode = bdev->bd_inode; struct page *page; struct buffer_head *bh; sector_t end_block; int ret = 0; /* Will call free_more_memory() */ page = find_or_create_page(inode->i_mapping, index, (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE); if (!page) return ret; BUG_ON(!PageLocked(page)); if (page_has_buffers(page)) { bh = page_buffers(page); if (bh->b_size == size) { end_block = init_page_buffers(page, bdev, index << sizebits, size); goto done; } if (!try_to_free_buffers(page)) goto failed; } /* * Allocate some buffers for this page */ bh = alloc_page_buffers(page, size, 0); if (!bh) goto failed; /* * Link the page to the buffers and initialise them. Take the * lock to be atomic wrt __find_get_block(), which does not * run under the page lock. */ spin_lock(&inode->i_mapping->private_lock); link_dev_buffers(page, bh); end_block = init_page_buffers(page, bdev, index << sizebits, size); spin_unlock(&inode->i_mapping->private_lock); done: ret = (block < end_block) ? 1 : -ENXIO; failed: unlock_page(page); page_cache_release(page); return ret; } /* * Create buffers for the specified block device block's page. 
If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
	pgoff_t index;
	int sizebits;

	/* sizebits = log2(PAGE_SIZE / size): blocks per page, as a shift. */
	sizebits = -1;
	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);

	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		char b[BDEVNAME_SIZE];

		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %s\n",
			__func__, (unsigned long long)block,
			bdevname(bdev, b));
		return -EIO;
	}

	/* Create a page with the proper size buffers.. */
	return grow_dev_page(bdev, block, index, size, sizebits);
}

/*
 * Slow path of __getblk(): the buffer is not cached, so create it,
 * retrying after memory reclaim until the allocation succeeds.
 */
static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "logical block size: %d\n",
					bdev_logical_block_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head *bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size);
		if (ret < 0)
			return NULL;
		if (ret == 0)
			free_more_memory();
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.
When blockdev buffers are explicitly read with bread(), they * individually become uptodate. But their backing page remains not * uptodate - even if all of its buffers are uptodate. A subsequent * <API key>() against that page will discover all the uptodate * buffers, will set the page uptodate and will perform no I/O. */ /** * mark_buffer_dirty - mark a buffer_head as needing writeout * @bh: the buffer_head to mark dirty * * mark_buffer_dirty() will set the dirty bit against the buffer, then set its * backing page dirty, then tag the page as dirty in its address_space's radix * tree and then attach the address_space's inode to its superblock's dirty * inode list. * * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock, * mapping->tree_lock and mapping->host->i_lock. */ void mark_buffer_dirty(struct buffer_head *bh) { WARN_ON_ONCE(!buffer_uptodate(bh)); /* * Very *carefully* optimize the it-is-already-dirty case. * * Don't let the final "is it dirty" escape to before we * perhaps modified the buffer. */ if (buffer_dirty(bh)) { smp_mb(); if (buffer_dirty(bh)) return; } if (!<API key>(bh)) { struct page *page = bh->b_page; if (!TestSetPageDirty(page)) { struct address_space *mapping = page_mapping(page); if (mapping) __set_page_dirty(page, mapping, 0); } } } EXPORT_SYMBOL(mark_buffer_dirty); /* * Decrement a buffer_head's reference count. If all buffers against a page * have zero reference count, are clean and unlocked, and if the page is clean * and unlocked then try_to_free_buffers() may strip the buffers from the page * in preparation for freeing it (sometimes, rarely, buffers are removed from * a page but it ends up not being freed, and buffers may later be reattached). */ void __brelse(struct buffer_head * buf) { if (atomic_read(&buf->b_count)) { put_bh(buf); return; } WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n"); } EXPORT_SYMBOL(__brelse); /* * bforget() is like brelse(), except it discards any * potentially dirty data. 
*/ void __bforget(struct buffer_head *bh) { clear_buffer_dirty(bh); if (bh->b_assoc_map) { struct address_space *buffer_mapping = bh->b_page->mapping; spin_lock(&buffer_mapping->private_lock); list_del_init(&bh->b_assoc_buffers); bh->b_assoc_map = NULL; spin_unlock(&buffer_mapping->private_lock); } __brelse(bh); } EXPORT_SYMBOL(__bforget); static struct buffer_head *__bread_slow(struct buffer_head *bh) { lock_buffer(bh); if (buffer_uptodate(bh)) { unlock_buffer(bh); return bh; } else { get_bh(bh); bh->b_end_io = <API key>; submit_bh(READ, bh); wait_on_buffer(bh); if (buffer_uptodate(bh)) return bh; } brelse(bh); return NULL; } /* * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block(). * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their * refcount elevated by one when they're in an LRU. A buffer can only appear * once in a particular CPU's LRU. A single buffer can be present in multiple * CPU's LRUs at the same time. * * This is a transparent caching front-end to sb_bread(), sb_getblk() and * sb_find_get_block(). * * The LRUs themselves only need locking against invalidate_bh_lrus. We use * a local interrupt disable for that. */ #define BH_LRU_SIZE 8 struct bh_lru { struct buffer_head *bhs[BH_LRU_SIZE]; }; static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }}; #ifdef CONFIG_SMP #define bh_lru_lock() local_irq_disable() #define bh_lru_unlock() local_irq_enable() #else #define bh_lru_lock() preempt_disable() #define bh_lru_unlock() preempt_enable() #endif static inline void check_irqs_on(void) { #ifdef irqs_disabled BUG_ON(irqs_disabled()); #endif } /* * The LRU management algorithm is dopey-but-simple. Sorry. 
*/ static void bh_lru_install(struct buffer_head *bh) { struct buffer_head *evictee = NULL; check_irqs_on(); bh_lru_lock(); if (__this_cpu_read(bh_lrus.bhs[0]) != bh) { struct buffer_head *bhs[BH_LRU_SIZE]; int in; int out = 0; get_bh(bh); bhs[out++] = bh; for (in = 0; in < BH_LRU_SIZE; in++) { struct buffer_head *bh2 = __this_cpu_read(bh_lrus.bhs[in]); if (bh2 == bh) { __brelse(bh2); } else { if (out >= BH_LRU_SIZE) { BUG_ON(evictee != NULL); evictee = bh2; } else { bhs[out++] = bh2; } } } while (out < BH_LRU_SIZE) bhs[out++] = NULL; memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs)); } bh_lru_unlock(); if (evictee) __brelse(evictee); } /* * Look up the bh in this cpu's LRU. If it's there, move it to the head. */ static struct buffer_head * lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size) { struct buffer_head *ret = NULL; unsigned int i; check_irqs_on(); bh_lru_lock(); for (i = 0; i < BH_LRU_SIZE; i++) { struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); if (bh && bh->b_bdev == bdev && bh->b_blocknr == block && bh->b_size == size) { if (i) { while (i) { __this_cpu_write(bh_lrus.bhs[i], __this_cpu_read(bh_lrus.bhs[i - 1])); i } __this_cpu_write(bh_lrus.bhs[0], bh); } get_bh(bh); ret = bh; break; } } bh_lru_unlock(); return ret; } /* * Perform a pagecache lookup for the matching buffer. If it's there, refresh * it in the LRU and mark it as accessed. If it is not present then return * NULL */ struct buffer_head * __find_get_block(struct block_device *bdev, sector_t block, unsigned size) { struct buffer_head *bh = lookup_bh_lru(bdev, block, size); if (bh == NULL) { bh = <API key>(bdev, block); if (bh) bh_lru_install(bh); } if (bh) touch_buffer(bh); return bh; } EXPORT_SYMBOL(__find_get_block); /* * __getblk will locate (and, if necessary, create) the buffer_head * which corresponds to the passed block_device, block and size. The * returned buffer has its reference count incremented. 
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
	/* Fast path first: per-cpu LRU / pagecache lookup. */
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size);
	return bh;
}
EXPORT_SYMBOL(__getblk);

/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	if (likely(bh)) {
		/* Fire-and-forget read; drop our reference immediately. */
		ll_rw_block(READA, 1, &bh);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread);

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
*/ static void invalidate_bh_lru(void *arg) { struct bh_lru *b = &get_cpu_var(bh_lrus); int i; for (i = 0; i < BH_LRU_SIZE; i++) { brelse(b->bhs[i]); b->bhs[i] = NULL; } put_cpu_var(bh_lrus); } static bool has_bh_in_lru(int cpu, void *dummy) { struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu); int i; for (i = 0; i < BH_LRU_SIZE; i++) { if (b->bhs[i]) return 1; } return 0; } void invalidate_bh_lrus(void) { on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL); } EXPORT_SYMBOL_GPL(invalidate_bh_lrus); void set_bh_page(struct buffer_head *bh, struct page *page, unsigned long offset) { bh->b_page = page; BUG_ON(offset >= PAGE_SIZE); if (PageHighMem(page)) bh->b_data = (char *)(0 + offset); else bh->b_data = page_address(page) + offset; } EXPORT_SYMBOL(set_bh_page); /* * Called when truncating a buffer on a page completely. */ static void discard_buffer(struct buffer_head * bh) { lock_buffer(bh); clear_buffer_dirty(bh); bh->b_bdev = NULL; clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); clear_buffer_delay(bh); <API key>(bh); unlock_buffer(bh); } /** * <API key> - invalidate part or all of a buffer-backed page * * @page: the page which is affected * @offset: the index of the truncation point * * <API key>() is called when all or part of the page has become * invalidated by a truncate operation. * * <API key>() does not have to release all buffers, but it must * ensure that no dirty buffer is left outside @offset and that no I/O * is underway against any of the blocks which are outside the truncation * point. Because the caller is about to free (and possibly reuse) those * blocks on-disk. */ void <API key>(struct page *page, unsigned long offset) { struct buffer_head *head, *bh, *next; unsigned int curr_off = 0; BUG_ON(!PageLocked(page)); if (!page_has_buffers(page)) goto out; head = page_buffers(page); bh = head; do { unsigned int next_off = curr_off + bh->b_size; next = bh->b_this_page; /* * is this block fully invalidated? 
*/ if (offset <= curr_off) discard_buffer(bh); curr_off = next_off; bh = next; } while (bh != head); /* * We release buffers only if the entire page is being invalidated. * The get_block cached value has been unconditionally invalidated, * so real IO is not possible anymore. */ if (offset == 0) try_to_release_page(page, 0); out: return; } EXPORT_SYMBOL(<API key>); /* * We attach and possibly dirty the buffers atomically wrt * <API key>() via private_lock. try_to_free_buffers * is already excluded via the page lock. */ void <API key>(struct page *page, unsigned long blocksize, unsigned long b_state) { struct buffer_head *bh, *head, *tail; head = alloc_page_buffers(page, blocksize, 1); bh = head; do { bh->b_state |= b_state; tail = bh; bh = bh->b_this_page; } while (bh); tail->b_this_page = head; spin_lock(&page->mapping->private_lock); if (PageUptodate(page) || PageDirty(page)) { bh = head; do { if (PageDirty(page)) set_buffer_dirty(bh); if (PageUptodate(page)) set_buffer_uptodate(bh); bh = bh->b_this_page; } while (bh != head); } attach_page_buffers(page, head); spin_unlock(&page->mapping->private_lock); } EXPORT_SYMBOL(<API key>); /* * We are taking a block for data and we don't want any output from any * buffer-cache aliases starting from return from that function and * until the moment when something will explicitly mark the buffer * dirty (hopefully that will not happen until we will free that block ;-) * We don't even need to mark it not-uptodate - nobody can expect * anything from a newly allocated buffer anyway. We used to used * unmap_buffer() for such invalidation, but that was wrong. We definitely * don't want to mark the alias unmapped, for example - it would confuse * anyone who might pick it with bread() afterwards... * * Also.. Note that bforget() doesn't lock the buffer. So there can * be writeout I/O going on against recently-freed buffers. We don't * wait on that I/O in bforget() - it's more efficient to wait on the I/O * only if we really need to. 
That happens here. */ void <API key>(struct block_device *bdev, sector_t block) { struct buffer_head *old_bh; might_sleep(); old_bh = <API key>(bdev, block); if (old_bh) { clear_buffer_dirty(old_bh); wait_on_buffer(old_bh); clear_buffer_req(old_bh); __brelse(old_bh); } } EXPORT_SYMBOL(<API key>); /* * NOTE! All mapped/uptodate combinations are valid: * * Mapped Uptodate Meaning * * No No "unknown" - must do get_block() * No Yes "hole" - zero-filled * Yes No "allocated" - allocated on disk, not read in * Yes Yes "valid" - allocated and up-to-date in memory. * * "Dirty" is valid only with the last case (mapped+uptodate). */ /* * While <API key> is writing back the dirty buffers under * the page lock, whoever dirtied the buffers may decide to clean them * again at any time. We handle that by only looking at the buffer * state inside lock_buffer(). * * If <API key>() is called for regular writeback * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a * locked buffer. This only can happen if someone has written the buffer * directly, with submit_bh(). At the address_space level PageWriteback * prevents this contention from occurring. * * If <API key>() is called with wbc->sync_mode == * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this * causes the writes to be flagged as synchronous writes. */ static int <API key>(struct inode *inode, struct page *page, get_block_t *get_block, struct writeback_control *wbc, bh_end_io_t *handler) { int err; sector_t block; sector_t last_block; struct buffer_head *bh, *head; const unsigned blocksize = 1 << inode->i_blkbits; int nr_underway = 0; int write_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE); BUG_ON(!PageLocked(page)); last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; if (!page_has_buffers(page)) { <API key>(page, blocksize, (1 << BH_Dirty)|(1 << BH_Uptodate)); } /* * Be very careful. 
We have no exclusion from <API key> * here, and the (potentially unmapped) buffers may become dirty at * any time. If a buffer becomes dirty here after we've inspected it * then we just miss that fact, and the page stays dirty. * * Buffers outside i_size may be dirtied by <API key>; * handle that here by just cleaning them. */ block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); head = page_buffers(page); bh = head; /* * Get all the dirty buffers mapped to disk addresses and * handle any aliases from the underlying blockdev's mapping. */ do { if (block > last_block) { /* * mapped buffers outside i_size will occur, because * this page can be outside i_size when there is a * truncate in progress. */ /* * The buffer was zeroed by <API key>() */ clear_buffer_dirty(bh); set_buffer_uptodate(bh); } else if ((!buffer_mapped(bh) || buffer_delay(bh)) && buffer_dirty(bh)) { WARN_ON(bh->b_size != blocksize); err = get_block(inode, block, bh, 1); if (err) goto recover; clear_buffer_delay(bh); if (buffer_new(bh)) { /* blockdev mappings never come here */ clear_buffer_new(bh); <API key>(bh->b_bdev, bh->b_blocknr); } } bh = bh->b_this_page; block++; } while (bh != head); do { if (!buffer_mapped(bh)) continue; /* * If it's a fully non-blocking write attempt and we cannot * lock the buffer then redirty the page. Note that this can * potentially cause a busy-wait loop from writeback threads * and kswapd activity, but those code paths have their own * higher-level throttling. */ if (wbc->sync_mode != WB_SYNC_NONE) { lock_buffer(bh); } else if (!trylock_buffer(bh)) { <API key>(wbc, page); continue; } if (<API key>(bh)) { <API key>(bh, handler); } else { unlock_buffer(bh); } } while ((bh = bh->b_this_page) != head); /* * The page and its buffers are protected by PageWriteback(), so we can * drop the bh refcounts early. 
*/ BUG_ON(PageWriteback(page)); set_page_writeback(page); do { struct buffer_head *next = bh->b_this_page; if (buffer_async_write(bh)) { submit_bh(write_op, bh); nr_underway++; } bh = next; } while (bh != head); unlock_page(page); err = 0; done: if (nr_underway == 0) { /* * The page was marked dirty, but the buffers were * clean. Someone wrote them back by hand with * ll_rw_block/submit_bh. A rare case. */ end_page_writeback(page); /* * The page and buffer_heads can be released at any time from * here on. */ } return err; recover: /* * ENOSPC, or some other error. We may already have added some * blocks to the file, so we need to write these out to avoid * exposing stale data. * The page is currently locked and not marked for writeback */ bh = head; /* Recovery: lock and submit the mapped buffers */ do { if (buffer_mapped(bh) && buffer_dirty(bh) && !buffer_delay(bh)) { lock_buffer(bh); <API key>(bh, handler); } else { /* * The buffer may have been set dirty during * attachment to a dirty page. */ clear_buffer_dirty(bh); } } while ((bh = bh->b_this_page) != head); SetPageError(page); BUG_ON(PageWriteback(page)); mapping_set_error(page->mapping, err); set_page_writeback(page); do { struct buffer_head *next = bh->b_this_page; if (buffer_async_write(bh)) { clear_buffer_dirty(bh); submit_bh(write_op, bh); nr_underway++; } bh = next; } while (bh != head); unlock_page(page); goto done; } /* * If a page has any new buffers, zero them out here, and mark them uptodate * and dirty so they'll be written out (in order to prevent uninitialised * block data from leaking). And clear the new bit. 
*/ void <API key>(struct page *page, unsigned from, unsigned to) { unsigned int block_start, block_end; struct buffer_head *head, *bh; BUG_ON(!PageLocked(page)); if (!page_has_buffers(page)) return; bh = head = page_buffers(page); block_start = 0; do { block_end = block_start + bh->b_size; if (buffer_new(bh)) { if (block_end > from && block_start < to) { if (!PageUptodate(page)) { unsigned start, size; start = max(from, block_start); size = min(to, block_end) - start; zero_user(page, start, size); set_buffer_uptodate(bh); } clear_buffer_new(bh); mark_buffer_dirty(bh); } } block_start = block_end; bh = bh->b_this_page; } while (bh != head); } EXPORT_SYMBOL(<API key>); int __block_write_begin(struct page *page, loff_t pos, unsigned len, get_block_t *get_block) { unsigned from = pos & (PAGE_CACHE_SIZE - 1); unsigned to = from + len; struct inode *inode = page->mapping->host; unsigned block_start, block_end; sector_t block; int err = 0; unsigned blocksize, bbits; struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; BUG_ON(!PageLocked(page)); BUG_ON(from > PAGE_CACHE_SIZE); BUG_ON(to > PAGE_CACHE_SIZE); BUG_ON(from > to); blocksize = 1 << inode->i_blkbits; if (!page_has_buffers(page)) <API key>(page, blocksize, 0); head = page_buffers(page); bbits = inode->i_blkbits; block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); for(bh = head, block_start = 0; bh != head || !block_start; block++, block_start=block_end, bh = bh->b_this_page) { block_end = block_start + blocksize; if (block_end <= from || block_start >= to) { if (PageUptodate(page)) { if (!buffer_uptodate(bh)) set_buffer_uptodate(bh); } continue; } if (buffer_new(bh)) clear_buffer_new(bh); if (!buffer_mapped(bh)) { WARN_ON(bh->b_size != blocksize); err = get_block(inode, block, bh, 1); if (err) break; if (buffer_new(bh)) { <API key>(bh->b_bdev, bh->b_blocknr); if (PageUptodate(page)) { clear_buffer_new(bh); set_buffer_uptodate(bh); mark_buffer_dirty(bh); continue; } if (block_end > to || block_start < 
from) zero_user_segments(page, to, block_end, block_start, from); continue; } } if (PageUptodate(page)) { if (!buffer_uptodate(bh)) set_buffer_uptodate(bh); continue; } if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh) && (block_start < from || block_end > to)) { ll_rw_block(READ, 1, &bh); *wait_bh++=bh; } } /* * If we issued read requests - let them complete. */ while(wait_bh > wait) { wait_on_buffer(*--wait_bh); if (!buffer_uptodate(*wait_bh)) err = -EIO; } if (unlikely(err)) <API key>(page, from, to); return err; } EXPORT_SYMBOL(__block_write_begin); static int <API key>(struct inode *inode, struct page *page, unsigned from, unsigned to) { unsigned block_start, block_end; int partial = 0; unsigned blocksize; struct buffer_head *bh, *head; blocksize = 1 << inode->i_blkbits; for(bh = head = page_buffers(page), block_start = 0; bh != head || !block_start; block_start=block_end, bh = bh->b_this_page) { block_end = block_start + blocksize; if (block_end <= from || block_start >= to) { if (!buffer_uptodate(bh)) partial = 1; } else { set_buffer_uptodate(bh); mark_buffer_dirty(bh); } clear_buffer_new(bh); } /* * If this is a partial write which happened to make all buffers * uptodate then we can optimize away a bogus readpage() for * the next read(). Here we 'discover' whether the page went * uptodate as a result of this (potentially partial) write. */ if (!partial) SetPageUptodate(page); return 0; } /* * block_write_begin takes care of the basic task of block allocation and * bringing partial write blocks uptodate first. * * The filesystem needs to handle block truncation upon failure. 
*/ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, get_block_t *get_block) { pgoff_t index = pos >> PAGE_CACHE_SHIFT; struct page *page; int status; page = <API key>(mapping, index, flags); if (!page) return -ENOMEM; status = __block_write_begin(page, pos, len, get_block); if (unlikely(status)) { unlock_page(page); page_cache_release(page); page = NULL; } *pagep = page; return status; } EXPORT_SYMBOL(block_write_begin); int block_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { struct inode *inode = mapping->host; unsigned start; start = pos & (PAGE_CACHE_SIZE - 1); if (unlikely(copied < len)) { /* * The buffers that were written will now be uptodate, so we * don't have to worry about a readpage reading them and * overwriting a partial write. However if we have encountered * a short write and only partially written into a buffer, it * will not be marked uptodate, so a readpage might come in and * destroy our partial write. * * Do the simplest thing, and just treat any short write to a * non uptodate page as a zero-length write, and force the * caller to redo the whole thing. */ if (!PageUptodate(page)) copied = 0; <API key>(page, start+copied, start+len); } flush_dcache_page(page); /* This could be a short (even 0-length) commit */ <API key>(inode, page, start, start+copied); return copied; } EXPORT_SYMBOL(block_write_end); int generic_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { struct inode *inode = mapping->host; int i_size_changed = 0; copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); /* * No need to use i_size_read() here, the i_size * cannot change under us because we hold i_mutex. 
* * But it's important to update i_size while still holding page lock: * page writeout could otherwise come in and zero beyond i_size. */ if (pos+copied > inode->i_size) { i_size_write(inode, pos+copied); i_size_changed = 1; } unlock_page(page); page_cache_release(page); /* * Don't mark the inode dirty under page lock. First, it unnecessarily * makes the holding time of page lock longer. Second, it forces lock * ordering of page lock and transaction start for journaling * filesystems. */ if (i_size_changed) mark_inode_dirty(inode); return copied; } EXPORT_SYMBOL(generic_write_end); /* * <API key> checks whether buffers within a page are * uptodate or not. * * Returns true if all buffers which correspond to a file portion * we want to read are uptodate. */ int <API key>(struct page *page, read_descriptor_t *desc, unsigned long from) { struct inode *inode = page->mapping->host; unsigned block_start, block_end, blocksize; unsigned to; struct buffer_head *bh, *head; int ret = 1; if (!page_has_buffers(page)) return 0; blocksize = 1 << inode->i_blkbits; to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count); to = from + to; if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize) return 0; head = page_buffers(page); bh = head; block_start = 0; do { block_end = block_start + blocksize; if (block_end > from && block_start < to) { if (!buffer_uptodate(bh)) { ret = 0; break; } if (block_end >= to) break; } block_start = block_end; bh = bh->b_this_page; } while (bh != head); return ret; } EXPORT_SYMBOL(<API key>); int <API key>(struct page *page, get_block_t *get_block) { struct inode *inode = page->mapping->host; sector_t iblock, lblock; struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; unsigned int blocksize; int nr, i; int fully_mapped = 1; BUG_ON(!PageLocked(page)); blocksize = 1 << inode->i_blkbits; if (!page_has_buffers(page)) <API key>(page, blocksize, 0); head = page_buffers(page); iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 
lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits; bh = head; nr = 0; i = 0; do { if (buffer_uptodate(bh)) continue; if (!buffer_mapped(bh)) { int err = 0; fully_mapped = 0; if (iblock < lblock) { WARN_ON(bh->b_size != blocksize); err = get_block(inode, iblock, bh, 0); if (err) SetPageError(page); } if (!buffer_mapped(bh)) { zero_user(page, i * blocksize, blocksize); if (!err) set_buffer_uptodate(bh); continue; } /* * get_block() might have updated the buffer * synchronously */ if (buffer_uptodate(bh)) continue; } arr[nr++] = bh; } while (i++, iblock++, (bh = bh->b_this_page) != head); if (fully_mapped) SetPageMappedToDisk(page); if (!nr) { /* * All buffers are uptodate - we can set the page uptodate * as well. But not if get_block() returned an error. */ if (!PageError(page)) SetPageUptodate(page); unlock_page(page); return 0; } /* Stage two: lock the buffers */ for (i = 0; i < nr; i++) { bh = arr[i]; lock_buffer(bh); <API key>(bh); } /* * Stage 3: start the IO. Check for uptodateness * inside the buffer lock in case another process reading * the underlying blockdev brought it uptodate (the sct fix). */ for (i = 0; i < nr; i++) { bh = arr[i]; if (buffer_uptodate(bh)) <API key>(bh, 1); else submit_bh(READ, bh); } return 0; } EXPORT_SYMBOL(<API key>); /* utility function for filesystems that need to do work on expanding * truncates. Uses filesystem pagecache writes to allow the filesystem to * deal with the hole. 
*/ int <API key>(struct inode *inode, loff_t size) { struct address_space *mapping = inode->i_mapping; struct page *page; void *fsdata; int err; err = inode_newsize_ok(inode, size); if (err) goto out; err = <API key>(NULL, mapping, size, 0, <API key>|<API key>, &page, &fsdata); if (err) goto out; err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata); BUG_ON(err > 0); out: return err; } EXPORT_SYMBOL(<API key>); static int cont_expand_zero(struct file *file, struct address_space *mapping, loff_t pos, loff_t *bytes) { struct inode *inode = mapping->host; unsigned blocksize = 1 << inode->i_blkbits; struct page *page; void *fsdata; pgoff_t index, curidx; loff_t curpos; unsigned zerofrom, offset, len; int err = 0; index = pos >> PAGE_CACHE_SHIFT; offset = pos & ~PAGE_CACHE_MASK; while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) { zerofrom = curpos & ~PAGE_CACHE_MASK; if (zerofrom & (blocksize-1)) { *bytes |= (blocksize-1); (*bytes)++; } len = PAGE_CACHE_SIZE - zerofrom; err = <API key>(file, mapping, curpos, len, <API key>, &page, &fsdata); if (err) goto out; zero_user(page, zerofrom, len); err = pagecache_write_end(file, mapping, curpos, len, len, page, fsdata); if (err < 0) goto out; BUG_ON(err != len); err = 0; <API key>(mapping); } /* page covers the boundary, find the boundary offset */ if (index == curidx) { zerofrom = curpos & ~PAGE_CACHE_MASK; /* if we will expand the thing last block will be filled */ if (offset <= zerofrom) { goto out; } if (zerofrom & (blocksize-1)) { *bytes |= (blocksize-1); (*bytes)++; } len = offset - zerofrom; err = <API key>(file, mapping, curpos, len, <API key>, &page, &fsdata); if (err) goto out; zero_user(page, zerofrom, len); err = pagecache_write_end(file, mapping, curpos, len, len, page, fsdata); if (err < 0) goto out; BUG_ON(err != len); err = 0; } out: return err; } /* * For moronic filesystems that do not allow holes in file. * We may have to extend the file. 
*/ int cont_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata, get_block_t *get_block, loff_t *bytes) { struct inode *inode = mapping->host; unsigned blocksize = 1 << inode->i_blkbits; unsigned zerofrom; int err; err = cont_expand_zero(file, mapping, pos, bytes); if (err) return err; zerofrom = *bytes & ~PAGE_CACHE_MASK; if (pos+len > *bytes && zerofrom & (blocksize-1)) { *bytes |= (blocksize-1); (*bytes)++; } return block_write_begin(mapping, pos, len, flags, pagep, get_block); } EXPORT_SYMBOL(cont_write_begin); int block_commit_write(struct page *page, unsigned from, unsigned to) { struct inode *inode = page->mapping->host; <API key>(inode,page,from,to); return 0; } EXPORT_SYMBOL(block_commit_write); /* * block_page_mkwrite() is not allowed to change the file size as it gets * called from a page fault handler when a page is first dirtied. Hence we must * be careful to check for EOF conditions here. We set the page up correctly * for a written page which means we get ENOSPC checking when writing into * holes and correct delalloc and unwritten extent mapping on filesystems that * support these features. * * We are not allowed to take the i_mutex here so we have to play games to * protect against truncate races as the page could now be beyond EOF. Because * truncate writes the inode size before removing pages, once we have the * page lock we can determine safely if the page is beyond EOF. If it is not * beyond EOF, then the page is guaranteed safe against truncation until we * unlock the page. * * Direct callers of this function should protect against filesystem freezing * using sb_start_write() - sb_end_write() functions. 
*/ int <API key>(struct vm_area_struct *vma, struct vm_fault *vmf, get_block_t get_block) { struct page *page = vmf->page; struct inode *inode = vma->vm_file->f_path.dentry->d_inode; unsigned long end; loff_t size; int ret; /* * Update file times before taking page lock. We may end up failing the * fault so this update may be superfluous but who really cares... */ file_update_time(vma->vm_file); lock_page(page); size = i_size_read(inode); if ((page->mapping != inode->i_mapping) || (page_offset(page) > size)) { /* We overload EFAULT to mean page got truncated */ ret = -EFAULT; goto out_unlock; } /* page is wholly or partially inside EOF */ if (((page->index + 1) << PAGE_CACHE_SHIFT) > size) end = size & ~PAGE_CACHE_MASK; else end = PAGE_CACHE_SIZE; ret = __block_write_begin(page, 0, end, get_block); if (!ret) ret = block_commit_write(page, 0, end); if (unlikely(ret < 0)) goto out_unlock; set_page_dirty(page); <API key>(page); return 0; out_unlock: unlock_page(page); return ret; } EXPORT_SYMBOL(<API key>); int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, get_block_t get_block) { int ret; struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb; sb_start_pagefault(sb); ret = <API key>(vma, vmf, get_block); sb_end_pagefault(sb); return <API key>(ret); } EXPORT_SYMBOL(block_page_mkwrite); /* * nobh_write_begin()'s prereads are special: the buffer_heads are freed * immediately, while under the page lock. So it needs a special end_io * handler which does not touch the bh after unlocking it. 
 */
static void <API key>(struct buffer_head *bh, int uptodate)
{
	/* NOTE(review): redacted; the bh is freed right after unlock, so the
	 * callee must not touch it afterwards (see comment above). */
	<API key>(bh, uptodate);
}

/*
 * Attach the NULL-terminated buffer list @head to @page: closes the list
 * into the usual ring and propagates the page's dirty state onto every
 * buffer.  Caller must hold the page lock.
 */
static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh;

	BUG_ON(!PageLocked(page));

	spin_lock(&page->mapping->private_lock);
	bh = head;
	do {
		if (PageDirty(page))
			set_buffer_dirty(bh);
		if (!bh->b_this_page)
			bh->b_this_page = head;	/* close the ring */
		bh = bh->b_this_page;
	} while (bh != head);
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}

/*
 * On entry, the page is fully not uptodate.
 * On exit the page is fully uptodate in the areas outside (from,to)
 * The filesystem needs to handle block truncation upon failure.
 */
int nobh_write_begin(struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata,
			get_block_t *get_block)
{
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head *head, *bh;
	struct page *page;
	pgoff_t index;
	unsigned from, to;
	unsigned block_in_page;
	unsigned block_start, block_end;
	sector_t block_in_file;
	int nr_reads = 0;
	int ret = 0;
	int is_mapped_to_disk = 1;

	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	/* NOTE(review): redacted; presumably grab_cache_page_write_begin. */
	page = <API key>(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;
	*fsdata = NULL;

	if (page_has_buffers(page)) {
		/* Buffers already attached: use the ordinary path. */
		ret = __block_write_begin(page, pos, len, get_block);
		if (unlikely(ret))
			goto out_release;
		return ret;
	}

	if (PageMappedToDisk(page))
		return 0;	/* page stays locked for nobh_write_end() */

	/*
	 * Allocate buffers so that we can keep track of state, and potentially
	 * attach them to the page if an error occurs. In the common case of
	 * no error, they will just be freed again without ever being attached
	 * to the page (which is all OK, because we're under the page lock).
	 *
	 * Be careful: the buffer linked list is a NULL terminated one, rather
	 * than the circular one we're used to.
	 */
	head = alloc_page_buffers(page, blocksize, 0);
	if (!head) {
		ret = -ENOMEM;
		goto out_release;
	}

	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);

	/*
	 * We loop across all blocks in the page, whether or not they are
	 * part of the affected region.  This is so we can discover if the
	 * page is fully mapped-to-disk.
	 */
	for (block_start = 0, block_in_page = 0, bh = head;
		  block_start < PAGE_CACHE_SIZE;
		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
		int create;

		block_end = block_start + blocksize;
		bh->b_state = 0;
		create = 1;
		if (block_start >= to)
			create = 0;
		ret = get_block(inode, block_in_file + block_in_page,
					bh, create);
		if (ret)
			goto failed;
		if (!buffer_mapped(bh))
			is_mapped_to_disk = 0;
		if (buffer_new(bh))
			<API key>(bh->b_bdev, bh->b_blocknr);
		if (PageUptodate(page)) {
			set_buffer_uptodate(bh);
			continue;
		}
		if (buffer_new(bh) || !buffer_mapped(bh)) {
			zero_user_segments(page, block_start, from,
							to, block_end);
			continue;
		}
		if (buffer_uptodate(bh))
			continue;	/* reiserfs does this */
		if (block_start < from || block_end > to) {
			/* Partially overwritten block: preread it. */
			lock_buffer(bh);
			bh->b_end_io = <API key>;
			submit_bh(READ, bh);
			nr_reads++;
		}
	}

	if (nr_reads) {
		/*
		 * The page is locked, so these buffers are protected from
		 * any VM or truncate activity.  Hence we don't need to care
		 * for the buffer_head refcounts.
		 */
		for (bh = head; bh; bh = bh->b_this_page) {
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				ret = -EIO;
		}
		if (ret)
			goto failed;
	}

	if (is_mapped_to_disk)
		SetPageMappedToDisk(page);

	*fsdata = head; /* to be released by nobh_write_end */

	return 0;

failed:
	BUG_ON(!ret);
	/*
	 * Error recovery is a bit difficult. We need to zero out blocks that
	 * were newly allocated, and dirty them to ensure they get written out.
	 * Buffers need to be attached to the page at this point, otherwise
	 * the handling of potential IO errors during writeout would be hard
	 * (could try doing synchronous writeout, but what if that fails too?)
	 */
	attach_nobh_buffers(page, head);
	/* NOTE(review): redacted; presumably page_zero_new_buffers. */
	<API key>(page, from, to);

out_release:
	unlock_page(page);
	page_cache_release(page);
	*pagep = NULL;

	return ret;
}
EXPORT_SYMBOL(nobh_write_begin);

int nobh_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *head = fsdata;
	struct buffer_head *bh;
	BUG_ON(fsdata != NULL && page_has_buffers(page));

	/* Short copy: attach the buffers so state can be tracked. */
	if (unlikely(copied < len) && head)
		attach_nobh_buffers(page, head);
	if (page_has_buffers(page))
		return generic_write_end(file, mapping, pos, len,
					copied, page, fsdata);

	SetPageUptodate(page);
	set_page_dirty(page);
	if (pos+copied > inode->i_size) {
		i_size_write(inode, pos+copied);
		mark_inode_dirty(inode);
	}

	unlock_page(page);
	page_cache_release(page);

	/* Free the never-attached buffer list built by nobh_write_begin(). */
	while (head) {
		bh = head;
		head = head->b_this_page;
		free_buffer_head(bh);
	}

	return copied;
}
EXPORT_SYMBOL(nobh_write_end);

/*
 * nobh_writepage() - based on <API key>() except
 * that it tries to operate without attaching bufferheads to
 * the page.
 */
int nobh_writepage(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int ret;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto out;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers.  For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
#if 0
		/* Not really sure about this  - do we need this ? */
		if (page->mapping->a_ops->invalidatepage)
			page->mapping->a_ops->invalidatepage(page, offset);
#endif
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size.
 It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
out:
	/* Try the bufferhead-less mpage path first; -EAGAIN means it could
	 * not handle this page (e.g. holes/confused blocks), so fall back to
	 * the classic buffer_head writepage path.
	 * NOTE(review): redacted; presumably __block_write_full_page with
	 * end_buffer_async_write — confirm against upstream. */
	ret = mpage_writepage(page, get_block, wbc);
	if (ret == -EAGAIN)
		ret = <API key>(inode, page, get_block, wbc,
					<API key>);
	return ret;
}
EXPORT_SYMBOL(nobh_writepage);

/*
 * Zero the partial block containing byte offset @from, preferring not to
 * attach buffer_heads to the page; bounces to block_truncate_page() when
 * the page already has (or acquires) buffers.
 */
int nobh_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	sector_t iblock;
	unsigned length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head map_bh;
	int err;

	blocksize = 1 << inode->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (page_has_buffers(page)) {
has_buffers:
		unlock_page(page);
		page_cache_release(page);
		return block_truncate_page(mapping, from, get_block);
	}

	/* Find the buffer that contains "offset" */
	pos = blocksize;
	while (offset >= pos) {
		iblock++;
		pos += blocksize;
	}

	map_bh.b_size = blocksize;
	map_bh.b_state = 0;
	err = get_block(inode, iblock, &map_bh, 0);
	if (err)
		goto unlock;
	/* unmapped? It's a hole - nothing to do */
	if (!buffer_mapped(&map_bh))
		goto unlock;

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (!PageUptodate(page)) {
		err = mapping->a_ops->readpage(NULL, page);
		if (err) {
			/* readpage unlocked the page on error. */
			page_cache_release(page);
			goto out;
		}
		lock_page(page);
		if (!PageUptodate(page)) {
			err = -EIO;
			goto unlock;
		}
		if (page_has_buffers(page))
			goto has_buffers;
	}
	zero_user(page, offset, length);
	set_page_dirty(page);
	err = 0;

unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return err;
}
EXPORT_SYMBOL(nobh_truncate_page);

/*
 * Zero the partial block containing byte offset @from (classic
 * buffer_head path); used on truncate so mmap readers past EOF see zeroes.
 */
int block_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	sector_t iblock;
	unsigned length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head *bh;
	int err;

	blocksize = 1 << inode->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (!page_has_buffers(page))
		<API key>(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (!buffer_mapped(bh)) {
		WARN_ON(bh->b_size != blocksize);
		err = get_block(inode, iblock, bh, 0);
		if (err)
			goto unlock;
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt.
 */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	zero_user(page, offset, length);
	mark_buffer_dirty(bh);
	err = 0;

unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return err;
}
EXPORT_SYMBOL(block_truncate_page);

/*
 * The generic ->writepage function for buffer-backed address_spaces
 * this form passes in the end_io handler used to finish the IO.
 */
int <API key>(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc, bh_end_io_t *handler)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		return <API key>(inode, page, get_block, wbc,
					       handler);

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers.  For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
		do_invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	return <API key>(inode, page, get_block, wbc, handler);
}
EXPORT_SYMBOL(<API key>);

/*
 * The generic ->writepage function for buffer-backed address_spaces
 * NOTE(review): names redacted; presumably block_write_full_page
 * delegating to block_write_full_page_endio with end_buffer_async_write.
 */
int <API key>(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	return <API key>(page, get_block, wbc, <API key>);
}
EXPORT_SYMBOL(<API key>);

/*
 * Map logical @block of @mapping's inode to its on-disk block number for
 * the ->bmap() address_space op.  get_block is called without create, so
 * holes report 0.
 */
sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
			    get_block_t *get_block)
{
	struct buffer_head tmp;
	struct inode *inode = mapping->host;
	tmp.b_state = 0;
	tmp.b_blocknr = 0;
	tmp.b_size = 1 << inode->i_blkbits;
	get_block(inode, block, &tmp, 0);
	return tmp.b_blocknr;
}
EXPORT_SYMBOL(generic_block_bmap);

/* bio completion: propagate bio status to the buffer_head's b_end_io. */
static void end_bio_bh_io_sync(struct bio *bio, int err)
{
	struct buffer_head *bh = bio->bi_private;

	if (err == -EOPNOTSUPP) {
		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
	}

	if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
		set_bit(BH_Quiet, &bh->b_state);

	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
	bio_put(bio);
}

/*
 * Build a single-segment bio around @bh and submit it with @rw.
 * Caller must hold the buffer locked and mapped, with b_end_io set.
 * Returns 0, or -EOPNOTSUPP if the device rejected the request type.
 */
int submit_bh(int rw, struct buffer_head * bh)
{
	struct bio *bio;
	int ret = 0;

	BUG_ON(!buffer_locked(bh));
	BUG_ON(!buffer_mapped(bh));
	BUG_ON(!bh->b_end_io);
	BUG_ON(buffer_delay(bh));
	BUG_ON(buffer_unwritten(bh));

	/*
	 * Only clear out a write error when rewriting
	 */
	if (test_set_buffer_req(bh) && (rw & WRITE))
		<API key>(bh);

	/*
	 * from here on down, it's all bio -- do the initial mapping,
	 * submit_bio -> <API key> may further map this bio around
	 */
	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_io_vec[0].bv_page = bh->b_page;
	bio->bi_io_vec[0].bv_len = bh->b_size;
	bio->bi_io_vec[0].bv_offset = bh_offset(bh);

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = bh->b_size;

	bio->bi_end_io = end_bio_bh_io_sync;
	bio->bi_private = bh;

	/* Extra ref so the EOPNOTSUPP flag can be read after submission. */
	bio_get(bio);
	submit_bio(rw, bio);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(submit_bh);

/**
 *
 ll_rw_block: low-level access to block devices (DEPRECATED)
 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
 * @nr: number of &struct buffer_heads in the array
 * @bhs: array of pointers to &struct buffer_head
 *
 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
 * requests an I/O operation on them, either a %READ or a %WRITE.  The third
 * %READA option is described in the documentation for <API key>()
 * which ll_rw_block() calls.
 *
 * This function drops any buffer that it cannot get a lock on (with the
 * BH_Lock state bit), any buffer that appears to be clean when doing a write
 * request, and any buffer that appears to be up-to-date when doing read
 * request.  Further it marks as clean buffers that are processed for
 * writing (the buffer cache won't assume that they are actually clean
 * until the buffer gets unlocked).
 *
 * ll_rw_block sets b_end_io to simple completion handler that marks
 * the buffer up-to-date (if approriate), unlocks the buffer and wakes
 * any waiters.
 *
 * All of the buffers must be for the same device, and must also be a
 * multiple of the current approved size for the device.
 */
void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
	int i;

	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = bhs[i];

		if (!trylock_buffer(bh))
			continue;	/* busy: skipped, per contract above */
		if (rw == WRITE) {
			if (<API key>(bh)) {
				bh->b_end_io = <API key>;
				get_bh(bh);
				submit_bh(WRITE, bh);
				continue;
			}
		} else {
			if (!buffer_uptodate(bh)) {
				bh->b_end_io = <API key>;
				get_bh(bh);
				submit_bh(rw, bh);
				continue;
			}
		}
		/* Nothing to do for this buffer: drop the lock we took. */
		unlock_buffer(bh);
	}
}
EXPORT_SYMBOL(ll_rw_block);

/*
 * Start asynchronous writeout of @bh if (and only if) it is dirty;
 * clean buffers are left alone.  Takes a bh reference that the end_io
 * handler releases.
 */
void write_dirty_buffer(struct buffer_head *bh, int rw)
{
	lock_buffer(bh);
	if (!<API key>(bh)) {
		unlock_buffer(bh);
		return;
	}
	bh->b_end_io = <API key>;
	get_bh(bh);
	submit_bh(rw, bh);
}
EXPORT_SYMBOL(write_dirty_buffer);

/*
 * For a data-integrity writeout, we need to wait upon any in-progress I/O
 * and then start new I/O and then wait upon it.  The caller must have a ref on
 * the buffer_head.
 */
int __sync_dirty_buffer(struct buffer_head *bh, int rw)
{
	int ret = 0;

	WARN_ON(atomic_read(&bh->b_count) < 1);
	lock_buffer(bh);	/* waits for any in-flight I/O */
	if (<API key>(bh)) {
		get_bh(bh);
		bh->b_end_io = <API key>;
		ret = submit_bh(rw, bh);
		wait_on_buffer(bh);
		if (!ret && !buffer_uptodate(bh))
			ret = -EIO;
	} else {
		unlock_buffer(bh);
	}
	return ret;
}
EXPORT_SYMBOL(__sync_dirty_buffer);

/* Data-integrity writeout with the default synchronous write hint. */
int sync_dirty_buffer(struct buffer_head *bh)
{
	return __sync_dirty_buffer(bh, WRITE_SYNC);
}
EXPORT_SYMBOL(sync_dirty_buffer);

/*
 * try_to_free_buffers() checks if all the buffers on this particular page
 * are unused, and releases them if so.
 *
 * Exclusion against try_to_free_buffers may be obtained by either
 * locking the page or by holding its mapping's private_lock.
 *
 * If the page is dirty but all the buffers are clean then we need to
 * be sure to mark the page clean as well.  This is because the page
 * may be against a block device, and a later reattachment of buffers
 * to a dirty page will set *all* buffers dirty.  Which would corrupt
 * filesystem data on the same device.
 *
 * The same applies to regular filesystem pages: if all the buffers are
 * clean then we set the page clean and proceed.  To do that, we require
 * total exclusion from <API key>().  That is obtained with
 * private_lock.
 *
 * try_to_free_buffers() is non-blocking.
 */
static inline int buffer_busy(struct buffer_head *bh)
{
	/* Busy = still referenced, dirty, or locked. */
	return atomic_read(&bh->b_count) |
		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}

/*
 * Detach the page's buffer ring and hand it back through
 * @buffers_to_free if no buffer is busy.  Returns 1 on success, 0 if any
 * buffer is busy (in which case nothing is changed).
 */
static int
drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh;

	/* First pass: refuse if anything is busy; surface write errors. */
	bh = head;
	do {
		if (<API key>(bh) && page->mapping)
			set_bit(AS_EIO, &page->mapping->flags);
		if (buffer_busy(bh))
			goto failed;
		bh = bh->b_this_page;
	} while (bh != head);

	/* Second pass: drop per-inode association, then detach the ring. */
	do {
		struct buffer_head *next = bh->b_this_page;

		if (bh->b_assoc_map)
			<API key>(bh);
		bh = next;
	} while (bh != head);
	*buffers_to_free = head;
	<API key>(page);
	return 1;
failed:
	return 0;
}

int try_to_free_buffers(struct page *page)
{
	struct address_space * const mapping = page->mapping;
	struct buffer_head *buffers_to_free = NULL;
	int ret = 0;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping == NULL) {		/* can this still happen? */
		ret = drop_buffers(page, &buffers_to_free);
		goto out;
	}

	spin_lock(&mapping->private_lock);
	ret = drop_buffers(page, &buffers_to_free);

	/*
	 * If the filesystem writes its buffers by hand (eg ext3)
	 * then we can have clean buffers against a dirty page.  We
	 * clean the page here; otherwise the VM will never notice
	 * that the filesystem did any IO at all.
	 *
	 * Also, during truncate, discard_buffer will have marked all
	 * the page's buffers clean.  We discover that here and clean
	 * the page also.
	 *
	 * private_lock must be held over this entire operation in order
	 * to synchronise against <API key> and prevent the
	 * dirty bit from being lost.
	 */
	if (ret)
		cancel_dirty_page(page, PAGE_CACHE_SIZE);
	spin_unlock(&mapping->private_lock);
out:
	/* Free outside the lock; the heads are ours now. */
	if (buffers_to_free) {
		struct buffer_head *bh = buffers_to_free;

		do {
			struct buffer_head *next = bh->b_this_page;
			free_buffer_head(bh);
			bh = next;
		} while (bh != buffers_to_free);
	}
	return ret;
}
EXPORT_SYMBOL(try_to_free_buffers);

/*
 * There are no bdflush tunables left.
 But distributions are
 * still running obsolete flush daemons, so we terminate them here.
 *
 * Use of bdflush() is deprecated and will be removed in a future kernel.
 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
 */
SYSCALL_DEFINE2(bdflush, int, func, long, data)
{
	static int msg_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Rate-limit the nag message to 5 occurrences. */
	if (msg_count < 5) {
		msg_count++;
		printk(KERN_INFO
			"warning: process `%s' used the obsolete bdflush"
			" system call\n", current->comm);
		printk(KERN_INFO "Fix your initscripts?\n");
	}

	if (func == 1)
		do_exit(0);
	return 0;
}

/*
 * Buffer-head allocation
 */
static struct kmem_cache *bh_cachep __read_mostly;

/*
 * Once the number of bh's in the machine exceeds this level, we start
 * stripping them in writeback.
 */
static int max_buffer_heads;

int <API key>;

struct bh_accounting {
	int nr;			/* Number of live bh's */
	int ratelimit;		/* Limit cacheline bouncing */
};

static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};

/*
 * Recompute the global "too many buffer_heads" flag; rate-limited to one
 * recalculation per 4096 alloc/free events per CPU.
 */
static void recalc_bh_state(void)
{
	int i;
	int tot = 0;

	if (<API key>(bh_accounting.ratelimit) - 1 < 4096)
		return;
	__this_cpu_write(bh_accounting.ratelimit, 0);
	for_each_online_cpu(i)
		tot += per_cpu(bh_accounting, i).nr;
	<API key> = (tot > max_buffer_heads);
}

struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
	if (ret) {
		INIT_LIST_HEAD(&ret->b_assoc_buffers);
		preempt_disable();
		__this_cpu_inc(bh_accounting.nr);
		recalc_bh_state();
		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(alloc_buffer_head);

void free_buffer_head(struct buffer_head *bh)
{
	BUG_ON(!list_empty(&bh->b_assoc_buffers));
	kmem_cache_free(bh_cachep, bh);
	preempt_disable();
	__this_cpu_dec(bh_accounting.nr);
	recalc_bh_state();
	preempt_enable();
}
EXPORT_SYMBOL(free_buffer_head);

/* Drain a dead CPU's bh LRU and fold its accounting into this CPU. */
static void buffer_exit_cpu(int cpu)
{
	int i;
	struct bh_lru *b = &per_cpu(bh_lrus, cpu);

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
	per_cpu(bh_accounting, cpu).nr = 0;
}

static int buffer_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		buffer_exit_cpu((unsigned long)hcpu);
	return NOTIFY_OK;
}

/**
 * bh_uptodate_or_lock - Test whether the buffer is uptodate
 * @bh: struct buffer_head
 *
 * Return true if the buffer is up-to-date and false,
 * with the buffer locked, if not.
 */
int bh_uptodate_or_lock(struct buffer_head *bh)
{
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);
		if (!buffer_uptodate(bh))
			return 0;
		unlock_buffer(bh);
	}
	return 1;
}
EXPORT_SYMBOL(bh_uptodate_or_lock);

/**
 * bh_submit_read - Submit a locked buffer for reading
 * @bh: struct buffer_head
 *
 * Returns zero on success and -EIO on error.
 */
int bh_submit_read(struct buffer_head *bh)
{
	BUG_ON(!buffer_locked(bh));

	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}

	get_bh(bh);
	bh->b_end_io = <API key>;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return 0;
	return -EIO;
}
EXPORT_SYMBOL(bh_submit_read);

void __init buffer_init(void)
{
	int nrpages;

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
				(<API key>|SLAB_PANIC|
				SLAB_MEM_SPREAD),
				NULL);

	/*
	 * Limit the bh occupancy to 10% of ZONE_NORMAL
	 */
	nrpages = (<API key>() * 10) / 100;
	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
	hotcpu_notifier(buffer_cpu_notify, 0);
}
# Testing and Debugging ## Testing [Moved here](faq_misc.md#testing) ## Debugging :id=debugging [Moved here](faq_debug.md#debugging)
package com.skia; import javax.microedition.khronos.egl.EGL10; import javax.microedition.khronos.egl.EGLConfig; import javax.microedition.khronos.egl.EGLDisplay; import javax.microedition.khronos.opengles.GL10; import android.content.Context; import android.opengl.EGL14; import android.opengl.GLSurfaceView; import android.os.Build; import android.util.Log; import android.view.MotionEvent; public class SkiaSampleView extends GLSurfaceView { private final SkiaSampleRenderer mSampleRenderer; private boolean mRequestedOpenGLAPI; // true == use (desktop) OpenGL. false == use OpenGL ES. private int <API key>; public SkiaSampleView(Context ctx, String cmdLineFlags, boolean useOpenGL, int msaaSampleCount) { super(ctx); mSampleRenderer = new SkiaSampleRenderer(this, cmdLineFlags); <API key> = msaaSampleCount; <API key>(2); if (Build.VERSION.SDK_INT < Build.VERSION_CODES.JELLY_BEAN_MR1) { setEGLConfigChooser(8, 8, 8, 8, 0, 8); } else { mRequestedOpenGLAPI = useOpenGL; setEGLConfigChooser(new <API key>()); } setRenderer(mSampleRenderer); setRenderMode(GLSurfaceView.<API key>); } @Override public boolean onTouchEvent(MotionEvent event) { int count = event.getPointerCount(); for (int i = 0; i < count; i++) { final float x = event.getX(i); final float y = event.getY(i); final int owner = event.getPointerId(i); int action = event.getAction() & MotionEvent.ACTION_MASK; switch (action) { case MotionEvent.ACTION_POINTER_UP: action = MotionEvent.ACTION_UP; break; case MotionEvent.ACTION_POINTER_DOWN: action = MotionEvent.ACTION_DOWN; break; default: break; } final int finalAction = action; queueEvent(new Runnable() { @Override public void run() { mSampleRenderer.handleClick(owner, x, y, finalAction); } }); } return true; } public void inval() { queueEvent(new Runnable() { @Override public void run() { mSampleRenderer.postInval(); } }); } public void terminate() { queueEvent(new Runnable() { @Override public void run() { mSampleRenderer.term(); } }); } public void showOverview() { 
queueEvent(new Runnable() { @Override public void run() { mSampleRenderer.showOverview(); } }); } public void nextSample() { queueEvent(new Runnable() { @Override public void run() { mSampleRenderer.nextSample(); } }); } public void previousSample() { queueEvent(new Runnable() { @Override public void run() { mSampleRenderer.previousSample(); } }); } public void goToSample(final int position) { queueEvent(new Runnable() { @Override public void run() { mSampleRenderer.goToSample(position); } }); } public void toggleRenderingMode() { queueEvent(new Runnable() { @Override public void run() { mSampleRenderer.toggleRenderingMode(); } }); } public void toggleSlideshow() { queueEvent(new Runnable() { @Override public void run() { mSampleRenderer.toggleSlideshow(); } }); } public void toggleFPS() { queueEvent(new Runnable() { @Override public void run() { mSampleRenderer.toggleFPS(); } }); } public void toggleTiling() { queueEvent(new Runnable() { @Override public void run() { mSampleRenderer.toggleTiling(); } }); } public void toggleBBox() { queueEvent(new Runnable() { @Override public void run() { mSampleRenderer.toggleBBox(); } }); } public void saveToPDF() { queueEvent(new Runnable() { @Override public void run() { mSampleRenderer.saveToPDF(); } }); } public boolean getUsesOpenGLAPI() { return mRequestedOpenGLAPI; } public int getMSAASampleCount() { return mSampleRenderer.getMSAASampleCount(); } private class <API key> implements GLSurfaceView.EGLConfigChooser { @Override public EGLConfig chooseConfig(EGL10 egl, EGLDisplay display) { int numConfigs = 0; int[] configSpec = null; int[] value = new int[1]; int[] validAPIs = new int[] { EGL14.EGL_OPENGL_API, EGL14.EGL_OPENGL_ES_API }; int initialAPI = mRequestedOpenGLAPI ? 
0 : 1; for (int i = initialAPI; i < validAPIs.length && numConfigs == 0; i++) { int currentAPI = validAPIs[i]; EGL14.eglBindAPI(currentAPI); // setup the renderableType which will only be included in the // spec if we are attempting to get access to the OpenGL APIs. int renderableType = EGL14.EGL_OPENGL_BIT; if (currentAPI == EGL14.EGL_OPENGL_API) { renderableType = EGL14.EGL_OPENGL_ES2_BIT; } if (<API key> > 0) { configSpec = new int[] { EGL10.EGL_RED_SIZE, 8, EGL10.EGL_GREEN_SIZE, 8, EGL10.EGL_BLUE_SIZE, 8, EGL10.EGL_ALPHA_SIZE, 8, EGL10.EGL_DEPTH_SIZE, 0, EGL10.EGL_STENCIL_SIZE, 8, EGL10.EGL_SAMPLE_BUFFERS, 1, EGL10.EGL_SAMPLES, <API key>, EGL10.EGL_RENDERABLE_TYPE, renderableType, EGL10.EGL_NONE }; // EGL_RENDERABLE_TYPE is only needed when attempting to use // the OpenGL API (not ES) and causes many EGL drivers to fail // with a BAD_ATTRIBUTE error. if (!mRequestedOpenGLAPI) { configSpec[16] = EGL10.EGL_NONE; Log.i("Skia", "spec: " + configSpec); } if (!egl.eglChooseConfig(display, configSpec, null, 0, value)) { Log.i("Skia", "Could not get MSAA context count: " + <API key>); } numConfigs = value[0]; } if (numConfigs <= 0) { // Try without multisampling. configSpec = new int[] { EGL10.EGL_RED_SIZE, 8, EGL10.EGL_GREEN_SIZE, 8, EGL10.EGL_BLUE_SIZE, 8, EGL10.EGL_ALPHA_SIZE, 8, EGL10.EGL_DEPTH_SIZE, 0, EGL10.EGL_STENCIL_SIZE, 8, EGL10.EGL_RENDERABLE_TYPE, renderableType, EGL10.EGL_NONE }; // EGL_RENDERABLE_TYPE is only needed when attempting to use // the OpenGL API (not ES) and causes many EGL drivers to fail // with a BAD_ATTRIBUTE error. if (!mRequestedOpenGLAPI) { configSpec[12] = EGL10.EGL_NONE; Log.i("Skia", "spec: " + configSpec); } if (!egl.eglChooseConfig(display, configSpec, null, 0, value)) { Log.i("Skia", "Could not get non-MSAA context count"); } numConfigs = value[0]; } } if (numConfigs <= 0) { throw new <API key>("No configs match configSpec"); } // Get all matching configurations. 
EGLConfig[] configs = new EGLConfig[numConfigs]; if (!egl.eglChooseConfig(display, configSpec, configs, numConfigs, value)) { throw new <API key>("Could not get config data"); } for (int i = 0; i < configs.length; ++i) { EGLConfig config = configs[i]; if (findConfigAttrib(egl, display, config , EGL10.EGL_RED_SIZE, 0) == 8 && findConfigAttrib(egl, display, config, EGL10.EGL_BLUE_SIZE, 0) == 8 && findConfigAttrib(egl, display, config, EGL10.EGL_GREEN_SIZE, 0) == 8 && findConfigAttrib(egl, display, config, EGL10.EGL_ALPHA_SIZE, 0) == 8 && findConfigAttrib(egl, display, config, EGL10.EGL_STENCIL_SIZE, 0) == 8) { return config; } } throw new <API key>("Could not find suitable EGL config"); } private int findConfigAttrib(EGL10 egl, EGLDisplay display, EGLConfig config, int attribute, int defaultValue) { int[] value = new int[1]; if (egl.eglGetConfigAttrib(display, config, attribute, value)) { return value[0]; } return defaultValue; } } }
## m.sync

This method takes a list of promises and returns a promise that resolves when all promises in the input list have resolved.

See [`m.deferred`](mithril.deferred.md) for more information on promises.

### Usage

```javascript
var greetAsync = function(delay) {
	var deferred = m.deferred();
	setTimeout(function() {
		deferred.resolve("hello");
	}, delay);
	return deferred.promise;
};

m.sync([
	greetAsync(1000),
	greetAsync(1500)
]).then(function(args) {
	console.log(args); // ["hello", "hello"]
});
```

### Signature

[How to read signatures](<API key>.md)

```clike
Promise sync(Array<Promise> promises)

where:
	Promise :: GetterSetter { Promise then(any successCallback(any value), any errorCallback(any value)) }
	GetterSetter :: any getterSetter([any value])
```

-	**Array<Promise> promises**

	A list of promises to synchronize.

-	**return Promise promise**

	The promise of the deferred object that is resolved when all input promises have been resolved.

	The callbacks for this promise receive as a parameter an Array containing the values of all the input promises.
#include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/sched/task.h> #include <linux/sched/task_stack.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/fs.h> #include <hwregs/reg_rdwr.h> #include <hwregs/reg_map.h> #include <hwregs/timer_defs.h> #include <hwregs/intr_vect_defs.h> #include <linux/ptrace.h> extern void stop_watchdog(void); /* We use this if we don't have any better idle routine. */ void default_idle(void) { local_irq_enable(); /* Halt until exception. */ __asm__ volatile("halt"); } /* * Free current thread data structures etc.. */ extern void deconfigure_bp(long pid); void exit_thread(struct task_struct *tsk) { deconfigure_bp(tsk->pid); } /* * If the watchdog is enabled, disable interrupts and enter an infinite loop. * The watchdog will reset the CPU after 0.1s. If the watchdog isn't enabled * then enable it and wait. */ extern void arch_enable_nmi(void); void hard_reset_now(void) { /* * Don't declare this variable elsewhere. We don't want any other * code to know about it than the watchdog handler in entry.S and * this code, implementing hard reset through the watchdog. */ #if defined(<API key>) extern int cause_of_death; #endif printk("*** HARD RESET ***\n"); local_irq_disable(); #if defined(<API key>) cause_of_death = 0xbedead; #else { <API key> wd_ctrl = {0}; stop_watchdog(); wd_ctrl.key = 16; /* Arbitrary key. */ wd_ctrl.cnt = 1; /* Minimum time. */ wd_ctrl.cmd = regk_timer_start; arch_enable_nmi(); REG_WR(timer, regi_timer0, rw_wd_ctrl, wd_ctrl); } #endif while (1) ; /* Wait for reset. */ } /* * Return saved PC of a blocked thread. */ unsigned long thread_saved_pc(struct task_struct *t) { return task_pt_regs(t)->erp; } /* * Setup the child's kernel stack with a pt_regs and call switch_stack() on it. * It will be unnested during _resume and _ret_from_sys_call when the new thread * is scheduled. * * Also setup the thread switching structure which is used to keep * thread-specific data during _resumes. 
*/ extern asmlinkage void ret_from_fork(void); extern asmlinkage void <API key>(void); int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, struct task_struct *p) { struct pt_regs *childregs = task_pt_regs(p); struct switch_stack *swstack = ((struct switch_stack *) childregs) - 1; /* * Put the pt_regs structure at the end of the new kernel stack page and * fix it up. Note: the task_struct doubles as the kernel stack for the * task. */ if (unlikely(p->flags & PF_KTHREAD)) { memset(swstack, 0, sizeof(struct switch_stack) + sizeof(struct pt_regs)); swstack->r1 = usp; swstack->r2 = arg; childregs->ccs = 1 << (I_CCS_BITNR + CCS_SHIFT); swstack->return_ip = (unsigned long) <API key>; p->thread.ksp = (unsigned long) swstack; p->thread.usp = 0; return 0; } *childregs = *current_pt_regs(); /* Struct copy of pt_regs. */ childregs->r10 = 0; /* Child returns 0 after a fork/clone. */ /* Set a new TLS ? * The TLS is in $mof because it is the 5th argument to sys_clone. */ if (p->mm && (clone_flags & CLONE_SETTLS)) { task_thread_info(p)->tls = childregs->mof; } /* Put the switch stack right below the pt_regs. */ /* Parameter to ret_from_sys_call. 0 is don't restart the syscall. */ swstack->r9 = 0; /* * We want to return into ret_from_sys_call after the _resume. * ret_from_fork will call ret_from_sys_call. */ swstack->return_ip = (unsigned long) ret_from_fork; /* Fix the user-mode and kernel-mode stackpointer. 
*/ p->thread.usp = usp ?: rdusp(); p->thread.ksp = (unsigned long) swstack; return 0; } unsigned long get_wchan(struct task_struct *p) { /* TODO */ return 0; } #undef last_sched #undef first_sched void show_regs(struct pt_regs * regs) { unsigned long usp = rdusp(); <API key>(KERN_DEFAULT); printk("ERP: %08lx SRP: %08lx CCS: %08lx USP: %08lx MOF: %08lx\n", regs->erp, regs->srp, regs->ccs, usp, regs->mof); printk(" r0: %08lx r1: %08lx r2: %08lx r3: %08lx\n", regs->r0, regs->r1, regs->r2, regs->r3); printk(" r4: %08lx r5: %08lx r6: %08lx r7: %08lx\n", regs->r4, regs->r5, regs->r6, regs->r7); printk(" r8: %08lx r9: %08lx r10: %08lx r11: %08lx\n", regs->r8, regs->r9, regs->r10, regs->r11); printk("r12: %08lx r13: %08lx oR10: %08lx\n", regs->r12, regs->r13, regs->orig_r10); }
#include "dm_services.h" #include "dce/dce_11_2_d.h" #include "dce/dce_11_2_sh_mask.h" #include "gmc/gmc_8_1_sh_mask.h" #include "gmc/gmc_8_1_d.h" #include "include/logger_interface.h" #include "dce112_compressor.h" #define DCP_REG(reg)\ (reg + cp110->offsets.dcp_offset) #define DMIF_REG(reg)\ (reg + cp110->offsets.dmif_offset) static const struct <API key> reg_offsets[] = { { .dcp_offset = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), .dmif_offset = (<API key> - <API key>), }, { .dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), .dmif_offset = (<API key> - <API key>), }, { .dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), .dmif_offset = (<API key> - <API key>), } }; static const uint32_t <API key> = 2560 * 1600; enum fbc_idle_force { /* Bit 0 - Display registers updated */ <API key> = 0x00000001, /* Bit 2 - FBC_GRPH_COMP_EN register updated */ <API key> = 0x00000002, /* Bit 3 - FBC_SRC_SEL register updated */ <API key> = 0x00000004, /* Bit 4 - FBC_MIN_COMPRESSION register updated */ <API key> = 0x00000008, /* Bit 5 - FBC_ALPHA_COMP_EN register updated */ <API key> = 0x00000010, /* Bit 6 - <API key> register updated */ <API key> = 0x00000020, /* Bit 7 - <API key> register updated */ <API key> = 0x00000040, /* Bit 24 - Memory write to region 0 defined by MC registers. */ <API key> = 0x01000000, /* Bit 25 - Memory write to region 1 defined by MC registers */ <API key> = 0x02000000, /* Bit 26 - Memory write to region 2 defined by MC registers */ <API key> = 0x04000000, /* Bit 27 - Memory write to region 3 defined by MC registers. */ <API key> = 0x08000000, /* Bit 28 - Memory write from any client other than MCIF */ <API key> = 0x10000000, /* Bit 29 - CG statics screen signal is inactive */ <API key> = 0x20000000, }; static uint32_t lpt_size_alignment(struct dce112_compressor *cp110) { /*LPT_ALIGNMENT (in bytes) = ROW_SIZE * #BANKS * # DRAM CHANNELS. 
*/ return cp110->base.raw_size * cp110->base.banks_num * cp110->base.dram_channels_num; } static uint32_t <API key>(struct dce112_compressor *cp110, uint32_t lpt_control) { /*LPT MC Config */ if (cp110->base.options.bits.LPT_MC_CONFIG == 1) { /* POSSIBLE VALUES for LPT NUM_PIPES (DRAM CHANNELS): * 00 - 1 CHANNEL * 01 - 2 CHANNELS * 02 - 4 OR 6 CHANNELS * (Only for discrete GPU, N/A for CZ) * 03 - 8 OR 12 CHANNELS * (Only for discrete GPU, N/A for CZ) */ switch (cp110->base.dram_channels_num) { case 2: set_reg_field_value( lpt_control, 1, <API key>, <API key>); break; case 1: set_reg_field_value( lpt_control, 0, <API key>, <API key>); break; default: dm_logger_write( cp110->base.ctx->logger, LOG_WARNING, "%s: Invalid LPT NUM_PIPES!!!", __func__); break; } /* The mapping for LPT NUM_BANKS is in * GRPH_CONTROL.GRPH_NUM_BANKS register field * Specifies the number of memory banks for tiling * purposes. Only applies to 2D and 3D tiling modes. * POSSIBLE VALUES: * 00 - <API key>: ADDR_SURF_2_BANK * 01 - <API key>: ADDR_SURF_4_BANK * 02 - <API key>: ADDR_SURF_8_BANK * 03 - <API key>: ADDR_SURF_16_BANK */ switch (cp110->base.banks_num) { case 16: set_reg_field_value( lpt_control, 3, <API key>, <API key>); break; case 8: set_reg_field_value( lpt_control, 2, <API key>, <API key>); break; case 4: set_reg_field_value( lpt_control, 1, <API key>, <API key>); break; case 2: set_reg_field_value( lpt_control, 0, <API key>, <API key>); break; default: dm_logger_write( cp110->base.ctx->logger, LOG_WARNING, "%s: Invalid LPT NUM_BANKS!!!", __func__); break; } /* The mapping is in DMIF_ADDR_CALC. * <API key> register field for * Carrizo specifies the memory interleave per pipe. * It effectively specifies the location of pipe bits in * the memory address. 
* POSSIBLE VALUES: * 00 - <API key>: 256 byte * interleave * 01 - <API key>: 512 byte * interleave */ switch (cp110->base.<API key>) { case 256: /*256B */ set_reg_field_value( lpt_control, 0, <API key>, <API key>); break; case 512: /*512B */ set_reg_field_value( lpt_control, 1, <API key>, <API key>); break; default: dm_logger_write( cp110->base.ctx->logger, LOG_WARNING, "%s: Invalid LPT INTERLEAVE_SIZE!!!", __func__); break; } /* The mapping for <API key> is in * DMIF_ADDR_CALC.<API key> register field * for Carrizo. Specifies the size of dram row in bytes. * This should match up with NOOFCOLS field in * MC_ARB_RAMCFG (ROW_SIZE = 4 * 2 ^^ columns). * This register DMIF_ADDR_CALC is not used by the * hardware as it is only used for addrlib assertions. * POSSIBLE VALUES: * 00 - ADDR_CONFIG_1KB_ROW: Treat 1KB as DRAM row * boundary * 01 - ADDR_CONFIG_2KB_ROW: Treat 2KB as DRAM row * boundary * 02 - ADDR_CONFIG_4KB_ROW: Treat 4KB as DRAM row * boundary */ switch (cp110->base.raw_size) { case 4096: /*4 KB */ set_reg_field_value( lpt_control, 2, <API key>, <API key>); break; case 2048: set_reg_field_value( lpt_control, 1, <API key>, <API key>); break; case 1024: set_reg_field_value( lpt_control, 0, <API key>, <API key>); break; default: dm_logger_write( cp110->base.ctx->logger, LOG_WARNING, "%s: Invalid LPT ROW_SIZE!!!", __func__); break; } } else { dm_logger_write( cp110->base.ctx->logger, LOG_WARNING, "%s: LPT MC Configuration is not provided", __func__); } return lpt_control; } static bool <API key>( struct dce112_compressor *cp110, uint32_t source_view_width, uint32_t source_view_height) { if (cp110->base.<API key> != 0 && cp110->base.<API key> != 0 && ((source_view_width * source_view_height) > (cp110->base.<API key> * cp110->base.<API key>))) return true; return false; } static uint32_t <API key>( struct dce112_compressor *cp110, uint32_t pixels) { return 256 * ((pixels + 255) / 256); } static void <API key>( struct dce112_compressor *cp110, bool enabled) { uint8_t 
counter = 0; uint32_t addr = mmFBC_STATUS; uint32_t value; while (counter < 10) { value = dm_read_reg(cp110->base.ctx, addr); if (get_reg_field_value( value, FBC_STATUS, FBC_ENABLE_STATUS) == enabled) break; udelay(10); counter++; } if (counter == 10) { dm_logger_write( cp110->base.ctx->logger, LOG_WARNING, "%s: wait counter exceeded, changes to HW not applied", __func__); } } void <API key>(struct compressor *compressor) { uint32_t value; uint32_t addr; addr = mmFBC_CNTL; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN); set_reg_field_value(value, 1, FBC_CNTL, FBC_EN); set_reg_field_value(value, 2, FBC_CNTL, FBC_COHERENCY_MODE); if (compressor->options.bits.CLK_GATING_DISABLED == 1) { /* HW needs to do power measurement comparison. */ set_reg_field_value( value, 0, FBC_CNTL, <API key>); } dm_write_reg(compressor->ctx, addr, value); addr = mmFBC_COMP_MODE; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_RLE_EN); set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_DPCM4_RGB_EN); set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_IND_EN); dm_write_reg(compressor->ctx, addr, value); addr = mmFBC_COMP_CNTL; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value(value, 1, FBC_COMP_CNTL, FBC_DEPTH_RGB08_EN); dm_write_reg(compressor->ctx, addr, value); /*FBC_MIN_COMPRESSION 0 ==> 2:1 */ /* 1 ==> 4:1 */ /* 2 ==> 8:1 */ /* 0xF ==> 1:1 */ set_reg_field_value(value, 0xF, FBC_COMP_CNTL, FBC_MIN_COMPRESSION); dm_write_reg(compressor->ctx, addr, value); compressor->min_compress_ratio = <API key>; value = 0; dm_write_reg(compressor->ctx, mmFBC_IND_LUT0, value); value = 0xFFFFFF; dm_write_reg(compressor->ctx, mmFBC_IND_LUT1, value); } void <API key>( struct compressor *compressor, uint32_t paths_num, struct <API key> *params) { struct dce112_compressor *cp110 = <API key>(compressor); if (compressor->options.bits.FBC_SUPPORT && (compressor->options.bits.DUMMY_BACKEND == 0) && 
(!<API key>(compressor, NULL)) && (!<API key>( cp110, params->source_view_width, params->source_view_height))) { uint32_t addr; uint32_t value; /* Before enabling FBC first need to enable LPT if applicable * LPT state should always be changed (enable/disable) while FBC * is disabled */ if (compressor->options.bits.LPT_SUPPORT && (paths_num < 2) && (params->source_view_width * params->source_view_height <= <API key>)) { <API key>(compressor); } addr = mmFBC_CNTL; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN); set_reg_field_value( value, params->inst, FBC_CNTL, FBC_SRC_SEL); dm_write_reg(compressor->ctx, addr, value); /* Keep track of enum controller_id FBC is attached to */ compressor->is_enabled = true; compressor->attached_inst = params->inst; cp110->offsets = reg_offsets[params->inst]; /*Toggle it as there is bug in HW */ set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN); dm_write_reg(compressor->ctx, addr, value); set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN); dm_write_reg(compressor->ctx, addr, value); <API key>(cp110, true); } } void <API key>(struct compressor *compressor) { struct dce112_compressor *cp110 = <API key>(compressor); if (compressor->options.bits.FBC_SUPPORT && <API key>(compressor, NULL)) { uint32_t reg_data; /* Turn off compression */ reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL); set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN); dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data); /* Reset enum controller_id to undefined */ compressor->attached_inst = 0; compressor->is_enabled = false; /* Whenever disabling FBC make sure LPT is disabled if LPT * supported */ if (compressor->options.bits.LPT_SUPPORT) <API key>(compressor); <API key>(cp110, false); } } bool <API key>( struct compressor *compressor, uint32_t *inst) { /* Check the hardware register */ uint32_t value; value = dm_read_reg(compressor->ctx, mmFBC_STATUS); if (get_reg_field_value(value, 
FBC_STATUS, FBC_ENABLE_STATUS)) { if (inst != NULL) *inst = compressor->attached_inst; return true; } value = dm_read_reg(compressor->ctx, mmFBC_MISC); if (get_reg_field_value(value, FBC_MISC, <API key>)) { value = dm_read_reg(compressor->ctx, mmFBC_CNTL); if (get_reg_field_value(value, FBC_CNTL, FBC_GRPH_COMP_EN)) { if (inst != NULL) *inst = compressor->attached_inst; return true; } } return false; } bool <API key>(struct compressor *compressor) { /* Check the hardware register */ uint32_t value = dm_read_reg(compressor->ctx, <API key>); return get_reg_field_value( value, <API key>, <API key>); } void <API key>( struct compressor *compressor, struct <API key> *params) { struct dce112_compressor *cp110 = <API key>(compressor); uint32_t value = 0; uint32_t fbc_pitch = 0; uint32_t <API key> = compressor-><API key>.addr.low_part; /* Clear content first. */ dm_write_reg( compressor->ctx, DCP_REG(<API key>), 0); dm_write_reg(compressor->ctx, DCP_REG(<API key>), 0); if (compressor->options.bits.LPT_SUPPORT) { uint32_t lpt_alignment = lpt_size_alignment(cp110); if (lpt_alignment != 0) { <API key> = ((<API key> + (lpt_alignment - 1)) / lpt_alignment) * lpt_alignment; } } /* Write address, HIGH has to be first. */ dm_write_reg(compressor->ctx, DCP_REG(<API key>), compressor-><API key>.addr.high_part); dm_write_reg(compressor->ctx, DCP_REG(<API key>), <API key>); fbc_pitch = <API key>( cp110, params->source_view_width); if (compressor->min_compress_ratio == <API key>) fbc_pitch = fbc_pitch / 8; else dm_logger_write( compressor->ctx->logger, LOG_WARNING, "%s: Unexpected DCE11 compression ratio", __func__); /* Clear content first. */ dm_write_reg(compressor->ctx, DCP_REG(<API key>), 0); /* Write FBC Pitch. 
*/ set_reg_field_value( value, fbc_pitch, GRPH_COMPRESS_PITCH, GRPH_COMPRESS_PITCH); dm_write_reg(compressor->ctx, DCP_REG(<API key>), value); } void <API key>(struct compressor *compressor) { struct dce112_compressor *cp110 = <API key>(compressor); uint32_t value; uint32_t addr; uint32_t inx; /* Disable all pipes LPT Stutter */ for (inx = 0; inx < 3; inx++) { value = dm_read_reg( compressor->ctx, DMIF_REG(<API key>)); set_reg_field_value( value, 0, <API key>, <API key>); dm_write_reg( compressor->ctx, DMIF_REG(<API key>), value); } /* Disable Underlay pipe LPT Stutter */ addr = <API key>; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value( value, 0, <API key>, <API key>); dm_write_reg(compressor->ctx, addr, value); /* Disable LPT */ addr = <API key>; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value( value, 0, <API key>, <API key>); dm_write_reg(compressor->ctx, addr, value); /* Clear selection of Channel(s) containing Compressed Surface */ addr = mmGMCON_LPT_TARGET; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value( value, 0xFFFFFFFF, GMCON_LPT_TARGET, STCTRL_LPT_TARGET); dm_write_reg(compressor->ctx, mmGMCON_LPT_TARGET, value); } void <API key>(struct compressor *compressor) { struct dce112_compressor *cp110 = <API key>(compressor); uint32_t value; uint32_t addr; uint32_t value_control; uint32_t channels; /* Enable LPT Stutter from Display pipe */ value = dm_read_reg(compressor->ctx, DMIF_REG(<API key>)); set_reg_field_value( value, 1, <API key>, <API key>); dm_write_reg(compressor->ctx, DMIF_REG(<API key>), value); /* Enable Underlay pipe LPT Stutter */ addr = <API key>; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value( value, 1, <API key>, <API key>); dm_write_reg(compressor->ctx, addr, value); /* Selection of Channel(s) containing Compressed Surface: 0xfffffff * will disable LPT. * STCTRL_LPT_TARGETn corresponds to channel n. 
*/ addr = <API key>; value_control = dm_read_reg(compressor->ctx, addr); channels = get_reg_field_value(value_control, <API key>, <API key>); addr = mmGMCON_LPT_TARGET; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value( value, channels + 1, /* not mentioned in programming guide, but follow DCE8.1 */ GMCON_LPT_TARGET, STCTRL_LPT_TARGET); dm_write_reg(compressor->ctx, addr, value); /* Enable LPT */ addr = <API key>; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value( value, 1, <API key>, <API key>); dm_write_reg(compressor->ctx, addr, value); } void <API key>( struct compressor *compressor, struct <API key> *params) { struct dce112_compressor *cp110 = <API key>(compressor); uint32_t rows_per_channel; uint32_t lpt_alignment; uint32_t source_view_width; uint32_t source_view_height; uint32_t lpt_control = 0; if (!compressor->options.bits.LPT_SUPPORT) return; lpt_control = dm_read_reg(compressor->ctx, <API key>); /* POSSIBLE VALUES for Low Power Tiling Mode: * 00 - Use channel 0 * 01 - Use Channel 0 and 1 * 02 - Use Channel 0,1,2,3 * 03 - reserved */ switch (compressor->lpt_channels_num) { /* case 2: * Use Channel 0 & 1 / Not used for DCE 11 */ case 1: /*Use Channel 0 for LPT for DCE 11 */ set_reg_field_value( lpt_control, 0, <API key>, <API key>); break; default: dm_logger_write( compressor->ctx->logger, LOG_WARNING, "%s: Invalid selected DRAM channels for LPT!!!", __func__); break; } lpt_control = <API key>(cp110, lpt_control); /* Program <API key> field which depends on * FBC compressed surface pitch. * <API key> = Roundup ((Surface Height * * Surface Pitch) / (Row Size * Number of Channels * * Number of Banks)). 
*/ rows_per_channel = 0; lpt_alignment = lpt_size_alignment(cp110); source_view_width = <API key>( cp110, params->source_view_width); source_view_height = (params->source_view_height + 1) & (~0x1); if (lpt_alignment != 0) { rows_per_channel = source_view_width * source_view_height * 4; rows_per_channel = (rows_per_channel % lpt_alignment) ? (rows_per_channel / lpt_alignment + 1) : rows_per_channel / lpt_alignment; } set_reg_field_value( lpt_control, rows_per_channel, <API key>, <API key>); dm_write_reg(compressor->ctx, <API key>, lpt_control); } /* * DCE 11 Frame Buffer Compression Implementation */ void <API key>( struct compressor *compressor, uint32_t fbc_trigger) { /* Disable region hit event, <API key> = 0 (bits 16-19) * for DCE 11 regions cannot be used - does not work with S/G */ uint32_t addr = <API key>; uint32_t value = dm_read_reg(compressor->ctx, addr); set_reg_field_value( value, 0, <API key>, <API key>); dm_write_reg(compressor->ctx, addr, value); /* Setup events when to clear all CSM entries (effectively marking * current compressed data invalid) * For DCE 11 CSM metadata 11111 means - "Not Compressed" * Used as the initial value of the metadata sent to the compressor * after invalidation, to indicate that the compressor should attempt * to compress all chunks on the current pass. Also used when the chunk * is not successfully written to memory. * When this CSM value is detected, FBC reads from the uncompressed * buffer. Set events according to passed in value, these events are * valid for DCE11: * - bit 0 - display register updated * - bit 28 - memory write from any client except from MCIF * - bit 29 - CG static screen signal is inactive * In addition, DCE11.1 also needs to set new DCE11.1 specific events * that are used to trigger invalidation on certain register changes, * for example enabling of Alpha Compression may trigger invalidation of * FBC once bit is set. 
These events are as follows: * - Bit 2 - FBC_GRPH_COMP_EN register updated * - Bit 3 - FBC_SRC_SEL register updated * - Bit 4 - FBC_MIN_COMPRESSION register updated * - Bit 5 - FBC_ALPHA_COMP_EN register updated * - Bit 6 - <API key> register updated * - Bit 7 - <API key> register updated */ addr = <API key>; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value( value, fbc_trigger | <API key> | <API key> | <API key> | <API key> | <API key> | <API key>, <API key>, <API key>); dm_write_reg(compressor->ctx, addr, value); } void <API key>(struct dce112_compressor *compressor, struct dc_context *ctx) { struct dc_bios *bp = ctx->dc_bios; struct embedded_panel_info panel_info; compressor->base.options.raw = 0; compressor->base.options.bits.FBC_SUPPORT = true; compressor->base.options.bits.LPT_SUPPORT = true; /* For DCE 11 always use one DRAM channel for LPT */ compressor->base.lpt_channels_num = 1; compressor->base.options.bits.DUMMY_BACKEND = false; /* Check if this system has more than 1 DRAM channel; if only 1 then LPT * should not be supported */ if (compressor->base.memory_bus_width == 64) compressor->base.options.bits.LPT_SUPPORT = false; compressor->base.options.bits.CLK_GATING_DISABLED = false; compressor->base.ctx = ctx; compressor->base.<API key> = 0; compressor->base.<API key> = 0; compressor->base.memory_bus_width = ctx->asic_id.vram_width; compressor->base.allocated_size = 0; compressor->base.<API key> = 0; compressor->base.min_compress_ratio = <API key>; compressor->base.banks_num = 0; compressor->base.raw_size = 0; compressor->base.<API key> = 0; compressor->base.dram_channels_num = 0; compressor->base.lpt_channels_num = 0; compressor->base.attached_inst = 0; compressor->base.is_enabled = false; if (BP_RESULT_OK == bp->funcs-><API key>(bp, &panel_info)) { compressor->base.<API key> = panel_info.lcd_timing.<API key>; compressor->base.<API key> = panel_info.lcd_timing.<API key>; } } struct compressor *<API key>(struct dc_context *ctx) { struct 
dce112_compressor *cp110 = kzalloc(sizeof(struct dce112_compressor), GFP_KERNEL); if (!cp110) return NULL; <API key>(cp110, ctx); return &cp110->base; } void <API key>(struct compressor **compressor) { kfree(<API key>(*compressor)); *compressor = NULL; }
// @(#)root/tmva $Id$ #ifndef <API key> #define <API key> // Configurable // // Base class for all classes with option parsing // #include "TNamed.h" #include "TList.h" #include "TMVA/Option.h" namespace TMVA { class Configurable : public TNamed { public: // constructur Configurable( const TString& theOption = "" ); // default destructur virtual ~Configurable(); // parse the internal option string virtual void ParseOptions(); // print list of defined options void PrintOptions() const; const char* GetConfigName() const { return GetName(); } const char* <API key>() const { return fConfigDescription; } void SetConfigName ( const char* n ) { SetName(n); } void <API key>( const char* d ) { fConfigDescription = TString(d); } // Declare option and bind it to a variable template<class T> OptionBase* DeclareOptionRef( T& ref, const TString& name, const TString& desc = "" ); template<class T> OptionBase* DeclareOptionRef( T*& ref, Int_t size, const TString& name, const TString& desc = "" ); // Add a predefined value to the last declared option template<class T> void AddPreDefVal(const T&); // Add a predefined value to the option named optname template<class T> void AddPreDefVal(const TString&optname ,const T&); void <API key>() const; const TString& GetOptions() const { return fOptions; } void SetOptions(const TString& s) { fOptions = s; } void <API key> ( std::ostream& o, const TString& prefix ) const; void <API key>( std::istream& istr ); void AddOptionsXMLTo( void* parent ) const; void ReadOptionsFromXML( void* node ); protected: Bool_t <API key>() const { return <API key>; } void EnableLooseOptions( Bool_t b = kTRUE ) { <API key> = b; } void <API key>(); void ResetSetFlag(); const TString& GetReferenceFile() const { return fReferenceFile; } private: // splits the option string at ':' and fills the list 'loo' with the primitive strings void SplitOptions(const TString& theOpt, TList& loo) const; TString fOptions; // options string Bool_t <API key>; // checker for option 
string // classes and method related to easy and flexible option parsing OptionBase* fLastDeclaredOption; //! last declared option TList fListOfOptions; // option list TString fConfigDescription; // description of this configurable TString fReferenceFile; // reference file for options writing public: // the mutable declaration is needed to use the logger in const methods MsgLogger& Log() const { return *fLogger; } // set message type void SetMsgType( EMsgType t ) { fLogger->SetMinType(t); } protected: mutable MsgLogger* fLogger; //! message logger private: template <class T> void AssignOpt( const TString& name, T& valAssign ) const; public: ClassDef(Configurable,1); // Virtual base class for all TMVA method }; } // namespace TMVA // Template Declarations go here //<API key> template <class T> TMVA::OptionBase* TMVA::Configurable::DeclareOptionRef( T& ref, const TString& name, const TString& desc) { // set the reference for an option OptionBase* o = new Option<T>(ref, name, desc); fListOfOptions.Add(o); fLastDeclaredOption = o; return o; } template <class T> TMVA::OptionBase* TMVA::Configurable::DeclareOptionRef( T*& ref, Int_t size, const TString& name, const TString& desc) { // set the reference for an option OptionBase* o = new Option<T*>(ref, size, name, desc); fListOfOptions.Add(o); fLastDeclaredOption = o; return o; } //<API key> template<class T> void TMVA::Configurable::AddPreDefVal(const T& val) { // add predefined option value to the last declared option Option<T>* oc = dynamic_cast<Option<T>*>(fLastDeclaredOption); if(oc!=0) oc->AddPreDefVal(val); } //<API key> template<class T> void TMVA::Configurable::AddPreDefVal(const TString &optname, const T& val) { // add predefined option value to the option named optname TListIter optIt( &fListOfOptions ); while (OptionBase * op = (OptionBase *) optIt()) { if (optname == TString(op->TheName())){ Option<T>* oc = dynamic_cast<Option<T>*>(op); if(oc!=0){ oc->AddPreDefVal(val); return; } else{ Log() << kFATAL << 
"Option \"" << optname << "\" was found, but somehow I could not convert the pointer propperly.. please check the syntax of your option declaration" << Endl; return; } } } Log() << kFATAL << "Option \"" << optname << "\" is not declared, hence cannot add predefined value, please check the syntax of your option declaration" << Endl; } //<API key> template <class T> void TMVA::Configurable::AssignOpt(const TString& name, T& valAssign) const { // assign an option TObject* opt = fListOfOptions.FindObject(name); if (opt!=0) valAssign = ((Option<T>*)opt)->Value(); else Log() << kFATAL << "Option \"" << name << "\" not declared, please check the syntax of your option string" << Endl; } #endif
#include "debug.h"

#include <stdarg.h>
#include <windows.h>
#include <stdio.h>	/* vsnprintf */

#define DPRINTF_BUF_SZ 1024

/*
 * OutputDebugStringf - printf-style wrapper around OutputDebugString().
 *
 * Formats the message into a fixed-size stack buffer and sends it to the
 * Win32 debugger output channel.  Compiled to a no-op in release builds
 * (when _DEBUG is not defined).
 *
 * Messages longer than DPRINTF_BUF_SZ - 1 characters are truncated.
 * Uses vsnprintf rather than the original vsprintf so an over-long
 * message can no longer overflow the stack buffer, and pairs va_start
 * with the previously missing va_end (required by the C standard).
 */
void OutputDebugStringf(char *fmt, ...)
{
#ifdef _DEBUG
	va_list args;
	char buf[DPRINTF_BUF_SZ];

	va_start(args, fmt);
	/* Bounded format: never writes past buf, always NUL-terminates. */
	vsnprintf(buf, sizeof buf, fmt, args);
	va_end(args);

	OutputDebugString(buf);
#endif
}
<?php namespace PhpOffice\PhpWord\Tests\Exception; use PhpOffice\PhpWord\Exception\<API key>; /** * Test class for PhpOffice\PhpWord\Exception\<API key> * * @coversDefaultClass \PhpOffice\PhpWord\Exception\<API key> * @<API key> */ class <API key> extends \<API key> { /** * Throw new exception * * @expectedException \PhpOffice\PhpWord\Exception\<API key> * @covers \PhpOffice\PhpWord\Exception\<API key> */ public function testThrowException() { throw new <API key>; } }
<!DOCTYPE html> <link rel="help" href="https://drafts.csswg.org/css-backgrounds/#border-image-slice" /> <link rel="help" href="https://drafts.csswg.org/css-backgrounds/#border-image" /> <script src="/resources/testharness.js"></script> <script src="/resources/testharnessreport.js"></script> <style> div { border: 1px solid; border-image-slice: 1; } div { /* Should reset border-image-slice */ border-image: linear-gradient(black, black); } </style> <div>This text should not have a border, just corner dots</div> <script> test(() => { assert_equals(getComputedStyle(document.querySelector("div")).borderImageSlice, "100%"); }, "Check that the border-image shorthand resets border-image-slice to its initial value."); </script>
#include <sys/mman.h>
#include "syscall.h"

/* mlockall(2): lock the calling process's entire virtual address space
 * into RAM, per the MCL_* flags.  Thin wrapper over the raw syscall;
 * presumably the syscall() helper in "syscall.h" maps a negative kernel
 * return to -1/errno — TODO confirm against this libc's syscall glue. */
int mlockall(int flags)
{
	return syscall(SYS_mlockall, flags);
}
(function ($, Drupal) { /** * Toggle show/hide links for off canvas layout. */ Drupal.behaviors.<API key> = { attach: function (context) { $('#off-canvas').click(function(e) { if (!$(this).hasClass('is-visible')) { $(this).addClass('is-visible'); e.preventDefault(); e.stopPropagation(); } }); $('#off-canvas-hide').click(function(e) { $(this).parent().removeClass('is-visible'); e.preventDefault(); e.stopPropagation(); }); $('.l-page').click(function(e) { if($(' $('#off-canvas').removeClass('is-visible'); e.stopPropagation(); } }); } }; })(jQuery, Drupal);
/* reference.h */ /* This file is part of: */ /* GODOT ENGINE */ /* a copy of this software and associated documentation files (the */ /* "Software"), to deal in the Software without restriction, including */ /* without limitation the rights to use, copy, modify, merge, publish, */ /* permit persons to whom the Software is furnished to do so, subject to */ /* the following conditions: */ /* included in all copies or substantial portions of the Software. */ /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ /* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ /* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ /* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ /* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef REFERENCE_H #define REFERENCE_H #include "object.h" #include "safe_refcount.h" #include "ref_ptr.h" #include "object_type_db.h" /** @author Juan Linietsky <reduzio@gmail.com> */ class Reference : public Object{ OBJ_TYPE( Reference, Object ); friend class RefBase; SafeRefCount refcount; SafeRefCount refcount_init; protected: static void _bind_methods(); public: _FORCE_INLINE_ bool is_referenced() const { return refcount_init.get()<1; } bool init_ref(); void reference(); bool unreference(); int reference_get_count() const; Reference(); ~Reference(); }; #if 0 class RefBase { protected: void ref_inc(Reference *p_reference); bool ref_dec(Reference *p_reference); Reference *first_ref(Reference *p_reference); Reference * <API key>(const RefBase &p_base); virtual Reference * get_reference() const=0; char * get_refptr_data(const RefPtr &p_refptr) const; public: virtual ~RefBase() {} }; #endif template<class T> class Ref { T *reference; void ref( const Ref& p_from ) { if (p_from.reference==reference) return; unref(); reference=p_from.reference; if (reference) reference->reference(); } void ref_pointer( T* p_ref ) { ERR_FAIL_COND(!p_ref); if (p_ref->init_ref()) 
reference=p_ref; } //virtual Reference * get_reference() const { return reference; } public: _FORCE_INLINE_ bool operator<(const Ref<T>& p_r) const { return reference<p_r.reference; } _FORCE_INLINE_ bool operator==(const Ref<T>& p_r) const { return reference==p_r.reference; } _FORCE_INLINE_ bool operator!=(const Ref<T>& p_r) const { return reference!=p_r.reference; } _FORCE_INLINE_ T* operator->() { return reference; } _FORCE_INLINE_ T* operator*() { return reference; } _FORCE_INLINE_ const T* operator->() const { return reference; } _FORCE_INLINE_ const T* ptr() const { return reference; } _FORCE_INLINE_ T* ptr() { return reference; } _FORCE_INLINE_ const T* operator*() const { return reference; } RefPtr get_ref_ptr() const { RefPtr refptr; Ref<Reference> * irr = reinterpret_cast<Ref<Reference>*>( refptr.get_data() ); *irr = *this; return refptr; }; #if 0 // go to RefPtr operator RefPtr() const { return get_ref_ptr(); } #endif #if 1 operator Variant() const { return Variant( get_ref_ptr() ); } #endif void operator=( const Ref& p_from ) { ref(p_from); } template<class T_Other> void operator=( const Ref<T_Other>& p_from ) { Reference *refb = const_cast<Reference*>(static_cast<const Reference*>(p_from.ptr())); if (!refb) { unref(); return; } Ref r; r.reference=refb->cast_to<T>(); ref(r); r.reference=NULL; } void operator=( const RefPtr& p_refptr ) { Ref<Reference> * irr = reinterpret_cast<Ref<Reference>*>( p_refptr.get_data() ); Reference *refb = irr->ptr(); if (!refb) { unref(); return; } Ref r; r.reference=refb->cast_to<T>(); ref(r); r.reference=NULL; } void operator=( const Variant& p_variant ) { RefPtr refptr=p_variant; Ref<Reference> * irr = reinterpret_cast<Ref<Reference>*>( refptr.get_data() ); Reference *refb = irr->ptr(); if (!refb) { unref(); return; } Ref r; r.reference=refb->cast_to<T>(); ref(r); r.reference=NULL; } Ref( const Ref& p_from ) { reference=NULL; ref(p_from); } template<class T_Other> Ref( const Ref<T_Other>& p_from ) { reference=NULL; 
Reference *refb = const_cast<Reference*>(static_cast<const Reference*>(p_from.ptr())); if (!refb) { unref(); return; } Ref r; r.reference=refb->cast_to<T>(); ref(r); r.reference=NULL; } Ref( T* p_reference ) { if (p_reference) ref_pointer(p_reference); else reference=NULL; } Ref( const Variant& p_variant) { RefPtr refptr=p_variant; Ref<Reference> * irr = reinterpret_cast<Ref<Reference>*>( refptr.get_data() ); reference=NULL; Reference *refb = irr->ptr(); if (!refb) { unref(); return; } Ref r; r.reference=refb->cast_to<T>(); ref(r); r.reference=NULL; } Ref( const RefPtr& p_refptr) { Ref<Reference> * irr = reinterpret_cast<Ref<Reference>*>( p_refptr.get_data() ); reference=NULL; Reference *refb = irr->ptr(); if (!refb) { unref(); return; } Ref r; r.reference=refb->cast_to<T>(); ref(r); r.reference=NULL; } inline bool is_valid() const { return reference!=NULL; } inline bool is_null() const { return reference==NULL; } void unref() { //TODO this should be moved to mutexes, since this engine does not really // do a lot of referencing on references and stuff // mutexes will avoid more crashes? if (reference && reference->unreference()) { memdelete(reference); } reference=NULL; } void instance() { ref( memnew( T )); } Ref() { reference=NULL; } ~Ref() { unref(); } }; typedef Ref<Reference> REF; class WeakRef : public Reference { OBJ_TYPE(WeakRef,Reference); ObjectID ref; protected: static void _bind_methods(); public: Variant get_ref() const; void set_obj(Object *p_object); void set_ref(const REF& p_ref); WeakRef(); }; #endif // REFERENCE_H
<reference path="MediaStream.d.ts" /> <reference path="RTCPeerConnection.d.ts" /> var config: RTCConfiguration = { iceServers: [{ urls: "stun.l.google.com:19302" }] }; var constraints: RTCMediaConstraints = { mandatory: { offerToReceiveAudio: true, offerToReceiveVideo: true } }; var peerConnection: RTCPeerConnection = new RTCPeerConnection(config, constraints); navigator.getUserMedia({ audio: true, video: true }, stream => { peerConnection.addStream(stream); }, error => { console.log('Error message: ' + error.message); console.log('Error name: ' + error.name); }); peerConnection.onaddstream = ev => console.log(ev.type); peerConnection.ondatachannel = ev => console.log(ev.type); peerConnection.<API key> = ev => console.log(ev.type); peerConnection.onnegotiationneeded = ev => console.log(ev.type); peerConnection.onopen = ev => console.log(ev.type); peerConnection.onicecandidate = ev => console.log(ev.type); peerConnection.onremovestream = ev => console.log(ev.type); peerConnection.onstatechange = ev => console.log(ev.type); peerConnection.createOffer( offer => { peerConnection.setLocalDescription(offer, () => console.log("set local description"), error => console.log("Error setting local description: " + error)); }, error => console.log("Error creating offer: " + error)); var type: string = RTCSdpType[RTCSdpType.offer]; var offer: <API key> = { type: type, sdp: "some sdp" }; var sessionDescription = new <API key>(offer); peerConnection.<API key>(sessionDescription, () => { peerConnection.createAnswer( answer => { peerConnection.setLocalDescription(answer, () => console.log('Set local description'), error => console.log( "Error setting local description from created answer: " + error + "; answer.sdp=" + answer.sdp)); }, error => console.log("Error creating answer: " + error)); }, error => console.log('Error setting remote description: ' + error + "; offer.sdp=" + offer.sdp)); var <API key> = new <API key>(offer); peerConnection.<API key>(<API key>, () => { 
peerConnection.createAnswer( answer => { peerConnection.setLocalDescription(answer, () => console.log('Set local description'), error => console.log( "Error setting local description from created answer: " + error + "; answer.sdp=" + answer.sdp)); }, error => console.log("Error creating answer: " + error)); }, error => console.log('Error setting remote description: ' + error + "; offer.sdp=" + offer.sdp)); var <API key> = new <API key>(offer); peerConnection.<API key>(<API key>, () => { peerConnection.createAnswer( answer => { peerConnection.setLocalDescription(answer, () => console.log('Set local description'), error => console.log( "Error setting local description from created answer: " + error + "; answer.sdp=" + answer.sdp)); }, error => console.log("Error creating answer: " + error)); }, error => console.log('Error setting remote description: ' + error + "; offer.sdp=" + offer.sdp)); var wkPeerConnection: <API key> = new <API key>(config, constraints);
.oo-ui-icon-bigger { background-image: url("themes/mediawiki/images/icons/bigger-rtl.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bigger-rtl.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bigger-rtl.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bigger-rtl.png"); } .<API key> { background-image: url("themes/mediawiki/images/icons/bigger-rtl-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bigger-rtl-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bigger-rtl-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bigger-rtl-invert.png"); } .oo-ui-icon-smaller { background-image: url("themes/mediawiki/images/icons/smaller-rtl.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/smaller-rtl.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/smaller-rtl.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/smaller-rtl.png"); } .<API key> { background-image: url("themes/mediawiki/images/icons/smaller-rtl-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/smaller-rtl-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/smaller-rtl-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/smaller-rtl-invert.png"); } .<API key> { background-image: url("themes/mediawiki/images/icons/subscript-rtl.png"); background-image: -<API 
key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/subscript-rtl.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/subscript-rtl.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/subscript-rtl.png"); } .<API key> { background-image: url("themes/mediawiki/images/icons/<API key>.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/<API key>.png"); } .<API key> { background-image: url("themes/mediawiki/images/icons/superscript-rtl.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/superscript-rtl.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/superscript-rtl.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/superscript-rtl.png"); } .<API key> { background-image: url("themes/mediawiki/images/icons/<API key>.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/<API key>.png"); } .oo-ui-icon-bold { background-image: url("themes/mediawiki/images/icons/bold-a.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-a.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ 
url("themes/mediawiki/images/icons/bold-a.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-a.png"); } /* @noflip */ .oo-ui-icon-bold:lang(ar) { background-image: url("themes/mediawiki/images/icons/bold-arab-ain.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-arab-ain.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-arab-ain.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-arab-ain.png"); } /* @noflip */ .oo-ui-icon-bold:lang(be) { background-image: url("themes/mediawiki/images/icons/bold-cyrl-te.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-te.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-te.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-cyrl-te.png"); } /* @noflip */ .oo-ui-icon-bold:lang(cs) { background-image: url("themes/mediawiki/images/icons/bold-b.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b.png"); } /* @noflip */ .oo-ui-icon-bold:lang(en) { background-image: url("themes/mediawiki/images/icons/bold-b.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg"); background-image: 
-o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b.png"); } /* @noflip */ .oo-ui-icon-bold:lang(he) { background-image: url("themes/mediawiki/images/icons/bold-b.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b.png"); } /* @noflip */ .oo-ui-icon-bold:lang(ml) { background-image: url("themes/mediawiki/images/icons/bold-b.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b.png"); } /* @noflip */ .oo-ui-icon-bold:lang(pl) { background-image: url("themes/mediawiki/images/icons/bold-b.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b.png"); } /* @noflip */ .oo-ui-icon-bold:lang(sco) { background-image: url("themes/mediawiki/images/icons/bold-b.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b.png"); } /* @noflip */ .oo-ui-icon-bold:lang(da) { 
background-image: url("themes/mediawiki/images/icons/bold-f.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f.png"); } /* @noflip */ .oo-ui-icon-bold:lang(de) { background-image: url("themes/mediawiki/images/icons/bold-f.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f.png"); } /* @noflip */ .oo-ui-icon-bold:lang(hu) { background-image: url("themes/mediawiki/images/icons/bold-f.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f.png"); } /* @noflip */ .oo-ui-icon-bold:lang(ksh) { background-image: url("themes/mediawiki/images/icons/bold-f.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f.png"); } /* @noflip */ .oo-ui-icon-bold:lang(nn) { background-image: url("themes/mediawiki/images/icons/bold-f.png"); background-image: -<API key>(transparent, transparent), /* @embed */ 
url("themes/mediawiki/images/icons/bold-f.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f.png"); } /* @noflip */ .oo-ui-icon-bold:lang(no) { background-image: url("themes/mediawiki/images/icons/bold-f.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f.png"); } /* @noflip */ .oo-ui-icon-bold:lang(sv) { background-image: url("themes/mediawiki/images/icons/bold-f.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f.png"); } /* @noflip */ .oo-ui-icon-bold:lang(es) { background-image: url("themes/mediawiki/images/icons/bold-n.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-n.png"); } /* @noflip */ .oo-ui-icon-bold:lang(gl) { background-image: url("themes/mediawiki/images/icons/bold-n.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ 
url("themes/mediawiki/images/icons/bold-n.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-n.png"); } /* @noflip */ .oo-ui-icon-bold:lang(pt) { background-image: url("themes/mediawiki/images/icons/bold-n.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-n.png"); } /* @noflip */ .oo-ui-icon-bold:lang(eu) { background-image: url("themes/mediawiki/images/icons/bold-l.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-l.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-l.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-l.png"); } /* @noflip */ .oo-ui-icon-bold:lang(fi) { background-image: url("themes/mediawiki/images/icons/bold-l.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-l.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-l.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-l.png"); } /* @noflip */ .oo-ui-icon-bold:lang(fa) { background-image: url("themes/mediawiki/images/icons/bold-arab-dad.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-arab-dad.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-arab-dad.svg"); background-image: -o-linear-gradient(transparent, transparent), 
url("themes/mediawiki/images/icons/bold-arab-dad.png"); } /* @noflip */ .oo-ui-icon-bold:lang(fr) { background-image: url("themes/mediawiki/images/icons/bold-g.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-g.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-g.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-g.png"); } /* @noflip */ .oo-ui-icon-bold:lang(it) { background-image: url("themes/mediawiki/images/icons/bold-g.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-g.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-g.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-g.png"); } /* @noflip */ .oo-ui-icon-bold:lang(hy) { background-image: url("themes/mediawiki/images/icons/bold-armn-to.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-armn-to.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-armn-to.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-armn-to.png"); } /* @noflip */ .oo-ui-icon-bold:lang(ka) { background-image: url("themes/mediawiki/images/icons/bold-geor-man.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-geor-man.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-geor-man.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-geor-man.png"); } /* @noflip */ 
.oo-ui-icon-bold:lang(ky) { background-image: url("themes/mediawiki/images/icons/bold-cyrl-zhe.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-zhe.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-zhe.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-cyrl-zhe.png"); } /* @noflip */ .oo-ui-icon-bold:lang(ru) { background-image: url("themes/mediawiki/images/icons/bold-cyrl-zhe.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-zhe.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-zhe.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-cyrl-zhe.png"); } /* @noflip */ .oo-ui-icon-bold:lang(nl) { background-image: url("themes/mediawiki/images/icons/bold-v.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-v.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-v.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-v.png"); } /* @noflip */ .oo-ui-icon-bold:lang(os) { background-image: url("themes/mediawiki/images/icons/bold-cyrl-be.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-be.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-be.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-cyrl-be.png"); } .<API key> { background-image: 
url("themes/mediawiki/images/icons/bold-a-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-a-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-a-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-a-invert.png"); } /* @noflip */ .<API key>:lang(ar) { background-image: url("themes/mediawiki/images/icons/<API key>.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/<API key>.png"); } /* @noflip */ .<API key>:lang(be) { background-image: url("themes/mediawiki/images/icons/bold-cyrl-te-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-te-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-te-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-cyrl-te-invert.png"); } /* @noflip */ .<API key>:lang(cs) { background-image: url("themes/mediawiki/images/icons/bold-b-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b-invert.png"); } /* @noflip */ .<API key>:lang(en) { background-image: 
url("themes/mediawiki/images/icons/bold-b-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b-invert.png"); } /* @noflip */ .<API key>:lang(he) { background-image: url("themes/mediawiki/images/icons/bold-b-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b-invert.png"); } /* @noflip */ .<API key>:lang(ml) { background-image: url("themes/mediawiki/images/icons/bold-b-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b-invert.png"); } /* @noflip */ .<API key>:lang(pl) { background-image: url("themes/mediawiki/images/icons/bold-b-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b-invert.png"); } /* @noflip */ .<API key>:lang(sco) { background-image: 
url("themes/mediawiki/images/icons/bold-b-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b-invert.png"); } /* @noflip */ .<API key>:lang(da) { background-image: url("themes/mediawiki/images/icons/bold-f-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f-invert.png"); } /* @noflip */ .<API key>:lang(de) { background-image: url("themes/mediawiki/images/icons/bold-f-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f-invert.png"); } /* @noflip */ .<API key>:lang(hu) { background-image: url("themes/mediawiki/images/icons/bold-f-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f-invert.png"); } /* @noflip */ .<API key>:lang(ksh) { background-image: 
url("themes/mediawiki/images/icons/bold-f-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f-invert.png"); } /* @noflip */ .<API key>:lang(nn) { background-image: url("themes/mediawiki/images/icons/bold-f-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f-invert.png"); } /* @noflip */ .<API key>:lang(no) { background-image: url("themes/mediawiki/images/icons/bold-f-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f-invert.png"); } /* @noflip */ .<API key>:lang(sv) { background-image: url("themes/mediawiki/images/icons/bold-f-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f-invert.png"); } /* @noflip */ .<API key>:lang(es) { background-image: 
url("themes/mediawiki/images/icons/bold-n-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-n-invert.png"); } /* @noflip */ .<API key>:lang(gl) { background-image: url("themes/mediawiki/images/icons/bold-n-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-n-invert.png"); } /* @noflip */ .<API key>:lang(pt) { background-image: url("themes/mediawiki/images/icons/bold-n-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-n-invert.png"); } /* @noflip */ .<API key>:lang(eu) { background-image: url("themes/mediawiki/images/icons/bold-l-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-l-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-l-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-l-invert.png"); } /* @noflip */ .<API key>:lang(fi) { background-image: 
url("themes/mediawiki/images/icons/bold-l-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-l-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-l-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-l-invert.png"); } /* @noflip */ .<API key>:lang(fa) { background-image: url("themes/mediawiki/images/icons/<API key>.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/<API key>.png"); } /* @noflip */ .<API key>:lang(fr) { background-image: url("themes/mediawiki/images/icons/bold-g-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-g-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-g-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-g-invert.png"); } /* @noflip */ .<API key>:lang(it) { background-image: url("themes/mediawiki/images/icons/bold-g-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-g-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-g-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-g-invert.png"); } /* @noflip */ .<API key>:lang(hy) { background-image: url("themes/mediawiki/images/icons/bold-armn-to-invert.png"); 
background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-armn-to-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-armn-to-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-armn-to-invert.png"); } /* @noflip */ .<API key>:lang(ka) { background-image: url("themes/mediawiki/images/icons/<API key>.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/<API key>.png"); } /* @noflip */ .<API key>:lang(ky) { background-image: url("themes/mediawiki/images/icons/<API key>.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/<API key>.png"); } /* @noflip */ .<API key>:lang(ru) { background-image: url("themes/mediawiki/images/icons/<API key>.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/<API key>.png"); } /* @noflip */ .<API key>:lang(nl) { background-image: url("themes/mediawiki/images/icons/bold-v-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ 
url("themes/mediawiki/images/icons/bold-v-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-v-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-v-invert.png"); } /* @noflip */ .<API key>:lang(os) { background-image: url("themes/mediawiki/images/icons/bold-cyrl-be-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-be-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-be-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-cyrl-be-invert.png"); } .oo-ui-icon-italic { background-image: url("themes/mediawiki/images/icons/italic-a.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-a.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-a.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-a.png"); } /* @noflip */ .oo-ui-icon-italic:lang(ar) { background-image: url("themes/mediawiki/images/icons/italic-arab-meem.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-arab-meem.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-arab-meem.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-arab-meem.png"); } /* @noflip */ .oo-ui-icon-italic:lang(cs) { background-image: url("themes/mediawiki/images/icons/italic-i.png"); background-image: -<API key>(transparent, transparent), /* @embed */ 
url("themes/mediawiki/images/icons/italic-i.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i.png"); } /* @noflip */ .oo-ui-icon-italic:lang(en) { background-image: url("themes/mediawiki/images/icons/italic-i.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i.png"); } /* @noflip */ .oo-ui-icon-italic:lang(fr) { background-image: url("themes/mediawiki/images/icons/italic-i.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i.png"); } /* @noflip */ .oo-ui-icon-italic:lang(he) { background-image: url("themes/mediawiki/images/icons/italic-i.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i.png"); } /* @noflip */ .oo-ui-icon-italic:lang(ml) { background-image: url("themes/mediawiki/images/icons/italic-i.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg"); background-image: linear-gradient(transparent, 
transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i.png"); } /* @noflip */ .oo-ui-icon-italic:lang(pl) { background-image: url("themes/mediawiki/images/icons/italic-i.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i.png"); } /* @noflip */ .oo-ui-icon-italic:lang(pt) { background-image: url("themes/mediawiki/images/icons/italic-i.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i.png"); } /* @noflip */ .oo-ui-icon-italic:lang(sco) { background-image: url("themes/mediawiki/images/icons/italic-i.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i.png"); } /* @noflip */ .oo-ui-icon-italic:lang(be) { background-image: url("themes/mediawiki/images/icons/italic-k.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg"); background-image: 
-o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k.png"); } /* @noflip */ .oo-ui-icon-italic:lang(da) { background-image: url("themes/mediawiki/images/icons/italic-k.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k.png"); } /* @noflip */ .oo-ui-icon-italic:lang(de) { background-image: url("themes/mediawiki/images/icons/italic-k.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k.png"); } /* @noflip */ .oo-ui-icon-italic:lang(fi) { background-image: url("themes/mediawiki/images/icons/italic-k.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k.png"); } /* @noflip */ .oo-ui-icon-italic:lang(ky) { background-image: url("themes/mediawiki/images/icons/italic-k.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k.png"); } 
/* @noflip */ .oo-ui-icon-italic:lang(nn) { background-image: url("themes/mediawiki/images/icons/italic-k.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k.png"); } /* @noflip */ .oo-ui-icon-italic:lang(no) { background-image: url("themes/mediawiki/images/icons/italic-k.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k.png"); } /* @noflip */ .oo-ui-icon-italic:lang(os) { background-image: url("themes/mediawiki/images/icons/italic-k.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k.png"); } /* @noflip */ .oo-ui-icon-italic:lang(sv) { background-image: url("themes/mediawiki/images/icons/italic-k.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k.png"); } /* @noflip */ .oo-ui-icon-italic:lang(ru) { background-image: 
url("themes/mediawiki/images/icons/italic-k.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k.png"); } /* @noflip */ .oo-ui-icon-italic:lang(es) { background-image: url("themes/mediawiki/images/icons/italic-c.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-c.png"); } /* @noflip */ .oo-ui-icon-italic:lang(gl) { background-image: url("themes/mediawiki/images/icons/italic-c.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-c.png"); } /* @noflip */ .oo-ui-icon-italic:lang(it) { background-image: url("themes/mediawiki/images/icons/italic-c.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-c.png"); } /* @noflip */ .oo-ui-icon-italic:lang(nl) { background-image: url("themes/mediawiki/images/icons/italic-c.png"); background-image: -<API key>(transparent, 
transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-c.png"); } /* @noflip */ .oo-ui-icon-italic:lang(eu) { background-image: url("themes/mediawiki/images/icons/italic-e.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-e.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-e.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-e.png"); } /* @noflip */ .oo-ui-icon-italic:lang(fa) { background-image: url("themes/mediawiki/images/icons/<API key>.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/<API key>.png"); } /* @noflip */ .oo-ui-icon-italic:lang(hu) { background-image: url("themes/mediawiki/images/icons/italic-d.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-d.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-d.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-d.png"); } /* @noflip */ .oo-ui-icon-italic:lang(hy) { background-image: url("themes/mediawiki/images/icons/italic-armn-sha.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-armn-sha.svg"); 
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-armn-sha.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-armn-sha.png"); } /* @noflip */ .oo-ui-icon-italic:lang(ksh) { background-image: url("themes/mediawiki/images/icons/italic-s.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-s.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-s.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-s.png"); } /* @noflip */ .oo-ui-icon-italic:lang(ka) { background-image: url("themes/mediawiki/images/icons/italic-geor-kan.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-geor-kan.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-geor-kan.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-geor-kan.png"); } .<API key> { background-image: url("themes/mediawiki/images/icons/italic-a-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-a-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-a-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-a-invert.png"); } /* @noflip */ .<API key>:lang(ar) { background-image: url("themes/mediawiki/images/icons/<API key>.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: linear-gradient(transparent, transparent), /* 
@embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/<API key>.png"); } /* @noflip */ .<API key>:lang(cs) { background-image: url("themes/mediawiki/images/icons/italic-i-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i-invert.png"); } /* @noflip */ .<API key>:lang(en) { background-image: url("themes/mediawiki/images/icons/italic-i-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i-invert.png"); } /* @noflip */ .<API key>:lang(fr) { background-image: url("themes/mediawiki/images/icons/italic-i-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i-invert.png"); } /* @noflip */ .<API key>:lang(he) { background-image: url("themes/mediawiki/images/icons/italic-i-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ 
url("themes/mediawiki/images/icons/italic-i-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i-invert.png"); } /* @noflip */ .<API key>:lang(ml) { background-image: url("themes/mediawiki/images/icons/italic-i-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i-invert.png"); } /* @noflip */ .<API key>:lang(pl) { background-image: url("themes/mediawiki/images/icons/italic-i-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i-invert.png"); } /* @noflip */ .<API key>:lang(pt) { background-image: url("themes/mediawiki/images/icons/italic-i-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i-invert.png"); } /* @noflip */ .<API key>:lang(sco) { background-image: url("themes/mediawiki/images/icons/italic-i-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ 
url("themes/mediawiki/images/icons/italic-i-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i-invert.png"); } /* @noflip */ .<API key>:lang(be) { background-image: url("themes/mediawiki/images/icons/italic-k-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k-invert.png"); } /* @noflip */ .<API key>:lang(da) { background-image: url("themes/mediawiki/images/icons/italic-k-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k-invert.png"); } /* @noflip */ .<API key>:lang(de) { background-image: url("themes/mediawiki/images/icons/italic-k-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k-invert.png"); } /* @noflip */ .<API key>:lang(fi) { background-image: url("themes/mediawiki/images/icons/italic-k-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ 
url("themes/mediawiki/images/icons/italic-k-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k-invert.png"); } /* @noflip */ .<API key>:lang(ky) { background-image: url("themes/mediawiki/images/icons/italic-k-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k-invert.png"); } /* @noflip */ .<API key>:lang(nn) { background-image: url("themes/mediawiki/images/icons/italic-k-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k-invert.png"); } /* @noflip */ .<API key>:lang(no) { background-image: url("themes/mediawiki/images/icons/italic-k-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k-invert.png"); } /* @noflip */ .<API key>:lang(os) { background-image: url("themes/mediawiki/images/icons/italic-k-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ 
url("themes/mediawiki/images/icons/italic-k-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k-invert.png"); } /* @noflip */ .<API key>:lang(sv) { background-image: url("themes/mediawiki/images/icons/italic-k-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k-invert.png"); } /* @noflip */ .<API key>:lang(ru) { background-image: url("themes/mediawiki/images/icons/italic-k-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k-invert.png"); } /* @noflip */ .<API key>:lang(es) { background-image: url("themes/mediawiki/images/icons/italic-c-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-c-invert.png"); } /* @noflip */ .<API key>:lang(gl) { background-image: url("themes/mediawiki/images/icons/italic-c-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ 
url("themes/mediawiki/images/icons/italic-c-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-c-invert.png"); } /* @noflip */ .<API key>:lang(it) { background-image: url("themes/mediawiki/images/icons/italic-c-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-c-invert.png"); } /* @noflip */ .<API key>:lang(nl) { background-image: url("themes/mediawiki/images/icons/italic-c-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-c-invert.png"); } /* @noflip */ .<API key>:lang(eu) { background-image: url("themes/mediawiki/images/icons/italic-e-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-e-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-e-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-e-invert.png"); } /* @noflip */ .<API key>:lang(fa) { background-image: url("themes/mediawiki/images/icons/<API key>.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ 
url("themes/mediawiki/images/icons/<API key>.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/<API key>.png"); } /* @noflip */ .<API key>:lang(hu) { background-image: url("themes/mediawiki/images/icons/italic-d-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-d-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-d-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-d-invert.png"); } /* @noflip */ .<API key>:lang(hy) { background-image: url("themes/mediawiki/images/icons/<API key>.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/<API key>.png"); } /* @noflip */ .<API key>:lang(ksh) { background-image: url("themes/mediawiki/images/icons/italic-s-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-s-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-s-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-s-invert.png"); } /* @noflip */ .<API key>:lang(ka) { background-image: url("themes/mediawiki/images/icons/<API key>.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); 
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/<API key>.png"); } .<API key> { background-image: url("themes/mediawiki/images/icons/strikethrough-a.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/strikethrough-a.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/strikethrough-a.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/strikethrough-a.png"); } /* @noflip */ .<API key>:lang(en) { background-image: url("themes/mediawiki/images/icons/strikethrough-s.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/strikethrough-s.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/strikethrough-s.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/strikethrough-s.png"); } /* @noflip */ .<API key>:lang(fi) { background-image: url("themes/mediawiki/images/icons/strikethrough-y.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/strikethrough-y.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/strikethrough-y.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/strikethrough-y.png"); } .<API key> { background-image: url("themes/mediawiki/images/icons/<API key>.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: -o-linear-gradient(transparent, transparent), 
url("themes/mediawiki/images/icons/<API key>.png"); } /* @noflip */ .<API key>:lang(en) { background-image: url("themes/mediawiki/images/icons/<API key>.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/<API key>.png"); } /* @noflip */ .<API key>:lang(fi) { background-image: url("themes/mediawiki/images/icons/<API key>.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/<API key>.png"); } .<API key> { background-image: url("themes/mediawiki/images/icons/underline-a.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/underline-a.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/underline-a.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/underline-a.png"); } /* @noflip */ .<API key>:lang(en) { background-image: url("themes/mediawiki/images/icons/underline-u.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/underline-u.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/underline-u.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/underline-u.png"); } .<API key> { background-image: 
url("themes/mediawiki/images/icons/underline-a-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/underline-a-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/underline-a-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/underline-a-invert.png"); } /* @noflip */ .<API key>:lang(en) { background-image: url("themes/mediawiki/images/icons/underline-u-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/underline-u-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/underline-u-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/underline-u-invert.png"); } .<API key> { background-image: url("themes/mediawiki/images/icons/language.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/language.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/language.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/language.png"); } .<API key> { background-image: url("themes/mediawiki/images/icons/language-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/language-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/language-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/language-invert.png"); } .<API key> { background-image: url("themes/mediawiki/images/icons/<API key>.png"); background-image: -<API 
key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/<API key>.png"); } .<API key> { background-image: url("themes/mediawiki/images/icons/<API key>.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/<API key>.png"); } .<API key> { background-image: url("themes/mediawiki/images/icons/<API key>.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/<API key>.png"); } .<API key> { background-image: url("themes/mediawiki/images/icons/<API key>.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/<API key>.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/<API key>.png"); } .<API key> { background-image: url("themes/mediawiki/images/icons/text-style.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/text-style.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ 
url("themes/mediawiki/images/icons/text-style.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/text-style.png"); } .<API key> { background-image: url("themes/mediawiki/images/icons/text-style-invert.png"); background-image: -<API key>(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/text-style-invert.svg"); background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/text-style-invert.svg"); background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/text-style-invert.png"); }
#!/usr/bin/env python2
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_NOP2, OP_DROP
from binascii import hexlify, unhexlify
import cStringIO
import time

def cltv_invalidate(tx):
    '''Modify the signature in vin 0 of the tx to fail CLTV

    Prepends -1 CLTV DROP in the scriptSig itself.  Under BIP65 rules,
    OP_CHECKLOCKTIMEVERIFY fails immediately when the top stack item is
    negative, so this makes the input invalid once CLTV is enforced.
    '''
    tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_NOP2, OP_DROP] +
                                  list(CScript(tx.vin[0].scriptSig)))

'''
This test is meant to exercise BIP65 (CHECKLOCKTIMEVERIFY)
Connect to a single node.
Mine 2 (version 3) blocks (save the coinbases for later).
Generate 98 more version 3 blocks, verify the node accepts.
Mine 749 version 4 blocks, verify the node accepts.
Check that the new CLTV rules are not enforced on the 750th version 4 block.
Check that the new CLTV rules are enforced on the 751st version 4 block.
Mine 199 new version blocks.
Mine 1 old-version block.
Mine 1 new version block.
Mine 1 old version block, see that the node rejects.
'''

class BIP65Test(BitcoinTestFramework):

    def __init__(self):
        self.num_nodes = 1

    def setup_network(self):
        # Must set the blockversion for this test so that all mined blocks
        # start out as version 3 (pre-BIP65).
        self.nodes = start_nodes(1, self.options.tmpdir,
                                 extra_args=[['-debug', '-whitelist=127.0.0.1',
                                              '-blockversion=3']],
                                 binary=[self.options.testbinary])

    def run_test(self):
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        NetworkThread().start()  # Start up network handling in another thread
        test.run()

    def create_transaction(self, node, coinbase, to_address, amount):
        '''Build and sign a transaction spending the coinbase of the given
        block hash to to_address, returning a deserialized CTransaction.'''
        from_txid = node.getblock(coinbase)['tx'][0]
        inputs = [{"txid": from_txid, "vout": 0}]
        outputs = {to_address: amount}
        rawtx = node.createrawtransaction(inputs, outputs)
        signresult = node.signrawtransaction(rawtx)
        tx = CTransaction()
        f = cStringIO.StringIO(unhexlify(signresult['hex']))
        tx.deserialize(f)
        return tx

    def get_tests(self):
        # Two version-3 coinbases are saved so their outputs can later be
        # spent by deliberately CLTV-invalid transactions.
        self.coinbase_blocks = self.nodes[0].setgenerate(True, 2)
        self.tip = int("0x" + self.nodes[0].getbestblockhash() + "L", 0)
        self.nodeaddress = self.nodes[0].getnewaddress()
        self.last_block_time = time.time()

        ''' 98 more version 3 blocks '''
        test_blocks = []
        for i in xrange(98):
            block = create_block(self.tip, create_coinbase(2),
                                 self.last_block_time + 1)
            block.nVersion = 3
            block.rehash()
            block.solve()
            test_blocks.append([block, True])
            self.last_block_time += 1
            self.tip = block.sha256
        yield TestInstance(test_blocks, sync_every_block=False)

        ''' Mine 749 version 4 blocks '''
        test_blocks = []
        for i in xrange(749):
            block = create_block(self.tip, create_coinbase(2),
                                 self.last_block_time + 1)
            block.nVersion = 4
            block.rehash()
            block.solve()
            test_blocks.append([block, True])
            self.last_block_time += 1
            self.tip = block.sha256
        yield TestInstance(test_blocks, sync_every_block=False)

        '''
        Check that the new CLTV rules are not enforced in the 750th
        version 4 block.
        '''
        spendtx = self.create_transaction(self.nodes[0],
                                          self.coinbase_blocks[0],
                                          self.nodeaddress, 1.0)
        cltv_invalidate(spendtx)
        spendtx.rehash()

        # Below the 750-of-1000 supermajority threshold the invalid-CLTV
        # spend must still be accepted.
        block = create_block(self.tip, create_coinbase(2),
                             self.last_block_time + 1)
        block.nVersion = 4
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        self.last_block_time += 1
        self.tip = block.sha256
        yield TestInstance([[block, True]])

        '''
        Check that the new CLTV rules are enforced in the 751st version 4
        block.
        '''
        spendtx = self.create_transaction(self.nodes[0],
                                          self.coinbase_blocks[1],
                                          self.nodeaddress, 1.0)
        cltv_invalidate(spendtx)
        spendtx.rehash()

        # At the threshold the CLTV-invalid spend makes the block invalid,
        # so the tip is deliberately NOT advanced here.
        block = create_block(self.tip, create_coinbase(1),
                             self.last_block_time + 1)
        block.nVersion = 4
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        self.last_block_time += 1
        yield TestInstance([[block, False]])

        ''' Mine 199 new version blocks on last valid tip '''
        test_blocks = []
        for i in xrange(199):
            block = create_block(self.tip, create_coinbase(1),
                                 self.last_block_time + 1)
            block.nVersion = 4
            block.rehash()
            block.solve()
            test_blocks.append([block, True])
            self.last_block_time += 1
            self.tip = block.sha256
        yield TestInstance(test_blocks, sync_every_block=False)

        ''' Mine 1 old version block '''
        # 949 of the last 1000 blocks are version 4, so one more version 3
        # block is still tolerated.
        block = create_block(self.tip, create_coinbase(1),
                             self.last_block_time + 1)
        block.nVersion = 3
        block.rehash()
        block.solve()
        self.last_block_time += 1
        self.tip = block.sha256
        yield TestInstance([[block, True]])

        ''' Mine 1 new version block '''
        block = create_block(self.tip, create_coinbase(1),
                             self.last_block_time + 1)
        block.nVersion = 4
        block.rehash()
        block.solve()
        self.last_block_time += 1
        self.tip = block.sha256
        yield TestInstance([[block, True]])

        ''' Mine 1 old version block, should be invalid '''
        # 950 of the last 1000 blocks are now version 4, so version 3 blocks
        # are rejected outright; tip is not advanced.
        block = create_block(self.tip, create_coinbase(1),
                             self.last_block_time + 1)
        block.nVersion = 3
        block.rehash()
        block.solve()
        self.last_block_time += 1
        yield TestInstance([[block, False]])

if __name__ == '__main__':
    BIP65Test().main()
(function(root) { 'use strict'; var DependencyInjection = new (function DependencyInjection() { var _this = this, _interfaces = {}; function <API key>(factoryFunction) { if (typeof factoryFunction == 'function') { var funcString = factoryFunction .toString() // remove comments .replace(/((\/\/.*$)|(\/\*[\s\S]*?\*\/))/mg, ''); var matches = funcString.match(/^function\s*[^\(]*\s*\(\s*([^\)]*)\)/m); if (matches === null || matches.length < 2) { factoryFunction = [factoryFunction]; } else { factoryFunction = matches[1] .replace(/\s/g, '') .split(',') .filter(function(arg) { return arg.trim().length > 0; }) .concat(factoryFunction); } return factoryFunction; } else { var factoryArrayCopy = []; for (var i = 0; i < factoryFunction.length; i++) { factoryArrayCopy.push(factoryFunction[i]); } factoryFunction = factoryArrayCopy; } return factoryFunction; } function Injector(instanceName) { function _getInjections(dependencies, name, customDependencies, noError) { var interfaces = _interfaces[name].interfacesSupported, injections = [], i, j; for (i = 0; i < dependencies.length; i++) { var factory = null; if (customDependencies && typeof customDependencies[dependencies[i]] != 'undefined') { factory = customDependencies[dependencies[i]]; } else { for (j = 0; j < interfaces.length; j++) { if (!_interfaces[interfaces[j]]) { if (noError) { return false; } throw new Error('DependencyInjection: "' + interfaces[j] + '" interface is not registered.'); } factory = _interfaces[interfaces[j]].factories[dependencies[i]]; if (factory) { factory.interfaceName = interfaces[j]; break; } } } if (factory) { if (!factory.instantiated) { var deps = <API key>(factory.result); factory.result = deps.pop(); var factoryInjections = _getInjections(deps, factory.interfaceName); factory.result = factory.result.apply(_this, factoryInjections); factory.instantiated = true; } injections.push(factory.result); } else { if (noError) { return false; } throw new Error('DependencyInjection: "' + dependencies[i] + 
'" is not registered or accessible in ' + name + '.'); } } return injections; } this.get = function(factoryName, noError) { var injections = _getInjections([factoryName], instanceName, null, noError); if (injections.length) { return injections[0]; } return false; }; this.invoke = function(thisArg, func, customDependencies) { var dependencies = <API key>(func); func = dependencies.pop(); if (customDependencies) { var <API key> = {}, interfaceName, factory; for (interfaceName in customDependencies) { for (factory in customDependencies[interfaceName]) { <API key>[factory] = { interfaceName: interfaceName, instantiated: false, result: customDependencies[interfaceName][factory] }; } } customDependencies = <API key>; } var injections = _getInjections(dependencies, instanceName, customDependencies); return func.apply(thisArg, injections); }; } this.injector = {}; this.registerInterface = function(name, canInjectInterfaces) { if (_this[name]) { return _this; } _interfaces[name] = { interfacesSupported: (canInjectInterfaces || []).concat(name), factories: {} }; _this.injector[name] = new Injector(name); _this[name] = function <API key>(factoryName, factoryFunction, replaceIfExists) { if (!replaceIfExists && _interfaces[name].factories[factoryName]) { return _this; } _interfaces[name].factories[factoryName] = { instantiated: false, result: factoryFunction }; return _this; }; return _this; }; })(); if (typeof module != 'undefined' && typeof module.exports != 'undefined') { module.exports = DependencyInjection; } else { root.DependencyInjection = DependencyInjection; } })(this);
"use strict"; var ReactComponent = require("./ReactComponent"); var ReactContext = require("./ReactContext"); var ReactCurrentOwner = require("./ReactCurrentOwner"); var ReactElement = require("./ReactElement"); var <API key> = require("./<API key>"); var ReactEmptyComponent = require("./ReactEmptyComponent"); var ReactErrorUtils = require("./ReactErrorUtils"); var ReactLegacyElement = require("./ReactLegacyElement"); var ReactOwner = require("./ReactOwner"); var ReactPerf = require("./ReactPerf"); var ReactPropTransferer = require("./ReactPropTransferer"); var <API key> = require("./<API key>"); var <API key> = require("./<API key>"); var ReactUpdates = require("./ReactUpdates"); var assign = require("./Object.assign"); var <API key> = require("./<API key>"); var invariant = require("./invariant"); var keyMirror = require("./keyMirror"); var keyOf = require("./keyOf"); var monitorCodeUse = require("./monitorCodeUse"); var mapObject = require("./mapObject"); var <API key> = require("./<API key>"); var warning = require("./warning"); var MIXINS_KEY = keyOf({mixins: null}); /** * Policies that describe methods in `<API key>`. */ var SpecPolicy = keyMirror({ /** * These methods may be defined only once by the class specification or mixin. */ DEFINE_ONCE: null, /** * These methods may be defined by both the class specification and mixins. * Subsequent definitions will be chained. These methods must return void. */ DEFINE_MANY: null, /** * These methods are overriding the base <API key> class. */ OVERRIDE_BASE: null, /** * These methods are similar to DEFINE_MANY, except we assume they return * objects. We try to merge the keys of the return values of all the mixed in * functions. If there is a key conflict we throw. */ DEFINE_MANY_MERGED: null }); var injectedMixins = []; /** * Composite components are higher-level components that compose other composite * or native components. 
* * To create a new type of `<API key>`, pass a specification of * your new class to `React.createClass`. The only requirement of your class * specification is that you implement a `render` method. * * var MyComponent = React.createClass({ * render: function() { * return <div>Hello World</div>; * } * }); * * The class specification supports a specific protocol of methods that have * special meaning (e.g. `render`). See `<API key>` for * more the comprehensive protocol. Any other properties and methods in the * class specification will available on the prototype. * * @interface <API key> * @internal */ var <API key> = { /** * An array of Mixin objects to include when defining your component. * * @type {array} * @optional */ mixins: SpecPolicy.DEFINE_MANY, /** * An object containing properties and methods that should be defined on * the component's constructor instead of its prototype (static methods). * * @type {object} * @optional */ statics: SpecPolicy.DEFINE_MANY, /** * Definition of prop types for this component. * * @type {object} * @optional */ propTypes: SpecPolicy.DEFINE_MANY, /** * Definition of context types for this component. * * @type {object} * @optional */ contextTypes: SpecPolicy.DEFINE_MANY, /** * Definition of context types this component sets for its children. * * @type {object} * @optional */ childContextTypes: SpecPolicy.DEFINE_MANY, /** * Invoked when the component is mounted. Values in the mapping will be set on * `this.props` if that prop is not specified (i.e. using an `in` check). * * This method is invoked before `getInitialState` and therefore cannot rely * on `this.state` or use `this.setState`. * * @return {object} * @optional */ getDefaultProps: SpecPolicy.DEFINE_MANY_MERGED, /** * Invoked once before the component is mounted. The return value will be used * as the initial value of `this.state`. 
* * getInitialState: function() { * return { * isOn: false, * fooBaz: new BazFoo() * } * } * * @return {object} * @optional */ getInitialState: SpecPolicy.DEFINE_MANY_MERGED, /** * @return {object} * @optional */ getChildContext: SpecPolicy.DEFINE_MANY_MERGED, /** * Uses props from `this.props` and state from `this.state` to render the * structure of the component. * * No guarantees are made about when or how often this method is invoked, so * it must not have side effects. * * render: function() { * var name = this.props.name; * return <div>Hello, {name}!</div>; * } * * @return {ReactComponent} * @nosideeffects * @required */ render: SpecPolicy.DEFINE_ONCE, /** * Invoked when the component is initially created and about to be mounted. * This may have side effects, but any external subscriptions or data created * by this method must be cleaned up in `<API key>`. * * @optional */ componentWillMount: SpecPolicy.DEFINE_MANY, /** * Invoked when the component has been mounted and has a DOM representation. * However, there is no guarantee that the DOM node is in the document. * * Use this as an opportunity to operate on the DOM when the component has * been mounted (initialized and rendered) for the first time. * * @param {DOMElement} rootNode DOM element representing the component. * @optional */ componentDidMount: SpecPolicy.DEFINE_MANY, /** * Invoked before the component receives new props. * * Use this as an opportunity to react to a prop transition by updating the * state using `this.setState`. Current props are accessed via `this.props`. * * <API key>: function(nextProps, nextContext) { * this.setState({ * likesIncreasing: nextProps.likeCount > this.props.likeCount * }); * } * * NOTE: There is no equivalent `<API key>`. An incoming prop * transition may cause a state change, but the opposite is not true. If you * need it, you are probably looking for `componentWillUpdate`. 
* * @param {object} nextProps * @optional */ <API key>: SpecPolicy.DEFINE_MANY, /** * Invoked while deciding if the component should be updated as a result of * receiving new props, state and/or context. * * Use this as an opportunity to `return false` when you're certain that the * transition to the new props/state/context will not require a component * update. * * <API key>: function(nextProps, nextState, nextContext) { * return !equal(nextProps, this.props) || * !equal(nextState, this.state) || * !equal(nextContext, this.context); * } * * @param {object} nextProps * @param {?object} nextState * @param {?object} nextContext * @return {boolean} True if the component should update. * @optional */ <API key>: SpecPolicy.DEFINE_ONCE, /** * Invoked when the component is about to update due to a transition from * `this.props`, `this.state` and `this.context` to `nextProps`, `nextState` * and `nextContext`. * * Use this as an opportunity to perform preparation before an update occurs. * * NOTE: You **cannot** use `this.setState()` in this method. * * @param {object} nextProps * @param {?object} nextState * @param {?object} nextContext * @param {<API key>} transaction * @optional */ componentWillUpdate: SpecPolicy.DEFINE_MANY, /** * Invoked when the component's DOM representation has been updated. * * Use this as an opportunity to operate on the DOM when the component has * been updated. * * @param {object} prevProps * @param {?object} prevState * @param {?object} prevContext * @param {DOMElement} rootNode DOM element representing the component. * @optional */ componentDidUpdate: SpecPolicy.DEFINE_MANY, /** * Invoked when the component is about to be removed from its parent and have * its DOM representation destroyed. * * Use this as an opportunity to deallocate any external resources. * * NOTE: There is no `componentDidUnmount` since your component will have been * destroyed by that point. 
* * @optional */ <API key>: SpecPolicy.DEFINE_MANY, /** * Updates the component's currently mounted DOM representation. * * By default, this implements React's rendering and reconciliation algorithm. * Sophisticated clients may wish to override this. * * @param {<API key>} transaction * @internal * @overridable */ updateComponent: SpecPolicy.OVERRIDE_BASE }; /** * Mapping from class specification keys to special processing functions. * * Although these are declared like instance properties in the specification * when defining classes using `React.createClass`, they are actually static * and are accessible on the constructor instead of the prototype. Despite * being static, they must be defined outside of the "statics" key under * which all other static methods are defined. */ var RESERVED_SPEC_KEYS = { displayName: function(Constructor, displayName) { Constructor.displayName = displayName; }, mixins: function(Constructor, mixins) { if (mixins) { for (var i = 0; i < mixins.length; i++) { <API key>(Constructor, mixins[i]); } } }, childContextTypes: function(Constructor, childContextTypes) { validateTypeDef( Constructor, childContextTypes, <API key>.childContext ); Constructor.childContextTypes = assign( {}, Constructor.childContextTypes, childContextTypes ); }, contextTypes: function(Constructor, contextTypes) { validateTypeDef( Constructor, contextTypes, <API key>.context ); Constructor.contextTypes = assign( {}, Constructor.contextTypes, contextTypes ); }, /** * Special case getDefaultProps which should move into statics but requires * automatic merging. 
*/ getDefaultProps: function(Constructor, getDefaultProps) { if (Constructor.getDefaultProps) { Constructor.getDefaultProps = <API key>( Constructor.getDefaultProps, getDefaultProps ); } else { Constructor.getDefaultProps = getDefaultProps; } }, propTypes: function(Constructor, propTypes) { validateTypeDef( Constructor, propTypes, <API key>.prop ); Constructor.propTypes = assign( {}, Constructor.propTypes, propTypes ); }, statics: function(Constructor, statics) { <API key>(Constructor, statics); } }; function <API key>(component) { var owner = component._owner || null; if (owner && owner.constructor && owner.constructor.displayName) { return ' Check the render method of `' + owner.constructor.displayName + '`.'; } return ''; } function validateTypeDef(Constructor, typeDef, location) { for (var propName in typeDef) { if (typeDef.hasOwnProperty(propName)) { ("production" !== process.env.NODE_ENV ? invariant( typeof typeDef[propName] == 'function', '%s: %s type `%s` is invalid; it must be a function, usually from ' + 'React.PropTypes.', Constructor.displayName || '<API key>', <API key>[location], propName ) : invariant(typeof typeDef[propName] == 'function')); } } } function <API key>(proto, name) { var specPolicy = <API key>.hasOwnProperty(name) ? <API key>[name] : null; // Disallow overriding of base class methods unless explicitly allowed. if (<API key>.hasOwnProperty(name)) { ("production" !== process.env.NODE_ENV ? invariant( specPolicy === SpecPolicy.OVERRIDE_BASE, '<API key>: You are attempting to override ' + '`%s` from your class specification. Ensure that your method names ' + 'do not overlap with React methods.', name ) : invariant(specPolicy === SpecPolicy.OVERRIDE_BASE)); } // Disallow defining methods more than once unless explicitly allowed. if (proto.hasOwnProperty(name)) { ("production" !== process.env.NODE_ENV ? 
invariant( specPolicy === SpecPolicy.DEFINE_MANY || specPolicy === SpecPolicy.DEFINE_MANY_MERGED, '<API key>: You are attempting to define ' + '`%s` on your component more than once. This conflict may be due ' + 'to a mixin.', name ) : invariant(specPolicy === SpecPolicy.DEFINE_MANY || specPolicy === SpecPolicy.DEFINE_MANY_MERGED)); } } function <API key>(instance) { var <API key> = instance.<API key>; ("production" !== process.env.NODE_ENV ? invariant( instance.isMounted() || <API key> === CompositeLifeCycle.MOUNTING, 'replaceState(...): Can only update a mounted or mounting component.' ) : invariant(instance.isMounted() || <API key> === CompositeLifeCycle.MOUNTING)); ("production" !== process.env.NODE_ENV ? invariant( ReactCurrentOwner.current == null, 'replaceState(...): Cannot update during an existing state transition ' + '(such as within `render`). Render methods should be a pure function ' + 'of props and state.' ) : invariant(ReactCurrentOwner.current == null)); ("production" !== process.env.NODE_ENV ? invariant(<API key> !== CompositeLifeCycle.UNMOUNTING, 'replaceState(...): Cannot update while unmounting component. This ' + 'usually means you called setState() on an unmounted component.' ) : invariant(<API key> !== CompositeLifeCycle.UNMOUNTING)); } /** * Mixin helper which handles policy validation and reserved * specification keys when building `<API key>` classses. */ function <API key>(Constructor, spec) { if (!spec) { return; } ("production" !== process.env.NODE_ENV ? invariant( !ReactLegacyElement.isValidFactory(spec), '<API key>: You\'re attempting to ' + 'use a component class as a mixin. Instead, just use a regular object.' ) : invariant(!ReactLegacyElement.isValidFactory(spec))); ("production" !== process.env.NODE_ENV ? invariant( !ReactElement.isValidElement(spec), '<API key>: You\'re attempting to ' + 'use a component as a mixin. Instead, just use a regular object.' 
) : invariant(!ReactElement.isValidElement(spec))); var proto = Constructor.prototype; // By handling mixins before any other properties, we ensure the same // chaining order is applied to methods with DEFINE_MANY policy, whether // mixins are listed before or after these methods in the spec. if (spec.hasOwnProperty(MIXINS_KEY)) { RESERVED_SPEC_KEYS.mixins(Constructor, spec.mixins); } for (var name in spec) { if (!spec.hasOwnProperty(name)) { continue; } if (name === MIXINS_KEY) { // We have already handled mixins in a special case above continue; } var property = spec[name]; <API key>(proto, name); if (RESERVED_SPEC_KEYS.hasOwnProperty(name)) { RESERVED_SPEC_KEYS[name](Constructor, property); } else { // Setup methods on prototype: // The following member methods should not be automatically bound: // 1. Expected <API key> methods (in the "interface"). // 2. Overridden methods (that were mixed in). var <API key> = <API key>.hasOwnProperty(name); var isAlreadyDefined = proto.hasOwnProperty(name); var markedDontBind = property && property.__reactDontBind; var isFunction = typeof property === 'function'; var shouldAutoBind = isFunction && !<API key> && !isAlreadyDefined && !markedDontBind; if (shouldAutoBind) { if (!proto.__reactAutoBindMap) { proto.__reactAutoBindMap = {}; } proto.__reactAutoBindMap[name] = property; proto[name] = property; } else { if (isAlreadyDefined) { var specPolicy = <API key>[name]; // These cases should already be caught by <API key> ("production" !== process.env.NODE_ENV ? invariant( <API key> && ( specPolicy === SpecPolicy.DEFINE_MANY_MERGED || specPolicy === SpecPolicy.DEFINE_MANY ), '<API key>: Unexpected spec policy %s for key %s ' + 'when mixing in component specs.', specPolicy, name ) : invariant(<API key> && ( specPolicy === SpecPolicy.DEFINE_MANY_MERGED || specPolicy === SpecPolicy.DEFINE_MANY ))); // For methods which are defined more than once, call the existing // methods before calling the new property, merging if appropriate. 
if (specPolicy === SpecPolicy.DEFINE_MANY_MERGED) { proto[name] = <API key>(proto[name], property); } else if (specPolicy === SpecPolicy.DEFINE_MANY) { proto[name] = <API key>(proto[name], property); } } else { proto[name] = property; if ("production" !== process.env.NODE_ENV) { // Add verbose displayName to the function, which helps when looking // at profiling tools. if (typeof property === 'function' && spec.displayName) { proto[name].displayName = spec.displayName + '_' + name; } } } } } } } function <API key>(Constructor, statics) { if (!statics) { return; } for (var name in statics) { var property = statics[name]; if (!statics.hasOwnProperty(name)) { continue; } var isReserved = name in RESERVED_SPEC_KEYS; ("production" !== process.env.NODE_ENV ? invariant( !isReserved, '<API key>: You are attempting to define a reserved ' + 'property, `%s`, that shouldn\'t be on the "statics" key. Define it ' + 'as an instance property instead; it will still be accessible on the ' + 'constructor.', name ) : invariant(!isReserved)); var isInherited = name in Constructor; ("production" !== process.env.NODE_ENV ? invariant( !isInherited, '<API key>: You are attempting to define ' + '`%s` on your component more than once. This conflict may be ' + 'due to a mixin.', name ) : invariant(!isInherited)); Constructor[name] = property; } } /** * Merge two objects, but throw if both contain the same key. * * @param {object} one The first object, which is mutated. * @param {object} two The second object * @return {object} one after it has been mutated to contain everything in two. */ function <API key>(one, two) { ("production" !== process.env.NODE_ENV ? invariant( one && two && typeof one === 'object' && typeof two === 'object', '<API key>(): Cannot merge non-objects' ) : invariant(one && two && typeof one === 'object' && typeof two === 'object')); mapObject(two, function(value, key) { ("production" !== process.env.NODE_ENV ? 
invariant( one[key] === undefined, '<API key>(): ' + 'Tried to merge two objects with the same key: `%s`. This conflict ' + 'may be due to a mixin; in particular, this may be caused by two ' + 'getInitialState() or getDefaultProps() methods returning objects ' + 'with clashing keys.', key ) : invariant(one[key] === undefined)); one[key] = value; }); return one; } /** * Creates a function that invokes two functions and merges their return values. * * @param {function} one Function to invoke first. * @param {function} two Function to invoke second. * @return {function} Function that invokes the two argument functions. * @private */ function <API key>(one, two) { return function mergedResult() { var a = one.apply(this, arguments); var b = two.apply(this, arguments); if (a == null) { return b; } else if (b == null) { return a; } return <API key>(a, b); }; } /** * Creates a function that invokes two functions and ignores their return vales. * * @param {function} one Function to invoke first. * @param {function} two Function to invoke second. * @return {function} Function that invokes the two argument functions. * @private */ function <API key>(one, two) { return function chainedFunction() { one.apply(this, arguments); two.apply(this, arguments); }; } var CompositeLifeCycle = keyMirror({ /** * Components in the process of being mounted respond to state changes * differently. */ MOUNTING: null, /** * Components in the process of being unmounted are guarded against state * changes. */ UNMOUNTING: null, /** * Components that are mounted and receiving new props respond to state * changes differently. */ RECEIVING_PROPS: null }); /** * @lends {<API key>.prototype} */ var <API key> = { /** * Base constructor for all composite component. 
* * @param {ReactElement} element * @final * @internal */ construct: function(element) { // Children can be either an array or more than one argument ReactComponent.Mixin.construct.apply(this, arguments); ReactOwner.Mixin.construct.apply(this, arguments); this.state = null; this._pendingState = null; // This is the public post-processed context. The real context and pending // context lives on the element. this.context = null; this.<API key> = null; }, /** * Checks whether or not this composite component is mounted. * @return {boolean} True if mounted, false otherwise. * @protected * @final */ isMounted: function() { return ReactComponent.Mixin.isMounted.call(this) && this.<API key> !== CompositeLifeCycle.MOUNTING; }, /** * Initializes the component, renders markup, and registers event listeners. * * @param {string} rootID DOM ID of the root node. * @param {<API key>|<API key>} transaction * @param {number} mountDepth number of components in the owner hierarchy * @return {?string} Rendered markup to be inserted into the DOM. * @final * @internal */ mountComponent: ReactPerf.measure( '<API key>', 'mountComponent', function(rootID, transaction, mountDepth) { ReactComponent.Mixin.mountComponent.call( this, rootID, transaction, mountDepth ); this.<API key> = CompositeLifeCycle.MOUNTING; if (this.__reactAutoBindMap) { this.<API key>(); } this.context = this._processContext(this._currentElement._context); this.props = this._processProps(this.props); this.state = this.getInitialState ? this.getInitialState() : null; ("production" !== process.env.NODE_ENV ? 
invariant( typeof this.state === 'object' && !Array.isArray(this.state), '%s.getInitialState(): must return an object or null', this.constructor.displayName || '<API key>' ) : invariant(typeof this.state === 'object' && !Array.isArray(this.state))); this._pendingState = null; this._pendingForceUpdate = false; if (this.componentWillMount) { this.componentWillMount(); // When mounting, calls to `setState` by `componentWillMount` will set // `this._pendingState` without triggering a re-render. if (this._pendingState) { this.state = this._pendingState; this._pendingState = null; } } this._renderedComponent = <API key>( this.<API key>(), this._currentElement.type // The wrapping type ); // Done with mounting, `setState` will now trigger UI changes. this.<API key> = null; var markup = this._renderedComponent.mountComponent( rootID, transaction, mountDepth + 1 ); if (this.componentDidMount) { transaction.getReactMountReady().enqueue(this.componentDidMount, this); } return markup; } ), /** * Releases any resources allocated by `mountComponent`. * * @final * @internal */ unmountComponent: function() { this.<API key> = CompositeLifeCycle.UNMOUNTING; if (this.<API key>) { this.<API key>(); } this.<API key> = null; this._renderedComponent.unmountComponent(); this._renderedComponent = null; ReactComponent.Mixin.unmountComponent.call(this); // Some existing components rely on this.props even after they've been // destroyed (in event handlers). // TODO: this.props = null; // TODO: this.state = null; }, /** * Sets a subset of the state. Always use this or `replaceState` to mutate * state. You should treat `this.state` as immutable. * * There is no guarantee that `this.state` will be immediately updated, so * accessing `this.state` after calling this method may return the old value. * * There is no guarantee that calls to `setState` will run synchronously, * as they may eventually be batched together. 
You can provide an optional * callback that will be executed when the call to setState is actually * completed. * * @param {object} partialState Next partial state to be merged with state. * @param {?function} callback Called after state is updated. * @final * @protected */ setState: function(partialState, callback) { ("production" !== process.env.NODE_ENV ? invariant( typeof partialState === 'object' || partialState == null, 'setState(...): takes an object of state variables to update.' ) : invariant(typeof partialState === 'object' || partialState == null)); if ("production" !== process.env.NODE_ENV){ ("production" !== process.env.NODE_ENV ? warning( partialState != null, 'setState(...): You passed an undefined or null state object; ' + 'instead, use forceUpdate().' ) : null); } // Merge with `_pendingState` if it exists, otherwise with existing state. this.replaceState( assign({}, this._pendingState || this.state, partialState), callback ); }, /** * Replaces all of the state. Always use this or `setState` to mutate state. * You should treat `this.state` as immutable. * * There is no guarantee that `this.state` will be immediately updated, so * accessing `this.state` after calling this method may return the old value. * * @param {object} completeState Next state. * @param {?function} callback Called after state is updated. * @final * @protected */ replaceState: function(completeState, callback) { <API key>(this); this._pendingState = completeState; if (this.<API key> !== CompositeLifeCycle.MOUNTING) { // If we're in a componentWillMount handler, don't enqueue a rerender // because ReactUpdates assumes we're in a browser context (which is wrong // for server rendering) and we're about to do a render anyway. // TODO: The callback here is ignored when setState is called from // componentWillMount. Either fix it or disallow doing so completely in // favor of getInitialState. 
ReactUpdates.enqueueUpdate(this, callback); } }, /** * Filters the context object to only contain keys specified in * `contextTypes`, and asserts that they are valid. * * @param {object} context * @return {?object} * @private */ _processContext: function(context) { var maskedContext = null; var contextTypes = this.constructor.contextTypes; if (contextTypes) { maskedContext = {}; for (var contextName in contextTypes) { maskedContext[contextName] = context[contextName]; } if ("production" !== process.env.NODE_ENV) { this._checkPropTypes( contextTypes, maskedContext, <API key>.context ); } } return maskedContext; }, /** * @param {object} currentContext * @return {object} * @private */ <API key>: function(currentContext) { var childContext = this.getChildContext && this.getChildContext(); var displayName = this.constructor.displayName || '<API key>'; if (childContext) { ("production" !== process.env.NODE_ENV ? invariant( typeof this.constructor.childContextTypes === 'object', '%s.getChildContext(): childContextTypes must be defined in order to ' + 'use getChildContext().', displayName ) : invariant(typeof this.constructor.childContextTypes === 'object')); if ("production" !== process.env.NODE_ENV) { this._checkPropTypes( this.constructor.childContextTypes, childContext, <API key>.childContext ); } for (var name in childContext) { ("production" !== process.env.NODE_ENV ? invariant( name in this.constructor.childContextTypes, '%s.getChildContext(): key "%s" is not defined in childContextTypes.', displayName, name ) : invariant(name in this.constructor.childContextTypes)); } return assign({}, currentContext, childContext); } return currentContext; }, /** * Processes props by setting default values for unspecified props and * asserting that the props are valid. Does not mutate its argument; returns * a new props object with defaults merged in. 
* * @param {object} newProps * @return {object} * @private */ _processProps: function(newProps) { if ("production" !== process.env.NODE_ENV) { var propTypes = this.constructor.propTypes; if (propTypes) { this._checkPropTypes(propTypes, newProps, <API key>.prop); } } return newProps; }, /** * Assert that the props are valid * * @param {object} propTypes Map of prop name to a ReactPropType * @param {object} props * @param {string} location e.g. "prop", "context", "child context" * @private */ _checkPropTypes: function(propTypes, props, location) { // TODO: Stop validating prop types here and only use the element // validation. var componentName = this.constructor.displayName; for (var propName in propTypes) { if (propTypes.hasOwnProperty(propName)) { var error = propTypes[propName](props, propName, componentName, location); if (error instanceof Error) { // We may want to extend this logic for similar errors in // renderComponent calls, so I'm abstracting it away into // a function to minimize refactoring in the future var addendum = <API key>(this); ("production" !== process.env.NODE_ENV ? warning(false, error.message + addendum) : null); } } } }, /** * If any of `_pendingElement`, `_pendingState`, or `_pendingForceUpdate` * is set, update the component. * * @param {<API key>} transaction * @internal */ <API key>: function(transaction) { var <API key> = this.<API key>; // Do not trigger a state transition if we are in the middle of mounting or // receiving props because both of those will already be doing this. 
if (<API key> === CompositeLifeCycle.MOUNTING || <API key> === CompositeLifeCycle.RECEIVING_PROPS) { return; } if (this._pendingElement == null && this._pendingState == null && !this._pendingForceUpdate) { return; } var nextContext = this.context; var nextProps = this.props; var nextElement = this._currentElement; if (this._pendingElement != null) { nextElement = this._pendingElement; nextContext = this._processContext(nextElement._context); nextProps = this._processProps(nextElement.props); this._pendingElement = null; this.<API key> = CompositeLifeCycle.RECEIVING_PROPS; if (this.<API key>) { this.<API key>(nextProps, nextContext); } } this.<API key> = null; var nextState = this._pendingState || this.state; this._pendingState = null; var shouldUpdate = this._pendingForceUpdate || !this.<API key> || this.<API key>(nextProps, nextState, nextContext); if ("production" !== process.env.NODE_ENV) { if (typeof shouldUpdate === "undefined") { console.warn( (this.constructor.displayName || '<API key>') + '.<API key>(): Returned undefined instead of a ' + 'boolean value. Make sure to return true or false.' ); } } if (shouldUpdate) { this._pendingForceUpdate = false; // Will set `this.props`, `this.state` and `this.context`. this.<API key>( nextElement, nextProps, nextState, nextContext, transaction ); } else { // If it's determined that a component should not update, we still want // to set props and state. this._currentElement = nextElement; this.props = nextProps; this.state = nextState; this.context = nextContext; // Owner cannot change because <API key> doesn't allow // it. TODO: Remove this._owner completely. this._owner = nextElement._owner; } }, /** * Merges new props and state, notifies delegate methods of update and * performs update. * * @param {ReactElement} nextElement Next element * @param {object} nextProps Next public object to set as properties. * @param {?object} nextState Next object to set as state. 
* @param {?object} nextContext Next public object to set as context. * @param {<API key>} transaction * @private */ <API key>: function( nextElement, nextProps, nextState, nextContext, transaction ) { var prevElement = this._currentElement; var prevProps = this.props; var prevState = this.state; var prevContext = this.context; if (this.componentWillUpdate) { this.componentWillUpdate(nextProps, nextState, nextContext); } this._currentElement = nextElement; this.props = nextProps; this.state = nextState; this.context = nextContext; // Owner cannot change because <API key> doesn't allow // it. TODO: Remove this._owner completely. this._owner = nextElement._owner; this.updateComponent( transaction, prevElement ); if (this.componentDidUpdate) { transaction.getReactMountReady().enqueue( this.componentDidUpdate.bind(this, prevProps, prevState, prevContext), this ); } }, receiveComponent: function(nextElement, transaction) { if (nextElement === this._currentElement && nextElement._owner != null) { // Since elements are immutable after the owner is rendered, // we can do a cheap identity compare here to determine if this is a // superfluous reconcile. It's possible for state to be mutable but such // change should trigger an update of the owner which would recreate // the element. We explicitly check for the existence of an owner since // it's possible for a element created outside a composite to be // deeply mutated and reused. return; } ReactComponent.Mixin.receiveComponent.call( this, nextElement, transaction ); }, /** * Updates the component's currently mounted DOM representation. * * By default, this implements React's rendering and reconciliation algorithm. * Sophisticated clients may wish to override this. 
* * @param {<API key>} transaction * @param {ReactElement} prevElement * @internal * @overridable */ updateComponent: ReactPerf.measure( '<API key>', 'updateComponent', function(transaction, prevParentElement) { ReactComponent.Mixin.updateComponent.call( this, transaction, prevParentElement ); var <API key> = this._renderedComponent; var prevElement = <API key>._currentElement; var nextElement = this.<API key>(); if (<API key>(prevElement, nextElement)) { <API key>.receiveComponent(nextElement, transaction); } else { // These two IDs are actually the same! But nothing should rely on that. var thisID = this._rootNodeID; var prevComponentID = <API key>._rootNodeID; <API key>.unmountComponent(); this._renderedComponent = <API key>( nextElement, this._currentElement.type ); var nextMarkup = this._renderedComponent.mountComponent( thisID, transaction, this._mountDepth + 1 ); ReactComponent.BackendIDOperations.<API key>( prevComponentID, nextMarkup ); } } ), /** * Forces an update. This should only be invoked when it is known with * certainty that we are **not** in a DOM transaction. * * You may want to call this when you know that some deeper aspect of the * component's state has changed but `setState` was not called. * * This will not invoke `<API key>`, but it will invoke * `componentWillUpdate` and `componentDidUpdate`. * * @param {?function} callback Called after update is complete. * @final * @protected */ forceUpdate: function(callback) { var <API key> = this.<API key>; ("production" !== process.env.NODE_ENV ? invariant( this.isMounted() || <API key> === CompositeLifeCycle.MOUNTING, 'forceUpdate(...): Can only force an update on mounted or mounting ' + 'components.' ) : invariant(this.isMounted() || <API key> === CompositeLifeCycle.MOUNTING)); ("production" !== process.env.NODE_ENV ? 
invariant( <API key> !== CompositeLifeCycle.UNMOUNTING && ReactCurrentOwner.current == null, 'forceUpdate(...): Cannot force an update while unmounting component ' + 'or within a `render` function.' ) : invariant(<API key> !== CompositeLifeCycle.UNMOUNTING && ReactCurrentOwner.current == null)); this._pendingForceUpdate = true; ReactUpdates.enqueueUpdate(this, callback); }, /** * @private */ <API key>: ReactPerf.measure( '<API key>', '<API key>', function() { var renderedComponent; var previousContext = ReactContext.current; ReactContext.current = this.<API key>( this._currentElement._context ); ReactCurrentOwner.current = this; try { renderedComponent = this.render(); if (renderedComponent === null || renderedComponent === false) { renderedComponent = ReactEmptyComponent.getEmptyComponent(); ReactEmptyComponent.<API key>(this._rootNodeID); } else { ReactEmptyComponent.<API key>(this._rootNodeID); } } finally { ReactContext.current = previousContext; ReactCurrentOwner.current = null; } ("production" !== process.env.NODE_ENV ? invariant( ReactElement.isValidElement(renderedComponent), '%s.render(): A valid ReactComponent must be returned. You may have ' + 'returned undefined, an array or some other invalid object.', this.constructor.displayName || '<API key>' ) : invariant(ReactElement.isValidElement(renderedComponent))); return renderedComponent; } ), /** * @private */ <API key>: function() { for (var autoBindKey in this.__reactAutoBindMap) { if (!this.__reactAutoBindMap.hasOwnProperty(autoBindKey)) { continue; } var method = this.__reactAutoBindMap[autoBindKey]; this[autoBindKey] = this._bindAutoBindMethod(ReactErrorUtils.guard( method, this.constructor.displayName + '.' + autoBindKey )); } }, /** * Binds a method to the component. * * @param {function} method Method to be bound. 
* @private */ _bindAutoBindMethod: function(method) { var component = this; var boundMethod = method.bind(component); if ("production" !== process.env.NODE_ENV) { boundMethod.__reactBoundContext = component; boundMethod.__reactBoundMethod = method; boundMethod.<API key> = null; var componentName = component.constructor.displayName; var _bind = boundMethod.bind; boundMethod.bind = function(newThis ) {for (var args=[],$__0=1,$__1=arguments.length;$__0<$__1;$__0++) args.push(arguments[$__0]); // User is trying to bind() an autobound method; we effectively will // ignore the value of "this" that the user is trying to use, so // let's warn. if (newThis !== component && newThis !== null) { monitorCodeUse('react_bind_warning', { component: componentName }); console.warn( 'bind(): React component methods may only be bound to the ' + 'component instance. See ' + componentName ); } else if (!args.length) { monitorCodeUse('react_bind_warning', { component: componentName }); console.warn( 'bind(): You are binding a component method to the component. ' + 'React does this for you automatically in a high-performance ' + 'way, so you can safely remove this call. See ' + componentName ); return boundMethod; } var reboundMethod = _bind.apply(boundMethod, arguments); reboundMethod.__reactBoundContext = component; reboundMethod.__reactBoundMethod = method; reboundMethod.<API key> = args; return reboundMethod; }; } return boundMethod; } }; var <API key> = function() {}; assign( <API key>.prototype, ReactComponent.Mixin, ReactOwner.Mixin, ReactPropTransferer.Mixin, <API key> ); /** * Module for creating composite components. * * @class <API key> * @extends ReactComponent * @extends ReactOwner * @extends ReactPropTransferer */ var <API key> = { LifeCycle: CompositeLifeCycle, Base: <API key>, /** * Creates a composite component class given a class specification. * * @param {object} spec Class specification (which must define `render`). * @return {function} Component constructor function. 
* @public */ createClass: function(spec) { var Constructor = function(props) { // This constructor is overridden by mocks. The argument is used // by mocks to assert on what gets mounted. This will later be used // by the stand-alone class implementation. }; Constructor.prototype = new <API key>(); Constructor.prototype.constructor = Constructor; injectedMixins.forEach( <API key>.bind(null, Constructor) ); <API key>(Constructor, spec); // Initialize the defaultProps property after all mixins have been merged if (Constructor.getDefaultProps) { Constructor.defaultProps = Constructor.getDefaultProps(); } ("production" !== process.env.NODE_ENV ? invariant( Constructor.prototype.render, 'createClass(...): Class specification must implement a `render` method.' ) : invariant(Constructor.prototype.render)); if ("production" !== process.env.NODE_ENV) { if (Constructor.prototype.<API key>) { monitorCodeUse( '<API key>', { component: spec.displayName } ); console.warn( (spec.displayName || 'A component') + ' has a method called ' + '<API key>(). Did you mean <API key>()? ' + 'The name is phrased as a question because the function is ' + 'expected to return a value.' ); } } // Reduce time spent doing lookups by setting these on the prototype. for (var methodName in <API key>) { if (!Constructor.prototype[methodName]) { Constructor.prototype[methodName] = null; } } if ("production" !== process.env.NODE_ENV) { return ReactLegacyElement.wrapFactory( <API key>.createFactory(Constructor) ); } return ReactLegacyElement.wrapFactory( ReactElement.createFactory(Constructor) ); }, injection: { injectMixin: function(mixin) { injectedMixins.push(mixin); } } }; module.exports = <API key>;
<?php
// CodeIgniter FTP library language file — Indonesian translation.
// Each key maps an FTP error identifier (used by the FTP library) to a
// user-facing message. The values are runtime strings displayed to users
// and must remain in Indonesian; do not translate them.
defined('BASEPATH') OR exit('No direct script access allowed');

// No valid connection ID found; connect before performing file routines.
$lang['ftp_no_connection'] = 'Tidak dapat menemukan ID koneksi yang sah. Pastikan Anda terhubung sebelum melakukan rutinitas berkas.';
// Unable to connect to the FTP server with the supplied hostname.
// NOTE(review): key redacted in this copy — presumably 'ftp_unable_to_connect'; confirm against upstream.
$lang['<API key>'] = 'Tidak dapat terhubung ke server FTP Anda menggunakan nama host yang disediakan.';
// Login failed; check username and password.
$lang['ftp_unable_to_login'] = 'Tidak dapat masuk ke server FTP Anda. Silakan periksa nama pengguna dan password Anda.';
// Could not create the specified directory.
$lang['ftp_unable_to_mkdir'] = 'Tidak dapat membuat direktori yang telah Anda tentukan.';
// Could not change directories (redacted key — presumably 'ftp_unable_to_changedir').
$lang['<API key>'] = 'Tidak dapat mengubah direktori.';
// Could not set file permissions; check the path.
$lang['ftp_unable_to_chmod'] = 'Tidak dapat mengatur hak akses berkas. Silakan periksa jalur Anda.';
// Upload failed (redacted key — presumably 'ftp_unable_to_upload').
$lang['<API key>'] = 'Tidak dapat mengunggah berkas yang ditentukan. Silakan periksa jalur Anda.';
// Download failed (redacted key — presumably 'ftp_unable_to_download').
$lang['<API key>'] = 'Tidak dapat mengunduh berkas yang ditentukan. Silakan periksa jalur Anda.';
// Source file not found; check the path.
$lang['ftp_no_source_file'] = 'Tidak dapat menemukan sumber berkas. Silakan periksa jalur Anda.';
// Rename failed (redacted key — presumably 'ftp_unable_to_rename').
$lang['<API key>'] = 'Tidak dapat mengubah nama berkas.';
// Delete failed (redacted key — presumably 'ftp_unable_to_delete').
$lang['<API key>'] = 'Tidak dapat menghapus berkas.';
// Move failed; make sure the destination directory exists.
$lang['ftp_unable_to_move'] = 'Tidak dapat memindahkan berkas. Pastikan direktori tujuan ada.';
-- World-database migration: refresh the in-game GM ".ticket" command.
-- The db_version column rename below is the standard mechanism these
-- migrations use to mark the schema revision as applied (the old and new
-- column names are redacted in this copy).
ALTER TABLE db_version CHANGE COLUMN <API key> <API key> bit;

-- Remove any existing registration of the command so the INSERT below can
-- re-create it with updated security level and help text.
DELETE FROM command WHERE name IN ('ticket');

-- Re-register ".ticket" with security level 2 (GM-only). The help string is
-- runtime data shown in-game: the embedded \r\n sequences and the exact
-- wording must be preserved byte-for-byte.
INSERT INTO command (name, security, help) VALUES ('ticket',2,'Syntax: .ticket on\r\n .ticket off\r\n .ticket #num\r\n .ticket $character_name\r\n .ticket respond #num $response\r\n .ticket respond $character_name $response\r\n\r\non/off for GMs to show or not a new ticket directly, $character_name to show ticket of this character, #num to show ticket #num.');
// <API key>: GPL-2.0+ #include <linux/clk.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/videodev2.h> #include <media/rcar-fcp.h> #include <media/v4l2-subdev.h> #include "vsp1.h" #include "vsp1_brx.h" #include "vsp1_clu.h" #include "vsp1_dl.h" #include "vsp1_drm.h" #include "vsp1_hgo.h" #include "vsp1_hgt.h" #include "vsp1_hsit.h" #include "vsp1_lif.h" #include "vsp1_lut.h" #include "vsp1_pipe.h" #include "vsp1_rwpf.h" #include "vsp1_sru.h" #include "vsp1_uds.h" #include "vsp1_uif.h" #include "vsp1_video.h" static irqreturn_t vsp1_irq_handler(int irq, void *data) { u32 mask = VI6_WFP_IRQ_STA_DFE | VI6_WFP_IRQ_STA_FRE; struct vsp1_device *vsp1 = data; irqreturn_t ret = IRQ_NONE; unsigned int i; u32 status; for (i = 0; i < vsp1->info->wpf_count; ++i) { struct vsp1_rwpf *wpf = vsp1->wpf[i]; if (wpf == NULL) continue; status = vsp1_read(vsp1, VI6_WPF_IRQ_STA(i)); vsp1_write(vsp1, VI6_WPF_IRQ_STA(i), ~status & mask); if (status & VI6_WFP_IRQ_STA_DFE) { <API key>(wpf->entity.pipe); ret = IRQ_HANDLED; } } return ret; } /* * <API key> - Create links from all sources to the given sink * * This function creates media links from all valid sources to the given sink * pad. Links that would be invalid according to the VSP1 hardware capabilities * are skipped. Those include all links * * - from a UDS to a UDS (UDS entities can't be chained) * - from an entity to itself (no loops are allowed) * * Furthermore, the BRS can't be connected to histogram generators, but no * special check is currently needed as all VSP instances that include a BRS * have no histogram generator. 
 */
static int <API key>(struct vsp1_device *vsp1,
				 struct vsp1_entity *sink)
{
	struct media_entity *entity = &sink->subdev.entity;
	struct vsp1_entity *source;
	unsigned int pad;
	int ret;

	/* Try every registered entity as a potential source for this sink. */
	list_for_each_entry(source, &vsp1->entities, list_dev) {
		u32 flags;

		/*
		 * Skip same-type pairs: this rules out self-links and
		 * UDS-to-UDS chains in one test.
		 */
		if (source->type == sink->type)
			continue;

		/* These entity types can never act as link sources. */
		if (source->type == VSP1_ENTITY_HGO ||
		    source->type == VSP1_ENTITY_HGT ||
		    source->type == VSP1_ENTITY_LIF ||
		    source->type == VSP1_ENTITY_WPF)
			continue;

		/*
		 * The RPF-to-WPF link with matching indices gets a special
		 * flag (redacted constant — presumably MEDIA_LNK_FL_ENABLED,
		 * marking it as the default-enabled link; TODO confirm).
		 */
		flags = source->type == VSP1_ENTITY_RPF &&
			sink->type == VSP1_ENTITY_WPF &&
			source->index == sink->index
		      ? <API key> : 0;

		/* Link the source's output pad to every sink pad of the entity. */
		for (pad = 0; pad < entity->num_pads; ++pad) {
			if (!(entity->pads[pad].flags & MEDIA_PAD_FL_SINK))
				continue;

			/*
			 * Redacted call — presumably media_create_pad_link();
			 * TODO confirm.
			 */
			ret = <API key>(&source->subdev.entity,
						    source->source_pad,
						    entity, pad, flags);
			if (ret < 0)
				return ret;

			/* Remember the default sink on the flagged link. */
			if (flags & <API key>)
				source->sink = sink;
		}
	}

	return 0;
}

/*
 * Create all media links for the device: generic source-to-sink links for
 * most entities, plus the fixed HGO/HGT, WPF-to-LIF, video-to-RPF and
 * WPF-to-video connections.
 */
static int <API key>(struct vsp1_device *vsp1)
{
	struct vsp1_entity *entity;
	unsigned int i;
	int ret;

	/*
	 * LIF and RPF sinks are wired explicitly below, so exclude them from
	 * the generic all-sources-to-sink pass.
	 */
	list_for_each_entry(entity, &vsp1->entities, list_dev) {
		if (entity->type == VSP1_ENTITY_LIF ||
		    entity->type == VSP1_ENTITY_RPF)
			continue;

		ret = <API key>(vsp1, entity);
		if (ret < 0)
			return ret;
	}

	/*
	 * Histogram generators get a fixed link to their capture video node.
	 * The two redacted flag constants here and below are presumably
	 * MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE; TODO confirm.
	 */
	if (vsp1->hgo) {
		ret = <API key>(&vsp1->hgo->histo.entity.subdev.entity,
					    HISTO_PAD_SOURCE,
					    &vsp1->hgo->histo.video.entity, 0,
					    <API key> |
					    <API key>);
		if (ret < 0)
			return ret;
	}

	if (vsp1->hgt) {
		ret = <API key>(&vsp1->hgt->histo.entity.subdev.entity,
					    HISTO_PAD_SOURCE,
					    &vsp1->hgt->histo.video.entity, 0,
					    <API key> |
					    <API key>);
		if (ret < 0)
			return ret;
	}

	/* Each LIF is fed by the WPF with the same index (no flags). */
	for (i = 0; i < vsp1->info->lif_count; ++i) {
		if (!vsp1->lif[i])
			continue;

		ret = <API key>(&vsp1->wpf[i]->entity.subdev.entity,
					    RWPF_PAD_SOURCE,
					    &vsp1->lif[i]->entity.subdev.entity,
					    LIF_PAD_SINK, 0);
		if (ret < 0)
			return ret;
	}

	/* Each RPF is fed by its output video node via an immutable link. */
	for (i = 0; i < vsp1->info->rpf_count; ++i) {
		struct vsp1_rwpf *rpf = vsp1->rpf[i];

		ret = <API key>(&rpf->video->video.entity, 0,
					    &rpf->entity.subdev.entity,
					    RWPF_PAD_SINK,
					    <API key> |
					    <API key>);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < vsp1->info->wpf_count; ++i) {
		/*
		 * 
Connect the video device to the WPF. All connections are
		 * immutable.
		 */
		struct vsp1_rwpf *wpf = vsp1->wpf[i];

		ret = <API key>(&wpf->entity.subdev.entity,
					    RWPF_PAD_SOURCE,
					    &wpf->video->video.entity, 0,
					    <API key> |
					    <API key>);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * Tear down everything created at entity-creation time: destroy all
 * entities, clean up all video nodes, and unregister the V4L2 and media
 * devices. The DRM pipeline is only cleaned up when the userspace API is
 * disabled, mirroring the conditional setup in the creation path.
 */
static void <API key>(struct vsp1_device *vsp1)
{
	struct vsp1_entity *entity, *_entity;
	struct vsp1_video *video, *_video;

	/* Safe-iteration variants (redacted — presumably
	 * list_for_each_entry_safe) since entries are deleted while walking. */
	<API key>(entity, _entity, &vsp1->entities, list_dev) {
		list_del(&entity->list_dev);
		vsp1_entity_destroy(entity);
	}

	<API key>(video, _video, &vsp1->videos, list) {
		list_del(&video->list);
		vsp1_video_cleanup(video);
	}

	/* Redacted — presumably v4l2_device_unregister(). */
	<API key>(&vsp1->v4l2_dev);

	/* Media device was only registered in uapi mode; cleanup is always needed. */
	if (vsp1->info->uapi)
		<API key>(&vsp1->media_dev);
	<API key>(&vsp1->media_dev);

	if (!vsp1->info->uapi)
		vsp1_drm_cleanup(vsp1);
}

/*
 * Instantiate every processing entity supported by this device variant,
 * register the V4L2/media devices, create the media links, and — depending
 * on whether the userspace API is enabled — either register the device
 * nodes or initialize the internal DRM pipeline.
 */
static int <API key>(struct vsp1_device *vsp1)
{
	struct media_device *mdev = &vsp1->media_dev;
	struct v4l2_device *vdev = &vsp1->v4l2_dev;
	struct vsp1_entity *entity;
	unsigned int i;
	int ret;

	mdev->dev = vsp1->dev;
	mdev->hw_revision = vsp1->version;
	strscpy(mdev->model, vsp1->info->model, sizeof(mdev->model));
	snprintf(mdev->bus_info, sizeof(mdev->bus_info), "platform:%s",
		 dev_name(mdev->dev));
	media_device_init(mdev);

	vsp1->media_ops.link_setup = <API key>;

	/*
	 * Don't perform link validation when the userspace API is disabled as
	 * the pipeline is configured internally by the driver in that case, and
	 * its configuration can thus be trusted.
	 */
	if (vsp1->info->uapi)
		vsp1->media_ops.link_validate = <API key>;

	vdev->mdev = mdev;
	/* Redacted — presumably v4l2_device_register(); TODO confirm. */
	ret = <API key>(vsp1->dev, vdev);
	if (ret < 0) {
		dev_err(vsp1->dev, "V4L2 device registration failed (%d)\n",
			ret);
		goto done;
	}

	/*
	 * Instantiate the optional blend units first; presence is gated on
	 * per-variant feature bits.
	 */
	if (vsp1_feature(vsp1, VSP1_HAS_BRS)) {
		vsp1->brs = vsp1_brx_create(vsp1, VSP1_ENTITY_BRS);
		if (IS_ERR(vsp1->brs)) {
			ret = PTR_ERR(vsp1->brs);
			goto done;
		}

		list_add_tail(&vsp1->brs->entity.list_dev, &vsp1->entities);
	}

	if (vsp1_feature(vsp1, VSP1_HAS_BRU)) {
		vsp1->bru = vsp1_brx_create(vsp1, VSP1_ENTITY_BRU);
		if (IS_ERR(vsp1->bru)) {
			ret = PTR_ERR(vsp1->bru);
			goto done;
		}

		list_add_tail(&vsp1->bru->entity.list_dev, &vsp1->entities);
	}

	if (vsp1_feature(vsp1, VSP1_HAS_CLU)) {
		vsp1->clu = vsp1_clu_create(vsp1);
		if (IS_ERR(vsp1->clu)) {
			ret = PTR_ERR(vsp1->clu);
			goto done;
		}

		list_add_tail(&vsp1->clu->entity.list_dev, &vsp1->entities);
	}

	/* HSI/HST colour-space converters are present on every variant. */
	vsp1->hsi = vsp1_hsit_create(vsp1, true);
	if (IS_ERR(vsp1->hsi)) {
		ret = PTR_ERR(vsp1->hsi);
		goto done;
	}

	list_add_tail(&vsp1->hsi->entity.list_dev, &vsp1->entities);

	vsp1->hst = vsp1_hsit_create(vsp1, false);
	if (IS_ERR(vsp1->hst)) {
		ret = PTR_ERR(vsp1->hst);
		goto done;
	}

	list_add_tail(&vsp1->hst->entity.list_dev, &vsp1->entities);

	/* Histogram generators are only useful with the userspace API. */
	if (vsp1_feature(vsp1, VSP1_HAS_HGO) && vsp1->info->uapi) {
		vsp1->hgo = vsp1_hgo_create(vsp1);
		if (IS_ERR(vsp1->hgo)) {
			ret = PTR_ERR(vsp1->hgo);
			goto done;
		}

		list_add_tail(&vsp1->hgo->histo.entity.list_dev,
			      &vsp1->entities);
	}

	if (vsp1_feature(vsp1, VSP1_HAS_HGT) && vsp1->info->uapi) {
		vsp1->hgt = vsp1_hgt_create(vsp1);
		if (IS_ERR(vsp1->hgt)) {
			ret = PTR_ERR(vsp1->hgt);
			goto done;
		}

		list_add_tail(&vsp1->hgt->histo.entity.list_dev,
			      &vsp1->entities);
	}

	/*
	 * The LIFs are only supported when used in conjunction with the DU, in
	 * which case the userspace API is disabled. If the userspace API is
	 * enabled skip the LIFs, even when present.
	 */
	if (!vsp1->info->uapi) {
		for (i = 0; i < vsp1->info->lif_count; ++i) {
			struct vsp1_lif *lif;

			lif = vsp1_lif_create(vsp1, i);
			if (IS_ERR(lif)) {
				ret = PTR_ERR(lif);
				goto done;
			}

			vsp1->lif[i] = lif;
			list_add_tail(&lif->entity.list_dev, &vsp1->entities);
		}
	}

	if (vsp1_feature(vsp1, VSP1_HAS_LUT)) {
		vsp1->lut = vsp1_lut_create(vsp1);
		if (IS_ERR(vsp1->lut)) {
			ret = PTR_ERR(vsp1->lut);
			goto done;
		}

		list_add_tail(&vsp1->lut->entity.list_dev, &vsp1->entities);
	}

	/* RPFs (read pixel formatters) — one video input node each in uapi mode. */
	for (i = 0; i < vsp1->info->rpf_count; ++i) {
		struct vsp1_rwpf *rpf;

		rpf = vsp1_rpf_create(vsp1, i);
		if (IS_ERR(rpf)) {
			ret = PTR_ERR(rpf);
			goto done;
		}

		vsp1->rpf[i] = rpf;
		list_add_tail(&rpf->entity.list_dev, &vsp1->entities);

		if (vsp1->info->uapi) {
			struct vsp1_video *video = vsp1_video_create(vsp1, rpf);

			if (IS_ERR(video)) {
				ret = PTR_ERR(video);
				goto done;
			}

			list_add_tail(&video->list, &vsp1->videos);
		}
	}

	if (vsp1_feature(vsp1, VSP1_HAS_SRU)) {
		vsp1->sru = vsp1_sru_create(vsp1);
		if (IS_ERR(vsp1->sru)) {
			ret = PTR_ERR(vsp1->sru);
			goto done;
		}

		list_add_tail(&vsp1->sru->entity.list_dev, &vsp1->entities);
	}

	/* Up/down scalers. */
	for (i = 0; i < vsp1->info->uds_count; ++i) {
		struct vsp1_uds *uds;

		uds = vsp1_uds_create(vsp1, i);
		if (IS_ERR(uds)) {
			ret = PTR_ERR(uds);
			goto done;
		}

		vsp1->uds[i] = uds;
		list_add_tail(&uds->entity.list_dev, &vsp1->entities);
	}

	/* User interface (external sync) units. */
	for (i = 0; i < vsp1->info->uif_count; ++i) {
		struct vsp1_uif *uif;

		uif = vsp1_uif_create(vsp1, i);
		if (IS_ERR(uif)) {
			ret = PTR_ERR(uif);
			goto done;
		}

		vsp1->uif[i] = uif;
		list_add_tail(&uif->entity.list_dev, &vsp1->entities);
	}

	/* WPFs (write pixel formatters) — one video output node each in uapi mode. */
	for (i = 0; i < vsp1->info->wpf_count; ++i) {
		struct vsp1_rwpf *wpf;

		wpf = vsp1_wpf_create(vsp1, i);
		if (IS_ERR(wpf)) {
			ret = PTR_ERR(wpf);
			goto done;
		}

		vsp1->wpf[i] = wpf;
		list_add_tail(&wpf->entity.list_dev, &vsp1->entities);

		if (vsp1->info->uapi) {
			struct vsp1_video *video = vsp1_video_create(vsp1, wpf);

			if (IS_ERR(video)) {
				ret = PTR_ERR(video);
				goto done;
			}

			list_add_tail(&video->list, &vsp1->videos);
		}
	}

	/* Register all subdevs.
 */
	list_for_each_entry(entity, &vsp1->entities, list_dev) {
		/* Redacted — presumably v4l2_device_register_subdev(). */
		ret = <API key>(&vsp1->v4l2_dev, &entity->subdev);
		if (ret < 0)
			goto done;
	}

	/*
	 * Create links and register subdev nodes if the userspace API is
	 * enabled or initialize the DRM pipeline otherwise.
	 */
	if (vsp1->info->uapi) {
		ret = <API key>(vsp1);
		if (ret < 0)
			goto done;

		ret = <API key>(&vsp1->v4l2_dev);
		if (ret < 0)
			goto done;

		/* Redacted — presumably media_device_register(mdev). */
		ret = <API key>(mdev);
	} else {
		ret = vsp1_drm_init(vsp1);
	}

done:
	/* On any failure, unwind everything created above. */
	if (ret < 0)
		<API key>(vsp1);

	return ret;
}

/*
 * Software-reset one WPF channel. Returns 0 immediately when the channel is
 * already idle; otherwise triggers the reset and polls the status register
 * (up to 10 attempts, 1-2 ms apart) until the channel deasserts its active
 * bit. Returns -ETIMEDOUT if the channel never goes idle.
 */
int vsp1_reset_wpf(struct vsp1_device *vsp1, unsigned int index)
{
	unsigned int timeout;
	u32 status;

	status = vsp1_read(vsp1, VI6_STATUS);
	if (!(status & VI6_STATUS_SYS_ACT(index)))
		return 0;

	vsp1_write(vsp1, VI6_SRESET, VI6_SRESET_SRTS(index));
	for (timeout = 10; timeout > 0; --timeout) {
		status = vsp1_read(vsp1, VI6_STATUS);
		if (!(status & VI6_STATUS_SYS_ACT(index)))
			break;

		usleep_range(1000, 2000);
	}

	if (!timeout) {
		dev_err(vsp1->dev, "failed to reset wpf.%u\n", index);
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * Bring the hardware to a known state: reset any running WPF channel,
 * program the clock control register, detach every routing node, and set up
 * the display-list manager.
 */
static int vsp1_device_init(struct vsp1_device *vsp1)
{
	unsigned int i;
	int ret;

	/* Reset any channel that might be running.
 */
	for (i = 0; i < vsp1->info->wpf_count; ++i) {
		ret = vsp1_reset_wpf(vsp1, i);
		if (ret < 0)
			return ret;
	}

	/*
	 * Program both fields of the clock control register to 8; the two
	 * redacted shift constants are presumably the CSTPW/DSTPW (clock stop
	 * wait) field offsets — TODO confirm against the register header.
	 */
	vsp1_write(vsp1, VI6_CLK_DCSWT, (8 << <API key>) |
		   (8 << <API key>));

	/*
	 * Disconnect every data-path-router (DPR) node by routing it to
	 * VI6_DPR_NODE_UNUSED, so no stale routing survives from a previous
	 * run.
	 */
	for (i = 0; i < vsp1->info->rpf_count; ++i)
		vsp1_write(vsp1, VI6_DPR_RPF_ROUTE(i), VI6_DPR_NODE_UNUSED);

	for (i = 0; i < vsp1->info->uds_count; ++i)
		vsp1_write(vsp1, VI6_DPR_UDS_ROUTE(i), VI6_DPR_NODE_UNUSED);

	for (i = 0; i < vsp1->info->uif_count; ++i)
		vsp1_write(vsp1, VI6_DPR_UIF_ROUTE(i), VI6_DPR_NODE_UNUSED);

	vsp1_write(vsp1, VI6_DPR_SRU_ROUTE, VI6_DPR_NODE_UNUSED);
	vsp1_write(vsp1, VI6_DPR_LUT_ROUTE, VI6_DPR_NODE_UNUSED);
	vsp1_write(vsp1, VI6_DPR_CLU_ROUTE, VI6_DPR_NODE_UNUSED);
	vsp1_write(vsp1, VI6_DPR_HST_ROUTE, VI6_DPR_NODE_UNUSED);
	vsp1_write(vsp1, VI6_DPR_HSI_ROUTE, VI6_DPR_NODE_UNUSED);
	vsp1_write(vsp1, VI6_DPR_BRU_ROUTE, VI6_DPR_NODE_UNUSED);

	/* The BRS route register only exists on variants with a BRS. */
	if (vsp1_feature(vsp1, VSP1_HAS_BRS))
		vsp1_write(vsp1, <API key>, VI6_DPR_NODE_UNUSED);

	/*
	 * Park the histogram sampling points: value 7 in one field and
	 * NODE_UNUSED in the other (the redacted shifts are presumably the
	 * SMPPT field offsets — TODO confirm).
	 */
	vsp1_write(vsp1, VI6_DPR_HGO_SMPPT, (7 << <API key>) |
		   (VI6_DPR_NODE_UNUSED << <API key>));
	vsp1_write(vsp1, VI6_DPR_HGT_SMPPT, (7 << <API key>) |
		   (VI6_DPR_NODE_UNUSED << <API key>));

	vsp1_dlm_setup(vsp1);

	return 0;
}

/*
 * vsp1_device_get - Acquire the VSP1 device
 *
 * Make sure the device is not suspended and initialize it if needed.
 *
 * Return 0 on success or a negative error code otherwise.
 */
int vsp1_device_get(struct vsp1_device *vsp1)
{
	int ret;

	ret = pm_runtime_get_sync(vsp1->dev);
	if (ret < 0) {
		/*
		 * pm_runtime_get_sync() raises the usage count even on
		 * failure; the redacted call here is presumably
		 * pm_runtime_put_noidle() to rebalance it — TODO confirm.
		 */
		<API key>(vsp1->dev);
		return ret;
	}

	return 0;
}

/*
 * vsp1_device_put - Release the VSP1 device
 *
 * Decrement the VSP1 reference count and cleanup the device if the last
 * reference is released.
 */
void vsp1_device_put(struct vsp1_device *vsp1)
{
	pm_runtime_put_sync(vsp1->dev);
}

/* System-suspend callback: quiesce video capture unless driven by the DU. */
static int __maybe_unused vsp1_pm_suspend(struct device *dev)
{
	struct vsp1_device *vsp1 = dev_get_drvdata(dev);

	/*
	 * When used as part of a display pipeline, the VSP is stopped and
	 * restarted explicitly by the DU.
*/ if (!vsp1->drm) vsp1_video_suspend(vsp1); <API key>(vsp1->dev); return 0; } static int __maybe_unused vsp1_pm_resume(struct device *dev) { struct vsp1_device *vsp1 = dev_get_drvdata(dev); <API key>(vsp1->dev); /* * When used as part of a display pipeline, the VSP is stopped and * restarted explicitly by the DU. */ if (!vsp1->drm) vsp1_video_resume(vsp1); return 0; } static int __maybe_unused <API key>(struct device *dev) { struct vsp1_device *vsp1 = dev_get_drvdata(dev); rcar_fcp_disable(vsp1->fcp); return 0; } static int __maybe_unused <API key>(struct device *dev) { struct vsp1_device *vsp1 = dev_get_drvdata(dev); int ret; if (vsp1->info) { ret = vsp1_device_init(vsp1); if (ret < 0) return ret; } return rcar_fcp_enable(vsp1->fcp); } static const struct dev_pm_ops vsp1_pm_ops = { <API key>(vsp1_pm_suspend, vsp1_pm_resume) SET_RUNTIME_PM_OPS(<API key>, <API key>, NULL) }; static const struct vsp1_device_info vsp1_device_infos[] = { { .version = <API key>, .model = "VSP1-S", .gen = 2, .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO | VSP1_HAS_HGT | VSP1_HAS_LUT | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP, .rpf_count = 5, .uds_count = 3, .wpf_count = 4, .num_bru_inputs = 4, .uapi = true, }, { .version = <API key>, .model = "VSP1-R", .gen = 2, .features = VSP1_HAS_BRU | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP, .rpf_count = 5, .uds_count = 3, .wpf_count = 4, .num_bru_inputs = 4, .uapi = true, }, { .version = <API key>, .model = "VSP1-D", .gen = 2, .features = VSP1_HAS_BRU | VSP1_HAS_HGO | VSP1_HAS_LUT, .lif_count = 1, .rpf_count = 4, .uds_count = 1, .wpf_count = 1, .num_bru_inputs = 4, .uapi = true, }, { .version = <API key>, .model = "VSP1-S", .gen = 2, .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO | VSP1_HAS_HGT | VSP1_HAS_LUT | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP, .rpf_count = 5, .uds_count = 1, .wpf_count = 4, .num_bru_inputs = 4, .uapi = true, }, { .version = <API key>, .model = "VSP1V-S", .gen = 2, .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT | 
VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP, .rpf_count = 4, .uds_count = 1, .wpf_count = 4, .num_bru_inputs = 4, .uapi = true, }, { .version = <API key>, .model = "VSP1V-D", .gen = 2, .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT, .lif_count = 1, .rpf_count = 4, .uds_count = 1, .wpf_count = 1, .num_bru_inputs = 4, .uapi = true, }, { .version = <API key>, .model = "VSP2-I", .gen = 3, .features = VSP1_HAS_CLU | VSP1_HAS_HGO | VSP1_HAS_HGT | VSP1_HAS_LUT | VSP1_HAS_SRU | VSP1_HAS_WPF_HFLIP | VSP1_HAS_WPF_VFLIP, .rpf_count = 1, .uds_count = 1, .wpf_count = 1, .uapi = true, }, { .version = <API key>, .model = "VSP2-BD", .gen = 3, .features = VSP1_HAS_BRU | VSP1_HAS_WPF_VFLIP, .rpf_count = 5, .wpf_count = 1, .num_bru_inputs = 5, .uapi = true, }, { .version = <API key>, .model = "VSP2-BC", .gen = 3, .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO | VSP1_HAS_LUT | VSP1_HAS_WPF_VFLIP, .rpf_count = 5, .wpf_count = 1, .num_bru_inputs = 5, .uapi = true, }, { .version = <API key>, .model = "VSP2-BS", .gen = 3, .features = VSP1_HAS_BRS | VSP1_HAS_WPF_VFLIP, .rpf_count = 2, .wpf_count = 1, .uapi = true, }, { .version = <API key>, .model = "VSP2-D", .gen = 3, .features = VSP1_HAS_BRU | VSP1_HAS_WPF_VFLIP | VSP1_HAS_EXT_DL, .lif_count = 1, .rpf_count = 5, .uif_count = 1, .wpf_count = 2, .num_bru_inputs = 5, }, { .version = <API key>, .model = "VSP2-D", .gen = 3, .features = VSP1_HAS_BRS | VSP1_HAS_BRU, .lif_count = 1, .rpf_count = 5, .uif_count = 1, .wpf_count = 1, .num_bru_inputs = 5, }, { .version = <API key>, .model = "VSP2-DL", .gen = 3, .features = VSP1_HAS_BRS | VSP1_HAS_BRU | VSP1_HAS_EXT_DL, .lif_count = 2, .rpf_count = 5, .uif_count = 2, .wpf_count = 2, .num_bru_inputs = 5, }, }; static int vsp1_probe(struct platform_device *pdev) { struct vsp1_device *vsp1; struct device_node *fcp_node; struct resource *irq; struct resource *io; unsigned int i; int ret; vsp1 = devm_kzalloc(&pdev->dev, sizeof(*vsp1), GFP_KERNEL); if (vsp1 == NULL) return -ENOMEM; vsp1->dev = 
&pdev->dev; INIT_LIST_HEAD(&vsp1->entities); INIT_LIST_HEAD(&vsp1->videos); <API key>(pdev, vsp1); /* I/O and IRQ resources (clock managed by the clock PM domain). */ io = <API key>(pdev, IORESOURCE_MEM, 0); vsp1->mmio = <API key>(&pdev->dev, io); if (IS_ERR(vsp1->mmio)) return PTR_ERR(vsp1->mmio); irq = <API key>(pdev, IORESOURCE_IRQ, 0); if (!irq) { dev_err(&pdev->dev, "missing IRQ\n"); return -EINVAL; } ret = devm_request_irq(&pdev->dev, irq->start, vsp1_irq_handler, IRQF_SHARED, dev_name(&pdev->dev), vsp1); if (ret < 0) { dev_err(&pdev->dev, "failed to request IRQ\n"); return ret; } /* FCP (optional). */ fcp_node = of_parse_phandle(pdev->dev.of_node, "renesas,fcp", 0); if (fcp_node) { vsp1->fcp = rcar_fcp_get(fcp_node); of_node_put(fcp_node); if (IS_ERR(vsp1->fcp)) { dev_dbg(&pdev->dev, "FCP not found (%ld)\n", PTR_ERR(vsp1->fcp)); return PTR_ERR(vsp1->fcp); } /* * When the FCP is present, it handles all bus master accesses * for the VSP and must thus be used in place of the VSP device * to map DMA buffers. */ vsp1->bus_master = rcar_fcp_get_device(vsp1->fcp); } else { vsp1->bus_master = vsp1->dev; } /* Configure device parameters based on the version register. */ pm_runtime_enable(&pdev->dev); ret = vsp1_device_get(vsp1); if (ret < 0) goto done; vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION); vsp1_device_put(vsp1); for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) { if ((vsp1->version & <API key>) == vsp1_device_infos[i].version) { vsp1->info = &vsp1_device_infos[i]; break; } } if (!vsp1->info) { dev_err(&pdev->dev, "unsupported IP version 0x%08x\n", vsp1->version); ret = -ENXIO; goto done; } dev_dbg(&pdev->dev, "IP version 0x%08x\n", vsp1->version); /* Instantiate entities. 
*/ ret = <API key>(vsp1); if (ret < 0) { dev_err(&pdev->dev, "failed to create entities\n"); goto done; } done: if (ret) pm_runtime_disable(&pdev->dev); return ret; } static int vsp1_remove(struct platform_device *pdev) { struct vsp1_device *vsp1 = <API key>(pdev); <API key>(vsp1); rcar_fcp_put(vsp1->fcp); pm_runtime_disable(&pdev->dev); return 0; } static const struct of_device_id vsp1_of_match[] = { { .compatible = "renesas,vsp1" }, { .compatible = "renesas,vsp2" }, { }, }; MODULE_DEVICE_TABLE(of, vsp1_of_match); static struct platform_driver <API key> = { .probe = vsp1_probe, .remove = vsp1_remove, .driver = { .name = "vsp1", .pm = &vsp1_pm_ops, .of_match_table = vsp1_of_match, }, }; <API key>(<API key>); MODULE_ALIAS("vsp1"); MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>"); MODULE_DESCRIPTION("Renesas VSP1 Driver"); MODULE_LICENSE("GPL");
#pragma once #include "FileItem.h" #include <string> #include <Neptune/Source/Core/NptReferences.h> #include <Neptune/Source/Core/NptStrings.h> #include <Neptune/Source/Core/NptTypes.h> class CUPnPServer; class CFileItem; class CThumbLoader; class PLT_DeviceData; class <API key>; class <API key>; class PLT_MediaObject; class NPT_String; namespace MUSIC_INFO { class CMusicInfoTag; } class CVideoInfoTag; namespace UPNP { enum UPnPService { UPnPServiceNone = 0, UPnPClient, <API key>, UPnPPlayer, UPnPRenderer }; class CResourceFinder { public: CResourceFinder(const char* protocol, const char* content = NULL); bool operator()(const <API key>& resource) const; private: NPT_String m_Protocol; NPT_String m_Content; }; enum EClientQuirks { ECLIENTQUIRKS_NONE = 0x0 /* Client requires folder's to be marked as storageFolders as vendor type (360)*/ , <API key> = 0x01 /* Client can't handle subtypes for videoItems (360) */ , <API key> = 0x02 /* Client requires album to be set to [Unknown Series] to show title (WMP) */ , <API key> = 0x04 }; EClientQuirks GetClientQuirks(const <API key>* context); enum <API key> { <API key> = 0x00 /* Media Controller expects MIME type video/x-mkv instead of video/x-matroska (Samsung) */ , <API key> = 0x01 }; <API key> <API key>(const PLT_DeviceData *device); const char* <API key>(const char* extension, const <API key>* context = NULL); NPT_String GetMimeType(const CFileItem& item, const <API key>* context = NULL); NPT_String GetMimeType(const char* filename, const <API key>* context = NULL); const NPT_String GetProtocolInfo(const CFileItem& item, const char* protocol, const <API key>* context = NULL); const std::string& <API key>(const std::string &item); NPT_Result <API key>(MUSIC_INFO::CMusicInfoTag& tag, PLT_MediaObject& object, <API key>* resource = NULL, UPnPService service = UPnPServiceNone); NPT_Result <API key>(CVideoInfoTag& tag, PLT_MediaObject& object, <API key>* resource = NULL, UPnPService service = UPnPServiceNone); NPT_Result <API 
key>(MUSIC_INFO::CMusicInfoTag& tag, PLT_MediaObject& object, NPT_String* file_path, <API key>* resource, EClientQuirks quirks, UPnPService service = UPnPServiceNone); NPT_Result <API key>(CVideoInfoTag& tag, PLT_MediaObject& object, NPT_String* file_path, <API key>* resource, EClientQuirks quirks, UPnPService service = UPnPServiceNone); PLT_MediaObject* BuildObject(CFileItem& item, NPT_String& file_path, bool with_count, NPT_Reference<CThumbLoader>& thumb_loader, const <API key>* context = NULL, CUPnPServer* upnp_server = NULL, UPnPService upnp_service = UPnPServiceNone); CFileItemPtr BuildObject(PLT_MediaObject* entry, UPnPService upnp_service = UPnPServiceNone); bool GetResource(const PLT_MediaObject* entry, CFileItem& item); CFileItemPtr GetFileItem(const NPT_String& uri, const NPT_String& meta); }
#ifdef HAVE_CONFIG_H #include <config.h> #endif #include <stdio.h> #include <stdbool.h> #include <stdlib.h> #include <errno.h> #include <string.h> #include <stdlib.h> #include "libusb.h" #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include "carlu.h" #include "usb.h" #include "debug.h" int carlu_fw_check(struct carlu *ar) { struct <API key> *otus_desc; otus_desc = carlfw_find_desc(ar->fw, (uint8_t *) OTUS_MAGIC, sizeof(*otus_desc), <API key>); if (!otus_desc) { err("No valid OTUS descriptor found.\n"); return -EINVAL; } if (!carl9170fw_supports(otus_desc->feature_set, <API key>)) { err("Invalid Firmware Descriptor.\n"); return -EIO; } if (carl9170fw_supports(otus_desc->feature_set, CARL9170FW_UNUSABLE)) dbg("Firmware is marked as unuseable.\n"); info("Firmware Version: %d.\n", otus_desc->api_ver); return 0; } int carlusb_fw_check(struct carlu *ar) { struct <API key> *otus_desc; otus_desc = carlfw_find_desc(ar->fw, (uint8_t *) OTUS_MAGIC, sizeof(*otus_desc), <API key>); if (!otus_desc) { err("No valid USB descriptor found.\n"); return -ENODATA; } if (!carl9170fw_supports(otus_desc->feature_set, <API key>)) { err("Invalid Firmware Descriptor.\n"); return -EINVAL; } if (!carl9170fw_supports(otus_desc->feature_set, <API key>)) { err("Firmware does not know how to initialize USB core.\n"); return -EOPNOTSUPP; } if (carl9170fw_supports(otus_desc->feature_set, <API key>)) { dbg("Enabled tx stream mode.\n"); ar->tx_stream = true; ar->extra_headroom = sizeof(struct ar9170_stream); } if (carl9170fw_supports(otus_desc->feature_set, <API key>)) { dbg("Enabled rx stream mode.\n"); ar->rx_stream = true; } if (carl9170fw_supports(otus_desc->feature_set, <API key>)) dbg("Firmware sends traps over EP2.\n"); ar->dma_chunk_size = le16_to_cpu(otus_desc->tx_frag_len); ar->dma_chunks = otus_desc->tx_descs; ar->rx_max = le16_to_cpu(otus_desc->rx_max_frame_len); if (carl9170fw_supports(otus_desc->feature_set, CARL9170FW_MINIBOOT)) ar->miniboot_size = 
le16_to_cpu(otus_desc->miniboot_size); return 0; } void carlu_fw_info(struct carlu *ar) { struct <API key> *motd_desc; unsigned int fw_date; motd_desc = carlfw_find_desc(ar->fw, (uint8_t *) MOTD_MAGIC, sizeof(*motd_desc), <API key>); if (motd_desc) { fw_date = le32_to_cpu(motd_desc->fw_year_month_day); info("Firmware Date: 2%.3d-%.2d-%.2d\n", CARL9170FW_GET_YEAR(fw_date), <API key>(fw_date), CARL9170FW_GET_DAY(fw_date)); } }
#pragma once #include <memory> #include "AudioCommon/Mixer.h" #include "Common/CommonTypes.h" class SoundStream { protected: std::unique_ptr<Mixer> m_mixer; public: SoundStream() : m_mixer(new Mixer(48000)) {} virtual ~SoundStream() {} static bool isValid() { return false; } Mixer* GetMixer() const { return m_mixer.get(); } virtual bool Init() { return false; } virtual void SetVolume(int) {} virtual void SoundLoop() {} virtual void Update() {} virtual bool SetRunning(bool running) { return false; } };
// Minimal Core Data interface stubs -- only the declarations needed by the
// consumer of this header are reproduced; all behaviour lives elsewhere.
@import Foundation;

// Stub of Core Data's entity-description class.
@interface NSEntityDescription : NSObject
@end

// Stub of the managed-object-context type taken by the initializer below.
@interface <API key> : NSObject
@end

@interface NSManagedObject : NSObject

// Designated initializer: associates the newly created managed object with
// an entity description and a context.
- (__kindof NSManagedObject *)initWithEntity:(NSEntityDescription *)entity <API key>:(<API key> *)context;

// Entity description of the receiver (read-only).
@property(nonatomic, readonly, strong) NSEntityDescription *entity;

// Class-level accessor.  NOTE(review): it deliberately shares the selector
// `entity` with the instance property above -- confirm callers expect both.
+ (NSEntityDescription *)entity;

@end
# Homebrew cask for Prezi Video, Prezi's desktop recording/streaming app.
cask "prezi-video" do
  version "1.13.0"
  sha256 "<SHA256-like>"

  url "https://desktopassets.prezi.com/mac/prezi-video/releases/Prezi_Video_#{version}.dmg"
  name "Prezi Video"
  desc "Lets you interact with your content live as you stream or record"
  homepage "https://prezi.com/video/"

  # The DMG ships a pkg installer rather than a drag-and-drop app bundle.
  pkg "Install Prezi Video.pkg"

  # Quit the running app and unload the virtual-camera launchd assistant
  # before forgetting the installer receipts and deleting installed files
  # (the CoreMediaIO DAL plugin lives outside /Applications).
  uninstall quit:      "com.prezi.PreziCast",
            launchctl: "com.prezi.prezivideo.vcam.assistant",
            pkgutil:   [
              "com.prezi.PreziCast",
              "com.prezi.prezivideo.vcam.plugin",
            ],
            delete:    [
              "/Applications/Prezi Video.app",
              "/Library/CoreMediaIO/Plug-Ins/DAL/PreziAR.plugin",
            ]

  # User-level state removed on `brew uninstall --zap`.
  zap trash: [
    "~/Library/Application Support/com.prezi.PreziCast",
    "~/Library/Preferences/com.prezi.PreziCast.plist",
    "~/Library/Preferences/com.prezi.PreziVideo.vcam",
  ]
end
<?php if (!defined('IN_CKFINDER')) exit; class <API key> { public static function getErrorMessage($number, $arg = "") { $langCode = 'en'; if (!empty($_GET['langCode']) && preg_match("/^[a-z\-]+$/", $_GET['langCode'])) { if (file_exists(<API key> . "/" . $_GET['langCode'] . ".php")) $langCode = $_GET['langCode']; } include <API key> . "/" . $langCode . ".php"; if ($number) { if (!empty ($GLOBALS['CKFLang']['Errors'][$number])) { $errorMessage = str_replace("%1", $arg, $GLOBALS['CKFLang']['Errors'][$number]); } else { $errorMessage = str_replace("%1", $number, $GLOBALS['CKFLang']['ErrorUnknown']); } } else { $errorMessage = ""; } return $errorMessage; } /** * Simulate the encodeURIComponent() function available in JavaScript * @param string $str * @return string */ public static function encodeURIComponent($str) { $revert = array('%21'=>'!', '%2A'=>'*', '%27'=>"'", '%28'=>'(', '%29'=>')'); return strtr(rawurlencode($str), $revert); } /** * Convert any value to boolean, strings like "false", "FalSE" and "off" are also considered as false * * @static * @access public * @param mixed $value * @return boolean */ public static function booleanValue($value) { if (strcasecmp("false", $value) == 0 || strcasecmp("off", $value) == 0 || !$value) { return false; } else { return true; } } public static function <API key> (&$dst_image, $src_image, $dst_x, $dst_y, $src_x, $src_y, $dst_w, $dst_h, $src_w, $src_h, $quality = 3) { if (empty($src_image) || empty($dst_image)) { return false; } if ($quality <= 1) { $temp = <API key> ($dst_w + 1, $dst_h + 1); imagecopyresized ($temp, $src_image, $dst_x, $dst_y, $src_x, $src_y, $dst_w + 1, $dst_h + 1, $src_w, $src_h); imagecopyresized ($dst_image, $temp, 0, 0, 0, 0, $dst_w, $dst_h, $dst_w, $dst_h); imagedestroy ($temp); } elseif ($quality < 5 && (($dst_w * $quality) < $src_w || ($dst_h * $quality) < $src_h)) { $tmp_w = $dst_w * $quality; $tmp_h = $dst_h * $quality; $temp = <API key> ($tmp_w + 1, $tmp_h + 1); imagecopyresized ($temp, 
$src_image, 0, 0, $src_x, $src_y, $tmp_w + 1, $tmp_h + 1, $src_w, $src_h); imagecopyresampled ($dst_image, $temp, $dst_x, $dst_y, 0, 0, $dst_w, $dst_h, $tmp_w, $tmp_h); imagedestroy ($temp); } else { imagecopyresampled ($dst_image, $src_image, $dst_x, $dst_y, $src_x, $src_y, $dst_w, $dst_h, $src_w, $src_h); } return true; } public static function setMemoryForImage($imageWidth, $imageHeight, $imageBits, $imageChannels) { $MB = 1048576; // number of bytes in 1M $K64 = 65536; // number of bytes in 64K $TWEAKFACTOR = 2.4; // Or whatever works for you $memoryNeeded = round( ( $imageWidth * $imageHeight * $imageBits * $imageChannels / 8 + $K64 ) * $TWEAKFACTOR ) + 3*$MB; //ini_get('memory_limit') only works if compiled with "--enable-memory-limit" also //Default memory limit is 8MB so well stick with that. //To find out what yours is, view your php.ini file. $memoryLimit = <API key>::returnBytes(@ini_get('memory_limit'))/$MB; // There are no memory limits, nothing to do if ($memoryLimit == -1) { return true; } if (!$memoryLimit) { $memoryLimit = 8; } $memoryLimitMB = $memoryLimit * $MB; if (function_exists('memory_get_usage')) { if (memory_get_usage() + $memoryNeeded > $memoryLimitMB) { $newLimit = $memoryLimit + ceil( ( memory_get_usage() + $memoryNeeded - $memoryLimitMB ) / $MB ); if (@ini_set( 'memory_limit', $newLimit . 'M' ) === false) { return false; } } } else { if ($memoryNeeded + 3*$MB > $memoryLimitMB) { $newLimit = $memoryLimit + ceil(( 3*$MB + $memoryNeeded - $memoryLimitMB ) / $MB ); if (false === @ini_set( 'memory_limit', $newLimit . 
'M' )) { return false; } } } return true; } public static function returnBytes($val) { $val = trim($val); if (!$val) { return 0; } $last = strtolower($val[strlen($val)-1]); switch($last) { // The 'G' modifier is available since PHP 5.1.0 case 'g': $val *= 1024; case 'm': $val *= 1024; case 'k': $val *= 1024; } return $val; } /** * Checks if a value exists in an array (case insensitive) * * @static * @access public * @param string $needle * @param array $haystack * @return boolean */ public static function <API key>($needle, $haystack) { if (!$haystack || !is_array($haystack)) { return false; } $lcase = array(); foreach ($haystack as $key => $val) { $lcase[$key] = strtolower($val); } return in_array($needle, $lcase); } /** * UTF-8 compatible version of basename() * * @static * @access public * @param string $file * @return string */ public static function mbBasename($file) { $explode = explode('/', str_replace("\\", "/", $file)); return end($explode); } /** * Checks whether the string is valid UTF8 * @static * @access public * @param string $string * @return boolean */ public static function isValidUTF8($string) { if (strlen($string) == 0) { return true; } return (preg_match('/^./us', $string) == 1); } public static function imageCreateFromBmp($filename) { //20 seconds seems to be a reasonable value to not kill a server and process images up to 1680x1050 @set_time_limit(20); if (false === ($f1 = fopen($filename, "rb"))) { return false; } $FILE = unpack("vfile_type/Vfile_size/Vreserved/Vbitmap_offset", fread($f1, 14)); if ($FILE['file_type'] != 19778) { return false; } $BMP = unpack('Vheader_size/Vwidth/Vheight/vplanes/vbits_per_pixel'. '/Vcompression/Vsize_bitmap/Vhoriz_resolution'. 
'/Vvert_resolution/Vcolors_used/Vcolors_important', fread($f1, 40)); $BMP['colors'] = pow(2,$BMP['bits_per_pixel']); if ($BMP['size_bitmap'] == 0) { $BMP['size_bitmap'] = $FILE['file_size'] - $FILE['bitmap_offset']; } $BMP['bytes_per_pixel'] = $BMP['bits_per_pixel']/8; $BMP['bytes_per_pixel2'] = ceil($BMP['bytes_per_pixel']); $BMP['decal'] = ($BMP['width']*$BMP['bytes_per_pixel']/4); $BMP['decal'] -= floor($BMP['width']*$BMP['bytes_per_pixel']/4); $BMP['decal'] = 4-(4*$BMP['decal']); if ($BMP['decal'] == 4) { $BMP['decal'] = 0; } $PALETTE = array(); if ($BMP['colors'] < 16777216) { $PALETTE = unpack('V'.$BMP['colors'], fread($f1, $BMP['colors']*4)); } //2048x1536px@24bit don't even try to process larger files as it will probably fail if ($BMP['size_bitmap'] > 3 * 2048 * 1536) { return false; } $IMG = fread($f1, $BMP['size_bitmap']); fclose($f1); $VIDE = chr(0); $res = <API key>($BMP['width'],$BMP['height']); $P = 0; $Y = $BMP['height']-1; $line_length = $BMP['bytes_per_pixel']*$BMP['width']; if ($BMP['bits_per_pixel'] == 24) { while ($Y >= 0) { $X=0; $temp = unpack( "C*", substr($IMG, $P, $line_length)); while ($X < $BMP['width']) { $offset = $X*3; imagesetpixel($res, $X++, $Y, ($temp[$offset+3] << 16) + ($temp[$offset+2] << 8) + $temp[$offset+1]); } $Y $P += $line_length + $BMP['decal']; } } elseif ($BMP['bits_per_pixel'] == 8) { while ($Y >= 0) { $X=0; $temp = unpack( "C*", substr($IMG, $P, $line_length)); while ($X < $BMP['width']) { imagesetpixel($res, $X++, $Y, $PALETTE[$temp[$X] +1]); } $Y $P += $line_length + $BMP['decal']; } } elseif ($BMP['bits_per_pixel'] == 4) { while ($Y >= 0) { $X=0; $i = 1; $low = true; $temp = unpack( "C*", substr($IMG, $P, $line_length)); while ($X < $BMP['width']) { if ($low) { $index = $temp[$i] >> 4; } else { $index = $temp[$i++] & 0x0F; } $low = !$low; imagesetpixel($res, $X++, $Y, $PALETTE[$index +1]); } $Y $P += $line_length + $BMP['decal']; } } elseif ($BMP['bits_per_pixel'] == 1) { $COLOR = 
unpack("n",$VIDE.substr($IMG,floor($P),1)); if (($P*8)%8 == 0) $COLOR[1] = $COLOR[1] >>7; elseif (($P*8)%8 == 1) $COLOR[1] = ($COLOR[1] & 0x40)>>6; elseif (($P*8)%8 == 2) $COLOR[1] = ($COLOR[1] & 0x20)>>5; elseif (($P*8)%8 == 3) $COLOR[1] = ($COLOR[1] & 0x10)>>4; elseif (($P*8)%8 == 4) $COLOR[1] = ($COLOR[1] & 0x8)>>3; elseif (($P*8)%8 == 5) $COLOR[1] = ($COLOR[1] & 0x4)>>2; elseif (($P*8)%8 == 6) $COLOR[1] = ($COLOR[1] & 0x2)>>1; elseif (($P*8)%8 == 7) $COLOR[1] = ($COLOR[1] & 0x1); $COLOR[1] = $PALETTE[$COLOR[1]+1]; } else { return false; } return $res; } }
// Angular Material demo: positioning a menu relative to its trigger.
// Registers the demo module, wires up the icon sets used by the menu, and
// exposes a controller that opens the menu and reports which item was hit.
angular
  .module('menuDemoPosition', ['ngMaterial'])
  .config(function iconConfig($mdIconProvider) {
    // Register the SVG sprite sheets the demo's icons come from.
    $mdIconProvider.iconSet("call", 'img/icons/sets/communication-icons.svg', 24);
    $mdIconProvider.iconSet("social", 'img/icons/sets/social-icons.svg', 24);
  })
  .controller('PositionDemoCtrl', function PositionCtrl($mdDialog) {
    // Event that opened the menu; used to animate the dialog back to its
    // originating element, then cleared once consumed.
    var lastMenuEvent;

    this.openMenu = function(openMenuFn, triggerEvent) {
      lastMenuEvent = triggerEvent;
      openMenuFn(triggerEvent);
    };

    this.announceClick = function(itemIndex) {
      var alert = $mdDialog.alert()
        .title('You clicked!')
        .textContent('You clicked the menu item at index ' + itemIndex)
        .ok('Nice')
        .targetEvent(lastMenuEvent);
      $mdDialog.show(alert);
      lastMenuEvent = null;
    };
  });
/* * MT safe */ #include "config.h" /* Uncomment the next line (and the corresponding line in gmain.c) to * enable debugging printouts if the environment variable * G_MAIN_POLL_DEBUG is set to some value. */ /* #define G_MAIN_POLL_DEBUG */ #ifdef _WIN32 /* Always enable debugging printout on Windows, as it is more often * needed there... */ #define G_MAIN_POLL_DEBUG #endif #include "glib.h" #include <sys/types.h> #include <time.h> #include <stdlib.h> #ifdef HAVE_SYS_TIME_H #include <sys/time.h> #endif /* HAVE_SYS_TIME_H */ #ifdef <API key> # include <sys/poll.h> # undef events /* AIX 4.1.5 & 4.3.2 define this for SVR3,4 compatibility */ # undef revents /* AIX 4.1.5 & 4.3.2 define this for SVR3,4 compatibility */ /* The poll() emulation on OS/X doesn't handle fds=NULL, nfds=0, * so we prefer our own poll emulation. */ #if defined(_POLL_EMUL_H_) || defined(BROKEN_POLL) #undef HAVE_POLL #endif #endif /* <API key> */ #ifdef HAVE_UNISTD_H #include <unistd.h> #endif /* HAVE_UNISTD_H */ #include <errno.h> #ifdef G_OS_WIN32 #define STRICT #include <windows.h> #endif /* G_OS_WIN32 */ #include "galias.h" #ifdef G_MAIN_POLL_DEBUG extern gboolean _g_main_poll_debug; #endif #ifdef HAVE_POLL /* SunOS has poll, but doesn't provide a prototype. */ # if defined (sun) && !defined (__SVR4) extern gint poll (struct pollfd *fds, guint nfsd, gint timeout); # endif /* !sun */ /** * g_poll: * @fds: file descriptors to poll * @nfds: the number of file descriptors in @fds * @timeout: amount of time to wait, in milliseconds, or -1 to wait forever * * Polls @fds, as with the poll() system call, but portably. (On * systems that don't have poll(), it is emulated using select().) * This is used internally by #GMainContext, but it can be called * directly if you need to block until a file descriptor is ready, but * don't want to run the full main loop. * * Each element of @fds is a #GPollFD describing a single file * descriptor to poll. 
The %fd field indicates the file descriptor, * and the %events field indicates the events to poll for. On return, * the %revents fields will be filled with the events that actually * occurred. * * On POSIX systems, the file descriptors in @fds can be any sort of * file descriptor, but the situation is much more complicated on * Windows. If you need to use g_poll() in code that has to run on * Windows, the easiest solution is to construct all of your * #GPollFD<!-- -->s with <API key>(). * * Return value: the number of entries in @fds whose %revents fields * were filled in, or 0 if the operation timed out, or -1 on error or * if the call was interrupted. * * Since: 2.20 **/ gint g_poll (GPollFD *fds, guint nfds, gint timeout) { return poll ((struct pollfd *)fds, nfds, timeout); } #else /* !HAVE_POLL */ #ifdef G_OS_WIN32 static int poll_rest (gboolean poll_msgs, HANDLE *handles, gint nhandles, GPollFD *fds, guint nfds, gint timeout) { DWORD ready; GPollFD *f; int recursed_result; if (poll_msgs) { /* Wait for either messages or handles * -> Use <API key> */ if (_g_main_poll_debug) g_print (" <API key>(%d, %d)\n", nhandles, timeout); ready = <API key> (nhandles, handles, timeout, QS_ALLINPUT, MWMO_ALERTABLE); if (ready == WAIT_FAILED) { gchar *emsg = <API key> (GetLastError ()); g_warning ("<API key> failed: %s", emsg); g_free (emsg); } } else if (nhandles == 0) { /* No handles to wait for, just the timeout */ if (timeout == INFINITE) ready = WAIT_FAILED; else { SleepEx (timeout, TRUE); ready = WAIT_TIMEOUT; } } else { /* Wait for just handles * -> Use <API key> */ if (_g_main_poll_debug) g_print (" <API key>(%d, %d)\n", nhandles, timeout); ready = <API key> (nhandles, handles, FALSE, timeout, TRUE); if (ready == WAIT_FAILED) { gchar *emsg = <API key> (GetLastError ()); g_warning ("<API key> failed: %s", emsg); g_free (emsg); } } if (_g_main_poll_debug) g_print (" wait returns %ld%s\n", ready, (ready == WAIT_FAILED ? " (WAIT_FAILED)" : (ready == WAIT_TIMEOUT ? 
" (WAIT_TIMEOUT)" : (poll_msgs && ready == WAIT_OBJECT_0 + nhandles ? " (msg)" : "")))); if (ready == WAIT_FAILED) return -1; else if (ready == WAIT_TIMEOUT || ready == WAIT_IO_COMPLETION) return 0; else if (poll_msgs && ready == WAIT_OBJECT_0 + nhandles) { for (f = fds; f < &fds[nfds]; ++f) if (f->fd == G_WIN32_MSG_HANDLE && f->events & G_IO_IN) f->revents |= G_IO_IN; /* If we have a timeout, or no handles to poll, be satisfied * with just noticing we have messages waiting. */ if (timeout != 0 || nhandles == 0) return 1; /* If no timeout and handles to poll, recurse to poll them, * too. */ recursed_result = poll_rest (FALSE, handles, nhandles, fds, nfds, 0); return (recursed_result == -1) ? -1 : 1 + recursed_result; } else if (ready >= WAIT_OBJECT_0 && ready < WAIT_OBJECT_0 + nhandles) { for (f = fds; f < &fds[nfds]; ++f) { if ((HANDLE) f->fd == handles[ready - WAIT_OBJECT_0]) { f->revents = f->events; if (_g_main_poll_debug) g_print (" got event %p\n", (HANDLE) f->fd); } } /* If no timeout and polling several handles, recurse to poll * the rest of them. */ if (timeout == 0 && nhandles > 1) { /* Remove the handle that fired */ int i; if (ready < nhandles - 1) for (i = ready - WAIT_OBJECT_0 + 1; i < nhandles; i++) handles[i-1] = handles[i]; nhandles recursed_result = poll_rest (FALSE, handles, nhandles, fds, nfds, 0); return (recursed_result == -1) ? -1 : 1 + recursed_result; } return 1; } return 0; } gint g_poll (GPollFD *fds, guint nfds, gint timeout) { HANDLE handles[<API key>]; gboolean poll_msgs = FALSE; GPollFD *f; gint nhandles = 0; int retval; if (_g_main_poll_debug) g_print ("g_poll: waiting for"); for (f = fds; f < &fds[nfds]; ++f) if (f->fd == G_WIN32_MSG_HANDLE && (f->events & G_IO_IN)) { if (_g_main_poll_debug && !poll_msgs) g_print (" MSG"); poll_msgs = TRUE; } else if (f->fd > 0) { /* Don't add the same handle several times into the array, as * docs say that is not allowed, even if it actually does seem * to work. 
*/ gint i; for (i = 0; i < nhandles; i++) if (handles[i] == (HANDLE) f->fd) break; if (i == nhandles) { if (nhandles == <API key>) { g_warning ("Too many handles to wait for!\n"); break; } else { if (_g_main_poll_debug) g_print (" %p", (HANDLE) f->fd); handles[nhandles++] = (HANDLE) f->fd; } } } if (_g_main_poll_debug) g_print ("\n"); for (f = fds; f < &fds[nfds]; ++f) f->revents = 0; if (timeout == -1) timeout = INFINITE; /* Polling for several things? */ if (nhandles > 1 || (nhandles > 0 && poll_msgs)) { /* First check if one or several of them are immediately * available */ retval = poll_rest (poll_msgs, handles, nhandles, fds, nfds, 0); /* If not, and we have a significant timeout, poll again with * timeout then. Note that this will return indication for only * one event, or only for messages. We ignore timeouts less than * ten milliseconds as they are mostly pointless on Windows, the * <API key>() call will timeout right away * anyway. */ if (retval == 0 && (timeout == INFINITE || timeout >= 10)) retval = poll_rest (poll_msgs, handles, nhandles, fds, nfds, timeout); } else { /* Just polling for one thing, so no need to check first if * available immediately */ retval = poll_rest (poll_msgs, handles, nhandles, fds, nfds, timeout); } if (retval == -1) for (f = fds; f < &fds[nfds]; ++f) f->revents = 0; return retval; } #else /* !G_OS_WIN32 */ #include <string.h> /* for bzero on BSD systems */ #ifdef HAVE_SYS_SELECT_H #include <sys/select.h> #endif /* HAVE_SYS_SELECT_H */ #ifdef G_OS_BEOS #undef NO_FD_SET #endif /* G_OS_BEOS */ #ifndef NO_FD_SET # define SELECT_MASK fd_set #else /* !NO_FD_SET */ # ifndef _AIX typedef long fd_mask; # endif /* _AIX */ # ifdef _IBMR2 # define SELECT_MASK void # else /* !_IBMR2 */ # define SELECT_MASK int # endif /* !_IBMR2 */ #endif /* !NO_FD_SET */ gint g_poll (GPollFD *fds, guint nfds, gint timeout) { struct timeval tv; SELECT_MASK rset, wset, xset; GPollFD *f; int ready; int maxfd = 0; FD_ZERO (&rset); FD_ZERO (&wset); FD_ZERO 
(&xset); for (f = fds; f < &fds[nfds]; ++f) if (f->fd >= 0) { if (f->events & G_IO_IN) FD_SET (f->fd, &rset); if (f->events & G_IO_OUT) FD_SET (f->fd, &wset); if (f->events & G_IO_PRI) FD_SET (f->fd, &xset); if (f->fd > maxfd && (f->events & (G_IO_IN|G_IO_OUT|G_IO_PRI))) maxfd = f->fd; } tv.tv_sec = timeout / 1000; tv.tv_usec = (timeout % 1000) * 1000; ready = select (maxfd + 1, &rset, &wset, &xset, timeout == -1 ? NULL : &tv); if (ready > 0) for (f = fds; f < &fds[nfds]; ++f) { f->revents = 0; if (f->fd >= 0) { if (FD_ISSET (f->fd, &rset)) f->revents |= G_IO_IN; if (FD_ISSET (f->fd, &wset)) f->revents |= G_IO_OUT; if (FD_ISSET (f->fd, &xset)) f->revents |= G_IO_PRI; } } return ready; } #endif /* !G_OS_WIN32 */ #endif /* !HAVE_POLL */ #define __G_POLL_C__ #include "galiasdef.c"
/* OOUI icon pack (MediaWiki theme): alert/notification icons.
 * The `@embed` annotations instruct the build pipeline (ResourceLoader /
 * LESS embed plugin) to inline each image as a data URI at build time;
 * `-ltr` suffixed assets have a mirrored RTL counterpart selected elsewhere. */
.oo-ui-icon-bell {
	background-image: /* @embed */ url(themes/mediawiki/images/icons/bell.png);
}

.oo-ui-icon-bellOn {
	background-image: /* @embed */ url(themes/mediawiki/images/icons/bellOn-ltr.png);
}

.oo-ui-icon-eye {
	background-image: /* @embed */ url(themes/mediawiki/images/icons/eye.png);
}

.<API key> {
	background-image: /* @embed */ url(themes/mediawiki/images/icons/eyeClosed.png);
}

.oo-ui-icon-message {
	background-image: /* @embed */ url(themes/mediawiki/images/icons/message-ltr.png);
}

.<API key> {
	background-image: /* @embed */ url(themes/mediawiki/images/icons/signature-ltr.png);
}

.<API key> {
	background-image: /* @embed */ url(themes/mediawiki/images/icons/speechBubble-ltr.png);
}

.<API key> {
	background-image: /* @embed */ url(themes/mediawiki/images/icons/speechBubbleAdd-ltr.png);
}

.<API key> {
	background-image: /* @embed */ url(themes/mediawiki/images/icons/speechBubbles-ltr.png);
}
<reference path="fourslash.ts" /> // @BaselineFile: <API key>.baseline // @declaration: true // @sourceMap: true // @jsx: react // @Filename: inputFile1.ts // @emitThisFile: true /// regular ts file / var t: number = 5; / class Bar { / x : string; / y : number / } / // @Filename: inputFile2.tsx // @emitThisFile: true / declare var React: any; / var y = "my div"; / var x = <div name= {y} /> / goTo.marker("1"); verify.<API key>(0); goTo.marker("2"); verify.<API key>(0); verify.<API key>();
// basic_socket.hpp #ifndef <API key> #define <API key> #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/async_result.hpp" #include "asio/basic_io_object.hpp" #include "asio/detail/<API key>.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/socket_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { Provides socket functionality. /** * The basic_socket class template provides functionality that is common to both * stream-oriented and datagram-oriented sockets. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template <typename Protocol, typename SocketService> class basic_socket : public basic_io_object<SocketService>, public socket_base { public: (Deprecated: Use native_handle_type.) The native representation of a socket. typedef typename SocketService::native_handle_type native_type; The native representation of a socket. typedef typename SocketService::native_handle_type native_handle_type; The protocol type. typedef Protocol protocol_type; The endpoint type. typedef typename Protocol::endpoint endpoint_type; A basic_socket is always the lowest layer. typedef basic_socket<Protocol, SocketService> lowest_layer_type; Construct a basic_socket without opening it. /** * This constructor creates a socket without opening it. * * @param io_service The io_service object that the socket will use to * dispatch handlers for any asynchronous operations performed on the socket. */ explicit basic_socket(asio::io_service& io_service) : basic_io_object<SocketService>(io_service) { } Construct and open a basic_socket. /** * This constructor creates and opens a socket. * * @param io_service The io_service object that the socket will use to * dispatch handlers for any asynchronous operations performed on the socket. 
* * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. */ basic_socket(asio::io_service& io_service, const protocol_type& protocol) : basic_io_object<SocketService>(io_service) { asio::error_code ec; this->get_service().open(this->get_implementation(), protocol, ec); asio::detail::throw_error(ec, "open"); } Construct a basic_socket, opening it and binding it to the given local endpoint. /** * This constructor creates a socket and automatically opens it bound to the * specified endpoint on the local machine. The protocol used is the protocol * associated with the given endpoint. * * @param io_service The io_service object that the socket will use to * dispatch handlers for any asynchronous operations performed on the socket. * * @param endpoint An endpoint on the local machine to which the socket will * be bound. * * @throws asio::system_error Thrown on failure. */ basic_socket(asio::io_service& io_service, const endpoint_type& endpoint) : basic_io_object<SocketService>(io_service) { asio::error_code ec; const protocol_type protocol = endpoint.protocol(); this->get_service().open(this->get_implementation(), protocol, ec); asio::detail::throw_error(ec, "open"); this->get_service().bind(this->get_implementation(), endpoint, ec); asio::detail::throw_error(ec, "bind"); } Construct a basic_socket on an existing native socket. /** * This constructor creates a socket object to hold an existing native socket. * * @param io_service The io_service object that the socket will use to * dispatch handlers for any asynchronous operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @param native_socket A native socket. * * @throws asio::system_error Thrown on failure. 
*/ basic_socket(asio::io_service& io_service, const protocol_type& protocol, const native_handle_type& native_socket) : basic_io_object<SocketService>(io_service) { asio::error_code ec; this->get_service().assign(this->get_implementation(), protocol, native_socket, ec); asio::detail::throw_error(ec, "assign"); } #if defined(ASIO_HAS_MOVE) || defined(<API key>) Move-construct a basic_socket from another. /** * This constructor moves a socket from one object to another. * * @param other The other basic_socket object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_socket(io_service&) constructor. */ basic_socket(basic_socket&& other) : basic_io_object<SocketService>( ASIO_MOVE_CAST(basic_socket)(other)) { } Move-assign a basic_socket from another. /** * This assignment operator moves a socket from one object to another. * * @param other The other basic_socket object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_socket(io_service&) constructor. */ basic_socket& operator=(basic_socket&& other) { basic_io_object<SocketService>::operator=( ASIO_MOVE_CAST(basic_socket)(other)); return *this; } // All sockets have access to each other's implementations. template <typename Protocol1, typename SocketService1> friend class basic_socket; Move-construct a basic_socket from a socket of another protocol type. /** * This constructor moves a socket from one object to another. * * @param other The other basic_socket object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_socket(io_service&) constructor. 
*/ template <typename Protocol1, typename SocketService1> basic_socket(basic_socket<Protocol1, SocketService1>&& other, typename enable_if<is_convertible<Protocol1, Protocol>::value>::type* = 0) : basic_io_object<SocketService>(other.get_io_service()) { this->get_service().template <API key><Protocol1>( this->get_implementation(), other.get_implementation()); } Move-assign a basic_socket from a socket of another protocol type. /** * This assignment operator moves a socket from one object to another. * * @param other The other basic_socket object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_socket(io_service&) constructor. */ template <typename Protocol1, typename SocketService1> typename enable_if<is_convertible<Protocol1, Protocol>::value, basic_socket>::type& operator=( basic_socket<Protocol1, SocketService1>&& other) { basic_socket tmp(ASIO_MOVE_CAST2(basic_socket< Protocol1, SocketService1>)(other)); basic_io_object<SocketService>::operator=( ASIO_MOVE_CAST(basic_socket)(tmp)); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(<API key>) Get a reference to the lowest layer. /** * This function returns a reference to the lowest layer in a stack of * layers. Since a basic_socket cannot contain any further layers, it simply * returns a reference to itself. * * @return A reference to the lowest layer in the stack of layers. Ownership * is not transferred to the caller. */ lowest_layer_type& lowest_layer() { return *this; } Get a const reference to the lowest layer. /** * This function returns a const reference to the lowest layer in a stack of * layers. Since a basic_socket cannot contain any further layers, it simply * returns a reference to itself. * * @return A const reference to the lowest layer in the stack of layers. * Ownership is not transferred to the caller. 
*/ const lowest_layer_type& lowest_layer() const { return *this; } Open the socket using the specified protocol. /** * This function opens the socket so that it will use the specified protocol. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * socket.open(asio::ip::tcp::v4()); * @endcode */ void open(const protocol_type& protocol = protocol_type()) { asio::error_code ec; this->get_service().open(this->get_implementation(), protocol, ec); asio::detail::throw_error(ec, "open"); } Open the socket using the specified protocol. /** * This function opens the socket so that it will use the specified protocol. * * @param protocol An object specifying which protocol is to be used. * * @param ec Set to indicate what error occurred, if any. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * asio::error_code ec; * socket.open(asio::ip::tcp::v4(), ec); * if (ec) * { * // An error occurred. * } * @endcode */ asio::error_code open(const protocol_type& protocol, asio::error_code& ec) { return this->get_service().open(this->get_implementation(), protocol, ec); } Assign an existing native socket to the socket. /* * This function opens the socket to hold an existing native socket. * * @param protocol An object specifying which protocol is to be used. * * @param native_socket A native socket. * * @throws asio::system_error Thrown on failure. */ void assign(const protocol_type& protocol, const native_handle_type& native_socket) { asio::error_code ec; this->get_service().assign(this->get_implementation(), protocol, native_socket, ec); asio::detail::throw_error(ec, "assign"); } Assign an existing native socket to the socket. /* * This function opens the socket to hold an existing native socket. * * @param protocol An object specifying which protocol is to be used. * * @param native_socket A native socket. 
* * @param ec Set to indicate what error occurred, if any. */ asio::error_code assign(const protocol_type& protocol, const native_handle_type& native_socket, asio::error_code& ec) { return this->get_service().assign(this->get_implementation(), protocol, native_socket, ec); } Determine whether the socket is open. bool is_open() const { return this->get_service().is_open(this->get_implementation()); } Close the socket. /** * This function is used to close the socket. Any asynchronous send, receive * or connect operations will be cancelled immediately, and will complete * with the asio::error::operation_aborted error. * * @throws asio::system_error Thrown on failure. Note that, even if * the function indicates an error, the underlying descriptor is closed. * * @note For portable behaviour with respect to graceful closure of a * connected socket, call shutdown() before closing the socket. */ void close() { asio::error_code ec; this->get_service().close(this->get_implementation(), ec); asio::detail::throw_error(ec, "close"); } Close the socket. /** * This function is used to close the socket. Any asynchronous send, receive * or connect operations will be cancelled immediately, and will complete * with the asio::error::operation_aborted error. * * @param ec Set to indicate what error occurred, if any. Note that, even if * the function indicates an error, the underlying descriptor is closed. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::error_code ec; * socket.close(ec); * if (ec) * { * // An error occurred. * } * @endcode * * @note For portable behaviour with respect to graceful closure of a * connected socket, call shutdown() before closing the socket. */ asio::error_code close(asio::error_code& ec) { return this->get_service().close(this->get_implementation(), ec); } (Deprecated: Use native_handle().) Get the native socket representation. /** * This function may be used to obtain the underlying representation of the * socket. 
This is intended to allow access to native socket functionality * that is not otherwise provided. */ native_type native() { return this->get_service().native_handle(this->get_implementation()); } Get the native socket representation. /** * This function may be used to obtain the underlying representation of the * socket. This is intended to allow access to native socket functionality * that is not otherwise provided. */ native_handle_type native_handle() { return this->get_service().native_handle(this->get_implementation()); } Cancel all asynchronous operations associated with the socket. /** * This function causes all outstanding asynchronous connect, send and receive * operations to finish immediately, and the handlers for cancelled operations * will be passed the asio::error::operation_aborted error. * * @throws asio::system_error Thrown on failure. * * @note Calls to cancel() will always fail with * asio::error::<API key> when run on Windows XP, Windows * Server 2003, and earlier versions of Windows, unless * <API key> is defined. However, the CancelIo function has * two issues that should be considered before enabling its use: * * @li It will only cancel asynchronous operations that were initiated in the * current thread. * * @li It can appear to complete without error, but the request to cancel the * unfinished operations may be silently ignored by the operating system. * Whether it works or not seems to depend on the drivers that are installed. * * For portable cancellation, consider using one of the following * alternatives: * * @li Disable asio's I/O completion port backend by defining * ASIO_DISABLE_IOCP. * * @li Use the close() function to simultaneously cancel the outstanding * operations and close the socket. * * When running on Windows Vista, Windows Server 2008, and later, the * CancelIoEx function is always used. This function does not have the * problems described above. 
*/ #if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \ && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0600) \ && !defined(<API key>) __declspec(deprecated("By default, this function always fails with " "<API key> when used on Windows XP, Windows Server 2003, " "or earlier. Consult documentation for details.")) #endif void cancel() { asio::error_code ec; this->get_service().cancel(this->get_implementation(), ec); asio::detail::throw_error(ec, "cancel"); } Cancel all asynchronous operations associated with the socket. /** * This function causes all outstanding asynchronous connect, send and receive * operations to finish immediately, and the handlers for cancelled operations * will be passed the asio::error::operation_aborted error. * * @param ec Set to indicate what error occurred, if any. * * @note Calls to cancel() will always fail with * asio::error::<API key> when run on Windows XP, Windows * Server 2003, and earlier versions of Windows, unless * <API key> is defined. However, the CancelIo function has * two issues that should be considered before enabling its use: * * @li It will only cancel asynchronous operations that were initiated in the * current thread. * * @li It can appear to complete without error, but the request to cancel the * unfinished operations may be silently ignored by the operating system. * Whether it works or not seems to depend on the drivers that are installed. * * For portable cancellation, consider using one of the following * alternatives: * * @li Disable asio's I/O completion port backend by defining * ASIO_DISABLE_IOCP. * * @li Use the close() function to simultaneously cancel the outstanding * operations and close the socket. * * When running on Windows Vista, Windows Server 2008, and later, the * CancelIoEx function is always used. This function does not have the * problems described above. 
*/ #if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \ && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0600) \ && !defined(<API key>) __declspec(deprecated("By default, this function always fails with " "<API key> when used on Windows XP, Windows Server 2003, " "or earlier. Consult documentation for details.")) #endif asio::error_code cancel(asio::error_code& ec) { return this->get_service().cancel(this->get_implementation(), ec); } Determine whether the socket is at the out-of-band data mark. /** * This function is used to check whether the socket input is currently * positioned at the out-of-band data mark. * * @return A bool indicating whether the socket is at the out-of-band data * mark. * * @throws asio::system_error Thrown on failure. */ bool at_mark() const { asio::error_code ec; bool b = this->get_service().at_mark(this->get_implementation(), ec); asio::detail::throw_error(ec, "at_mark"); return b; } Determine whether the socket is at the out-of-band data mark. /** * This function is used to check whether the socket input is currently * positioned at the out-of-band data mark. * * @param ec Set to indicate what error occurred, if any. * * @return A bool indicating whether the socket is at the out-of-band data * mark. */ bool at_mark(asio::error_code& ec) const { return this->get_service().at_mark(this->get_implementation(), ec); } Determine the number of bytes available for reading. /** * This function is used to determine the number of bytes that may be read * without blocking. * * @return The number of bytes that may be read without blocking, or 0 if an * error occurs. * * @throws asio::system_error Thrown on failure. */ std::size_t available() const { asio::error_code ec; std::size_t s = this->get_service().available( this->get_implementation(), ec); asio::detail::throw_error(ec, "available"); return s; } Determine the number of bytes available for reading. /** * This function is used to determine the number of bytes that may be read * without blocking. 
* * @param ec Set to indicate what error occurred, if any. * * @return The number of bytes that may be read without blocking, or 0 if an * error occurs. */ std::size_t available(asio::error_code& ec) const { return this->get_service().available(this->get_implementation(), ec); } Bind the socket to the given local endpoint. /** * This function binds the socket to the specified endpoint on the local * machine. * * @param endpoint An endpoint on the local machine to which the socket will * be bound. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * socket.open(asio::ip::tcp::v4()); * socket.bind(asio::ip::tcp::endpoint( * asio::ip::tcp::v4(), 12345)); * @endcode */ void bind(const endpoint_type& endpoint) { asio::error_code ec; this->get_service().bind(this->get_implementation(), endpoint, ec); asio::detail::throw_error(ec, "bind"); } Bind the socket to the given local endpoint. /** * This function binds the socket to the specified endpoint on the local * machine. * * @param endpoint An endpoint on the local machine to which the socket will * be bound. * * @param ec Set to indicate what error occurred, if any. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * socket.open(asio::ip::tcp::v4()); * asio::error_code ec; * socket.bind(asio::ip::tcp::endpoint( * asio::ip::tcp::v4(), 12345), ec); * if (ec) * { * // An error occurred. * } * @endcode */ asio::error_code bind(const endpoint_type& endpoint, asio::error_code& ec) { return this->get_service().bind(this->get_implementation(), endpoint, ec); } Connect the socket to the specified endpoint. /** * This function is used to connect a socket to the specified remote endpoint. * The function call will block until the connection is successfully made or * an error occurs. * * The socket is automatically opened if it is not already open. 
If the * connect fails, and the socket was automatically opened, the socket is * not returned to the closed state. * * @param peer_endpoint The remote endpoint to which the socket will be * connected. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * asio::ip::tcp::endpoint endpoint( * asio::ip::address::from_string("1.2.3.4"), 12345); * socket.connect(endpoint); * @endcode */ void connect(const endpoint_type& peer_endpoint) { asio::error_code ec; if (!is_open()) { this->get_service().open(this->get_implementation(), peer_endpoint.protocol(), ec); asio::detail::throw_error(ec, "connect"); } this->get_service().connect(this->get_implementation(), peer_endpoint, ec); asio::detail::throw_error(ec, "connect"); } Connect the socket to the specified endpoint. /** * This function is used to connect a socket to the specified remote endpoint. * The function call will block until the connection is successfully made or * an error occurs. * * The socket is automatically opened if it is not already open. If the * connect fails, and the socket was automatically opened, the socket is * not returned to the closed state. * * @param peer_endpoint The remote endpoint to which the socket will be * connected. * * @param ec Set to indicate what error occurred, if any. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * asio::ip::tcp::endpoint endpoint( * asio::ip::address::from_string("1.2.3.4"), 12345); * asio::error_code ec; * socket.connect(endpoint, ec); * if (ec) * { * // An error occurred. * } * @endcode */ asio::error_code connect(const endpoint_type& peer_endpoint, asio::error_code& ec) { if (!is_open()) { if (this->get_service().open(this->get_implementation(), peer_endpoint.protocol(), ec)) { return ec; } } return this->get_service().connect( this->get_implementation(), peer_endpoint, ec); } Start an asynchronous connect. 
/** * This function is used to asynchronously connect a socket to the specified * remote endpoint. The function call always returns immediately. * * The socket is automatically opened if it is not already open. If the * connect fails, and the socket was automatically opened, the socket is * not returned to the closed state. * * @param peer_endpoint The remote endpoint to which the socket will be * connected. Copies will be made of the endpoint object as required. * * @param handler The handler to be called when the connection operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error // Result of operation * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @par Example * @code * void connect_handler(const asio::error_code& error) * { * if (!error) * { * // Connect succeeded. * } * } * * ... * * asio::ip::tcp::socket socket(io_service); * asio::ip::tcp::endpoint endpoint( * asio::ip::address::from_string("1.2.3.4"), 12345); * socket.async_connect(endpoint, connect_handler); * @endcode */ template <typename ConnectHandler> <API key>(ConnectHandler, void (asio::error_code)) async_connect(const endpoint_type& peer_endpoint, ASIO_MOVE_ARG(ConnectHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ConnectHandler. 
<API key>(ConnectHandler, handler) type_check; if (!is_open()) { asio::error_code ec; const protocol_type protocol = peer_endpoint.protocol(); if (this->get_service().open(this->get_implementation(), protocol, ec)) { detail::async_result_init< ConnectHandler, void (asio::error_code)> init( ASIO_MOVE_CAST(ConnectHandler)(handler)); this->get_io_service().post( asio::detail::bind_handler( ASIO_MOVE_CAST(ASIO_HANDLER_TYPE( ConnectHandler, void (asio::error_code)))( init.handler), ec)); return init.result.get(); } } return this->get_service().async_connect(this->get_implementation(), peer_endpoint, ASIO_MOVE_CAST(ConnectHandler)(handler)); } Set an option on the socket. /** * This function is used to set an option on the socket. * * @param option The new option value to be set on the socket. * * @throws asio::system_error Thrown on failure. * * @sa <API key> @n * asio::socket_base::broadcast @n * asio::socket_base::do_not_route @n * asio::socket_base::keep_alive @n * asio::socket_base::linger @n * asio::socket_base::receive_buffer_size @n * asio::socket_base::<API key> @n * asio::socket_base::reuse_address @n * asio::socket_base::send_buffer_size @n * asio::socket_base::send_low_watermark @n * asio::ip::multicast::join_group @n * asio::ip::multicast::leave_group @n * asio::ip::multicast::enable_loopback @n * asio::ip::multicast::outbound_interface @n * asio::ip::multicast::hops @n * asio::ip::tcp::no_delay * * @par Example * Setting the IPPROTO_TCP/TCP_NODELAY option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::no_delay option(true); * socket.set_option(option); * @endcode */ template <typename <API key>> void set_option(const <API key>& option) { asio::error_code ec; this->get_service().set_option(this->get_implementation(), option, ec); asio::detail::throw_error(ec, "set_option"); } Set an option on the socket. /** * This function is used to set an option on the socket. * * @param option The new option value to be set on the socket. 
* * @param ec Set to indicate what error occurred, if any. * * @sa <API key> @n * asio::socket_base::broadcast @n * asio::socket_base::do_not_route @n * asio::socket_base::keep_alive @n * asio::socket_base::linger @n * asio::socket_base::receive_buffer_size @n * asio::socket_base::<API key> @n * asio::socket_base::reuse_address @n * asio::socket_base::send_buffer_size @n * asio::socket_base::send_low_watermark @n * asio::ip::multicast::join_group @n * asio::ip::multicast::leave_group @n * asio::ip::multicast::enable_loopback @n * asio::ip::multicast::outbound_interface @n * asio::ip::multicast::hops @n * asio::ip::tcp::no_delay * * @par Example * Setting the IPPROTO_TCP/TCP_NODELAY option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::no_delay option(true); * asio::error_code ec; * socket.set_option(option, ec); * if (ec) * { * // An error occurred. * } * @endcode */ template <typename <API key>> asio::error_code set_option(const <API key>& option, asio::error_code& ec) { return this->get_service().set_option( this->get_implementation(), option, ec); } Get an option from the socket. /** * This function is used to get the current value of an option on the socket. * * @param option The option value to be obtained from the socket. * * @throws asio::system_error Thrown on failure. 
* * @sa <API key> @n * asio::socket_base::broadcast @n * asio::socket_base::do_not_route @n * asio::socket_base::keep_alive @n * asio::socket_base::linger @n * asio::socket_base::receive_buffer_size @n * asio::socket_base::<API key> @n * asio::socket_base::reuse_address @n * asio::socket_base::send_buffer_size @n * asio::socket_base::send_low_watermark @n * asio::ip::multicast::join_group @n * asio::ip::multicast::leave_group @n * asio::ip::multicast::enable_loopback @n * asio::ip::multicast::outbound_interface @n * asio::ip::multicast::hops @n * asio::ip::tcp::no_delay * * @par Example * Getting the value of the SOL_SOCKET/SO_KEEPALIVE option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::socket::keep_alive option; * socket.get_option(option); * bool is_set = option.value(); * @endcode */ template <typename <API key>> void get_option(<API key>& option) const { asio::error_code ec; this->get_service().get_option(this->get_implementation(), option, ec); asio::detail::throw_error(ec, "get_option"); } Get an option from the socket. /** * This function is used to get the current value of an option on the socket. * * @param option The option value to be obtained from the socket. * * @param ec Set to indicate what error occurred, if any. * * @sa <API key> @n * asio::socket_base::broadcast @n * asio::socket_base::do_not_route @n * asio::socket_base::keep_alive @n * asio::socket_base::linger @n * asio::socket_base::receive_buffer_size @n * asio::socket_base::<API key> @n * asio::socket_base::reuse_address @n * asio::socket_base::send_buffer_size @n * asio::socket_base::send_low_watermark @n * asio::ip::multicast::join_group @n * asio::ip::multicast::leave_group @n * asio::ip::multicast::enable_loopback @n * asio::ip::multicast::outbound_interface @n * asio::ip::multicast::hops @n * asio::ip::tcp::no_delay * * @par Example * Getting the value of the SOL_SOCKET/SO_KEEPALIVE option: * @code * asio::ip::tcp::socket socket(io_service); * ... 
* asio::ip::tcp::socket::keep_alive option; * asio::error_code ec; * socket.get_option(option, ec); * if (ec) * { * // An error occurred. * } * bool is_set = option.value(); * @endcode */ template <typename <API key>> asio::error_code get_option(<API key>& option, asio::error_code& ec) const { return this->get_service().get_option( this->get_implementation(), option, ec); } Perform an IO control command on the socket. /** * This function is used to execute an IO control command on the socket. * * @param command The IO control command to be performed on the socket. * * @throws asio::system_error Thrown on failure. * * @sa IoControlCommand @n * asio::socket_base::bytes_readable @n * asio::socket_base::non_blocking_io * * @par Example * Getting the number of bytes ready to read: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::socket::bytes_readable command; * socket.io_control(command); * std::size_t bytes_readable = command.get(); * @endcode */ template <typename IoControlCommand> void io_control(IoControlCommand& command) { asio::error_code ec; this->get_service().io_control(this->get_implementation(), command, ec); asio::detail::throw_error(ec, "io_control"); } Perform an IO control command on the socket. /** * This function is used to execute an IO control command on the socket. * * @param command The IO control command to be performed on the socket. * * @param ec Set to indicate what error occurred, if any. * * @sa IoControlCommand @n * asio::socket_base::bytes_readable @n * asio::socket_base::non_blocking_io * * @par Example * Getting the number of bytes ready to read: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::socket::bytes_readable command; * asio::error_code ec; * socket.io_control(command, ec); * if (ec) * { * // An error occurred. 
* } * std::size_t bytes_readable = command.get(); * @endcode */ template <typename IoControlCommand> asio::error_code io_control(IoControlCommand& command, asio::error_code& ec) { return this->get_service().io_control( this->get_implementation(), command, ec); } Gets the non-blocking mode of the socket. /** * @returns @c true if the socket's synchronous operations will fail with * asio::error::would_block if they are unable to perform the requested * operation immediately. If @c false, synchronous operations will block * until complete. * * @note The non-blocking mode has no effect on the behaviour of asynchronous * operations. Asynchronous operations will never fail with the error * asio::error::would_block. */ bool non_blocking() const { return this->get_service().non_blocking(this->get_implementation()); } Sets the non-blocking mode of the socket. /** * @param mode If @c true, the socket's synchronous operations will fail with * asio::error::would_block if they are unable to perform the requested * operation immediately. If @c false, synchronous operations will block * until complete. * * @throws asio::system_error Thrown on failure. * * @note The non-blocking mode has no effect on the behaviour of asynchronous * operations. Asynchronous operations will never fail with the error * asio::error::would_block. */ void non_blocking(bool mode) { asio::error_code ec; this->get_service().non_blocking(this->get_implementation(), mode, ec); asio::detail::throw_error(ec, "non_blocking"); } Sets the non-blocking mode of the socket. /** * @param mode If @c true, the socket's synchronous operations will fail with * asio::error::would_block if they are unable to perform the requested * operation immediately. If @c false, synchronous operations will block * until complete. * * @param ec Set to indicate what error occurred, if any. * * @note The non-blocking mode has no effect on the behaviour of asynchronous * operations. 
Asynchronous operations will never fail with the error * asio::error::would_block. */ asio::error_code non_blocking( bool mode, asio::error_code& ec) { return this->get_service().non_blocking( this->get_implementation(), mode, ec); } Gets the non-blocking mode of the native socket implementation. /** * This function is used to retrieve the non-blocking mode of the underlying * native socket. This mode has no effect on the behaviour of the socket * object's synchronous operations. * * @returns @c true if the underlying socket is in non-blocking mode and * direct system calls may fail with asio::error::would_block (or the * equivalent system error). * * @note The current non-blocking mode is cached by the socket object. * Consequently, the return value may be incorrect if the non-blocking mode * was set directly on the native socket. * * @par Example * This function is intended to allow the encapsulation of arbitrary * non-blocking system calls as asynchronous operations, in a way that is * transparent to the user of the socket object. The following example * illustrates how Linux's @c sendfile system call might be encapsulated: * @code template <typename Handler> * struct sendfile_op * { * tcp::socket& sock_; * int fd_; * Handler handler_; * off_t offset_; * std::size_t <API key>; * * // Function call operator meeting WriteHandler requirements. * // Used as the handler for the async_write_some operation. * void operator()(asio::error_code ec, std::size_t) * { * // Put the underlying socket into non-blocking mode. * if (!ec) * if (!sock_.native_non_blocking()) * sock_.native_non_blocking(true, ec); * * if (!ec) * { * for (;;) * { * // Try the system call. * errno = 0; * int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536); * ec = asio::error_code(n < 0 ? errno : 0, * asio::error::get_system_category()); * <API key> += ec ? 0 : n; * * // Retry operation immediately if interrupted by signal. 
* if (ec == asio::error::interrupted) * continue; * * // Check if we need to run the operation again. * if (ec == asio::error::would_block * || ec == asio::error::try_again) * { * // We have to wait for the socket to become ready again. * sock_.async_write_some(asio::null_buffers(), *this); * return; * } * * if (ec || n == 0) * { * // An error occurred, or we have reached the end of the file. * // Either way we must exit the loop so we can call the handler. * break; * } * * // Loop around to try calling sendfile again. * } * } * * // Pass result back to user's handler. * handler_(ec, <API key>); * } * }; * * template <typename Handler> * void async_sendfile(tcp::socket& sock, int fd, Handler h) * { * sendfile_op<Handler> op = { sock, fd, h, 0, 0 }; * sock.async_write_some(asio::null_buffers(), op); * } @endcode */ bool native_non_blocking() const { return this->get_service().native_non_blocking(this->get_implementation()); } Sets the non-blocking mode of the native socket implementation. /** * This function is used to modify the non-blocking mode of the underlying * native socket. It has no effect on the behaviour of the socket object's * synchronous operations. * * @param mode If @c true, the underlying socket is put into non-blocking * mode and direct system calls may fail with asio::error::would_block * (or the equivalent system error). * * @throws asio::system_error Thrown on failure. If the @c mode is * @c false, but the current value of @c non_blocking() is @c true, this * function fails with asio::error::invalid_argument, as the * combination does not make sense. * * @par Example * This function is intended to allow the encapsulation of arbitrary * non-blocking system calls as asynchronous operations, in a way that is * transparent to the user of the socket object. 
The following example * illustrates how Linux's @c sendfile system call might be encapsulated: * @code template <typename Handler> * struct sendfile_op * { * tcp::socket& sock_; * int fd_; * Handler handler_; * off_t offset_; * std::size_t <API key>; * * // Function call operator meeting WriteHandler requirements. * // Used as the handler for the async_write_some operation. * void operator()(asio::error_code ec, std::size_t) * { * // Put the underlying socket into non-blocking mode. * if (!ec) * if (!sock_.native_non_blocking()) * sock_.native_non_blocking(true, ec); * * if (!ec) * { * for (;;) * { * // Try the system call. * errno = 0; * int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536); * ec = asio::error_code(n < 0 ? errno : 0, * asio::error::get_system_category()); * <API key> += ec ? 0 : n; * * // Retry operation immediately if interrupted by signal. * if (ec == asio::error::interrupted) * continue; * * // Check if we need to run the operation again. * if (ec == asio::error::would_block * || ec == asio::error::try_again) * { * // We have to wait for the socket to become ready again. * sock_.async_write_some(asio::null_buffers(), *this); * return; * } * * if (ec || n == 0) * { * // An error occurred, or we have reached the end of the file. * // Either way we must exit the loop so we can call the handler. * break; * } * * // Loop around to try calling sendfile again. * } * } * * // Pass result back to user's handler. * handler_(ec, <API key>); * } * }; * * template <typename Handler> * void async_sendfile(tcp::socket& sock, int fd, Handler h) * { * sendfile_op<Handler> op = { sock, fd, h, 0, 0 }; * sock.async_write_some(asio::null_buffers(), op); * } @endcode */ void native_non_blocking(bool mode) { asio::error_code ec; this->get_service().native_non_blocking( this->get_implementation(), mode, ec); asio::detail::throw_error(ec, "native_non_blocking"); } Sets the non-blocking mode of the native socket implementation. 
/** * This function is used to modify the non-blocking mode of the underlying * native socket. It has no effect on the behaviour of the socket object's * synchronous operations. * * @param mode If @c true, the underlying socket is put into non-blocking * mode and direct system calls may fail with asio::error::would_block * (or the equivalent system error). * * @param ec Set to indicate what error occurred, if any. If the @c mode is * @c false, but the current value of @c non_blocking() is @c true, this * function fails with asio::error::invalid_argument, as the * combination does not make sense. * * @par Example * This function is intended to allow the encapsulation of arbitrary * non-blocking system calls as asynchronous operations, in a way that is * transparent to the user of the socket object. The following example * illustrates how Linux's @c sendfile system call might be encapsulated: * @code template <typename Handler> * struct sendfile_op * { * tcp::socket& sock_; * int fd_; * Handler handler_; * off_t offset_; * std::size_t <API key>; * * // Function call operator meeting WriteHandler requirements. * // Used as the handler for the async_write_some operation. * void operator()(asio::error_code ec, std::size_t) * { * // Put the underlying socket into non-blocking mode. * if (!ec) * if (!sock_.native_non_blocking()) * sock_.native_non_blocking(true, ec); * * if (!ec) * { * for (;;) * { * // Try the system call. * errno = 0; * int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536); * ec = asio::error_code(n < 0 ? errno : 0, * asio::error::get_system_category()); * <API key> += ec ? 0 : n; * * // Retry operation immediately if interrupted by signal. * if (ec == asio::error::interrupted) * continue; * * // Check if we need to run the operation again. * if (ec == asio::error::would_block * || ec == asio::error::try_again) * { * // We have to wait for the socket to become ready again. 
* sock_.async_write_some(asio::null_buffers(), *this); * return; * } * * if (ec || n == 0) * { * // An error occurred, or we have reached the end of the file. * // Either way we must exit the loop so we can call the handler. * break; * } * * // Loop around to try calling sendfile again. * } * } * * // Pass result back to user's handler. * handler_(ec, <API key>); * } * }; * * template <typename Handler> * void async_sendfile(tcp::socket& sock, int fd, Handler h) * { * sendfile_op<Handler> op = { sock, fd, h, 0, 0 }; * sock.async_write_some(asio::null_buffers(), op); * } @endcode */ asio::error_code native_non_blocking( bool mode, asio::error_code& ec) { return this->get_service().native_non_blocking( this->get_implementation(), mode, ec); } Get the local endpoint of the socket. /** * This function is used to obtain the locally bound endpoint of the socket. * * @returns An object that represents the local endpoint of the socket. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::endpoint endpoint = socket.local_endpoint(); * @endcode */ endpoint_type local_endpoint() const { asio::error_code ec; endpoint_type ep = this->get_service().local_endpoint( this->get_implementation(), ec); asio::detail::throw_error(ec, "local_endpoint"); return ep; } Get the local endpoint of the socket. /** * This function is used to obtain the locally bound endpoint of the socket. * * @param ec Set to indicate what error occurred, if any. * * @returns An object that represents the local endpoint of the socket. * Returns a default-constructed endpoint object if an error occurred. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::error_code ec; * asio::ip::tcp::endpoint endpoint = socket.local_endpoint(ec); * if (ec) * { * // An error occurred. 
* } * @endcode */ endpoint_type local_endpoint(asio::error_code& ec) const { return this->get_service().local_endpoint(this->get_implementation(), ec); } Get the remote endpoint of the socket. /** * This function is used to obtain the remote endpoint of the socket. * * @returns An object that represents the remote endpoint of the socket. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::endpoint endpoint = socket.remote_endpoint(); * @endcode */ endpoint_type remote_endpoint() const { asio::error_code ec; endpoint_type ep = this->get_service().remote_endpoint( this->get_implementation(), ec); asio::detail::throw_error(ec, "remote_endpoint"); return ep; } Get the remote endpoint of the socket. /** * This function is used to obtain the remote endpoint of the socket. * * @param ec Set to indicate what error occurred, if any. * * @returns An object that represents the remote endpoint of the socket. * Returns a default-constructed endpoint object if an error occurred. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::error_code ec; * asio::ip::tcp::endpoint endpoint = socket.remote_endpoint(ec); * if (ec) * { * // An error occurred. * } * @endcode */ endpoint_type remote_endpoint(asio::error_code& ec) const { return this->get_service().remote_endpoint(this->get_implementation(), ec); } Disable sends or receives on the socket. /** * This function is used to disable send operations, receive operations, or * both. * * @param what Determines what types of operation will no longer be allowed. * * @throws asio::system_error Thrown on failure. * * @par Example * Shutting down the send side of the socket: * @code * asio::ip::tcp::socket socket(io_service); * ... 
* socket.shutdown(asio::ip::tcp::socket::shutdown_send); * @endcode */ void shutdown(shutdown_type what) { asio::error_code ec; this->get_service().shutdown(this->get_implementation(), what, ec); asio::detail::throw_error(ec, "shutdown"); } Disable sends or receives on the socket. /** * This function is used to disable send operations, receive operations, or * both. * * @param what Determines what types of operation will no longer be allowed. * * @param ec Set to indicate what error occurred, if any. * * @par Example * Shutting down the send side of the socket: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::error_code ec; * socket.shutdown(asio::ip::tcp::socket::shutdown_send, ec); * if (ec) * { * // An error occurred. * } * @endcode */ asio::error_code shutdown(shutdown_type what, asio::error_code& ec) { return this->get_service().shutdown(this->get_implementation(), what, ec); } protected: Protected destructor to prevent deletion through this type. ~basic_socket() { } }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // <API key>
<?php

namespace ZendTest\Validator\File;

use Zend\Validator\File\ExcludeMimeType;

/**
 * ExcludeMimeType testbed
 *
 * @category   Zend
 * @package    Zend_Validator_File
 * @subpackage UnitTests
 * @group      Zend_Validator
 */
class ExcludeMimeTypeTest extends \PHPUnit_Framework_TestCase
{
    /**
     * Data provider for testBasic()/testLegacy().
     *
     * Each row is: validator options, isValid() parameter (a simulated
     * $_FILES upload entry for _files/picture.jpg), expected result.
     *
     * @return array
     */
    public function basicBehaviorDataProvider()
    {
        $testFile   = __DIR__ . '/_files/picture.jpg';
        $fileUpload = array(
            'tmp_name' => $testFile,
            'name'     => basename($testFile),
            'size'     => 200,
            'error'    => 0,
            'type'     => 'image/jpeg',
        );
        return array(
            //    Options, isValid Param, Expected value
            array('image/gif',                      $fileUpload, true),
            array('image',                          $fileUpload, false),
            array('test/notype',                    $fileUpload, true),
            array('image/gif, image/jpeg',          $fileUpload, false),
            array(array('image/vasa', 'image/gif'), $fileUpload, true),
            array(array('image/gif', 'jpeg'),       $fileUpload, false),
            array(array('image/gif', 'gif'),        $fileUpload, true),
        );
    }

    /**
     * Ensures that the validator follows expected behavior
     *
     * @dataProvider basicBehaviorDataProvider
     * @return void
     */
    public function testBasic($options, $isValidParam, $expected)
    {
        $validator = new ExcludeMimeType($options);
        $validator->enableHeaderCheck();
        $this->assertEquals($expected, $validator->isValid($isValidParam));
    }

    /**
     * Ensures that the validator follows expected behavior for legacy Zend\Transfer API
     *
     * @dataProvider basicBehaviorDataProvider
     * @return void
     */
    public function testLegacy($options, $isValidParam, $expected)
    {
        if (is_array($isValidParam)) {
            $validator = new ExcludeMimeType($options);
            $validator->enableHeaderCheck();
            $this->assertEquals($expected, $validator->isValid($isValidParam['tmp_name'], $isValidParam));
        }
    }

    /**
     * Ensures that getMimeType() returns expected value
     *
     * @return void
     */
    public function testGetMimeType()
    {
        $validator = new ExcludeMimeType('image/gif');
        $this->assertEquals('image/gif', $validator->getMimeType());

        $validator = new ExcludeMimeType(array('image/gif', 'video', 'text/test'));
        $this->assertEquals('image/gif,video,text/test', $validator->getMimeType());

        // getMimeType(true) returns the list as an array rather than a string.
        $validator = new ExcludeMimeType(array('image/gif', 'video', 'text/test'));
        $this->assertEquals(array('image/gif', 'video', 'text/test'), $validator->getMimeType(true));
    }

    /**
     * Ensures that setMimeType() returns expected value
     *
     * @return void
     */
    public function testSetMimeType()
    {
        $validator = new ExcludeMimeType('image/gif');
        $validator->setMimeType('image/jpeg');
        $this->assertEquals('image/jpeg', $validator->getMimeType());
        $this->assertEquals(array('image/jpeg'), $validator->getMimeType(true));

        $validator->setMimeType('image/gif, text/test');
        $this->assertEquals('image/gif,text/test', $validator->getMimeType());
        $this->assertEquals(array('image/gif', 'text/test'), $validator->getMimeType(true));

        $validator->setMimeType(array('video/mpeg', 'gif'));
        $this->assertEquals('video/mpeg,gif', $validator->getMimeType());
        $this->assertEquals(array('video/mpeg', 'gif'), $validator->getMimeType(true));
    }

    /**
     * Ensures that addMimeType() returns expected value
     *
     * @return void
     */
    public function testAddMimeType()
    {
        $validator = new ExcludeMimeType('image/gif');
        $validator->addMimeType('text');
        $this->assertEquals('image/gif,text', $validator->getMimeType());
        $this->assertEquals(array('image/gif', 'text'), $validator->getMimeType(true));

        $validator->addMimeType('jpg, to');
        $this->assertEquals('image/gif,text,jpg,to', $validator->getMimeType());
        $this->assertEquals(array('image/gif', 'text', 'jpg', 'to'), $validator->getMimeType(true));

        $validator->addMimeType(array('zip', 'ti'));
        $this->assertEquals('image/gif,text,jpg,to,zip,ti', $validator->getMimeType());
        $this->assertEquals(array('image/gif', 'text', 'jpg', 'to', 'zip', 'ti'), $validator->getMimeType(true));

        // Adding an empty string must be a no-op.
        $validator->addMimeType('');
        $this->assertEquals('image/gif,text,jpg,to,zip,ti', $validator->getMimeType());
        $this->assertEquals(array('image/gif', 'text', 'jpg', 'to', 'zip', 'ti'), $validator->getMimeType(true));
    }
}
require "spec_helper" describe Mongoid::Changeable do describe "#attribute_change" do context "when the attribute has changed from the persisted value" do context "when using the setter" do let(:person) do Person.new(title: "Grand Poobah").tap(&:move_changes) end before do person.title = "Captain Obvious" end it "returns an array of the old value and new value" do expect(person.send(:attribute_change, "title")).to eq( [ "Grand Poobah", "Captain Obvious" ] ) end it "allows access via (attribute)_change" do expect(person.title_change).to eq( [ "Grand Poobah", "Captain Obvious" ] ) end context "when the field is aliased" do let(:person) do Person.new(test: "Aliased 1").tap(&:move_changes) end before do person.test = "Aliased 2" end it "returns an array of the old value and new value" do expect(person.send(:attribute_change, "test")).to eq( [ "Aliased 1", "Aliased 2" ] ) end it "allows access via (attribute)_change" do expect(person.test_change).to eq( [ "Aliased 1", "Aliased 2" ] ) end end end context "when using [] methods" do let(:person) do Person.new(title: "Grand Poobah").tap(&:move_changes) end before do person[:title] = "Captain Obvious" end it "returns an array of the old value and new value" do expect(person.send(:attribute_change, "title")).to eq( [ "Grand Poobah", "Captain Obvious" ] ) end it "allows access via (attribute)_change" do expect(person.title_change).to eq( [ "Grand Poobah", "Captain Obvious" ] ) end end end context "when the attribute has changed from the default value" do context "when using the setter" do let(:person) do Person.new(pets: true) end it "returns an array of nil and new value" do expect(person.send(:attribute_change, "pets")).to eq([ nil, true ]) end it "allows access via (attribute)_change" do expect(person.pets_change).to eq([ nil, true ]) end end context "when using [] methods" do context "when the field is defined" do let(:person) do Person.new end before do person[:pets] = true end it "returns an array of nil and new value" 
do expect(person.send(:attribute_change, "pets")).to eq([ nil, true ]) end it "allows access via (attribute)_change" do expect(person.pets_change).to eq([ nil, true ]) end end context "when the field is not defined" do let(:person) do Person.new end before do person[:t] = "test" end it "returns an array of nil and new value" do expect(person.send(:attribute_change, "t")).to eq([ nil, "test" ]) end end end end context "when the attribute changes multiple times" do let(:person) do Person.new(title: "Grand Poobah").tap(&:move_changes) end before do person.title = "Captain Obvious" person.title = "Dark Helmet" end it "returns an array of the original value and new value" do expect(person.send(:attribute_change, "title")).to eq( [ "Grand Poobah", "Dark Helmet" ] ) end it "allows access via (attribute)_change" do expect(person.title_change).to eq( [ "Grand Poobah", "Dark Helmet" ] ) end end context "when the attribute is modified in place" do context "when the attribute is an array" do let(:person) do Person.new(aliases: [ "Grand Poobah" ]).tap(&:move_changes) end before do person.aliases[0] = "Dark Helmet" end it "returns an array of the original value and new value" do expect(person.send(:attribute_change, "aliases")).to eq( [[ "Grand Poobah" ], [ "Dark Helmet" ]] ) end it "allows access via (attribute)_change" do expect(person.aliases_change).to eq( [[ "Grand Poobah" ], [ "Dark Helmet" ]] ) end context "when the attribute changes multiple times" do before do person.aliases << "Colonel Sanders" end it "returns an array of the original value and new value" do expect(person.send(:attribute_change, "aliases")).to eq( [[ "Grand Poobah" ], [ "Dark Helmet", "Colonel Sanders" ]] ) end end end context "when the attribute is a hash" do let(:person) do Person.new(map: { location: "Home" }).tap(&:move_changes) end before do person.map[:location] = "Work" end it "returns an array of the original value and new value" do expect(person.send(:attribute_change, "map")).to eq( [{ 
location: "Home" }, { location: "Work" }] ) end it "allows access via (attribute)_change" do expect(person.map_change).to eq( [{ location: "Home" }, { location: "Work" }] ) end context "when the attribute changes multiple times" do before do person.map[:lat] = 20.0 end it "returns an array of the original value and new value" do expect(person.send(:attribute_change, "map")).to eq( [{ location: "Home" }, { location: "Work", lat: 20.0 }] ) end end context "when the values are arrays" do let(:map) do { "stack1" => [ 1, 2, 3, 4 ], "stack2" => [ 1, 2, 3, 4 ], "stack3" => [ 1, 2, 3, 4 ] } end before do person.map = map person.move_changes end context "when reordering the arrays inline" do before do person.map["stack1"].reverse! end it "flags the attribute as changed" do expect(person.send(:attribute_change, "map")).to eq( [ { "stack1" => [ 1, 2, 3, 4 ], "stack2" => [ 1, 2, 3, 4 ], "stack3" => [ 1, 2, 3, 4 ] }, { "stack1" => [ 4, 3, 2, 1 ], "stack2" => [ 1, 2, 3, 4 ], "stack3" => [ 1, 2, 3, 4 ] }, ] ) end end end end end context "when the attribute has not changed from the persisted value" do let(:person) do Person.new(title: nil) end it "returns nil" do expect(person.send(:attribute_change, "title")).to be_nil end end context "when the attribute has not changed from the default value" do context "when the attribute differs from the persisted value" do let(:person) do Person.new end it "returns the change" do expect(person.send(:attribute_change, "pets")).to eq([ nil, false ]) end end context "when the attribute does not differ from the persisted value" do let(:person) do Person.instantiate("pets" => false) end it "returns nil" do expect(person.send(:attribute_change, "pets")).to be_nil end end end context "when the attribute has been set with the same value" do let(:person) do Person.new(title: "Grand Poobah").tap(&:move_changes) end before do person.title = "Grand Poobah" end it "returns an empty array" do expect(person.send(:attribute_change, "title")).to be_nil end 
end context "when the attribute is removed" do let(:person) do Person.new(title: "Grand Poobah").tap(&:move_changes) end before do person.remove_attribute(:title) end it "returns an empty array" do expect(person.send(:attribute_change, "title")).to eq( [ "Grand Poobah", nil ] ) end end end describe "#attribute_changed?" do context "when the attribute has changed from the persisted value" do let(:person) do Person.new(title: "Grand Poobah") end before do person.title = "Captain Obvious" end it "returns true" do expect(person.send(:attribute_changed?, "title")).to be true end it "allows access via (attribute)_changed?" do expect(person.title_changed?).to be true end context "when the field is aliased" do let(:person) do Person.new(test: "Aliased 1") end before do person.test = "Aliased 2" end it "returns true" do expect(person.send(:attribute_changed?, "test")).to be true end it "allows access via (attribute)_changed?" do expect(person.test_changed?).to be true end end end context "when the attribute has changed from the default value" do let(:person) do Person.new end before do person.pets = true end it "returns true" do expect(person.send(:attribute_changed?, "pets")).to be true end it "allows access via (attribute)_changed?" 
do expect(person.pets_changed?).to be true end end context "when the attribute has not changed the persisted value" do let!(:person) do Person.new(title: "Grand Poobah").tap(&:move_changes) end it "returns false" do expect(person.send(:attribute_changed?, "title")).to be false end end context "when the attribute has not changed from the default value" do context "when the attribute is not enumerable" do context "when the attribute differs from the persisted value" do let!(:person) do Person.new end it "returns true" do expect(person.send(:attribute_changed?, "pets")).to be true end end context "when the attribute does not differ from the persisted value" do let!(:person) do Person.instantiate("pets" => false) end it "returns false" do expect(person.send(:attribute_changed?, "pets")).to be false end end end context "when the attribute is an array" do let!(:person) do Person.new(aliases: [ "Bond" ]) end context "when the array is only accessed" do before do person.move_changes person.aliases end it "returns false" do expect(person).to_not be_aliases_changed end end end context "when the attribute is a hash" do let!(:person) do Person.new(map: { key: "value" }) end context "when the hash is only accessed" do before do person.move_changes person.map end it "returns false" do expect(person).to_not be_map_changed end end end end end describe "#<API key>?" 
do context "when the attribute differs from the default value" do let(:person) do Person.new(age: 33) end it "returns true" do expect(person).to <API key> end end context "when the attribute is the same as the default" do let(:person) do Person.new end it "returns false" do expect(person).to_not <API key> end end end describe "#attribute_was" do context "when the attribute has changed from the persisted value" do let(:person) do Person.new(title: "Grand Poobah").tap(&:move_changes) end before do person.title = "Captain Obvious" end it "returns the old value" do expect(person.send(:attribute_was, "title")).to eq("Grand Poobah") end it "allows access via (attribute)_was" do expect(person.title_was).to eq("Grand Poobah") end context "when the field is aliased" do let(:person) do Person.new(test: "Aliased 1").tap(&:move_changes) end before do person.test = "Aliased 2" end it "returns the old value" do expect(person.send(:attribute_was, "test")).to eq("Aliased 1") end it "allows access via (attribute)_was" do expect(person.test_was).to eq("Aliased 1") end end end context "when the attribute has not changed from the persisted value" do let!(:person) do Person.new(title: "Grand Poobah").tap(&:move_changes) end it "returns the original value" do expect(person.send(:attribute_was, "title")).to eq("Grand Poobah") end end end describe "#<API key>!" do let(:aliases) do [ "007" ] end let(:person) do Person.new(aliases: aliases, test: "Aliased 1") end before do person.changed_attributes.clear end context "when the value has not changed" do before do person.aliases_will_change! end let(:changes) do person.changes end it "does not return the value in the changes" do expect(changes).to be_empty end it "is not flagged as changed" do expect(person).to_not be_changed end end context "when the value has changed" do before do person.aliases_will_change! 
person.aliases << "008" end let(:changes) do person.changes end it "returns the value in the changes" do expect(changes).to eq({ "aliases" => [[ "007" ], [ "007", "008" ]] }) end end context "when the value is duplicable" do context "when the attribute has not been cloned" do before do person.aliases_will_change! end let(:changed) do person.changed_attributes end it "clones the value" do expect(changed["aliases"]).to_not equal(aliases) end it "puts the old value in the changes" do expect(changed["aliases"]).to eq(aliases) end end context "when the attribute has been flagged" do before do person.changed_attributes["aliases"] = aliases expect(aliases).to receive(:clone).never person.aliases_will_change! end let(:changed) do person.changed_attributes end it "does not clone the value" do expect(changed["aliases"]).to equal(aliases) end it "retains the first value in the changes" do expect(changed["aliases"]).to eq(aliases) end end end end describe "#changed" do context "when the document has changed" do let(:person) do Person.instantiate(title: "Grand Poobah") end before do person.title = "Captain Obvious" end it "returns an array of changed field names" do expect(person.changed).to include("title") end end context "when the document has not changed" do let(:person) do Person.instantiate({}) end it "does not include non changed fields" do expect(person.changed).to_not include("title") end end context "when the document is embedded" do let(:person) do Person.create end let!(:name) do person.create_name(first_name: "Layne", last_name: "Staley") end context "when changing attributes via []" do before do person.name["a"] = "testing" end it "returns true" do expect(person.name).to be_changed end end end end describe "#changed?" 
do context "when the document has changed" do let(:person) do Person.new(title: "Grand Poobah") end before do person.title = "Captain Obvious" end it "returns true" do expect(person).to be_changed end end context "when a hash field has been accessed" do context "when the field has not changed" do let(:person) do Person.create(map: { name: "value" }) end before do person.map end it "returns false" do expect(person).to_not be_changed end end context "when the field is changed" do let(:person) do Person.create(map: { name: "value" }) end before do person.map = { name: "another" } end it "returns true" do expect(person).to be_changed end end context "when a dynamic field is changed in place" do let(:person) do Person.create(other_name: { full: {first: 'first', last: 'last'} }) end before do person.other_name[:full][:first] = 'Name' end it "returns true" do expect(person.changes).to_not be_empty expect(person).to be_changed end end end context "when the document has not changed" do let(:acolyte) do Acolyte.instantiate("_id" => BSON::ObjectId.new) end it "returns false" do expect(acolyte).to_not be_changed end end context "when a child has changed" do let(:person) do Person.create end let!(:address) do person.addresses.create(street: "hobrecht") end before do address.number = 10 end it "returns true" do expect(person).to be_changed end end context "when a deeply embedded child has changed" do let(:person) do Person.create end let(:address) do person.addresses.create(street: "hobrecht") end let!(:location) do address.locations.create(name: "home") end before do location.name = "work" end it "returns true" do expect(person).to be_changed end end context "when a child is new" do let(:person) do Person.create end let!(:address) do person.addresses.build(street: "hobrecht") end it "returns true" do expect(person).to be_changed end end context "when a deeply embedded child is new" do let(:person) do Person.create end let(:address) do person.addresses.create(street: "hobrecht") 
end let!(:location) do address.locations.build(name: "home") end it "returns true" do expect(person).to be_changed end end end describe "#changes" do context "when the document has changed" do let(:person) do Person.instantiate(title: "Grand Poobah") end before do person.title = "Captain Obvious" end it "returns a hash of changes" do expect(person.changes["title"]).to eq( [ nil, "Captain Obvious" ] ) end it "returns a hash with indifferent access" do expect(person.changes["title"]).to eq( [ nil, "Captain Obvious" ] ) end end context "when the document has not changed" do let(:acolyte) do Acolyte.instantiate("_id" => BSON::ObjectId.new) end it "returns an empty hash" do expect(acolyte.changes).to be_empty end end end describe "#setters" do context "when the document has changed" do let(:person) do Person.new(aliases: [ "007" ]).tap do |p| p.new_record = false p.move_changes end end context "when an array field has changed" do context "when the array has values removed" do before do person.aliases.delete_one("007") end let!(:setters) do person.setters end it "contains array changes in the setters" do expect(setters).to eq({ "aliases" => [] }) end end context "when the array has values added" do before do person.aliases << "008" end let!(:setters) do person.setters end it "contains array changes in the setters" do expect(setters).to eq({ "aliases" => [ "007", "008" ] }) end end context "when the array has changed completely" do before do person.aliases << "008" person.aliases.delete_one("007") end let!(:setters) do person.setters end it "does not contain array changes in the setters" do expect(setters).to eq({ "aliases" => [ "008" ]}) end end end context "when the document is a root document" do let(:person) do Person.instantiate(title: "Grand Poobah") end before do person.title = "Captain Obvious" end it "returns a hash of field names and new values" do expect(person.setters["title"]).to eq("Captain Obvious") end end context "when the document is embedded" do 
let(:person) do Person.instantiate(title: "Grand Poobah") end let(:address) do Address.instantiate(street: "Oxford St") end before do person.addresses << address person.<API key>(:@new_record, false) address.<API key>(:@new_record, false) address.street = "Bond St" end it "returns a hash of field names and new values" do expect(address.setters).to eq( { "addresses.0.street" => "Bond St" } ) end context "when the document is embedded multiple levels" do let(:location) do Location.new(name: "Home") end before do location.<API key>(:@new_record, false) address.locations << location location.name = "Work" end it "returns the proper hash with locations" do expect(location.setters).to eq( { "addresses.0.locations.0.name" => "Work" } ) end end end end context "when the document has not changed" do let(:acolyte) do Acolyte.instantiate("_id" => BSON::ObjectId.new) end it "returns an empty hash" do expect(acolyte.setters).to be_empty end end end describe "#previous_changes" do let(:person) do Person.new(title: "Grand Poobah") end before do person.title = "Captain Obvious" end context "when the document has been saved" do before do person.save! 
end it "returns the changes before the save" do expect(person.previous_changes["title"]).to eq( [ nil, "Captain Obvious" ] ) end end context "when the document has not been saved" do it "returns an empty hash" do expect(person.previous_changes).to be_empty end end end describe "#move_changes" do let(:person) do Person.new(title: "Sir") end before do person.atomic_pulls["addresses"] = Address.new person.atomic_unsets << Address.new person.delayed_atomic_sets["addresses"] = Address.new person.move_changes end it "clears the atomic pulls" do expect(person.atomic_pulls).to be_empty end it "clears the atomic unsets" do expect(person.atomic_unsets).to be_empty end it "clears the delayed atomic sets" do expect(person.delayed_atomic_sets).to be_empty end it "clears the changed attributes" do expect(person.changed_attributes).to be_empty end end describe "#reset_attribute!" do context "when the attribute has changed" do let(:person) do Person.instantiate(title: "Grand Poobah") end before do person.title = "Captain Obvious" person.send(:reset_attribute!, "title") end it "resets the value to the original" do expect(person.title).to be_nil end it "allows access via reset_(attribute)!" 
do expect(person.title).to be_nil end it "removes the field from the changes" do expect(person.changed).to_not include("title") end context "when the field is aliased" do let(:person) do Person.instantiate(test: "Aliased 1") end before do person.test = "Aliased 2" person.send(:reset_attribute!, "test") end it "resets the value to the original" do expect(person.test).to be_nil end it "removes the field from the changes" do expect(person.changed).to_not include("test") end end end context "when the attribute has not changed" do let(:person) do Person.instantiate(title: "Grand Poobah") end before do person.send(:reset_attribute!, "title") end it "does nothing" do expect(person.title).to be_nil end end end context "when fields have been defined pre-dirty inclusion" do let(:document) do Dokument.new end it "defines a _change method" do expect(document.updated_at_change).to be_nil end it "defines a _changed? method" do expect(document.updated_at_changed?).to be false end it "defines a _changes method" do expect(document.updated_at_was).to be_nil end end context "when only embedded documents change" do let!(:person) do Person.create end context "when the child is an embeds one" do context "when the child is new" do let!(:name) do person.build_name(first_name: "Gordon", last_name: "Ramsay") end it "flags the parent as changed" do expect(person).to be_changed end end context "when the child is modified" do let!(:name) do person.create_name(first_name: "Gordon", last_name: "Ramsay") end before do name.first_name = "G" end it "flags the parent as changed" do expect(person).to be_changed end end context "when the child is not modified" do let!(:name) do person.create_name(first_name: "Gordon", last_name: "Ramsay") end it "does not flag the parent as changed" do expect(person).to_not be_changed end end end context "when the child is an embeds many" do context "when a child is new" do let!(:address) do person.addresses.build(street: "jakobstr.") end it "flags the parent as 
changed" do expect(person).to be_changed end end context "when a child is modified" do let!(:address) do person.addresses.create(street: "jakobstr.") end before do address.city = "Berlin" end it "flags the parent as changed" do expect(person).to be_changed end end context "when no child is modified" do let!(:address) do person.addresses.create(street: "skalitzerstr.") end it "does not flag the parent as changed" do expect(person).to_not be_changed end end end end context "when changing a hash of hashes" do let!(:person) do Person.create(map: { "test" => {}}) end before do person.map["test"]["value"] = 10 end it "records the changes" do expect(person.changes).to eq( { "map" => [{ "test" => {}}, { "test" => { "value" => 10 }}]} ) end end context "when modifying a many to many key" do let!(:person) do Person.create end let!(:preference) do Preference.create(name: "dirty") end before do person.update_attributes(preference_ids: [ preference.id ]) end it "records the foreign key dirty changes" do expect(person.previous_changes["preference_ids"]).to eq( [nil, [ preference.id ]] ) end end context "when accessing an array field" do let!(:person) do Person.create end let(:from_db) do Person.find(person.id) end context "when the field is not changed" do before do from_db.preference_ids end it "flags the change" do expect(from_db.changes["preference_ids"]).to eq([ nil, []]) end it "does not include the changes in the setters" do expect(from_db.setters).to be_empty end end end context "when reloading an unchanged document" do let!(:person) do Person.create end let(:from_db) do Person.find(person.id) end before do from_db.reload end it "clears the changed attributes" do expect(from_db.changed_attributes).to be_empty end end context "when fields are getting changed" do let(:person) do Person.create( title: "MC", some_dynamic_field: 'blah' ) end before do person.title = "DJ" person.write_attribute(:ssn, "222-22-2222") person.some_dynamic_field = 'bloop' end it "marks the document 
as changed" do expect(person).to be_changed end it "marks field changes" do expect(person.changes).to eq({ "title" => [ "MC", "DJ" ], "ssn" => [ nil, "222-22-2222" ], "some_dynamic_field" => [ "blah", "bloop" ] }) end it "marks changed fields" do expect(person.changed).to eq([ "title", "ssn", "some_dynamic_field" ]) end it "marks the field as changed" do expect(person.title_changed?).to be true end it "stores previous field values" do expect(person.title_was).to eq("MC") end it "marks field changes" do expect(person.title_change).to eq([ "MC", "DJ" ]) end it "allows reset of field changes" do person.reset_title! expect(person.title).to eq("MC") expect(person.changed).to eq([ "ssn", "some_dynamic_field" ]) end context "after a save" do before do person.save! end it "clears changes" do expect(person).to_not be_changed end it "stores previous changes" do expect(person.previous_changes["title"]).to eq([ "MC", "DJ" ]) expect(person.previous_changes["ssn"]).to eq([ nil, "222-22-2222" ]) end end context "when the previous value is nil" do before do person.score = 100 person.reset_score! end it "removes the attribute from the document" do expect(person.score).to be_nil end end end context "when accessing dirty attributes in callbacks" do context "when the document is persisted" do let!(:acolyte) do Acolyte.create(name: "callback-test") end before do Acolyte.set_callback(:save, :after, if: :callback_test?) do |doc| doc[:changed_in_callback] = doc.changes.dup end end after do Acolyte._save_callbacks.select do |callback| callback.kind == :after end.each do |callback| Acolyte._save_callbacks.delete(callback) end end it "retains the changes until after all callbacks" do acolyte.update_attribute(:status, "testing") expect(acolyte.changed_in_callback).to eq({ "status" => [ nil, "testing" ] }) end end context "when the document is new" do let!(:acolyte) do Acolyte.new(name: "callback-test") end before do Acolyte.set_callback(:save, :after, if: :callback_test?) 
do |doc| doc[:changed_in_callback] = doc.changes.dup end end after do Acolyte._save_callbacks.select do |callback| callback.kind == :after end.each do |callback| Acolyte._save_callbacks.delete(callback) end end it "retains the changes until after all callbacks" do acolyte.save expect(acolyte.changed_in_callback["name"]).to eq([ nil, "callback-test" ]) end end end context "when associations are getting changed" do let(:person) do Person.create(addresses: [ Address.new ]) end before do person.addresses = [ Address.new ] end it "does not set the association to nil when hitting the database" do expect(person.setters).to_not eq({ "addresses" => nil }) end end end
// <API key>: GPL-2.0
/*
 * Software PMU exposing a set of free-running x86 model-specific registers
 * (TSC, APERF/MPERF, PPERF, SMI count, PTSC, IRPERF, thermal status) as
 * perf events.  Counting only: none of these registers can raise an
 * interrupt, so sampling is rejected in msr_event_init().
 *
 * NOTE(review): several identifiers below appear as the redaction
 * placeholder "<API key>"; they are preserved byte-for-byte and must be
 * restored from the original source before this file can build.
 */
#include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/intel-family.h>

/* Index of each supported MSR; doubles as the event's config value. */
enum perf_msr_id {
	PERF_MSR_TSC		= 0,
	PERF_MSR_APERF		= 1,
	PERF_MSR_MPERF		= 2,
	PERF_MSR_PPERF		= 3,
	PERF_MSR_SMI		= 4,
	PERF_MSR_PTSC		= 5,
	PERF_MSR_IRPERF		= 6,
	PERF_MSR_THERM		= 7,
	PERF_MSR_THERM_SNAP	= 8,
	PERF_MSR_THERM_UNIT	= 9,
	PERF_MSR_EVENT_MAX,
};

/*
 * Per-MSR probe helpers: each returns true when the boot CPU advertises
 * the feature backing the corresponding register.
 */
static bool test_aperfmperf(int idx)
{
	return boot_cpu_has(<API key>);
}

static bool test_ptsc(int idx)
{
	return boot_cpu_has(X86_FEATURE_PTSC);
}

static bool test_irperf(int idx)
{
	return boot_cpu_has(X86_FEATURE_IRPERF);
}

static bool test_therm_status(int idx)
{
	return boot_cpu_has(X86_FEATURE_DTHERM);
}

/*
 * PPERF and SMI_COUNT are not feature-flagged; whitelist them per Intel
 * family-6 model instead.  Returns true only when @idx is one of the MSRs
 * known to exist on the running model.
 */
static bool test_intel(int idx)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
	    boot_cpu_data.x86 != 6)
		return false;

	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_NEHALEM:
	case <API key>:
	case <API key>:
	case <API key>:
	case INTEL_FAM6_WESTMERE:
	case <API key>:
	case <API key>:
	case <API key>:
	case <API key>:
	case <API key>:
	case <API key>:
	case <API key>:
	case <API key>:
	case <API key>:
	case <API key>:
	case <API key>:
	case <API key>:
	case <API key>:
	case <API key>:
	case <API key>:
	case <API key>:
	case <API key>:
	case <API key>:
	case <API key>:
	case <API key>:
	case <API key>:
	case <API key>:
		/* These models have the SMI count MSR only. */
		if (idx == PERF_MSR_SMI)
			return true;
		break;

	case <API key>:
	case <API key>:
	case <API key>:
	case <API key>:
	case <API key>:
		/* These models additionally have PPERF. */
		if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
			return true;
		break;
	}

	return false;
}

/* One table row per MSR-backed event: register, sysfs attr, probe hook. */
struct perf_msr {
	u64 msr;
	struct <API key> *attr;
	bool (*test)(int idx);
};

/* sysfs "events" directory entries (event=<config> encodings). */
<API key>(tsc,				evattr_tsc,		"event=0x00");
<API key>(aperf,			evattr_aperf,		"event=0x01");
<API key>(mperf,			evattr_mperf,		"event=0x02");
<API key>(pperf,			evattr_pperf,		"event=0x03");
<API key>(smi,				evattr_smi,		"event=0x04");
<API key>(ptsc,				evattr_ptsc,		"event=0x05");
<API key>(irperf,			evattr_irperf,		"event=0x06");
<API key>(cpu_thermal_margin,		evattr_therm,		"event=0x07");
<API key>(cpu_thermal_margin.snapshot,	evattr_therm_snap,	"1");
<API key>(cpu_thermal_margin.unit,	evattr_therm_unit,	"C");

/*
 * Event table, indexed by perf_msr_id.  TSC has msr==0: read via rdtsc
 * rather than rdmsr (see msr_read_counter()), and is always present so it
 * needs no probe hook.
 */
static struct perf_msr msr[] = {
	[PERF_MSR_TSC]		= { 0,			&evattr_tsc,		NULL,			},
	[PERF_MSR_APERF]	= { MSR_IA32_APERF,	&evattr_aperf,		test_aperfmperf,	},
	[PERF_MSR_MPERF]	= { MSR_IA32_MPERF,	&evattr_mperf,		test_aperfmperf,	},
	[PERF_MSR_PPERF]	= { MSR_PPERF,		&evattr_pperf,		test_intel,		},
	[PERF_MSR_SMI]		= { MSR_SMI_COUNT,	&evattr_smi,		test_intel,		},
	[PERF_MSR_PTSC]		= { MSR_F15H_PTSC,	&evattr_ptsc,		test_ptsc,		},
	[PERF_MSR_IRPERF]	= { MSR_F17H_IRPERF,	&evattr_irperf,		test_irperf,		},
	[PERF_MSR_THERM]	= { <API key>,		&evattr_therm,		test_therm_status,	},
	[PERF_MSR_THERM_SNAP]	= { <API key>,		&evattr_therm_snap,	test_therm_status,	},
	[PERF_MSR_THERM_UNIT]	= { <API key>,		&evattr_therm_unit,	test_therm_status,	},
};

/* Filled at init time with the attrs of the MSRs that actually probed. */
static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
	NULL,
};

static struct attribute_group events_attr_group = {
	.name = "events",
	.attrs = events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group format_attr_group = {
	.name = "format",
	.attrs = format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&events_attr_group,
	&format_attr_group,
	NULL,
};

/*
 * Validate a new event: must target this PMU, use no filters or sampling,
 * and name an MSR that survived probing.  The array_index_nospec() clamp
 * prevents speculative out-of-bounds use of the user-controlled config.
 */
static int msr_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (cfg >= PERF_MSR_EVENT_MAX)
		return -EINVAL;

	cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX);

	/* attr was cleared at init time if the MSR failed to probe. */
	if (!msr[cfg].attr)
		return -EINVAL;

	event->hw.idx = -1;
	event->hw.event_base = msr[cfg].msr;
	event->hw.config = cfg;

	return 0;
}

/* Raw read: rdtsc for the TSC pseudo-MSR (event_base == 0), rdmsr else. */
static inline u64 msr_read_counter(struct perf_event *event)
{
	u64 now;

	if (event->hw.event_base)
		rdmsrl(event->hw.event_base, now);
	else
		now = rdtsc_ordered();

	return now;
}

/*
 * Fold the delta since the last read into event->count.  SMI_COUNT is a
 * 32-bit register, so its delta is sign-extended; the thermal status MSR
 * is a snapshot (bit 31 = reading valid, bits 16-21 = digital readout),
 * not an accumulating counter, so it is stored absolutely.
 */
static void msr_event_update(struct perf_event *event)
{
	u64 prev, now;
	s64 delta;

	/* Careful, an NMI might modify the previous event value: */
again:
	prev = local64_read(&event->hw.prev_count);
	now = msr_read_counter(event);

	if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;
	if (unlikely(event->hw.event_base == MSR_SMI_COUNT)) {
		delta = sign_extend64(delta, 31);
		local64_add(delta, &event->count);
	} else if (unlikely(event->hw.event_base == <API key>)) {
		/* If valid, extract digital readout, otherwise set to -1: */
		now = now & (1ULL << 31) ? (now >> 16) & 0x3f : -1;
		local64_set(&event->count, now);
	} else {
		local64_add(delta, &event->count);
	}
}

/* Record the baseline value that msr_event_update() will diff against. */
static void msr_event_start(struct perf_event *event, int flags)
{
	u64 now = msr_read_counter(event);

	local64_set(&event->hw.prev_count, now);
}

static void msr_event_stop(struct perf_event *event, int flags)
{
	msr_event_update(event);
}

static void msr_event_del(struct perf_event *event, int flags)
{
	msr_event_stop(event, PERF_EF_UPDATE);
}

static int msr_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		msr_event_start(event, flags);

	return 0;
}

static struct pmu pmu_msr = {
	.task_ctx_nr	= perf_sw_context,

	.attr_groups	= attr_groups,
	.event_init	= msr_event_init,
	.add		= msr_event_add,
	.del		= msr_event_del,
	.start		= msr_event_start,
	.stop		= msr_event_stop,
	.read		= msr_event_update,
	.capabilities	= <API key>,
};

/*
 * Probe each optional MSR (feature test plus a safe trial read, since a
 * hypervisor may hide a read-only MSR), compact the surviving sysfs
 * attrs, and register the PMU.  Returns 0 even without TSC so the system
 * boots normally with the driver simply disabled.
 */
static int __init msr_init(void)
{
	int i, j = 0;

	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		pr_cont("no MSR PMU driver.\n");
		return 0;
	}

	/* Probe the MSRs. */
	for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
		u64 val;

		/* Virt sucks; you cannot tell if a R/O MSR is present :/ */
		if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
			msr[i].attr = NULL;
	}

	/* List remaining MSRs in the sysfs attrs. */
	for (i = 0; i < PERF_MSR_EVENT_MAX; i++) {
		if (msr[i].attr)
			events_attrs[j++] = &msr[i].attr->attr.attr;
	}
	events_attrs[j] = NULL;

	perf_pmu_register(&pmu_msr, "msr", -1);

	return 0;
}
device_initcall(msr_init);
/*
 * Board support tables for the Qualcomm MSM 8x50 SoC: the UART3 platform
 * device and the proc_comm-managed clock list handed to the clock driver.
 * NOTE(review): one clock entry's name/ID is redacted ("<API key>") and
 * must be restored from the original source.
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <mach/irqs.h>
#include <mach/msm_iomap.h>
#include <mach/dma.h>
#include <mach/board.h>

#include "devices.h"

#include <asm/mach/flash.h>
#include <mach/mmc.h>

/* IRQ line and MMIO register window for the third UART block. */
static struct resource resources_uart3[] = {
	{
		.start	= INT_UART3,
		.end	= INT_UART3,
		.flags	= IORESOURCE_IRQ,
	},
	{
		.start	= MSM_UART3_PHYS,
		.end	= MSM_UART3_PHYS + MSM_UART3_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
};

/* .id = 2: zero-based, so this registers as the third msm_serial port. */
struct platform_device msm_device_uart3 = {
	.name	= "msm_serial",
	.id	= 2,
	.num_resources	= ARRAY_SIZE(resources_uart3),
	.resource	= resources_uart3,
};

/*
 * Clocks controlled via proc_comm RPC on the 8x50.  Flags: OFF = gate the
 * clock when unused, CLK_MIN/CLK_MINMAX = rate-constraint behavior taken
 * by the clock driver.  Only uart_clk is bound to a specific device here;
 * the rest are looked up by name alone.
 */
struct clk msm_clocks_8x50[] = {
	CLK_PCOM("adm_clk",	ADM_CLK,	NULL, 0),
	CLK_PCOM("ebi1_clk",	EBI1_CLK,	NULL, CLK_MIN),
	CLK_PCOM("ebi2_clk",	EBI2_CLK,	NULL, 0),
	CLK_PCOM("ecodec_clk",	ECODEC_CLK,	NULL, 0),
	CLK_PCOM("emdh_clk",	EMDH_CLK,	NULL, OFF | CLK_MINMAX),
	CLK_PCOM("gp_clk",	GP_CLK,		NULL, 0),
	CLK_PCOM("grp_clk",	GRP_3D_CLK,	NULL, 0),
	CLK_PCOM("icodec_rx_clk", ICODEC_RX_CLK, NULL, 0),
	CLK_PCOM("icodec_tx_clk", ICODEC_TX_CLK, NULL, 0),
	CLK_PCOM("imem_clk",	IMEM_CLK,	NULL, OFF),
	CLK_PCOM("mdc_clk",	MDC_CLK,	NULL, 0),
	CLK_PCOM("mddi_clk",	PMDH_CLK,	NULL, OFF | CLK_MINMAX),
	CLK_PCOM("mdp_clk",	MDP_CLK,	NULL, OFF),
	CLK_PCOM("mdp_lcdc_pclk_clk", MDP_LCDC_PCLK_CLK, NULL, 0),
	CLK_PCOM("<API key>", <API key>, NULL, 0),
	CLK_PCOM("mdp_vsync_clk", MDP_VSYNC_CLK, NULL, 0),
	CLK_PCOM("pbus_clk",	PBUS_CLK,	NULL, CLK_MIN),
	CLK_PCOM("pcm_clk",	PCM_CLK,	NULL, 0),
	CLK_PCOM("sdac_clk",	SDAC_CLK,	NULL, OFF),
	CLK_PCOM("spi_clk",	SPI_CLK,	NULL, 0),
	CLK_PCOM("tsif_clk",	TSIF_CLK,	NULL, 0),
	CLK_PCOM("tsif_ref_clk", TSIF_REF_CLK,	NULL, 0),
	CLK_PCOM("tv_dac_clk",	TV_DAC_CLK,	NULL, 0),
	CLK_PCOM("tv_enc_clk",	TV_ENC_CLK,	NULL, 0),
	CLK_PCOM("uart_clk",	UART3_CLK,	&msm_device_uart3.dev, OFF),
	CLK_PCOM("usb_hs_clk",	USB_HS_CLK,	NULL, OFF),
	CLK_PCOM("usb_hs_pclk",	USB_HS_P_CLK,	NULL, OFF),
	CLK_PCOM("usb_otg_clk",	USB_OTG_CLK,	NULL, 0),
	CLK_PCOM("vdc_clk",	VDC_CLK,	NULL, OFF | CLK_MIN),
	CLK_PCOM("vfe_clk",	VFE_CLK,	NULL, OFF),
	CLK_PCOM("vfe_mdc_clk",	VFE_MDC_CLK,	NULL, OFF),
	CLK_PCOM("vfe_axi_clk",	VFE_AXI_CLK,	NULL, OFF),
	CLK_PCOM("usb_hs2_clk",	USB_HS2_CLK,	NULL, OFF),
	CLK_PCOM("usb_hs2_pclk", USB_HS2_P_CLK,	NULL, OFF),
	CLK_PCOM("usb_hs3_clk",	USB_HS3_CLK,	NULL, OFF),
	CLK_PCOM("usb_hs3_pclk", USB_HS3_P_CLK,	NULL, OFF),
	CLK_PCOM("usb_phy_clk",	USB_PHY_CLK,	NULL, 0),
};

unsigned msm_num_clocks_8x50 = ARRAY_SIZE(msm_clocks_8x50);
/*
 * SH-64 kernel symbol exports: makes arch-specific helpers (semaphores,
 * uaccess assembly stubs, string/mem primitives, libgcc arithmetic
 * routines) visible to loadable modules via EXPORT_SYMBOL.
 * NOTE(review): one exported networking symbol is redacted ("<API key>");
 * restore it from the original source.
 */
#include <linux/rwsem.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/user.h>
#include <linux/elfcore.h>
#include <linux/sched.h>
#include <linux/in6.h>
#include <linux/interrupt.h>
#include <linux/screen_info.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/irq.h>

extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);

/* platform dependent support */
EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(kernel_thread);

/* Networking helper routines. */
EXPORT_SYMBOL(<API key>);

EXPORT_SYMBOL(strstr);

#ifdef CONFIG_VT
EXPORT_SYMBOL(screen_info);
#endif

/* Semaphore slow-path entry points (see asm/semaphore.h). */
EXPORT_SYMBOL(__down);
EXPORT_SYMBOL(__down_trylock);
EXPORT_SYMBOL(__up);
/* 64-bit get/put_user assembly stubs. */
EXPORT_SYMBOL(__put_user_asm_l);
EXPORT_SYMBOL(__get_user_asm_l);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strlen);

EXPORT_SYMBOL(flush_dcache_page);

/* For ext3 */
EXPORT_SYMBOL(sh64_page_clear);

/* Ugh.  These come in from libgcc.a at link time. */
extern void __sdivsi3(void);
extern void __muldi3(void);
extern void __udivsi3(void);
extern char __div_table;
EXPORT_SYMBOL(__sdivsi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__div_table);
/* Tell sim-arange.h it's us. */ #define SIM_ARANGE_C #include "libiberty.h" #include "sim-basics.h" #include "sim-assert.h" #ifdef HAVE_STDLIB_H #include <stdlib.h> #endif #ifdef HAVE_STRING_H #include <string.h> #endif #define DEFINE_INLINE_P (! defined (<API key>)) #define DEFINE_NON_INLINE_P defined (<API key>) #if DEFINE_NON_INLINE_P /* Insert a range. */ static void insert_range (ADDR_SUBRANGE **pos, ADDR_SUBRANGE *asr) { asr->next = *pos; *pos = asr; } /* Delete a range. */ static void delete_range (ADDR_SUBRANGE **thisasrp) { ADDR_SUBRANGE *thisasr; thisasr = *thisasrp; *thisasrp = thisasr->next; free (thisasr); } /* Add or delete an address range. This code was borrowed from linux's locks.c:posix_lock_file(). ??? Todo: Given our simpler needs this could be simplified (split into two fns). */ static void frob_range (ADDR_RANGE *ar, address_word start, address_word end, int delete_p) { ADDR_SUBRANGE *asr; ADDR_SUBRANGE *new_asr, *new_asr2; ADDR_SUBRANGE *left = NULL; ADDR_SUBRANGE *right = NULL; ADDR_SUBRANGE **before; ADDR_SUBRANGE init_caller; ADDR_SUBRANGE *caller = &init_caller; int added_p = 0; memset (caller, 0, sizeof (ADDR_SUBRANGE)); new_asr = ZALLOC (ADDR_SUBRANGE); new_asr2 = ZALLOC (ADDR_SUBRANGE); caller->start = start; caller->end = end; before = &ar->ranges; while ((asr = *before) != NULL) { if (! delete_p) { /* Try next range if current range preceeds new one and not adjacent or overlapping. */ if (asr->end < caller->start - 1) goto next_range; /* Break out if new range preceeds current one and not adjacent or overlapping. */ if (asr->start > caller->end + 1) break; /* If we come here, the new and current ranges are adjacent or overlapping. Make one range yielding from the lower start address of both ranges to the higher end address. 
*/ if (asr->start > caller->start) asr->start = caller->start; else caller->start = asr->start; if (asr->end < caller->end) asr->end = caller->end; else caller->end = asr->end; if (added_p) { delete_range (before); continue; } caller = asr; added_p = 1; } else /* deleting a range */ { /* Try next range if current range preceeds new one. */ if (asr->end < caller->start) goto next_range; /* Break out if new range preceeds current one. */ if (asr->start > caller->end) break; added_p = 1; if (asr->start < caller->start) left = asr; /* If the next range in the list has a higher end address than the new one, insert the new one here. */ if (asr->end > caller->end) { right = asr; break; } if (asr->start >= caller->start) { /* The new range completely replaces an old one (This may happen several times). */ if (added_p) { delete_range (before); continue; } /* Replace the old range with the new one. */ asr->start = caller->start; asr->end = caller->end; caller = asr; added_p = 1; } } /* Go on to next range. */ next_range: before = &asr->next; } if (!added_p) { if (delete_p) goto out; new_asr->start = caller->start; new_asr->end = caller->end; insert_range (before, new_asr); new_asr = NULL; } if (right) { if (left == right) { /* The new range breaks the old one in two pieces, so we have to use the second new range. */ new_asr2->start = right->start; new_asr2->end = right->end; left = new_asr2; insert_range (before, left); new_asr2 = NULL; } right->start = caller->end + 1; } if (left) { left->end = caller->start - 1; } out: if (new_asr) free (new_asr); if (new_asr2) free (new_asr2); } /* Free T and all subtrees. */ static void free_search_tree (ADDR_RANGE_TREE *t) { if (t != NULL) { free_search_tree (t->lower); free_search_tree (t->higher); free (t); } } /* Subroutine of build_search_tree to recursively build a balanced tree. ??? It's not an optimum tree though. 
*/ static ADDR_RANGE_TREE * build_tree_1 (ADDR_SUBRANGE **asrtab, unsigned int n) { unsigned int mid = n / 2; ADDR_RANGE_TREE *t; if (n == 0) return NULL; t = (ADDR_RANGE_TREE *) xmalloc (sizeof (ADDR_RANGE_TREE)); t->start = asrtab[mid]->start; t->end = asrtab[mid]->end; if (mid != 0) t->lower = build_tree_1 (asrtab, mid); else t->lower = NULL; if (n > mid + 1) t->higher = build_tree_1 (asrtab + mid + 1, n - mid - 1); else t->higher = NULL; return t; } /* Build a search tree for address range AR. */ static void build_search_tree (ADDR_RANGE *ar) { /* ??? Simple version for now. */ ADDR_SUBRANGE *asr,**asrtab; unsigned int i, n; for (n = 0, asr = ar->ranges; asr != NULL; ++n, asr = asr->next) continue; asrtab = (ADDR_SUBRANGE **) xmalloc (n * sizeof (ADDR_SUBRANGE *)); for (i = 0, asr = ar->ranges; i < n; ++i, asr = asr->next) asrtab[i] = asr; ar->range_tree = build_tree_1 (asrtab, n); free (asrtab); } void sim_addr_range_add (ADDR_RANGE *ar, address_word start, address_word end) { frob_range (ar, start, end, 0); /* Rebuild the search tree. */ /* ??? Instead of rebuilding it here it could be done in a module resume handler, say by first checking for a `changed' flag, assuming of course this would never be done while the simulation is running. */ free_search_tree (ar->range_tree); build_search_tree (ar); } void <API key> (ADDR_RANGE *ar, address_word start, address_word end) { frob_range (ar, start, end, 1); /* Rebuild the search tree. */ /* ??? Instead of rebuilding it here it could be done in a module resume handler, say by first checking for a `changed' flag, assuming of course this would never be done while the simulation is running. 
*/ free_search_tree (ar->range_tree); build_search_tree (ar); } #endif /* DEFINE_NON_INLINE_P */ #if DEFINE_INLINE_P SIM_ARANGE_INLINE int <API key> (ADDR_RANGE *ar, address_word addr) { ADDR_RANGE_TREE *t = ar->range_tree; while (t != NULL) { if (addr < t->start) t = t->lower; else if (addr > t->end) t = t->higher; else return 1; } return 0; } #endif /* DEFINE_INLINE_P */
#include <linux/kernel.h> #include <linux/device.h> #include <linux/if.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <linux/slab.h> #include <linux/notifier.h> #include <net/mac80211.h> #include <net/cfg80211.h> #include "ieee80211_i.h" #include "rate.h" #include "debugfs.h" #include "debugfs_netdev.h" static ssize_t ieee80211_if_read( struct <API key> *sdata, char __user *userbuf, size_t count, loff_t *ppos, ssize_t (*format)(const struct <API key> *, char *, int)) { char buf[70]; ssize_t ret = -EINVAL; read_lock(&dev_base_lock); if (sdata->dev->reg_state == NETREG_REGISTERED) ret = (*format)(sdata, buf, sizeof(buf)); read_unlock(&dev_base_lock); if (ret != -EINVAL) ret = <API key>(userbuf, count, ppos, buf, ret); return ret; } static ssize_t ieee80211_if_write( struct <API key> *sdata, const char __user *userbuf, size_t count, loff_t *ppos, ssize_t (*write)(struct <API key> *, const char *, int)) { u8 *buf; ssize_t ret; buf = kmalloc(count, GFP_KERNEL); if (!buf) return -ENOMEM; ret = -EFAULT; if (copy_from_user(buf, userbuf, count)) goto freebuf; ret = -ENODEV; rtnl_lock(); if (sdata->dev->reg_state == NETREG_REGISTERED) ret = (*write)(sdata, buf, count); rtnl_unlock(); freebuf: kfree(buf); return ret; } #define IEEE80211_IF_FMT(name, field, format_string) \ static ssize_t ieee80211_if_fmt_##name( \ const struct <API key> *sdata, char *buf, \ int buflen) \ { \ return scnprintf(buf, buflen, format_string, sdata->field); \ } #define <API key>(name, field) \ IEEE80211_IF_FMT(name, field, "%d\n") #define <API key>(name, field) \ IEEE80211_IF_FMT(name, field, "% #define <API key>(name, field) \ IEEE80211_IF_FMT(name, field, "%zd\n") #define <API key>(name, field) \ static ssize_t ieee80211_if_fmt_##name( \ const struct <API key> *sdata, \ char *buf, int buflen) \ { \ return scnprintf(buf, buflen, "%d\n", atomic_read(&sdata->field));\ } #define <API key>(name, field) \ static ssize_t ieee80211_if_fmt_##name( \ const 
struct <API key> *sdata, char *buf, \ int buflen) \ { \ return scnprintf(buf, buflen, "%pM\n", sdata->field); \ } #define <API key>(name, field) \ static ssize_t ieee80211_if_fmt_##name( \ const struct <API key> *sdata, \ char *buf, int buflen) \ { \ return scnprintf(buf, buflen, "%d\n", sdata->field / 16); \ } #define __IEEE80211_IF_FILE(name, _write) \ static ssize_t ieee80211_if_read_##name(struct file *file, \ char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ return ieee80211_if_read(file->private_data, \ userbuf, count, ppos, \ ieee80211_if_fmt_##name); \ } \ static const struct file_operations name##_ops = { \ .read = ieee80211_if_read_##name, \ .write = (_write), \ .open = <API key>, \ } #define <API key>(name) \ static ssize_t ieee80211_if_write_##name(struct file *file, \ const char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ return ieee80211_if_write(file->private_data, userbuf, count, \ ppos, ieee80211_if_parse_##name); \ } \ __IEEE80211_IF_FILE(name, ieee80211_if_write_##name) #define IEEE80211_IF_FILE(name, field, format) \ IEEE80211_IF_FMT_##format(name, field) \ __IEEE80211_IF_FILE(name, NULL) /* common attributes */ IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC); IEEE80211_IF_FILE(<API key>, rc_rateidx_mask[IEEE80211_BAND_2GHZ], HEX); IEEE80211_IF_FILE(<API key>, rc_rateidx_mask[IEEE80211_BAND_5GHZ], HEX); /* STA attributes */ IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC); IEEE80211_IF_FILE(aid, u.mgd.aid, DEC); IEEE80211_IF_FILE(last_beacon, u.mgd.last_beacon_signal, DEC); IEEE80211_IF_FILE(ave_beacon, u.mgd.ave_beacon_signal, DEC_DIV_16); static int ieee80211_set_smps(struct <API key> *sdata, enum ieee80211_smps_mode smps_mode) { struct ieee80211_local *local = sdata->local; int err; if (!(local->hw.flags & <API key>) && smps_mode == <API key>) return -EINVAL; /* auto should be dynamic if in PS mode */ if (!(local->hw.flags & <API key>) && (smps_mode == <API key> || smps_mode == <API key>)) return -EINVAL; /* supported 
only on managed interfaces for now */ if (sdata->vif.type != <API key>) return -EOPNOTSUPP; mutex_lock(&local->iflist_mtx); err = <API key>(sdata, smps_mode); mutex_unlock(&local->iflist_mtx); return err; } static const char *smps_modes[<API key>] = { [<API key>] = "auto", [IEEE80211_SMPS_OFF] = "off", [<API key>] = "static", [<API key>] = "dynamic", }; static ssize_t <API key>(const struct <API key> *sdata, char *buf, int buflen) { if (sdata->vif.type != <API key>) return -EOPNOTSUPP; return snprintf(buf, buflen, "request: %s\nused: %s\n", smps_modes[sdata->u.mgd.req_smps], smps_modes[sdata->u.mgd.ap_smps]); } static ssize_t <API key>(struct <API key> *sdata, const char *buf, int buflen) { enum ieee80211_smps_mode mode; for (mode = 0; mode < <API key>; mode++) { if (strncmp(buf, smps_modes[mode], buflen) == 0) { int err = ieee80211_set_smps(sdata, mode); if (!err) return buflen; return err; } } return -EINVAL; } <API key>(smps); /* AP attributes */ IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC); static ssize_t <API key>( const struct <API key> *sdata, char *buf, int buflen) { return scnprintf(buf, buflen, "%u\n", skb_queue_len(&sdata->u.ap.ps_bc_buf)); } __IEEE80211_IF_FILE(<API key>, NULL); /* WDS attributes */ IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC); #ifdef <API key> /* Mesh stats attributes */ IEEE80211_IF_FILE(fwded_mcast, u.mesh.mshstats.fwded_mcast, DEC); IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC); IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC); IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC); IEEE80211_IF_FILE(<API key>, u.mesh.mshstats.<API key>, DEC); IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC); /* Mesh parameters */ IEEE80211_IF_FILE(dot11MeshMaxRetries, u.mesh.mshcfg.dot11MeshMaxRetries, DEC); IEEE80211_IF_FILE(<API key>, u.mesh.mshcfg.<API key>, DEC); IEEE80211_IF_FILE(<API key>, 
u.mesh.mshcfg.<API key>, DEC); IEEE80211_IF_FILE(<API key>, u.mesh.mshcfg.<API key>, DEC); IEEE80211_IF_FILE(dot11MeshTTL, u.mesh.mshcfg.dot11MeshTTL, DEC); IEEE80211_IF_FILE(auto_open_plinks, u.mesh.mshcfg.auto_open_plinks, DEC); IEEE80211_IF_FILE(<API key>, u.mesh.mshcfg.<API key>, DEC); IEEE80211_IF_FILE(<API key>, u.mesh.mshcfg.<API key>, DEC); IEEE80211_IF_FILE(<API key>, u.mesh.mshcfg.<API key>, DEC); IEEE80211_IF_FILE(<API key>, u.mesh.mshcfg.<API key>, DEC); IEEE80211_IF_FILE(<API key>, u.mesh.mshcfg.<API key>, DEC); IEEE80211_IF_FILE(path_refresh_time, u.mesh.mshcfg.path_refresh_time, DEC); IEEE80211_IF_FILE(<API key>, u.mesh.mshcfg.<API key>, DEC); IEEE80211_IF_FILE(<API key>, u.mesh.mshcfg.<API key>, DEC); #endif #define DEBUGFS_ADD(name) \ debugfs_create_file(#name, 0400, sdata->debugfs.dir, \ sdata, &name##_ops); #define DEBUGFS_ADD_MODE(name, mode) \ debugfs_create_file(#name, mode, sdata->debugfs.dir, \ sdata, &name##_ops); static void add_sta_files(struct <API key> *sdata) { DEBUGFS_ADD(drop_unencrypted); DEBUGFS_ADD(<API key>); DEBUGFS_ADD(<API key>); DEBUGFS_ADD(bssid); DEBUGFS_ADD(aid); DEBUGFS_ADD(last_beacon); DEBUGFS_ADD(ave_beacon); DEBUGFS_ADD_MODE(smps, 0600); } static void add_ap_files(struct <API key> *sdata) { DEBUGFS_ADD(drop_unencrypted); DEBUGFS_ADD(<API key>); DEBUGFS_ADD(<API key>); DEBUGFS_ADD(num_sta_ps); DEBUGFS_ADD(dtim_count); DEBUGFS_ADD(<API key>); } static void add_wds_files(struct <API key> *sdata) { DEBUGFS_ADD(drop_unencrypted); DEBUGFS_ADD(<API key>); DEBUGFS_ADD(<API key>); DEBUGFS_ADD(peer); } static void add_vlan_files(struct <API key> *sdata) { DEBUGFS_ADD(drop_unencrypted); DEBUGFS_ADD(<API key>); DEBUGFS_ADD(<API key>); } static void add_monitor_files(struct <API key> *sdata) { } #ifdef <API key> static void add_mesh_stats(struct <API key> *sdata) { struct dentry *dir = debugfs_create_dir("mesh_stats", sdata->debugfs.dir); #define MESHSTATS_ADD(name)\ debugfs_create_file(#name, 0400, dir, sdata, &name##_ops); 
MESHSTATS_ADD(fwded_mcast); MESHSTATS_ADD(fwded_unicast); MESHSTATS_ADD(fwded_frames); MESHSTATS_ADD(dropped_frames_ttl); MESHSTATS_ADD(<API key>); MESHSTATS_ADD(estab_plinks); #undef MESHSTATS_ADD } static void add_mesh_config(struct <API key> *sdata) { struct dentry *dir = debugfs_create_dir("mesh_config", sdata->debugfs.dir); #define MESHPARAMS_ADD(name) \ debugfs_create_file(#name, 0600, dir, sdata, &name##_ops); MESHPARAMS_ADD(dot11MeshMaxRetries); MESHPARAMS_ADD(<API key>); MESHPARAMS_ADD(<API key>); MESHPARAMS_ADD(<API key>); MESHPARAMS_ADD(dot11MeshTTL); MESHPARAMS_ADD(auto_open_plinks); MESHPARAMS_ADD(<API key>); MESHPARAMS_ADD(<API key>); MESHPARAMS_ADD(<API key>); MESHPARAMS_ADD(<API key>); MESHPARAMS_ADD(<API key>); MESHPARAMS_ADD(path_refresh_time); MESHPARAMS_ADD(<API key>); #undef MESHPARAMS_ADD } #endif static void add_files(struct <API key> *sdata) { if (!sdata->debugfs.dir) return; switch (sdata->vif.type) { case <API key>: #ifdef <API key> add_mesh_stats(sdata); add_mesh_config(sdata); #endif break; case <API key>: add_sta_files(sdata); break; case <API key>: break; case NL80211_IFTYPE_AP: add_ap_files(sdata); break; case NL80211_IFTYPE_WDS: add_wds_files(sdata); break; case <API key>: add_monitor_files(sdata); break; case <API key>: add_vlan_files(sdata); break; default: break; } } void <API key>(struct <API key> *sdata) { char buf[10+IFNAMSIZ]; sprintf(buf, "netdev:%s", sdata->name); sdata->debugfs.dir = debugfs_create_dir(buf, sdata->local->hw.wiphy->debugfsdir); add_files(sdata); } void <API key>(struct <API key> *sdata) { if (!sdata->debugfs.dir) return; <API key>(sdata->debugfs.dir); sdata->debugfs.dir = NULL; } void <API key>(struct <API key> *sdata) { struct dentry *dir; char buf[10 + IFNAMSIZ]; dir = sdata->debugfs.dir; if (!dir) return; sprintf(buf, "netdev:%s", sdata->name); if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf)) printk(KERN_ERR "mac80211: debugfs: failed to rename debugfs " "dir to %s\n", buf); }
// Forwarding header: pulls in Boost.Fusion's "unfused" function-object
// adapter under this convenience path. Declares nothing of its own.
#ifndef <API key>
#define <API key>

#include <boost/fusion/functional/adapter/unfused.hpp>

#endif
/**
 * Compiler identifier table (declaration file).
 *
 * Each IdentifierSpec describes one runtime symbol the code generator can
 * emit a reference to: `name` is the exported symbol, `moduleUrl` the module
 * it lives in, and `runtime` the actual value used when running JIT.
 */
import { <API key>, <API key> } from './compile_metadata';
export interface IdentifierSpec {
    name: string;
    moduleUrl: string;
    runtime: any;
}
/** Static registry of every core identifier the generated code may reference. */
export declare class Identifiers {
    static <API key>: IdentifierSpec;
    static ElementRef: IdentifierSpec;
    static NgModuleRef: IdentifierSpec;
    static ViewContainerRef: IdentifierSpec;
    static ChangeDetectorRef: IdentifierSpec;
    static QueryList: IdentifierSpec;
    static TemplateRef: IdentifierSpec;
    static <API key>: IdentifierSpec;
    static <API key>: IdentifierSpec;
    static ComponentFactory: IdentifierSpec;
    static ComponentRef: IdentifierSpec;
    static NgModuleFactory: IdentifierSpec;
    static NgModuleInjector: IdentifierSpec;
    static <API key>: IdentifierSpec;
    static Injector: IdentifierSpec;
    static ViewEncapsulation: IdentifierSpec;
    static <API key>: IdentifierSpec;
    static SecurityContext: IdentifierSpec;
    static LOCALE_ID: IdentifierSpec;
    static TRANSLATIONS_FORMAT: IdentifierSpec;
    static inlineInterpolate: IdentifierSpec;
    static interpolate: IdentifierSpec;
    static EMPTY_ARRAY: IdentifierSpec;
    static EMPTY_MAP: IdentifierSpec;
    static Renderer: IdentifierSpec;
    static viewDef: IdentifierSpec;
    static elementDef: IdentifierSpec;
    static anchorDef: IdentifierSpec;
    static textDef: IdentifierSpec;
    static directiveDef: IdentifierSpec;
    static providerDef: IdentifierSpec;
    static queryDef: IdentifierSpec;
    static pureArrayDef: IdentifierSpec;
    static pureObjectDef: IdentifierSpec;
    static purePipeDef: IdentifierSpec;
    static pipeDef: IdentifierSpec;
    static nodeValue: IdentifierSpec;
    static ngContentDef: IdentifierSpec;
    static unwrapValue: IdentifierSpec;
    static createRendererType2: IdentifierSpec;
    static RendererType2: IdentifierSpec;
    static ViewDefinition: IdentifierSpec;
    static <API key>: IdentifierSpec;
}
/** Builds an asset URL for a package file; `type` presumably selects src vs testing — TODO confirm against implementation. */
export declare function assetUrl(pkg: string, path?: string, type?: string): string;
/** Resolves a spec to its runtime value. */
export declare function resolveIdentifier(identifier: IdentifierSpec): any;
export declare function createIdentifier(identifier: IdentifierSpec): <API key>;
export declare function identifierToken(identifier: <API key>): <API key>;
export declare function <API key>(identifier: IdentifierSpec): <API key>;
/** Looks up one member of an enum-valued identifier by name. */
export declare function <API key>(enumType: IdentifierSpec, name: string): <API key>;
using System;
using System.Collections.Generic;
using UnityEngine;

namespace UnityTest
{
    /// <summary>
    /// Serializable record of a single unit test's outcome. Identity
    /// (name, full name, id) is delegated to the wrapped UnitTestInfo;
    /// outcome fields are copied in from an ITestResult via Update().
    /// </summary>
    [Serializable]
    public class UnitTestResult : ITestResult
    {
        public bool Executed { get; set; }
        public TestResultState ResultState { get; set; }
        public UnitTestInfo Test { get; set; }
        public double Duration { get; set; }
        public string Message { get; set; }
        public string StackTrace { get; set; }
        public bool IsIgnored { get; set; }
        public string Logs { get; set; }
        public bool Outdated { get; set; }

        // Identity comes straight from the associated test descriptor.
        public string Name { get { return Test.MethodName; } }
        public string FullName { get { return Test.FullName; } }
        public string Id { get { return Test.Id; } }

        /// <summary>
        /// Copies all outcome fields from another result and marks whether
        /// this snapshot is stale. Ignored status also honors the test
        /// descriptor's own ignore flag.
        /// </summary>
        public void Update(ITestResult source, bool outdated)
        {
            Executed = source.Executed;
            ResultState = source.ResultState;
            Duration = source.Duration;
            Message = source.Message;
            StackTrace = source.StackTrace;
            Logs = source.Logs;
            Outdated = outdated;

            bool ignoredByDescriptor = Test != null && Test.IsIgnored;
            IsIgnored = source.IsIgnored || ignoredByDescriptor;
        }

        #region Helper methods

        public bool IsFailure { get { return ResultState == TestResultState.Failure; } }
        public bool IsError { get { return ResultState == TestResultState.Error; } }
        public bool IsSuccess { get { return ResultState == TestResultState.Success; } }
        public bool IsInconclusive { get { return ResultState == TestResultState.Inconclusive; } }

        #endregion
    }
}
declare namespace jsrsasign.KJUR.asn1.csr {
    /**
     * ASN.1 <API key> structure class
     * @param params associative array of parameters (ex. {})
     * @description
     * ```
     * // -- DEFINITION OF ASN.1 SYNTAX --
     * // <API key> ::= SEQUENCE {
     * //   version       INTEGER { v1(0) } (v1,...),
     * //   subject       Name,
     * //   subjectPKInfo <API key>{{ PKInfoAlgorithms }},
     * //   attributes    [0] Attributes{{ CRIAttributes }} }
     * ```
     *
     * @example
     * csri = new KJUR.asn1.csr.<API key>();
     * csri.setSubjectByParam({'str': '/C=US/O=Test/CN=example.com'});
     * csri.<API key>(pubKeyObj);
     */
    class <API key> extends ASN1Object {
        constructor();
        // Internal one-time setup of the structure's ASN.1 component list.
        _initialize(): void;
        /**
         * set subject name field by parameter
         * @param x500NameParam X500Name parameter
         * @description
         * @example
         * csri.setSubjectByParam({'str': '/C=US/CN=b'});
         * @see KJUR.asn1.x509.X500Name
         */
        setSubjectByParam(x500NameParam: StringParam): void;
        /**
         * set subject public key info by RSA/ECDSA/DSA key parameter
         * @param keyParam public key parameter which passed to `KEYUTIL.getKey` argument
         * @example
         * csri.<API key>(certPEMString); // or
         * csri.<API key>(<API key>); // or
         * csir.<API key>(<API key>); // et.al.
         * @see KJUR.asn1.x509.<API key>
         * @see KEYUTIL.getKey
         */
        <API key>(
            keyParam: RSAKey | crypto.ECDSA | crypto.DSA | jws.JWS.JsonWebKey | { n: string; e: string } | string,
        ): void;
        /**
         * Append a request attribute/extension by name. The accepted shape of
         * `extParams` depends on the extension: CA/critical flags, raw binary,
         * a URI, a name array, a key identifier, or authority-info-access
         * entries (accessMethod OID + accessLocation URI).
         */
        <API key>(
            name: string,
            extParams:
                | { ca: boolean; critical: boolean }
                | BinParam
                | x509.UriParam
                | ArrayParam<{ name: string }>
                | { kid: string }
                | ArrayParam<{ accessMethod: { oid: string }; accessLocation: x509.UriParam }>,
        ): void;
        // Returns the DER encoding of this structure as a hexadecimal string.
        getEncodedHex(): string;
    }
}
<?php
/**
 * @see <API key>
 */
require_once 'Zend/Tool/Project/Context/Filesystem/Directory.php';

/**
 * Project context representing the application's "controllers" directory
 * on disk. Zend_Tool uses the value returned by getName() as this
 * context's registry key.
 */
class <API key> extends <API key>
{
    /**
     * Directory name created on the filesystem for this context.
     *
     * @var string
     */
    protected $_filesystemName = 'controllers';

    /**
     * getName()
     *
     * @return string
     */
    public function getName()
    {
        return '<API key>';
    }
}
#include "rr.h"

namespace rr {

  // Registers the Ruby wrapper class for v8::Context, binding each Ruby
  // method name to its C++ callback, plus the extension-configuration class.
  void Context::Init() {
    ClassBuilder("Context").
      <API key>("New", &New).
      <API key>("GetCurrent", &GetCurrent).
      <API key>("GetEntered", &GetEntered).
      <API key>("GetCalling", &GetCalling).
      <API key>("InContext", &InContext).
      defineMethod("Dispose", &Dispose).
      defineMethod("Global", &Global).
      // BUG FIX: "DetachGlobal" was bound to &Global (copy-paste), so the
      // Ruby method silently returned the global object instead of
      // detaching it. Bind the correct callback.
      defineMethod("DetachGlobal", &DetachGlobal).
      defineMethod("ReattachGlobal", &ReattachGlobal).
      defineMethod("SetSecurityToken", &SetSecurityToken).
      defineMethod("<API key>", &<API key>).
      defineMethod("GetSecurityToken", &GetSecurityToken).
      defineMethod("<API key>", &<API key>).
      defineMethod("SetEmbedderData", &SetEmbedderData).
      defineMethod("GetEmbedderData", &GetEmbedderData).
      defineMethod("<API key>", &<API key>).
      defineMethod("<API key>", &<API key>).
      defineMethod("Enter", &Enter).
      defineMethod("Exit", &Exit).
      store(&Class);
    ClassBuilder("<API key>").
      <API key>("new", &<API key>::initialize).
      store(&<API key>::Class);
  }

  VALUE Context::Dispose(VALUE self) {
    Void(Context(self).dispose())
  }

  // Returns the context's global object wrapped for Ruby.
  VALUE Context::Global(VALUE self) {
    return Object(Context(self)->Global());
  }

  VALUE Context::DetachGlobal(VALUE self) {
    Void(Context(self)->DetachGlobal());
  }

  VALUE Context::ReattachGlobal(VALUE self, VALUE global) {
    Void(Context(self)->ReattachGlobal(Object(global)));
  }

  VALUE Context::GetEntered(VALUE self) {
    return Context(v8::Context::GetEntered());
  }

  VALUE Context::GetCurrent(VALUE self) {
    return Context(v8::Context::GetCurrent());
  }

  VALUE Context::GetCalling(VALUE self) {
    return Context(v8::Context::GetCalling());
  }

  VALUE Context::SetSecurityToken(VALUE self, VALUE token) {
    Void(Context(self)->SetSecurityToken(Value(token)));
  }

  VALUE Context::<API key>(VALUE self) {
    Void(Context(self)-><API key>());
  }

  VALUE Context::GetSecurityToken(VALUE self) {
    return Value(Context(self)->GetSecurityToken());
  }

  VALUE Context::<API key>(VALUE self) {
    return Bool(Context(self)-><API key>());
  }

  VALUE Context::InContext(VALUE self) {
    return Bool(v8::Context::InContext());
  }

  VALUE Context::SetEmbedderData(VALUE self, VALUE index, VALUE data) {
    Void(Context(self)->SetEmbedderData(NUM2INT(index), Value(data)));
  }

  VALUE Context::GetEmbedderData(VALUE self, VALUE index) {
    // BUG FIX: previously wrapped in Void(), which discarded the fetched
    // value and returned nil. A getter must hand the stored data back to
    // Ruby, mirroring GetSecurityToken above.
    return Value(Context(self)->GetEmbedderData(NUM2INT(index)));
  }

  VALUE Context::<API key>(VALUE self, VALUE allow) {
    Void(Context(self)-><API key>(RTEST(allow)));
  }

  VALUE Context::<API key>(VALUE self) {
    return Bool(Context(self)-><API key>());
  }

  // Builds a v8::ExtensionConfiguration from an array of Ruby strings.
  // NOTE(review): `array` borrows RSTRING_PTR storage; assumes v8 copies
  // the names before the Ruby strings can move/GC — confirm upstream.
  VALUE <API key>::initialize(VALUE self, VALUE names) {
    int length = RARRAY_LENINT(names);
    const char* array[length];
    for (int i = 0; i < length; i++) {
      array[i] = RSTRING_PTR(rb_ary_entry(names, i));
    }
    return <API key>(new v8::<API key>(length, array));
  }

  VALUE Context::New(int argc, VALUE argv[], VALUE self) {
    VALUE <API key>; VALUE global_template; VALUE global_object;
    rb_scan_args(argc, argv, "03", &<API key>, &global_template, &global_object);
    v8::Persistent<v8::Context> context(v8::Context::New(
      <API key>(<API key>),
      *ObjectTemplate(global_template),
      *Object(global_object)
    ));
    // The reference wrapper presumably takes its own persistent handle, so
    // the local one is released here — TODO confirm against rr's Ref type.
    Context reference(context);
    context.Dispose();
    return reference;
  }

  VALUE Context::Enter(VALUE self) {
    Void(Context(self)->Enter());
  }

  VALUE Context::Exit(VALUE self) {
    Void(Context(self)->Exit());
  }

  template <>
  void Pointer<v8::<API key>>::unwrap(VALUE value) {
    Data_Get_Struct(value, class v8::<API key>, pointer);
  }
}
// Type definitions for <API key> 1.0
import { ExtensionDefinition } from 'jsreport-core';

declare module 'jsreport-core' {
    interface Template {
        // Module augmentation: widens jsreport-core's Template so this
        // recipe's name is accepted (union with string keeps others valid).
        recipe: '<API key>' | string;
    }
}

// Factory returning the jsreport extension definition for this recipe.
declare function <API key>(): ExtensionDefinition;

export = <API key>;
#include <stdio.h> #include <assert.h> #include <time.h> #include <stdlib.h> #include <stdint.h> #include <string.h> typedef const char *str_t; #include "kbtree.h" KBTREE_INIT(int, uint32_t, kb_generic_cmp) KBTREE_INIT(str, str_t, kb_str_cmp) static int data_size = 5000000; static unsigned *int_data; static char **str_data; void ht_init_data() { int i; char buf[256]; printf("--- generating data... "); srand48(11); int_data = (unsigned*)calloc(data_size, sizeof(unsigned)); str_data = (char**)calloc(data_size, sizeof(char*)); for (i = 0; i < data_size; ++i) { int_data[i] = (unsigned)(data_size * drand48() / 4) * 271828183u; sprintf(buf, "%x", int_data[i]); str_data[i] = strdup(buf); } printf("done!\n"); } void ht_destroy_data() { int i; for (i = 0; i < data_size; ++i) free(str_data[i]); free(str_data); free(int_data); } void ht_khash_int() { int i; unsigned *data = int_data; uint32_t *l, *u; kbtree_t(int) *h; h = kb_init(int, KB_DEFAULT_SIZE); for (i = 0; i < data_size; ++i) { if (kb_get(int, h, data[i]) == 0) kb_put(int, h, data[i]); else kb_del(int, h, data[i]); } printf("[ht_khash_int] size: %d\n", kb_size(h)); if (1) { int cnt = 0; uint32_t x, y; kb_interval(int, h, 2174625464u, &l, &u); printf("interval for 2174625464: (%u, %u)\n", l? *l : 0, u? 
*u : 0); #define traverse_f(p) { if (cnt == 0) y = *p; ++cnt; } __kb_traverse(uint32_t, h, traverse_f); __kb_get_first(uint32_t, h, x); printf("# of elements from traversal: %d\n", cnt); printf("first element: %d == %d\n", x, y); } __kb_destroy(h); } void ht_khash_str() { int i; char **data = str_data; kbtree_t(str) *h; h = kb_init(str, KB_DEFAULT_SIZE); for (i = 0; i < data_size; ++i) { if (kb_get(str, h, data[i]) == 0) kb_put(str, h, data[i]); else kb_del(str, h, data[i]); } printf("[ht_khash_int] size: %d\n", kb_size(h)); __kb_destroy(h); } void ht_timing(void (*f)(void)) { clock_t t = clock(); (*f)(); printf("[ht_timing] %.3lf sec\n", (double)(clock() - t) / CLOCKS_PER_SEC); } int main(int argc, char *argv[]) { if (argc > 1) data_size = atoi(argv[1]); ht_init_data(); ht_timing(ht_khash_int); ht_timing(ht_khash_str); ht_destroy_data(); return 0; }
/** * This file implements POSIX lock type for Lustre. * Its policy properties are start and end of extent and PID. * * These locks are only done through MDS due to POSIX semantics requiring * e.g. that locks could be only partially released and as such split into * two parts, and also that two adjacent locks from the same process may be * merged into a single wider lock. * * Lock modes are mapped like this: * PR and PW for READ and WRITE locks * NL to request a releasing of a portion of the lock * * These flock locks never timeout. */ #define DEBUG_SUBSYSTEM S_LDLM #include <lustre_dlm.h> #include <obd_support.h> #include <obd_class.h> #include <lustre_lib.h> #include <linux/list.h> #include "ldlm_internal.h" /** * <API key> - iterate over the remaining entries in a list * and safeguard against removal of a list entry. * \param pos the &struct list_head to use as a loop counter. pos MUST * have been initialized prior to using it in this macro. * \param n another &struct list_head to use as temporary storage * \param head the head for your list. 
*/ #define <API key>(pos, n, head) \ for (n = pos->next; pos != (head); pos = n, n = pos->next) static inline int <API key>(struct ldlm_lock *lock, struct ldlm_lock *new) { return((new->l_policy_data.l_flock.owner == lock->l_policy_data.l_flock.owner) && (new->l_export == lock->l_export)); } static inline int ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new) { return((new->l_policy_data.l_flock.start <= lock->l_policy_data.l_flock.end) && (new->l_policy_data.l_flock.end >= lock->l_policy_data.l_flock.start)); } static inline void ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags) { LDLM_DEBUG(lock, "%s(mode: %d, flags: 0x%llx)", __func__, mode, flags); /* Safe to not lock here, since it should be empty anyway */ LASSERT(hlist_unhashed(&lock->l_exp_flock_hash)); list_del_init(&lock->l_res_link); if (flags == <API key>) { /* client side - set a flag to prevent sending a CANCEL */ lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING; /* when reaching here, it is under lock_res_and_lock(). Thus, * need call the nolock version of <API key> */ <API key>(lock, mode); } <API key>(lock); } /** * Process a granting attempt for flock lock. * Must be called under ns lock held. * * This function looks for any conflicts for \a lock in the granted or * waiting queues. The lock is granted if no conflicts are found in * either queue. * * It is also responsible for splitting a lock if a portion of the lock * is released. * * If \a first_enq is 0 (ie, called from <API key>): * - blocking ASTs have already been sent * * If \a first_enq is 1 (ie, called from ldlm_lock_enqueue): * - blocking ASTs have not been sent yet, so list of conflicting locks * would be collected and ASTs sent. 
*/ static int <API key>(struct ldlm_lock *req, __u64 *flags, int first_enq, enum ldlm_error *err, struct list_head *work_list) { struct ldlm_resource *res = req->l_resource; struct ldlm_namespace *ns = ldlm_res_to_ns(res); struct list_head *tmp; struct list_head *ownlocks = NULL; struct ldlm_lock *lock = NULL; struct ldlm_lock *new = req; struct ldlm_lock *new2 = NULL; enum ldlm_mode mode = req->l_req_mode; int added = (mode == LCK_NL); int overlaps = 0; int splitted = 0; const struct ldlm_callback_suite null_cbs = { }; CDEBUG(D_DLMTRACE, "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n", *flags, new->l_policy_data.l_flock.owner, new->l_policy_data.l_flock.pid, mode, req->l_policy_data.l_flock.start, req->l_policy_data.l_flock.end); *err = ELDLM_OK; /* No blocking ASTs are sent to the clients for * Posix file & record locks */ req->l_blocking_ast = NULL; reprocess: if ((*flags == <API key>) || (mode == LCK_NL)) { /* This loop determines where this processes locks start * in the resource lr_granted list. */ list_for_each(tmp, &res->lr_granted) { lock = list_entry(tmp, struct ldlm_lock, l_res_link); if (<API key>(lock, req)) { ownlocks = tmp; break; } } } else { int reprocess_failed = 0; lockmode_verify(mode); /* This loop determines if there are existing locks * that conflict with the new lock request. 
*/ list_for_each(tmp, &res->lr_granted) { lock = list_entry(tmp, struct ldlm_lock, l_res_link); if (<API key>(lock, req)) { if (!ownlocks) ownlocks = tmp; continue; } /* locks are compatible, overlap doesn't matter */ if (lockmode_compat(lock->l_granted_mode, mode)) continue; if (!ldlm_flocks_overlap(lock, req)) continue; if (!first_enq) { reprocess_failed = 1; continue; } if (*flags & <API key>) { ldlm_flock_destroy(req, mode, *flags); *err = -EAGAIN; return LDLM_ITER_STOP; } if (*flags & LDLM_FL_TEST_LOCK) { ldlm_flock_destroy(req, mode, *flags); req->l_req_mode = lock->l_granted_mode; req->l_policy_data.l_flock.pid = lock->l_policy_data.l_flock.pid; req->l_policy_data.l_flock.start = lock->l_policy_data.l_flock.start; req->l_policy_data.l_flock.end = lock->l_policy_data.l_flock.end; *flags |= <API key>; return LDLM_ITER_STOP; } <API key>(res, &res->lr_waiting, req); *flags |= <API key>; return LDLM_ITER_STOP; } if (reprocess_failed) return LDLM_ITER_CONTINUE; } if (*flags & LDLM_FL_TEST_LOCK) { ldlm_flock_destroy(req, mode, *flags); req->l_req_mode = LCK_NL; *flags |= <API key>; return LDLM_ITER_STOP; } /* Scan the locks owned by this process that overlap this request. * We may have to merge or split existing locks. */ if (!ownlocks) ownlocks = &res->lr_granted; <API key>(ownlocks, tmp, &res->lr_granted) { lock = list_entry(ownlocks, struct ldlm_lock, l_res_link); if (!<API key>(lock, new)) break; if (lock->l_granted_mode == mode) { /* If the modes are the same then we need to process * locks that overlap OR adjoin the new lock. The extra * logic condition is necessary to deal with arithmetic * overflow and underflow. 
*/ if ((new->l_policy_data.l_flock.start > (lock->l_policy_data.l_flock.end + 1)) && (lock->l_policy_data.l_flock.end != OBD_OBJECT_EOF)) continue; if ((new->l_policy_data.l_flock.end < (lock->l_policy_data.l_flock.start - 1)) && (lock->l_policy_data.l_flock.start != 0)) break; if (new->l_policy_data.l_flock.start < lock->l_policy_data.l_flock.start) { lock->l_policy_data.l_flock.start = new->l_policy_data.l_flock.start; } else { new->l_policy_data.l_flock.start = lock->l_policy_data.l_flock.start; } if (new->l_policy_data.l_flock.end > lock->l_policy_data.l_flock.end) { lock->l_policy_data.l_flock.end = new->l_policy_data.l_flock.end; } else { new->l_policy_data.l_flock.end = lock->l_policy_data.l_flock.end; } if (added) { ldlm_flock_destroy(lock, mode, *flags); } else { new = lock; added = 1; } continue; } if (new->l_policy_data.l_flock.start > lock->l_policy_data.l_flock.end) continue; if (new->l_policy_data.l_flock.end < lock->l_policy_data.l_flock.start) break; ++overlaps; if (new->l_policy_data.l_flock.start <= lock->l_policy_data.l_flock.start) { if (new->l_policy_data.l_flock.end < lock->l_policy_data.l_flock.end) { lock->l_policy_data.l_flock.start = new->l_policy_data.l_flock.end + 1; break; } ldlm_flock_destroy(lock, lock->l_req_mode, *flags); continue; } if (new->l_policy_data.l_flock.end >= lock->l_policy_data.l_flock.end) { lock->l_policy_data.l_flock.end = new->l_policy_data.l_flock.start - 1; continue; } /* split the existing lock into two locks */ /* if this is an F_UNLCK operation then we could avoid * allocating a new lock and use the req lock passed in * with the request but this would complicate the reply * processing since updates to req get reflected in the * reply. The client side replays the lock request so * it must see the original lock data in the reply. */ /* XXX - if ldlm_lock_new() can sleep we should * release the lr_lock, allocate the new lock, * and restart processing this lock. 
*/ if (!new2) { unlock_res_and_lock(req); new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK, lock->l_granted_mode, &null_cbs, NULL, 0, LVB_T_NONE); lock_res_and_lock(req); if (IS_ERR(new2)) { ldlm_flock_destroy(req, lock->l_granted_mode, *flags); *err = PTR_ERR(new2); return LDLM_ITER_STOP; } goto reprocess; } splitted = 1; new2->l_granted_mode = lock->l_granted_mode; new2->l_policy_data.l_flock.pid = new->l_policy_data.l_flock.pid; new2->l_policy_data.l_flock.owner = new->l_policy_data.l_flock.owner; new2->l_policy_data.l_flock.start = lock->l_policy_data.l_flock.start; new2->l_policy_data.l_flock.end = new->l_policy_data.l_flock.start - 1; lock->l_policy_data.l_flock.start = new->l_policy_data.l_flock.end + 1; new2->l_conn_export = lock->l_conn_export; if (lock->l_export) { new2->l_export = <API key>(lock->l_export, new2); if (new2->l_export->exp_lock_hash && hlist_unhashed(&new2->l_exp_hash)) cfs_hash_add(new2->l_export->exp_lock_hash, &new2->l_remote_handle, &new2->l_exp_hash); } if (*flags == <API key>) <API key>(new2, lock->l_granted_mode); /* insert new2 at lock */ <API key>(res, ownlocks, new2); LDLM_LOCK_RELEASE(new2); break; } /* if new2 is created but never used, destroy it*/ if (splitted == 0 && new2) <API key>(new2); /* At this point we're granting the lock request. */ req->l_granted_mode = req->l_req_mode; if (!added) { list_del_init(&req->l_res_link); /* insert new lock before ownlocks in list. */ <API key>(res, ownlocks, req); } if (*flags != <API key>) { /* The only one possible case for client-side calls flock * policy function is <API key> inside which * carries <API key> flag. */ CERROR("Illegal parameter for client-side-only module.\n"); LBUG(); } /* In case we're reprocessing the requested lock we can't destroy * it until after calling <API key>() above so that laawi() * can bump the reference count on \a req. Otherwise \a req * could be freed before the completion AST can be sent. 
*/ if (added) ldlm_flock_destroy(req, mode, *flags); ldlm_resource_dump(D_INFO, res); return LDLM_ITER_CONTINUE; } struct <API key> { struct ldlm_lock *fwd_lock; int fwd_generation; }; static void <API key>(void *data) { struct ldlm_lock *lock; lock = ((struct <API key> *)data)->fwd_lock; lock_res_and_lock(lock); /* client side - set flag to prevent lock from being put on LRU list */ ldlm_set_cbpending(lock); unlock_res_and_lock(lock); } /** * Flock completion callback function. * * \param lock [in,out]: A lock to be handled * \param flags [in]: flags * \param *data [in]: <API key>() will use ldlm_cb_set_arg * * \retval 0 : success * \retval <0 : failure */ int <API key>(struct ldlm_lock *lock, __u64 flags, void *data) { struct file_lock *getlk = lock->l_ast_data; struct obd_device *obd; struct obd_import *imp = NULL; struct <API key> fwd; struct l_wait_info lwi; enum ldlm_error err; int rc = 0; OBD_FAIL_TIMEOUT(<API key>, 4); if (OBD_FAIL_PRECHECK(<API key>)) { lock_res_and_lock(lock); lock->l_flags |= LDLM_FL_FAIL_LOC; unlock_res_and_lock(lock); OBD_FAIL_TIMEOUT(<API key>, 4); } CDEBUG(D_DLMTRACE, "flags: 0x%llx data: %p getlk: %p\n", flags, data, getlk); LASSERT(flags != <API key>); if (flags & LDLM_FL_FAILED) goto granted; if (!(flags & <API key>)) { if (!data) /* mds granted the lock in the reply */ goto granted; /* CP AST RPC: lock get granted, wake it up */ wake_up(&lock->l_waitq); return 0; } LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, sleeping"); fwd.fwd_lock = lock; obd = class_exp2obd(lock->l_conn_export); /* if this is a local lock, there is no import */ if (obd) imp = obd->u.cli.cl_import; if (imp) { spin_lock(&imp->imp_lock); fwd.fwd_generation = imp->imp_generation; spin_unlock(&imp->imp_lock); } lwi = LWI_TIMEOUT_INTR(0, NULL, <API key>, &fwd); /* Go to sleep until the lock is granted. 
*/ rc = l_wait_event(lock->l_waitq, <API key>(lock), &lwi); if (rc) { LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)", rc); return rc; } granted: OBD_FAIL_TIMEOUT(<API key>, 10); if (OBD_FAIL_PRECHECK(<API key>)) { lock_res_and_lock(lock); /* DEADLOCK is always set with CBPENDING */ lock->l_flags |= <API key> | LDLM_FL_CBPENDING; unlock_res_and_lock(lock); OBD_FAIL_TIMEOUT(<API key>, 4); } if (OBD_FAIL_PRECHECK(<API key>)) { lock_res_and_lock(lock); /* DEADLOCK is always set with CBPENDING */ lock->l_flags |= LDLM_FL_FAIL_LOC | <API key> | LDLM_FL_CBPENDING; unlock_res_and_lock(lock); OBD_FAIL_TIMEOUT(<API key>, 4); } lock_res_and_lock(lock); /* * Protect against race where lock could have been just destroyed * due to overlap in <API key>(). */ if (ldlm_is_destroyed(lock)) { unlock_res_and_lock(lock); LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed"); /* * An error is still to be returned, to propagate it up to * <API key>() caller. */ return -EIO; } /* ldlm_lock_enqueue() has already placed lock on the granted list. */ <API key>(lock); /* * Import invalidation. We need to actually release the lock * references being held, so that it can go away. No point in * holding the lock even if app still believes it has it, since * server already dropped it anyway. Only for granted locks too. */ /* Do the same for DEADLOCK'ed locks. */ if (ldlm_is_failed(lock) || <API key>(lock)) { int mode; if (flags & LDLM_FL_TEST_LOCK) LASSERT(ldlm_is_test_lock(lock)); if (ldlm_is_test_lock(lock) || <API key>(lock)) mode = getlk->fl_type; else mode = lock->l_granted_mode; if (<API key>(lock)) { LDLM_DEBUG(lock, "client-side enqueue deadlock received"); rc = -EDEADLK; } ldlm_flock_destroy(lock, mode, <API key>); unlock_res_and_lock(lock); /* Need to wake up the waiter if we were evicted */ wake_up(&lock->l_waitq); /* * An error is still to be returned, to propagate it up to * <API key>() caller. */ return rc ? 
: -EIO; } LDLM_DEBUG(lock, "client-side enqueue granted"); if (flags & LDLM_FL_TEST_LOCK) { /* fcntl(F_GETLK) request */ /* The old mode was saved in getlk->fl_type so that if the mode * in the lock changes we can decref the appropriate refcount. */ LASSERT(ldlm_is_test_lock(lock)); ldlm_flock_destroy(lock, getlk->fl_type, <API key>); switch (lock->l_granted_mode) { case LCK_PR: getlk->fl_type = F_RDLCK; break; case LCK_PW: getlk->fl_type = F_WRLCK; break; default: getlk->fl_type = F_UNLCK; } getlk->fl_pid = -(pid_t)lock->l_policy_data.l_flock.pid; getlk->fl_start = (loff_t)lock->l_policy_data.l_flock.start; getlk->fl_end = (loff_t)lock->l_policy_data.l_flock.end; } else { __u64 noreproc = <API key>; /* We need to reprocess the lock to do merges or splits * with existing locks owned by this process. */ <API key>(lock, &noreproc, 1, &err, NULL); } unlock_res_and_lock(lock); return rc; } EXPORT_SYMBOL(<API key>); void <API key>(const union <API key> *wpolicy, union ldlm_policy_data *lpolicy) { lpolicy->l_flock.start = wpolicy->l_flock.lfw_start; lpolicy->l_flock.end = wpolicy->l_flock.lfw_end; lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid; lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner; } void <API key>(const union ldlm_policy_data *lpolicy, union <API key> *wpolicy) { memset(wpolicy, 0, sizeof(*wpolicy)); wpolicy->l_flock.lfw_start = lpolicy->l_flock.start; wpolicy->l_flock.lfw_end = lpolicy->l_flock.end; wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid; wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner; }
// The software source and binaries included in this development package are // Communications, Inc., its subsidiaries, or the respective owner including // agreement(s) terms. // <summary> // Wifi driver for AR6002 // </summary> // Double-link list definitions (adapted from Atheros SDIO stack) // Author(s): ="Atheros" #ifndef __DL_LIST_H___ #define __DL_LIST_H___ #include "a_osapi.h" #define A_CONTAINING_STRUCT(address, struct_type, field_name)\ ((struct_type *)((A_UINT32)(address) - (A_UINT32)(&((struct_type *)0)->field_name))) /* list functions */ /* pointers for the list */ typedef struct _DL_LIST { struct _DL_LIST *pPrev; struct _DL_LIST *pNext; }DL_LIST, *PDL_LIST; /* * DL_LIST_INIT , initialize doubly linked list */ #define DL_LIST_INIT(pList)\ {(pList)->pPrev = pList; (pList)->pNext = pList;} /* faster macro to init list and add a single item */ #define <API key>(pList,pItem) \ { (pList)->pPrev = (pItem); \ (pList)->pNext = (pItem); \ (pItem)->pNext = (pList); \ (pItem)->pPrev = (pList); \ } #define DL_LIST_IS_EMPTY(pList) (((pList)->pPrev == (pList)) && ((pList)->pNext == (pList))) #define <API key>(pList) (pList)->pNext #define <API key>(pList) (pList)->pPrev /* * ITERATE_OVER_LIST pStart is the list, pTemp is a temp list member * NOT: do not use this function if the items in the list are deleted inside the * iteration loop */ #define ITERATE_OVER_LIST(pStart, pTemp) \ for((pTemp) =(pStart)->pNext; pTemp != (pStart); (pTemp) = (pTemp)->pNext) /* safe iterate macro that allows the item to be removed from the list * the iteration continues to the next item in the list */ #define <API key>(pStart,pItem,st,offset) \ { \ PDL_LIST pTemp; \ pTemp = (pStart)->pNext; \ while (pTemp != (pStart)) { \ (pItem) = A_CONTAINING_STRUCT(pTemp,st,offset); \ pTemp = pTemp->pNext; \ #define ITERATE_END }} /* * DL_ListInsertTail - insert pAdd to the end of the list */ static INLINE PDL_LIST DL_ListInsertTail(PDL_LIST pList, PDL_LIST pAdd) { /* insert at tail */ pAdd->pPrev = 
pList->pPrev; pAdd->pNext = pList; pList->pPrev->pNext = pAdd; pList->pPrev = pAdd; return pAdd; } /* * DL_ListInsertHead - insert pAdd into the head of the list */ static INLINE PDL_LIST DL_ListInsertHead(PDL_LIST pList, PDL_LIST pAdd) { /* insert at head */ pAdd->pPrev = pList; pAdd->pNext = pList->pNext; pList->pNext->pPrev = pAdd; pList->pNext = pAdd; return pAdd; } #define DL_ListAdd(pList,pItem) DL_ListInsertHead((pList),(pItem)) /* * DL_ListRemove - remove pDel from list */ static INLINE PDL_LIST DL_ListRemove(PDL_LIST pDel) { pDel->pNext->pPrev = pDel->pPrev; pDel->pPrev->pNext = pDel->pNext; /* point back to itself just to be safe, incase remove is called again */ pDel->pNext = pDel; pDel->pPrev = pDel; return pDel; } /* * <API key> - get a list item from the head */ static INLINE PDL_LIST <API key>(PDL_LIST pList) { PDL_LIST pItem = NULL; if (pList->pNext != pList) { pItem = pList->pNext; /* remove the first item from head */ DL_ListRemove(pItem); } return pItem; } static INLINE PDL_LIST <API key>(PDL_LIST pList) { PDL_LIST pItem = NULL; if (pList->pPrev != pList) { pItem = pList->pPrev; /* remove the item from tail */ DL_ListRemove(pItem); } return pItem; } /* transfer src list items to the tail of the destination list */ static INLINE void <API key>(PDL_LIST pDest, PDL_LIST pSrc) { /* only concatenate if src is not empty */ if (!DL_LIST_IS_EMPTY(pSrc)) { /* cut out circular list in src and re-attach to end of dest */ pSrc->pPrev->pNext = pDest; pSrc->pNext->pPrev = pDest->pPrev; pDest->pPrev->pNext = pSrc->pNext; pDest->pPrev = pSrc->pPrev; /* terminate src list, it is now empty */ pSrc->pPrev = pSrc; pSrc->pNext = pSrc; } } #endif
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/regulator/consumer.h>

/* Per-device state. */
struct adc081c {
	struct i2c_client *i2c;		/* bus handle used for all register reads */
	struct regulator *ref;		/* "vref" supply; its voltage defines full scale */

	/* 8, 10 or 12 */
	int bits;
};

#define REG_CONV_RES 0x00	/* conversion result register */

/*
 * adc081c_read_raw - IIO read callback.
 *
 * RAW: one conversion-result register read; the register always carries 12
 * result bits, lower-resolution parts left-justify, so the unused LSBs are
 * shifted out.  SCALE: reported as vref(mV) / 2^bits via a fractional-log2
 * return code.
 */
static int adc081c_read_raw(struct iio_dev *iio,
			    struct iio_chan_spec const *channel, int *value,
			    int *shift, long mask)
{
	struct adc081c *adc = iio_priv(iio);
	int err;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		err = <API key>(adc->i2c, REG_CONV_RES);
		if (err < 0)
			return err;

		*value = (err & 0xFFF) >> (12 - adc->bits);
		return IIO_VAL_INT;

	case IIO_CHAN_INFO_SCALE:
		err = <API key>(adc->ref);
		if (err < 0)
			return err;

		/* regulator voltage is in uV; convert to mV for the scale */
		*value = err / 1000;
		*shift = adc->bits;
		return <API key>;

	default:
		break;
	}

	return -EINVAL;
}

#define ADCxx1C_CHAN(_bits) {					\
	.type = IIO_VOLTAGE,					\
	.<API key> = BIT(IIO_CHAN_INFO_SCALE),			\
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
	.scan_type = {						\
		.sign = 'u',					\
		.realbits = (_bits),				\
		.storagebits = 16,				\
		.shift = 12 - (_bits),				\
		.endianness = IIO_CPU,				\
	},							\
}

/*
 * Define the per-model channel array (voltage channel + soft timestamp).
 * Note: no trailing '\' after the closing "};" -- a stray continuation
 * there would splice the next preprocessor line into this macro body.
 */
#define <API key>(_name, _bits)					\
static const struct iio_chan_spec _name ## _channels[] = {	\
	ADCxx1C_CHAN((_bits)),					\
	<API key>(1),						\
};

#define <API key> 2

/* Per-model description: channel table and converter resolution. */
struct adcxx1c_model {
	const struct iio_chan_spec *channels;
	int bits;
};

#define ADCxx1C_MODEL(_name, _bits)		\
	{					\
		.channels = _name ## _channels,	\
		.bits = (_bits),		\
	}

<API key>(adc081c, 8);
<API key>(adc101c, 10);
<API key>(adc121c, 12);

/* Model ids are indexes in _models array */
enum adcxx1c_model_id {
	ADC081C = 0,
	ADC101C = 1,
	ADC121C = 2,
};

static struct adcxx1c_model adcxx1c_models[] = {
	ADCxx1C_MODEL(adc081c, 8),
	ADCxx1C_MODEL(adc101c, 10),
	ADCxx1C_MODEL(adc121c, 12),
};

static const struct iio_info adc081c_info = {
	.read_raw = adc081c_read_raw,
	.driver_module = THIS_MODULE,
};

/*
 * Triggered-buffer handler: read one sample, push it with a timestamp,
 * then re-arm the trigger.  On read failure the sample is skipped but the
 * trigger is still notified.
 */
static irqreturn_t <API key>(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct adc081c *data = iio_priv(indio_dev);
	/*
	 * 2 bytes data + 6 bytes padding + 8 bytes timestamp; force 8-byte
	 * alignment so the s64 timestamp appended at offset 8 is naturally
	 * aligned (a plain u16 array only guarantees 2-byte alignment).
	 */
	u16 buf[8] __aligned(8);
	int ret;

	ret = <API key>(data->i2c, REG_CONV_RES);
	if (ret < 0)
		goto out;
	buf[0] = ret;
	<API key>(indio_dev, buf, iio_get_time_ns());
out:
	<API key>(indio_dev->trig);
	return IRQ_HANDLED;
}

/*
 * adc081c_probe - bind the driver to an I2C client.
 *
 * Allocates the IIO device, enables the vref regulator, sets up the
 * triggered buffer and registers the device.  Errors unwind in reverse
 * order via gotos; the regulator stays enabled for the device's lifetime
 * and is released in adc081c_remove().
 */
static int adc081c_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct iio_dev *iio;
	struct adc081c *adc;
	struct adcxx1c_model *model = &adcxx1c_models[id->driver_data];
	int err;

	if (!<API key>(client->adapter, <API key>))
		return -EOPNOTSUPP;

	iio = <API key>(&client->dev, sizeof(*adc));
	if (!iio)
		return -ENOMEM;

	adc = iio_priv(iio);
	adc->i2c = client;
	adc->bits = model->bits;

	adc->ref = devm_regulator_get(&client->dev, "vref");
	if (IS_ERR(adc->ref))
		return PTR_ERR(adc->ref);

	err = regulator_enable(adc->ref);
	if (err < 0)
		return err;

	iio->dev.parent = &client->dev;
	iio->name = dev_name(&client->dev);
	iio->modes = INDIO_DIRECT_MODE;
	iio->info = &adc081c_info;

	iio->channels = model->channels;
	iio->num_channels = <API key>;

	err = <API key>(iio, NULL, <API key>, NULL);
	if (err < 0) {
		dev_err(&client->dev, "iio triggered buffer setup failed\n");
		goto <API key>;
	}

	err = iio_device_register(iio);
	if (err < 0)
		goto err_buffer_cleanup;

	i2c_set_clientdata(client, iio);

	return 0;

err_buffer_cleanup:
	<API key>(iio);
<API key>:
	regulator_disable(adc->ref);

	return err;
}

/* Tear down in reverse order of probe. */
static int adc081c_remove(struct i2c_client *client)
{
	struct iio_dev *iio = i2c_get_clientdata(client);
	struct adc081c *adc = iio_priv(iio);

	<API key>(iio);
	<API key>(iio);
	regulator_disable(adc->ref);

	return 0;
}

static const struct i2c_device_id adc081c_id[] = {
	{ "adc081c", ADC081C },
	{ "adc101c", ADC101C },
	{ "adc121c", ADC121C },
	{ }
};
MODULE_DEVICE_TABLE(i2c, adc081c_id);

#ifdef CONFIG_OF
static const struct of_device_id adc081c_of_match[] = {
	{ .compatible = "ti,adc081c" },
	{ .compatible = "ti,adc101c" },
	{ .compatible = "ti,adc121c" },
	{ }
};
MODULE_DEVICE_TABLE(of, adc081c_of_match);
#endif

static struct i2c_driver adc081c_driver = {
	.driver = {
		.name = "adc081c",
		.of_match_table = of_match_ptr(adc081c_of_match),
	},
	.probe = adc081c_probe,
	.remove = adc081c_remove,
	.id_table = adc081c_id,
};
module_i2c_driver(adc081c_driver);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("Texas Instruments ADC081C/ADC101C/ADC121C driver");
MODULE_LICENSE("GPL v2");
<!DOCTYPE html>
<meta charset="utf-8">
<title>cross-origin webvtt returned by service worker is detected</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/common/get-host-info.sub.js"></script>
<script src="resources/test-helpers.sub.js?pipe=sub"></script>
<body>
<script>
// This file tests responses for WebVTT text track from a service worker. It
// creates an iframe with a <track> element, controlled by a service worker.
// Each test tries to load a text track, the service worker intercepts the
// requests and responds with opaque or non-opaque responses. As the
// crossorigin attribute is not set, request's mode is always "same-origin",
// and as specified in https://fetch.spec.whatwg.org/#http-fetch,
// a response from a service worker whose type is neither "basic" nor
// "default" is rejected.

const host_info = get_host_info();
const kScript = 'resources/<API key>.js';
// Add '?ignore' so the service worker falls back for the navigation.
const kScope = 'resources/vtt-frame.html?ignore';
let frame;

// Points track.src at |url| inside the controlled iframe and resolves with
// 'load event' or 'error event', whichever fires first.
function load_track(url) {
  const track = frame.contentDocument.querySelector('track');
  const result = new Promise((resolve, reject) => {
      track.onload = (e => { resolve('load event'); });
      track.onerror = (e => { resolve('error event'); });
    });
  track.src = url;
  // Setting mode to hidden seems needed, or else the text track requests don't
  // occur.
  track.track.mode = 'hidden';
  return result;
}

promise_test(t => {
    return <API key>(t, kScript, kScope)
      .then(registration => {
          promise_test(() => {
              frame.remove();
              return registration.unregister();
            }, 'restore global state');
          return wait_for_state(t, registration.installing, 'activated');
        })
      .then(() => { return with_iframe(kScope); })
      .then(f => { frame = f; })
  }, 'initialize global state');

promise_test(t => {
    let url = '/media/foo.vtt';
    // Add '?url' and tell the service worker to fetch a same-origin URL.
    url += '?url=' + host_info.HTTPS_ORIGIN + '/media/foo.vtt';
    return load_track(url)
      .then(result => { assert_equals(result, 'load event'); });
  }, 'same-origin text track should load');

promise_test(t => {
    let url = '/media/foo.vtt';
    // Add '?url' and tell the service worker to fetch a cross-origin URL.
    url += '?url=' + get_host_info().HTTPS_REMOTE_ORIGIN + '/media/foo.vtt';
    return load_track(url)
      .then(result => { assert_equals(result, 'error event'); });
  }, 'cross-origin text track with no-cors request should not load');

promise_test(t => {
    let url = '/media/foo.vtt';
    // Add '?url' and tell the service worker to fetch a cross-origin URL that
    // doesn't support CORS.
    url += '?url=' + get_host_info().HTTPS_REMOTE_ORIGIN +
        '/media/foo-no-cors.vtt';
    // Add '&mode' to tell the service worker to do a CORS request.
    url += '&mode=cors';
    return load_track(url)
      .then(result => { assert_equals(result, 'error event'); });
  }, 'cross-origin text track with rejected cors request should not load');

promise_test(t => {
    let url = '/media/foo.vtt';
    // Add '?url' and tell the service worker to fetch a cross-origin URL.
    url += '?url=' + get_host_info().HTTPS_REMOTE_ORIGIN + '/media/foo.vtt';
    // Add '&mode' to tell the service worker to do a CORS request.
    url += '&mode=cors';
    // Add '&credentials=same-origin' to allow <API key>=* so
    // that CORS will succeed if the server approves it.
    url += '&credentials=same-origin';
    return load_track(url)
      .then(result => { assert_equals(result, 'error event'); });
  }, 'cross-origin text track with approved cors request should not load');

// Redirect tests.

promise_test(t => {
    let url = '/media/foo.vtt';
    // Add '?url' and tell the service worker to fetch a same-origin URL that redirects...
    const redirector_url = host_info.HTTPS_ORIGIN + base_path() +
        'resources/redirect.py?Redirect=';
    // ... to a same-origin URL.
    const redirect_target = host_info.HTTPS_ORIGIN + '/media/foo.vtt';
    url += '?url=' +
        encodeURIComponent(redirector_url + encodeURIComponent(redirect_target));
    return load_track(url)
      .then(result => { assert_equals(result, 'load event'); });
  }, 'same-origin text track that redirects same-origin should load');

promise_test(t => {
    let url = '/media/foo.vtt';
    // Add '?url' and tell the service worker to fetch a same-origin URL that redirects...
    const redirector_url = host_info.HTTPS_ORIGIN + base_path() +
        'resources/redirect.py?Redirect=';
    // ... to a cross-origin URL.
    const redirect_target = host_info.HTTPS_REMOTE_ORIGIN + '/media/foo.vtt';
    url += '?url=' +
        encodeURIComponent(redirector_url + encodeURIComponent(redirect_target));
    return load_track(url)
      .then(result => { assert_equals(result, 'error event'); });
  }, 'same-origin text track that redirects cross-origin should not load');

promise_test(t => {
    let url = '/media/foo.vtt';
    // Add '?url' and tell the service worker to fetch a same-origin URL that redirects...
    const redirector_url = host_info.HTTPS_ORIGIN + base_path() +
        'resources/redirect.py?Redirect=';
    // ... to a cross-origin URL.
    const redirect_target = host_info.HTTPS_REMOTE_ORIGIN +
        '/media/foo-no-cors.vtt';
    url += '?url=' +
        encodeURIComponent(redirector_url + encodeURIComponent(redirect_target));
    // Add '&mode' to tell the service worker to do a CORS request.
    url += '&mode=cors';
    // Add '&credentials=same-origin' to allow <API key>=* so
    // that CORS will succeed if the server approves it.
    url += '&credentials=same-origin';
    return load_track(url)
      .then(result => { assert_equals(result, 'error event'); });
  }, 'same-origin text track that redirects to a cross-origin text track with rejected cors should not load');

promise_test(t => {
    let url = '/media/foo.vtt';
    // Add '?url' and tell the service worker to fetch a same-origin URL that redirects...
    const redirector_url = host_info.HTTPS_ORIGIN + base_path() +
        'resources/redirect.py?Redirect=';
    // ... to a cross-origin URL.
    const redirect_target = host_info.HTTPS_REMOTE_ORIGIN + '/media/foo.vtt';
    url += '?url=' +
        encodeURIComponent(redirector_url + encodeURIComponent(redirect_target));
    // Add '&mode' to tell the service worker to do a CORS request.
    url += '&mode=cors';
    // Add '&credentials=same-origin' to allow <API key>=* so
    // that CORS will succeed if the server approves it.
    url += '&credentials=same-origin';
    return load_track(url)
      .then(result => { assert_equals(result, 'error event'); });
  }, 'same-origin text track that redirects to a cross-origin text track with approved cors should not load');
</script>
</body>
#ifndef _YUV_LIBYUV_h
#define _YUV_LIBYUV_h

/* Umbrella header: forwards to the bundled libyuv API header. */
#include "<API key>.h"

#endif
<html>
<head>
<!-- Test shell: loads the js-test harness and shared helpers; the actual
     test logic lives in resources/removed.js. -->
<script src="../../resources/js-test.js"></script>
<script src="resources/shared.js"></script>
</head>
<body>
<script src="resources/removed.js"></script>
</body>
</html>
// Use of this source code is governed by a BSD-style // Test parse.cc, dump.cc, and tostring.cc. #include <string> #include <vector> #include "util/test.h" #include "re2/regexp.h" namespace re2 { // Test that overflowed ref counts work. TEST(Regexp, BigRef) { Regexp* re; re = Regexp::Parse("x", Regexp::NoParseFlags, NULL); for (int i = 0; i < 100000; i++) re->Incref(); for (int i = 0; i < 100000; i++) re->Decref(); CHECK_EQ(re->Ref(), 1); re->Decref(); } // Test that very large Concats work. // Depends on overflowed ref counts working. TEST(Regexp, BigConcat) { Regexp* x; x = Regexp::Parse("x", Regexp::NoParseFlags, NULL); vector<Regexp*> v(90000, x); // ToString bails out at 100000 for (int i = 0; i < v.size(); i++) x->Incref(); CHECK_EQ(x->Ref(), 1 + v.size()) << x->Ref(); Regexp* re = Regexp::Concat(&v[0], v.size(), Regexp::NoParseFlags); CHECK_EQ(re->ToString(), string(v.size(), 'x')); re->Decref(); CHECK_EQ(x->Ref(), 1) << x->Ref(); x->Decref(); } TEST(Regexp, NamedCaptures) { Regexp* x; RegexpStatus status; x = Regexp::Parse( "(?P<g1>a+)|(e)(?P<g2>w*)+(?P<g1>b+)", Regexp::PerlX, &status); EXPECT_TRUE(status.ok()); EXPECT_EQ(4, x->NumCaptures()); const map<string, int>* have = x->NamedCaptures(); EXPECT_TRUE(have != NULL); EXPECT_EQ(2, have->size()); // there are only two named groups in // the regexp: 'g1' and 'g2'. map<string, int> want; want["g1"] = 1; want["g2"] = 3; EXPECT_EQ(want, *have); x->Decref(); delete have; } TEST(Regexp, CaptureNames) { Regexp* x; RegexpStatus status; x = Regexp::Parse( "(?P<g1>a+)|(e)(?P<g2>w*)+(?P<g1>b+)", Regexp::PerlX, &status); EXPECT_TRUE(status.ok()); EXPECT_EQ(4, x->NumCaptures()); const map<int, string>* have = x->CaptureNames(); EXPECT_TRUE(have != NULL); EXPECT_EQ(3, have->size()); map<int, string> want; want[1] = "g1"; want[3] = "g2"; want[4] = "g1"; EXPECT_EQ(want, *have); x->Decref(); delete have; } } // namespace re2
<?php
/** <API key> */

require_once 'Zend/Log/Filter/Abstract.php';

class <API key> extends <API key>
{
    /**
     * Priority threshold the filter compares events against.
     * @var integer
     */
    protected $_priority;

    /**
     * Comparison operator in version_compare() syntax (e.g. '<=', '>').
     * @var string
     */
    protected $_operator;

    /**
     * Filter logging by $priority.  By default, it will accept any log
     * event whose priority value is less than or equal to $priority.
     *
     * @param  integer  $priority  Priority
     * @param  string   $operator  Comparison operator
     * @return void
     * @throws Zend_Log_Exception
     */
    public function __construct($priority, $operator = null)
    {
        if (! is_int($priority)) {
            require_once 'Zend/Log/Exception.php';
            throw new Zend_Log_Exception('Priority must be an integer');
        }

        $this->_priority = $priority;
        // Lower numeric priorities are more severe, hence the '<=' default.
        $this->_operator = $operator === null ? '<=' : $operator;
    }

    /**
     * Create a new instance of <API key>
     *
     * @param  array|Zend_Config $config
     * @return <API key>
     */
    static public function factory($config)
    {
        $config = self::_parseConfig($config);
        $config = array_merge(array(
            'priority' => null,
            'operator' => null,
        ), $config);

        // Add support for constants
        // (e.g. the string 'Zend_Log::WARN' resolved via defined()/constant();
        // non-numeric, defined names are swapped for their constant values).
        if (!is_numeric($config['priority']) && isset($config['priority']) && defined($config['priority'])) {
            $config['priority'] = constant($config['priority']);
        }

        return new self(
            (int) $config['priority'],
            $config['operator']
        );
    }

    /**
     * Returns TRUE to accept the message, FALSE to block it.
     *
     * @param  array    $event    event data
     * @return boolean            accepted?
     */
    public function accept($event)
    {
        // version_compare() doubles as a generic comparison that takes the
        // operator as a string argument; priorities are single integers, for
        // which its string-wise comparison matches numeric comparison.
        return version_compare($event['priority'], $this->_priority, $this->_operator);
    }
}
THREE.XHRLoader = function ( manager ) { this.cache = new THREE.Cache(); this.manager = ( manager !== undefined ) ? manager : THREE.<API key>; }; THREE.XHRLoader.prototype = { constructor: THREE.XHRLoader, load: function ( url, onLoad, onProgress, onError ) { var scope = this; var cached = scope.cache.get( url ); if ( cached !== undefined ) { if ( onLoad ) onLoad( cached ); return; } var request = new XMLHttpRequest(); request.open( 'GET', url, true ); request.addEventListener( 'load', function ( event ) { scope.cache.add( url, this.response ); if ( onLoad ) onLoad( this.response ); scope.manager.itemEnd( url ); }, false ); if ( onProgress !== undefined ) { request.addEventListener( 'progress', function ( event ) { onProgress( event ); }, false ); } if ( onError !== undefined ) { request.addEventListener( 'error', function ( event ) { onError( event ); }, false ); } if ( this.crossOrigin !== undefined ) request.crossOrigin = this.crossOrigin; if ( this.responseType !== undefined ) request.responseType = this.responseType; request.send( null ); scope.manager.itemStart( url ); }, setResponseType: function ( value ) { this.responseType = value; }, setCrossOrigin: function ( value ) { this.crossOrigin = value; } };
import type { INoiseFactor } from "./INoiseFactor";
import type { IOptionLoader } from "../../IOptionLoader";
import type { INoiseDelay } from "./INoiseDelay";

/**
 * Options describing a noise effect; loads itself from raw option data via
 * {@link IOptionLoader}.
 */
export interface INoise extends IOptionLoader<INoise> {
    /** Delay applied before the noise takes effect. */
    delay: INoiseDelay;

    /** Whether the noise effect is enabled. */
    enable: boolean;

    /** Strength factor of the noise. */
    factor: INoiseFactor;
}
#!/bin/sh
# generate a set of ABI signatures from a shared library

SHAREDLIB="$1"

# Use an unpredictable temp file instead of a PID-based name: $$-named
# files in a shared tmp directory are vulnerable to symlink races.
# The trap guarantees cleanup even on early exit or interruption.
GDBSCRIPT=$(mktemp "${TMPDIR:-/tmp}/gdb_syms.XXXXXX") || exit 1
trap 'rm -f "$GDBSCRIPT"' EXIT

(
cat <<EOF
set height 0
set width 0
EOF

# Keep only data/text/weak symbols (nm type letters BDGTRVWS), drop
# versioned symbols and linker bookkeeping symbols, and emit one
# "echo name:" / "p name" pair per symbol for gdb to evaluate.
nm "$SHAREDLIB" |
 cut -d' ' -f2- |
 egrep '^[BDGTRVWS]' |
 grep -v @ |
 egrep -v ' (__bss_start|_edata|_init|_fini|_end)' |
 cut -c3- |
 sort |
 while read s; do
    echo "echo $s: "
    echo p $s
 done
) > "$GDBSCRIPT"

# forcing the terminal avoids a problem on Fedora12
TERM=none gdb -batch -x "$GDBSCRIPT" "$SHAREDLIB" < /dev/null
// { dg-do compile } // 2001-08-27 Benjamin Kosnik <bkoz@redhat.com> // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the // Free Software Foundation; either version 3, or (at your option) // any later version. // This library is distributed in the hope that it will be useful, // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // with this library; see the file COPYING3. If not see // 22.2.6.2 Template class money_put #include <locale> void test01() { // Check for required base class. typedef std::money_put<char> test_type; typedef std::locale::facet base_type; const test_type& obj = std::use_facet<test_type>(std::locale()); const base_type* base __attribute__((unused)) = &obj; }
#include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/i2c.h> #if defined(CONFIG_SPI) #include <linux/spi/spi.h> #endif #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/io.h> #include <asm/div64.h> #include <media/v4l2-common.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-chip-ident.h> #include <linux/videodev2.h> MODULE_AUTHOR("Bill Dirks, Justin Schoeman, Gerd Knorr"); MODULE_DESCRIPTION("misc helper functions for v4l2 device drivers"); MODULE_LICENSE("GPL"); /* * * V 4 L 2 D R I V E R H E L P E R A P I * */ /* * Video Standard Operations (contributed by Michael Schimek) */ /* Helper functions for control handling */ /* Check for correctness of the ctrl's value based on the data from struct v4l2_queryctrl and the available menu items. Note that menu_items may be NULL, in that case it is ignored. */ int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl, const char * const *menu_items) { if (qctrl->flags & <API key>) return -EINVAL; if (qctrl->flags & <API key>) return -EBUSY; if (qctrl->type == <API key>) return 0; if (qctrl->type == <API key> || qctrl->type == <API key> || qctrl->type == <API key>) return 0; if (ctrl->value < qctrl->minimum || ctrl->value > qctrl->maximum) return -ERANGE; if (qctrl->type == V4L2_CTRL_TYPE_MENU && menu_items != NULL) { if (menu_items[ctrl->value] == NULL || menu_items[ctrl->value][0] == '\0') return -EINVAL; } if (qctrl->type == <API key> && (ctrl->value & ~qctrl->maximum)) return -ERANGE; return 0; } EXPORT_SYMBOL(v4l2_ctrl_check); /* Fill in a struct v4l2_queryctrl */ int <API key>(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 step, s32 def) { const char *name; v4l2_ctrl_fill(qctrl->id, &name, &qctrl->type, &min, &max, &step, &def, &qctrl->flags); if (name == NULL) return -EINVAL; qctrl->minimum = min; qctrl->maximum = max; qctrl->step 
= step; qctrl->default_value = def; qctrl->reserved[0] = qctrl->reserved[1] = 0; strlcpy(qctrl->name, name, sizeof(qctrl->name)); return 0; } EXPORT_SYMBOL(<API key>); /* Fill in a struct v4l2_querymenu based on the struct v4l2_queryctrl and the menu. The qctrl pointer may be NULL, in which case it is ignored. If menu_items is NULL, then the menu items are retrieved using v4l2_ctrl_get_menu. */ int <API key>(struct v4l2_querymenu *qmenu, struct v4l2_queryctrl *qctrl, const char * const *menu_items) { int i; qmenu->reserved = 0; if (menu_items == NULL) menu_items = v4l2_ctrl_get_menu(qmenu->id); if (menu_items == NULL || (qctrl && (qmenu->index < qctrl->minimum || qmenu->index > qctrl->maximum))) return -EINVAL; for (i = 0; i < qmenu->index && menu_items[i]; i++) ; if (menu_items[i] == NULL || menu_items[i][0] == '\0') return -EINVAL; strlcpy(qmenu->name, menu_items[qmenu->index], sizeof(qmenu->name)); return 0; } EXPORT_SYMBOL(<API key>); /* Fill in a struct v4l2_querymenu based on the specified array of valid menu items (terminated by <API key>). Use this if there are 'holes' in the list of valid menu items. */ int <API key>(struct v4l2_querymenu *qmenu, const u32 *ids) { const char * const *menu_items = v4l2_ctrl_get_menu(qmenu->id); qmenu->reserved = 0; if (menu_items == NULL || ids == NULL) return -EINVAL; while (*ids != <API key>) { if (*ids++ == qmenu->index) { strlcpy(qmenu->name, menu_items[qmenu->index], sizeof(qmenu->name)); return 0; } } return -EINVAL; } EXPORT_SYMBOL(<API key>); /* ctrl_classes points to an array of u32 pointers, the last element is a NULL pointer. Each u32 array is a 0-terminated array of control IDs. Each array must be sorted low to high and belong to the same control class. The array of u32 pointers must also be sorted, from low class IDs to high class IDs. This function returns the first ID that follows after the given ID. When no more controls are available 0 is returned. 
*/ u32 v4l2_ctrl_next(const u32 * const * ctrl_classes, u32 id) { u32 ctrl_class = V4L2_CTRL_ID2CLASS(id); const u32 *pctrl; if (ctrl_classes == NULL) return 0; /* if no query is desired, then check if the ID is part of ctrl_classes */ if ((id & <API key>) == 0) { /* find class */ while (*ctrl_classes && V4L2_CTRL_ID2CLASS(**ctrl_classes) != ctrl_class) ctrl_classes++; if (*ctrl_classes == NULL) return 0; pctrl = *ctrl_classes; /* find control ID */ while (*pctrl && *pctrl != id) pctrl++; return *pctrl ? id : 0; } id &= V4L2_CTRL_ID_MASK; id++; /* select next control */ /* find first class that matches (or is greater than) the class of the ID */ while (*ctrl_classes && V4L2_CTRL_ID2CLASS(**ctrl_classes) < ctrl_class) ctrl_classes++; /* no more classes */ if (*ctrl_classes == NULL) return 0; pctrl = *ctrl_classes; /* find first ctrl within the class that is >= ID */ while (*pctrl && *pctrl < id) pctrl++; if (*pctrl) return *pctrl; /* we are at the end of the controls of the current class. 
*/ /* continue with next class if available */ ctrl_classes++; if (*ctrl_classes == NULL) return 0; return **ctrl_classes; } EXPORT_SYMBOL(v4l2_ctrl_next); int <API key>(const struct v4l2_dbg_match *match) { switch (match->type) { case <API key>: return match->addr == 0; default: return 0; } } EXPORT_SYMBOL(<API key>); #if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE)) int <API key>(struct i2c_client *c, const struct v4l2_dbg_match *match) { int len; if (c == NULL || match == NULL) return 0; switch (match->type) { case <API key>: if (c->driver == NULL || c->driver->driver.name == NULL) return 0; len = strlen(c->driver->driver.name); /* legacy drivers have a ' suffix, don't try to match that */ if (len && c->driver->driver.name[len - 1] == '\'') len return len && !strncmp(c->driver->driver.name, match->name, len); case <API key>: return c->addr == match->addr; default: return 0; } } EXPORT_SYMBOL(<API key>); int <API key>(struct i2c_client *c, struct v4l2_dbg_chip_ident *chip, u32 ident, u32 revision) { if (!<API key>(c, &chip->match)) return 0; if (chip->ident == V4L2_IDENT_NONE) { chip->ident = ident; chip->revision = revision; } else { chip->ident = <API key>; chip->revision = 0; } return 0; } EXPORT_SYMBOL(<API key>); /* I2C Helper functions */ void <API key>(struct v4l2_subdev *sd, struct i2c_client *client, const struct v4l2_subdev_ops *ops) { v4l2_subdev_init(sd, ops); sd->flags |= <API key>; /* the owner is the same as the i2c_client's driver owner */ sd->owner = client->driver->driver.owner; /* i2c_client and v4l2_subdev point to one another */ v4l2_set_subdevdata(sd, client); i2c_set_clientdata(client, sd); /* initialize name */ snprintf(sd->name, sizeof(sd->name), "%s %d-%04x", client->driver->driver.name, i2c_adapter_id(client->adapter), client->addr); } EXPORT_SYMBOL_GPL(<API key>); /* Load an i2c sub-device. 
*/ struct v4l2_subdev *<API key>(struct v4l2_device *v4l2_dev, struct i2c_adapter *adapter, struct i2c_board_info *info, const unsigned short *probe_addrs) { struct v4l2_subdev *sd = NULL; struct i2c_client *client; BUG_ON(!v4l2_dev); request_module(I2C_MODULE_PREFIX "%s", info->type); /* Create the i2c client */ if (info->addr == 0 && probe_addrs) client = <API key>(adapter, info, probe_addrs, NULL); else client = i2c_new_device(adapter, info); /* Note: by loading the module first we are certain that c->driver will be set if the driver was found. If the module was not loaded first, then the i2c core tries to delay-load the module for us, and then c->driver is still NULL until the module is finally loaded. This delay-load mechanism doesn't work if other drivers want to use the i2c device, so explicitly loading the module is the best alternative. */ if (client == NULL || client->driver == NULL) goto error; /* Lock the module so we can safely get the v4l2_subdev pointer */ if (!try_module_get(client->driver->driver.owner)) goto error; sd = i2c_get_clientdata(client); /* Register with the v4l2_device which increases the module's use count as well. */ if (<API key>(v4l2_dev, sd)) sd = NULL; /* Decrease the module use count to match the first try_module_get. */ module_put(client->driver->driver.owner); error: /* If we have a client but no subdev, then something went wrong and we must unregister the client. */ if (client && sd == NULL) <API key>(client); return sd; } EXPORT_SYMBOL_GPL(<API key>); struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev, struct i2c_adapter *adapter, const char *client_type, u8 addr, const unsigned short *probe_addrs) { struct i2c_board_info info; /* Setup the i2c board info with the device type and the device address. 
*/ memset(&info, 0, sizeof(info)); strlcpy(info.type, client_type, sizeof(info.type)); info.addr = addr; return <API key>(v4l2_dev, adapter, &info, probe_addrs); } EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev); /* Return i2c client address of v4l2_subdev. */ unsigned short <API key>(struct v4l2_subdev *sd) { struct i2c_client *client = v4l2_get_subdevdata(sd); return client ? client->addr : I2C_CLIENT_END; } EXPORT_SYMBOL_GPL(<API key>); /* Return a list of I2C tuner addresses to probe. Use only if the tuner addresses are unknown. */ const unsigned short *<API key>(enum v4l2_i2c_tuner_type type) { static const unsigned short radio_addrs[] = { #if defined(<API key>) || defined(<API key>) 0x10, #endif 0x60, I2C_CLIENT_END }; static const unsigned short demod_addrs[] = { 0x42, 0x43, 0x4a, 0x4b, I2C_CLIENT_END }; static const unsigned short tv_addrs[] = { 0x42, 0x43, 0x4a, 0x4b, /* tda8290 */ 0x60, 0x61, 0x62, 0x63, 0x64, I2C_CLIENT_END }; switch (type) { case ADDRS_RADIO: return radio_addrs; case ADDRS_DEMOD: return demod_addrs; case ADDRS_TV: return tv_addrs; case ADDRS_TV_WITH_DEMOD: return tv_addrs + 4; } return NULL; } EXPORT_SYMBOL_GPL(<API key>); #endif /* defined(CONFIG_I2C) */ #if defined(CONFIG_SPI) /* Load an spi sub-device. 
*/ void <API key>(struct v4l2_subdev *sd, struct spi_device *spi, const struct v4l2_subdev_ops *ops) { v4l2_subdev_init(sd, ops); sd->flags |= <API key>; /* the owner is the same as the spi_device's driver owner */ sd->owner = spi->dev.driver->owner; /* spi_device and v4l2_subdev point to one another */ v4l2_set_subdevdata(sd, spi); spi_set_drvdata(spi, sd); /* initialize name */ strlcpy(sd->name, spi->dev.driver->name, sizeof(sd->name)); } EXPORT_SYMBOL_GPL(<API key>); struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev, struct spi_master *master, struct spi_board_info *info) { struct v4l2_subdev *sd = NULL; struct spi_device *spi = NULL; BUG_ON(!v4l2_dev); if (info->modalias[0]) request_module(info->modalias); spi = spi_new_device(master, info); if (spi == NULL || spi->dev.driver == NULL) goto error; if (!try_module_get(spi->dev.driver->owner)) goto error; sd = spi_get_drvdata(spi); /* Register with the v4l2_device which increases the module's use count as well. */ if (<API key>(v4l2_dev, sd)) sd = NULL; /* Decrease the module use count to match the first try_module_get. */ module_put(spi->dev.driver->owner); error: /* If we have a client but no subdev, then something went wrong and we must unregister the client. */ if (spi && sd == NULL) <API key>(spi); return sd; } EXPORT_SYMBOL_GPL(v4l2_spi_new_subdev); #endif /* defined(CONFIG_SPI) */ /* Clamp x to be between min and max, aligned to a multiple of 2^align. min * and max don't have to be aligned, but there must be at least one valid * value. E.g., min=17,max=31,align=4 is not allowed as there are no multiples * of 16 between 17 and 31. 
 */
/*
 * Clamp x so that min <= x <= max, rounded to a multiple of 2^align.
 * NOTE(review): min and max themselves need not be aligned, but the caller
 * must guarantee at least one aligned value exists in [min, max].
 */
static unsigned int clamp_align(unsigned int x, unsigned int min,
				unsigned int max, unsigned int align)
{
	/* Bits that must be zero to be aligned */
	unsigned int mask = ~((1 << align) - 1);

	/* Round to nearest aligned value */
	if (align)
		x = (x + (1 << (align - 1))) & mask;

	/* Clamp to aligned value of min and max */
	if (x < min)
		x = (min + ~mask) & mask;
	else if (x > max)
		x = max & mask;

	return x;
}

/* Bound an image to have a width between wmin and wmax, and height between
 * hmin and hmax, inclusive.  Additionally, the width will be a multiple of
 * 2^walign, the height will be a multiple of 2^halign, and the overall size
 * (width*height) will be a multiple of 2^salign.  The image may be shrunk
 * or enlarged to fit the alignment constraints.
 *
 * The width or height maximum must not be smaller than the corresponding
 * minimum.  The alignments must not be so high there are no possible image
 * sizes within the allowed bounds.  wmin and hmin must be at least 1
 * (don't use 0).  If you don't care about a certain alignment, specify 0,
 * as 2^0 is 1 and one byte alignment is equivalent to no alignment.  If
 * you only want to adjust downward, specify a maximum that's the same as
 * the initial value.
 */
void <API key>(u32 *w, unsigned int wmin, unsigned int wmax,
	       unsigned int walign,
	       u32 *h, unsigned int hmin, unsigned int hmax,
	       unsigned int halign, unsigned int salign)
{
	*w = clamp_align(*w, wmin, wmax, walign);
	*h = clamp_align(*h, hmin, hmax, halign);

	/* Usually we don't need to align the size and are done now. */
	if (!salign)
		return;

	/* How much alignment do we have? */
	walign = __ffs(*w);
	halign = __ffs(*h);
	/* Enough to satisfy the image alignment? */
	if (walign + halign < salign) {
		/* Max walign where there is still a valid width */
		unsigned int wmaxa = __fls(wmax ^ (wmin - 1));
		/* Max halign where there is still a valid height */
		unsigned int hmaxa = __fls(hmax ^ (hmin - 1));

		/* up the smaller alignment until we have enough */
		do {
			if (halign >= hmaxa ||
			    (walign <= halign && walign < wmaxa)) {
				/* grow width alignment by one bit */
				*w = clamp_align(*w, wmin, wmax, walign + 1);
				walign = __ffs(*w);
			} else {
				/* grow height alignment by one bit */
				*h = clamp_align(*h, hmin, hmax, halign + 1);
				halign = __ffs(*h);
			}
		} while (halign + walign < salign);
	}
}
EXPORT_SYMBOL_GPL(<API key>);

/**
 * <API key> - fill description of a digital video preset
 * @preset - preset value
 * @info - pointer to struct v4l2_dv_enum_preset
 *
 * drivers can use this helper function to fill description of dv preset
 * in info.  Returns -EINVAL for an out-of-range preset or a NULL info.
 */
int <API key>(u32 preset, struct v4l2_dv_enum_preset *info)
{
	/* Table indexed directly by the V4L2_DV_* preset value. */
	static const struct v4l2_dv_preset_info {
		u16 width;
		u16 height;
		const char *name;
	} dv_presets[] = {
		{ 0, 0, "Invalid" },		/* V4L2_DV_INVALID */
		{ 720, 480, "480p@59.94" },	/* V4L2_DV_480P59_94 */
		{ 720, 576, "576p@50" },	/* V4L2_DV_576P50 */
		{ 1280, 720, "720p@24" },	/* V4L2_DV_720P24 */
		{ 1280, 720, "720p@25" },	/* V4L2_DV_720P25 */
		{ 1280, 720, "720p@30" },	/* V4L2_DV_720P30 */
		{ 1280, 720, "720p@50" },	/* V4L2_DV_720P50 */
		{ 1280, 720, "720p@59.94" },	/* V4L2_DV_720P59_94 */
		{ 1280, 720, "720p@60" },	/* V4L2_DV_720P60 */
		{ 1920, 1080, "1080i@29.97" },	/* V4L2_DV_1080I29_97 */
		{ 1920, 1080, "1080i@30" },	/* V4L2_DV_1080I30 */
		{ 1920, 1080, "1080i@25" },	/* V4L2_DV_1080I25 */
		{ 1920, 1080, "1080i@50" },	/* V4L2_DV_1080I50 */
		{ 1920, 1080, "1080i@60" },	/* V4L2_DV_1080I60 */
		{ 1920, 1080, "1080p@24" },	/* V4L2_DV_1080P24 */
		{ 1920, 1080, "1080p@25" },	/* V4L2_DV_1080P25 */
		{ 1920, 1080, "1080p@30" },	/* V4L2_DV_1080P30 */
		{ 1920, 1080, "1080p@50" },	/* V4L2_DV_1080P50 */
		{ 1920, 1080, "1080p@60" },	/* V4L2_DV_1080P60 */
	};

	if (info == NULL || preset >= ARRAY_SIZE(dv_presets))
		return -EINVAL;

	info->preset = preset;
	info->width = dv_presets[preset].width;
	info->height = dv_presets[preset].height;
	strlcpy(info->name, dv_presets[preset].name, sizeof(info->name));
	return 0;
}
EXPORT_SYMBOL_GPL(<API key>);

/**
 * <API key> - check if two timings match
 * @t1 - compare this v4l2_dv_timings struct...
 * @t2 - with this struct.
 * @pclock_delta - the allowed pixelclock deviation.
 *
 * Compare t1 with t2 with a given margin of error for the pixelclock.
 * Note that hsync/hbackporch are deliberately not compared here.
 */
bool <API key>(const struct v4l2_dv_timings *t1,
	       const struct v4l2_dv_timings *t2,
	       unsigned pclock_delta)
{
	/* Only BT.656/1120 timings can be compared. */
	if (t1->type != t2->type || t1->type != V4L2_DV_BT_656_1120)
		return false;
	if (t1->bt.width == t2->bt.width &&
	    t1->bt.height == t2->bt.height &&
	    t1->bt.interlaced == t2->bt.interlaced &&
	    t1->bt.polarities == t2->bt.polarities &&
	    t1->bt.pixelclock >= t2->bt.pixelclock - pclock_delta &&
	    t1->bt.pixelclock <= t2->bt.pixelclock + pclock_delta &&
	    t1->bt.hfrontporch == t2->bt.hfrontporch &&
	    t1->bt.vfrontporch == t2->bt.vfrontporch &&
	    t1->bt.vsync == t2->bt.vsync &&
	    t1->bt.vbackporch == t2->bt.vbackporch &&
	    (!t1->bt.interlaced ||
		(t1->bt.il_vfrontporch == t2->bt.il_vfrontporch &&
		 t1->bt.il_vsync == t2->bt.il_vsync &&
		 t1->bt.il_vbackporch == t2->bt.il_vbackporch)))
		return true;
	return false;
}
EXPORT_SYMBOL_GPL(<API key>);

/*
 * CVT defines
 * Based on Coordinated Video Timings Standard
 * version 1.1 September 10, 2003
 */

#define CVT_PXL_CLK_GRAN	250000	/* pixel clock granularity */

/* Normal blanking */
#define CVT_MIN_V_BPORCH	7	/* lines */
#define CVT_MIN_V_PORCH_RND	3	/* lines */
#define CVT_MIN_VSYNC_BP	550	/* min time of vsync + back porch (us) */

/* Normal blanking for CVT uses GTF to calculate horizontal blanking */
#define CVT_CELL_GRAN		8	/* character cell granularity */
#define CVT_M			600	/* blanking formula gradient */
#define CVT_C			40	/* blanking formula offset */
#define CVT_K			128	/* blanking formula scaling factor */
#define CVT_J			20	/* blanking formula scaling factor */
#define CVT_C_PRIME (((CVT_C - CVT_J) * CVT_K / 256) + CVT_J)
#define CVT_M_PRIME (CVT_K * CVT_M / 256)

/* Reduced Blanking */
#define CVT_RB_MIN_V_BPORCH	7	/* lines */
#define CVT_RB_V_FPORCH		3	/* lines */
#define CVT_RB_MIN_V_BLANK	460	/* us */
#define CVT_RB_H_SYNC		32	/* pixels */
#define CVT_RB_H_BPORCH		80	/* pixels */
#define CVT_RB_H_BLANK		160	/* pixels */

/** v4l2_detect_cvt - detect if the given timings follow the CVT standard
 * @frame_height - the total height of the frame (including blanking) in lines.
 * @hfreq - the horizontal frequency in Hz.
 * @vsync - the height of the vertical sync in lines.
 * @polarities - the horizontal and vertical polarities (same as struct
 *		v4l2_bt_timings polarities).
 * @fmt - the resulting timings.
 *
 * This function will attempt to detect if the given values correspond to a
 * valid CVT format. If so, then it will return true, and fmt will be filled
 * in with the found CVT timings.
 */
bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
		u32 polarities, struct v4l2_dv_timings *fmt)
{
	int v_fp, v_bp, h_fp, h_bp, hsync;
	int frame_width, image_height, image_width;
	bool reduced_blanking;
	unsigned pix_clk;

	/* CVT encodes the aspect ratio in the vsync width (4..7 lines). */
	if (vsync < 4 || vsync > 7)
		return false;

	/* The sync polarities select normal vs. reduced blanking. */
	if (polarities == <API key>)
		reduced_blanking = false;
	else if (polarities == <API key>)
		reduced_blanking = true;
	else
		return false;

	/* Vertical */
	if (reduced_blanking) {
		v_fp = CVT_RB_V_FPORCH;
		v_bp = (CVT_RB_MIN_V_BLANK * hfreq + 999999) / 1000000;
		v_bp -= vsync + v_fp;

		if (v_bp < CVT_RB_MIN_V_BPORCH)
			v_bp = CVT_RB_MIN_V_BPORCH;
	} else {
		v_fp = CVT_MIN_V_PORCH_RND;
		v_bp = (CVT_MIN_VSYNC_BP * hfreq + 999999) / 1000000 - vsync;

		if (v_bp < CVT_MIN_V_BPORCH)
			v_bp = CVT_MIN_V_BPORCH;
	}
	/* Active lines, rounded down to an even value. */
	image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;

	/* Aspect ratio based on vsync */
	switch (vsync) {
	case 4:
		image_width = (image_height * 4) / 3;
		break;
	case 5:
		image_width = (image_height * 16) / 9;
		break;
	case 6:
		image_width = (image_height * 16) / 10;
		break;
	case 7:
		/* special case */
		if (image_height == 1024)
			image_width = (image_height * 5) / 4;
		else if (image_height == 768)
			image_width = (image_height * 15) / 9;
		else
			return false;
		break;
	default:
		return false;
	}

	/* Width is a multiple of the 8-pixel character cell. */
	image_width = image_width & ~7;

	/* Horizontal */
	if (reduced_blanking) {
		pix_clk = (image_width + CVT_RB_H_BLANK) * hfreq;
		pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN;

		h_bp = CVT_RB_H_BPORCH;
		hsync = CVT_RB_H_SYNC;
		h_fp = CVT_RB_H_BLANK - h_bp - hsync;

		frame_width = image_width + CVT_RB_H_BLANK;
	} else {
		int h_blank;
		unsigned ideal_duty_cycle =
			CVT_C_PRIME - (CVT_M_PRIME * 1000) / hfreq;

		/* GTF-derived blanking; rounded to twice the cell size. */
		h_blank = (image_width * ideal_duty_cycle +
			   (100 - ideal_duty_cycle) / 2) /
					(100 - ideal_duty_cycle);
		h_blank = h_blank - h_blank % (2 * CVT_CELL_GRAN);

		/* Enforce a minimum 20% blanking duty cycle. */
		if (h_blank * 100 / image_width < 20) {
			h_blank = image_width / 5;
			h_blank = (h_blank + 0x7) & ~0x7;
		}

		pix_clk = (image_width + h_blank) * hfreq;
		pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN;

		h_bp = h_blank / 2;
		frame_width = image_width + h_blank;

		/* hsync is 8% of the total line, cell-aligned. */
		hsync = (frame_width * 8 + 50) / 100;
		hsync = hsync - hsync % CVT_CELL_GRAN;
		h_fp = h_blank - hsync - h_bp;
	}

	fmt->bt.polarities = polarities;
	fmt->bt.width = image_width;
	fmt->bt.height = image_height;
	fmt->bt.hfrontporch = h_fp;
	fmt->bt.vfrontporch = v_fp;
	fmt->bt.hsync = hsync;
	fmt->bt.vsync = vsync;
	fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
	fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
	fmt->bt.pixelclock = pix_clk;
	fmt->bt.standards = V4L2_DV_BT_STD_CVT;
	if (reduced_blanking)
		fmt->bt.flags |= <API key>;
	return true;
}
EXPORT_SYMBOL_GPL(v4l2_detect_cvt);

/*
 * GTF defines
 * Based on Generalized Timing Formula Standard
 * Version 1.1 September 2, 1999
 */

#define GTF_PXL_CLK_GRAN	250000	/* pixel clock granularity */
#define GTF_MIN_VSYNC_BP	550	/* min time of vsync + back porch (us) */
#define GTF_V_FP		1	/* vertical front porch (lines) */
#define GTF_CELL_GRAN		8	/* character cell granularity */

/* Default */
#define GTF_D_M			600	/* blanking formula gradient */
#define GTF_D_C			40	/* blanking formula offset */
#define GTF_D_K			128	/* blanking formula scaling factor */
#define GTF_D_J			20	/* blanking formula scaling factor */
#define GTF_D_C_PRIME ((((GTF_D_C - GTF_D_J) * GTF_D_K) / 256) + GTF_D_J)
#define GTF_D_M_PRIME ((GTF_D_K * GTF_D_M) / 256)

/* Secondary */
#define GTF_S_M			3600	/* blanking formula gradient */
#define GTF_S_C			40	/* blanking formula offset */
#define GTF_S_K			128	/* blanking formula scaling factor */
#define GTF_S_J			35	/* blanking formula scaling factor */
#define GTF_S_C_PRIME ((((GTF_S_C - GTF_S_J) * GTF_S_K) / 256) + GTF_S_J)
#define GTF_S_M_PRIME ((GTF_S_K * GTF_S_M) / 256)

/** v4l2_detect_gtf - detect if the given timings follow the GTF standard
 * @frame_height - the total height of the frame (including blanking) in lines.
 * @hfreq - the horizontal frequency in Hz.
 * @vsync - the height of the vertical sync in lines.
 * @polarities - the horizontal and vertical polarities (same as struct
 *		v4l2_bt_timings polarities).
 * @aspect - preferred aspect ratio. GTF has no method of determining the
 *		aspect ratio in order to derive the image width from the
 *		image height, so it has to be passed explicitly. Usually
 *		the native screen aspect ratio is used for this. If it
 *		is not filled in correctly, then 16:9 will be assumed.
 * @fmt - the resulting timings.
 *
 * This function will attempt to detect if the given values correspond to a
 * valid GTF format. If so, then it will return true, and fmt will be filled
 * in with the found GTF timings.
 */
bool v4l2_detect_gtf(unsigned frame_height,
		unsigned hfreq,
		unsigned vsync,
		u32 polarities,
		struct v4l2_fract aspect,
		struct v4l2_dv_timings *fmt)
{
	int pix_clk;
	int v_fp, v_bp, h_fp, h_bp, hsync;
	int frame_width, image_height, image_width;
	bool default_gtf;
	int h_blank;

	/* GTF always uses a 3-line vsync. */
	if (vsync != 3)
		return false;

	/* The sync polarities select default vs. secondary GTF curves. */
	if (polarities == <API key>)
		default_gtf = true;
	else if (polarities == <API key>)
		default_gtf = false;
	else
		return false;

	/* Vertical */
	v_fp = GTF_V_FP;
	v_bp = (GTF_MIN_VSYNC_BP * hfreq + 999999) / 1000000 - vsync;
	/* Active lines, rounded down to an even value. */
	image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;

	if (aspect.numerator == 0 || aspect.denominator == 0) {
		aspect.numerator = 16;
		aspect.denominator = 9;
	}
	image_width = ((image_height * aspect.numerator) / aspect.denominator);

	/* Horizontal */
	if (default_gtf)
		h_blank = ((image_width * GTF_D_C_PRIME * hfreq) -
			(image_width * GTF_D_M_PRIME * 1000) +
			(hfreq * (100 - GTF_D_C_PRIME) +
			 GTF_D_M_PRIME * 1000) / 2) /
			(hfreq * (100 - GTF_D_C_PRIME) +
			 GTF_D_M_PRIME * 1000);
	else
		h_blank = ((image_width * GTF_S_C_PRIME * hfreq) -
			(image_width * GTF_S_M_PRIME * 1000) +
			(hfreq * (100 - GTF_S_C_PRIME) +
			 GTF_S_M_PRIME * 1000) / 2) /
			(hfreq * (100 - GTF_S_C_PRIME) +
			 GTF_S_M_PRIME * 1000);

	h_blank = h_blank - h_blank % (2 * GTF_CELL_GRAN);
	frame_width = image_width + h_blank;

	pix_clk = (image_width + h_blank) * hfreq;
	pix_clk = pix_clk / GTF_PXL_CLK_GRAN * GTF_PXL_CLK_GRAN;

	/* hsync is 8% of the total line, cell-aligned. */
	hsync = (frame_width * 8 + 50) / 100;
	hsync = hsync - hsync % GTF_CELL_GRAN;

	h_fp = h_blank / 2 - hsync;
	h_bp = h_blank / 2;	/* NOTE(review): hbackporch below is derived
				 * from frame_width instead of h_bp. */

	fmt->bt.polarities = polarities;
	fmt->bt.width = image_width;
	fmt->bt.height = image_height;
	fmt->bt.hfrontporch = h_fp;
	fmt->bt.vfrontporch = v_fp;
	fmt->bt.hsync = hsync;
	fmt->bt.vsync = vsync;
	fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
	fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
	fmt->bt.pixelclock = pix_clk;
	fmt->bt.standards = V4L2_DV_BT_STD_GTF;
	if (!default_gtf)
		fmt->bt.flags |= <API key>;
	return true;
}
EXPORT_SYMBOL_GPL(v4l2_detect_gtf); /** <API key> - calculate the aspect ratio based on bytes * 0x15 and 0x16 from the EDID. * @hor_landscape - byte 0x15 from the EDID. * @vert_portrait - byte 0x16 from the EDID. * * Determines the aspect ratio from the EDID. * See VESA Enhanced EDID standard, release A, rev 2, section 3.6.2: * "Horizontal and Vertical Screen Size or Aspect Ratio" */ struct v4l2_fract <API key>(u8 hor_landscape, u8 vert_portrait) { struct v4l2_fract aspect = { 16, 9 }; u32 tmp; u8 ratio; /* Nothing filled in, fallback to 16:9 */ if (!hor_landscape && !vert_portrait) return aspect; /* Both filled in, so they are interpreted as the screen size in cm */ if (hor_landscape && vert_portrait) { aspect.numerator = hor_landscape; aspect.denominator = vert_portrait; return aspect; } /* Only one is filled in, so interpret them as a ratio: (val + 99) / 100 */ ratio = hor_landscape | vert_portrait; /* Change some rounded values into the exact aspect ratio */ if (ratio == 79) { aspect.numerator = 16; aspect.denominator = 9; } else if (ratio == 34) { aspect.numerator = 4; aspect.numerator = 3; } else if (ratio == 68) { aspect.numerator = 15; aspect.numerator = 9; } else { aspect.numerator = hor_landscape + 99; aspect.denominator = 100; } if (hor_landscape) return aspect; /* The aspect ratio is for portrait, so swap numerator and denominator */ tmp = aspect.denominator; aspect.denominator = aspect.numerator; aspect.numerator = tmp; return aspect; } EXPORT_SYMBOL_GPL(<API key>); const struct <API key> *<API key>( const struct v4l2_discrete_probe *probe, s32 width, s32 height) { int i; u32 error, min_error = UINT_MAX; const struct <API key> *size, *best = NULL; if (!probe) return best; for (i = 0, size = probe->sizes; i < probe->num_sizes; i++, size++) { error = abs(size->width - width) + abs(size->height - height); if (error < min_error) { min_error = error; best = size; } if (!error) break; } return best; } EXPORT_SYMBOL_GPL(<API key>);
"""Local settings and globals.""" import sys from os.path import normpath, join from .base import * # Import secrets -- not needed #sys.path.append( # abspath(join(PROJECT_ROOT, '../secrets/TimelineJS/stg')) #from secrets import * # Set static URL STATIC_URL = '/static'
// @(#)root/matrix:$Id$ // Authors: Fons Rademakers, Eddy Offermann Nov 2003 #ifndef ROOT_TMatrixFBase #define ROOT_TMatrixFBase // TMatrixFBase // // Instantation of TMatrixTBase<Float_t> // #ifndef ROOT_TMatrixTBase #include "TMatrixTBase.h" #endif #ifndef ROOT_TMatrixFBase #include "TMatrixFBase.h" #endif #endif
import _ = require("../index"); // tslint:disable-next-line:<API key> type GlobalPartial<T> = Partial<T>; declare module "../index" { type PartialObject<T> = GlobalPartial<T>; type Many<T> = T | ReadonlyArray<T>; interface LoDashStatic { /** * Creates a lodash object which wraps value to enable implicit method chain sequences. * Methods that operate on and return arrays, collections, and functions can be chained together. * Methods that retrieve a single value or may return a primitive value will automatically end the * chain sequence and return the unwrapped value. Otherwise, the value must be unwrapped with value(). * * Explicit chain sequences, which must be unwrapped with value(), may be enabled using _.chain. * * The execution of chained methods is lazy, that is, it's deferred until value() is * implicitly or explicitly called. * * Lazy evaluation allows several methods to support shortcut fusion. Shortcut fusion * is an optimization to merge iteratee calls; this avoids the creation of intermediate * arrays and can greatly reduce the number of iteratee executions. Sections of a chain * sequence qualify for shortcut fusion if the section is applied to an array and iteratees * accept only one argument. The heuristic for whether a section qualifies for shortcut * fusion is subject to change. * * Chaining is supported in custom builds as long as the value() method is directly or * indirectly included in the build. * * In addition to lodash methods, wrappers have Array and String methods. * The wrapper Array methods are: * concat, join, pop, push, shift, sort, splice, and unshift. * The wrapper String methods are: * replace and split. 
* * The wrapper methods that support shortcut fusion are: * at, compact, drop, dropRight, dropWhile, filter, find, findLast, head, initial, last, * map, reject, reverse, slice, tail, take, takeRight, takeRightWhile, takeWhile, and toArray * * The chainable wrapper methods are: * after, ary, assign, assignIn, assignInWith, assignWith, at, before, bind, bindAll, bindKey, * castArray, chain, chunk, commit, compact, concat, conforms, constant, countBy, create, * curry, debounce, defaults, defaultsDeep, defer, delay, difference, differenceBy, differenceWith, * drop, dropRight, dropRightWhile, dropWhile, extend, extendWith, fill, filter, flatMap, * flatMapDeep, flatMapDepth, flatten, flattenDeep, flattenDepth, flip, flow, flowRight, * fromPairs, functions, functionsIn, groupBy, initial, intersection, intersectionBy, intersectionWith, * invert, invertBy, invokeMap, iteratee, keyBy, keys, keysIn, map, mapKeys, mapValues, * matches, matchesProperty, memoize, merge, mergeWith, method, methodOf, mixin, negate, * nthArg, omit, omitBy, once, orderBy, over, overArgs, overEvery, overSome, partial, partialRight, * partition, pick, pickBy, plant, property, propertyOf, pull, pullAll, pullAllBy, pullAllWith, pullAt, * push, range, rangeRight, rearg, reject, remove, rest, reverse, sampleSize, set, setWith, * shuffle, slice, sort, sortBy, sortedUniq, sortedUniqBy, splice, spread, tail, take, * takeRight, takeRightWhile, takeWhile, tap, throttle, thru, toArray, toPairs, toPairsIn, * toPath, toPlainObject, transform, unary, union, unionBy, unionWith, uniq, uniqBy, uniqWith, * unset, unshift, unzip, unzipWith, update, updateWith, values, valuesIn, without, wrap, * xor, xorBy, xorWith, zip, zipObject, zipObjectDeep, and zipWith. 
* * The wrapper methods that are not chainable by default are: * add, attempt, camelCase, capitalize, ceil, clamp, clone, cloneDeep, cloneDeepWith, cloneWith, * conformsTo, deburr, defaultTo, divide, each, eachRight, endsWith, eq, escape, escapeRegExp, * every, find, findIndex, findKey, findLast, findLastIndex, findLastKey, first, floor, forEach, * forEachRight, forIn, forInRight, forOwn, forOwnRight, get, gt, gte, has, hasIn, head, * identity, includes, indexOf, inRange, invoke, isArguments, isArray, isArrayBuffer, * isArrayLike, isArrayLikeObject, isBoolean, isBuffer, isDate, isElement, isEmpty, isEqual, isEqualWith, * isError, isFinite, isFunction, isInteger, isLength, isMap, isMatch, isMatchWith, isNaN, * isNative, isNil, isNull, isNumber, isObject, isObjectLike, isPlainObject, isRegExp, * isSafeInteger, isSet, isString, isUndefined, isTypedArray, isWeakMap, isWeakSet, join, * kebabCase, last, lastIndexOf, lowerCase, lowerFirst, lt, lte, max, maxBy, mean, meanBy, * min, minBy, multiply, noConflict, noop, now, nth, pad, padEnd, padStart, parseInt, pop, * random, reduce, reduceRight, repeat, result, round, runInContext, sample, shift, size, * snakeCase, some, sortedIndex, sortedIndexBy, sortedLastIndex, sortedLastIndexBy, startCase, * startsWith, stubArray, stubFalse, stubObject, stubString, stubTrue, subtract, sum, sumBy, * template, times, toFinite, toInteger, toJSON, toLength, toLower, toNumber, toSafeInteger, * toString, toUpper, trim, trimEnd, trimStart, truncate, unescape, uniqueId, upperCase, * upperFirst, value, and words. **/ <T>(value: T): <API key><T>; /** * The semantic version number. **/ VERSION: string; /** * By default, the template delimiters used by Lo-Dash are similar to those in embedded Ruby * (ERB). Change the following template settings to use alternative delimiters. **/ templateSettings: TemplateSettings; } /** * By default, the template delimiters used by Lo-Dash are similar to those in embedded Ruby * (ERB). 
Change the following template settings to use alternative delimiters. **/ interface TemplateSettings { /** * The "escape" delimiter. **/ escape?: RegExp; /** * The "evaluate" delimiter. **/ evaluate?: RegExp; /** * An object to import into the template as local variables. **/ imports?: Dictionary<any>; /** * The "interpolate" delimiter. **/ interpolate?: RegExp; /** * Used to reference the data object in the template text. **/ variable?: string; } /** * Creates a cache object to store key/value pairs. */ interface MapCache { /** * Removes `key` and its value from the cache. * @param key The key of the value to remove. * @return Returns `true` if the entry was removed successfully, else `false`. */ delete(key: any): boolean; /** * Gets the cached value for `key`. * @param key The key of the value to get. * @return Returns the cached value. */ get(key: any): any; /** * Checks if a cached value for `key` exists. * @param key The key of the entry to check. * @return Returns `true` if an entry for `key` exists, else `false`. */ has(key: any): boolean; /** * Sets `value` to `key` of the cache. * @param key The key of the value to cache. * @param value The value to cache. * @return Returns the cache object. */ set(key: any, value: any): this; /** * Removes all key-value entries from the map. 
*/ clear?: () => void; } interface MapCacheConstructor { new (): MapCache; } interface <API key><TValue> extends LoDashWrapper<TValue> { pop<T>(this: <API key><List<T> | null | undefined>): T | undefined; push<T>(this: <API key><List<T> | null | undefined>, ...items: T[]): this; shift<T>(this: <API key><List<T> | null | undefined>): T | undefined; sort<T>(this: <API key><List<T> | null | undefined>, compareFn?: (a: T, b: T) => number): this; splice<T>(this: <API key><List<T> | null | undefined>, start: number, deleteCount?: number, ...items: T[]): this; unshift<T>(this: <API key><List<T> | null | undefined>, ...items: T[]): this; } interface <API key><TValue> extends LoDashWrapper<TValue> { pop<T>(this: <API key><List<T> | null | undefined>): <API key><T | undefined>; push<T>(this: <API key><List<T> | null | undefined>, ...items: T[]): this; shift<T>(this: <API key><List<T> | null | undefined>): <API key><T | undefined>; sort<T>(this: <API key><List<T> | null | undefined>, compareFn?: (a: T, b: T) => number): this; splice<T>(this: <API key><List<T> | null | undefined>, start: number, deleteCount?: number, ...items: T[]): this; unshift<T>(this: <API key><List<T> | null | undefined>, ...items: T[]): this; } type NotVoid = {} | null | undefined; type IterateeShorthand<T> = PropertyName | [PropertyName, any] | PartialDeep<T>; type ArrayIterator<T, TResult> = (value: T, index: number, collection: T[]) => TResult; type ListIterator<T, TResult> = (value: T, index: number, collection: List<T>) => TResult; type ListIteratee<T> = ListIterator<T, NotVoid> | IterateeShorthand<T>; type ListIterateeCustom<T, TResult> = ListIterator<T, TResult> | IterateeShorthand<T>; type <API key><T, S extends T> = (value: T, index: number, collection: List<T>) => value is S; // Note: key should be string, not keyof T, because the actual object may contain extra properties that were not specified in the type. 
type ObjectIterator<TObject, TResult> = (value: TObject[keyof TObject], key: string, collection: TObject) => TResult; type ObjectIteratee<TObject> = ObjectIterator<TObject, NotVoid> | IterateeShorthand<TObject[keyof TObject]>; type <API key><TObject, TResult> = ObjectIterator<TObject, TResult> | IterateeShorthand<TObject[keyof TObject]>; type <API key><TObject, S extends TObject[keyof TObject]> = (value: TObject[keyof TObject], key: string, collection: TObject) => value is S; type StringIterator<TResult> = (char: string, index: number, string: string) => TResult; /** @deprecated Use <API key> or <API key> instead. */ type MemoVoidIterator<T, TResult> = (prev: TResult, curr: T, indexOrKey: any, list: T[]) => void; /** @deprecated Use MemoListIterator or MemoObjectIterator instead. */ type MemoIterator<T, TResult> = (prev: TResult, curr: T, indexOrKey: any, list: T[]) => TResult; type MemoListIterator<T, TResult, TList> = (prev: TResult, curr: T, index: number, list: TList) => TResult; type MemoObjectIterator<T, TResult, TList> = (prev: TResult, curr: T, key: string, list: TList) => TResult; type MemoIteratorCapped<T, TResult> = (prev: TResult, curr: T) => TResult; type <API key><T, TResult> = (curr: T, prev: TResult) => TResult; type <API key><T, TResult> = (acc: TResult, curr: T, index: number, arr: T[]) => void; type <API key><T, TResult> = (acc: TResult, curr: T, key: string, dict: Dictionary<T>) => void; type <API key><T, TResult> = (acc: TResult, curr: T) => void; type ValueIteratee<T> = ((value: T) => NotVoid) | IterateeShorthand<T>; type ValueIterateeCustom<T, TResult> = ((value: T) => TResult) | IterateeShorthand<T>; type <API key><T, S extends T> = (value: T) => value is S; type ValueKeyIteratee<T> = ((value: T, key: string) => NotVoid) | IterateeShorthand<T>; type <API key><T, S extends T> = (value: T, key: string) => value is S; type Comparator<T> = (a: T, b: T) => boolean; type Comparator2<T1, T2> = (a: T1, b: T2) => boolean; type PropertyName = string | 
number | symbol; type PropertyPath = Many<PropertyName>; type Omit<T, K extends keyof T> = Pick<T, ({ [P in keyof T]: P } & { [P in K]: never } & { [x: string]: never })[keyof T]>; /** Common interface between Arrays and jQuery objects */ type List<T> = ArrayLike<T>; interface Dictionary<T> { [index: string]: T; } interface NumericDictionary<T> { [index: number]: T; } // Crazy typedef needed get _.omit to work properly with Dictionary and NumericDictionary type AnyKindOfDictionary = | Dictionary<{} | null | undefined> | NumericDictionary<{} | null | undefined>; interface Cancelable { cancel(): void; flush(): void; } type PartialDeep<T> = { [P in keyof T]?: PartialDeep<T[P]>; }; // For backwards compatibility type <API key><T> = <API key><T[]>; type <API key><T> = <API key><T[] | null | undefined>; type <API key><T> = <API key><T>; type <API key><T> = <API key><T | null | undefined>; type <API key> = <API key><number[]>; type <API key> = <API key><string>; type <API key><T> = <API key><T[]>; type <API key><T> = <API key><T[] | null | undefined>; type <API key><T> = <API key><T>; type <API key><T> = <API key><T | null | undefined>; type <API key> = <API key><number[]>; type <API key> = <API key><string>; type DictionaryIterator<T, TResult> = ObjectIterator<Dictionary<T>, TResult>; type DictionaryIteratee<T> = ObjectIteratee<Dictionary<T>>; type <API key><T, S extends T> = <API key><Dictionary<T>, S>; // NOTE: keys of objects at run time are always strings, even when a NumericDictionary is being iterated. type <API key><T, TResult> = (value: T, key: string, collection: NumericDictionary<T>) => TResult; type <API key><T> = <API key><T, NotVoid> | IterateeShorthand<T>; type <API key><T, TResult> = <API key><T, TResult> | IterateeShorthand<T>; }
{% extends 'airflow/master.html' %}

{% block body %}
  <div>
    <h3 style="float: left">
      {% block page_header %}Hive Metastore Browser{% endblock %}
    </h3>
    <div id="object" class="select2-drop-mask"
         style="margin-top: 25px; width: 400px;float: right"></div>
    <div style="clear: both"></div>
  </div>
  {% block plugin_content %}{% endblock %}
{% endblock %}

{% block head %}
  {{ super() }}
  <link rel="stylesheet" type="text/css"
        href="{{ url_for("static", filename="dataTables.bootstrap.css") }}">
  <link href="/admin/static/vendor/select2/select2.css" rel="stylesheet">
{% endblock %}

{% block tail %}
  {{ super() }}
  <script src="{{ url_for('static', filename='jquery.dataTables.min.js') }}"></script>
  <script src="{{ url_for('static', filename='dataTables.bootstrap.js') }}"></script>
  <script src="/admin/static/vendor/select2/select2.min.js" type="text/javascript"></script>
  <script>
    // Filling up the table selector.
    // BUGFIX: declare url with var — the original assignment created an
    // implicit global on window.
    var url = "{{ url_for('.objects') }}";
    $.get(url, function( data ) {
      $("#object").select2({
        data: data,
        placeholder: "Table Selector",
      })
      .on("change", function(e){
        // Navigate to the table detail view for the selected table.
        window.location = "{{ url_for('.table') }}?table=" + e.val;
      });
    }, "json");
  </script>
{% endblock %}
class Orientdb < Formula desc "Graph database" homepage "https://orientdb.com" url "https://orientdb.com/download.php?email=unknown@unknown.com&file=<API key>.1.6.tar.gz&os=mac" version "2.1.6" sha256 "<SHA256-like>" bottle do cellar :any_skip_relocation sha256 "<SHA256-like>" => :el_capitan sha256 "<SHA256-like>" => :yosemite sha256 "<SHA256-like>" => :mavericks end # Fixing OrientDB init scripts patch do url "https://gist.githubusercontent.com/maggiolo00/<API key>/raw/<SHA1-like>/orientdbsh" sha256 "<SHA256-like>" end def install
/* Area:	ffi_call, closure_call
   Purpose:	Check structure alignment of uint16.
   Limitations:	none.
   PR:		none.
   Originator:	<hos@tamanegi.org> 20031203	 */

/* { dg-do run } */
#include "ffitest.h"

/* uchar/ushort/uchar layout forces padding before and after the short,
   which is what this test exercises. */
typedef struct cls_struct_align {
  unsigned char a;
  unsigned short b;
  unsigned char c;
} cls_struct_align;

/* Field-wise sum of the two structs; prints operands and result so the
   dg-output patterns below can check the values crossed the FFI boundary
   intact. */
cls_struct_align cls_struct_align_fn(struct cls_struct_align a1,
			struct cls_struct_align a2)
{
  struct cls_struct_align result;

  result.a = a1.a + a2.a;
  result.b = a1.b + a2.b;
  result.c = a1.c + a2.c;

  printf("%d %d %d %d %d %d: %d %d %d\n", a1.a, a1.b, a1.c,
	 a2.a, a2.b, a2.c, result.a, result.b, result.c);

  return result;
}

/* Closure entry point: unpack the two struct arguments from args[] and
   forward to cls_struct_align_fn, writing the struct result through resp. */
static void
cls_struct_align_gn(ffi_cif* cif __UNUSED__, void* resp, void** args,
		    void* userdata __UNUSED__)
{
  struct cls_struct_align a1, a2;

  a1 = *(struct cls_struct_align*)(args[0]);
  a2 = *(struct cls_struct_align*)(args[1]);

  *(cls_struct_align*)resp = cls_struct_align_fn(a1, a2);
}

int main (void)
{
  ffi_cif cif;
#ifndef USING_MMAP
  static ffi_closure cl;
#endif
  ffi_closure *pcl;
  void* args_dbl[5];
  ffi_type* cls_struct_fields[4];
  ffi_type cls_struct_type;
  ffi_type* dbl_arg_types[5];

#ifdef USING_MMAP
  pcl = allocate_mmap (sizeof(ffi_closure));
#else
  pcl = &cl;
#endif

  /* Describe the struct type to libffi; size/alignment are computed by
     ffi_prep_cif from the elements array. */
  cls_struct_type.size = 0;
  cls_struct_type.alignment = 0;
  cls_struct_type.type = FFI_TYPE_STRUCT;
  cls_struct_type.elements = cls_struct_fields;

  struct cls_struct_align g_dbl = { 12, 4951, 127 };
  struct cls_struct_align f_dbl = { 1, 9320, 13 };
  struct cls_struct_align res_dbl;

  cls_struct_fields[0] = &ffi_type_uchar;
  cls_struct_fields[1] = &ffi_type_ushort;
  cls_struct_fields[2] = &ffi_type_uchar;
  cls_struct_fields[3] = NULL;

  dbl_arg_types[0] = &cls_struct_type;
  dbl_arg_types[1] = &cls_struct_type;
  dbl_arg_types[2] = NULL;

  CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &cls_struct_type,
		     dbl_arg_types) == FFI_OK);

  args_dbl[0] = &g_dbl;
  args_dbl[1] = &f_dbl;
  args_dbl[2] = NULL;

  /* First leg: plain ffi_call into the target function. */
  ffi_call(&cif, FFI_FN(cls_struct_align_fn), &res_dbl, args_dbl);
  /* { dg-output "12 4951 127 1 9320 13: 13 14271 140" } */
  printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c);
  /* { dg-output "\nres: 13 14271 140" } */

  /* Second leg: call through a prepared closure instead. */
  CHECK(ffi_prep_closure(pcl, &cif, cls_struct_align_gn, NULL) == FFI_OK);

  res_dbl = ((cls_struct_align(*)(cls_struct_align,
				  cls_struct_align))(pcl))(g_dbl, f_dbl);
  /* { dg-output "\n12 4951 127 1 9320 13: 13 14271 140" } */
  printf("res: %d %d %d\n", res_dbl.a, res_dbl.b, res_dbl.c);
  /* { dg-output "\nres: 13 14271 140" } */

  exit(0);
}
# Mount an ext3 filesystem: load the module (quietly, may be built in),
# create the mount point, then mount with journaled data and
# continue-on-error policy.
#   $1 - block device
#   $2 - mount point
ext3_mount () {
	modprobe -q ext3
	mkdir -p "$2"
	mount -t ext3 -onoatime,data=journal,errors=continue "$1" "$2"
}

# Scan the kernel command line for "ext3=<device>:<path>" arguments and
# mount each one.  $CMDLINE is intentionally unquoted so it word-splits
# into individual arguments.
for arg in $CMDLINE; do
	optarg=`expr "x$arg" : 'x[^=]*=\(.*\)'`
	# BUGFIX: removed leftover debug output ("echo $arg xxx $optarg")
	# that polluted boot messages; also quoted expansions below.
	case $arg in
	ext3=*)
		dev=`expr "$optarg" : '\([^:]*\).*'`
		path=`expr "$optarg" : '[^:]*:\([^:]*\).*'`
		ext3_mount "$dev" "$path"
		;;
	esac
done
require 'fog/openstack/models/model'

module Fog
  module Compute
    class OpenStack
      # Model of an OpenStack Nova flavor (instance "size": RAM, vCPUs,
      # disk, ...).  Extension attributes are exposed under plain names
      # via :aliases.
      class Flavor < Fog::OpenStack::Model
        identity :id

        attribute :name
        attribute :ram
        attribute :disk
        attribute :vcpus
        attribute :links
        attribute :swap
        attribute :rxtx_factor
        attribute :metadata
        attribute :ephemeral, :aliases => 'OS-FLV-EXT-DATA:ephemeral'
        attribute :is_public, :aliases => 'os-flavor-access:is_public'
        attribute :disabled, :aliases => 'OS-FLV-DISABLED:disabled'

        # Create the flavor server-side.  :name, :ram, :vcpus and :disk are
        # mandatory; the optional attributes are defaulted in place before
        # the request.  Merges the API response back into this model and
        # returns self.
        def save
          requires :name, :ram, :vcpus, :disk
          attributes[:ephemeral] = self.ephemeral || 0
          attributes[:is_public] = self.is_public || false
          attributes[:disabled] = self.disabled || false
          attributes[:swap] = self.swap || 0
          attributes[:rxtx_factor] = self.rxtx_factor || 1.0
          merge_attributes(service.create_flavor(self.attributes).body['flavor'])
          self
        end

        # Delete the flavor identified by :id.  Returns true on success.
        def destroy
          requires :id
          service.delete_flavor(self.id)
          true
        end

        # Fetch the flavor's extra_specs from the API.  Note this overrides
        # the plain :metadata attribute reader with a live service call.
        # Returns nil when the flavor no longer exists.
        def metadata
          service.get_flavor_metadata(self.id).body['extra_specs']
        rescue Fog::Compute::OpenStack::NotFound
          nil
        end

        # Store extra_specs for this flavor via the service.
        # Returns nil when the flavor no longer exists.
        def create_metadata(metadata)
          service.<API key>(self.id, metadata)
        rescue Fog::Compute::OpenStack::NotFound
          nil
        end
      end
    end
  end
end
#ifndef <API key> #define <API key> #include <linux/stringify.h> #include <asm/asm-compat.h> #define __REG_R0 0 #define __REG_R1 1 #define __REG_R2 2 #define __REG_R3 3 #define __REG_R4 4 #define __REG_R5 5 #define __REG_R6 6 #define __REG_R7 7 #define __REG_R8 8 #define __REG_R9 9 #define __REG_R10 10 #define __REG_R11 11 #define __REG_R12 12 #define __REG_R13 13 #define __REG_R14 14 #define __REG_R15 15 #define __REG_R16 16 #define __REG_R17 17 #define __REG_R18 18 #define __REG_R19 19 #define __REG_R20 20 #define __REG_R21 21 #define __REG_R22 22 #define __REG_R23 23 #define __REG_R24 24 #define __REG_R25 25 #define __REG_R26 26 #define __REG_R27 27 #define __REG_R28 28 #define __REG_R29 29 #define __REG_R30 30 #define __REG_R31 31 #define __REGA0_0 0 #define __REGA0_R1 1 #define __REGA0_R2 2 #define __REGA0_R3 3 #define __REGA0_R4 4 #define __REGA0_R5 5 #define __REGA0_R6 6 #define __REGA0_R7 7 #define __REGA0_R8 8 #define __REGA0_R9 9 #define __REGA0_R10 10 #define __REGA0_R11 11 #define __REGA0_R12 12 #define __REGA0_R13 13 #define __REGA0_R14 14 #define __REGA0_R15 15 #define __REGA0_R16 16 #define __REGA0_R17 17 #define __REGA0_R18 18 #define __REGA0_R19 19 #define __REGA0_R20 20 #define __REGA0_R21 21 #define __REGA0_R22 22 #define __REGA0_R23 23 #define __REGA0_R24 24 #define __REGA0_R25 25 #define __REGA0_R26 26 #define __REGA0_R27 27 #define __REGA0_R28 28 #define __REGA0_R29 29 #define __REGA0_R30 30 #define __REGA0_R31 31 /* sorted alphabetically */ #define PPC_INST_DCBA 0x7c0005ec #define PPC_INST_DCBA_MASK 0xfc0007fe #define PPC_INST_DCBAL 0x7c2005ec #define PPC_INST_DCBZL 0x7c2007ec #define PPC_INST_ISEL 0x7c00001e #define PPC_INST_ISEL_MASK 0xfc00003e #define PPC_INST_LDARX 0x7c0000a8 #define PPC_INST_LSWI 0x7c0004aa #define PPC_INST_LSWX 0x7c00042a #define PPC_INST_LWARX 0x7c000028 #define PPC_INST_LWSYNC 0x7c2004ac #define PPC_INST_LXVD2X 0x7c000698 #define PPC_INST_MCRXR 0x7c000400 #define PPC_INST_MCRXR_MASK 0xfc0007fe #define 
PPC_INST_MFSPR_PVR 0x7c1f42a6 #define <API key> 0xfc1fffff #define PPC_INST_MSGSND 0x7c00019c #define PPC_INST_NOP 0x60000000 #define PPC_INST_POPCNTB 0x7c0000f4 #define <API key> 0xfc0007fe #define PPC_INST_POPCNTD 0x7c0003f4 #define PPC_INST_POPCNTW 0x7c0002f4 #define PPC_INST_RFCI 0x4c000066 #define PPC_INST_RFDI 0x4c00004e #define PPC_INST_RFMCI 0x4c00004c #define PPC_INST_MFSPR_DSCR 0x7c1102a6 #define <API key> 0xfc1fffff #define PPC_INST_MTSPR_DSCR 0x7c1103a6 #define <API key> 0xfc1fffff #define PPC_INST_SLBFEE 0x7c0007a7 #define PPC_INST_STRING 0x7c00042a #define <API key> 0xfc0007fe #define <API key> 0xfc00067e #define PPC_INST_STSWI 0x7c0005aa #define PPC_INST_STSWX 0x7c00052a #define PPC_INST_STXVD2X 0x7c000798 #define PPC_INST_TLBIE 0x7c000264 #define PPC_INST_TLBILX 0x7c000024 #define PPC_INST_WAIT 0x7c00007c #define PPC_INST_TLBIVAX 0x7c000624 #define PPC_INST_TLBSRX_DOT 0x7c0006a5 #define PPC_INST_XXLOR 0xf0000510 #define PPC_INST_XVCPSGNDP 0xf0000780 #define PPC_INST_NAP 0x4c000364 #define PPC_INST_SLEEP 0x4c0003a4 /* A2 specific instructions */ #define PPC_INST_ERATWE 0x7c0001a6 #define PPC_INST_ERATRE 0x7c000166 #define PPC_INST_ERATILX 0x7c000066 #define PPC_INST_ERATIVAX 0x7c000666 #define PPC_INST_ERATSX 0x7c000126 #define PPC_INST_ERATSX_DOT 0x7c000127 /* Misc instructions for BPF compiler */ #define PPC_INST_LD 0xe8000000 #define PPC_INST_LHZ 0xa0000000 #define PPC_INST_LWZ 0x80000000 #define PPC_INST_STD 0xf8000000 #define PPC_INST_STDU 0xf8000001 #define PPC_INST_MFLR 0x7c0802a6 #define PPC_INST_MTLR 0x7c0803a6 #define PPC_INST_CMPWI 0x2c000000 #define PPC_INST_CMPDI 0x2c200000 #define PPC_INST_CMPLW 0x7c000040 #define PPC_INST_CMPLWI 0x28000000 #define PPC_INST_ADDI 0x38000000 #define PPC_INST_ADDIS 0x3c000000 #define PPC_INST_ADD 0x7c000214 #define PPC_INST_SUB 0x7c000050 #define PPC_INST_BLR 0x4e800020 #define PPC_INST_BLRL 0x4e800021 #define PPC_INST_MULLW 0x7c0001d6 #define PPC_INST_MULHWU 0x7c000016 #define PPC_INST_MULLI 0x1c000000 
#define PPC_INST_DIVWU			0x7c0003d6
#define PPC_INST_RLWINM			0x54000000
#define PPC_INST_RLDICR			0x78000004
#define PPC_INST_SLW			0x7c000030
#define PPC_INST_SRW			0x7c000430
#define PPC_INST_AND			0x7c000038
#define PPC_INST_ANDDOT			0x7c000039
#define PPC_INST_OR			0x7c000378
#define PPC_INST_ANDI			0x70000000
#define PPC_INST_ORI			0x60000000
#define PPC_INST_ORIS			0x64000000
#define PPC_INST_NEG			0x7c0000d0
#define PPC_INST_BRANCH			0x48000000
/* NOTE(review): name was redacted; restored per upstream (bge/bne-style
 * conditional branch opcode) — confirm against ppc-opcode.h. */
#define PPC_INST_BRANCH_COND		0x40800000
#define PPC_INST_LBZCIX			0x7c0006aa
#define PPC_INST_STBCIX			0x7c0007aa

/* macros to insert fields into opcodes */
#define ___PPC_RA(a)	(((a) & 0x1f) << 16)
#define ___PPC_RB(b)	(((b) & 0x1f) << 11)
#define ___PPC_RS(s)	(((s) & 0x1f) << 21)
#define ___PPC_RT(t)	___PPC_RS(t)
/*
 * BUGFIX: the __PPC_R*() wrappers were truncated ("___PPC_RA(__REG_" with
 * no token-paste tail or closing parenthesis), which is not even a
 * well-formed macro body.  They must paste the symbolic register name onto
 * the __REG_/__REGA0_ prefix, e.g. __PPC_RA(R3) -> ___PPC_RA(__REG_R3).
 */
#define __PPC_RA(a)	___PPC_RA(__REG_##a)
#define __PPC_RA0(a)	___PPC_RA(__REGA0_##a)
#define __PPC_RB(b)	___PPC_RB(__REG_##b)
#define __PPC_RS(s)	___PPC_RS(__REG_##s)
#define __PPC_RT(t)	___PPC_RT(__REG_##t)
/* VSX registers: 6-bit numbers, with the high bit split off into the XX bit */
#define __PPC_XA(a)	((((a) & 0x1f) << 16) | (((a) & 0x20) >> 3))
#define __PPC_XB(b)	((((b) & 0x1f) << 11) | (((b) & 0x20) >> 4))
#define __PPC_XS(s)	((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5))
#define __PPC_XT(s)	__PPC_XS(s)
#define __PPC_T_TLB(t)	(((t) & 0x3) << 21)
#define __PPC_WC(w)	(((w) & 0x3) << 21)
#define __PPC_WS(w)	(((w) & 0x1f) << 11)
#define __PPC_SH(s)	__PPC_WS(s)
#define __PPC_MB(s)	(((s) & 0x1f) << 6)
#define __PPC_ME(s)	(((s) & 0x1f) << 1)
#define __PPC_BI(s)	(((s) & 0x1f) << 16)

/* The EH hint bit of lwarx/ldarx only exists on 64-bit implementations */
#ifdef CONFIG_PPC64
#define __PPC_EH(eh)	(((eh) & 0x1) << 0)
#else
#define __PPC_EH(eh)	0
#endif

/* Deal with instructions that older assemblers aren't aware of */
#define PPC_DCBAL(a, b)		stringify_in_c(.long PPC_INST_DCBAL | \
					__PPC_RA(a) | __PPC_RB(b))
#define PPC_DCBZL(a, b)		stringify_in_c(.long PPC_INST_DCBZL | \
					__PPC_RA(a) | __PPC_RB(b))
#define PPC_LDARX(t, a, b, eh)	stringify_in_c(.long PPC_INST_LDARX | \
					___PPC_RT(t) | ___PPC_RA(a) | \
					___PPC_RB(b) | __PPC_EH(eh))
#define PPC_LWARX(t, a, b, eh)	stringify_in_c(.long PPC_INST_LWARX | \
					___PPC_RT(t) | ___PPC_RA(a) | \
					___PPC_RB(b) | __PPC_EH(eh))
#define PPC_MSGSND(b)		stringify_in_c(.long PPC_INST_MSGSND | \
					___PPC_RB(b))
#define PPC_POPCNTB(a, s)	stringify_in_c(.long PPC_INST_POPCNTB | \
					__PPC_RA(a) | __PPC_RS(s))
#define PPC_POPCNTD(a, s)	stringify_in_c(.long PPC_INST_POPCNTD | \
					__PPC_RA(a) | __PPC_RS(s))
#define PPC_POPCNTW(a, s)	stringify_in_c(.long PPC_INST_POPCNTW | \
					__PPC_RA(a) | __PPC_RS(s))
#define PPC_RFCI		stringify_in_c(.long PPC_INST_RFCI)
#define PPC_RFDI		stringify_in_c(.long PPC_INST_RFDI)
#define PPC_RFMCI		stringify_in_c(.long PPC_INST_RFMCI)
#define PPC_TLBILX(t, a, b)	stringify_in_c(.long PPC_INST_TLBILX | \
					__PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b))
#define PPC_TLBILX_ALL(a, b)	PPC_TLBILX(0, a, b)
#define PPC_TLBILX_PID(a, b)	PPC_TLBILX(1, a, b)
#define PPC_TLBILX_VA(a, b)	PPC_TLBILX(3, a, b)
#define PPC_WAIT(w)		stringify_in_c(.long PPC_INST_WAIT | \
					__PPC_WC(w))
#define PPC_TLBIE(lp, a)	stringify_in_c(.long PPC_INST_TLBIE | \
					___PPC_RB(a) | ___PPC_RS(lp))
#define PPC_TLBSRX_DOT(a, b)	stringify_in_c(.long PPC_INST_TLBSRX_DOT | \
					__PPC_RA0(a) | __PPC_RB(b))
#define PPC_TLBIVAX(a, b)	stringify_in_c(.long PPC_INST_TLBIVAX | \
					__PPC_RA0(a) | __PPC_RB(b))
#define PPC_ERATWE(s, a, w)	stringify_in_c(.long PPC_INST_ERATWE | \
					__PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
#define PPC_ERATRE(s, a, w)	stringify_in_c(.long PPC_INST_ERATRE | \
					__PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
#define PPC_ERATILX(t, a, b)	stringify_in_c(.long PPC_INST_ERATILX | \
					__PPC_T_TLB(t) | __PPC_RA0(a) | \
					__PPC_RB(b))
#define PPC_ERATIVAX(s, a, b)	stringify_in_c(.long PPC_INST_ERATIVAX | \
					__PPC_RS(s) | __PPC_RA0(a) | __PPC_RB(b))
/* NOTE(review): the two ERATSX macros reference `b` but declare a third
 * parameter named `w` (kept as-is from the original); any expansion would
 * fail on the undefined `b` — confirm intent before first use. */
#define PPC_ERATSX(t, a, w)	stringify_in_c(.long PPC_INST_ERATSX | \
					__PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
#define PPC_ERATSX_DOT(t, a, w)	stringify_in_c(.long PPC_INST_ERATSX_DOT | \
					__PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
#define PPC_SLBFEE_DOT(t, b)	stringify_in_c(.long PPC_INST_SLBFEE | \
					__PPC_RT(t) | __PPC_RB(b))

/* PASemi instructions */
#define LBZCIX(t,a,b) stringify_in_c(.long PPC_INST_LBZCIX | \ __PPC_RT(t) | __PPC_RA(a) | __PPC_RB(b)) #define STBCIX(s,a,b) stringify_in_c(.long PPC_INST_STBCIX | \ __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b)) /* * Define what the VSX XX1 form instructions will look like, then add * the 128 bit load store instructions based on that. */ #define VSX_XX1(s, a, b) (__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b)) #define VSX_XX3(t, a, b) (__PPC_XT(t) | __PPC_XA(a) | __PPC_XB(b)) #define STXVD2X(s, a, b) stringify_in_c(.long PPC_INST_STXVD2X | \ VSX_XX1((s), a, b)) #define LXVD2X(s, a, b) stringify_in_c(.long PPC_INST_LXVD2X | \ VSX_XX1((s), a, b)) #define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \ VSX_XX3((t), a, b)) #define XVCPSGNDP(t, a, b) stringify_in_c(.long (PPC_INST_XVCPSGNDP | \ VSX_XX3((t), (a), (b)))) #define PPC_NAP stringify_in_c(.long PPC_INST_NAP) #define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP) #endif /* <API key> */
/* * Quota code necessary even when VFS quota support is not compiled * into the kernel. The interesting stuff is over in dquot.c, here * we have symbols for initial quotactl(2) handling, the sysctl(2) * variables, etc - things needed even when quota support disabled. */ #include <linux/fs.h> #include <linux/namei.h> #include <linux/slab.h> #include <asm/current.h> #include <linux/uaccess.h> #include <linux/kernel.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/capability.h> #include <linux/quotaops.h> #include <linux/types.h> #include <linux/writeback.h> static int <API key>(struct super_block *sb, int type, int cmd, qid_t id) { switch (cmd) { /* these commands do not require any special privilegues */ case Q_GETFMT: case Q_SYNC: case Q_GETINFO: case Q_XGETQSTAT: case Q_XGETQSTATV: case Q_XQUOTASYNC: break; /* allow to query information for dquots we "own" */ case Q_GETQUOTA: case Q_XGETQUOTA: if ((type == USRQUOTA && uid_eq(current_euid(), make_kuid(current_user_ns(), id))) || (type == GRPQUOTA && in_egroup_p(make_kgid(current_user_ns(), id)))) break; /*FALLTHROUGH*/ default: if (!capable(CAP_SYS_ADMIN)) return -EPERM; } return security_quotactl(cmd, type, id, sb); } static void quota_sync_one(struct super_block *sb, void *arg) { int type = *(int *)arg; if (sb->s_qcop && sb->s_qcop->quota_sync && (sb->s_quota_types & (1 << type))) sb->s_qcop->quota_sync(sb, type); } static int quota_sync_all(int type) { int ret; if (type >= MAXQUOTAS) return -EINVAL; ret = security_quotactl(Q_SYNC, type, 0, NULL); if (!ret) iterate_supers(quota_sync_one, &type); return ret; } unsigned int qtype_enforce_flag(int type) { switch (type) { case USRQUOTA: return FS_QUOTA_UDQ_ENFD; case GRPQUOTA: return FS_QUOTA_GDQ_ENFD; case PRJQUOTA: return FS_QUOTA_PDQ_ENFD; } return 0; } static int quota_quotaon(struct super_block *sb, int type, qid_t id, struct path *path) { if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_enable) return -ENOSYS; if 
(sb->s_qcop->quota_enable) return sb->s_qcop->quota_enable(sb, qtype_enforce_flag(type)); if (IS_ERR(path)) return PTR_ERR(path); return sb->s_qcop->quota_on(sb, type, id, path); } static int quota_quotaoff(struct super_block *sb, int type) { if (!sb->s_qcop->quota_off && !sb->s_qcop->quota_disable) return -ENOSYS; if (sb->s_qcop->quota_disable) return sb->s_qcop->quota_disable(sb, qtype_enforce_flag(type)); return sb->s_qcop->quota_off(sb, type); } static int quota_getfmt(struct super_block *sb, int type, void __user *addr) { __u32 fmt; mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); if (!sb_has_quota_active(sb, type)) { mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return -ESRCH; } fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id; mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); if (copy_to_user(addr, &fmt, sizeof(fmt))) return -EFAULT; return 0; } static int quota_getinfo(struct super_block *sb, int type, void __user *addr) { struct qc_state state; struct qc_type_state *tstate; struct if_dqinfo uinfo; int ret; /* This checks whether qc_state has enough entries... 
*/ BUILD_BUG_ON(MAXQUOTAS > XQM_MAXQUOTAS); if (!sb->s_qcop->get_state) return -ENOSYS; ret = sb->s_qcop->get_state(sb, &state); if (ret) return ret; tstate = state.s_state + type; if (!(tstate->flags & QCI_ACCT_ENABLED)) return -ESRCH; memset(&uinfo, 0, sizeof(uinfo)); uinfo.dqi_bgrace = tstate->spc_timelimit; uinfo.dqi_igrace = tstate->ino_timelimit; if (tstate->flags & QCI_SYSFILE) uinfo.dqi_flags |= DQF_SYS_FILE; if (tstate->flags & QCI_ROOT_SQUASH) uinfo.dqi_flags |= DQF_ROOT_SQUASH; uinfo.dqi_valid = IIF_ALL; if (copy_to_user(addr, &uinfo, sizeof(uinfo))) return -EFAULT; return 0; } static int quota_setinfo(struct super_block *sb, int type, void __user *addr) { struct if_dqinfo info; struct qc_info qinfo; if (copy_from_user(&info, addr, sizeof(info))) return -EFAULT; if (!sb->s_qcop->set_info) return -ENOSYS; if (info.dqi_valid & ~(IIF_FLAGS | IIF_BGRACE | IIF_IGRACE)) return -EINVAL; memset(&qinfo, 0, sizeof(qinfo)); if (info.dqi_valid & IIF_FLAGS) { if (info.dqi_flags & ~DQF_SETINFO_MASK) return -EINVAL; if (info.dqi_flags & DQF_ROOT_SQUASH) qinfo.i_flags |= QCI_ROOT_SQUASH; qinfo.i_fieldmask |= QC_FLAGS; } if (info.dqi_valid & IIF_BGRACE) { qinfo.i_spc_timelimit = info.dqi_bgrace; qinfo.i_fieldmask |= QC_SPC_TIMER; } if (info.dqi_valid & IIF_IGRACE) { qinfo.i_ino_timelimit = info.dqi_igrace; qinfo.i_fieldmask |= QC_INO_TIMER; } return sb->s_qcop->set_info(sb, type, &qinfo); } static inline qsize_t qbtos(qsize_t blocks) { return blocks << QIF_DQBLKSIZE_BITS; } static inline qsize_t stoqb(qsize_t space) { return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS; } static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src) { memset(dst, 0, sizeof(*dst)); dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit); dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit); dst->dqb_curspace = src->d_space; dst->dqb_ihardlimit = src->d_ino_hardlimit; dst->dqb_isoftlimit = src->d_ino_softlimit; dst->dqb_curinodes = src->d_ino_count; dst->dqb_btime = 
src->d_spc_timer; dst->dqb_itime = src->d_ino_timer; dst->dqb_valid = QIF_ALL; } static int quota_getquota(struct super_block *sb, int type, qid_t id, void __user *addr) { struct kqid qid; struct qc_dqblk fdq; struct if_dqblk idq; int ret; if (!sb->s_qcop->get_dqblk) return -ENOSYS; qid = make_kqid(current_user_ns(), type, id); if (!qid_has_mapping(sb->s_user_ns, qid)) return -EINVAL; ret = sb->s_qcop->get_dqblk(sb, qid, &fdq); if (ret) return ret; copy_to_if_dqblk(&idq, &fdq); if (copy_to_user(addr, &idq, sizeof(idq))) return -EFAULT; return 0; } /* * Return quota for next active quota >= this id, if any exists, * otherwise return -ENOENT via ->get_nextdqblk */ static int quota_getnextquota(struct super_block *sb, int type, qid_t id, void __user *addr) { struct kqid qid; struct qc_dqblk fdq; struct if_nextdqblk idq; int ret; if (!sb->s_qcop->get_nextdqblk) return -ENOSYS; qid = make_kqid(current_user_ns(), type, id); if (!qid_has_mapping(sb->s_user_ns, qid)) return -EINVAL; ret = sb->s_qcop->get_nextdqblk(sb, &qid, &fdq); if (ret) return ret; /* struct if_nextdqblk is a superset of struct if_dqblk */ copy_to_if_dqblk((struct if_dqblk *)&idq, &fdq); idq.dqb_id = from_kqid(current_user_ns(), qid); if (copy_to_user(addr, &idq, sizeof(idq))) return -EFAULT; return 0; } static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src) { dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit); dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit); dst->d_space = src->dqb_curspace; dst->d_ino_hardlimit = src->dqb_ihardlimit; dst->d_ino_softlimit = src->dqb_isoftlimit; dst->d_ino_count = src->dqb_curinodes; dst->d_spc_timer = src->dqb_btime; dst->d_ino_timer = src->dqb_itime; dst->d_fieldmask = 0; if (src->dqb_valid & QIF_BLIMITS) dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD; if (src->dqb_valid & QIF_SPACE) dst->d_fieldmask |= QC_SPACE; if (src->dqb_valid & QIF_ILIMITS) dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD; if (src->dqb_valid & QIF_INODES) dst->d_fieldmask |= 
QC_INO_COUNT; if (src->dqb_valid & QIF_BTIME) dst->d_fieldmask |= QC_SPC_TIMER; if (src->dqb_valid & QIF_ITIME) dst->d_fieldmask |= QC_INO_TIMER; } static int quota_setquota(struct super_block *sb, int type, qid_t id, void __user *addr) { struct qc_dqblk fdq; struct if_dqblk idq; struct kqid qid; if (copy_from_user(&idq, addr, sizeof(idq))) return -EFAULT; if (!sb->s_qcop->set_dqblk) return -ENOSYS; qid = make_kqid(current_user_ns(), type, id); if (!qid_has_mapping(sb->s_user_ns, qid)) return -EINVAL; copy_from_if_dqblk(&fdq, &idq); return sb->s_qcop->set_dqblk(sb, qid, &fdq); } static int quota_enable(struct super_block *sb, void __user *addr) { __u32 flags; if (copy_from_user(&flags, addr, sizeof(flags))) return -EFAULT; if (!sb->s_qcop->quota_enable) return -ENOSYS; return sb->s_qcop->quota_enable(sb, flags); } static int quota_disable(struct super_block *sb, void __user *addr) { __u32 flags; if (copy_from_user(&flags, addr, sizeof(flags))) return -EFAULT; if (!sb->s_qcop->quota_disable) return -ENOSYS; return sb->s_qcop->quota_disable(sb, flags); } static int <API key>(struct qc_state *state) { int flags = 0; if (state->s_state[USRQUOTA].flags & QCI_ACCT_ENABLED) flags |= FS_QUOTA_UDQ_ACCT; if (state->s_state[USRQUOTA].flags & QCI_LIMITS_ENFORCED) flags |= FS_QUOTA_UDQ_ENFD; if (state->s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED) flags |= FS_QUOTA_GDQ_ACCT; if (state->s_state[GRPQUOTA].flags & QCI_LIMITS_ENFORCED) flags |= FS_QUOTA_GDQ_ENFD; if (state->s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED) flags |= FS_QUOTA_PDQ_ACCT; if (state->s_state[PRJQUOTA].flags & QCI_LIMITS_ENFORCED) flags |= FS_QUOTA_PDQ_ENFD; return flags; } static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs) { int type; struct qc_state state; int ret; ret = sb->s_qcop->get_state(sb, &state); if (ret < 0) return ret; memset(fqs, 0, sizeof(*fqs)); fqs->qs_version = FS_QSTAT_VERSION; fqs->qs_flags = <API key>(&state); /* No quota enabled? 
*/ if (!fqs->qs_flags) return -ENOSYS; fqs->qs_incoredqs = state.s_incoredqs; /* * GETXSTATE quotactl has space for just one set of time limits so * report them for the first enabled quota type */ for (type = 0; type < XQM_MAXQUOTAS; type++) if (state.s_state[type].flags & QCI_ACCT_ENABLED) break; BUG_ON(type == XQM_MAXQUOTAS); fqs->qs_btimelimit = state.s_state[type].spc_timelimit; fqs->qs_itimelimit = state.s_state[type].ino_timelimit; fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit; fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit; if (state.s_state[USRQUOTA].flags & QCI_ACCT_ENABLED) { fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino; fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks; fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents; } if (state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED) { fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino; fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks; fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents; } if (state.s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED) { /* * Q_XGETQSTAT doesn't have room for both group and project * quotas. So, allow the project quota values to be copied out * only if there is no group quota information available. 
*/ if (!(state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)) { fqs->qs_gquota.qfs_ino = state.s_state[PRJQUOTA].ino; fqs->qs_gquota.qfs_nblks = state.s_state[PRJQUOTA].blocks; fqs->qs_gquota.qfs_nextents = state.s_state[PRJQUOTA].nextents; } } return 0; } static int quota_getxstate(struct super_block *sb, void __user *addr) { struct fs_quota_stat fqs; int ret; if (!sb->s_qcop->get_state) return -ENOSYS; ret = quota_getstate(sb, &fqs); if (!ret && copy_to_user(addr, &fqs, sizeof(fqs))) return -EFAULT; return ret; } static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs) { int type; struct qc_state state; int ret; ret = sb->s_qcop->get_state(sb, &state); if (ret < 0) return ret; memset(fqs, 0, sizeof(*fqs)); fqs->qs_version = FS_QSTAT_VERSION; fqs->qs_flags = <API key>(&state); /* No quota enabled? */ if (!fqs->qs_flags) return -ENOSYS; fqs->qs_incoredqs = state.s_incoredqs; /* * GETXSTATV quotactl has space for just one set of time limits so * report them for the first enabled quota type */ for (type = 0; type < XQM_MAXQUOTAS; type++) if (state.s_state[type].flags & QCI_ACCT_ENABLED) break; BUG_ON(type == XQM_MAXQUOTAS); fqs->qs_btimelimit = state.s_state[type].spc_timelimit; fqs->qs_itimelimit = state.s_state[type].ino_timelimit; fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit; fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit; if (state.s_state[USRQUOTA].flags & QCI_ACCT_ENABLED) { fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino; fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks; fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents; } if (state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED) { fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino; fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks; fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents; } if (state.s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED) { fqs->qs_pquota.qfs_ino = 
state.s_state[PRJQUOTA].ino; fqs->qs_pquota.qfs_nblks = state.s_state[PRJQUOTA].blocks; fqs->qs_pquota.qfs_nextents = state.s_state[PRJQUOTA].nextents; } return 0; } static int quota_getxstatev(struct super_block *sb, void __user *addr) { struct fs_quota_statv fqs; int ret; if (!sb->s_qcop->get_state) return -ENOSYS; memset(&fqs, 0, sizeof(fqs)); if (copy_from_user(&fqs, addr, 1)) /* Just read qs_version */ return -EFAULT; /* If this kernel doesn't support user specified version, fail */ switch (fqs.qs_version) { case FS_QSTATV_VERSION1: break; default: return -EINVAL; } ret = quota_getstatev(sb, &fqs); if (!ret && copy_to_user(addr, &fqs, sizeof(fqs))) return -EFAULT; return ret; } /* * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them * out of there as xfsprogs rely on definitions being in that header file. So * just define same functions here for quota purposes. */ #define XFS_BB_SHIFT 9 static inline u64 quota_bbtob(u64 blocks) { return blocks << XFS_BB_SHIFT; } static inline u64 quota_btobb(u64 bytes) { return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT; } static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src) { dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit); dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit); dst->d_ino_hardlimit = src->d_ino_hardlimit; dst->d_ino_softlimit = src->d_ino_softlimit; dst->d_space = quota_bbtob(src->d_bcount); dst->d_ino_count = src->d_icount; dst->d_ino_timer = src->d_itimer; dst->d_spc_timer = src->d_btimer; dst->d_ino_warns = src->d_iwarns; dst->d_spc_warns = src->d_bwarns; dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit); dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit); dst->d_rt_space = quota_bbtob(src->d_rtbcount); dst->d_rt_spc_timer = src->d_rtbtimer; dst->d_rt_spc_warns = src->d_rtbwarns; dst->d_fieldmask = 0; if (src->d_fieldmask & FS_DQ_ISOFT) dst->d_fieldmask |= QC_INO_SOFT; if (src->d_fieldmask & FS_DQ_IHARD) 
dst->d_fieldmask |= QC_INO_HARD; if (src->d_fieldmask & FS_DQ_BSOFT) dst->d_fieldmask |= QC_SPC_SOFT; if (src->d_fieldmask & FS_DQ_BHARD) dst->d_fieldmask |= QC_SPC_HARD; if (src->d_fieldmask & FS_DQ_RTBSOFT) dst->d_fieldmask |= QC_RT_SPC_SOFT; if (src->d_fieldmask & FS_DQ_RTBHARD) dst->d_fieldmask |= QC_RT_SPC_HARD; if (src->d_fieldmask & FS_DQ_BTIMER) dst->d_fieldmask |= QC_SPC_TIMER; if (src->d_fieldmask & FS_DQ_ITIMER) dst->d_fieldmask |= QC_INO_TIMER; if (src->d_fieldmask & FS_DQ_RTBTIMER) dst->d_fieldmask |= QC_RT_SPC_TIMER; if (src->d_fieldmask & FS_DQ_BWARNS) dst->d_fieldmask |= QC_SPC_WARNS; if (src->d_fieldmask & FS_DQ_IWARNS) dst->d_fieldmask |= QC_INO_WARNS; if (src->d_fieldmask & FS_DQ_RTBWARNS) dst->d_fieldmask |= QC_RT_SPC_WARNS; if (src->d_fieldmask & FS_DQ_BCOUNT) dst->d_fieldmask |= QC_SPACE; if (src->d_fieldmask & FS_DQ_ICOUNT) dst->d_fieldmask |= QC_INO_COUNT; if (src->d_fieldmask & FS_DQ_RTBCOUNT) dst->d_fieldmask |= QC_RT_SPACE; } static void <API key>(struct qc_info *dst, struct fs_disk_quota *src) { memset(dst, 0, sizeof(*dst)); dst->i_spc_timelimit = src->d_btimer; dst->i_ino_timelimit = src->d_itimer; dst->i_rt_spc_timelimit = src->d_rtbtimer; dst->i_ino_warnlimit = src->d_iwarns; dst->i_spc_warnlimit = src->d_bwarns; dst->i_rt_spc_warnlimit = src->d_rtbwarns; if (src->d_fieldmask & FS_DQ_BWARNS) dst->i_fieldmask |= QC_SPC_WARNS; if (src->d_fieldmask & FS_DQ_IWARNS) dst->i_fieldmask |= QC_INO_WARNS; if (src->d_fieldmask & FS_DQ_RTBWARNS) dst->i_fieldmask |= QC_RT_SPC_WARNS; if (src->d_fieldmask & FS_DQ_BTIMER) dst->i_fieldmask |= QC_SPC_TIMER; if (src->d_fieldmask & FS_DQ_ITIMER) dst->i_fieldmask |= QC_INO_TIMER; if (src->d_fieldmask & FS_DQ_RTBTIMER) dst->i_fieldmask |= QC_RT_SPC_TIMER; } static int quota_setxquota(struct super_block *sb, int type, qid_t id, void __user *addr) { struct fs_disk_quota fdq; struct qc_dqblk qdq; struct kqid qid; if (copy_from_user(&fdq, addr, sizeof(fdq))) return -EFAULT; if (!sb->s_qcop->set_dqblk) return 
-ENOSYS; qid = make_kqid(current_user_ns(), type, id); if (!qid_has_mapping(sb->s_user_ns, qid)) return -EINVAL; /* Are we actually setting timer / warning limits for all users? */ if (from_kqid(sb->s_user_ns, qid) == 0 && fdq.d_fieldmask & (FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK)) { struct qc_info qinfo; int ret; if (!sb->s_qcop->set_info) return -EINVAL; <API key>(&qinfo, &fdq); ret = sb->s_qcop->set_info(sb, type, &qinfo); if (ret) return ret; /* These are already done */ fdq.d_fieldmask &= ~(FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK); } copy_from_xfs_dqblk(&qdq, &fdq); return sb->s_qcop->set_dqblk(sb, qid, &qdq); } static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src, int type, qid_t id) { memset(dst, 0, sizeof(*dst)); dst->d_version = FS_DQUOT_VERSION; dst->d_id = id; if (type == USRQUOTA) dst->d_flags = FS_USER_QUOTA; else if (type == PRJQUOTA) dst->d_flags = FS_PROJ_QUOTA; else dst->d_flags = FS_GROUP_QUOTA; dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit); dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit); dst->d_ino_hardlimit = src->d_ino_hardlimit; dst->d_ino_softlimit = src->d_ino_softlimit; dst->d_bcount = quota_btobb(src->d_space); dst->d_icount = src->d_ino_count; dst->d_itimer = src->d_ino_timer; dst->d_btimer = src->d_spc_timer; dst->d_iwarns = src->d_ino_warns; dst->d_bwarns = src->d_spc_warns; dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit); dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit); dst->d_rtbcount = quota_btobb(src->d_rt_space); dst->d_rtbtimer = src->d_rt_spc_timer; dst->d_rtbwarns = src->d_rt_spc_warns; } static int quota_getxquota(struct super_block *sb, int type, qid_t id, void __user *addr) { struct fs_disk_quota fdq; struct qc_dqblk qdq; struct kqid qid; int ret; if (!sb->s_qcop->get_dqblk) return -ENOSYS; qid = make_kqid(current_user_ns(), type, id); if (!qid_has_mapping(sb->s_user_ns, qid)) return -EINVAL; ret = sb->s_qcop->get_dqblk(sb, qid, &qdq); if (ret) return ret; 
copy_to_xfs_dqblk(&fdq, &qdq, type, id); if (copy_to_user(addr, &fdq, sizeof(fdq))) return -EFAULT; return ret; } /* * Return quota for next active quota >= this id, if any exists, * otherwise return -ENOENT via ->get_nextdqblk. */ static int quota_getnextxquota(struct super_block *sb, int type, qid_t id, void __user *addr) { struct fs_disk_quota fdq; struct qc_dqblk qdq; struct kqid qid; qid_t id_out; int ret; if (!sb->s_qcop->get_nextdqblk) return -ENOSYS; qid = make_kqid(current_user_ns(), type, id); if (!qid_has_mapping(sb->s_user_ns, qid)) return -EINVAL; ret = sb->s_qcop->get_nextdqblk(sb, &qid, &qdq); if (ret) return ret; id_out = from_kqid(current_user_ns(), qid); copy_to_xfs_dqblk(&fdq, &qdq, type, id_out); if (copy_to_user(addr, &fdq, sizeof(fdq))) return -EFAULT; return ret; } static int quota_rmxquota(struct super_block *sb, void __user *addr) { __u32 flags; if (copy_from_user(&flags, addr, sizeof(flags))) return -EFAULT; if (!sb->s_qcop->rm_xquota) return -ENOSYS; return sb->s_qcop->rm_xquota(sb, flags); } /* Copy parameters and call proper function */ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void __user *addr, struct path *path) { int ret; if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS)) return -EINVAL; /* * Quota not supported on this fs? Check this before s_quota_types * since they needn't be set if quota is not supported at all. 
*/ if (!sb->s_qcop) return -ENOSYS; if (!(sb->s_quota_types & (1 << type))) return -EINVAL; ret = <API key>(sb, type, cmd, id); if (ret < 0) return ret; switch (cmd) { case Q_QUOTAON: return quota_quotaon(sb, type, id, path); case Q_QUOTAOFF: return quota_quotaoff(sb, type); case Q_GETFMT: return quota_getfmt(sb, type, addr); case Q_GETINFO: return quota_getinfo(sb, type, addr); case Q_SETINFO: return quota_setinfo(sb, type, addr); case Q_GETQUOTA: return quota_getquota(sb, type, id, addr); case Q_GETNEXTQUOTA: return quota_getnextquota(sb, type, id, addr); case Q_SETQUOTA: return quota_setquota(sb, type, id, addr); case Q_SYNC: if (!sb->s_qcop->quota_sync) return -ENOSYS; return sb->s_qcop->quota_sync(sb, type); case Q_XQUOTAON: return quota_enable(sb, addr); case Q_XQUOTAOFF: return quota_disable(sb, addr); case Q_XQUOTARM: return quota_rmxquota(sb, addr); case Q_XGETQSTAT: return quota_getxstate(sb, addr); case Q_XGETQSTATV: return quota_getxstatev(sb, addr); case Q_XSETQLIM: return quota_setxquota(sb, type, id, addr); case Q_XGETQUOTA: return quota_getxquota(sb, type, id, addr); case Q_XGETNEXTQUOTA: return quota_getnextxquota(sb, type, id, addr); case Q_XQUOTASYNC: if (sb->s_flags & MS_RDONLY) return -EROFS; /* XFS quotas are fully coherent now, making this call a noop */ return 0; default: return -EINVAL; } } #ifdef CONFIG_BLOCK /* Return 1 if 'cmd' will block on frozen filesystem */ static int quotactl_cmd_write(int cmd) { /* * We cannot allow Q_GETQUOTA and Q_GETNEXTQUOTA without write access * as dquot_acquire() may allocate space for new structure and OCFS2 * needs to increment on-disk use count. 
*/ switch (cmd) { case Q_GETFMT: case Q_GETINFO: case Q_SYNC: case Q_XGETQSTAT: case Q_XGETQSTATV: case Q_XGETQUOTA: case Q_XGETNEXTQUOTA: case Q_XQUOTASYNC: return 0; } return 1; } #endif /* CONFIG_BLOCK */ /* * look up a superblock on which quota ops will be performed * - use the name of a block device to find the superblock thereon */ static struct super_block *quotactl_block(const char __user *special, int cmd) { #ifdef CONFIG_BLOCK struct block_device *bdev; struct super_block *sb; struct filename *tmp = getname(special); if (IS_ERR(tmp)) return ERR_CAST(tmp); bdev = lookup_bdev(tmp->name); putname(tmp); if (IS_ERR(bdev)) return ERR_CAST(bdev); if (quotactl_cmd_write(cmd)) sb = get_super_thawed(bdev); else sb = get_super(bdev); bdput(bdev); if (!sb) return ERR_PTR(-ENODEV); return sb; #else return ERR_PTR(-ENODEV); #endif } /* * This is the system call interface. This communicates with * the user-level programs. Currently this only supports diskquota * calls. Maybe we need to add the process quotas etc. in the future, * but we probably should use rlimits for that. */ SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special, qid_t, id, void __user *, addr) { uint cmds, type; struct super_block *sb = NULL; struct path path, *pathp = NULL; int ret; cmds = cmd >> SUBCMDSHIFT; type = cmd & SUBCMDMASK; /* * As a special case Q_SYNC can be called without a specific device. * It will iterate all superblocks that have quota enabled and call * the sync action on each of them. */ if (!special) { if (cmds == Q_SYNC) return quota_sync_all(type); return -ENODEV; } /* * Path for quotaon has to be resolved before grabbing superblock * because that gets s_umount sem which is also possibly needed by path * resolution (think about autofs) and thus deadlocks could arise. 
*/ if (cmds == Q_QUOTAON) { ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path); if (ret) pathp = ERR_PTR(ret); else pathp = &path; } sb = quotactl_block(special, cmds); if (IS_ERR(sb)) { ret = PTR_ERR(sb); goto out; } ret = do_quotactl(sb, type, cmds, id, addr, pathp); drop_super(sb); out: if (pathp && !IS_ERR(pathp)) path_put(pathp); return ret; }
<!doctype html> <style> div { font-size: 50px; text-decoration: underline solid red; } </style> <script> onload = function() { target.style.textDecorationColor = "green"; }; </script> <p>Test that changes in <API key> are recalculated correctly. PASS if the text below has a solid green underline, and no red.</p> <div id="target"> Filler text </div>
#!/bin/sh # modification, are permitted provided that the following conditions are # met: # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. set -e if [ -f cache.mk ] ; then echo "Please don't commit cache.mk" exit 1 fi
import os
import struct


class InvalidPNGException(Exception):
  """Raised when a file does not carry a valid PNG signature/IHDR chunk."""
  pass


# NOTE(review): the class name was redacted in the original text; restored
# from its docstring ("verifier of image dimensions for Chromium resources")
# as ResourceScaleFactors -- confirm against the Chromium tree.
class ResourceScaleFactors(object):
  """Verifier of image dimensions for Chromium resources.

  This class verifies the image dimensions of resources in the various
  resource subdirectories.

  Attributes:
    paths: An array of tuples giving the folders to check and their
        relevant scale factors. For example:

        [(100, 'default_100_percent'), (200, 'default_200_percent')]
  """

  def __init__(self, input_api, output_api, paths):
    """Initializes ResourceScaleFactors with paths.

    Args:
      input_api: presubmit InputApi object.
      output_api: presubmit OutputApi object.
      paths: see class attribute `paths`.
    """
    self.input_api = input_api
    self.output_api = output_api
    self.paths = paths

  def RunChecks(self):
    """Verifies the scale factors of resources being added or modified.

    Returns:
      An array of presubmit errors if any images were detected not
      having the correct dimensions.
    """
    def ImageSize(filename):
      # Read just the 24 bytes covering the PNG signature and the IHDR
      # width/height fields; reject anything that is not a PNG.
      # (b'' literals keep this correct under both Python 2 and 3.)
      with open(filename, 'rb', buffering=0) as f:
        data = f.read(24)
      if data[:8] != b'\x89PNG\r\n\x1A\n' or data[12:16] != b'IHDR':
        raise InvalidPNGException
      return struct.unpack('>ii', data[16:24])

    # Returns a list of valid scaled image sizes. The valid sizes are the
    # floor and ceiling of (base_size * scale_percent / 100). This is
    # equivalent to requiring that the actual scaled size is less than one
    # pixel away from the exact scaled size.
    # (// is explicit floor division: same result as / in Python 2 ints,
    # and also correct under Python 3.)
    def ValidSizes(base_size, scale_percent):
      return sorted(set([(base_size * scale_percent) // 100,
                         (base_size * scale_percent + 99) // 100]))

    repository_path = self.input_api.os_path.relpath(
        self.input_api.PresubmitLocalPath(),
        self.input_api.change.RepositoryRoot())
    results = []

    # Check for affected files in any of the paths specified.
    affected_files = self.input_api.AffectedFiles(include_deletes=False)
    files = []
    for f in affected_files:
      for path_spec in self.paths:
        path_root = self.input_api.os_path.join(
            repository_path, path_spec[1])
        if (f.LocalPath().endswith('.png') and
            f.LocalPath().startswith(path_root)):
          # Only save the relative path from the resource directory.
          relative_path = self.input_api.os_path.relpath(f.LocalPath(),
                                                         path_root)
          if relative_path not in files:
            files.append(relative_path)

    corrupt_png_error = ('Corrupt PNG in file %s. Note that binaries are not '
        'correctly uploaded to the code review tool and must be directly '
        'submitted using the dcommit command.')
    for f in files:
      base_image = self.input_api.os_path.join(self.paths[0][1], f)
      if not os.path.exists(base_image):
        results.append(self.output_api.PresubmitError(
            'Base image %s does not exist' % self.input_api.os_path.join(
            repository_path, base_image)))
        continue
      try:
        base_dimensions = ImageSize(base_image)
      except InvalidPNGException:
        results.append(self.output_api.PresubmitError(corrupt_png_error %
            self.input_api.os_path.join(repository_path, base_image)))
        continue
      # Find all scaled versions of the base image and verify their sizes.
      for i in range(1, len(self.paths)):
        image_path = self.input_api.os_path.join(self.paths[i][1], f)
        if not os.path.exists(image_path):
          continue
        # Ensure that each image for a particular scale factor is the
        # correct scale of the base image.
        try:
          scaled_dimensions = ImageSize(image_path)
        except InvalidPNGException:
          results.append(self.output_api.PresubmitError(corrupt_png_error %
              self.input_api.os_path.join(repository_path, image_path)))
          continue
        for dimension_name, base_size, scaled_size in zip(
            ('width', 'height'), base_dimensions, scaled_dimensions):
          valid_sizes = ValidSizes(base_size, self.paths[i][0])
          if scaled_size not in valid_sizes:
            results.append(self.output_api.PresubmitError(
                'Image %s has %s %d, expected to be %s' % (
                self.input_api.os_path.join(repository_path, image_path),
                dimension_name, scaled_size,
                ' or '.join(map(str, valid_sizes)))))
    return results