answer
stringlengths
15
1.25M
<?php

/**
 * Renders HTML5 <meter> and <progress> elements as standalone SVG markup.
 *
 * makeSVG() is the single public entry point; each visual variant is
 * implemented by a private helper.  All bar geometry is the fraction
 * (value - min) / (max - min) scaled to the pixel width of the bar.
 *
 * Review notes on fixes applied:
 *  - the three <meter> variants appended '</g></svg>' although no <g>
 *    element was ever opened; the stray '</g>' is removed,
 *  - the XML prolog / DOCTYPE / xmlns string literals had been truncated
 *    and are reconstructed to their canonical SVG 1.1 values,
 *  - gradientUnits="boundingBox" is not a valid SVG keyword; the intended
 *    value is "objectBoundingBox",
 *  - a zero-length range (max == min) no longer divides by zero.
 */
class meter
{
    public function __construct()
    {
    }

    /**
     * Build SVG markup for one meter/progress rendering.
     *
     * Parameters arrive as raw HTML attribute values, so the loose
     * comparisons and truthiness tests below are intentional
     * ('' and null both mean "attribute absent").
     *
     * @param string $tag     'meter', anything else is treated as <progress>
     * @param string $type    visual variant selector: '2', '3', or default
     * @param mixed  $value   current value; falsy suppresses the value marker/bar
     * @param mixed  $max     upper bound of the range
     * @param mixed  $min     lower bound of the range
     * @param mixed  $optimum optimum point; falsy suppresses the optimum marker
     * @param mixed  $low     lower threshold of the "good" region
     * @param mixed  $high    upper threshold of the "good" region
     *
     * @return string SVG markup; '' for the not-yet-implemented custom progress
     */
    public function makeSVG($tag, $type, $value, $max, $min, $optimum, $low, $high)
    {
        if ($tag == 'meter') {
            if ($type == '2') {
                // Wide gauge (160px): value marker green inside [low, high],
                // orange outside; the low..high band is always drawn.
                return $this->customMeter(160, $value, $max, $min, $optimum, $low, $high,
                                          false, '#008800', 3.5);
            }
            if ($type == '3') {
                // Narrow gauge (100px): value marker is always orange; the
                // low..high band is drawn only when narrower than the full range.
                return $this->customMeter(100, $value, $max, $min, $optimum, $low, $high,
                                          true, 'orange', 2.2);
            }
            return $this->defaultMeter($value, $max, $min, $optimum, $low, $high);
        }
        // $tag == 'progress'
        if ($type == '2') {
            return ''; // custom progress variant not implemented yet (matches original)
        }
        return $this->defaultProgress($value, $max, $min);
    }

    /**
     * Width of the value range, guarded so a degenerate range (max == min)
     * cannot cause a division by zero further down.
     */
    private function rangeWidth($max, $min)
    {
        $range = $max - $min;
        return $range ?: 1;
    }

    /**
     * Canonical XML prolog + SVG 1.1 doctype for the standalone meter images.
     */
    private function xmlProlog()
    {
        return '<?xml version="1.0" encoding="UTF-8"?>'
            . '<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"'
            . ' "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">';
    }

    /**
     * Vertical four-stop gradient shared by every fill style.
     * The 20% and 25% stops deliberately repeat the same color to hold a
     * highlight band near the top of the bar.
     */
    private function gradient($id, $edge, $highlight, $bottom)
    {
        return '<linearGradient id="' . $id . '" x1="0" y1="0" x2="0" y2="1" gradientUnits="objectBoundingBox">'
            . '<stop offset="0%" stop-color="' . $edge . '" />'
            . '<stop offset="20%" stop-color="' . $highlight . '" />'
            . '<stop offset="25%" stop-color="' . $highlight . '" />'
            . '<stop offset="100%" stop-color="' . $bottom . '" />'
            . '</linearGradient>';
    }

    /** The neutral gray trough gradient used by every variant. */
    private function grayGradient()
    {
        return $this->gradient('GrGRAY', 'rgb(222, 222, 222)', 'rgb(232, 232, 232)', 'rgb(182, 182, 182)');
    }

    /**
     * Custom meter gauge (types '2' and '3').
     *
     * @param int    $w                    pixel width of the gauge
     * @param bool   $regionOnlyIfNarrower draw the low..high band only when it
     *                                     differs from the full range
     * @param string $inRangeColor         marker color when value is in range
     * @param float  $rxDivisor            h / divisor = marker ellipse x-radius
     */
    private function customMeter($w, $value, $max, $min, $optimum, $low, $high,
                                 $regionOnlyIfNarrower, $inRangeColor, $rxDivisor)
    {
        $h = 10;
        $borderRadius = 0.143; // corner radius as a factor of the bar height
        $range = $this->rangeWidth($max, $min);

        $svg = $this->xmlProlog()
            . '<svg width="' . $w . 'px" height="' . $h . 'px" viewBox="0 0 ' . $w . ' ' . $h
            . '" xmlns="http://www.w3.org/2000/svg">'
            . '<defs>' . $this->grayGradient() . '</defs>';

        // Flat background behind everything.
        $svg .= '<rect x="0" y="0" width="' . $w . '" height="' . $h . '" fill="#f4f4f4" stroke="none" />';

        // LOW..HIGH band.
        $drawRegion = $low && $high;
        if ($regionOnlyIfNarrower) {
            $drawRegion = $drawRegion && ($low != $min || $high != $max);
        }
        if ($drawRegion) {
            $barx = (($low - $min) / $range) * $w;
            $barw = (($high - $low) / $range) * $w;
            $svg .= '<rect x="' . $barx . '" y="0" width="' . $barw . '" height="' . $h
                . '" fill="url(#GrGRAY)" stroke="#888888" stroke-width="0.5px" />';
        }

        // OPTIMUM marker (a small rounded gray bar).
        if ($optimum) {
            $barx = (($optimum - $min) / $range) * $w;
            $barw = $h / 2;
            $svg .= '<rect x="' . $barx . '" y="0" rx="' . ($h * $borderRadius) . 'px" ry="'
                . ($h * $borderRadius) . 'px" width="' . $barw . '" height="' . $h
                . '" fill="#888888" stroke="none" />';
        }

        // VALUE marker (ellipse). Out-of-range values are flagged orange, but
        // only when low/high actually differ from the range bounds.
        if ($value) {
            if (($min != $low && $value < $low) || ($max != $high && $value > $high)) {
                $col = 'orange';
            } else {
                $col = $inRangeColor;
            }
            $cx = (($value - $min) / $range) * $w;
            $cy = $h / 2;
            $rx = $h / $rxDivisor;
            $ry = $h / 2.2;
            $svg .= '<ellipse fill="' . $col . '" stroke="#000000" stroke-width="0.5px" cx="' . $cx
                . '" cy="' . $cy . '" rx="' . $rx . '" ry="' . $ry . '"/>';
        }

        // Outer border; no <g> is opened in this variant, so only </svg> closes.
        $svg .= '<rect x="0" y="0" width="' . $w . '" height="' . $h
            . '" fill="none" stroke="#888888" stroke-width="0.5px" />';
        return $svg . '</svg>';
    }

    /**
     * Default <meter> rendering: a 50px rounded bar whose fill gradient follows
     * the HTML5 optimum/low/high coloring rules.
     */
    private function defaultMeter($value, $max, $min, $optimum, $low, $high)
    {
        $h = 10;
        $w = 50;
        $borderRadius = 0.143; // corner radius as a factor of the bar height
        $r = $h * $borderRadius;
        $range = $this->rangeWidth($max, $min);

        $svg = $this->xmlProlog()
            . '<svg width="' . $w . 'px" height="' . $h . 'px" viewBox="0 0 ' . $w . ' ' . $h
            . '" xmlns="http://www.w3.org/2000/svg">'
            . '<defs>'
            . $this->grayGradient()
            . $this->gradient('GrRED', 'rgb(255, 162, 162)', 'rgb(255, 218, 218)', 'rgb(255, 0, 0)')
            . $this->gradient('GrGREEN', 'rgb(102, 230, 102)', 'rgb(218, 255, 218)', 'rgb(0, 148, 0)')
            . $this->gradient('GrBLUE', 'rgb(102, 102, 230)', 'rgb(238, 238, 238)', 'rgb(0, 0, 128)')
            . $this->gradient('GrORANGE', 'rgb(255, 186, 0)', 'rgb(255, 238, 168)', 'rgb(255, 155, 0)')
            . '</defs>'
            . '<rect x="0" y="0" rx="' . $r . 'px" ry="' . $r . 'px" width="' . $w
            . '" height="' . $h . '" fill="url(#GrGRAY)" stroke="none" />';

        if ($value) {
            $barw = (($value - $min) / $range) * $w;
            $svg .= '<rect x="0" y="0" rx="' . $r . 'px" ry="' . $r . 'px" width="' . $barw
                . '" height="' . $h . '" fill="' . $this->defaultMeterBarColor($value, $optimum, $low, $high)
                . '" stroke="none" />';
        }
        return $svg . '</svg>';
    }

    /**
     * Fill gradient for the default meter bar, per the optimum position:
     * green when the value sits in the preferred region, red at the opposite
     * extreme, orange in between.
     */
    private function defaultMeterBarColor($value, $optimum, $low, $high)
    {
        if ($optimum < $low) {
            // Lower values are better.
            if ($value < $low) {
                return 'url(#GrGREEN)';
            }
            return ($value > $high) ? 'url(#GrRED)' : 'url(#GrORANGE)';
        }
        if ($optimum > $high) {
            // Higher values are better.
            if ($value > $high) {
                return 'url(#GrGREEN)';
            }
            return ($value < $low) ? 'url(#GrRED)' : 'url(#GrORANGE)';
        }
        // Optimum lies inside [low, high].
        if ($value < $low || $value > $high) {
            return 'url(#GrORANGE)';
        }
        return 'url(#GrGREEN)';
    }

    /**
     * Default <progress> rendering: 100px rounded bar, green fill proportional
     * to the value, gray trough when a value is present, pale background when
     * the bar is indeterminate.
     */
    private function defaultProgress($value, $max, $min)
    {
        $h = 10;
        $w = 100;
        $borderRadius = 0.143; // corner radius as a factor of the bar height
        $r = $h * $borderRadius;
        $range = $this->rangeWidth($max, $min);

        // The string '0' is falsy in PHP, hence the explicit extra test so a
        // literal value="0" still shows the determinate gray trough.
        if ($value or $value === '0') {
            $fill = 'url(#GrGRAY)';
        } else {
            $fill = '#f8f8f8';
        }

        $svg = '<svg width="' . $w . 'px" height="' . $h . 'px" viewBox="0 0 ' . $w . ' ' . $h . '"><g>'
            . '<defs>'
            . $this->grayGradient()
            . $this->gradient('GrGREEN', 'rgb(102, 230, 102)', 'rgb(218, 255, 218)', 'rgb(0, 148, 0)')
            . '</defs>'
            . '<rect x="0" y="0" rx="' . $r . 'px" ry="' . $r . 'px" width="' . $w
            . '" height="' . $h . '" fill="' . $fill . '" stroke="none" />';

        if ($value) {
            $barw = (($value - $min) / $range) * $w;
            $svg .= '<rect x="0" y="0" rx="' . $r . 'px" ry="' . $r . 'px" width="' . $barw
                . '" height="' . $h . '" fill="url(#GrGREEN)" stroke="none" />';
        }

        // Outer border; this variant did open a <g>, so close it before </svg>.
        $svg .= '<rect x="0" y="0" rx="' . $r . 'px" ry="' . $r . 'px" width="' . $w
            . '" height="' . $h . '" fill="none" stroke="#888888" stroke-width="0.5px" />';
        return $svg . '</g></svg>';
    }
}
/*
 * QEMU model of the OMAP2 32-kHz synchronisation timer (32KSYNCT).
 *
 * The counter is derived from the VM clock scaled to 32768 Hz (0x8000)
 * via muldiv64().  Register offset 0x00 (32KSYNCNT_REV) reads as 0x21;
 * offset 0x10 returns ticks elapsed since the saved reset value s->val;
 * all writes hit OMAP_BAD_REG().  Half-word reads are reassembled via
 * the cached high half in s->readh.
 *
 * NOTE(review): several identifiers below were redacted to the literal
 * "<API key>" by a scrubbing tool; they must be restored from the QEMU
 * sources (hw/timer/omap_synctimer.c) before this file can compile.
 */
#include "hw.h" #include "qemu-timer.h" #include "omap.h" struct omap_synctimer_s { MemoryRegion iomem; uint32_t val; uint16_t readh; }; /* 32-kHz Sync Timer of the OMAP2 */ static uint32_t omap_synctimer_read(struct omap_synctimer_s *s) { return muldiv64(qemu_get_clock_ns(vm_clock), 0x8000, get_ticks_per_sec()); } void <API key>(struct omap_synctimer_s *s) { s->val = omap_synctimer_read(s); } static uint32_t <API key>(void *opaque, target_phys_addr_t addr) { struct omap_synctimer_s *s = (struct omap_synctimer_s *) opaque; switch (addr) { case 0x00: /* 32KSYNCNT_REV */ return 0x21; case 0x10: return omap_synctimer_read(s) - s->val; } OMAP_BAD_REG(addr); return 0; } static uint32_t <API key>(void *opaque, target_phys_addr_t addr) { struct omap_synctimer_s *s = (struct omap_synctimer_s *) opaque; uint32_t ret; if (addr & 2) return s->readh; else { ret = <API key>(opaque, addr); s->readh = ret >> 16; return ret & 0xffff; } } static void <API key>(void *opaque, target_phys_addr_t addr, uint32_t value) { OMAP_BAD_REG(addr); } static const MemoryRegionOps omap_synctimer_ops = { .old_mmio = { .read = { <API key>, <API key>, <API key>, }, .write = { <API key>, <API key>, <API key>, }, }, .endianness = <API key>, }; struct omap_synctimer_s *omap_synctimer_init(struct omap_target_agent_s *ta, struct omap_mpu_state_s *mpu, omap_clk fclk, omap_clk iclk) { struct omap_synctimer_s *s = g_malloc0(sizeof(*s)); <API key>(s); <API key>(&s->iomem, &omap_synctimer_ops, s, "omap.synctimer", omap_l4_region_size(ta, 0)); omap_l4_attach(ta, 0, &s->iomem); return s; }
/*
 * dixgrabs.h - declarations for the X server DIX grab-management helpers
 * (creating, deleting and comparing passive input grabs).
 *
 * NOTE(review): two exported function names below were redacted to the
 * literal "<API key>" by a scrubbing pass; restore them from the
 * xorg-server sources (dix/grabs.c) before using this header.
 */
#ifndef DIXGRABS_H #define DIXGRABS_H 1 struct _GrabParameters; extern GrabPtr CreateGrab( int /* client */, DeviceIntPtr /* device */, DeviceIntPtr /* modDevice */, WindowPtr /* window */, GrabType /* grabtype */, GrabMask * /* mask */, struct _GrabParameters * /* param */, int /* type */, KeyCode /* keybut */, WindowPtr /* confineTo */, CursorPtr /* cursor */); extern _X_EXPORT int DeletePassiveGrab( pointer /* value */, XID ); extern _X_EXPORT Bool GrabMatchesSecond( GrabPtr /* pFirstGrab */, GrabPtr /* pSecondGrab */, Bool /*ignoreDevice*/); extern _X_EXPORT int <API key>( ClientPtr /* client */, GrabPtr /* pGrab */); extern _X_EXPORT Bool <API key>( GrabPtr /* pMinuendGrab */); #endif /* DIXGRABS_H */
/* * Authors: * Andrey Shvetsov <andrey.shvetsov@k2l.de> * Christian Gromm <christian.gromm@microchip.com> * Sebastian Graf */ #ifndef __MOST_CORE_H__ #define __MOST_CORE_H__ #include <linux/types.h> struct kobject; struct module; /** * Interface type */ enum most_interface_type { ITYPE_LOOPBACK = 1, ITYPE_I2C, ITYPE_I2S, ITYPE_TSI, ITYPE_HBI, ITYPE_MEDIALB_DIM, ITYPE_MEDIALB_DIM2, ITYPE_USB, ITYPE_PCIE }; /** * Channel direction. */ enum <API key> { MOST_CH_RX = 1 << 0, MOST_CH_TX = 1 << 1, }; /** * Channel data type. */ enum <API key> { MOST_CH_CONTROL = 1 << 0, MOST_CH_ASYNC = 1 << 1, MOST_CH_ISOC = 1 << 2, MOST_CH_SYNC = 1 << 5, }; enum mbo_status_flags { /* MBO was processed successfully (data was send or received )*/ MBO_SUCCESS = 0, /* The MBO contains wrong or missing information. */ MBO_E_INVAL, /* MBO was completed as HDM Channel will be closed */ MBO_E_CLOSE, }; /** * struct <API key> - Channel capability * @direction: Supported channel directions. * The value is bitwise OR-combination of the values from the * enumeration <API key>. Zero is allowed value and means * "channel may not be used". * @data_type: Supported channel data types. * The value is bitwise OR-combination of the values from the * enumeration <API key>. Zero is allowed value and means * "channel may not be used". * @num_buffer_packet: Maximum number of buffers supported by this channel * for packet data types (Async,Control,QoS) * @buffer_size_packet: Maximum buffer size supported by this channel * for packet data types (Async,Control,QoS) * @<API key>: Maximum number of buffers supported by this channel * for streaming data types (Sync,AV Packetized) * @<API key>: Maximum buffer size supported by this channel * for streaming data types (Sync,AV Packetized) * @name_suffix: Optional suffix providean by an HDM that is attached to the * regular channel name. * * Describes the capabilities of a MostCore channel like supported Data Types * and directions. 
This information is provided by an HDM for the MostCore. * * The Core creates read only sysfs attribute files in * /sys/devices/virtual/most/mostcore/devices/mdev-#/mdev#-ch#/ with the * following attributes: * -<API key> * -available_datatypes * -<API key> * -<API key> * -<API key> * -<API key> * where content of each file is a string with all supported properties of this * very channel attribute. */ struct <API key> { u16 direction; u16 data_type; u16 num_buffers_packet; u16 buffer_size_packet; u16 <API key>; u16 <API key>; const char *name_suffix; }; /** * struct most_channel_config - stores channel configuration * @direction: direction of the channel * @data_type: data type travelling over this channel * @num_buffers: number of buffers * @buffer_size: size of a buffer for AIM. * Buffer size may be cutted down by HDM in a configure callback * to match to a given interface and channel type. * @extra_len: additional buffer space for internal HDM purposes like padding. * May be set by HDM in a configure callback if needed. * @subbuffer_size: size of a subbuffer * @packets_per_xact: number of MOST frames that are packet inside one USB * packet. This is USB specific * * Describes the configuration for a MostCore channel. This information is * provided from the MostCore to a HDM (like the Medusa PCIe Interface) as a * parameter of the "configure" function call. */ struct most_channel_config { enum <API key> direction; enum <API key> data_type; u16 num_buffers; u16 buffer_size; u16 extra_len; u16 subbuffer_size; u16 packets_per_xact; }; /* * struct mbo - MOST Buffer Object. 
* @context: context for core completion handler * @priv: private data for HDM * * public: documented fields that are used for the communications * between MostCore and HDMs * * @list: list head for use by the mbo's current owner * @ifp: (in) associated interface instance * @hdm_channel_id: (in) HDM channel instance * @virt_address: (in) kernel virtual address of the buffer * @bus_address: (in) bus address of the buffer * @buffer_length: (in) buffer payload length * @processed_length: (out) processed length * @status: (out) transfer status * @complete: (in) completion routine * * The MostCore allocates and initializes the MBO. * * The HDM receives MBO for transfer from MostCore with the call to enqueue(). * The HDM copies the data to- or from the buffer depending on configured * channel direction, set "processed_length" and "status" and completes * the transfer procedure by calling the completion routine. * * At the end the MostCore deallocates the MBO or recycles it for further * transfers for the same or different HDM. * * Directions of usage: * The core driver should never access any MBO fields (even if marked * as "public") while the MBO is owned by an HDM. The ownership starts with * the call of enqueue() and ends with the call of its complete() routine. * * II. * Every HDM attached to the core driver _must_ ensure that it returns any MBO * it owns (due to a previous call to enqueue() by the core driver) before it * de-registers an interface or gets unloaded from the kernel. If this direction * is violated memory leaks will occur, since the core driver does _not_ track * MBOs it is currently not in control of. * */ struct mbo { void *context; void *priv; struct list_head list; struct most_interface *ifp; int *num_buffers_ptr; u16 hdm_channel_id; void *virt_address; dma_addr_t bus_address; u16 buffer_length; u16 processed_length; enum mbo_status_flags status; void (*complete)(struct mbo *); }; /** * Interface instance description. 
* * Describes one instance of an interface like Medusa PCIe or Vantage USB. * This structure is allocated and initialized in the HDM. MostCore may not * modify this structure. * * @interface Interface type. \sa most_interface_type. * @description PRELIMINARY. * Unique description of the device instance from point of view of the * interface in free text form (ASCII). * It may be a hexadecimal presentation of the memory address for the MediaLB * IP or USB device ID with USB properties for USB interface, etc. * @num_channels Number of channels and size of the channel_vector. * @channel_vector Properties of the channels. * Array index represents channel ID by the driver. * @configure Callback to change data type for the channel of the * interface instance. May be zero if the instance of the interface is not * configurable. Parameter channel_config describes direction and data * type for the channel, configured by the higher level. The content of * @enqueue Delivers MBO to the HDM for processing. * After HDM completes Rx- or Tx- operation the processed MBO shall * be returned back to the MostCore using completion routine. * The reason to get the MBO delivered from the MostCore after the channel * is poisoned is the re-opening of the channel by the application. * In this case the HDM shall hold MBOs and service the channel as usual. * The HDM must be able to hold at least one MBO for each channel. * The callback returns a negative value on error, otherwise 0. * @poison_channel Informs HDM about closing the channel. The HDM shall * cancel all transfers and synchronously or asynchronously return * all enqueued for this channel MBOs using the completion routine. * The callback returns a negative value on error, otherwise 0. 
* @request_netinfo: triggers retrieving of network info from the HDM by * means of "Message exchange over MDP/MEP" * The call of the function request_netinfo with the parameter on_netinfo as * NULL prohibits use of the previously obtained function pointer. * @priv Private field used by mostcore to store context information. */ struct most_interface { struct module *mod; enum most_interface_type interface; const char *description; int num_channels; struct <API key> *channel_vector; int (*configure)(struct most_interface *iface, int channel_idx, struct most_channel_config *channel_config); int (*enqueue)(struct most_interface *iface, int channel_idx, struct mbo *mbo); int (*poison_channel)(struct most_interface *iface, int channel_idx); void (*request_netinfo)(struct most_interface *iface, int channel_idx, void (*on_netinfo)(struct most_interface *iface, unsigned char link_stat, unsigned char *mac_addr)); void *priv; }; /** * struct most_aim - identifies MOST device driver to mostcore * @name: Driver name * @probe_channel: function for core to notify driver about channel connection * @disconnect_channel: callback function to disconnect a certain channel * @rx_completion: completion handler for received packets * @tx_completion: completion handler for transmitted packets * @context: context pointer to be used by mostcore */ struct most_aim { const char *name; int (*probe_channel)(struct most_interface *iface, int channel_idx, struct most_channel_config *cfg, struct kobject *parent, char *name); int (*disconnect_channel)(struct most_interface *iface, int channel_idx); int (*rx_completion)(struct mbo *mbo); int (*tx_completion)(struct most_interface *iface, int channel_idx); void *context; }; /** * <API key> - Registers instance of the interface. * @iface: Pointer to the interface instance description. * * Returns a pointer to the kobject of the generated instance. 
* * Note: HDM has to ensure that any reference held on the kobj is * released before deregistering the interface. */ struct kobject *<API key>(struct most_interface *iface); /** * Deregisters instance of the interface. * @intf_instance Pointer to the interface instance description. */ void <API key>(struct most_interface *iface); void most_submit_mbo(struct mbo *mbo); /** * most_stop_enqueue - prevents core from enqueing MBOs * @iface: pointer to interface * @channel_idx: channel index */ void most_stop_enqueue(struct most_interface *iface, int channel_idx); /** * most_resume_enqueue - allow core to enqueue MBOs again * @iface: pointer to interface * @channel_idx: channel index * * This clears the enqueue halt flag and enqueues all MBOs currently * in wait fifo. */ void most_resume_enqueue(struct most_interface *iface, int channel_idx); int most_register_aim(struct most_aim *aim); int most_deregister_aim(struct most_aim *aim); struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx, struct most_aim *); void most_put_mbo(struct mbo *mbo); int channel_has_mbo(struct most_interface *iface, int channel_idx, struct most_aim *aim); int most_start_channel(struct most_interface *iface, int channel_idx, struct most_aim *); int most_stop_channel(struct most_interface *iface, int channel_idx, struct most_aim *); #endif /* MOST_CORE_H_ */
// NOTE(review): libstdc++ pb_ds cc_hash_table_map_/cmp_fn_imps.hpp
// (operator==, cmp_with_other, operator!= for the hash map).  This chunk
// was collapsed onto one physical line, so everything after the first
// "//" below is now commented out, and a pointer type plus a config
// macro were redacted to "<API key>" — presumably mapped_const_pointer
// and the data-true indicator macro; verify against the GCC sources
// before restoring.  cmp_with_other compares sizes, then looks up every
// key of the other map (and, for map types, its mapped value).
// This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the terms // Foundation; either version 3, or (at your option) any later // version. // This library is distributed in the hope that it will be useful, but // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Under Section 7 of GPL version 3, you are granted additional // 3.1, as published by the Free Software Foundation. // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // of the above authors, nor IBM Haifa Research Laboratories, make any // representation about the suitability of this software for any // purpose. It is provided "as is" without express or implied /** * @file cc_hash_table_map_/cmp_fn_imps.hpp * Contains implementations of cc_ht_map_'s entire container comparison related * functions. */ PB_DS_CLASS_T_DEC template<typename Other_HT_Map_Type> bool PB_DS_CLASS_C_DEC:: operator==(const Other_HT_Map_Type& other) const { return cmp_with_other(other); } PB_DS_CLASS_T_DEC template<typename Other_Map_Type> bool PB_DS_CLASS_C_DEC:: cmp_with_other(const Other_Map_Type& other) const { if (size() != other.size()) return false; for (typename Other_Map_Type::const_iterator it = other.begin(); it != other.end(); ++it) { key_const_reference r_key = key_const_reference(PB_DS_V2F(*it)); <API key> p_mapped_value = const_cast<PB_DS_CLASS_C_DEC& >(*this). find_key_pointer(r_key, traits_base::<API key>); if (p_mapped_value == 0) return false; #ifdef <API key> if (p_mapped_value->second != it->second) return false; #endif } return true; } PB_DS_CLASS_T_DEC template<typename Other_HT_Map_Type> bool PB_DS_CLASS_C_DEC:: operator!=(const Other_HT_Map_Type& other) const { return !operator==(other); }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <title>PolyK - pixi.js</title> <link rel="stylesheet" href="http://yui.yahooapis.com/3.9.1/build/cssgrids/cssgrids-min.css"> <link rel="stylesheet" href="../assets/vendor/prettify/prettify-min.css"> <link rel="stylesheet" href="../assets/css/main.css" id="site_styles"> <link rel="shortcut icon" type="image/png" href="../assets/favicon.png"> <script src="http://yui.yahooapis.com/combo?3.9.1/build/yui/yui-min.js"></script> </head> <body class="yui3-skin-sam"> <div id="doc"> <div id="hd" class="yui3-g header"> <div class="yui3-u-3-4"> <h1><img src="http: </div> <div class="yui3-u-1-4 version"> <em>API Docs for: 1.5.3</em> </div> </div> <div id="bd" class="yui3-g"> <div class="yui3-u-1-4"> <div id="docs-sidebar" class="sidebar apidocs"> <div id="api-list"> <h2 class="off-left">APIs</h2> <div id="api-tabview" class="tabview"> <ul class="tabs"> <li><a href="#api-classes">Classes</a></li> <li><a href="#api-modules">Modules</a></li> </ul> <div id="api-tabview-filter"> <input type="search" id="api-filter" placeholder="Type to filter APIs"> </div> <div id="api-tabview-panel"> <ul id="api-classes" class="apis classes"> <li><a href="../classes/AbstractFilter.html">AbstractFilter</a></li> <li><a href="../classes/AjaxRequest.html">AjaxRequest</a></li> <li><a href="../classes/AlphaMaskFilter.html">AlphaMaskFilter</a></li> <li><a href="../classes/AssetLoader.html">AssetLoader</a></li> <li><a href="../classes/AtlasLoader.html">AtlasLoader</a></li> <li><a href="../classes/autoDetectRenderer.html">autoDetectRenderer</a></li> <li><a href="../classes/BaseTexture.html">BaseTexture</a></li> <li><a href="../classes/BitmapFontLoader.html">BitmapFontLoader</a></li> <li><a href="../classes/BitmapText.html">BitmapText</a></li> <li><a href="../classes/BlurFilter.html">BlurFilter</a></li> <li><a href="../classes/CanvasGraphics.html">CanvasGraphics</a></li> <li><a href="../classes/CanvasMaskManager.html">CanvasMaskManager</a></li> 
<li><a href="../classes/CanvasRenderer.html">CanvasRenderer</a></li> <li><a href="../classes/CanvasTinter.html">CanvasTinter</a></li> <li><a href="../classes/Circle.html">Circle</a></li> <li><a href="../classes/ColorMatrixFilter.html">ColorMatrixFilter</a></li> <li><a href="../classes/ColorStepFilter.html">ColorStepFilter</a></li> <li><a href="../classes/DisplacementFilter.html">DisplacementFilter</a></li> <li><a href="../classes/DisplayObject.html">DisplayObject</a></li> <li><a href="../classes/<API key>.html"><API key></a></li> <li><a href="../classes/DotScreenFilter.html">DotScreenFilter</a></li> <li><a href="../classes/Ellipse.html">Ellipse</a></li> <li><a href="../classes/EventTarget.html">EventTarget</a></li> <li><a href="../classes/FilterTexture.html">FilterTexture</a></li> <li><a href="../classes/<API key>.html"><API key></a></li> <li><a href="../classes/Graphics.html">Graphics</a></li> <li><a href="../classes/GrayFilter.html">GrayFilter</a></li> <li><a href="../classes/ImageLoader.html">ImageLoader</a></li> <li><a href="../classes/InteractionData.html">InteractionData</a></li> <li><a href="../classes/InteractionManager.html">InteractionManager</a></li> <li><a href="../classes/InvertFilter.html">InvertFilter</a></li> <li><a href="../classes/JsonLoader.html">JsonLoader</a></li> <li><a href="../classes/MovieClip.html">MovieClip</a></li> <li><a href="../classes/NormalMapFilter.html">NormalMapFilter</a></li> <li><a href="../classes/PixelateFilter.html">PixelateFilter</a></li> <li><a href="../classes/PixiFastShader.html">PixiFastShader</a></li> <li><a href="../classes/PixiShader.html">PixiShader</a></li> <li><a href="../classes/Point.html">Point</a></li> <li><a href="../classes/Polygon.html">Polygon</a></li> <li><a href="../classes/PolyK.html">PolyK</a></li> <li><a href="../classes/PrimitiveShader.html">PrimitiveShader</a></li> <li><a href="../classes/Rectangle.html">Rectangle</a></li> <li><a href="../classes/Rope.html">Rope</a></li> <li><a 
href="../classes/SepiaFilter.html">SepiaFilter</a></li> <li><a href="../classes/Spine.html">Spine</a></li> <li><a href="../classes/Sprite.html">Sprite</a></li> <li><a href="../classes/SpriteBatch.html">SpriteBatch</a></li> <li><a href="../classes/SpriteSheetLoader.html">SpriteSheetLoader</a></li> <li><a href="../classes/Stage.html">Stage</a></li> <li><a href="../classes/Strip.html">Strip</a></li> <li><a href="../classes/Text.html">Text</a></li> <li><a href="../classes/Texture.html">Texture</a></li> <li><a href="../classes/TilingSprite.html">TilingSprite</a></li> <li><a href="../classes/TwistFilter.html">TwistFilter</a></li> <li><a href="../classes/WebGLFilterManager.html">WebGLFilterManager</a></li> <li><a href="../classes/WebGLGraphics.html">WebGLGraphics</a></li> <li><a href="../classes/WebGLMaskManager.html">WebGLMaskManager</a></li> <li><a href="../classes/WebGLRenderer.html">WebGLRenderer</a></li> <li><a href="../classes/WebGLShaderManager.html">WebGLShaderManager</a></li> <li><a href="../classes/WebGLSpriteBatch.html">WebGLSpriteBatch</a></li> </ul> <ul id="api-modules" class="apis modules"> <li><a href="../modules/PIXI.html">PIXI</a></li> </ul> </div> </div> </div> </div> </div> <div class="yui3-u-3-4"> <div id="api-options"> Show: <label for="api-show-inherited"> <input type="checkbox" id="api-show-inherited" checked> Inherited </label> <label for="api-show-protected"> <input type="checkbox" id="api-show-protected"> Protected </label> <label for="api-show-private"> <input type="checkbox" id="api-show-private"> Private </label> <label for="api-show-deprecated"> <input type="checkbox" id="api-show-deprecated"> Deprecated </label> </div> <div class="apidocs"> <div id="docs-main"> <div class="content"> <h1>PolyK Class</h1> <div class="box meta"> <div class="foundat"> Defined in: <a href="../files/<API key>.js.html#l34"><code>src&#x2F;pixi&#x2F;utils&#x2F;Polyk.js:34</code></a> </div> Module: <a href="../modules/PIXI.html">PIXI</a> </div> <div class="box intro"> 
<p>Based on the Polyk library <a href="http: This is an amazing lib! slightly modified by Mat Groves (matgroves.com);</p> </div> <div id="classdocs" class="tabview"> <ul class="api-class-tabs"> <li class="api-class-tab index"><a href="#index">Index</a></li> <li class="api-class-tab methods"><a href="#methods">Methods</a></li> </ul> <div> <div id="index" class="api-class-tabpanel index"> <h2 class="off-left">Item Index</h2> <div class="index-section methods"> <h3>Methods</h3> <ul class="index-list methods"> <li class="index-item method private"> <a href="#method__convex">_convex</a> </li> <li class="index-item method private"> <a href="#<API key>">_PointInTriangle</a> </li> <li class="index-item method"> <a href="#method_Triangulate">Triangulate</a> </li> </ul> </div> </div> <div id="methods" class="api-class-tabpanel"> <h2 class="off-left">Methods</h2> <div id="method__convex" class="method item private"> <h3 class="name"><code>_convex</code></h3> <span class="paren">()</span> <span class="flag private">private</span> <div class="meta"> <p> Defined in <a href="../files/<API key>.js.html#l159"><code>src&#x2F;pixi&#x2F;utils&#x2F;Polyk.js:159</code></a> </p> </div> <div class="description"> <p>Checks whether a shape is convex</p> </div> </div> <div id="<API key>" class="method item private"> <h3 class="name"><code>_PointInTriangle</code></h3> <div class="args"> <span class="paren">(</span><ul class="args-list inline commas"> <li class="arg"> <code>px</code> </li> <li class="arg"> <code>py</code> </li> <li class="arg"> <code>ax</code> </li> <li class="arg"> <code>ay</code> </li> <li class="arg"> <code>bx</code> </li> <li class="arg"> <code>by</code> </li> <li class="arg"> <code>cx</code> </li> <li class="arg"> <code>cy</code> </li> </ul><span class="paren">)</span> </div> <span class="flag private">private</span> <div class="meta"> <p> Defined in <a href="../files/<API key>.js.html#l122"><code>src&#x2F;pixi&#x2F;utils&#x2F;Polyk.js:122</code></a> </p> </div> <div 
class="description"> <p>Checks whether a point is within a triangle</p> </div> <div class="params"> <h4>Parameters:</h4> <ul class="params-list"> <li class="param"> <code class="param-name">px</code> <span class="type">Number</span> <div class="param-description"> <p>x coordinate of the point to test</p> </div> </li> <li class="param"> <code class="param-name">py</code> <span class="type">Number</span> <div class="param-description"> <p>y coordinate of the point to test</p> </div> </li> <li class="param"> <code class="param-name">ax</code> <span class="type">Number</span> <div class="param-description"> <p>x coordinate of the a point of the triangle</p> </div> </li> <li class="param"> <code class="param-name">ay</code> <span class="type">Number</span> <div class="param-description"> <p>y coordinate of the a point of the triangle</p> </div> </li> <li class="param"> <code class="param-name">bx</code> <span class="type">Number</span> <div class="param-description"> <p>x coordinate of the b point of the triangle</p> </div> </li> <li class="param"> <code class="param-name">by</code> <span class="type">Number</span> <div class="param-description"> <p>y coordinate of the b point of the triangle</p> </div> </li> <li class="param"> <code class="param-name">cx</code> <span class="type">Number</span> <div class="param-description"> <p>x coordinate of the c point of the triangle</p> </div> </li> <li class="param"> <code class="param-name">cy</code> <span class="type">Number</span> <div class="param-description"> <p>y coordinate of the c point of the triangle</p> </div> </li> </ul> </div> </div> <div id="method_Triangulate" class="method item"> <h3 class="name"><code>Triangulate</code></h3> <span class="paren">()</span> <div class="meta"> <p> Defined in <a href="../files/<API key>.js.html#l43"><code>src&#x2F;pixi&#x2F;utils&#x2F;Polyk.js:43</code></a> </p> </div> <div class="description"> <p>Triangulates shapes for webGL graphic fills</p> </div> </div> </div> </div> </div> 
</div> </div> </div> </div> </div> </div> <script src="../assets/vendor/prettify/prettify-min.js"></script> <script>prettyPrint();</script> <script src="../assets/js/yui-prettify.js"></script> <script src="../assets/../api.js"></script> <script src="../assets/js/api-filter.js"></script> <script src="../assets/js/api-list.js"></script> <script src="../assets/js/api-search.js"></script> <script src="../assets/js/apidocs.js"></script> </body> </html>
#include <linux/extcon.h> #include <linux/gpio.h> #include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/pinctrl/consumer.h> #define <API key> 20 struct usb_extcon_info { struct device *dev; struct extcon_dev *edev; struct gpio_desc *id_gpiod; struct gpio_desc *vbus_gpiod; int id_irq; int vbus_irq; unsigned long debounce_jiffies; struct delayed_work wq_detcable; }; static const unsigned int usb_extcon_cable[] = { EXTCON_USB, EXTCON_USB_HOST, EXTCON_NONE, }; static void <API key>(struct work_struct *work) { int id, vbus; struct usb_extcon_info *info = container_of(to_delayed_work(work), struct usb_extcon_info, wq_detcable); /* check ID and VBUS and update cable state */ id = info->id_gpiod ? <API key>(info->id_gpiod) : 1; vbus = info->vbus_gpiod ? <API key>(info->vbus_gpiod) : id; /* at first we clean states which are no longer active */ if (id) <API key>(info->edev, EXTCON_USB_HOST, false); if (!vbus) <API key>(info->edev, EXTCON_USB, false); if (!id) { <API key>(info->edev, EXTCON_USB_HOST, true); } else { if (vbus) <API key>(info->edev, EXTCON_USB, true); } } static irqreturn_t usb_irq_handler(int irq, void *dev_id) { struct usb_extcon_info *info = dev_id; queue_delayed_work(<API key>, &info->wq_detcable, info->debounce_jiffies); return IRQ_HANDLED; } static int usb_extcon_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct usb_extcon_info *info; int ret; if (!np) return -EINVAL; info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->dev = dev; info->id_gpiod = <API key>(&pdev->dev, "id", GPIOD_IN); info->vbus_gpiod = <API key>(&pdev->dev, "vbus", GPIOD_IN); if (!info->id_gpiod && !info->vbus_gpiod) { dev_err(dev, "failed to get 
gpios\n"); return -ENODEV; } if (IS_ERR(info->id_gpiod)) return PTR_ERR(info->id_gpiod); if (IS_ERR(info->vbus_gpiod)) return PTR_ERR(info->vbus_gpiod); info->edev = <API key>(dev, usb_extcon_cable); if (IS_ERR(info->edev)) { dev_err(dev, "failed to allocate extcon device\n"); return -ENOMEM; } ret = <API key>(dev, info->edev); if (ret < 0) { dev_err(dev, "failed to register extcon device\n"); return ret; } if (info->id_gpiod) ret = gpiod_set_debounce(info->id_gpiod, <API key> * 1000); if (!ret && info->vbus_gpiod) ret = gpiod_set_debounce(info->vbus_gpiod, <API key> * 1000); if (ret < 0) info->debounce_jiffies = msecs_to_jiffies(<API key>); INIT_DELAYED_WORK(&info->wq_detcable, <API key>); if (info->id_gpiod) { info->id_irq = gpiod_to_irq(info->id_gpiod); if (info->id_irq < 0) { dev_err(dev, "failed to get ID IRQ\n"); return info->id_irq; } ret = <API key>(dev, info->id_irq, NULL, usb_irq_handler, IRQF_TRIGGER_RISING | <API key> | IRQF_ONESHOT, pdev->name, info); if (ret < 0) { dev_err(dev, "failed to request handler for ID IRQ\n"); return ret; } } if (info->vbus_gpiod) { info->vbus_irq = gpiod_to_irq(info->vbus_gpiod); if (info->vbus_irq < 0) { dev_err(dev, "failed to get VBUS IRQ\n"); return info->vbus_irq; } ret = <API key>(dev, info->vbus_irq, NULL, usb_irq_handler, IRQF_TRIGGER_RISING | <API key> | IRQF_ONESHOT, pdev->name, info); if (ret < 0) { dev_err(dev, "failed to request handler for VBUS IRQ\n"); return ret; } } <API key>(pdev, info); <API key>(&pdev->dev, true); /* Perform initial detection */ <API key>(&info->wq_detcable.work); return 0; } static int usb_extcon_remove(struct platform_device *pdev) { struct usb_extcon_info *info = <API key>(pdev); <API key>(&info->wq_detcable); device_init_wakeup(&pdev->dev, false); return 0; } #ifdef CONFIG_PM_SLEEP static int usb_extcon_suspend(struct device *dev) { struct usb_extcon_info *info = dev_get_drvdata(dev); int ret = 0; if (device_may_wakeup(dev)) { if (info->id_gpiod) { ret = 
enable_irq_wake(info->id_irq); if (ret) return ret; } if (info->vbus_gpiod) { ret = enable_irq_wake(info->vbus_irq); if (ret) { if (info->id_gpiod) disable_irq_wake(info->id_irq); return ret; } } } /* * We don't want to process any IRQs after this point * as GPIOs used behind I2C subsystem might not be * accessible until resume completes. So disable IRQ. */ if (info->id_gpiod) disable_irq(info->id_irq); if (info->vbus_gpiod) disable_irq(info->vbus_irq); if (!device_may_wakeup(dev)) <API key>(dev); return ret; } static int usb_extcon_resume(struct device *dev) { struct usb_extcon_info *info = dev_get_drvdata(dev); int ret = 0; if (!device_may_wakeup(dev)) <API key>(dev); if (device_may_wakeup(dev)) { if (info->id_gpiod) { ret = disable_irq_wake(info->id_irq); if (ret) return ret; } if (info->vbus_gpiod) { ret = disable_irq_wake(info->vbus_irq); if (ret) { if (info->id_gpiod) enable_irq_wake(info->id_irq); return ret; } } } if (info->id_gpiod) enable_irq(info->id_irq); if (info->vbus_gpiod) enable_irq(info->vbus_irq); queue_delayed_work(<API key>, &info->wq_detcable, 0); return ret; } #endif static SIMPLE_DEV_PM_OPS(usb_extcon_pm_ops, usb_extcon_suspend, usb_extcon_resume); static const struct of_device_id usb_extcon_dt_match[] = { { .compatible = "linux,extcon-usb-gpio", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, usb_extcon_dt_match); static const struct platform_device_id <API key>[] = { { .name = "extcon-usb-gpio", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(platform, <API key>); static struct platform_driver usb_extcon_driver = { .probe = usb_extcon_probe, .remove = usb_extcon_remove, .driver = { .name = "extcon-usb-gpio", .pm = &usb_extcon_pm_ops, .of_match_table = usb_extcon_dt_match, }, .id_table = <API key>, }; <API key>(usb_extcon_driver); MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>"); MODULE_DESCRIPTION("USB GPIO extcon driver"); MODULE_LICENSE("GPL v2");
/** * @requires OpenLayers/BaseTypes/Class.js */ /** * Class: OpenLayers.Protocol * Abstract vector layer protocol class. Not to be instantiated directly. Use * one of the protocol subclasses instead. */ OpenLayers.Protocol = OpenLayers.Class({ /** * Property: format * {<OpenLayers.Format>} The format used by this protocol. */ format: null, /** * Property: options * {Object} Any options sent to the constructor. */ options: null, /** * Property: autoDestroy * {Boolean} The creator of the protocol can set autoDestroy to false * to fully control when the protocol is destroyed. Defaults to * true. */ autoDestroy: true, /** * Property: defaultFilter * {<OpenLayers.Filter>} Optional default filter to read requests */ defaultFilter: null, /** * Constructor: OpenLayers.Protocol * Abstract class for vector protocols. Create instances of a subclass. * * Parameters: * options - {Object} Optional object whose properties will be set on the * instance. */ initialize: function(options) { options = options || {}; OpenLayers.Util.extend(this, options); this.options = options; }, /** * Method: <API key> * Merge filter passed to the read method with the default one * * Parameters: * filter - {<OpenLayers.Filter>} */ <API key>: function(filter) { var merged; if (filter && this.defaultFilter) { merged = new OpenLayers.Filter.Logical({ type: OpenLayers.Filter.Logical.AND, filters: [this.defaultFilter, filter] }); } else { merged = filter || this.defaultFilter || undefined; } return merged; }, /** * APIMethod: destroy * Clean up the protocol. */ destroy: function() { this.options = null; this.format = null; }, /** * APIMethod: read * Construct a request for reading new features. * * Parameters: * options - {Object} Optional object for configuring the request. * * Returns: * {<OpenLayers.Protocol.Response>} An <OpenLayers.Protocol.Response> * object, the same object will be passed to the callback function passed * if one exists in the options object. 
*/ read: function(options) { options = options || {}; options.filter = this.<API key>(options.filter); }, /** * APIMethod: create * Construct a request for writing newly created features. * * Parameters: * features - {Array({<OpenLayers.Feature.Vector>})} or * {<OpenLayers.Feature.Vector>} * options - {Object} Optional object for configuring the request. * * Returns: * {<OpenLayers.Protocol.Response>} An <OpenLayers.Protocol.Response> * object, the same object will be passed to the callback function passed * if one exists in the options object. */ create: function() { }, /** * APIMethod: update * Construct a request updating modified features. * * Parameters: * features - {Array({<OpenLayers.Feature.Vector>})} or * {<OpenLayers.Feature.Vector>} * options - {Object} Optional object for configuring the request. * * Returns: * {<OpenLayers.Protocol.Response>} An <OpenLayers.Protocol.Response> * object, the same object will be passed to the callback function passed * if one exists in the options object. */ update: function() { }, /** * APIMethod: delete * Construct a request deleting a removed feature. * * Parameters: * feature - {<OpenLayers.Feature.Vector>} * options - {Object} Optional object for configuring the request. * * Returns: * {<OpenLayers.Protocol.Response>} An <OpenLayers.Protocol.Response> * object, the same object will be passed to the callback function passed * if one exists in the options object. */ "delete": function() { }, /** * APIMethod: commit * Go over the features and for each take action * based on the feature state. Possible actions are create, * update and delete. 
* * Parameters: * features - {Array({<OpenLayers.Feature.Vector>})} * options - {Object} Object whose possible keys are "create", "update", * "delete", "callback" and "scope", the values referenced by the * first three are objects as passed to the "create", "update", and * "delete" methods, the value referenced by the "callback" key is * a function which is called when the commit operation is complete * using the scope referenced by the "scope" key. * * Returns: * {Array({<OpenLayers.Protocol.Response>})} An array of * <OpenLayers.Protocol.Response> objects. */ commit: function() { }, /** * Method: abort * Abort an ongoing request. * * Parameters: * response - {<OpenLayers.Protocol.Response>} */ abort: function(response) { }, /** * Method: createCallback * Returns a function that applies the given public method with resp and * options arguments. * * Parameters: * method - {Function} The method to be applied by the callback. * response - {<OpenLayers.Protocol.Response>} The protocol response object. * options - {Object} Options sent to the protocol method */ createCallback: function(method, response, options) { return OpenLayers.Function.bind(function() { method.apply(this, [response, options]); }, this); }, CLASS_NAME: "OpenLayers.Protocol" }); /** * Class: OpenLayers.Protocol.Response * Protocols return Response objects to their users. */ OpenLayers.Protocol.Response = OpenLayers.Class({ /** * Property: code * {Number} - OpenLayers.Protocol.Response.SUCCESS or * OpenLayers.Protocol.Response.FAILURE */ code: null, /** * Property: requestType * {String} The type of request this response corresponds to. Either * "create", "read", "update" or "delete". */ requestType: null, /** * Property: last * {Boolean} - true if this is the last response expected in a commit, * false otherwise, defaults to true. */ last: true, /** * Property: features * {Array({<OpenLayers.Feature.Vector>})} or {<OpenLayers.Feature.Vector>} * The features returned in the response by the server. 
Depending on the * protocol's read payload, either features or data will be populated. */ features: null, /** * Property: data * {Object} * The data returned in the response by the server. Depending on the * protocol's read payload, either features or data will be populated. */ data: null, /** * Property: reqFeatures * {Array({<OpenLayers.Feature.Vector>})} or {<OpenLayers.Feature.Vector>} * The features provided by the user and placed in the request by the * protocol. */ reqFeatures: null, /** * Property: priv */ priv: null, /** * Property: error * {Object} The error object in case a service exception was encountered. */ error: null, /** * Constructor: OpenLayers.Protocol.Response * * Parameters: * options - {Object} Optional object whose properties will be set on the * instance. */ initialize: function(options) { OpenLayers.Util.extend(this, options); }, /** * Method: success * * Returns: * {Boolean} - true on success, false otherwise */ success: function() { return this.code > 0; }, CLASS_NAME: "OpenLayers.Protocol.Response" }); OpenLayers.Protocol.Response.SUCCESS = 1; OpenLayers.Protocol.Response.FAILURE = 0;
(function($){ if(webshims.support.texttrackapi && document.addEventListener){ var trackOptions = webshims.cfg.track; var trackListener = function(e){ $(e.target).filter('track').each(changeApi); }; var trackBugs = webshims.bugs.track; var changeApi = function(){ if(trackBugs || (!trackOptions.override && $.prop(this, 'readyState') == 3)){ trackOptions.override = true; webshims.reTest('track'); document.removeEventListener('error', trackListener, true); if(this && $.nodeName(this, 'track')){ webshims.error("track support was overwritten. Please check your vtt including your vtt mime-type"); } else { webshims.info("track support was overwritten. due to bad browser support"); } return false; } }; var detectTrackError = function(){ document.addEventListener('error', trackListener, true); if(trackBugs){ changeApi(); } else { $('track').each(changeApi); } if(!trackBugs && !trackOptions.override){ webshims.defineProperty(TextTrack.prototype, 'shimActiveCues', { get: function(){ return this._shimActiveCues || this.activeCues; } }); } }; if(!trackOptions.override){ $(detectTrackError); } } })(webshims.$); webshims.register('track-ui', function($, webshims, window, document, undefined){ "use strict"; var options = webshims.cfg.track; var support = webshims.support; //descriptions are not really shown, but they are inserted into the dom var showTracks = {subtitles: 1, captions: 1, descriptions: 1}; var mediaelement = webshims.mediaelement; var usesNativeTrack = function(){ return !options.override && support.texttrackapi; }; var trackDisplay = { update: function(baseData, media){ if(!baseData.activeCues.length){ this.hide(baseData); } else { if(!compareArray(baseData.displayedActiveCues, baseData.activeCues)){ baseData.displayedActiveCues = baseData.activeCues; if(!baseData.trackDisplay){ baseData.trackDisplay = $('<div class="cue-display '+webshims.shadowClass+'"><span class="description-cues" aria-live="assertive" /></div>').insertAfter(media); this.addEvents(baseData, 
media); webshims.docObserve(); } if(baseData.<API key>){ media.triggerHandler('<API key>'); } this.showCues(baseData); } } }, showCues: function(baseData){ var element = $('<span class="cue-wrapper" />'); $.each(baseData.displayedActiveCues, function(i, cue){ var id = (cue.id) ? 'id="cue-id-'+cue.id +'"' : ''; var cueHTML = $('<span class="cue-line"><span '+ id+ ' class="cue" /></span>').find('span').html(cue.getCueAsHTML()).end(); if(cue.track.kind == 'descriptions'){ setTimeout(function(){ $('span.description-cues', baseData.trackDisplay).html(cueHTML); }, 0); } else { element.prepend(cueHTML); } }); $('span.cue-wrapper', baseData.trackDisplay).remove(); baseData.trackDisplay.append(element); }, addEvents: function(baseData, media){ if(options.positionDisplay){ var timer; var positionDisplay = function(_force){ if(baseData.displayedActiveCues.length || _force === true){ baseData.trackDisplay.css({display: 'none'}); var uiElement = media.getShadowElement(); var uiHeight = uiElement.innerHeight(); var uiWidth = uiElement.innerWidth(); var position = uiElement.position(); baseData.trackDisplay.css({ left: position.left, width: uiWidth, height: uiHeight - 45, top: position.top, display: 'block' }); baseData.trackDisplay.css('fontSize', Math.max(Math.round(uiHeight / 30), 7)); baseData.<API key> = false; } else { baseData.<API key> = true; } }; var delayed = function(e){ clearTimeout(timer); timer = setTimeout(positionDisplay, 0); }; var forceUpdate = function(){ positionDisplay(true); }; media.on('<API key> <API key> updatetrackdisplay <API key> swfstageresize', delayed); media.on('<API key>', forceUpdate).onWSOff('updateshadowdom', delayed); forceUpdate(); } }, hide: function(baseData){ if(baseData.trackDisplay && baseData.displayedActiveCues.length){ baseData.displayedActiveCues = []; $('span.cue-wrapper', baseData.trackDisplay).remove(); $('span.description-cues', baseData.trackDisplay).empty(); } } }; function compareArray(a1, a2){ var ret = true; var i = 0; var 
len = a1.length; if(len != a2.length){ ret = false; } else { for(; i < len; i++){ if(a1[i] != a2[i]){ ret = false; break; } } } return ret; } mediaelement.trackDisplay = trackDisplay; if(!mediaelement.createCueList){ var cueListProto = { getCueById: function(id){ var cue = null; for(var i = 0, len = this.length; i < len; i++){ if(this[i].id === id){ cue = this[i]; break; } } return cue; } }; mediaelement.createCueList = function(){ return $.extend([], cueListProto); }; } mediaelement.getActiveCue = function(track, media, time, baseData){ if(!track._lastFoundCue){ track._lastFoundCue = {index: 0, time: 0}; } if(support.texttrackapi && !options.override && !track._shimActiveCues){ track._shimActiveCues = mediaelement.createCueList(); } var i = 0; var len; var cue; for(; i < track.shimActiveCues.length; i++){ cue = track.shimActiveCues[i]; if(cue.startTime > time || cue.endTime < time){ track.shimActiveCues.splice(i, 1); i if(cue.pauseOnExit){ $(media).pause(); } $(track).triggerHandler('cuechange'); $(cue).triggerHandler('exit'); } else if(track.mode == 'showing' && showTracks[track.kind] && $.inArray(cue, baseData.activeCues) == -1){ baseData.activeCues.push(cue); } } len = track.cues.length; i = track._lastFoundCue.time < time ? 
track._lastFoundCue.index : 0; for(; i < len; i++){ cue = track.cues[i]; if(cue.startTime <= time && cue.endTime >= time && $.inArray(cue, track.shimActiveCues) == -1){ track.shimActiveCues.push(cue); if(track.mode == 'showing' && showTracks[track.kind]){ baseData.activeCues.push(cue); } $(track).triggerHandler('cuechange'); $(cue).triggerHandler('enter'); track._lastFoundCue.time = time; track._lastFoundCue.index = i; } if(cue.startTime > time){ break; } } }; if(usesNativeTrack()){ (function(){ var block; var <API key> = function(elem){ block = true; setTimeout(function(){ $(elem).triggerHandler('updatetrackdisplay'); block = false; }, 9); }; var createUpdateFn = function(nodeName, prop, type){ var superType = '_sup'+type; var desc = {prop: {}}; var superDesc; desc.prop[type] = function(){ if(!block && usesNativeTrack()){ <API key>($(this).closest('audio, video')); } return superDesc.prop[superType].apply(this, arguments); }; superDesc = webshims.<API key>(nodeName, prop, desc); }; createUpdateFn('track', 'track', 'get'); ['audio', 'video'].forEach(function(nodeName){ createUpdateFn(nodeName, 'textTracks', 'get'); createUpdateFn('nodeName', 'addTextTrack', 'value'); }); })(); $.propHooks.activeCues = { get: function(obj){ return obj._shimActiveCues || obj.activeCues; } }; } webshims.addReady(function(context, insertedElement){ $('video, audio', context) .add(insertedElement.filter('video, audio')) .filter(function(){ return webshims.implement(this, 'trackui'); }) .each(function(){ var baseData, trackList, updateTimer, updateTimer2; var elem = $(this); var getDisplayCues = function(e){ var track; var time; if(!trackList || !baseData){ trackList = elem.prop('textTracks'); baseData = webshims.data(elem[0], 'mediaelementBase') || webshims.data(elem[0], 'mediaelementBase', {}); if(!baseData.displayedActiveCues){ baseData.displayedActiveCues = []; } } if (!trackList){return;} time = elem.prop('currentTime'); if(!time && time !== 0){return;} baseData.activeCues = []; 
for(var i = 0, len = trackList.length; i < len; i++){ track = trackList[i]; if(track.mode != 'disabled' && track.cues && track.cues.length){ mediaelement.getActiveCue(track, elem, time, baseData); } } trackDisplay.update(baseData, elem); }; var onUpdate = function(e){ clearTimeout(updateTimer); if(e){ if(e.type == 'timeupdate'){ getDisplayCues(); } updateTimer2 = setTimeout(onUpdate, 90); } else { updateTimer = setTimeout(getDisplayCues, 9); } }; var addTrackView = function(){ if(!trackList) { trackList = elem.prop('textTracks'); } //as soon as change on trackList is implemented in all browsers we do not need to have 'updatetrackdisplay' anymore $( [trackList] ).on('change', onUpdate); elem .off('.trackview') .on('play.trackview timeupdate.trackview updatetrackdisplay.trackview', onUpdate) ; }; elem.on('remove', function(e){ if(!e.originalEvent && baseData && baseData.trackDisplay){ setTimeout(function(){ baseData.trackDisplay.remove(); }, 4); } }); if(!usesNativeTrack()){ addTrackView(); } else { if(elem.hasClass('<API key>')){ addTrackView(); } elem .on('<API key> trackapichange', function(){ if(!usesNativeTrack() || elem.hasClass('<API key>')){ addTrackView(); } else { clearTimeout(updateTimer); clearTimeout(updateTimer2); trackList = elem.prop('textTracks'); baseData = webshims.data(elem[0], 'mediaelementBase') || webshims.data(elem[0], 'mediaelementBase', {}); $.each(trackList, function(i, track){ if(track._shimActiveCues){ delete track._shimActiveCues; } }); trackDisplay.hide(baseData); elem.off('.trackview'); } }) ; } }) ; }); });
/*
 * ADSP error translation helpers.
 *
 * Declares two lookup functions that translate a raw ADSP (audio DSP)
 * error code into, respectively, an integer (presumably a corresponding
 * Linux errno value — TODO confirm against the implementation) and a
 * human-readable string.  NOTE(review): both function names were redacted
 * to the placeholder "<API key>" in this copy of the file; restore the
 * original identifiers before use.
 */
#ifndef __ADSP_ERR__
#define __ADSP_ERR__

/* Map a raw ADSP error code to an integer code (likely -errno; verify). */
int <API key>(u32 adsp_error);

/* Map a raw ADSP error code to a descriptive string (never freed by caller — confirm). */
char *<API key>(u32 adsp_error);

#endif
/* GCC DejaGnu regression testcase (ARM, softfp float ABI).
   Reduced from btrfs kernel code; exists only to exercise the compiler
   at -O2, not to compute anything meaningful.  The missing `return` in
   a non-void function and the bare `asm` are intentional parts of the
   reduced testcase — do not "fix" them.
   NOTE(review): one dg-option flag and one called function name were
   redacted to "<API key>" in this copy. */
/* { dg-do compile } */
/* { dg-skip-if "do not override -mfloat-abi" { *-*-* } { "-mfloat-abi=*" } {"-mfloat-abi=softfp" } } */
/* { dg-options "-O2 -<API key> -mabi=apcs-gnu -mfloat-abi=softfp" } */

/* Minimal stand-ins for the kernel structs the reduced code touches. */
struct super_block { int s_blocksize_bits; };
struct btrfs_fs_info { struct super_block *sb; };
struct btrfs_root { struct btrfs_fs_info *fs_info; } *b;

int a, c, d;
long long e;

extern int foo1 (struct btrfs_root *, int, int, int);
extern int foo2 (struct btrfs_root *, int, int);

int
truncate_one_csum (struct btrfs_root *p1, long long p2, long long p3)
{
  int f, g, i = p1->fs_info->sb->s_blocksize_bits;
  g = a;
  long long h = p2 + p3;
  f = foo1 (b, 0, c, 0);
  e = f / g;
  /* Shift-by-blocksize-bits pattern retained from the original kernel code. */
  e <<= p1->fs_info->sb->s_blocksize_bits;
  if (d < p2)
    {
      /* Mixed long long / int arithmetic with a variable shift — the
         construct this testcase was reduced to preserve.  */
      int j = e - h >> i;
      foo2 (p1, 0, j);
    }
  else
    {
      asm ("1\t.long ");
      <API key> ();
    }
}
#!/usr/bin/env python # -*- coding: utf-8 -*- # GuessIt - A library for guessing information from filenames # GuessIt is free software; you can redistribute it and/or modify it under # (at your option) any later version. # GuessIt is distributed in the hope that it will be useful, # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the from __future__ import unicode_literals from guessit import Guess from guessit.transfo import SingleNodeGuesser from guessit.patterns import video_rexps, sep import re import logging log = logging.getLogger(__name__) def guess_video_rexps(string): string = '-' + string + '-' for rexp, confidence, span_adjust in video_rexps: match = re.search(sep + rexp + sep, string, re.IGNORECASE) if match: metadata = match.groupdict() # is this the better place to put it? (maybe, as it is at least # the soonest that we can catch it) if metadata.get('cdNumberTotal', -1) is None: del metadata['cdNumberTotal'] span = (match.start() + span_adjust[0], match.end() + span_adjust[1] - 2) return (Guess(metadata, confidence=confidence, raw=string[span[0]:span[1]]), span) return None, None def process(mtree): SingleNodeGuesser(guess_video_rexps, None, log).process(mtree)
/ [<API key>.ts] var obj = { [Symbol.isConcatSpreadable]: 0 } / [<API key>.js] var obj = { [Symbol.isConcatSpreadable]: 0 }; / [<API key>.d.ts] declare var obj: { [Symbol.isConcatSpreadable]: number; };
#include <stdint.h> #include "libavutil/attributes.h" #include "libavcodec/vp8dsp.h" #include "vp8dsp.h" void <API key>(int16_t block[4][4][16], int16_t dc[16]); void <API key>(uint8_t *dst, int16_t block[16], ptrdiff_t stride); void <API key>(uint8_t *dst, int16_t block[16], ptrdiff_t stride); void <API key>(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride); void <API key>(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride); VP8_LF(neon); VP8_EPEL(16, neon); VP8_EPEL(8, neon); VP8_EPEL(4, neon); VP8_BILIN(16, neon); VP8_BILIN(8, neon); VP8_BILIN(4, neon); av_cold void <API key>(VP8DSPContext *dsp) { dsp-><API key>[0][0][0] = <API key>; dsp-><API key>[0][0][2] = <API key>; dsp-><API key>[0][2][0] = <API key>; dsp-><API key>[0][2][2] = <API key>; dsp-><API key>[1][0][0] = <API key>; dsp-><API key>[1][0][1] = <API key>; dsp-><API key>[1][0][2] = <API key>; dsp-><API key>[1][1][0] = <API key>; dsp-><API key>[1][1][1] = <API key>; dsp-><API key>[1][1][2] = <API key>; dsp-><API key>[1][2][0] = <API key>; dsp-><API key>[1][2][1] = <API key>; dsp-><API key>[1][2][2] = <API key>; dsp-><API key>[2][0][1] = <API key>; dsp-><API key>[2][0][2] = <API key>; dsp-><API key>[2][1][0] = <API key>; dsp-><API key>[2][1][1] = <API key>; dsp-><API key>[2][1][2] = <API key>; dsp-><API key>[2][2][0] = <API key>; dsp-><API key>[2][2][1] = <API key>; dsp-><API key>[2][2][2] = <API key>; dsp-><API key>[0][0][0] = <API key>; dsp-><API key>[0][0][1] = <API key>; dsp-><API key>[0][0][2] = <API key>; dsp-><API key>[0][1][0] = <API key>; dsp-><API key>[0][1][1] = <API key>; dsp-><API key>[0][1][2] = <API key>; dsp-><API key>[0][2][0] = <API key>; dsp-><API key>[0][2][1] = <API key>; dsp-><API key>[0][2][2] = <API key>; dsp-><API key>[1][0][0] = <API key>; dsp-><API key>[1][0][1] = <API key>; dsp-><API key>[1][0][2] = <API key>; dsp-><API key>[1][1][0] = <API key>; dsp-><API key>[1][1][1] = <API key>; dsp-><API key>[1][1][2] = <API key>; dsp-><API key>[1][2][0] = <API key>; dsp-><API 
key>[1][2][1] = <API key>; dsp-><API key>[1][2][2] = <API key>; dsp-><API key>[2][0][1] = <API key>; dsp-><API key>[2][0][2] = <API key>; dsp-><API key>[2][1][0] = <API key>; dsp-><API key>[2][1][1] = <API key>; dsp-><API key>[2][1][2] = <API key>; dsp-><API key>[2][2][0] = <API key>; dsp-><API key>[2][2][1] = <API key>; dsp-><API key>[2][2][2] = <API key>; } av_cold void ff_vp8dsp_init_neon(VP8DSPContext *dsp) { dsp->vp8_luma_dc_wht = <API key>; dsp->vp8_idct_add = <API key>; dsp->vp8_idct_dc_add = <API key>; dsp->vp8_idct_dc_add4y = <API key>; dsp->vp8_idct_dc_add4uv = <API key>; dsp-><API key> = <API key>; dsp-><API key> = <API key>; dsp-><API key> = <API key>; dsp-><API key> = <API key>; dsp-><API key> = <API key>; dsp-><API key> = <API key>; dsp-><API key> = <API key>; dsp-><API key> = <API key>; dsp-><API key> = <API key>; dsp-><API key> = <API key>; }
using System; using System.Collections; using System.Collections.Generic; <summary> System.Array.Sort<T>(T[],System.Collections.Generic.IComparer<T>) </summary> public class ArraySort7 { #region Public Methods public bool RunTests() { bool retVal = true; TestLibrary.TestFramework.LogInformation("[Positive]"); retVal = PosTest1() && retVal; retVal = PosTest2() && retVal; retVal = PosTest3() && retVal; retVal = PosTest4() && retVal; retVal = PosTest5() && retVal; TestLibrary.TestFramework.LogInformation("[Negative]"); retVal = NegTest1() && retVal; retVal = NegTest2() && retVal; //retVal = NegTest3() && retVal; return retVal; } #region Positive Test Cases public bool PosTest1() { bool retVal = true; TestLibrary.TestFramework.BeginScenario("PosTest1: Sort a string array using string comparer<string>"); try { string[] s1 = new string[7]{"Jack", "Mary", "Mike", "Peter", "Boy", "Tom", "Allin"}; IComparer<string> a = new A<string>(); Array.Sort<string>(s1, a); string[] s2 = new string[7]{"Allin", "Boy", "Jack", "Mary", "Mike", "Peter", "Tom"}; for (int i = 0; i < 7; i++) { if (s1[i] != s2[i]) { TestLibrary.TestFramework.LogError("001", "The result is not the value as expected"); retVal = false; } } } catch (Exception e) { TestLibrary.TestFramework.LogError("002", "Unexpected exception: " + e); retVal = false; } return retVal; } public bool PosTest2() { bool retVal = true; TestLibrary.TestFramework.BeginScenario("PosTest2: Sort an int32 array using reverse comparer<int>"); try { int length = TestLibrary.Generator.GetInt16(-55); int[] i1 = new int[length]; int[] i2 = new int[length]; for (int i = 0; i < length; i++) { int value = TestLibrary.Generator.GetByte(-55); i1[i] = value; i2[i] = value; } IComparer<int> b = new B<int>(); Array.Sort<int>(i1, b); for (int i = 0; i < length - 1; i++) //manually quich sort { for (int j = i + 1; j < length; j++) { if (i2[i] < i2[j]) { int temp = i2[i]; i2[i] = i2[j]; i2[j] = temp; } } } for (int i = 0; i < length; i++) { if (i1[i] != 
i2[i]) { TestLibrary.TestFramework.LogError("003", "The result is not the value as expected"); retVal = false; } } } catch (Exception e) { TestLibrary.TestFramework.LogError("004", "Unexpected exception: " + e); retVal = false; } return retVal; } public bool PosTest3() { bool retVal = true; TestLibrary.TestFramework.BeginScenario("PosTest3: Sort a char array using default comparer "); try { int length = TestLibrary.Generator.GetInt16(-55); char[] i1 = new char[length]; char[] i2 = new char[length]; for (int i = 0; i < length; i++) { char value = TestLibrary.Generator.GetChar(-55); i1[i] = value; i2[i] = value; } IComparer<char> c = null; Array.Sort<char>(i1, c); for (int i = 0; i < length - 1; i++) //manually quich sort { for (int j = i + 1; j < length; j++) { if (i2[i] > i2[j]) { char temp = i2[i]; i2[i] = i2[j]; i2[j] = temp; } } } for (int i = 0; i < length; i++) { if (i1[i] != i2[i]) { TestLibrary.TestFramework.LogError("005", "The result is not the value as expected"); retVal = false; } } } catch (Exception e) { TestLibrary.TestFramework.LogError("006", "Unexpected exception: " + e); retVal = false; } return retVal; } public bool PosTest4() { bool retVal = true; TestLibrary.TestFramework.BeginScenario("PosTest4: Sort an array which has same elements using default Icomparer "); try { int length = TestLibrary.Generator.GetByte(-55); string[] s1 = new string[length]; string[] s2 = new string[length]; string value = TestLibrary.Generator.GetString(-55, false, 0, 10); for (int i = 0; i < length; i++) { s1[i] = value; s2[i] = value; } IComparer<string> c = null; Array.Sort<string>(s1, c); for (int i = 0; i < length; i++) { if (s1[i] != s2[i]) { TestLibrary.TestFramework.LogError("007", "The result is not the value as expected"); retVal = false; } } } catch (Exception e) { TestLibrary.TestFramework.LogError("008", "Unexpected exception: " + e); retVal = false; } return retVal; } public bool PosTest5() { bool retVal = true; 
TestLibrary.TestFramework.BeginScenario("PosTest5: Sort a string array including null reference and using customized comparer<T> interface"); try { string[] s1 = new string[9]{"Jack", "Mary", "Mike", null, "Peter", "Boy", "Tom", null, "Allin"}; IComparer<string> d = new D<string>(); Array.Sort<string>(s1, d); string[] s2 = new string[9]{"Allin", "Boy", "Jack", "Mary", "Mike", "Peter", "Tom", null, null}; for (int i = 0; i < 7; i++) { if (s1[i] != s2[i]) { TestLibrary.TestFramework.LogError("009", "The result is not the value as expected"); retVal = false; } } } catch (Exception e) { TestLibrary.TestFramework.LogError("010", "Unexpected exception: " + e); retVal = false; } return retVal; } #endregion #region Nagetive Test Cases public bool NegTest1() { bool retVal = true; TestLibrary.TestFramework.BeginScenario("NegTest1: The array is null "); try { string[] s1 = null; IComparer<string> a = new A<string>(); Array.Sort<string>(s1, a); TestLibrary.TestFramework.LogError("101", "The <API key> is not throw as expected "); retVal = false; } catch (<API key>) { } catch (Exception e) { TestLibrary.TestFramework.LogError("102", "Unexpected exception: " + e); retVal = false; } return retVal; } public bool NegTest2() { bool retVal = true; TestLibrary.TestFramework.BeginScenario("NegTest2: Elements in array do not implement the IComparable<T> interface "); try { E<int>[] a1 = new E<int>[4] { new E<int>(), new E<int>(), new E<int>(), new E<int>() }; IComparer<E<int>> d = null; Array.Sort<E<int>>(a1, d); TestLibrary.TestFramework.LogError("103", "The <API key> is not throw as expected "); retVal = false; } catch (<API key>) { } catch (Exception e) { TestLibrary.TestFramework.LogError("104", "Unexpected exception: " + e); retVal = false; } return retVal; } public bool NegTest3() { bool retVal = true; TestLibrary.TestFramework.BeginScenario("NegTest3:The implementation of comparer<T> caused an error during the sort"); try { int[] i1 = new int[9] { 2, 34, 56, 87, 34, 23, 209, 34, 
87 }; F f = new F(); Array.Sort<int>(i1, f); TestLibrary.TestFramework.LogError("105", "The ArgumentException is not throw as expected "); retVal = false; } catch (ArgumentException) { } catch (Exception e) { TestLibrary.TestFramework.LogError("106", "Unexpected exception: " + e); retVal = false; } return retVal; } #endregion #endregion public static int Main() { ArraySort7 test = new ArraySort7(); TestLibrary.TestFramework.BeginTestCase("ArraySort7"); if (test.RunTests()) { TestLibrary.TestFramework.EndTestCase(); TestLibrary.TestFramework.LogInformation("PASS"); return 100; } else { TestLibrary.TestFramework.EndTestCase(); TestLibrary.TestFramework.LogInformation("FAIL"); return 0; } } class A<T> : IComparer<T> where T : IComparable { #region IComparer Members public int Compare(T x, T y) { return x.CompareTo(y); } #endregion } class B<T> : IComparer<T> where T : IComparable { #region IComparer Members public int Compare(T x, T y) { return (-(x).CompareTo(y)); } #endregion } class D<T> : IComparer<T> where T : IComparable { #region IComparer Members public int Compare(T x, T y) { if (x == null) { return 1; } if (y == null) { return -1; } return x.CompareTo(y); } #endregion } class E<T> { public E() { } } class F : IComparer<int> { #region IComparer<int> Members int IComparer<int>.Compare(int a, int b) { if (a.CompareTo(a) == 0) { return -1; } return a.CompareTo(b); } #endregion } }
#include <stdbool.h> #include <stdint.h> #include <stdarg.h> #include <stdlib.h> #include "platform.h" #include "build_config.h" #include "drivers/serial.h" #include "io/serial.h" #include "build_config.h" #include "printf.h" #ifdef <API key> #include "typeconversion.h" #endif static serialPort_t *printfSerialPort; #ifdef <API key> typedef void (*putcf) (void *, char); static putcf stdout_putf; static void *stdout_putp; // print bf, padded from left to at least n characters. // padding is zero ('0') if z!=0, space (' ') otherwise static int putchw(void *putp, putcf putf, int n, char z, char *bf) { int written = 0; char fc = z ? '0' : ' '; char ch; char *p = bf; while (*p++ && n > 0) n while (n putf(putp, fc); written++; } while ((ch = *bf++)) { putf(putp, ch); written++; } return written; } // retrun number of bytes written int tfp_format(void *putp, putcf putf, const char *fmt, va_list va) { char bf[12]; int written = 0; char ch; while ((ch = *(fmt++))) { if (ch != '%') { putf(putp, ch); written++; } else { char lz = 0; #ifdef <API key> char lng = 0; #endif int w = 0; ch = *(fmt++); if (ch == '0') { ch = *(fmt++); lz = 1; } if (ch >= '0' && ch <= '9') { ch = a2i(ch, &fmt, 10, &w); } #ifdef <API key> if (ch == 'l') { ch = *(fmt++); lng = 1; } #endif switch (ch) { case 0: goto abort; case 'u':{ #ifdef <API key> if (lng) uli2a(va_arg(va, unsigned long int), 10, 0, bf); else #endif ui2a(va_arg(va, unsigned int), 10, 0, bf); written += putchw(putp, putf, w, lz, bf); break; } case 'd':{ #ifdef <API key> if (lng) li2a(va_arg(va, unsigned long int), bf); else #endif i2a(va_arg(va, int), bf); written += putchw(putp, putf, w, lz, bf); break; } case 'x': case 'X': #ifdef <API key> if (lng) uli2a(va_arg(va, unsigned long int), 16, (ch == 'X'), bf); else #endif ui2a(va_arg(va, unsigned int), 16, (ch == 'X'), bf); written += putchw(putp, putf, w, lz, bf); break; case 'c': putf(putp, (char) (va_arg(va, int))); written++; break; case 's': written += putchw(putp, putf, w, 0, 
va_arg(va, char *)); break; case '%': putf(putp, ch); written++; break; case 'n': *va_arg(va, int*) = written; break; default: break; } } } abort: return written; } void init_printf(void *putp, void (*putf) (void *, char)) { stdout_putf = putf; stdout_putp = putp; } int tfp_printf(const char *fmt, ...) { va_list va; va_start(va, fmt); int written = tfp_format(stdout_putp, stdout_putf, fmt, va); va_end(va); while (!<API key>(printfSerialPort)); return written; } static void putcp(void *p, char c) { *(*((char **) p))++ = c; } int tfp_sprintf(char *s, const char *fmt, ...) { va_list va; va_start(va, fmt); int written = tfp_format(&s, putcp, fmt, va); putcp(&s, 0); va_end(va); return written; } static void _putc(void *p, char c) { UNUSED(p); serialWrite(printfSerialPort, c); } void printfSupportInit(void) { init_printf(NULL, _putc); } #else // keil/armcc version int fputc(int c, FILE *f) { // let DMA catch up a bit when using set or dump, we're too fast. while (!<API key>(printfSerialPort)); serialWrite(printfSerialPort, c); return c; } void printfSupportInit(void) { // Nothing to do } #endif void setPrintfSerialPort(serialPort_t *serialPort) { printfSerialPort = serialPort; }
using System.Collections; using System.Collections.Generic; namespace System.Data.Common { internal partial class DbConnectionOptions { protected DbConnectionOptions(string connectionString, Dictionary<string, string> synonyms) : this (connectionString, new Hashtable(synonyms), false) { } internal bool <API key>(string key, out string value) { if (_parsetable.ContainsKey(key)) { value = (string)_parsetable[key]; return true; } value = null; return false; } } }
<html> <body> <pre>BEGINhtml body pre include:custom(opt='val' num=2) filters.include.custom.pug END</pre> </body> </html>
<div class="loader"></div>
#include "irq.h" #include "mmu.h" #include <linux/kvm_host.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/sched.h> #include <linux/moduleparam.h> #include "kvm_cache_regs.h" #include "x86.h" #include <asm/io.h> #include <asm/desc.h> #include <asm/vmx.h> #include <asm/virtext.h> #define __ex(x) <API key>(x) MODULE_AUTHOR("Qumranet"); MODULE_LICENSE("GPL"); static int bypass_guest_pf = 1; module_param(bypass_guest_pf, bool, 0); static int enable_vpid = 1; module_param(enable_vpid, bool, 0); static int <API key> = 1; module_param(<API key>, bool, 0); static int enable_ept = 1; module_param(enable_ept, bool, 0); static int <API key> = 0; module_param(<API key>, bool, 0); struct vmcs { u32 revision_id; u32 abort; char data[0]; }; struct vcpu_vmx { struct kvm_vcpu vcpu; struct list_head local_vcpus_link; unsigned long host_rsp; int launched; u8 fail; u32 idt_vectoring_info; struct kvm_msr_entry *guest_msrs; struct kvm_msr_entry *host_msrs; int nmsrs; int save_nmsrs; int msr_offset_efer; #ifdef CONFIG_X86_64 int <API key>; #endif struct vmcs *vmcs; struct { int loaded; u16 fs_sel, gs_sel, ldt_sel; int <API key>; int fs_reload_needed; int guest_efer_loaded; } host_state; struct { struct { bool pending; u8 vector; unsigned rip; } irq; } rmode; int vpid; bool emulation_required; /* Support for vnmi-less CPUs */ int soft_vnmi_blocked; ktime_t entry_time; s64 vnmi_blocked_time; }; static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) { return container_of(vcpu, struct vcpu_vmx, vcpu); } static int init_rmode(struct kvm *kvm); static u64 construct_eptp(unsigned long root_hpa); static DEFINE_PER_CPU(struct vmcs *, vmxarea); static DEFINE_PER_CPU(struct vmcs *, current_vmcs); static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu); static struct page *vmx_io_bitmap_a; static struct page *vmx_io_bitmap_b; static struct page *vmx_msr_bitmap; static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS); static 
DEFINE_SPINLOCK(vmx_vpid_lock); static struct vmcs_config { int size; int order; u32 revision_id; u32 pin_based_exec_ctrl; u32 cpu_based_exec_ctrl; u32 <API key>; u32 vmexit_ctrl; u32 vmentry_ctrl; } vmcs_config; static struct vmx_capability { u32 ept; u32 vpid; } vmx_capability; #define VMX_SEGMENT_FIELD(seg) \ [VCPU_SREG_##seg] = { \ .selector = GUEST_##seg##_SELECTOR, \ .base = GUEST_##seg##_BASE, \ .limit = GUEST_##seg##_LIMIT, \ .ar_bytes = GUEST_##seg##_AR_BYTES, \ } static struct <API key> { unsigned selector; unsigned base; unsigned limit; unsigned ar_bytes; } <API key>[] = { VMX_SEGMENT_FIELD(CS), VMX_SEGMENT_FIELD(DS), VMX_SEGMENT_FIELD(ES), VMX_SEGMENT_FIELD(FS), VMX_SEGMENT_FIELD(GS), VMX_SEGMENT_FIELD(SS), VMX_SEGMENT_FIELD(TR), VMX_SEGMENT_FIELD(LDTR), }; /* * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it * away by decrementing the array size. */ static const u32 vmx_msr_index[] = { #ifdef CONFIG_X86_64 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE, #endif MSR_EFER, MSR_K6_STAR, }; #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index) static void load_msrs(struct kvm_msr_entry *e, int n) { int i; for (i = 0; i < n; ++i) wrmsrl(e[i].index, e[i].data); } static void save_msrs(struct kvm_msr_entry *e, int n) { int i; for (i = 0; i < n; ++i) rdmsrl(e[i].index, e[i].data); } static inline int is_page_fault(u32 intr_info) { return (intr_info & (<API key> | <API key> | <API key>)) == (INTR_TYPE_EXCEPTION | PF_VECTOR | <API key>); } static inline int is_no_device(u32 intr_info) { return (intr_info & (<API key> | <API key> | <API key>)) == (INTR_TYPE_EXCEPTION | NM_VECTOR | <API key>); } static inline int is_invalid_opcode(u32 intr_info) { return (intr_info & (<API key> | <API key> | <API key>)) == (INTR_TYPE_EXCEPTION | UD_VECTOR | <API key>); } static inline int <API key>(u32 intr_info) { return (intr_info & (<API key> | <API key>)) == (INTR_TYPE_EXT_INTR | <API key>); } static inline int <API key>(void) { return 
(vmcs_config.cpu_based_exec_ctrl & <API key>); } static inline int <API key>(void) { return (vmcs_config.cpu_based_exec_ctrl & <API key>); } static inline int vm_need_tpr_shadow(struct kvm *kvm) { return ((<API key>()) && (irqchip_in_kernel(kvm))); } static inline int <API key>(void) { return (vmcs_config.cpu_based_exec_ctrl & <API key>); } static inline bool <API key>(void) { return <API key> && (vmcs_config.<API key> & <API key>); } static inline int <API key>(void) { return (!!(vmx_capability.ept & <API key>)); } static inline int <API key>(void) { return (!!(vmx_capability.ept & <API key>)); } static inline int <API key>(void) { return (!!(vmx_capability.ept & <API key>)); } static inline int cpu_has_vmx_ept(void) { return (vmcs_config.<API key> & <API key>); } static inline int vm_need_ept(void) { return (cpu_has_vmx_ept() && enable_ept); } static inline int <API key>(struct kvm *kvm) { return ((<API key>()) && (irqchip_in_kernel(kvm))); } static inline int cpu_has_vmx_vpid(void) { return (vmcs_config.<API key> & <API key>); } static inline int <API key>(void) { return vmcs_config.pin_based_exec_ctrl & <API key>; } static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) { int i; for (i = 0; i < vmx->nmsrs; ++i) if (vmx->guest_msrs[i].index == msr) return i; return -1; } static inline void __invvpid(int ext, u16 vpid, gva_t gva) { struct { u64 vpid : 16; u64 rsvd : 48; u64 gva; } operand = { vpid, 0, gva }; asm volatile (__ex(ASM_VMX_INVVPID) /* CF==1 or ZF==1 --> rc = -1 */ "; ja 1f ; ud2 ; 1:" : : "a"(&operand), "c"(ext) : "cc", "memory"); } static inline void __invept(int ext, u64 eptp, gpa_t gpa) { struct { u64 eptp, gpa; } operand = {eptp, gpa}; asm volatile (__ex(ASM_VMX_INVEPT) /* CF==1 or ZF==1 --> rc = -1 */ "; ja 1f ; ud2 ; 1:\n" : : "a" (&operand), "c" (ext) : "cc", "memory"); } static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) { int i; i = __find_msr_index(vmx, msr); if (i >= 0) return &vmx->guest_msrs[i]; return NULL; } 
static void vmcs_clear(struct vmcs *vmcs) { u64 phys_addr = __pa(vmcs); u8 error; asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0" : "=g"(error) : "a"(&phys_addr), "m"(phys_addr) : "cc", "memory"); if (error) printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n", vmcs, phys_addr); } static void __vcpu_clear(void *arg) { struct vcpu_vmx *vmx = arg; int cpu = <API key>(); if (vmx->vcpu.cpu == cpu) vmcs_clear(vmx->vmcs); if (per_cpu(current_vmcs, cpu) == vmx->vmcs) per_cpu(current_vmcs, cpu) = NULL; rdtscll(vmx->vcpu.arch.host_tsc); list_del(&vmx->local_vcpus_link); vmx->vcpu.cpu = -1; vmx->launched = 0; } static void vcpu_clear(struct vcpu_vmx *vmx) { if (vmx->vcpu.cpu == -1) return; <API key>(vmx->vcpu.cpu, __vcpu_clear, vmx, 1); } static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx) { if (vmx->vpid == 0) return; __invvpid(<API key>, vmx->vpid, 0); } static inline void ept_sync_global(void) { if (<API key>()) __invept(<API key>, 0, 0); } static inline void ept_sync_context(u64 eptp) { if (vm_need_ept()) { if (<API key>()) __invept(<API key>, eptp, 0); else ept_sync_global(); } } static inline void <API key>(u64 eptp, gpa_t gpa) { if (vm_need_ept()) { if (<API key>()) __invept(<API key>, eptp, gpa); else ept_sync_context(eptp); } } static unsigned long vmcs_readl(unsigned long field) { unsigned long value; asm volatile (__ex(<API key>) : "=a"(value) : "d"(field) : "cc"); return value; } static u16 vmcs_read16(unsigned long field) { return vmcs_readl(field); } static u32 vmcs_read32(unsigned long field) { return vmcs_readl(field); } static u64 vmcs_read64(unsigned long field) { #ifdef CONFIG_X86_64 return vmcs_readl(field); #else return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32); #endif } static noinline void vmwrite_error(unsigned long field, unsigned long value) { printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n", field, value, vmcs_read32(<API key>)); dump_stack(); } static void vmcs_writel(unsigned long field, unsigned long value) { u8 
error; asm volatile (__ex(<API key>) "; setna %0" : "=q"(error) : "a"(value), "d"(field) : "cc"); if (unlikely(error)) vmwrite_error(field, value); } static void vmcs_write16(unsigned long field, u16 value) { vmcs_writel(field, value); } static void vmcs_write32(unsigned long field, u32 value) { vmcs_writel(field, value); } static void vmcs_write64(unsigned long field, u64 value) { vmcs_writel(field, value); #ifndef CONFIG_X86_64 asm volatile (""); vmcs_writel(field+1, value >> 32); #endif } static void vmcs_clear_bits(unsigned long field, u32 mask) { vmcs_writel(field, vmcs_readl(field) & ~mask); } static void vmcs_set_bits(unsigned long field, u32 mask) { vmcs_writel(field, vmcs_readl(field) | mask); } static void <API key>(struct kvm_vcpu *vcpu) { u32 eb; eb = (1u << PF_VECTOR) | (1u << UD_VECTOR); if (!vcpu->fpu_active) eb |= 1u << NM_VECTOR; if (vcpu->guest_debug.enabled) eb |= 1u << DB_VECTOR; if (vcpu->arch.rmode.active) eb = ~0; if (vm_need_ept()) eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */ vmcs_write32(EXCEPTION_BITMAP, eb); } static void reload_tss(void) { /* * VT restores TR but not its size. Useless. 
*/ struct descriptor_table gdt; struct desc_struct *descs; kvm_get_gdt(&gdt); descs = (void *)gdt.base; descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ load_TR_desc(); } static void <API key>(struct vcpu_vmx *vmx) { int efer_offset = vmx->msr_offset_efer; u64 host_efer = vmx->host_msrs[efer_offset].data; u64 guest_efer = vmx->guest_msrs[efer_offset].data; u64 ignore_bits; if (efer_offset < 0) return; /* * NX is emulated; LMA and LME handled by hardware; SCE meaninless * outside long mode */ ignore_bits = EFER_NX | EFER_SCE; #ifdef CONFIG_X86_64 ignore_bits |= EFER_LMA | EFER_LME; /* SCE is meaningful only in long mode on Intel */ if (guest_efer & EFER_LMA) ignore_bits &= ~(u64)EFER_SCE; #endif if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits)) return; vmx->host_state.guest_efer_loaded = 1; guest_efer &= ~ignore_bits; guest_efer |= host_efer & ignore_bits; wrmsrl(MSR_EFER, guest_efer); vmx->vcpu.stat.efer_reload++; } static void reload_host_efer(struct vcpu_vmx *vmx) { if (vmx->host_state.guest_efer_loaded) { vmx->host_state.guest_efer_loaded = 0; load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1); } } static void vmx_save_host_state(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (vmx->host_state.loaded) return; vmx->host_state.loaded = 1; /* * Set host fs and gs selectors. Unfortunately, 22.2.3 does not * allow segment selectors with cpl > 0 or ti == 1. 
*/ vmx->host_state.ldt_sel = kvm_read_ldt(); vmx->host_state.<API key> = vmx->host_state.ldt_sel; vmx->host_state.fs_sel = kvm_read_fs(); if (!(vmx->host_state.fs_sel & 7)) { vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); vmx->host_state.fs_reload_needed = 0; } else { vmcs_write16(HOST_FS_SELECTOR, 0); vmx->host_state.fs_reload_needed = 1; } vmx->host_state.gs_sel = kvm_read_gs(); if (!(vmx->host_state.gs_sel & 7)) vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); else { vmcs_write16(HOST_GS_SELECTOR, 0); vmx->host_state.<API key> = 1; } #ifdef CONFIG_X86_64 vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE)); vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE)); #else vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel)); vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel)); #endif #ifdef CONFIG_X86_64 if (is_long_mode(&vmx->vcpu)) save_msrs(vmx->host_msrs + vmx-><API key>, 1); #endif load_msrs(vmx->guest_msrs, vmx->save_nmsrs); <API key>(vmx); } static void <API key>(struct vcpu_vmx *vmx) { unsigned long flags; if (!vmx->host_state.loaded) return; ++vmx->vcpu.stat.host_state_reload; vmx->host_state.loaded = 0; if (vmx->host_state.fs_reload_needed) kvm_load_fs(vmx->host_state.fs_sel); if (vmx->host_state.<API key>) { kvm_load_ldt(vmx->host_state.ldt_sel); /* * If we have to reload gs, we must take care to * preserve our gs base. */ local_irq_save(flags); kvm_load_gs(vmx->host_state.gs_sel); #ifdef CONFIG_X86_64 wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE)); #endif local_irq_restore(flags); } reload_tss(); save_msrs(vmx->guest_msrs, vmx->save_nmsrs); load_msrs(vmx->host_msrs, vmx->save_nmsrs); reload_host_efer(vmx); } static void vmx_load_host_state(struct vcpu_vmx *vmx) { preempt_disable(); <API key>(vmx); preempt_enable(); } /* * Switches to specified vcpu, until a matching vcpu_put(), but assumes * vcpu mutex is already taken. 
*/ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u64 phys_addr = __pa(vmx->vmcs); u64 tsc_this, delta, new_offset; if (vcpu->cpu != cpu) { vcpu_clear(vmx); kvm_migrate_timers(vcpu); vpid_sync_vcpu_all(vmx); local_irq_disable(); list_add(&vmx->local_vcpus_link, &per_cpu(vcpus_on_cpu, cpu)); local_irq_enable(); } if (per_cpu(current_vmcs, cpu) != vmx->vmcs) { u8 error; per_cpu(current_vmcs, cpu) = vmx->vmcs; asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0" : "=g"(error) : "a"(&phys_addr), "m"(phys_addr) : "cc"); if (error) printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n", vmx->vmcs, phys_addr); } if (vcpu->cpu != cpu) { struct descriptor_table dt; unsigned long sysenter_esp; vcpu->cpu = cpu; /* * Linux uses per-cpu TSS and GDT, so set these when switching * processors. */ vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */ kvm_get_gdt(&dt); vmcs_writel(HOST_GDTR_BASE, dt.base); /* 22.2.4 */ rdmsrl(<API key>, sysenter_esp); vmcs_writel(<API key>, sysenter_esp); /* 22.2.3 */ /* * Make sure the time stamp counter is monotonous. 
*/ rdtscll(tsc_this); if (tsc_this < vcpu->arch.host_tsc) { delta = vcpu->arch.host_tsc - tsc_this; new_offset = vmcs_read64(TSC_OFFSET) + delta; vmcs_write64(TSC_OFFSET, new_offset); } } } static void vmx_vcpu_put(struct kvm_vcpu *vcpu) { <API key>(to_vmx(vcpu)); } static void vmx_fpu_activate(struct kvm_vcpu *vcpu) { if (vcpu->fpu_active) return; vcpu->fpu_active = 1; vmcs_clear_bits(GUEST_CR0, X86_CR0_TS); if (vcpu->arch.cr0 & X86_CR0_TS) vmcs_set_bits(GUEST_CR0, X86_CR0_TS); <API key>(vcpu); } static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu) { if (!vcpu->fpu_active) return; vcpu->fpu_active = 0; vmcs_set_bits(GUEST_CR0, X86_CR0_TS); <API key>(vcpu); } static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) { return vmcs_readl(GUEST_RFLAGS); } static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { if (vcpu->arch.rmode.active) rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; vmcs_writel(GUEST_RFLAGS, rflags); } static void <API key>(struct kvm_vcpu *vcpu) { unsigned long rip; u32 interruptibility; rip = kvm_rip_read(vcpu); rip += vmcs_read32(<API key>); kvm_rip_write(vcpu, rip); /* * We emulated an instruction, so temporary interrupt blocking * should be removed, if set. */ interruptibility = vmcs_read32(<API key>); if (interruptibility & 3) vmcs_write32(<API key>, interruptibility & ~3); vcpu->arch.<API key> = 1; } static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error_code, u32 error_code) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (has_error_code) vmcs_write32(<API key>, error_code); if (vcpu->arch.rmode.active) { vmx->rmode.irq.pending = true; vmx->rmode.irq.vector = nr; vmx->rmode.irq.rip = kvm_rip_read(vcpu); if (nr == BP_VECTOR) vmx->rmode.irq.rip++; vmcs_write32(<API key>, nr | INTR_TYPE_SOFT_INTR | (has_error_code ? <API key> : 0) | <API key>); vmcs_write32(<API key>, 1); kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1); return; } vmcs_write32(<API key>, nr | INTR_TYPE_EXCEPTION | (has_error_code ? 
<API key> : 0) | <API key>); } static bool <API key>(struct kvm_vcpu *vcpu) { return false; } /* * Swap MSR entry in host/guest MSR entry array. */ #ifdef CONFIG_X86_64 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) { struct kvm_msr_entry tmp; tmp = vmx->guest_msrs[to]; vmx->guest_msrs[to] = vmx->guest_msrs[from]; vmx->guest_msrs[from] = tmp; tmp = vmx->host_msrs[to]; vmx->host_msrs[to] = vmx->host_msrs[from]; vmx->host_msrs[from] = tmp; } #endif /* * Set up the vmcs to automatically save and restore system * msrs. Don't touch the 64-bit msrs if the guest is in legacy * mode, as fiddling with msrs is very expensive. */ static void setup_msrs(struct vcpu_vmx *vmx) { int save_nmsrs; vmx_load_host_state(vmx); save_nmsrs = 0; #ifdef CONFIG_X86_64 if (is_long_mode(&vmx->vcpu)) { int index; index = __find_msr_index(vmx, MSR_SYSCALL_MASK); if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); index = __find_msr_index(vmx, MSR_LSTAR); if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); index = __find_msr_index(vmx, MSR_CSTAR); if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE); if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); /* * MSR_K6_STAR is only needed on long mode guests, and only * if efer.sce is enabled. 
*/ index = __find_msr_index(vmx, MSR_K6_STAR); if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE)) move_msr_up(vmx, index, save_nmsrs++); } #endif vmx->save_nmsrs = save_nmsrs; #ifdef CONFIG_X86_64 vmx-><API key> = __find_msr_index(vmx, MSR_KERNEL_GS_BASE); #endif vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER); } /* * reads and returns guest's timestamp counter "register" * guest_tsc = host_tsc + tsc_offset -- 21.3 */ static u64 guest_read_tsc(void) { u64 host_tsc, tsc_offset; rdtscll(host_tsc); tsc_offset = vmcs_read64(TSC_OFFSET); return host_tsc + tsc_offset; } /* * writes 'guest_tsc' into guest's timestamp counter "register" * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc */ static void guest_write_tsc(u64 guest_tsc) { u64 host_tsc; rdtscll(host_tsc); vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc); } /* * Reads an msr value (of 'msr_index') into 'pdata'. * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. */ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) { u64 data; struct kvm_msr_entry *msr; if (!pdata) { printk(KERN_ERR "BUG: get_msr called with NULL pdata\n"); return -EINVAL; } switch (msr_index) { #ifdef CONFIG_X86_64 case MSR_FS_BASE: data = vmcs_readl(GUEST_FS_BASE); break; case MSR_GS_BASE: data = vmcs_readl(GUEST_GS_BASE); break; case MSR_EFER: return kvm_get_msr_common(vcpu, msr_index, pdata); #endif case <API key>: data = guest_read_tsc(); break; case <API key>: data = vmcs_read32(GUEST_SYSENTER_CS); break; case <API key>: data = vmcs_readl(GUEST_SYSENTER_EIP); break; case <API key>: data = vmcs_readl(GUEST_SYSENTER_ESP); break; default: vmx_load_host_state(to_vmx(vcpu)); msr = find_msr_entry(to_vmx(vcpu), msr_index); if (msr) { data = msr->data; break; } return kvm_get_msr_common(vcpu, msr_index, pdata); } *pdata = data; return 0; } /* * Writes msr value into into the appropriate "register". * Returns 0 on success, non-0 otherwise. 
* Assumes vcpu_load() was already called. */ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct kvm_msr_entry *msr; int ret = 0; switch (msr_index) { #ifdef CONFIG_X86_64 case MSR_EFER: vmx_load_host_state(vmx); ret = kvm_set_msr_common(vcpu, msr_index, data); break; case MSR_FS_BASE: vmcs_writel(GUEST_FS_BASE, data); break; case MSR_GS_BASE: vmcs_writel(GUEST_GS_BASE, data); break; #endif case <API key>: vmcs_write32(GUEST_SYSENTER_CS, data); break; case <API key>: vmcs_writel(GUEST_SYSENTER_EIP, data); break; case <API key>: vmcs_writel(GUEST_SYSENTER_ESP, data); break; case <API key>: guest_write_tsc(data); break; case MSR_P6_PERFCTR0: case MSR_P6_PERFCTR1: case MSR_P6_EVNTSEL0: case MSR_P6_EVNTSEL1: /* * Just discard all writes to the performance counters; this * should keep both older linux and windows 64-bit guests * happy */ pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", msr_index, data); break; case MSR_IA32_CR_PAT: if (vmcs_config.vmentry_ctrl & <API key>) { vmcs_write64(GUEST_IA32_PAT, data); vcpu->arch.pat = data; break; } /* Otherwise falls through to kvm_set_msr_common */ default: vmx_load_host_state(vmx); msr = find_msr_entry(vmx, msr_index); if (msr) { msr->data = data; break; } ret = kvm_set_msr_common(vcpu, msr_index, data); } return ret; } static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) { __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); switch (reg) { case VCPU_REGS_RSP: vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); break; case VCPU_REGS_RIP: vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); break; default: break; } } static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg) { unsigned long dr7 = 0x400; int old_singlestep; old_singlestep = vcpu->guest_debug.singlestep; vcpu->guest_debug.enabled = dbg->enabled; if (vcpu->guest_debug.enabled) { int i; dr7 |= 0x200; /* exact */ for (i = 0; i < 4; ++i) { 
if (!dbg->breakpoints[i].enabled) continue; vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address; dr7 |= 2 << (i*2); /* global enable */ dr7 |= 0 << (i*4+16); /* execution breakpoint */ } vcpu->guest_debug.singlestep = dbg->singlestep; } else vcpu->guest_debug.singlestep = 0; if (old_singlestep && !vcpu->guest_debug.singlestep) { unsigned long flags; flags = vmcs_readl(GUEST_RFLAGS); flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF); vmcs_writel(GUEST_RFLAGS, flags); } <API key>(vcpu); vmcs_writel(GUEST_DR7, dr7); return 0; } static int vmx_get_irq(struct kvm_vcpu *vcpu) { if (!vcpu->arch.interrupt.pending) return -1; return vcpu->arch.interrupt.nr; } static __init int cpu_has_kvm_support(void) { return cpu_has_vmx(); } static __init int <API key>(void) { u64 msr; rdmsrl(<API key>, msr); return (msr & (<API key> | <API key>)) == <API key>; /* locked but not enabled */ } static void hardware_enable(void *garbage) { int cpu = <API key>(); u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); u64 old; INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu)); rdmsrl(<API key>, old); if ((old & (<API key> | <API key>)) != (<API key> | <API key>)) /* enable and lock */ wrmsrl(<API key>, old | <API key> | <API key>); write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */ asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr) : "memory", "cc"); } static void vmclear_local_vcpus(void) { int cpu = <API key>(); struct vcpu_vmx *vmx, *n; <API key>(vmx, n, &per_cpu(vcpus_on_cpu, cpu), local_vcpus_link) __vcpu_clear(vmx); } /* Just like cpu_vmxoff(), but with the <API key>() * tricks. 
*/ static void kvm_cpu_vmxoff(void) { asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc"); write_cr4(read_cr4() & ~X86_CR4_VMXE); } static void hardware_disable(void *garbage) { vmclear_local_vcpus(); kvm_cpu_vmxoff(); } static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr, u32 *result) { u32 vmx_msr_low, vmx_msr_high; u32 ctl = ctl_min | ctl_opt; rdmsr(msr, vmx_msr_low, vmx_msr_high); ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */ ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */ /* Ensure minimum (required) set of control bits are supported. */ if (ctl_min & ~ctl) return -EIO; *result = ctl; return 0; } static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) { u32 vmx_msr_low, vmx_msr_high; u32 min, opt, min2, opt2; u32 <API key> = 0; u32 <API key> = 0; u32 <API key> = 0; u32 _vmexit_control = 0; u32 _vmentry_control = 0; min = <API key> | <API key>; opt = <API key>; if (adjust_vmx_controls(min, opt, <API key>, &<API key>) < 0) return -EIO; min = <API key> | #ifdef CONFIG_X86_64 <API key> | <API key> | #endif <API key> | <API key> | <API key> | <API key> | <API key> | <API key>; opt = <API key> | <API key> | <API key>; if (adjust_vmx_controls(min, opt, <API key>, &<API key>) < 0) return -EIO; #ifdef CONFIG_X86_64 if ((<API key> & <API key>)) <API key> &= ~<API key> & ~<API key>; #endif if (<API key> & <API key>) { min2 = 0; opt2 = <API key> | <API key> | <API key> | <API key>; if (adjust_vmx_controls(min2, opt2, <API key>, &<API key>) < 0) return -EIO; } #ifndef CONFIG_X86_64 if (!(<API key> & <API key>)) <API key> &= ~<API key>; #endif if (<API key> & <API key>) { /* CR3 accesses and invlpg don't need to cause VM Exits when EPT enabled */ min &= ~(<API key> | <API key> | <API key>); if (adjust_vmx_controls(min, opt, <API key>, &<API key>) < 0) return -EIO; rdmsr(<API key>, vmx_capability.ept, vmx_capability.vpid); } min = 0; #ifdef CONFIG_X86_64 min |= <API key>; #endif opt = <API key> | <API key>; if 
(adjust_vmx_controls(min, opt, <API key>, &_vmexit_control) < 0) return -EIO; min = 0; opt = <API key>; if (adjust_vmx_controls(min, opt, <API key>, &_vmentry_control) < 0) return -EIO; rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high); /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ if ((vmx_msr_high & 0x1fff) > PAGE_SIZE) return -EIO; #ifdef CONFIG_X86_64 /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */ if (vmx_msr_high & (1u<<16)) return -EIO; #endif /* Require Write-Back (WB) memory type for VMCS accesses. */ if (((vmx_msr_high >> 18) & 15) != 6) return -EIO; vmcs_conf->size = vmx_msr_high & 0x1fff; vmcs_conf->order = get_order(vmcs_config.size); vmcs_conf->revision_id = vmx_msr_low; vmcs_conf->pin_based_exec_ctrl = <API key>; vmcs_conf->cpu_based_exec_ctrl = <API key>; vmcs_conf-><API key> = <API key>; vmcs_conf->vmexit_ctrl = _vmexit_control; vmcs_conf->vmentry_ctrl = _vmentry_control; return 0; } static struct vmcs *alloc_vmcs_cpu(int cpu) { int node = cpu_to_node(cpu); struct page *pages; struct vmcs *vmcs; pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order); if (!pages) return NULL; vmcs = page_address(pages); memset(vmcs, 0, vmcs_config.size); vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */ return vmcs; } static struct vmcs *alloc_vmcs(void) { return alloc_vmcs_cpu(<API key>()); } static void free_vmcs(struct vmcs *vmcs) { free_pages((unsigned long)vmcs, vmcs_config.order); } static void free_kvm_area(void) { int cpu; for_each_online_cpu(cpu) free_vmcs(per_cpu(vmxarea, cpu)); } static __init int alloc_kvm_area(void) { int cpu; for_each_online_cpu(cpu) { struct vmcs *vmcs; vmcs = alloc_vmcs_cpu(cpu); if (!vmcs) { free_kvm_area(); return -ENOMEM; } per_cpu(vmxarea, cpu) = vmcs; } return 0; } static __init int hardware_setup(void) { if (setup_vmcs_config(&vmcs_config) < 0) return -EIO; if (boot_cpu_has(X86_FEATURE_NX)) <API key>(EFER_NX); return alloc_kvm_area(); } static __exit void 
hardware_unsetup(void) { free_kvm_area(); } static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save) { struct <API key> *sf = &<API key>[seg]; if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) { vmcs_write16(sf->selector, save->selector); vmcs_writel(sf->base, save->base); vmcs_write32(sf->limit, save->limit); vmcs_write32(sf->ar_bytes, save->ar); } else { u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK) << AR_DPL_SHIFT; vmcs_write32(sf->ar_bytes, 0x93 | dpl); } } static void enter_pmode(struct kvm_vcpu *vcpu) { unsigned long flags; struct vcpu_vmx *vmx = to_vmx(vcpu); vmx->emulation_required = 1; vcpu->arch.rmode.active = 0; vmcs_writel(GUEST_TR_BASE, vcpu->arch.rmode.tr.base); vmcs_write32(GUEST_TR_LIMIT, vcpu->arch.rmode.tr.limit); vmcs_write32(GUEST_TR_AR_BYTES, vcpu->arch.rmode.tr.ar); flags = vmcs_readl(GUEST_RFLAGS); flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM); flags |= (vcpu->arch.rmode.save_iopl << IOPL_SHIFT); vmcs_writel(GUEST_RFLAGS, flags); vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); <API key>(vcpu); if (<API key>) return; fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->arch.rmode.es); fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->arch.rmode.ds); fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->arch.rmode.gs); fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->arch.rmode.fs); vmcs_write16(GUEST_SS_SELECTOR, 0); vmcs_write32(GUEST_SS_AR_BYTES, 0x93); vmcs_write16(GUEST_CS_SELECTOR, vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK); vmcs_write32(GUEST_CS_AR_BYTES, 0x9b); } static gva_t rmode_tss_base(struct kvm *kvm) { if (!kvm->arch.tss_addr) { gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3; return base_gfn << PAGE_SHIFT; } return kvm->arch.tss_addr; } static void fix_rmode_seg(int seg, struct kvm_save_segment *save) { struct <API key> *sf = &<API key>[seg]; save->selector = vmcs_read16(sf->selector); save->base = vmcs_readl(sf->base); save->limit = 
vmcs_read32(sf->limit); save->ar = vmcs_read32(sf->ar_bytes); vmcs_write16(sf->selector, save->base >> 4); vmcs_write32(sf->base, save->base & 0xfffff); vmcs_write32(sf->limit, 0xffff); vmcs_write32(sf->ar_bytes, 0xf3); } static void enter_rmode(struct kvm_vcpu *vcpu) { unsigned long flags; struct vcpu_vmx *vmx = to_vmx(vcpu); vmx->emulation_required = 1; vcpu->arch.rmode.active = 1; vcpu->arch.rmode.tr.base = vmcs_readl(GUEST_TR_BASE); vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm)); vcpu->arch.rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT); vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); vcpu->arch.rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES); vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); flags = vmcs_readl(GUEST_RFLAGS); vcpu->arch.rmode.save_iopl = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; vmcs_writel(GUEST_RFLAGS, flags); vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); <API key>(vcpu); if (<API key>) goto continue_rmode; vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4); vmcs_write32(GUEST_SS_LIMIT, 0xffff); vmcs_write32(GUEST_SS_AR_BYTES, 0xf3); vmcs_write32(GUEST_CS_AR_BYTES, 0xf3); vmcs_write32(GUEST_CS_LIMIT, 0xffff); if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000) vmcs_writel(GUEST_CS_BASE, 0xf0000); vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4); fix_rmode_seg(VCPU_SREG_ES, &vcpu->arch.rmode.es); fix_rmode_seg(VCPU_SREG_DS, &vcpu->arch.rmode.ds); fix_rmode_seg(VCPU_SREG_GS, &vcpu->arch.rmode.gs); fix_rmode_seg(VCPU_SREG_FS, &vcpu->arch.rmode.fs); continue_rmode: <API key>(vcpu); init_rmode(vcpu->kvm); } #ifdef CONFIG_X86_64 static void enter_lmode(struct kvm_vcpu *vcpu) { u32 guest_tr_ar; guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) { printk(KERN_DEBUG "%s: tss fixup for long mode. 
\n", __func__); vmcs_write32(GUEST_TR_AR_BYTES, (guest_tr_ar & ~AR_TYPE_MASK) | AR_TYPE_BUSY_64_TSS); } vcpu->arch.shadow_efer |= EFER_LMA; find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME; vmcs_write32(VM_ENTRY_CONTROLS, vmcs_read32(VM_ENTRY_CONTROLS) | VM_ENTRY_IA32E_MODE); } static void exit_lmode(struct kvm_vcpu *vcpu) { vcpu->arch.shadow_efer &= ~EFER_LMA; vmcs_write32(VM_ENTRY_CONTROLS, vmcs_read32(VM_ENTRY_CONTROLS) & ~VM_ENTRY_IA32E_MODE); } #endif static void vmx_flush_tlb(struct kvm_vcpu *vcpu) { vpid_sync_vcpu_all(to_vmx(vcpu)); if (vm_need_ept()) ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa)); } static void <API key>(struct kvm_vcpu *vcpu) { vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK; vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK; } static void ept_load_pdptrs(struct kvm_vcpu *vcpu) { if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { if (!load_pdptrs(vcpu, vcpu->arch.cr3)) { printk(KERN_ERR "EPT: Fail to load pdptrs!\n"); return; } vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]); vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]); vmcs_write64(GUEST_PDPTR2, vcpu->arch.pdptrs[2]); vmcs_write64(GUEST_PDPTR3, vcpu->arch.pdptrs[3]); } } static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); static void <API key>(unsigned long *hw_cr0, unsigned long cr0, struct kvm_vcpu *vcpu) { if (!(cr0 & X86_CR0_PG)) { /* From paging/starting to nonpaging */ vmcs_write32(<API key>, vmcs_read32(<API key>) | (<API key> | <API key>)); vcpu->arch.cr0 = cr0; vmx_set_cr4(vcpu, vcpu->arch.cr4); *hw_cr0 |= X86_CR0_PE | X86_CR0_PG; *hw_cr0 &= ~X86_CR0_WP; } else if (!is_paging(vcpu)) { /* From nonpaging to paging */ vmcs_write32(<API key>, vmcs_read32(<API key>) & ~(<API key> | <API key>)); vcpu->arch.cr0 = cr0; vmx_set_cr4(vcpu, vcpu->arch.cr4); if (!(vcpu->arch.cr0 & X86_CR0_WP)) *hw_cr0 &= ~X86_CR0_WP; } } static void <API key>(unsigned long *hw_cr4, struct kvm_vcpu *vcpu) { if (!is_paging(vcpu)) { *hw_cr4 &= 
~X86_CR4_PAE; *hw_cr4 |= X86_CR4_PSE; } else if (!(vcpu->arch.cr4 & X86_CR4_PAE)) *hw_cr4 &= ~X86_CR4_PAE; } static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { unsigned long hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | <API key>; vmx_fpu_deactivate(vcpu); if (vcpu->arch.rmode.active && (cr0 & X86_CR0_PE)) enter_pmode(vcpu); if (!vcpu->arch.rmode.active && !(cr0 & X86_CR0_PE)) enter_rmode(vcpu); #ifdef CONFIG_X86_64 if (vcpu->arch.shadow_efer & EFER_LME) { if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) enter_lmode(vcpu); if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) exit_lmode(vcpu); } #endif if (vm_need_ept()) <API key>(&hw_cr0, cr0, vcpu); vmcs_writel(CR0_READ_SHADOW, cr0); vmcs_writel(GUEST_CR0, hw_cr0); vcpu->arch.cr0 = cr0; if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE)) vmx_fpu_activate(vcpu); } static u64 construct_eptp(unsigned long root_hpa) { u64 eptp; /* TODO write the value reading from MSR */ eptp = VMX_EPT_DEFAULT_MT | VMX_EPT_DEFAULT_GAW << <API key>; eptp |= (root_hpa & PAGE_MASK); return eptp; } static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { unsigned long guest_cr3; u64 eptp; guest_cr3 = cr3; if (vm_need_ept()) { eptp = construct_eptp(cr3); vmcs_write64(EPT_POINTER, eptp); ept_sync_context(eptp); ept_load_pdptrs(vcpu); guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 : <API key>; } vmx_flush_tlb(vcpu); vmcs_writel(GUEST_CR3, guest_cr3); if (vcpu->arch.cr0 & X86_CR0_PE) vmx_fpu_deactivate(vcpu); } static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long hw_cr4 = cr4 | (vcpu->arch.rmode.active ? 
<API key> : <API key>); vcpu->arch.cr4 = cr4; if (vm_need_ept()) <API key>(&hw_cr4, vcpu); vmcs_writel(CR4_READ_SHADOW, cr4); vmcs_writel(GUEST_CR4, hw_cr4); } static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); vcpu->arch.shadow_efer = efer; if (!msr) return; if (efer & EFER_LMA) { vmcs_write32(VM_ENTRY_CONTROLS, vmcs_read32(VM_ENTRY_CONTROLS) | VM_ENTRY_IA32E_MODE); msr->data = efer; } else { vmcs_write32(VM_ENTRY_CONTROLS, vmcs_read32(VM_ENTRY_CONTROLS) & ~VM_ENTRY_IA32E_MODE); msr->data = efer & ~EFER_LME; } setup_msrs(vmx); } static u64 <API key>(struct kvm_vcpu *vcpu, int seg) { struct <API key> *sf = &<API key>[seg]; return vmcs_readl(sf->base); } static void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { struct <API key> *sf = &<API key>[seg]; u32 ar; var->base = vmcs_readl(sf->base); var->limit = vmcs_read32(sf->limit); var->selector = vmcs_read16(sf->selector); ar = vmcs_read32(sf->ar_bytes); if (ar & AR_UNUSABLE_MASK) ar = 0; var->type = ar & 15; var->s = (ar >> 4) & 1; var->dpl = (ar >> 5) & 3; var->present = (ar >> 7) & 1; var->avl = (ar >> 12) & 1; var->l = (ar >> 13) & 1; var->db = (ar >> 14) & 1; var->g = (ar >> 15) & 1; var->unusable = (ar >> 16) & 1; } static int vmx_get_cpl(struct kvm_vcpu *vcpu) { struct kvm_segment kvm_seg; if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */ return 0; if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */ return 3; vmx_get_segment(vcpu, &kvm_seg, VCPU_SREG_CS); return kvm_seg.selector & 3; } static u32 <API key>(struct kvm_segment *var) { u32 ar; if (var->unusable) ar = 1 << 16; else { ar = var->type & 15; ar |= (var->s & 1) << 4; ar |= (var->dpl & 3) << 5; ar |= (var->present & 1) << 7; ar |= (var->avl & 1) << 12; ar |= (var->l & 1) << 13; ar |= (var->db & 1) << 14; ar |= (var->g & 1) << 15; } if (ar == 0) /* a 0 value means unusable */ ar = AR_UNUSABLE_MASK; return 
ar; } static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { struct <API key> *sf = &<API key>[seg]; u32 ar; if (vcpu->arch.rmode.active && seg == VCPU_SREG_TR) { vcpu->arch.rmode.tr.selector = var->selector; vcpu->arch.rmode.tr.base = var->base; vcpu->arch.rmode.tr.limit = var->limit; vcpu->arch.rmode.tr.ar = <API key>(var); return; } vmcs_writel(sf->base, var->base); vmcs_write32(sf->limit, var->limit); vmcs_write16(sf->selector, var->selector); if (vcpu->arch.rmode.active && var->s) { /* * Hack real-mode segments into vm86 compatibility. */ if (var->base == 0xffff0000 && var->selector == 0xf000) vmcs_writel(sf->base, 0xf0000); ar = 0xf3; } else ar = <API key>(var); vmcs_write32(sf->ar_bytes, ar); } static void <API key>(struct kvm_vcpu *vcpu, int *db, int *l) { u32 ar = vmcs_read32(GUEST_CS_AR_BYTES); *db = (ar >> 14) & 1; *l = (ar >> 13) & 1; } static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) { dt->limit = vmcs_read32(GUEST_IDTR_LIMIT); dt->base = vmcs_readl(GUEST_IDTR_BASE); } static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) { vmcs_write32(GUEST_IDTR_LIMIT, dt->limit); vmcs_writel(GUEST_IDTR_BASE, dt->base); } static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) { dt->limit = vmcs_read32(GUEST_GDTR_LIMIT); dt->base = vmcs_readl(GUEST_GDTR_BASE); } static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) { vmcs_write32(GUEST_GDTR_LIMIT, dt->limit); vmcs_writel(GUEST_GDTR_BASE, dt->base); } static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) { struct kvm_segment var; u32 ar; vmx_get_segment(vcpu, &var, seg); ar = <API key>(&var); if (var.base != (var.selector << 4)) return false; if (var.limit != 0xffff) return false; if (ar != 0xf3) return false; return true; } static bool code_segment_valid(struct kvm_vcpu *vcpu) { struct kvm_segment cs; unsigned int cs_rpl; vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); cs_rpl = cs.selector & 
SELECTOR_RPL_MASK; if (~cs.type & (AR_TYPE_CODE_MASK|<API key>)) return false; if (!cs.s) return false; if (!(~cs.type & (AR_TYPE_CODE_MASK|<API key>))) { if (cs.dpl > cs_rpl) return false; } else if (cs.type & AR_TYPE_CODE_MASK) { if (cs.dpl != cs_rpl) return false; } if (!cs.present) return false; /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */ return true; } static bool stack_segment_valid(struct kvm_vcpu *vcpu) { struct kvm_segment ss; unsigned int ss_rpl; vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); ss_rpl = ss.selector & SELECTOR_RPL_MASK; if ((ss.type != 3) || (ss.type != 7)) return false; if (!ss.s) return false; if (ss.dpl != ss_rpl) /* DPL != RPL */ return false; if (!ss.present) return false; return true; } static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) { struct kvm_segment var; unsigned int rpl; vmx_get_segment(vcpu, &var, seg); rpl = var.selector & SELECTOR_RPL_MASK; if (!var.s) return false; if (!var.present) return false; if (~var.type & (AR_TYPE_CODE_MASK|<API key>)) { if (var.dpl < rpl) /* DPL < RPL */ return false; } /* TODO: Add other members to kvm_segment_field to allow checking for other access * rights flags */ return true; } static bool tr_valid(struct kvm_vcpu *vcpu) { struct kvm_segment tr; vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */ return false; if ((tr.type != 3) || (tr.type != 11)) /* TODO: Check if guest is in IA32e mode */ return false; if (!tr.present) return false; return true; } static bool ldtr_valid(struct kvm_vcpu *vcpu) { struct kvm_segment ldtr; vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */ return false; if (ldtr.type != 2) return false; if (!ldtr.present) return false; return true; } static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) { struct kvm_segment cs, ss; vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); return 
((cs.selector & SELECTOR_RPL_MASK) == (ss.selector & SELECTOR_RPL_MASK)); } /* * Check if guest state is valid. Returns true if valid, false if * not. * We assume that registers are always usable */ static bool guest_state_valid(struct kvm_vcpu *vcpu) { /* real mode guest state checks */ if (!(vcpu->arch.cr0 & X86_CR0_PE)) { if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_DS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_ES)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_FS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_GS)) return false; } else { /* protected mode guest state checks */ if (!cs_ss_rpl_check(vcpu)) return false; if (!code_segment_valid(vcpu)) return false; if (!stack_segment_valid(vcpu)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_DS)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_ES)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_FS)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_GS)) return false; if (!tr_valid(vcpu)) return false; if (!ldtr_valid(vcpu)) return false; } /* TODO: * - Add checks on RIP * - Add checks on RFLAGS */ return true; } static int init_rmode_tss(struct kvm *kvm) { gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT; u16 data = 0; int ret = 0; int r; r = <API key>(kvm, fn, 0, PAGE_SIZE); if (r < 0) goto out; data = TSS_BASE_SIZE + <API key>; r = <API key>(kvm, fn++, &data, <API key>, sizeof(u16)); if (r < 0) goto out; r = <API key>(kvm, fn++, 0, PAGE_SIZE); if (r < 0) goto out; r = <API key>(kvm, fn, 0, PAGE_SIZE); if (r < 0) goto out; data = ~0; r = <API key>(kvm, fn, &data, RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1, sizeof(u8)); if (r < 0) goto out; ret = 1; out: return ret; } static int <API key>(struct kvm *kvm) { int i, r, ret; pfn_t identity_map_pfn; u32 tmp; if (!vm_need_ept()) return 1; if (unlikely(!kvm->arch.<API key>)) { printk(KERN_ERR "EPT: 
identity-mapping pagetable " "haven't been allocated!\n"); return 0; } if (likely(kvm->arch.<API key>)) return 1; ret = 0; identity_map_pfn = <API key> >> PAGE_SHIFT; r = <API key>(kvm, identity_map_pfn, 0, PAGE_SIZE); if (r < 0) goto out; /* Set up identity-mapping pagetable for EPT in real mode */ for (i = 0; i < PT32_ENT_PER_PAGE; i++) { tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE); r = <API key>(kvm, identity_map_pfn, &tmp, i * sizeof(tmp), sizeof(tmp)); if (r < 0) goto out; } kvm->arch.<API key> = true; ret = 1; out: return ret; } static void seg_setup(int seg) { struct <API key> *sf = &<API key>[seg]; vmcs_write16(sf->selector, 0); vmcs_writel(sf->base, 0); vmcs_write32(sf->limit, 0xffff); vmcs_write32(sf->ar_bytes, 0xf3); } static int <API key>(struct kvm *kvm) { struct <API key> kvm_userspace_mem; int r = 0; down_write(&kvm->slots_lock); if (kvm->arch.apic_access_page) goto out; kvm_userspace_mem.slot = <API key>; kvm_userspace_mem.flags = 0; kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL; kvm_userspace_mem.memory_size = PAGE_SIZE; r = <API key>(kvm, &kvm_userspace_mem, 0); if (r) goto out; kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00); out: up_write(&kvm->slots_lock); return r; } static int <API key>(struct kvm *kvm) { struct <API key> kvm_userspace_mem; int r = 0; down_write(&kvm->slots_lock); if (kvm->arch.<API key>) goto out; kvm_userspace_mem.slot = <API key>; kvm_userspace_mem.flags = 0; kvm_userspace_mem.guest_phys_addr = <API key>; kvm_userspace_mem.memory_size = PAGE_SIZE; r = <API key>(kvm, &kvm_userspace_mem, 0); if (r) goto out; kvm->arch.<API key> = gfn_to_page(kvm, <API key> >> PAGE_SHIFT); out: up_write(&kvm->slots_lock); return r; } static void allocate_vpid(struct vcpu_vmx *vmx) { int vpid; vmx->vpid = 0; if (!enable_vpid || !cpu_has_vmx_vpid()) return; spin_lock(&vmx_vpid_lock); vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS); if (vpid < VMX_NR_VPIDS) { vmx->vpid 
= vpid; __set_bit(vpid, vmx_vpid_bitmap); } spin_unlock(&vmx_vpid_lock); } static void <API key>(struct page *msr_bitmap, u32 msr) { void *va; if (!<API key>()) return; /* * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals * have the write-low and read-high bitmap offsets the wrong way round. * We can control MSRs <API key> and <API key>. */ va = kmap(msr_bitmap); if (msr <= 0x1fff) { __clear_bit(msr, va + 0x000); /* read-low */ __clear_bit(msr, va + 0x800); /* write-low */ } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { msr &= 0x1fff; __clear_bit(msr, va + 0x400); /* read-high */ __clear_bit(msr, va + 0xc00); /* write-high */ } kunmap(msr_bitmap); } /* * Sets up the vmcs for emulated real mode. */ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) { u32 host_sysenter_cs, msr_low, msr_high; u32 junk; u64 host_pat; unsigned long a; struct descriptor_table dt; int i; unsigned long kvm_vmx_return; u32 exec_control; vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a)); vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b)); if (<API key>()) vmcs_write64(MSR_BITMAP, page_to_phys(vmx_msr_bitmap)); vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ /* Control */ vmcs_write32(<API key>, vmcs_config.pin_based_exec_ctrl); exec_control = vmcs_config.cpu_based_exec_ctrl; if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) { exec_control &= ~<API key>; #ifdef CONFIG_X86_64 exec_control |= <API key> | <API key>; #endif } if (!vm_need_ept()) exec_control |= <API key> | <API key> | <API key>; vmcs_write32(<API key>, exec_control); if (<API key>()) { exec_control = vmcs_config.<API key>; if (!<API key>(vmx->vcpu.kvm)) exec_control &= ~<API key>; if (vmx->vpid == 0) exec_control &= ~<API key>; if (!vm_need_ept()) exec_control &= ~<API key>; vmcs_write32(<API key>, exec_control); } vmcs_write32(<API key>, !!bypass_guest_pf); vmcs_write32(<API key>, !!bypass_guest_pf); vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ vmcs_writel(HOST_CR0, read_cr0()); /* 22.2.3 */ 
vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs()); /* 22.2.4 */ vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs()); /* 22.2.4 */ vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ #ifdef CONFIG_X86_64 rdmsrl(MSR_FS_BASE, a); vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */ rdmsrl(MSR_GS_BASE, a); vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */ #else vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */ vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */ #endif vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ kvm_get_idt(&dt); vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */ asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return)); vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */ vmcs_write32(<API key>, 0); vmcs_write32(<API key>, 0); vmcs_write32(<API key>, 0); rdmsr(<API key>, host_sysenter_cs, junk); vmcs_write32(<API key>, host_sysenter_cs); rdmsrl(<API key>, a); vmcs_writel(<API key>, a); /* 22.2.3 */ rdmsrl(<API key>, a); vmcs_writel(<API key>, a); /* 22.2.3 */ if (vmcs_config.vmexit_ctrl & <API key>) { rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high); host_pat = msr_low | ((u64) msr_high << 32); vmcs_write64(HOST_IA32_PAT, host_pat); } if (vmcs_config.vmentry_ctrl & <API key>) { rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high); host_pat = msr_low | ((u64) msr_high << 32); /* Write the default value follow host pat */ vmcs_write64(GUEST_IA32_PAT, host_pat); /* Keep arch.pat sync with GUEST_IA32_PAT */ vmx->vcpu.arch.pat = host_pat; } for (i = 0; i < NR_VMX_MSR; ++i) { u32 index = vmx_msr_index[i]; u32 data_low, data_high; u64 data; int j = vmx->nmsrs; if (rdmsr_safe(index, &data_low, &data_high) < 0) continue; if (wrmsr_safe(index, data_low, data_high) < 0) continue; data = data_low | 
((u64)data_high << 32); vmx->host_msrs[j].index = index; vmx->host_msrs[j].reserved = 0; vmx->host_msrs[j].data = data; vmx->guest_msrs[j] = vmx->host_msrs[j]; ++vmx->nmsrs; } vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl); /* 22.2.1, 20.8.1 */ vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl); vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK); return 0; } static int init_rmode(struct kvm *kvm) { if (!init_rmode_tss(kvm)) return 0; if (!<API key>(kvm)) return 0; return 1; } static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u64 msr; int ret; vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)); down_read(&vcpu->kvm->slots_lock); if (!init_rmode(vmx->vcpu.kvm)) { ret = -ENOMEM; goto out; } vmx->vcpu.arch.rmode.active = 0; vmx->soft_vnmi_blocked = 0; vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); kvm_set_cr8(&vmx->vcpu, 0); msr = 0xfee00000 | <API key>; if (vmx->vcpu.vcpu_id == 0) msr |= <API key>; kvm_set_apic_base(&vmx->vcpu, msr); fx_init(&vmx->vcpu); seg_setup(VCPU_SREG_CS); /* * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh. 
*/ if (vmx->vcpu.vcpu_id == 0) { vmcs_write16(GUEST_CS_SELECTOR, 0xf000); vmcs_writel(GUEST_CS_BASE, 0x000f0000); } else { vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8); vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12); } seg_setup(VCPU_SREG_DS); seg_setup(VCPU_SREG_ES); seg_setup(VCPU_SREG_FS); seg_setup(VCPU_SREG_GS); seg_setup(VCPU_SREG_SS); vmcs_write16(GUEST_TR_SELECTOR, 0); vmcs_writel(GUEST_TR_BASE, 0); vmcs_write32(GUEST_TR_LIMIT, 0xffff); vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); vmcs_write16(GUEST_LDTR_SELECTOR, 0); vmcs_writel(GUEST_LDTR_BASE, 0); vmcs_write32(GUEST_LDTR_LIMIT, 0xffff); vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082); vmcs_write32(GUEST_SYSENTER_CS, 0); vmcs_writel(GUEST_SYSENTER_ESP, 0); vmcs_writel(GUEST_SYSENTER_EIP, 0); vmcs_writel(GUEST_RFLAGS, 0x02); if (vmx->vcpu.vcpu_id == 0) kvm_rip_write(vcpu, 0xfff0); else kvm_rip_write(vcpu, 0); kvm_register_write(vcpu, VCPU_REGS_RSP, 0); /* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */ vmcs_writel(GUEST_DR7, 0x400); vmcs_writel(GUEST_GDTR_BASE, 0); vmcs_write32(GUEST_GDTR_LIMIT, 0xffff); vmcs_writel(GUEST_IDTR_BASE, 0); vmcs_write32(GUEST_IDTR_LIMIT, 0xffff); vmcs_write32(<API key>, 0); vmcs_write32(<API key>, 0); vmcs_write32(<API key>, 0); guest_write_tsc(0); /* Special registers */ vmcs_write64(GUEST_IA32_DEBUGCTL, 0); setup_msrs(vmx); vmcs_write32(<API key>, 0); /* 22.2.1 */ if (<API key>()) { vmcs_write64(<API key>, 0); if (vm_need_tpr_shadow(vmx->vcpu.kvm)) vmcs_write64(<API key>, page_to_phys(vmx->vcpu.arch.apic->regs_page)); vmcs_write32(TPR_THRESHOLD, 0); } if (<API key>(vmx->vcpu.kvm)) vmcs_write64(APIC_ACCESS_ADDR, page_to_phys(vmx->vcpu.kvm->arch.apic_access_page)); if (vmx->vpid != 0) vmcs_write16(<API key>, vmx->vpid); vmx->vcpu.arch.cr0 = 0x60000010; vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */ vmx_set_cr4(&vmx->vcpu, 0); vmx_set_efer(&vmx->vcpu, 0); vmx_fpu_activate(&vmx->vcpu); <API key>(&vmx->vcpu); vpid_sync_vcpu_all(vmx); 
ret = 0; /* HACK: Don't enable emulation on guest boot/reset */ vmx->emulation_required = 0; out: up_read(&vcpu->kvm->slots_lock); return ret; } static void enable_irq_window(struct kvm_vcpu *vcpu) { u32 <API key>; <API key> = vmcs_read32(<API key>); <API key> |= <API key>; vmcs_write32(<API key>, <API key>); } static void enable_nmi_window(struct kvm_vcpu *vcpu) { u32 <API key>; if (!<API key>()) { enable_irq_window(vcpu); return; } <API key> = vmcs_read32(<API key>); <API key> |= <API key>; vmcs_write32(<API key>, <API key>); } static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq) { struct vcpu_vmx *vmx = to_vmx(vcpu); KVMTRACE_1D(INJ_VIRQ, vcpu, (u32)irq, handler); ++vcpu->stat.irq_injections; if (vcpu->arch.rmode.active) { vmx->rmode.irq.pending = true; vmx->rmode.irq.vector = irq; vmx->rmode.irq.rip = kvm_rip_read(vcpu); vmcs_write32(<API key>, irq | INTR_TYPE_SOFT_INTR | <API key>); vmcs_write32(<API key>, 1); kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1); return; } vmcs_write32(<API key>, irq | INTR_TYPE_EXT_INTR | <API key>); } static void vmx_inject_nmi(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (!<API key>()) { /* * Tracking the NMI-blocked state in software is built upon * finding the next open IRQ window. This, in turn, depends on * well-behaving guests: They have to keep IRQs disabled at * least as long as the NMI handler runs. Otherwise we may * cause NMI nesting, maybe breaking the guest. But as this is * highly unlikely, we can live with the residual risk. 
*/ vmx->soft_vnmi_blocked = 1; vmx->vnmi_blocked_time = 0; } ++vcpu->stat.nmi_injections; if (vcpu->arch.rmode.active) { vmx->rmode.irq.pending = true; vmx->rmode.irq.vector = NMI_VECTOR; vmx->rmode.irq.rip = kvm_rip_read(vcpu); vmcs_write32(<API key>, NMI_VECTOR | INTR_TYPE_SOFT_INTR | <API key>); vmcs_write32(<API key>, 1); kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1); return; } vmcs_write32(<API key>, INTR_TYPE_NMI_INTR | <API key> | NMI_VECTOR); } static void <API key>(struct kvm_vcpu *vcpu) { u32 guest_intr = vmcs_read32(<API key>); vcpu->arch.nmi_window_open = !(guest_intr & (<API key> | <API key> | <API key>)); if (!<API key>() && to_vmx(vcpu)->soft_vnmi_blocked) vcpu->arch.nmi_window_open = 0; vcpu->arch.<API key> = ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && !(guest_intr & (<API key> | <API key>))); } static void kvm_do_inject_irq(struct kvm_vcpu *vcpu) { int word_index = __ffs(vcpu->arch.irq_summary); int bit_index = __ffs(vcpu->arch.irq_pending[word_index]); int irq = word_index * BITS_PER_LONG + bit_index; clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]); if (!vcpu->arch.irq_pending[word_index]) clear_bit(word_index, &vcpu->arch.irq_summary); kvm_queue_interrupt(vcpu, irq); } static void <API key>(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { <API key>(vcpu); if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) { if (vcpu->arch.interrupt.pending) { enable_nmi_window(vcpu); } else if (vcpu->arch.nmi_window_open) { vcpu->arch.nmi_pending = false; vcpu->arch.nmi_injected = true; } else { enable_nmi_window(vcpu); return; } } if (vcpu->arch.nmi_injected) { vmx_inject_nmi(vcpu); if (vcpu->arch.nmi_pending) enable_nmi_window(vcpu); else if (vcpu->arch.irq_summary || kvm_run-><API key>) enable_irq_window(vcpu); return; } if (vcpu->arch.<API key>) { if (vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending) kvm_do_inject_irq(vcpu); if (vcpu->arch.interrupt.pending) vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr); } if (!vcpu->arch.<API key> && 
(vcpu->arch.irq_summary || kvm_run-><API key>)) enable_irq_window(vcpu); } static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) { int ret; struct <API key> tss_mem = { .slot = TSS_PRIVATE_MEMSLOT, .guest_phys_addr = addr, .memory_size = PAGE_SIZE * 3, .flags = 0, }; ret = <API key>(kvm, &tss_mem, 0); if (ret) return ret; kvm->arch.tss_addr = addr; return 0; } static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu) { struct kvm_guest_debug *dbg = &vcpu->guest_debug; set_debugreg(dbg->bp[0], 0); set_debugreg(dbg->bp[1], 1); set_debugreg(dbg->bp[2], 2); set_debugreg(dbg->bp[3], 3); if (dbg->singlestep) { unsigned long flags; flags = vmcs_readl(GUEST_RFLAGS); flags |= X86_EFLAGS_TF | X86_EFLAGS_RF; vmcs_writel(GUEST_RFLAGS, flags); } } static int <API key>(struct kvm_vcpu *vcpu, int vec, u32 err_code) { /* * Instruction with address size override prefix opcode 0x67 * Cause the #SS fault with 0 error code in VM86 mode. */ if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE) return 1; /* * Forward all other exceptions that are valid in real mode. * FIXME: Breaks guest debugging in real mode, needs to be fixed with * the required debugging infrastructure rework. 
*/ switch (vec) { case DE_VECTOR: case DB_VECTOR: case BP_VECTOR: case OF_VECTOR: case BR_VECTOR: case UD_VECTOR: case DF_VECTOR: case SS_VECTOR: case GP_VECTOR: case MF_VECTOR: kvm_queue_exception(vcpu, vec); return 1; } return 0; } static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 intr_info, error_code; unsigned long cr2, rip; u32 vect_info; enum emulation_result er; vect_info = vmx->idt_vectoring_info; intr_info = vmcs_read32(VM_EXIT_INTR_INFO); if ((vect_info & <API key>) && !is_page_fault(intr_info)) printk(KERN_ERR "%s: unexpected, vectoring info 0x%x " "intr info 0x%x\n", __func__, vect_info, intr_info); if (!irqchip_in_kernel(vcpu->kvm) && <API key>(vect_info)) { int irq = vect_info & <API key>; set_bit(irq, vcpu->arch.irq_pending); set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary); } if ((intr_info & <API key>) == INTR_TYPE_NMI_INTR) return 1; /* already handled by vmx_vcpu_run() */ if (is_no_device(intr_info)) { vmx_fpu_activate(vcpu); return 1; } if (is_invalid_opcode(intr_info)) { er = emulate_instruction(vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD); if (er != EMULATE_DONE) kvm_queue_exception(vcpu, UD_VECTOR); return 1; } error_code = 0; rip = kvm_rip_read(vcpu); if (intr_info & <API key>) error_code = vmcs_read32(<API key>); if (is_page_fault(intr_info)) { /* EPT won't cause page fault directly */ if (vm_need_ept()) BUG(); cr2 = vmcs_readl(EXIT_QUALIFICATION); KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2, (u32)((u64)cr2 >> 32), handler); if (vcpu->arch.interrupt.pending || vcpu->arch.exception.pending) <API key>(vcpu, cr2); return kvm_mmu_page_fault(vcpu, cr2, error_code); } if (vcpu->arch.rmode.active && <API key>(vcpu, intr_info & <API key>, error_code)) { if (vcpu->arch.halt_request) { vcpu->arch.halt_request = 0; return kvm_emulate_halt(vcpu); } return 1; } if ((intr_info & (<API key> | <API key>)) == (INTR_TYPE_EXCEPTION | 1)) { kvm_run->exit_reason = KVM_EXIT_DEBUG; return 
0; } kvm_run->exit_reason = KVM_EXIT_EXCEPTION; kvm_run->ex.exception = intr_info & <API key>; kvm_run->ex.error_code = error_code; return 0; } static int <API key>(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { ++vcpu->stat.irq_exits; KVMTRACE_1D(INTR, vcpu, vmcs_read32(VM_EXIT_INTR_INFO), handler); return 1; } static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; return 0; } static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { unsigned long exit_qualification; int size, down, in, string, rep; unsigned port; ++vcpu->stat.io_exits; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); string = (exit_qualification & 16) != 0; if (string) { if (emulate_instruction(vcpu, kvm_run, 0, 0, 0) == EMULATE_DO_MMIO) return 0; return 1; } size = (exit_qualification & 7) + 1; in = (exit_qualification & 8) != 0; down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0; rep = (exit_qualification & 32) != 0; port = exit_qualification >> 16; <API key>(vcpu); return kvm_emulate_pio(vcpu, kvm_run, in, size, port); } static void vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) { /* * Patch in the VMCALL instruction: */ hypercall[0] = 0x0f; hypercall[1] = 0x01; hypercall[2] = 0xc1; } static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { unsigned long exit_qualification; int cr; int reg; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); cr = exit_qualification & 15; reg = (exit_qualification >> 8) & 15; switch ((exit_qualification >> 4) & 3) { case 0: /* mov to cr */ KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)kvm_register_read(vcpu, reg), (u32)((u64)kvm_register_read(vcpu, reg) >> 32), handler); switch (cr) { case 0: kvm_set_cr0(vcpu, kvm_register_read(vcpu, reg)); <API key>(vcpu); return 1; case 3: kvm_set_cr3(vcpu, kvm_register_read(vcpu, reg)); <API key>(vcpu); return 1; case 4: kvm_set_cr4(vcpu, kvm_register_read(vcpu, reg)); <API key>(vcpu); return 1; case 
8: kvm_set_cr8(vcpu, kvm_register_read(vcpu, reg)); <API key>(vcpu); if (irqchip_in_kernel(vcpu->kvm)) return 1; kvm_run->exit_reason = KVM_EXIT_SET_TPR; return 0; }; break; case 2: /* clts */ vmx_fpu_deactivate(vcpu); vcpu->arch.cr0 &= ~X86_CR0_TS; vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); vmx_fpu_activate(vcpu); KVMTRACE_0D(CLTS, vcpu, handler); <API key>(vcpu); return 1; case 1: /*mov from cr*/ switch (cr) { case 3: kvm_register_write(vcpu, reg, vcpu->arch.cr3); KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)kvm_register_read(vcpu, reg), (u32)((u64)kvm_register_read(vcpu, reg) >> 32), handler); <API key>(vcpu); return 1; case 8: kvm_register_write(vcpu, reg, kvm_get_cr8(vcpu)); KVMTRACE_2D(CR_READ, vcpu, (u32)cr, (u32)kvm_register_read(vcpu, reg), handler); <API key>(vcpu); return 1; } break; case 3: /* lmsw */ kvm_lmsw(vcpu, (exit_qualification >> <API key>) & 0x0f); <API key>(vcpu); return 1; default: break; } kvm_run->exit_reason = 0; pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n", (int)(exit_qualification >> 4) & 3, cr); return 0; } static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { unsigned long exit_qualification; unsigned long val; int dr, reg; /* * FIXME: this code assumes the host is debugging the guest. * need to deal with guest debugging itself too. 
*/ exit_qualification = vmcs_readl(EXIT_QUALIFICATION); dr = exit_qualification & 7; reg = (exit_qualification >> 8) & 15; if (exit_qualification & 16) { /* mov from dr */ switch (dr) { case 6: val = 0xffff0ff0; break; case 7: val = 0x400; break; default: val = 0; } kvm_register_write(vcpu, reg, val); KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler); } else { /* mov to dr */ } <API key>(vcpu); return 1; } static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { kvm_emulate_cpuid(vcpu); return 1; } static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; u64 data; if (vmx_get_msr(vcpu, ecx, &data)) { kvm_inject_gp(vcpu, 0); return 1; } KVMTRACE_3D(MSR_READ, vcpu, ecx, (u32)data, (u32)(data >> 32), handler); /* FIXME: handling of bits 32:63 of rax, rdx */ vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u; vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u; <API key>(vcpu); return 1; } static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); KVMTRACE_3D(MSR_WRITE, vcpu, ecx, (u32)data, (u32)(data >> 32), handler); if (vmx_set_msr(vcpu, ecx, data) != 0) { kvm_inject_gp(vcpu, 0); return 1; } <API key>(vcpu); return 1; } static int <API key>(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { return 1; } static int <API key>(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { u32 <API key>; /* clear pending irq */ <API key> = vmcs_read32(<API key>); <API key> &= ~<API key>; vmcs_write32(<API key>, <API key>); KVMTRACE_0D(PEND_INTR, vcpu, handler); ++vcpu->stat.irq_window_exits; /* * If the user space waits to inject interrupts, exit as soon as * possible */ if (kvm_run-><API key> && !vcpu->arch.irq_summary) { kvm_run->exit_reason = <API key>; return 0; } return 1; } static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { 
<API key>(vcpu); return kvm_emulate_halt(vcpu); } static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { <API key>(vcpu); <API key>(vcpu); return 1; } static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { u64 exit_qualification = vmcs_read64(EXIT_QUALIFICATION); kvm_mmu_invlpg(vcpu, exit_qualification); <API key>(vcpu); return 1; } static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { <API key>(vcpu); /* TODO: Add support for VT-d/pass-through device */ return 1; } static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { u64 exit_qualification; enum emulation_result er; unsigned long offset; exit_qualification = vmcs_read64(EXIT_QUALIFICATION); offset = exit_qualification & 0xffful; er = emulate_instruction(vcpu, kvm_run, 0, 0, 0); if (er != EMULATE_DONE) { printk(KERN_ERR "Fail to handle apic access vmexit! Offset is 0x%lx\n", offset); return -ENOTSUPP; } return 1; } static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long exit_qualification; u16 tss_selector; int reason; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); reason = (u32)exit_qualification >> 30; if (reason == TASK_SWITCH_GATE && vmx->vcpu.arch.nmi_injected && (vmx->idt_vectoring_info & <API key>) && (vmx->idt_vectoring_info & <API key>) == INTR_TYPE_NMI_INTR) { vcpu->arch.nmi_injected = false; if (<API key>()) vmcs_set_bits(<API key>, <API key>); } tss_selector = exit_qualification; return kvm_task_switch(vcpu, tss_selector, reason); } static int <API key>(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { u64 exit_qualification; enum emulation_result er; gpa_t gpa; unsigned long hva; int gla_validity; int r; exit_qualification = vmcs_read64(EXIT_QUALIFICATION); if (exit_qualification & (1 << 6)) { printk(KERN_ERR "EPT: GPA exceeds GAW!\n"); return -ENOTSUPP; } gla_validity = (exit_qualification >> 7) & 0x3; if (gla_validity != 0x3 && 
gla_validity != 0x1 && gla_validity != 0) { printk(KERN_ERR "EPT: Handling EPT violation failed!\n"); printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n", (long unsigned int)vmcs_read64(<API key>), (long unsigned int)vmcs_read64(<API key>)); printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n", (long unsigned int)exit_qualification); kvm_run->exit_reason = KVM_EXIT_UNKNOWN; kvm_run->hw.<API key> = 0; return -ENOTSUPP; } gpa = vmcs_read64(<API key>); hva = gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT); if (!kvm_is_error_hva(hva)) { r = kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0); if (r < 0) { printk(KERN_ERR "EPT: Not enough memory!\n"); return -ENOMEM; } return 1; } else { /* must be MMIO */ er = emulate_instruction(vcpu, kvm_run, 0, 0, 0); if (er == EMULATE_FAIL) { printk(KERN_ERR "EPT: Fail to handle EPT violation vmexit!er is %d\n", er); printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n", (long unsigned int)vmcs_read64(<API key>), (long unsigned int)vmcs_read64(<API key>)); printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n", (long unsigned int)exit_qualification); return -ENOTSUPP; } else if (er == EMULATE_DO_MMIO) return 0; } return 1; } static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { u32 <API key>; /* clear pending NMI */ <API key> = vmcs_read32(<API key>); <API key> &= ~<API key>; vmcs_write32(<API key>, <API key>); ++vcpu->stat.nmi_window_exits; return 1; } static void <API key>(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct vcpu_vmx *vmx = to_vmx(vcpu); int err; preempt_enable(); local_irq_enable(); while (!guest_state_valid(vcpu)) { err = emulate_instruction(vcpu, kvm_run, 0, 0, 0); if (err == EMULATE_DO_MMIO) break; if (err != EMULATE_DONE) { <API key>(vcpu, "emulation failure"); return; } if (signal_pending(current)) break; if (need_resched()) schedule(); } local_irq_disable(); preempt_disable(); /* Guest state should be valid now except if we need to * emulate an MMIO */ if (guest_state_valid(vcpu)) 
vmx->emulation_required = 0; } /* * The exit handlers return 1 if the exit was handled fully and guest execution * may resume. Otherwise they set the kvm_run parameter to indicate what needs * to be done to userspace and return 0. */ static int (*<API key>[])(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) = { [<API key>] = handle_exception, [<API key>] = <API key>, [<API key>] = handle_triple_fault, [<API key>] = handle_nmi_window, [<API key>] = handle_io, [<API key>] = handle_cr, [<API key>] = handle_dr, [EXIT_REASON_CPUID] = handle_cpuid, [<API key>] = handle_rdmsr, [<API key>] = handle_wrmsr, [<API key>] = <API key>, [EXIT_REASON_HLT] = handle_halt, [EXIT_REASON_INVLPG] = handle_invlpg, [EXIT_REASON_VMCALL] = handle_vmcall, [<API key>] = <API key>, [<API key>] = handle_apic_access, [EXIT_REASON_WBINVD] = handle_wbinvd, [<API key>] = handle_task_switch, [<API key>] = <API key>, }; static const int <API key> = ARRAY_SIZE(<API key>); /* * The guest has exited. See if we can fix it or if we need userspace * assistance. */ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) { u32 exit_reason = vmcs_read32(VM_EXIT_REASON); struct vcpu_vmx *vmx = to_vmx(vcpu); u32 vectoring_info = vmx->idt_vectoring_info; KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu), (u32)((u64)kvm_rip_read(vcpu) >> 32), entryexit); /* If we need to emulate an MMIO from <API key> * we just return 0 */ if (vmx->emulation_required && <API key>) return 0; /* Access CR3 don't cause VMExit in paging mode, so we need * to sync with guest real CR3. 
*/ if (vm_need_ept() && is_paging(vcpu)) { vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); ept_load_pdptrs(vcpu); } if (unlikely(vmx->fail)) { kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; kvm_run->fail_entry.<API key> = vmcs_read32(<API key>); return 0; } if ((vectoring_info & <API key>) && (exit_reason != <API key> && exit_reason != <API key> && exit_reason != <API key>)) printk(KERN_WARNING "%s: unexpected, valid vectoring info " "(0x%x) and exit reason is 0x%x\n", __func__, vectoring_info, exit_reason); if (unlikely(!<API key>() && vmx->soft_vnmi_blocked)) { if (vcpu->arch.<API key>) { vmx->soft_vnmi_blocked = 0; vcpu->arch.nmi_window_open = 1; } else if (vmx->vnmi_blocked_time > 1000000000LL && vcpu->arch.nmi_pending) { /* * This CPU don't support us in finding the end of an * NMI-blocked window if the guest runs with IRQs * disabled. So we pull the trigger after 1 s of * futile waiting, but inform the user about this. */ printk(KERN_WARNING "%s: Breaking out of NMI-blocked " "state on VCPU %d after 1 s timeout\n", __func__, vcpu->vcpu_id); vmx->soft_vnmi_blocked = 0; vmx->vcpu.arch.nmi_window_open = 1; } } if (exit_reason < <API key> && <API key>[exit_reason]) return <API key>[exit_reason](vcpu, kvm_run); else { kvm_run->exit_reason = KVM_EXIT_UNKNOWN; kvm_run->hw.<API key> = exit_reason; } return 0; } static void <API key>(struct kvm_vcpu *vcpu) { int max_irr, tpr; if (!vm_need_tpr_shadow(vcpu->kvm)) return; if (!kvm_lapic_enabled(vcpu) || ((max_irr = <API key>(vcpu)) == -1)) { vmcs_write32(TPR_THRESHOLD, 0); return; } tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4; vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? 
tpr >> 4 : max_irr >> 4); } static void <API key>(struct vcpu_vmx *vmx) { u32 exit_intr_info; u32 idt_vectoring_info; bool unblock_nmi; u8 vector; int type; bool idtv_info_valid; u32 error; exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); if (<API key>()) { unblock_nmi = (exit_intr_info & <API key>) != 0; vector = exit_intr_info & <API key>; /* * SDM 3: 25.7.1.2 * Re-set bit "block by NMI" before VM entry if vmexit caused by * a guest IRET fault. */ if (unblock_nmi && vector != DF_VECTOR) vmcs_set_bits(<API key>, <API key>); } else if (unlikely(vmx->soft_vnmi_blocked)) vmx->vnmi_blocked_time += ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time)); idt_vectoring_info = vmx->idt_vectoring_info; idtv_info_valid = idt_vectoring_info & <API key>; vector = idt_vectoring_info & <API key>; type = idt_vectoring_info & <API key>; if (vmx->vcpu.arch.nmi_injected) { /* * SDM 3: 25.7.1.2 * Clear bit "block by NMI" before VM entry if a NMI delivery * faulted. */ if (idtv_info_valid && type == INTR_TYPE_NMI_INTR) vmcs_clear_bits(<API key>, <API key>); else vmx->vcpu.arch.nmi_injected = false; } <API key>(&vmx->vcpu); if (idtv_info_valid && type == INTR_TYPE_EXCEPTION) { if (idt_vectoring_info & <API key>) { error = vmcs_read32(<API key>); <API key>(&vmx->vcpu, vector, error); } else kvm_queue_exception(&vmx->vcpu, vector); vmx->idt_vectoring_info = 0; } <API key>(&vmx->vcpu); if (idtv_info_valid && type == INTR_TYPE_EXT_INTR) { kvm_queue_interrupt(&vmx->vcpu, vector); vmx->idt_vectoring_info = 0; } } static void vmx_intr_assist(struct kvm_vcpu *vcpu) { <API key>(vcpu); <API key>(vcpu); if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) { if (vcpu->arch.interrupt.pending) { enable_nmi_window(vcpu); } else if (vcpu->arch.nmi_window_open) { vcpu->arch.nmi_pending = false; vcpu->arch.nmi_injected = true; } else { enable_nmi_window(vcpu); return; } } if (vcpu->arch.nmi_injected) { vmx_inject_nmi(vcpu); if (vcpu->arch.nmi_pending) enable_nmi_window(vcpu); else if (<API key>(vcpu)) 
enable_irq_window(vcpu); return; } if (!vcpu->arch.interrupt.pending && <API key>(vcpu)) { if (vcpu->arch.<API key>) kvm_queue_interrupt(vcpu, <API key>(vcpu)); else enable_irq_window(vcpu); } if (vcpu->arch.interrupt.pending) { vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr); if (<API key>(vcpu)) enable_irq_window(vcpu); } } /* * Failure to inject an interrupt should give us the information * in <API key>. However, if the failure occurs * when fetching the interrupt redirection bitmap in the real-mode * tss, this doesn't happen. So we do it ourselves. */ static void fixup_rmode_irq(struct vcpu_vmx *vmx) { vmx->rmode.irq.pending = 0; if (kvm_rip_read(&vmx->vcpu) + 1 != vmx->rmode.irq.rip) return; kvm_rip_write(&vmx->vcpu, vmx->rmode.irq.rip); if (vmx->idt_vectoring_info & <API key>) { vmx->idt_vectoring_info &= ~<API key>; vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR; return; } vmx->idt_vectoring_info = <API key> | INTR_TYPE_EXT_INTR | vmx->rmode.irq.vector; } #ifdef CONFIG_X86_64 #define R "r" #define Q "q" #else #define R "e" #define Q "l" #endif static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 intr_info; /* Record the guest's net vcpu time for enforced NMI injections. 
*/ if (unlikely(!<API key>() && vmx->soft_vnmi_blocked)) vmx->entry_time = ktime_get(); /* Handle invalid guest state instead of entering VMX */ if (vmx->emulation_required && <API key>) { <API key>(vcpu, kvm_run); return; } if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); /* * Loading guest fpu may have cleared host cr0.ts */ vmcs_writel(HOST_CR0, read_cr0()); asm( /* Store host registers */ "push %%"R"dx; push %%"R"bp;" "push %%"R"cx \n\t" "cmp %%"R"sp, %c[host_rsp](%0) \n\t" "je 1f \n\t" "mov %%"R"sp, %c[host_rsp](%0) \n\t" __ex(<API key>) "\n\t" "1: \n\t" /* Check if vmlaunch of vmresume is needed */ "cmpl $0, %c[launched](%0) \n\t" /* Load guest registers. Don't clobber flags. */ "mov %c[cr2](%0), %%"R"ax \n\t" "mov %%"R"ax, %%cr2 \n\t" "mov %c[rax](%0), %%"R"ax \n\t" "mov %c[rbx](%0), %%"R"bx \n\t" "mov %c[rdx](%0), %%"R"dx \n\t" "mov %c[rsi](%0), %%"R"si \n\t" "mov %c[rdi](%0), %%"R"di \n\t" "mov %c[rbp](%0), %%"R"bp \n\t" #ifdef CONFIG_X86_64 "mov %c[r8](%0), %%r8 \n\t" "mov %c[r9](%0), %%r9 \n\t" "mov %c[r10](%0), %%r10 \n\t" "mov %c[r11](%0), %%r11 \n\t" "mov %c[r12](%0), %%r12 \n\t" "mov %c[r13](%0), %%r13 \n\t" "mov %c[r14](%0), %%r14 \n\t" "mov %c[r15](%0), %%r15 \n\t" #endif "mov %c[rcx](%0), %%"R"cx \n\t" /* kills %0 (ecx) */ /* Enter guest mode */ "jne .Llaunched \n\t" __ex(ASM_VMX_VMLAUNCH) "\n\t" "jmp .Lkvm_vmx_return \n\t" ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t" ".Lkvm_vmx_return: " /* Save guest registers, load host registers, keep flags */ "xchg %0, (%%"R"sp) \n\t" "mov %%"R"ax, %c[rax](%0) \n\t" "mov %%"R"bx, %c[rbx](%0) \n\t" "push"Q" (%%"R"sp); pop"Q" %c[rcx](%0) \n\t" "mov %%"R"dx, %c[rdx](%0) \n\t" "mov %%"R"si, %c[rsi](%0) \n\t" "mov %%"R"di, %c[rdi](%0) \n\t" "mov %%"R"bp, %c[rbp](%0) \n\t" #ifdef CONFIG_X86_64 "mov %%r8, %c[r8](%0) 
\n\t" "mov %%r9, %c[r9](%0) \n\t" "mov %%r10, %c[r10](%0) \n\t" "mov %%r11, %c[r11](%0) \n\t" "mov %%r12, %c[r12](%0) \n\t" "mov %%r13, %c[r13](%0) \n\t" "mov %%r14, %c[r14](%0) \n\t" "mov %%r15, %c[r15](%0) \n\t" #endif "mov %%cr2, %%"R"ax \n\t" "mov %%"R"ax, %c[cr2](%0) \n\t" "pop %%"R"bp; pop %%"R"bp; pop %%"R"dx \n\t" "setbe %c[fail](%0) \n\t" : : "c"(vmx), "d"((unsigned long)HOST_RSP), [launched]"i"(offsetof(struct vcpu_vmx, launched)), [fail]"i"(offsetof(struct vcpu_vmx, fail)), [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)), [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])), [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])), [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])), [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])), [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])), [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])), [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])), #ifdef CONFIG_X86_64 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])), [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])), [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])), [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])), [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])), [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])), [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])), [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])), #endif [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)) : "cc", "memory" , R"bx", R"di", R"si" #ifdef CONFIG_X86_64 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" #endif ); vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)); vcpu->arch.regs_dirty = 0; vmx->idt_vectoring_info = vmcs_read32(<API key>); if (vmx->rmode.irq.pending) fixup_rmode_irq(vmx); <API key>(vcpu); asm("mov %0, %%ds; 
mov %0, %%es" : : "r"(__USER_DS)); vmx->launched = 1; intr_info = vmcs_read32(VM_EXIT_INTR_INFO); /* We need to handle NMIs before interrupts are enabled */ if ((intr_info & <API key>) == INTR_TYPE_NMI_INTR && (intr_info & <API key>)) { KVMTRACE_0D(NMI, vcpu, handler); asm("int $2"); } <API key>(vmx); } #undef R #undef Q static void vmx_free_vmcs(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (vmx->vmcs) { vcpu_clear(vmx); free_vmcs(vmx->vmcs); vmx->vmcs = NULL; } } static void vmx_free_vcpu(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); spin_lock(&vmx_vpid_lock); if (vmx->vpid != 0) __clear_bit(vmx->vpid, vmx_vpid_bitmap); spin_unlock(&vmx_vpid_lock); vmx_free_vmcs(vcpu); kfree(vmx->host_msrs); kfree(vmx->guest_msrs); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, vmx); } static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) { int err; struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); int cpu; if (!vmx) return ERR_PTR(-ENOMEM); allocate_vpid(vmx); err = kvm_vcpu_init(&vmx->vcpu, kvm, id); if (err) goto free_vcpu; vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!vmx->guest_msrs) { err = -ENOMEM; goto uninit_vcpu; } vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!vmx->host_msrs) goto free_guest_msrs; vmx->vmcs = alloc_vmcs(); if (!vmx->vmcs) goto free_msrs; vmcs_clear(vmx->vmcs); cpu = get_cpu(); vmx_vcpu_load(&vmx->vcpu, cpu); err = vmx_vcpu_setup(vmx); vmx_vcpu_put(&vmx->vcpu); put_cpu(); if (err) goto free_vmcs; if (<API key>(kvm)) if (<API key>(kvm) != 0) goto free_vmcs; if (vm_need_ept()) if (<API key>(kvm) != 0) goto free_vmcs; return &vmx->vcpu; free_vmcs: free_vmcs(vmx->vmcs); free_msrs: kfree(vmx->host_msrs); free_guest_msrs: kfree(vmx->guest_msrs); uninit_vcpu: kvm_vcpu_uninit(&vmx->vcpu); free_vcpu: kmem_cache_free(kvm_vcpu_cache, vmx); return ERR_PTR(err); } static void __init <API key>(void *rtn) { struct vmcs_config vmcs_conf; *(int *)rtn = 0; if 
(setup_vmcs_config(&vmcs_conf) < 0) *(int *)rtn = -EIO; if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) { printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n", smp_processor_id()); *(int *)rtn = -EIO; } } static int get_ept_level(void) { return VMX_EPT_DEFAULT_GAW + 1; } static int <API key>(void) { return <API key>; } static struct kvm_x86_ops vmx_x86_ops = { .cpu_has_kvm_support = cpu_has_kvm_support, .disabled_by_bios = <API key>, .hardware_setup = hardware_setup, .hardware_unsetup = hardware_unsetup, .<API key> = <API key>, .hardware_enable = hardware_enable, .hardware_disable = hardware_disable, .<API key> = <API key>, .vcpu_create = vmx_create_vcpu, .vcpu_free = vmx_free_vcpu, .vcpu_reset = vmx_vcpu_reset, .<API key> = vmx_save_host_state, .vcpu_load = vmx_vcpu_load, .vcpu_put = vmx_vcpu_put, .set_guest_debug = set_guest_debug, .guest_debug_pre = kvm_guest_debug_pre, .get_msr = vmx_get_msr, .set_msr = vmx_set_msr, .get_segment_base = <API key>, .get_segment = vmx_get_segment, .set_segment = vmx_set_segment, .get_cpl = vmx_get_cpl, .get_cs_db_l_bits = <API key>, .<API key> = <API key>, .set_cr0 = vmx_set_cr0, .set_cr3 = vmx_set_cr3, .set_cr4 = vmx_set_cr4, .set_efer = vmx_set_efer, .get_idt = vmx_get_idt, .set_idt = vmx_set_idt, .get_gdt = vmx_get_gdt, .set_gdt = vmx_set_gdt, .cache_reg = vmx_cache_reg, .get_rflags = vmx_get_rflags, .set_rflags = vmx_set_rflags, .tlb_flush = vmx_flush_tlb, .run = vmx_vcpu_run, .handle_exit = kvm_handle_exit, .<API key> = <API key>, .patch_hypercall = vmx_patch_hypercall, .get_irq = vmx_get_irq, .set_irq = vmx_inject_irq, .queue_exception = vmx_queue_exception, .exception_injected = <API key>, .inject_pending_irq = vmx_intr_assist, .<API key> = <API key>, .set_tss_addr = vmx_set_tss_addr, .get_tdp_level = get_ept_level, .get_mt_mask_shift = <API key>, }; static int __init vmx_init(void) { void *va; int r; vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); if (!vmx_io_bitmap_a) return -ENOMEM; 
vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); if (!vmx_io_bitmap_b) { r = -ENOMEM; goto out; } vmx_msr_bitmap = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); if (!vmx_msr_bitmap) { r = -ENOMEM; goto out1; } /* * Allow direct access to the PC debug port (it is often used for I/O * delays, but the vmexits simply slow things down). */ va = kmap(vmx_io_bitmap_a); memset(va, 0xff, PAGE_SIZE); clear_bit(0x80, va); kunmap(vmx_io_bitmap_a); va = kmap(vmx_io_bitmap_b); memset(va, 0xff, PAGE_SIZE); kunmap(vmx_io_bitmap_b); va = kmap(vmx_msr_bitmap); memset(va, 0xff, PAGE_SIZE); kunmap(vmx_msr_bitmap); set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE); if (r) goto out2; <API key>(vmx_msr_bitmap, MSR_FS_BASE); <API key>(vmx_msr_bitmap, MSR_GS_BASE); <API key>(vmx_msr_bitmap, <API key>); <API key>(vmx_msr_bitmap, <API key>); <API key>(vmx_msr_bitmap, <API key>); if (vm_need_ept()) { bypass_guest_pf = 0; <API key>(<API key> | <API key>); <API key>(0ull, 0ull, 0ull, 0ull, <API key>, VMX_EPT_DEFAULT_MT << <API key>); kvm_enable_tdp(); } else kvm_disable_tdp(); if (bypass_guest_pf) <API key>(~0xffeull, 0ull); ept_sync_global(); return 0; out2: __free_page(vmx_msr_bitmap); out1: __free_page(vmx_io_bitmap_b); out: __free_page(vmx_io_bitmap_a); return r; } static void __exit vmx_exit(void) { __free_page(vmx_msr_bitmap); __free_page(vmx_io_bitmap_b); __free_page(vmx_io_bitmap_a); kvm_exit(); } module_init(vmx_init) module_exit(vmx_exit)
// +build !windows

package daemon

import (
	"github.com/docker/docker/container"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/idtools"
)

// tarCopyOptions returns the archive.TarOptions used when copying files
// into or out of the given container's filesystem.
//
// NOTE(review): the second parameter name, the daemon method called in the
// fast path, and one TarOptions field name are redacted in this copy as
// "<API key>" — restore the original identifiers from upstream before
// building; the surrounding logic is documented from what is visible here.
//
// When the container has no configured user, the call is delegated to a
// daemon-level helper (redacted) that builds default options from the bool
// argument. Otherwise the configured user is resolved via idtools.LookupUser,
// and the returned options carry ChownOpts with that user's UID/GID so that
// copied files end up owned by the container's user.
func (daemon *Daemon) tarCopyOptions(container *container.Container, <API key> bool) (*archive.TarOptions, error) {
	if container.Config.User == "" {
		// No explicit user configured: use the daemon's default options.
		return daemon.<API key>(<API key>), nil
	}

	// Resolve the configured user name/ID; propagate lookup failure.
	user, err := idtools.LookupUser(container.Config.User)
	if err != nil {
		return nil, err
	}

	return &archive.TarOptions{
		// Field name redacted — presumably the pass-through of the bool
		// parameter above; verify against the upstream source.
		<API key>: <API key>,
		ChownOpts: &idtools.IDPair{UID: user.Uid, GID: user.Gid},
	}, nil
}
<?php
/**
 * Memcache storage engine for cache. Memcache has some limitations in the amount of
 * control you have over expire times far in the future. See MemcacheEngine::write() for
 * more information.
 *
 * @package Cake.Cache.Engine
 * @deprecated 3.0.0 You should use the Memcached adapter instead.
 */
class MemcacheEngine extends CacheEngine {

/**
 * Contains the compiled group names
 * (prefixed with the global configuration prefix)
 *
 * @var array
 */
	protected $_compiledGroupNames = array();

/**
 * Memcache wrapper.
 *
 * @var Memcache
 */
	protected $_Memcache = null;

/**
 * Settings
 *
 * - servers = string or array of memcache servers, default => 127.0.0.1. If an
 *   array MemcacheEngine will use them as a pool.
 * - compress = boolean, default => false
 *
 * @var array
 */
	public $settings = array();

/**
 * Initialize the Cache Engine
 *
 * Called automatically by the cache frontend
 * To reinitialize the settings call Cache::engine('EngineName', [optional] settings = array());
 *
 * @param array $settings array of setting for the engine
 * @return bool True if the engine has been successfully initialized, false if not
 */
	public function init($settings = array()) {
		if (!class_exists('Memcache')) {
			return false;
		}
		if (!isset($settings['prefix'])) {
			$settings['prefix'] = Inflector::slug(APP_DIR) . '_';
		}
		$settings += array(
			'engine' => 'Memcache',
			'servers' => array('127.0.0.1'),
			'compress' => false,
			'persistent' => true
		);
		parent::init($settings);

		if ($this->settings['compress']) {
			// Memcache expects the extension's flag constant, not a plain bool.
			$this->settings['compress'] = MEMCACHE_COMPRESSED;
		}
		if (is_string($this->settings['servers'])) {
			$this->settings['servers'] = array($this->settings['servers']);
		}
		if (!isset($this->_Memcache)) {
			// First initialization: build the connection pool. Success means
			// at least one server could be added to the pool.
			$return = false;
			$this->_Memcache = new Memcache();
			foreach ($this->settings['servers'] as $server) {
				list($host, $port) = $this->_parseServerString($server);
				if ($this->_Memcache->addServer($host, $port, $this->settings['persistent'])) {
					$return = true;
				}
			}
			return $return;
		}
		return true;
	}

/**
 * Parses the server address into the host/port. Handles both IPv6 and IPv4
 * addresses and Unix sockets
 *
 * @param string $server The server address string.
 * @return array Array containing host, port
 */
	protected function _parseServerString($server) {
		// Fixed: the previous check ($server[0] === 'u') treated ANY address
		// beginning with the letter "u" (e.g. "uk-cache.example.com:11211")
		// as a unix socket. Only a real "unix://" scheme qualifies.
		if (strpos($server, 'unix://') === 0) {
			return array($server, 0);
		}
		if (substr($server, 0, 1) === '[') {
			// Bracketed IPv6 literal, e.g. "[::1]:11211" — port separator is "]:".
			$position = strpos($server, ']:');
			if ($position !== false) {
				$position++;
			}
		} else {
			$position = strpos($server, ':');
		}
		$port = 11211;
		$host = $server;
		if ($position !== false) {
			$host = substr($server, 0, $position);
			$port = substr($server, $position + 1);
		}
		return array($host, $port);
	}

/**
 * Write data for key into cache. When using memcache as your cache engine
 * remember that the Memcache PECL extension does not support cache expiry
 * times greater than 30 days in the future. Any duration greater than 30 days
 * will be treated as never expiring.
 *
 * @param string $key Identifier for the data
 * @param mixed $value Data to be cached
 * @param int $duration How long to cache the data, in seconds
 * @return bool True if the data was successfully cached, false on failure
 * @see http://php.net/manual/en/memcache.set.php
 */
	public function write($key, $value, $duration) {
		if ($duration > 30 * DAY) {
			// Memcache interprets expire values > 30 days as a unix
			// timestamp; fall back to "never expires" instead.
			$duration = 0;
		}
		return $this->_Memcache->set($key, $value, $this->settings['compress'], $duration);
	}

/**
 * Read a key from the cache
 *
 * @param string $key Identifier for the data
 * @return mixed The cached data, or false if the data doesn't exist, has expired, or if there was an error fetching it
 */
	public function read($key) {
		return $this->_Memcache->get($key);
	}

/**
 * Increments the value of an integer cached key
 *
 * @param string $key Identifier for the data
 * @param int $offset How much to increment
 * @return int|false New incremented value, false otherwise
 * @throws CacheException when you try to increment with compress = true
 */
	public function increment($key, $offset = 1) {
		if ($this->settings['compress']) {
			throw new CacheException(
				__d('cake_dev', 'Method %s not implemented for compressed cache in %s', 'increment()', __CLASS__)
			);
		}
		return $this->_Memcache->increment($key, $offset);
	}

/**
 * Decrements the value of an integer cached key
 *
 * @param string $key Identifier for the data
 * @param int $offset How much to subtract
 * @return int|false New decremented value, false otherwise
 * @throws CacheException when you try to decrement with compress = true
 */
	public function decrement($key, $offset = 1) {
		if ($this->settings['compress']) {
			throw new CacheException(
				__d('cake_dev', 'Method %s not implemented for compressed cache in %s', 'decrement()', __CLASS__)
			);
		}
		return $this->_Memcache->decrement($key, $offset);
	}

/**
 * Delete a key from the cache
 *
 * @param string $key Identifier for the data
 * @return bool True if the value was successfully deleted, false if it didn't exist or couldn't be removed
 */
	public function delete($key) {
		return $this->_Memcache->delete($key);
	}

/**
 * Delete all keys from the cache
 *
 * @param bool $check If true no deletes will occur and instead CakePHP will rely
 *   on key TTL values.
 * @return bool True if the cache was successfully cleared, false otherwise
 */
	public function clear($check) {
		if ($check) {
			return true;
		}
		// Memcache has no per-prefix flush; walk every slab's cachedump and
		// delete only the keys belonging to this engine's prefix.
		foreach ($this->_Memcache->getExtendedStats('slabs', 0) as $slabs) {
			foreach (array_keys($slabs) as $slabId) {
				if (!is_numeric($slabId)) {
					continue;
				}

				foreach ($this->_Memcache->getExtendedStats('cachedump', $slabId, 0) as $stats) {
					if (!is_array($stats)) {
						continue;
					}
					foreach (array_keys($stats) as $key) {
						if (strpos($key, $this->settings['prefix']) === 0) {
							$this->_Memcache->delete($key);
						}
					}
				}
			}
		}
		return true;
	}

/**
 * Connects to a server in connection pool
 *
 * @param string $host host ip address or name
 * @param int $port Server port
 * @return bool True if memcache server was connected
 */
	public function connect($host, $port = 11211) {
		if ($this->_Memcache->getServerStatus($host, $port) === 0) {
			if ($this->_Memcache->connect($host, $port)) {
				return true;
			}
			return false;
		}
		return true;
	}

/**
 * Returns the `group value` for each of the configured groups
 * If the group initial value was not found, then it initializes
 * the group accordingly.
 *
 * @return array
 */
	public function groups() {
		if (empty($this->_compiledGroupNames)) {
			foreach ($this->settings['groups'] as $group) {
				$this->_compiledGroupNames[] = $this->settings['prefix'] . $group;
			}
		}

		$groups = $this->_Memcache->get($this->_compiledGroupNames);
		if (count($groups) !== count($this->settings['groups'])) {
			// Some group counters were missing from memcache: seed them
			// with 1 so clearGroup() can increment them later.
			foreach ($this->_compiledGroupNames as $group) {
				if (!isset($groups[$group])) {
					$this->_Memcache->set($group, 1, false, 0);
					$groups[$group] = 1;
				}
			}
			ksort($groups);
		}

		$result = array();
		$groups = array_values($groups);
		foreach ($this->settings['groups'] as $i => $group) {
			$result[] = $group . $groups[$i];
		}
		return $result;
	}

/**
 * Increments the group value to simulate deletion of all keys under a group
 * old values will remain in storage until they expire.
 *
 * @param string $group The group to clear.
 * @return bool success
 */
	public function clearGroup($group) {
		return (bool)$this->_Memcache->increment($this->settings['prefix'] . $group);
	}
}
# NOTE(review): this Homebrew formula was damaged by text extraction — the
# `homepage`/`url` string literals are truncated to `"http:` (the remainder of
# each URL is missing, leaving unbalanced quotes) and every sha256 value is a
# placeholder. The original line layout was also collapsed onto one line.
# Restore the URLs, checksums, and formatting from upstream Homebrew history
# before attempting to build; the code below is preserved byte-identically.
class Nasm < Formula desc "Netwide Assembler (NASM) is an 80x86 assembler" homepage "http: revision 1 stable do url "http: sha256 "<SHA256-like>" patch do url "https://raw.githubusercontent.com/Homebrew/patches/7a329c65e/nasm/nasm_outmac64.patch" sha256 "<SHA256-like>" end end bottle do cellar :any_skip_relocation sha256 "<SHA256-like>" => :el_capitan sha256 "<SHA256-like>" => :yosemite sha256 "<SHA256-like>" => :mavericks end devel do url "http: sha256 "<SHA256-like>" end option :universal def install ENV.universal_binary if build.universal? system "./configure", "--prefix=#{prefix}" system "make", "install", "install_rdf" end test do (testpath/"foo.s").write <<-EOS mov eax, 0 mov ebx, 0 int 0x80 EOS system "#{bin}/nasm", "foo.s" code = File.open("foo", "rb") { |f| f.read.unpack("C*") } expected = [0x66, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x66, 0xbb, 0x00, 0x00, 0x00, 0x00, 0xcd, 0x80] assert_equal expected, code end end
#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

/* NOTE(review): several identifiers in this snapshot were redacted to the
 * literal token "<API key>" (function names, macro names, enum constants).
 * The surrounding call shapes are preserved verbatim; restore the real
 * names from the original source before building. */

/* Slab cache for struct sw_flow; created in ovs_flow_init(), destroyed in
 * ovs_flow_exit(). */
static struct kmem_cache *flow_cache;

/* Ensure at least 'len' bytes of 'skb' exist and are linear.
 * Returns 0, -EINVAL if the packet is shorter than 'len', or -ENOMEM if
 * pulling the data into the linear area fails. */
static int check_header(struct sk_buff *skb, int len)
{
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;
	return 0;
}

/* True if a full Ethernet-ARP header is present and linear. */
static bool arphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + sizeof(struct arp_eth_header));
}

/* Validate the IPv4 header (minimum size, consistent IHL) and make it
 * linear.  On success the redacted call below positions a header offset
 * just past the IP header — presumably sets skb->transport_header;
 * confirm against the original source. */
static int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;
	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;
	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) || skb->len < nh_ofs + ip_len))
		return -EINVAL;
	<API key>(skb, nh_ofs + ip_len);
	return 0;
}

/* True if a full TCP header (including options per Data Offset) is
 * present and linear.  The redacted call returns the transport offset. */
static bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = <API key>(skb);
	int tcp_len;
	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
		return false;
	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) || skb->len < th_ofs + tcp_len))
		return false;
	return true;
}

static bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, <API key>(skb) + sizeof(struct udphdr));
}

static bool icmphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, <API key>(skb) + sizeof(struct icmphdr));
}

/* Translate a jiffies "last used" stamp into wall-clock milliseconds by
 * subtracting the idle time from the current monotonic time. */
u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
	struct timespec cur_ts;
	u64 cur_ms, idle_ms;
	ktime_get_ts(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC + cur_ts.tv_nsec / NSEC_PER_MSEC;
	return cur_ms - idle_ms;
}

/* Number of bytes of struct sw_flow_key up to and including 'field'. */
#define SW_FLOW_KEY_OFFSET(field) \
	(offsetof(struct sw_flow_key, field) + \
	FIELD_SIZEOF(struct sw_flow_key, field))

/* Parse the IPv6 header plus extension headers into 'key'.
 * Returns the total network-header length on success or a negative errno.
 * Sets ip.proto to the final next-header value and records fragment
 * state (FIRST vs LATER) from the fragment offset. */
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key, int *key_lenp)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	int payload_ofs;
	struct ipv6hdr *nh;
	uint8_t nexthdr;
	__be16 frag_off;
	int err;
	*key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label);
	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;
	nh = ipv6_hdr(skb);
	nexthdr = nh->nexthdr;
	payload_ofs = (u8 *)(nh + 1) - skb->data;
	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	/* Mask retains the 20-bit flow label from the first 32-bit word;
	 * the constant was redacted in this snapshot. */
	key->ipv6.label = *(__be32 *)nh & htonl(<API key>);
	key->ipv6.addr.src = nh->saddr;
	key->ipv6.addr.dst = nh->daddr;
	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
	if (unlikely(payload_ofs < 0))
		return -EINVAL;
	if (frag_off) {
		/* Non-zero fragment offset => a later fragment; offset zero
		 * with the MF bit => first fragment. */
		if (frag_off & htons(~0x7))
			key->ip.frag = OVS_FRAG_TYPE_LATER;
		else
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
	}
	nh_len = payload_ofs - nh_ofs;
	<API key>(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}

static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, <API key>(skb) + sizeof(struct icmp6hdr));
}

/* Byte offset of the TCP flags byte and the mask of the six classic flags. */
#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f

/* Update per-flow accounting (timestamp, packet/byte counters and the OR
 * of observed TCP flags) under the flow's spinlock. */
void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
	u8 tcp_flags = 0;
	if (flow->key.eth.type == htons(ETH_P_IP) && flow->key.ip.proto == IPPROTO_TCP) {
		u8 *tcp = (u8 *)tcp_hdr(skb);
		tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
	}
	spin_lock(&flow->lock);
	flow->used = jiffies;
	flow->packet_count++;
	flow->byte_count += skb->len;
	flow->tcp_flags |= tcp_flags;
	spin_unlock(&flow->lock);
}

/* Allocate a sw_flow_actions structure large enough for 'actions' and copy
 * the attribute payload into it.  Returns ERR_PTR on failure. */
struct sw_flow_actions *<API key>(const struct nlattr *actions)
{
	int actions_len = nla_len(actions);
	struct sw_flow_actions *sfa;
	/* At least DP_MAX_PORTS actions are required to be able to flood a
	 * packet to every port. Factor of 2 allows for setting VLAN tags,
	 * etc. */
	if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4))
		return ERR_PTR(-EINVAL);
	sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
	if (!sfa)
		return ERR_PTR(-ENOMEM);
	sfa->actions_len = actions_len;
	memcpy(sfa->actions, nla_data(actions), actions_len);
	return sfa;
}

/* Allocate a flow from the slab cache with its lock initialized and no
 * actions attached yet. */
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);
	spin_lock_init(&flow->lock);
	flow->sf_acts = NULL;
	return flow;
}

/* Map a flow hash to its bucket; the per-table hash_seed re-mixes the
 * hash so rehashing redistributes flows. */
static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
{
	hash = jhash_1word(hash, table->hash_seed);
	return flex_array_get(table->buckets, (hash & (table->n_buckets - 1)));
}

/* Allocate and pre-fault 'n_buckets' empty hlist heads in a flex_array. */
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;
	buckets = flex_array_alloc(sizeof(struct hlist_head *), n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;
	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}
	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *) flex_array_get(buckets, i));
	return buckets;
}

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}

/* Allocate an empty flow table with 'new_size' buckets and a random
 * hash seed.  Returns NULL on allocation failure. */
struct flow_table *ovs_flow_tbl_alloc(int new_size)
{
	struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return NULL;
	table->buckets = alloc_buckets(new_size);
	if (!table->buckets) {
		kfree(table);
		return NULL;
	}
	table->n_buckets = new_size;
	table->count = 0;
	table->node_ver = 0;
	table->keep_flows = false;
	get_random_bytes(&table->hash_seed, sizeof(u32));
	return table;
}

/* Destroy 'table'.  Unless keep_flows is set (flows were handed to a
 * rehashed table), every flow is unlinked and freed first.  The iteration
 * macro name was redacted; it walks each bucket safely while deleting. */
void <API key>(struct flow_table *table)
{
	int i;
	if (!table)
		return;
	if (table->keep_flows)
		goto skip_flows;
	for (i = 0; i < table->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(table->buckets, i);
		struct hlist_node *node, *n;
		int ver = table->node_ver;
		<API key>(flow, node, n, head, hash_node[ver]) {
			hlist_del_rcu(&flow->hash_node[ver]);
			ovs_flow_free(flow);
		}
	}
skip_flows:
	free_buckets(table->buckets);
	kfree(table);
}

/* RCU callback: destroy the table once readers are done. */
static void <API key>(struct rcu_head *rcu)
{
	struct flow_table *table = container_of(rcu, struct flow_table, rcu);
	<API key>(table);
}

/* Schedule table destruction after the next RCU grace period. */
void <API key>(struct flow_table *table)
{
	if (!table)
		return;
	call_rcu(&table->rcu, <API key>);
}

/* Iterator used for flow dumps: resume from (*bucket, *last) and return
 * the next flow, advancing the cursor; NULL when the table is exhausted. */
struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	struct hlist_node *n;
	int ver;
	int i;
	ver = table->node_ver;
	while (*bucket < table->n_buckets) {
		i = 0;
		head = flex_array_get(table->buckets, *bucket);
		<API key>(flow, n, head, hash_node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}
	return NULL;
}

/* Move every flow from 'old' into 'new' using the other hash_node slot
 * (node_ver flip), then mark 'old' so its destructor keeps the flows. */
static void <API key>(struct flow_table *old, struct flow_table *new)
{
	int old_ver;
	int i;
	old_ver = old->node_ver;
	new->node_ver = !old_ver;
	/* Insert in new table.
	 */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;
		struct hlist_node *n;
		head = flex_array_get(old->buckets, i);
		<API key>(flow, n, head, hash_node[old_ver])
			ovs_flow_tbl_insert(new, flow);
	}
	old->keep_flows = true;
}

/* Build a fresh table with 'n_buckets' buckets and migrate all flows. */
static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets)
{
	struct flow_table *new_table;
	new_table = ovs_flow_tbl_alloc(n_buckets);
	if (!new_table)
		return ERR_PTR(-ENOMEM);
	<API key>(table, new_table);
	return new_table;
}

struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
{
	return __flow_tbl_rehash(table, table->n_buckets);
}

/* Double the bucket count. */
struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
{
	return __flow_tbl_rehash(table, table->n_buckets * 2);
}

/* Free a flow and its actions (NULL-safe). */
void ovs_flow_free(struct sw_flow *flow)
{
	if (unlikely(!flow))
		return;
	kfree((struct sf_flow_acts __force *)flow->sf_acts);
	kmem_cache_free(flow_cache, flow);
}

/* RCU callback used by <API key>. */
static void <API key>(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
	ovs_flow_free(flow);
}

/* Schedules 'flow' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible.
 */
void <API key>(struct sw_flow *flow)
{
	call_rcu(&flow->rcu, <API key>);
}

/* RCU callback used by <API key>. */
static void <API key>(struct rcu_head *rcu)
{
	struct sw_flow_actions *sf_acts = container_of(rcu, struct sw_flow_actions, rcu);
	kfree(sf_acts);
}

/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible.
 */
void <API key>(struct sw_flow_actions *sf_acts)
{
	call_rcu(&sf_acts->rcu, <API key>);
}

/* Parse the 802.1Q tag at skb->data into key->eth.tci and pull it.
 * Returns 0 when the frame is simply too short to carry a tag (leaving
 * the key untouched) and -ENOMEM if the pull fails. */
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct qtag_prefix {
		__be16 eth_type; /* ETH_P_8021Q */
		__be16 tci;
	};
	struct qtag_prefix *qp;
	if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
		return 0;
	if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) + sizeof(__be16))))
		return -ENOMEM;
	qp = (struct qtag_prefix *) skb->data;
	key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
	__skb_pull(skb, sizeof(struct qtag_prefix));
	return 0;
}

/* Determine the frame's EtherType at skb->data, handling 802.2/802.3
 * LLC+SNAP encapsulation; pulls the consumed bytes.  Returns
 * htons(ETH_P_802_2) for non-SNAP LLC frames and htons(0) if a pull
 * fails (caller treats that as -ENOMEM). */
static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8 dsap; /* Always 0xAA */
		u8 ssap; /* Always 0xAA */
		u8 ctrl;
		u8 oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;
	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));
	/* Values >= 1536 are EtherTypes; smaller values are 802.3 lengths. */
	if (ntohs(proto) >= 1536)
		return proto;
	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);
	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);
	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP || llc->ssap != LLC_SAP_SNAP || (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);
	__skb_pull(skb, sizeof(struct llc_snap_hdr));
	return llc->ethertype;
}

/* Parse an ICMPv6 header into the key; for neighbor discovery messages
 * (the two redacted type constants) also extract the target address and
 * source/target link-layer address options.  Malformed ND options clear
 * the ND fields rather than failing the whole parse. */
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, int *key_lenp, int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);
	int error = 0;
	int key_len;
	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->ipv6.tp.src = htons(icmp->icmp6_type);
	key->ipv6.tp.dst = htons(icmp->icmp6_code);
	key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
	if (icmp->icmp6_code == 0 && (icmp->icmp6_type == <API key> || icmp->icmp6_type == <API key>)) {
		int icmp_len = skb->len - <API key>(skb);
		struct nd_msg *nd;
		int offset;
		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			goto out;
		if (unlikely(skb_linearize(skb))) {
			error = -ENOMEM;
			goto out;
		}
		nd = (struct nd_msg *)<API key>(skb);
		key->ipv6.nd.target = nd->target;
		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
		icmp_len -= sizeof(*nd);
		offset = 0;
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;
			if (unlikely(!opt_len || opt_len > icmp_len))
				goto invalid;
			/* Store the link layer address if the appropriate
			 * option is provided. It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == <API key> && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				memcpy(key->ipv6.nd.sll, &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
			} else if (nd_opt->nd_opt_type == <API key> && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				memcpy(key->ipv6.nd.tll, &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
			}
			icmp_len -= opt_len;
			offset += opt_len;
		}
	}
	goto out;
invalid:
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));
out:
	*key_lenp = key_len;
	return error;
}

/**
 * ovs_flow_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @in_port: port number on which @skb was received.
 * @key: output flow key
 * @key_lenp: length of output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 * - skb->mac_header: the Ethernet header.
 *
 * - skb->network_header: just past the Ethernet header, or just past the
 * VLAN header, to the first byte of the Ethernet payload.
 *
 * - skb->transport_header: If key->dl_type is ETH_P_IP or ETH_P_IPV6
 * on output, then just past the IP header, if one is present and
 * of a correct length, otherwise the same as skb->network_header.
 * For other key->dl_type values it is left untouched.
 */
int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, int *key_lenp)
{
	int error = 0;
	int key_len = SW_FLOW_KEY_OFFSET(eth);
	struct ethhdr *eth;
	memset(key, 0, sizeof(*key));
	key->phy.priority = skb->priority;
	key->phy.in_port = in_port;
	<API key>(skb);
	/* Link layer. We are guaranteed to have at least the 14 byte Ethernet
	 * header in the linear data area.
	 */
	eth = eth_hdr(skb);
	memcpy(key->eth.src, eth->h_source, ETH_ALEN);
	memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);
	__skb_pull(skb, 2 * ETH_ALEN);
	if (vlan_tx_tag_present(skb))
		key->eth.tci = htons(skb->vlan_tci);
	else if (eth->h_proto == htons(ETH_P_8021Q))
		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;
	key->eth.type = parse_ethertype(skb);
	if (unlikely(key->eth.type == htons(0)))
		return -ENOMEM;
	<API key>(skb);
	__skb_push(skb, skb->data - skb_mac_header(skb));
	/* Network layer.
	 */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;
		key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
		error = check_iphdr(skb);
		if (unlikely(error)) {
			/* A malformed (but present) IP header is not fatal:
			 * record what we have and stop at the network layer. */
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			}
			goto out;
		}
		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;
		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;
		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			goto out;
		}
		if (nh->frag_off & htons(IP_MF) || skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv4.tp.src = tcp->source;
				key->ipv4.tp.dst = tcp->dest;
			}
		} else if (key->ip.proto == IPPROTO_UDP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv4.tp.src = udp->source;
				key->ipv4.tp.dst = udp->dest;
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order.
				 */
				key->ipv4.tp.src = htons(icmp->type);
				key->ipv4.tp.dst = htons(icmp->code);
			}
		}
	} else if (key->eth.type == htons(ETH_P_ARP) && arphdr_ok(skb)) {
		struct arp_eth_header *arp;
		arp = (struct arp_eth_header *)skb_network_header(skb);
		if (arp->ar_hrd == htons(ARPHRD_ETHER) && arp->ar_pro == htons(ETH_P_IP) && arp->ar_hln == ETH_ALEN && arp->ar_pln == 4) {
			/* We only match on the lower 8 bits of the opcode.
			 */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);
			if (key->ip.proto == ARPOP_REQUEST || key->ip.proto == ARPOP_REPLY) {
				memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
				memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
				memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
				memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
				key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
			}
		}
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len; /* IPv6 Header + Extensions */
		nh_len = parse_ipv6hdr(skb, key, &key_len);
		if (unlikely(nh_len < 0)) {
			if (nh_len == -EINVAL)
				skb->transport_header = skb->network_header;
			else
				error = nh_len;
			goto out;
		}
		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
			goto out;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv6.tp.src = tcp->source;
				key->ipv6.tp.dst = tcp->dest;
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv6.tp.src = udp->source;
				key->ipv6.tp.dst = udp->dest;
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, &key_len, nh_len);
				if (error < 0)
					goto out;
			}
		}
	}
out:
	*key_lenp = key_len;
	return error;
}

/* Hash the first 'key_len' bytes of a flow key (rounded up to u32s). */
u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len)
{
	return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), 0);
}

/* Look up a flow by exact key match over 'key_len' bytes; returns the
 * flow or NULL.  (The final "NULL" token of this function's return
 * statement falls on the next source line of this snapshot.) */
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table, struct sw_flow_key *key, int key_len)
{
	struct sw_flow *flow;
	struct hlist_node *n;
	struct hlist_head *head;
	u32 hash;
	hash = ovs_flow_hash(key, key_len);
	head = find_bucket(table, hash);
	<API key>(flow, n, head, hash_node[table->node_ver]) {
		if (flow->hash == hash && !memcmp(&flow->key, key, key_len)) {
			return flow;
		}
	}
	return
NULL; } void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow) { struct hlist_head *head; head = find_bucket(table, flow->hash); hlist_add_head_rcu(&flow->hash_node[table->node_ver], head); table->count++; } void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow) { hlist_del_rcu(&flow->hash_node[table->node_ver]); table->count BUG_ON(table->count < 0); } /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */ const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { [OVS_KEY_ATTR_ENCAP] = -1, [<API key>] = sizeof(u32), [<API key>] = sizeof(u32), [<API key>] = sizeof(struct ovs_key_ethernet), [OVS_KEY_ATTR_VLAN] = sizeof(__be16), [<API key>] = sizeof(__be16), [OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4), [OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6), [OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp), [OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp), [OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp), [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6), [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp), [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd), }; static int <API key>(struct sw_flow_key *swkey, int *key_len, const struct nlattr *a[], u32 *attrs) { const struct ovs_key_icmp *icmp_key; const struct ovs_key_tcp *tcp_key; const struct ovs_key_udp *udp_key; switch (swkey->ip.proto) { case IPPROTO_TCP: if (!(*attrs & (1 << OVS_KEY_ATTR_TCP))) return -EINVAL; *attrs &= ~(1 << OVS_KEY_ATTR_TCP); *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp); tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]); swkey->ipv4.tp.src = tcp_key->tcp_src; swkey->ipv4.tp.dst = tcp_key->tcp_dst; break; case IPPROTO_UDP: if (!(*attrs & (1 << OVS_KEY_ATTR_UDP))) return -EINVAL; *attrs &= ~(1 << OVS_KEY_ATTR_UDP); *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp); udp_key = nla_data(a[OVS_KEY_ATTR_UDP]); swkey->ipv4.tp.src = udp_key->udp_src; swkey->ipv4.tp.dst = udp_key->udp_dst; break; case IPPROTO_ICMP: if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP))) return -EINVAL; *attrs &= ~(1 
<< OVS_KEY_ATTR_ICMP); *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp); icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]); swkey->ipv4.tp.src = htons(icmp_key->icmp_type); swkey->ipv4.tp.dst = htons(icmp_key->icmp_code); break; } return 0; } static int <API key>(struct sw_flow_key *swkey, int *key_len, const struct nlattr *a[], u32 *attrs) { const struct ovs_key_icmpv6 *icmpv6_key; const struct ovs_key_tcp *tcp_key; const struct ovs_key_udp *udp_key; switch (swkey->ip.proto) { case IPPROTO_TCP: if (!(*attrs & (1 << OVS_KEY_ATTR_TCP))) return -EINVAL; *attrs &= ~(1 << OVS_KEY_ATTR_TCP); *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]); swkey->ipv6.tp.src = tcp_key->tcp_src; swkey->ipv6.tp.dst = tcp_key->tcp_dst; break; case IPPROTO_UDP: if (!(*attrs & (1 << OVS_KEY_ATTR_UDP))) return -EINVAL; *attrs &= ~(1 << OVS_KEY_ATTR_UDP); *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); udp_key = nla_data(a[OVS_KEY_ATTR_UDP]); swkey->ipv6.tp.src = udp_key->udp_src; swkey->ipv6.tp.dst = udp_key->udp_dst; break; case IPPROTO_ICMPV6: if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6))) return -EINVAL; *attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6); *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]); swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type); swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code); if (swkey->ipv6.tp.src == htons(<API key>) || swkey->ipv6.tp.src == htons(<API key>)) { const struct ovs_key_nd *nd_key; if (!(*attrs & (1 << OVS_KEY_ATTR_ND))) return -EINVAL; *attrs &= ~(1 << OVS_KEY_ATTR_ND); *key_len = SW_FLOW_KEY_OFFSET(ipv6.nd); nd_key = nla_data(a[OVS_KEY_ATTR_ND]); memcpy(&swkey->ipv6.nd.target, nd_key->nd_target, sizeof(swkey->ipv6.nd.target)); memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN); memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN); } break; } return 0; } static int parse_flow_nlattrs(const struct nlattr *attr, const struct nlattr *a[], u32 *attrsp) { const struct nlattr *nla; u32 attrs; int rem; attrs = 0; 
nla_for_each_nested(nla, attr, rem) {
		u16 type = nla_type(nla);
		int expected_len;
		/* Reject unknown attribute types and duplicates. */
		if (type > OVS_KEY_ATTR_MAX || attrs & (1 << type))
			return -EINVAL;
		expected_len = ovs_key_lens[type];
		/* -1 in ovs_key_lens means variable-length (nested) payload. */
		if (nla_len(nla) != expected_len && expected_len != -1)
			return -EINVAL;
		attrs |= 1 << type;
		a[type] = nla;
	}
	if (rem)
		return -EINVAL;
	*attrsp = attrs;
	return 0;
}

/**
 * <API key> - parses Netlink attributes into a flow key.
 * @swkey: receives the extracted flow key.
 * @key_lenp: number of bytes used in @swkey.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 */
int <API key>(struct sw_flow_key *swkey, int *key_lenp, const struct nlattr *attr)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	const struct ovs_key_ethernet *eth_key;
	int key_len;
	u32 attrs;
	int err;
	memset(swkey, 0, sizeof(struct sw_flow_key));
	key_len = SW_FLOW_KEY_OFFSET(eth);
	err = parse_flow_nlattrs(attr, a, &attrs);
	if (err)
		return err;
	/* Metadata attributes. */
	if (attrs & (1 << <API key>)) {
		swkey->phy.priority = nla_get_u32(a[<API key>]);
		attrs &= ~(1 << <API key>);
	}
	if (attrs & (1 << <API key>)) {
		u32 in_port = nla_get_u32(a[<API key>]);
		if (in_port >= DP_MAX_PORTS)
			return -EINVAL;
		swkey->phy.in_port = in_port;
		attrs &= ~(1 << <API key>);
	} else {
		/* USHRT_MAX is the sentinel for "no input port". */
		swkey->phy.in_port = USHRT_MAX;
	}
	/* Data attributes.
	 */
	if (!(attrs & (1 << <API key>)))
		return -EINVAL;
	attrs &= ~(1 << <API key>);
	eth_key = nla_data(a[<API key>]);
	memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN);
	memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN);
	/* 802.1Q handling: a VLAN EtherType attribute must come with a VLAN
	 * TCI and a nested ENCAP attribute carrying the inner key. */
	if (attrs & (1u << <API key>) && nla_get_be16(a[<API key>]) == htons(ETH_P_8021Q)) {
		const struct nlattr *encap;
		__be16 tci;
		if (attrs != ((1 << OVS_KEY_ATTR_VLAN) | (1 << <API key>) | (1 << OVS_KEY_ATTR_ENCAP)))
			return -EINVAL;
		encap = a[OVS_KEY_ATTR_ENCAP];
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
		if (tci & htons(VLAN_TAG_PRESENT)) {
			swkey->eth.tci = tci;
			/* Re-run attribute parsing on the encapsulated key. */
			err = parse_flow_nlattrs(encap, a, &attrs);
			if (err)
				return err;
		} else if (!tci) {
			/* Corner case for truncated 802.1Q header. */
			if (nla_len(encap))
				return -EINVAL;
			swkey->eth.type = htons(ETH_P_8021Q);
			*key_lenp = key_len;
			return 0;
		} else {
			return -EINVAL;
		}
	}
	if (attrs & (1 << <API key>)) {
		swkey->eth.type = nla_get_be16(a[<API key>]);
		/* Values below 1536 are 802.3 lengths, not EtherTypes. */
		if (ntohs(swkey->eth.type) < 1536)
			return -EINVAL;
		attrs &= ~(1 << <API key>);
	} else {
		swkey->eth.type = htons(ETH_P_802_2);
	}
	if (swkey->eth.type == htons(ETH_P_IP)) {
		const struct ovs_key_ipv4 *ipv4_key;
		if (!(attrs & (1 << OVS_KEY_ATTR_IPV4)))
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
		key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
		if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
			return -EINVAL;
		swkey->ip.proto = ipv4_key->ipv4_proto;
		swkey->ip.tos = ipv4_key->ipv4_tos;
		swkey->ip.ttl = ipv4_key->ipv4_ttl;
		swkey->ip.frag = ipv4_key->ipv4_frag;
		swkey->ipv4.addr.src = ipv4_key->ipv4_src;
		swkey->ipv4.addr.dst = ipv4_key->ipv4_dst;
		/* Later fragments carry no transport header to parse. */
		if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
			err = <API key>(swkey, &key_len, a, &attrs);
			if (err)
				return err;
		}
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		const struct ovs_key_ipv6 *ipv6_key;
		if (!(attrs & (1 << OVS_KEY_ATTR_IPV6)))
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
		key_len = SW_FLOW_KEY_OFFSET(ipv6.label);
		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
		if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
			return -EINVAL;
		swkey->ipv6.label = ipv6_key->ipv6_label;
		swkey->ip.proto = ipv6_key->ipv6_proto;
		swkey->ip.tos = ipv6_key->ipv6_tclass;
		swkey->ip.ttl = ipv6_key->ipv6_hlimit;
		swkey->ip.frag = ipv6_key->ipv6_frag;
		memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src, sizeof(swkey->ipv6.addr.src));
		memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst, sizeof(swkey->ipv6.addr.dst));
		if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
			err = <API key>(swkey, &key_len, a, &attrs);
			if (err)
				return err;
		}
	} else if (swkey->eth.type == htons(ETH_P_ARP)) {
		const struct ovs_key_arp *arp_key;
		if (!(attrs & (1 << OVS_KEY_ATTR_ARP)))
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_ARP);
		key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
		swkey->ipv4.addr.src = arp_key->arp_sip;
		swkey->ipv4.addr.dst = arp_key->arp_tip;
		/* Only the low 8 bits of the ARP opcode are matched. */
		if (arp_key->arp_op & htons(0xff00))
			return -EINVAL;
		swkey->ip.proto = ntohs(arp_key->arp_op);
		memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN);
		memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN);
	}
	/* Any attribute bits left over were not consumed => invalid key. */
	if (attrs)
		return -EINVAL;
	*key_lenp = key_len;
	return 0;
}

/**
 * <API key> - parses Netlink attributes into a flow key.
 * @in_port: receives the extracted input port.
 * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 *
 * This parses a series of Netlink attributes that form a flow key, which must
 * take the same form accepted by flow_from_nlattrs(), but only enough of it to
 * get the metadata, that is, the parts of the flow key that cannot be
 * extracted from the packet itself.
 */
int <API key>(u32 *priority, u16 *in_port, const struct nlattr *attr)
{
	const struct nlattr *nla;
	int rem;
	*in_port = USHRT_MAX;
	*priority = 0;
	nla_for_each_nested(nla, attr, rem) {
		int type = nla_type(nla);
		if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) {
			if (nla_len(nla) != ovs_key_lens[type])
				return -EINVAL;
			switch (type) {
			case <API key>:
				*priority = nla_get_u32(nla);
				break;
			case <API key>:
				if (nla_get_u32(nla) >= DP_MAX_PORTS)
					return -EINVAL;
				*in_port = nla_get_u32(nla);
				break;
			}
		}
	}
	if (rem)
		return -EINVAL;
	return 0;
}

/* Serialize 'swkey' into nested %OVS_KEY_ATTR_* attributes on 'skb'.
 * Inverse of the from-nlattrs parser above.  Returns 0 or -EMSGSIZE if
 * the skb runs out of tailroom. */
int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
{
	struct ovs_key_ethernet *eth_key;
	struct nlattr *nla, *encap;
	if (swkey->phy.priority)
		NLA_PUT_U32(skb, <API key>, swkey->phy.priority);
	if (swkey->phy.in_port != USHRT_MAX)
		NLA_PUT_U32(skb, <API key>, swkey->phy.in_port);
	nla = nla_reserve(skb, <API key>, sizeof(*eth_key));
	if (!nla)
		goto nla_put_failure;
	eth_key = nla_data(nla);
	memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN);
	memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);
	/* VLAN-tagged keys open a nested ENCAP attribute that holds the
	 * encapsulated part of the key. */
	if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
		NLA_PUT_BE16(skb, <API key>, htons(ETH_P_8021Q));
		NLA_PUT_BE16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci);
		encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
		if (!swkey->eth.tci)
			goto unencap;
	} else {
		encap = NULL;
	}
	if (swkey->eth.type == htons(ETH_P_802_2))
		goto unencap;
	NLA_PUT_BE16(skb, <API key>, swkey->eth.type);
	if (swkey->eth.type == htons(ETH_P_IP)) {
		struct ovs_key_ipv4 *ipv4_key;
		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
		if (!nla)
			goto nla_put_failure;
		ipv4_key = nla_data(nla);
		ipv4_key->ipv4_src = swkey->ipv4.addr.src;
		ipv4_key->ipv4_dst = swkey->ipv4.addr.dst;
		ipv4_key->ipv4_proto = swkey->ip.proto;
		ipv4_key->ipv4_tos = swkey->ip.tos;
		ipv4_key->ipv4_ttl = swkey->ip.ttl;
		ipv4_key->ipv4_frag = swkey->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		struct ovs_key_ipv6 *ipv6_key;
		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
		if (!nla)
			goto nla_put_failure;
		ipv6_key = nla_data(nla);
		memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src, sizeof(ipv6_key->ipv6_src));
		memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst, sizeof(ipv6_key->ipv6_dst));
		ipv6_key->ipv6_label = swkey->ipv6.label;
		ipv6_key->ipv6_proto = swkey->ip.proto;
		ipv6_key->ipv6_tclass = swkey->ip.tos;
		ipv6_key->ipv6_hlimit = swkey->ip.ttl;
		ipv6_key->ipv6_frag = swkey->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_ARP)) {
		struct ovs_key_arp *arp_key;
		nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
		if (!nla)
			goto nla_put_failure;
		arp_key = nla_data(nla);
		memset(arp_key, 0, sizeof(struct ovs_key_arp));
		arp_key->arp_sip = swkey->ipv4.addr.src;
		arp_key->arp_tip = swkey->ipv4.addr.dst;
		arp_key->arp_op = htons(swkey->ip.proto);
		memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN);
		memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN);
	}
	/* Transport-layer attributes, emitted only for unfragmented or
	 * first-fragment IPv4/IPv6 keys. */
	if ((swkey->eth.type == htons(ETH_P_IP) || swkey->eth.type == htons(ETH_P_IPV6)) && swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
		if (swkey->ip.proto == IPPROTO_TCP) {
			struct ovs_key_tcp *tcp_key;
			nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
			if (!nla)
				goto nla_put_failure;
			tcp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				tcp_key->tcp_src = swkey->ipv4.tp.src;
				tcp_key->tcp_dst = swkey->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				tcp_key->tcp_src = swkey->ipv6.tp.src;
				tcp_key->tcp_dst = swkey->ipv6.tp.dst;
			}
		} else if (swkey->ip.proto == IPPROTO_UDP) {
			struct ovs_key_udp *udp_key;
			nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
			if (!nla)
				goto nla_put_failure;
			udp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				udp_key->udp_src = swkey->ipv4.tp.src;
				udp_key->udp_dst = swkey->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				udp_key->udp_src = swkey->ipv6.tp.src;
				udp_key->udp_dst = swkey->ipv6.tp.dst;
			}
		} else if (swkey->eth.type == htons(ETH_P_IP) && swkey->ip.proto == IPPROTO_ICMP) {
			struct ovs_key_icmp *icmp_key;
			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
			if (!nla)
				goto nla_put_failure;
			icmp_key = nla_data(nla);
			/* Type/code were stored in the port fields in network
			 * byte order; convert back to host bytes. */
			icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src);
			icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst);
		} else if (swkey->eth.type == htons(ETH_P_IPV6) && swkey->ip.proto == IPPROTO_ICMPV6) {
			struct ovs_key_icmpv6 *icmpv6_key;
			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6, sizeof(*icmpv6_key));
			if (!nla)
				goto nla_put_failure;
			icmpv6_key = nla_data(nla);
			icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src);
			icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst);
			/* Neighbor discovery messages also carry an ND
			 * attribute (the two type constants are redacted). */
			if (icmpv6_key->icmpv6_type == <API key> || icmpv6_key->icmpv6_type == <API key>) {
				struct ovs_key_nd *nd_key;
				nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
				if (!nla)
					goto nla_put_failure;
				nd_key = nla_data(nla);
				memcpy(nd_key->nd_target, &swkey->ipv6.nd.target, sizeof(nd_key->nd_target));
				memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN);
				memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN);
			}
		}
	}
unencap:
	if (encap)
		nla_nest_end(skb, encap);
	return 0;
nla_put_failure:
	return -EMSGSIZE;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;
	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_cache);
}
#include "system.h"

#if defined (HAVE_WAYLAND)

#include <boost/scoped_ptr.hpp>

#include "Application.h"
#include "xbmc/windowing/WindowingFactory.h"
#include "WinEventsWayland.h"
#include "wayland/EventListener.h"
#include "wayland/InputFactory.h"
#include "wayland/EventLoop.h"

namespace xwe = xbmc::wayland::events;

namespace
{
/* Adapter that forwards wayland event-loop callbacks to the global
 * application object. Lives in an anonymous namespace: it is an
 * implementation detail of this translation unit. */
class XBMCListener : public xbmc::IEventListener
{
public:
  virtual void OnEvent(XBMC_Event &event);
  virtual void OnFocused();
  virtual void OnUnfocused();
};

/* File-scope singletons: one listener, one input factory (created when a
 * seat appears), one event loop (created when a queue strategy is set). */
XBMCListener g_listener;
boost::scoped_ptr <xbmc::InputFactory> g_inputInstance;
boost::scoped_ptr <xwe::Loop> g_eventLoop;
}

/* Forward a translated wayland event straight to the application. */
void XBMCListener::OnEvent(XBMC_Event &e)
{
  g_application.OnEvent(e);
}

/* Record focus gain on the application and notify the windowing layer.
 * NOTE(review): the windowing method name is redacted in this copy
 * (<API key>) — presumably a focus-notification setter; confirm
 * against the real WindowingFactory header. */
void XBMCListener::OnFocused()
{
  g_application.m_AppFocused = true;
  g_Windowing.<API key>(g_application.m_AppFocused);
}

/* Record focus loss on the application and notify the windowing layer. */
void XBMCListener::OnUnfocused()
{
  g_application.m_AppFocused = false;
  g_Windowing.<API key>(g_application.m_AppFocused);
}

CWinEventsWayland::CWinEventsWayland()
{
}

/* No device enumeration is needed for wayland input; intentionally empty. */
void CWinEventsWayland::RefreshDevices()
{
}

/* Wayland input has no remote-control battery concept. */
bool CWinEventsWayland::IsRemoteLowBattery()
{
  return false;
}

/* This function reads the display connection and dispatches
 * any events through the specified object listeners */
bool CWinEventsWayland::MessagePump()
{
  /* No event loop yet (no queue strategy installed): nothing to pump. */
  if (!g_eventLoop.get())
    return false;

  g_eventLoop->Dispatch();
  return true;
}

size_t CWinEventsWayland::GetQueueSize()
{
  /* We can't query the size of the queue */
  return 0;
}

/* Creates the event loop around the supplied queueing strategy.
 * NOTE(review): method name redacted (<API key>) — presumably the
 * "set event queue strategy" entry point; confirm against the header. */
void CWinEventsWayland::<API key>(xwe::IEventQueueStrategy &strategy)
{
  g_eventLoop.reset(new xwe::Loop(g_listener, strategy));
}

/* Tears the event loop down again.
 * NOTE(review): method name redacted (<API key>) in this copy. */
void CWinEventsWayland::<API key>()
{
  g_eventLoop.reset();
}

/* Once we know about a wayland seat, we can just create our manager
 * object to encapsulate all of that state.
When the seat goes away * we just unset the manager object and it is all cleaned up at that * point */ void CWinEventsWayland::SetWaylandSeat(IDllWaylandClient &clientLibrary, IDllXKBCommon &xkbCommonLibrary, struct wl_seat *s) { if (!g_eventLoop.get()) throw std::logic_error("Must have a wl_display set before setting " "the wl_seat in CWinEventsWayland "); g_inputInstance.reset(new xbmc::InputFactory(clientLibrary, xkbCommonLibrary, s, *g_eventLoop, *g_eventLoop)); } void CWinEventsWayland::DestroyWaylandSeat() { g_inputInstance.reset(); } /* When a surface becomes available, this function should be called * to register it as the current one for processing input events on. * * It is a precondition violation to call this function before * a seat has been registered */ void CWinEventsWayland::SetXBMCSurface(struct wl_surface *s) { if (!g_inputInstance.get()) throw std::logic_error("Must have a wl_seat set before setting " "the wl_surface in CWinEventsWayland"); g_inputInstance->SetXBMCSurface(s); } #endif
#include <string.h>
#include <node.h>

#include "macros.h"
#include "database.h"
#include "statement.h"

using namespace node_sqlite3;

/* NOTE(review): the persistent-template member name is redacted in this
 * copy (<API key>) — presumably "constructor_template"; confirm upstream. */
Persistent<FunctionTemplate> Database::<API key>;

/* Registers the Database JS class on the module exports: prototype
 * methods, the "open" getter, and the persistent constructor template.
 * NOTE(review): the per-method registration macro is redacted
 * (<API key>) — presumably NODE_SET_PROTOTYPE_METHOD; confirm. */
void Database::Init(Handle<Object> target) {
    NanScope();

    Local<FunctionTemplate> t = NanNew<FunctionTemplate>(New);
    t->InstanceTemplate()-><API key>(1);
    t->SetClassName(NanNew("Database"));

    <API key>(t, "close", Close);
    <API key>(t, "exec", Exec);
    <API key>(t, "wait", Wait);
    <API key>(t, "loadExtension", LoadExtension);
    <API key>(t, "serialize", Serialize);
    <API key>(t, "parallelize", Parallelize);
    <API key>(t, "configure", Configure);

    NODE_SET_GETTER(t, "open", OpenGetter);

    NanAssignPersistent(<API key>, t);
    target->Set(NanNew("Database"), t->GetFunction());
}

/* Drains the pending-call queue. If the handle is closed while calls are
 * still queued, every queued callback is invoked with a SQLITE_MISUSE
 * error (or an "error" event is emitted when no callback exists).
 * Otherwise, calls are run until an exclusive call has to wait for
 * pending work, or an exclusive call takes the lock. */
void Database::Process() {
    NanScope();

    if (!open && locked && !queue.empty()) {
        EXCEPTION(NanNew<String>("Database handle is closed"), SQLITE_MISUSE, exception);
        Local<Value> argv[] = { exception };
        bool called = false;

        // Call all callbacks with the error object.
        while (!queue.empty()) {
            Call* call = queue.front();
            Local<Function> cb = NanNew(call->baton->callback);
            if (!cb.IsEmpty() && cb->IsFunction()) {
                TRY_CATCH_CALL(NanObjectWrapHandle(this), cb, 1, argv);
                called = true;
            }
            queue.pop();
            // We don't call the actual callback, so we have to make sure that
            // the baton gets destroyed.
            delete call->baton;
            delete call;
        }

        // When we couldn't call a callback function, emit an error on the
        // Database object.
        if (!called) {
            Local<Value> args[] = { NanNew("error"), exception };
            EMIT_EVENT(NanObjectWrapHandle(this), 2, args);
        }
        return;
    }

    while (open && (!locked || pending == 0) && !queue.empty()) {
        Call* call = queue.front();

        // An exclusive call waits until all pending work has drained.
        if (call->exclusive && pending > 0) {
            break;
        }

        queue.pop();
        locked = call->exclusive;
        call->callback(call->baton);
        delete call;

        // An exclusive call holds the lock; stop dispatching further calls.
        if (locked) break;
    }
}

/* Either runs `callback` immediately or enqueues it, depending on the
 * open/locked/serialize state. On a closed-and-locked (dead) handle the
 * baton's callback is invoked with a SQLITE_MISUSE error instead. */
void Database::Schedule(Work_Callback callback, Baton* baton, bool exclusive) {
    NanScope();

    if (!open && locked) {
        EXCEPTION(NanNew<String>("Database is closed"), SQLITE_MISUSE, exception);
        Local<Function> cb = NanNew(baton->callback);
        if (!cb.IsEmpty() && cb->IsFunction()) {
            Local<Value> argv[] = { exception };
            TRY_CATCH_CALL(NanObjectWrapHandle(this), cb, 1, argv);
        }
        else {
            Local<Value> argv[] = { NanNew("error"), exception };
            EMIT_EVENT(NanObjectWrapHandle(this), 2, argv);
        }
        return;
    }

    if (!open || ((locked || exclusive || serialize) && pending > 0)) {
        queue.push(new Call(callback, baton, exclusive || serialize));
    }
    else {
        locked = exclusive;
        callback(baton);
    }
}

/* JS constructor: new Database(filename, [mode], [callback]).
 * Sets read-only `filename` and `mode` properties on the instance and
 * kicks off the asynchronous open.
 * NOTE(review): the argument-extraction macro and two of the default
 * open flags are redacted (<API key>) — presumably REQUIRE_ARGUMENT_STRING
 * and SQLITE_OPEN_READWRITE | SQLITE_OPEN_FULLMUTEX; confirm upstream. */
NAN_METHOD(Database::New) {
    NanScope();

    if (!args.IsConstructCall()) {
        return NanThrowTypeError("Use the new operator to create new Database objects");
    }

    <API key>(0, filename);
    int pos = 1;

    int mode;
    if (args.Length() >= pos && args[pos]->IsInt32()) {
        mode = args[pos++]->Int32Value();
    }
    else {
        mode = <API key> | SQLITE_OPEN_CREATE | <API key>;
    }

    Local<Function> callback;
    if (args.Length() >= pos && args[pos]->IsFunction()) {
        callback = Local<Function>::Cast(args[pos++]);
    }

    Database* db = new Database();
    db->Wrap(args.This());

    args.This()->Set(NanNew("filename"), args[0]->ToString(), ReadOnly);
    args.This()->Set(NanNew("mode"), NanNew<Integer>(mode), ReadOnly);

    // Start opening the database.
    OpenBaton* baton = new OpenBaton(db, callback, *filename, mode);
    Work_BeginOpen(baton);

    NanReturnValue(args.This());
}

/* Queues the open on the libuv thread pool. */
void Database::Work_BeginOpen(Baton* baton) {
    int status = uv_queue_work(uv_default_loop(), &baton->request, Work_Open, (uv_after_work_cb)Work_AfterOpen);
    assert(status == 0);
}

/* Thread-pool half of open: calls sqlite3_open_v2 and, on failure,
 * captures the message and closes/NULLs the handle.
 * NOTE(review): the post-open default-setup call is redacted
 * (<API key>(db->_handle, 1000)) — presumably sqlite3_busy_timeout;
 * confirm upstream. */
void Database::Work_Open(uv_work_t* req) {
    OpenBaton* baton = static_cast<OpenBaton*>(req->data);
    Database* db = baton->db;

    baton->status = sqlite3_open_v2(
        baton->filename.c_str(),
        &db->_handle,
        baton->mode,
        NULL
    );

    if (baton->status != SQLITE_OK) {
        baton->message = std::string(sqlite3_errmsg(db->_handle));
        sqlite3_close(db->_handle);
        db->_handle = NULL;
    }
    else {
        // Set default database handle values.
        <API key>(db->_handle, 1000);
    }
}

/* Main-thread half of open: marks the db open on success, invokes the
 * user callback (or emits "error"), emits "open", and drains the queue. */
void Database::Work_AfterOpen(uv_work_t* req) {
    NanScope();
    OpenBaton* baton = static_cast<OpenBaton*>(req->data);
    Database* db = baton->db;

    Local<Value> argv[1];
    if (baton->status != SQLITE_OK) {
        EXCEPTION(NanNew<String>(baton->message.c_str()), baton->status, exception);
        argv[0] = exception;
    }
    else {
        db->open = true;
        argv[0] = NanNew(NanNull());
    }

    Local<Function> cb = NanNew(baton->callback);

    if (!cb.IsEmpty() && cb->IsFunction()) {
        TRY_CATCH_CALL(NanObjectWrapHandle(db), cb, 1, argv);
    }
    else if (!db->open) {
        // No callback to report the failure to: emit an "error" event.
        Local<Value> args[] = { NanNew("error"), argv[0] };
        EMIT_EVENT(NanObjectWrapHandle(db), 2, args);
    }

    if (db->open) {
        Local<Value> args[] = { NanNew("open") };
        EMIT_EVENT(NanObjectWrapHandle(db), 1, args);
        db->Process();
    }

    delete baton;
}

/* JS getter for the boolean `open` property. */
NAN_GETTER(Database::OpenGetter) {
    NanScope();
    Database* db = ObjectWrap::Unwrap<Database>(args.This());
    NanReturnValue(NanNew<Boolean>(db->open));
}

/* JS close([callback]) — scheduled exclusively so it runs after all
 * pending work. NOTE(review): argument macro redacted (<API key>). */
NAN_METHOD(Database::Close) {
    NanScope();
    Database* db = ObjectWrap::Unwrap<Database>(args.This());
    <API key>(0, callback);

    Baton* baton = new Baton(db, callback);
    db->Schedule(Work_BeginClose, baton, true);

    NanReturnValue(args.This());
}

/* Queues the close on the thread pool; also removes any installed
 * trace/profile hooks first (they hold references to the handle). */
void Database::Work_BeginClose(Baton* baton) {
    assert(baton->db->locked);
    assert(baton->db->open);
    assert(baton->db->_handle);
    assert(baton->db->pending == 0);

    baton->db->RemoveCallbacks();

    int status = uv_queue_work(uv_default_loop(), &baton->request, Work_Close, (uv_after_work_cb)Work_AfterClose);
    assert(status == 0);
}

/* Thread-pool half of close: NULLs the handle only on success so a
 * failed close can still be retried/reported. */
void Database::Work_Close(uv_work_t* req) {
    Baton* baton = static_cast<Baton*>(req->data);
    Database* db = baton->db;

    baton->status = sqlite3_close(db->_handle);

    if (baton->status != SQLITE_OK) {
        baton->message = std::string(sqlite3_errmsg(db->_handle));
    }
    else {
        db->_handle = NULL;
    }
}

/* Main-thread half of close: fires the callback or "error"/"close"
 * events and drains the queue once closed. */
void Database::Work_AfterClose(uv_work_t* req) {
    NanScope();
    Baton* baton = static_cast<Baton*>(req->data);
    Database* db = baton->db;

    Local<Value> argv[1];
    if (baton->status != SQLITE_OK) {
        EXCEPTION(NanNew<String>(baton->message.c_str()), baton->status, exception);
        argv[0] = exception;
    }
    else {
        db->open = false;
        // Leave db->locked to indicate that this db object has reached
        // the end of its life.
        argv[0] = NanNew(NanNull());
    }

    Local<Function> cb = NanNew(baton->callback);

    // Fire callbacks.
    if (!cb.IsEmpty() && cb->IsFunction()) {
        TRY_CATCH_CALL(NanObjectWrapHandle(db), cb, 1, argv);
    }
    else if (db->open) {
        Local<Value> args[] = { NanNew("error"), argv[0] };
        EMIT_EVENT(NanObjectWrapHandle(db), 2, args);
    }

    if (!db->open) {
        Local<Value> args[] = { NanNew("close"), argv[0] };
        EMIT_EVENT(NanObjectWrapHandle(db), 1, args);
        db->Process();
    }

    delete baton;
}

/* JS serialize([callback]): turns serialized scheduling on; when a
 * callback is given it runs synchronously and the previous flag value
 * is restored afterwards (restore happens only when the callback ran). */
NAN_METHOD(Database::Serialize) {
    NanScope();
    Database* db = ObjectWrap::Unwrap<Database>(args.This());
    <API key>(0, callback);

    bool before = db->serialize;
    db->serialize = true;

    if (!callback.IsEmpty() && callback->IsFunction()) {
        TRY_CATCH_CALL(args.This(), callback, 0, NULL);
        db->serialize = before;
    }

    db->Process();

    NanReturnValue(args.This());
}

/* JS parallelize([callback]): mirror image of Serialize — turns the
 * serialize flag off for the scope of the optional callback. */
NAN_METHOD(Database::Parallelize) {
    NanScope();
    Database* db = ObjectWrap::Unwrap<Database>(args.This());
    <API key>(0, callback);

    bool before = db->serialize;
    db->serialize = false;

    if (!callback.IsEmpty() && callback->IsFunction()) {
        TRY_CATCH_CALL(args.This(), callback, 0, NULL);
        db->serialize = before;
    }

    db->Process();

    NanReturnValue(args.This());
}

/* JS configure(option, value): toggles "trace"/"profile" hooks or sets
 * "busyTimeout"; anything else throws.
 * NOTE(review): the scheduled trace/profile registration functions are
 * redacted (<API key>) — presumably RegisterTraceCallback /
 * RegisterProfileCallback; confirm upstream. */
NAN_METHOD(Database::Configure) {
    NanScope();
    Database* db = ObjectWrap::Unwrap<Database>(args.This());
    REQUIRE_ARGUMENTS(2);

    if (args[0]->Equals(NanNew("trace"))) {
        Local<Function> handle;
        Baton* baton = new Baton(db, handle);
        db->Schedule(<API key>, baton);
    }
    else if (args[0]->Equals(NanNew("profile"))) {
        Local<Function> handle;
        Baton* baton = new Baton(db, handle);
        db->Schedule(<API key>, baton);
    }
    else if (args[0]->Equals(NanNew("busyTimeout"))) {
        if (!args[1]->IsInt32()) {
            return NanThrowTypeError("Value must be an integer");
        }
        Local<Function> handle;
        Baton* baton = new Baton(db, handle);
        // Abuse the baton's status field to carry the timeout value.
        baton->status = args[1]->Int32Value();
        db->Schedule(SetBusyTimeout, baton);
    }
    else {
        return NanThrowError(Exception::Error(String::Concat(
            args[0]->ToString(),
            NanNew<String>(" is not a valid configuration option")
        )));
    }

    db->Process();

    NanReturnValue(args.This());
}

/* Applies the busy timeout carried in baton->status.
 * NOTE(review): the call is redacted (<API key>) — presumably
 * sqlite3_busy_timeout; confirm upstream. */
void Database::SetBusyTimeout(Baton* baton) {
    assert(baton->db->open);
    assert(baton->db->_handle);

    // Abuse the status field for passing the timeout.
    <API key>(baton->db->_handle, baton->status);

    delete baton;
}

/* Toggles the sqlite3 trace hook: installs an AsyncTrace bridge when
 * none exists, removes it otherwise.
 * NOTE(review): function name redacted (<API key>) in this copy. */
void Database::<API key>(Baton* baton) {
    assert(baton->db->open);
    assert(baton->db->_handle);
    Database* db = baton->db;

    if (db->debug_trace == NULL) {
        // Add it.
        db->debug_trace = new AsyncTrace(db, TraceCallback);
        sqlite3_trace(db->_handle, TraceCallback, db);
    }
    else {
        // Remove it.
        sqlite3_trace(db->_handle, NULL, NULL);
        db->debug_trace->finish();
        db->debug_trace = NULL;
    }

    delete baton;
}

void Database::TraceCallback(void* db, const char* sql) {
    // Note: This function is called in the thread pool.
    // Note: Some queries, such as "EXPLAIN" queries, are not sent through this.
    static_cast<Database*>(db)->debug_trace->send(new std::string(sql));
}

void Database::TraceCallback(Database* db, std::string* sql) {
    // Note: This function is called in the main V8 thread.
    NanScope();
    Local<Value> argv[] = { NanNew("trace"), NanNew<String>(sql->c_str()) };
    EMIT_EVENT(NanObjectWrapHandle(db), 2, argv);
    delete sql;
}

/* Toggles the sqlite3 profile hook, mirroring the trace toggle above.
 * NOTE(review): function name redacted (<API key>) in this copy. */
void Database::<API key>(Baton* baton) {
    assert(baton->db->open);
    assert(baton->db->_handle);
    Database* db = baton->db;

    if (db->debug_profile == NULL) {
        // Add it.
        db->debug_profile = new AsyncProfile(db, ProfileCallback);
        sqlite3_profile(db->_handle, ProfileCallback, db);
    }
    else {
        // Remove it.
        sqlite3_profile(db->_handle, NULL, NULL);
        db->debug_profile->finish();
        db->debug_profile = NULL;
    }

    delete baton;
}

void Database::ProfileCallback(void* db, const char* sql, sqlite3_uint64 nsecs) {
    // Note: This function is called in the thread pool.
    // Note: Some queries, such as "EXPLAIN" queries, are not sent through this.
    ProfileInfo* info = new ProfileInfo();
    info->sql = std::string(sql);
    info->nsecs = nsecs;
    static_cast<Database*>(db)->debug_profile->send(info);
}

/* Main-thread profile bridge: emits "profile" with the SQL text and the
 * elapsed time (nanoseconds scaled to milliseconds). */
void Database::ProfileCallback(Database *db, ProfileInfo* info) {
    NanScope();
    Local<Value> argv[] = {
        NanNew("profile"),
        NanNew<String>(info->sql.c_str()),
        NanNew<Integer>((double)info->nsecs / 1000000.0)
    };
    EMIT_EVENT(NanObjectWrapHandle(db), 3, argv);
    delete info;
}

/* Toggles the sqlite3 update hook, mirroring the trace/profile toggles.
 * NOTE(review): function name redacted (<API key>) in this copy. */
void Database::<API key>(Baton* baton) {
    assert(baton->db->open);
    assert(baton->db->_handle);
    Database* db = baton->db;

    if (db->update_event == NULL) {
        // Add it.
        db->update_event = new AsyncUpdate(db, UpdateCallback);
        sqlite3_update_hook(db->_handle, UpdateCallback, db);
    }
    else {
        // Remove it.
        sqlite3_update_hook(db->_handle, NULL, NULL);
        db->update_event->finish();
        db->update_event = NULL;
    }

    delete baton;
}

void Database::UpdateCallback(void* db, int type, const char* database, const char* table, sqlite3_int64 rowid) {
    // Note: This function is called in the thread pool.
    // Note: Some queries, such as "EXPLAIN" queries, are not sent through this.
    UpdateInfo* info = new UpdateInfo();
    info->type = type;
    info->database = std::string(database);
    info->table = std::string(table);
    info->rowid = rowid;
    static_cast<Database*>(db)->update_event->send(info);
}

/* Main-thread update bridge: emits an event named after the change type.
 * NOTE(review): the type-to-name converter is redacted (<API key>) —
 * presumably a sqlite authorizer-code-to-string helper; confirm. */
void Database::UpdateCallback(Database *db, UpdateInfo* info) {
    NanScope();
    Local<Value> argv[] = {
        NanNew(<API key>(info->type)),
        NanNew<String>(info->database.c_str()),
        NanNew<String>(info->table.c_str()),
        NanNew<Integer>(info->rowid),
    };
    EMIT_EVENT(NanObjectWrapHandle(db), 4, argv);
    delete info;
}

/* JS exec(sql, [callback]) — exclusive, so it runs with the queue idle.
 * NOTE(review): argument macros redacted (<API key>). */
NAN_METHOD(Database::Exec) {
    NanScope();
    Database* db = ObjectWrap::Unwrap<Database>(args.This());

    <API key>(0, sql);
    <API key>(1, callback);

    Baton* baton = new ExecBaton(db, callback, *sql);
    db->Schedule(Work_BeginExec, baton, true);

    NanReturnValue(args.This());
}

void Database::Work_BeginExec(Baton* baton) {
    assert(baton->db->locked);
    assert(baton->db->open);
    assert(baton->db->_handle);
    assert(baton->db->pending == 0);
    int status = uv_queue_work(uv_default_loop(), &baton->request, Work_Exec, (uv_after_work_cb)Work_AfterExec);
    assert(status == 0);
}

/* Thread-pool half of exec: runs the SQL and captures/frees sqlite's
 * error message on failure. */
void Database::Work_Exec(uv_work_t* req) {
    ExecBaton* baton = static_cast<ExecBaton*>(req->data);

    char* message = NULL;
    baton->status = sqlite3_exec(
        baton->db->_handle,
        baton->sql.c_str(),
        NULL,
        NULL,
        &message
    );

    if (baton->status != SQLITE_OK && message != NULL) {
        baton->message = std::string(message);
        sqlite3_free(message);
    }
}

/* Main-thread half of exec: reports success/failure then drains queue. */
void Database::Work_AfterExec(uv_work_t* req) {
    NanScope();
    ExecBaton* baton = static_cast<ExecBaton*>(req->data);
    Database* db = baton->db;

    Local<Function> cb = NanNew(baton->callback);

    if (baton->status != SQLITE_OK) {
        EXCEPTION(NanNew<String>(baton->message.c_str()), baton->status, exception);

        if (!cb.IsEmpty() && cb->IsFunction()) {
            Local<Value> argv[] = { exception };
            TRY_CATCH_CALL(NanObjectWrapHandle(db), cb, 1, argv);
        }
        else {
            Local<Value> args[] = { NanNew("error"), exception };
            EMIT_EVENT(NanObjectWrapHandle(db), 2, args);
        }
    }
    else if (!cb.IsEmpty() && cb->IsFunction()) {
        Local<Value> argv[] = { NanNew(NanNull()) };
        TRY_CATCH_CALL(NanObjectWrapHandle(db), cb, 1, argv);
    }

    db->Process();

    delete baton;
}

/* JS wait([callback]) — exclusive no-op barrier: the callback fires once
 * everything scheduled before it has completed. */
NAN_METHOD(Database::Wait) {
    NanScope();
    Database* db = ObjectWrap::Unwrap<Database>(args.This());

    <API key>(0, callback);

    Baton* baton = new Baton(db, callback);
    db->Schedule(Work_Wait, baton, true);

    NanReturnValue(args.This());
}

/* Runs entirely on the main thread: invoke the barrier callback, then
 * continue processing the queue. */
void Database::Work_Wait(Baton* baton) {
    NanScope();

    assert(baton->db->locked);
    assert(baton->db->open);
    assert(baton->db->_handle);
    assert(baton->db->pending == 0);

    Local<Function> cb = NanNew(baton->callback);
    if (!cb.IsEmpty() && cb->IsFunction()) {
        Local<Value> argv[] = { NanNew(NanNull()) };
        TRY_CATCH_CALL(NanObjectWrapHandle(baton->db), cb, 1, argv);
    }

    baton->db->Process();

    delete baton;
}

/* JS loadExtension(filename, [callback]) — exclusive.
 * NOTE(review): argument macros and the scheduled begin function are
 * redacted (<API key>) — presumably Work_BeginLoadExtension; confirm. */
NAN_METHOD(Database::LoadExtension) {
    NanScope();
    Database* db = ObjectWrap::Unwrap<Database>(args.This());

    <API key>(0, filename);
    <API key>(1, callback);

    Baton* baton = new LoadExtensionBaton(db, callback, *filename);
    db->Schedule(<API key>, baton, true);

    NanReturnValue(args.This());
}

void Database::<API key>(Baton* baton) {
    assert(baton->db->locked);
    assert(baton->db->open);
    assert(baton->db->_handle);
    assert(baton->db->pending == 0);
    int status = uv_queue_work(uv_default_loop(), &baton->request, Work_LoadExtension, (uv_after_work_cb)<API key>);
    assert(status == 0);
}

/* Thread-pool half of loadExtension: extension loading is enabled only
 * for the duration of the call, then disabled again.
 * NOTE(review): the enable/load calls are redacted (<API key>) —
 * presumably sqlite3_enable_load_extension / sqlite3_load_extension. */
void Database::Work_LoadExtension(uv_work_t* req) {
    LoadExtensionBaton* baton = static_cast<LoadExtensionBaton*>(req->data);

    <API key>(baton->db->_handle, 1);

    char* message = NULL;
    baton->status = <API key>(
        baton->db->_handle,
        baton->filename.c_str(),
        0,
        &message
    );

    <API key>(baton->db->_handle, 0);

    if (baton->status != SQLITE_OK && message != NULL) {
        baton->message = std::string(message);
        sqlite3_free(message);
    }
}

/* Main-thread half of loadExtension: mirrors Work_AfterExec. */
void Database::<API key>(uv_work_t* req) {
    NanScope();
    LoadExtensionBaton* baton = static_cast<LoadExtensionBaton*>(req->data);
    Database* db = baton->db;

    Local<Function> cb = NanNew(baton->callback);

    if (baton->status != SQLITE_OK) {
        EXCEPTION(NanNew<String>(baton->message.c_str()), baton->status, exception);

        if (!cb.IsEmpty() && cb->IsFunction()) {
            Local<Value> argv[] = { exception };
            TRY_CATCH_CALL(NanObjectWrapHandle(db), cb, 1, argv);
        }
        else {
            Local<Value> args[] = { NanNew("error"), exception };
            EMIT_EVENT(NanObjectWrapHandle(db), 2, args);
        }
    }
    else if (!cb.IsEmpty() && cb->IsFunction()) {
        Local<Value> argv[] = { NanNew(NanNull()) };
        TRY_CATCH_CALL(NanObjectWrapHandle(db), cb, 1, argv);
    }

    db->Process();

    delete baton;
}

/* Tears down any installed trace/profile bridges (called before close). */
void Database::RemoveCallbacks() {
    if (debug_trace) {
        debug_trace->finish();
        debug_trace = NULL;
    }
    if (debug_profile) {
        debug_profile->finish();
        debug_profile = NULL;
    }
}
#undef DEBUG #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/slab.h> #include <linux/memblock.h> #include <asm/prom.h> #include <asm/iommu.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/udbg.h> #include <asm/firmware.h> #include <asm/cell-regs.h> #include "cell.h" #include "interrupt.h" /* Define <API key> to actually unmap non-used pages * instead of leaving them mapped to some dummy page. This can be * enabled once the appropriate workarounds for spider bugs have * been enabled */ #define <API key> /* Define <API key> to enforce protection of * IO PTEs based on the transfer direction. That can be enabled * once spider-net has been fixed to pass the correct direction * to the DMA mapping functions */ #define <API key> #define NR_IOMMUS 2 /* IOC mmap registers */ #define IOC_Reg_Size 0x2000 #define IOC_IOPT_CacheInvd 0x908 #define <API key> <API key> #define <API key> <API key> #define <API key> <API key> #define IOC_IOST_Origin 0x918 #define IOC_IOST_Origin_E <API key> #define IOC_IOST_Origin_HW <API key> #define IOC_IOST_Origin_HL <API key> #define IOC_IO_ExcpStat 0x920 #define IOC_IO_ExcpStat_V <API key> #define <API key> <API key> #define <API key> <API key> #define <API key> <API key> #define <API key> <API key> #define <API key> <API key> #define <API key> <API key> #define IOC_IO_ExcpMask 0x928 #define IOC_IO_ExcpMask_SFE <API key> #define IOC_IO_ExcpMask_PFE <API key> #define IOC_IOCmd_Offset 0x1000 #define IOC_IOCmd_Cfg 0xc00 #define IOC_IOCmd_Cfg_TE <API key> /* Segment table entries */ #define IOSTE_V <API key> /* valid */ #define IOSTE_H <API key> /* cache hint */ #define <API key> <API key> /* base RPN of IOPT */ #define IOSTE_NPPT_Mask <API key> /* no. 
pages in IOPT */ #define IOSTE_PS_Mask <API key> /* page size */ #define IOSTE_PS_4K <API key> /* - 4kB */ #define IOSTE_PS_64K <API key> /* - 64kB */ #define IOSTE_PS_1M <API key> /* - 1MB */ #define IOSTE_PS_16M <API key> /* - 16MB */ /* IOMMU sizing */ #define IO_SEGMENT_SHIFT 28 #define IO_PAGENO_BITS(shift) (IO_SEGMENT_SHIFT - (shift)) /* The high bit needs to be set on every DMA address */ #define SPIDER_DMA_OFFSET 0x80000000ul struct iommu_window { struct list_head list; struct cbe_iommu *iommu; unsigned long offset; unsigned long size; unsigned int ioid; struct iommu_table table; }; #define NAMESIZE 8 struct cbe_iommu { int nid; char name[NAMESIZE]; void __iomem *xlate_regs; void __iomem *cmd_regs; unsigned long *stab; unsigned long *ptab; void *pad_page; struct list_head windows; }; /* Static array of iommus, one per node * each contains a list of windows, keyed from dma_window property * - on bus setup, look for a matching window, or create one * - on dev setup, assign iommu_table ptr */ static struct cbe_iommu iommus[NR_IOMMUS]; static int cbe_nr_iommus; static void <API key>(struct cbe_iommu *iommu, unsigned long *pte, long n_ptes) { u64 __iomem *reg; u64 val; long n; reg = iommu->xlate_regs + IOC_IOPT_CacheInvd; while (n_ptes > 0) { /* we can invalidate up to 1 << 11 PTEs at once */ n = min(n_ptes, 1l << 11); val = (((n ) << 53) & <API key>) | (__pa(pte) & <API key>) | <API key>; out_be64(reg, val); while (in_be64(reg) & <API key>) ; n_ptes -= n; pte += n; } } static int tce_build_cell(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, unsigned long attrs) { int i; unsigned long *io_pte, base_pte; struct iommu_window *window = container_of(tbl, struct iommu_window, table); /* implementing proper protection causes problems with the spidernet * driver - check mapping directions later, but allow read & write by * default for now.*/ #ifdef <API key> /* to avoid referencing a global, we use a trick here 
to setup the * protection bit. "prot" is setup to be 3 fields of 4 bits appended * together for each of the 3 supported direction values. It is then * shifted left so that the fields matching the desired direction * lands on the appropriate bits, and other bits are masked out. */ const unsigned long prot = 0xc48; base_pte = ((prot << (52 + 4 * direction)) & (CBE_IOPTE_PP_W | CBE_IOPTE_PP_R)) | CBE_IOPTE_M | CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask); #else base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M | CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask); #endif if (unlikely(attrs & <API key>)) base_pte &= ~CBE_IOPTE_SO_RW; io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift)) io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask); mb(); <API key>(window->iommu, io_pte, npages); pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n", index, npages, direction, base_pte); return 0; } static void tce_free_cell(struct iommu_table *tbl, long index, long npages) { int i; unsigned long *io_pte, pte; struct iommu_window *window = container_of(tbl, struct iommu_window, table); pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages); #ifdef <API key> pte = 0; #else /* spider bridge does PCI reads after freeing - insert a mapping * to a scratch page instead of an invalid entry */ pte = CBE_IOPTE_PP_R | CBE_IOPTE_M | CBE_IOPTE_SO_RW | __pa(window->iommu->pad_page) | (window->ioid & CBE_IOPTE_IOID_Mask); #endif io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); for (i = 0; i < npages; i++) io_pte[i] = pte; mb(); <API key>(window->iommu, io_pte, npages); } static irqreturn_t ioc_interrupt(int irq, void *data) { unsigned long stat, spf; struct cbe_iommu *iommu = data; stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat); spf = stat & <API key>; /* Might want to rate limit it */ printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat); 
printk(KERN_ERR " V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n", !!(stat & IOC_IO_ExcpStat_V), (spf == <API key>) ? 'S' : ' ', (spf == <API key>) ? 'P' : ' ', (stat & <API key>) ? "Read" : "Write", (unsigned int)(stat & <API key>)); printk(KERN_ERR " page=0x%016lx\n", stat & <API key>); /* clear interrupt */ stat &= ~IOC_IO_ExcpStat_V; out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat); return IRQ_HANDLED; } static int cell_iommu_find_ioc(int nid, unsigned long *base) { struct device_node *np; struct resource r; *base = 0; /* First look for new style /be nodes */ <API key>(np, "ioc") { if (of_node_to_nid(np) != nid) continue; if (<API key>(np, 0, &r)) { printk(KERN_ERR "iommu: can't get address for %pOF\n", np); continue; } *base = r.start; of_node_put(np); return 0; } /* Ok, let's try the old way */ <API key>(np, "cpu") { const unsigned int *nidp; const unsigned long *tmp; nidp = of_get_property(np, "node-id", NULL); if (nidp && *nidp == nid) { tmp = of_get_property(np, "ioc-translation", NULL); if (tmp) { *base = *tmp; of_node_put(np); return 0; } } } return -ENODEV; } static void <API key>(struct cbe_iommu *iommu, unsigned long dbase, unsigned long dsize, unsigned long fbase, unsigned long fsize) { struct page *page; unsigned long segments, stab_size; segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT; pr_debug("%s: iommu[%d]: segments: %lu\n", __func__, iommu->nid, segments); /* set up the segment table */ stab_size = segments * sizeof(unsigned long); page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size)); BUG_ON(!page); iommu->stab = page_address(page); memset(iommu->stab, 0, stab_size); } static unsigned long *<API key>(struct cbe_iommu *iommu, unsigned long base, unsigned long size, unsigned long gap_base, unsigned long gap_size, unsigned long page_shift) { struct page *page; int i; unsigned long reg, segments, pages_per_segment, ptab_size, n_pte_pages, start_seg, *ptab; start_seg = base >> IO_SEGMENT_SHIFT; segments = size >> 
IO_SEGMENT_SHIFT; pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift); /* PTEs for each segment must start on a 4K boundary */ pages_per_segment = max(pages_per_segment, (1 << 12) / sizeof(unsigned long)); ptab_size = segments * pages_per_segment * sizeof(unsigned long); pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__, iommu->nid, ptab_size, get_order(ptab_size)); page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size)); BUG_ON(!page); ptab = page_address(page); memset(ptab, 0, ptab_size); /* number of 4K pages needed for a page table */ n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12; pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n", __func__, iommu->nid, iommu->stab, ptab, n_pte_pages); /* initialise the STEs */ reg = IOSTE_V | ((n_pte_pages - 1) << 5); switch (page_shift) { case 12: reg |= IOSTE_PS_4K; break; case 16: reg |= IOSTE_PS_64K; break; case 20: reg |= IOSTE_PS_1M; break; case 24: reg |= IOSTE_PS_16M; break; default: BUG(); } gap_base = gap_base >> IO_SEGMENT_SHIFT; gap_size = gap_size >> IO_SEGMENT_SHIFT; pr_debug("Setting up IOMMU stab:\n"); for (i = start_seg; i < (start_seg + segments); i++) { if (i >= gap_base && i < (gap_base + gap_size)) { pr_debug("\toverlap at %d, skipping\n", i); continue; } iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) * (i - start_seg)); pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]); } return ptab; } static void <API key>(struct cbe_iommu *iommu) { int ret; unsigned long reg, xlate_base; unsigned int virq; if (cell_iommu_find_ioc(iommu->nid, &xlate_base)) panic("%s: missing IOC register mappings for node %d\n", __func__, iommu->nid); iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size); iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset; /* ensure that the STEs have updated */ mb(); /* setup interrupts for the iommu. 
*/ reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat); out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, reg & ~IOC_IO_ExcpStat_V); out_be64(iommu->xlate_regs + IOC_IO_ExcpMask, IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE); virq = irq_create_mapping(NULL, IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT)); BUG_ON(!virq); ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu); BUG_ON(ret); /* set the IOC segment table origin register (and turn on the iommu) */ reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW; out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg); in_be64(iommu->xlate_regs + IOC_IOST_Origin); /* turn on IO translation */ reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE; out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg); } static void <API key>(struct cbe_iommu *iommu, unsigned long base, unsigned long size) { <API key>(iommu, base, size, 0, 0); iommu->ptab = <API key>(iommu, base, size, 0, 0, IOMMU_PAGE_SHIFT_4K); <API key>(iommu); } #if 0/* Unused for now */ static struct iommu_window *find_window(struct cbe_iommu *iommu, unsigned long offset, unsigned long size) { struct iommu_window *window; /* todo: check for overlapping (but not equal) windows) */ list_for_each_entry(window, &(iommu->windows), list) { if (window->offset == offset && window->size == size) return window; } return NULL; } #endif static inline u32 cell_iommu_get_ioid(struct device_node *np) { const u32 *ioid; ioid = of_get_property(np, "ioid", NULL); if (ioid == NULL) { printk(KERN_WARNING "iommu: missing ioid for %pOF using 0\n", np); return 0; } return *ioid; } static struct iommu_table_ops cell_iommu_ops = { .set = tce_build_cell, .clear = tce_free_cell }; static struct iommu_window * __init <API key>(struct cbe_iommu *iommu, struct device_node *np, unsigned long offset, unsigned long size, unsigned long pte_offset) { struct iommu_window *window; struct page *page; u32 ioid; ioid = cell_iommu_get_ioid(np); window = kzalloc_node(sizeof(*window), 
GFP_KERNEL, iommu->nid); BUG_ON(window == NULL); window->offset = offset; window->size = size; window->ioid = ioid; window->iommu = iommu; window->table.it_blocksize = 16; window->table.it_base = (unsigned long)iommu->ptab; window->table.it_index = iommu->nid; window->table.it_page_shift = IOMMU_PAGE_SHIFT_4K; window->table.it_offset = (offset >> window->table.it_page_shift) + pte_offset; window->table.it_size = size >> window->table.it_page_shift; window->table.it_ops = &cell_iommu_ops; iommu_init_table(&window->table, iommu->nid); pr_debug("\tioid %d\n", window->ioid); pr_debug("\tblocksize %ld\n", window->table.it_blocksize); pr_debug("\tbase 0x%016lx\n", window->table.it_base); pr_debug("\toffset 0x%lx\n", window->table.it_offset); pr_debug("\tsize %ld\n", window->table.it_size); list_add(&window->list, &iommu->windows); if (offset != 0) return window; /* We need to map and reserve the first IOMMU page since it's used * by the spider workaround. In theory, we only need to do that when * running on spider but it doesn't really matter. * * This code also assumes that we have a window that starts at 0, * which is the case on all spider based blades. */ page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0); BUG_ON(!page); iommu->pad_page = page_address(page); clear_page(iommu->pad_page); __set_bit(0, window->table.it_map); tce_build_cell(&window->table, window->table.it_offset, 1, (unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0); return window; } static struct cbe_iommu *cell_iommu_for_node(int nid) { int i; for (i = 0; i < cbe_nr_iommus; i++) if (iommus[i].nid == nid) return &iommus[i]; return NULL; } static unsigned long <API key>; static unsigned long <API key>; /* iommu_fixed_is_weak is set if booted with iommu_fixed=weak */ static int iommu_fixed_is_weak; static struct iommu_table *<API key>(struct device *dev) { struct iommu_window *window; struct cbe_iommu *iommu; /* Current implementation uses the first window available in that * node's iommu. 
We -might- do something smarter later though it may * never be necessary */ iommu = cell_iommu_for_node(dev_to_node(dev)); if (iommu == NULL || list_empty(&iommu->windows)) { dev_err(dev, "iommu: missing iommu for %pOF (node %d)\n", dev->of_node, dev_to_node(dev)); return NULL; } window = list_entry(iommu->windows.next, struct iommu_window, list); return &window->table; } /* A coherent allocation implies strong ordering */ static void *<API key>(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) { if (iommu_fixed_is_weak) return <API key>(dev, <API key>(dev), size, dma_handle, device_to_mask(dev), flag, dev_to_node(dev)); else return dma_nommu_ops.alloc(dev, size, dma_handle, flag, attrs); } static void <API key>(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { if (iommu_fixed_is_weak) iommu_free_coherent(<API key>(dev), size, vaddr, dma_handle); else dma_nommu_ops.free(dev, size, vaddr, dma_handle, attrs); } static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, unsigned long attrs) { if (iommu_fixed_is_weak == (attrs & <API key>)) return dma_nommu_ops.map_page(dev, page, offset, size, direction, attrs); else return iommu_map_page(dev, <API key>(dev), page, offset, size, device_to_mask(dev), direction, attrs); } static void <API key>(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction, unsigned long attrs) { if (iommu_fixed_is_weak == (attrs & <API key>)) dma_nommu_ops.unmap_page(dev, dma_addr, size, direction, attrs); else iommu_unmap_page(<API key>(dev), dma_addr, size, direction, attrs); } static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction, unsigned long attrs) { if (iommu_fixed_is_weak == (attrs & <API key>)) return dma_nommu_ops.map_sg(dev, sg, nents, direction, attrs); else return 
ppc_iommu_map_sg(dev, <API key>(dev), sg, nents, device_to_mask(dev), direction, attrs); } static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction, unsigned long attrs) { if (iommu_fixed_is_weak == (attrs & <API key>)) dma_nommu_ops.unmap_sg(dev, sg, nents, direction, attrs); else ppc_iommu_unmap_sg(<API key>(dev), sg, nents, direction, attrs); } static int <API key>(struct device *dev, u64 dma_mask); static const struct dma_map_ops dma_iommu_fixed_ops = { .alloc = <API key>, .free = <API key>, .map_sg = dma_fixed_map_sg, .unmap_sg = dma_fixed_unmap_sg, .dma_supported = <API key>, .map_page = dma_fixed_map_page, .unmap_page = <API key>, .mapping_error = <API key>, }; static void cell_dma_dev_setup(struct device *dev) { if (get_pci_dma_ops() == &dma_iommu_ops) <API key>(dev, <API key>(dev)); else if (get_pci_dma_ops() == &dma_nommu_ops) set_dma_offset(dev, <API key>); else BUG(); } static void <API key>(struct pci_dev *dev) { cell_dma_dev_setup(&dev->dev); } static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; /* We are only intereted in device addition */ if (action != <API key>) return 0; /* We use the PCI DMA ops */ dev->dma_ops = get_pci_dma_ops(); cell_dma_dev_setup(dev); return 0; } static struct notifier_block <API key> = { .notifier_call = cell_of_bus_notify }; static int __init <API key>(struct device_node *np, unsigned long *base, unsigned long *size) { const __be32 *dma_window; unsigned long index; /* Use ibm,dma-window if available, else, hard code ! 
*/ dma_window = of_get_property(np, "ibm,dma-window", NULL); if (dma_window == NULL) { *base = 0; *size = 0x80000000u; return -ENODEV; } of_parse_dma_window(np, dma_window, &index, base, size); return 0; } static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np) { struct cbe_iommu *iommu; int nid, i; /* Get node ID */ nid = of_node_to_nid(np); if (nid < 0) { printk(KERN_ERR "iommu: failed to get node for %pOF\n", np); return NULL; } pr_debug("iommu: setting up iommu for node %d (%pOF)\n", nid, np); /* XXX todo: If we can have multiple windows on the same IOMMU, which * isn't the case today, we probably want here to check whether the * iommu for that node is already setup. * However, there might be issue with getting the size right so let's * ignore that for now. We might want to completely get rid of the * multiple window support since the cell iommu supports per-page ioids */ if (cbe_nr_iommus >= NR_IOMMUS) { printk(KERN_ERR "iommu: too many IOMMUs detected ! (%pOF)\n", np); return NULL; } /* Init base fields */ i = cbe_nr_iommus++; iommu = &iommus[i]; iommu->stab = NULL; iommu->nid = nid; snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i); INIT_LIST_HEAD(&iommu->windows); return iommu; } static void __init cell_iommu_init_one(struct device_node *np, unsigned long offset) { struct cbe_iommu *iommu; unsigned long base, size; iommu = cell_iommu_alloc(np); if (!iommu) return; /* Obtain a window for it */ <API key>(np, &base, &size); pr_debug("\ttranslating window 0x%lx...0x%lx\n", base, base + size - 1); /* Initialize the hardware */ <API key>(iommu, base, size); /* Setup the iommu_table */ <API key>(iommu, np, base, size, offset >> IOMMU_PAGE_SHIFT_4K); } static void __init cell_disable_iommus(void) { int node; unsigned long base, val; void __iomem *xregs, *cregs; /* Make sure IOC translation is disabled on all nodes */ <API key>(node) { if (cell_iommu_find_ioc(node, &base)) continue; xregs = ioremap(base, IOC_Reg_Size); if (xregs == NULL) 
continue; cregs = xregs + IOC_IOCmd_Offset; pr_debug("iommu: cleaning up iommu on node %d\n", node); out_be64(xregs + IOC_IOST_Origin, 0); (void)in_be64(xregs + IOC_IOST_Origin); val = in_be64(cregs + IOC_IOCmd_Cfg); val &= ~IOC_IOCmd_Cfg_TE; out_be64(cregs + IOC_IOCmd_Cfg, val); (void)in_be64(cregs + IOC_IOCmd_Cfg); iounmap(xregs); } } static int __init <API key>(void) { struct device_node *np = NULL; unsigned long base = 0, size; /* When no iommu is present, we use direct DMA ops */ set_pci_dma_ops(&dma_nommu_ops); /* First make sure all IOC translation is turned off */ cell_disable_iommus(); /* If we have no Axon, we set up the spider DMA magic offset */ if (<API key>(NULL, "axon") == NULL) <API key> = SPIDER_DMA_OFFSET; /* Now we need to check to see where the memory is mapped * in PCI space. We assume that all busses use the same dma * window which is always the case so far on Cell, thus we * pick up the first pci-internal node we can find and check * the DMA window from there. */ <API key>(np, "axon") { if (np->parent == NULL || np->parent->parent != NULL) continue; if (<API key>(np, &base, &size) == 0) break; } if (np == NULL) { <API key>(np, "pci-internal") { if (np->parent == NULL || np->parent->parent != NULL) continue; if (<API key>(np, &base, &size) == 0) break; } } of_node_put(np); /* If we found a DMA window, we check if it's big enough to enclose * all of physical memory. If not, we force enable IOMMU */ if (np && size < <API key>()) { printk(KERN_WARNING "iommu: force-enabled, dma window" " (%ldMB) smaller than total memory (%lldMB)\n", size >> 20, <API key>() >> 20); return -ENODEV; } <API key> += base; if (<API key> != 0) <API key>.dma_dev_setup = <API key>; printk("iommu: disabled, direct DMA offset is 0x%lx\n", <API key>); return 0; } /* * Fixed IOMMU mapping support * * This code adds support for setting up a fixed IOMMU mapping on certain * cell machines. 
For 64-bit devices this avoids the performance overhead of * mapping and unmapping pages at runtime. 32-bit devices are unable to use * the fixed mapping. * * The fixed mapping is established at boot, and maps all of physical memory * 1:1 into device space at some offset. On machines with < 30 GB of memory * we setup the fixed mapping immediately above the normal IOMMU window. * * For example a machine with 4GB of memory would end up with the normal * IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In * this case a 64-bit device wishing to DMA to 1GB would be told to DMA to * 3GB, plus any offset required by firmware. The firmware offset is encoded * in the "dma-ranges" property. * * On machines with 30GB or more of memory, we are unable to place the fixed * mapping above the normal IOMMU window as we would run out of address space. * Instead we move the normal IOMMU window to coincide with the hash page * table, this region does not need to be part of the fixed mapping as no * device should ever be DMA'ing to it. We then setup the fixed mapping * from 0 to 32GB. 
*/ static u64 <API key>(struct device *dev) { u64 cpu_addr, size, best_size, dev_addr = OF_BAD_ADDR; struct device_node *np; const u32 *ranges = NULL; int i, len, best, naddr, nsize, pna, range_size; np = of_node_get(dev->of_node); while (1) { naddr = of_n_addr_cells(np); nsize = of_n_size_cells(np); np = of_get_next_parent(np); if (!np) break; ranges = of_get_property(np, "dma-ranges", &len); /* Ignore empty ranges, they imply no translation required */ if (ranges && len > 0) break; } if (!ranges) { dev_dbg(dev, "iommu: no dma-ranges found\n"); goto out; } len /= sizeof(u32); pna = of_n_addr_cells(np); range_size = naddr + nsize + pna; /* dma-ranges format: * child addr : naddr cells * parent addr : pna cells * size : nsize cells */ for (i = 0, best = -1, best_size = 0; i < len; i += range_size) { cpu_addr = <API key>(np, ranges + i + naddr); size = of_read_number(ranges + i + naddr + pna, nsize); if (cpu_addr == 0 && size > best_size) { best = i; best_size = size; } } if (best >= 0) { dev_addr = of_read_number(ranges + best, naddr); } else dev_dbg(dev, "iommu: no suitable range found!\n"); out: of_node_put(np); return dev_addr; } static int <API key>(struct device *dev, u64 dma_mask) { if (dma_mask == DMA_BIT_MASK(64) && <API key>(dev) != OF_BAD_ADDR) { u64 addr = <API key>(dev) + <API key>; dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n"); dev_dbg(dev, "iommu: fixed addr = %llx\n", addr); set_dma_ops(dev, &dma_iommu_fixed_ops); set_dma_offset(dev, addr); return 1; } if (<API key>(dev, dma_mask)) { dev_dbg(dev, "iommu: not 64-bit, using default ops\n"); set_dma_ops(dev, get_pci_dma_ops()); cell_dma_dev_setup(dev); return 1; } return 0; } static void insert_16M_pte(unsigned long addr, unsigned long *ptab, unsigned long base_pte) { unsigned long segment, offset; segment = addr >> IO_SEGMENT_SHIFT; offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24)); ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long)); pr_debug("iommu: addr %lx ptab %p segment %lx 
offset %lx\n", addr, ptab, segment, offset); ptab[offset] = base_pte | (__pa(addr) & CBE_IOPTE_RPN_Mask); } static void <API key>(struct cbe_iommu *iommu, struct device_node *np, unsigned long dbase, unsigned long dsize, unsigned long fbase, unsigned long fsize) { unsigned long base_pte, uaddr, ioaddr, *ptab; ptab = <API key>(iommu, fbase, fsize, dbase, dsize, 24); <API key> = fbase; pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase); base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M | (cell_iommu_get_ioid(np) & CBE_IOPTE_IOID_Mask); if (iommu_fixed_is_weak) pr_info("IOMMU: Using weak ordering for fixed mapping\n"); else { pr_info("IOMMU: Using strong ordering for fixed mapping\n"); base_pte |= CBE_IOPTE_SO_RW; } for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) { /* Don't touch the dynamic region */ ioaddr = uaddr + fbase; if (ioaddr >= dbase && ioaddr < (dbase + dsize)) { pr_debug("iommu: fixed/dynamic overlap, skipping\n"); continue; } insert_16M_pte(uaddr, ptab, base_pte); } mb(); } static int __init <API key>(void) { unsigned long dbase, dsize, fbase, fsize, hbase, hend; struct cbe_iommu *iommu; struct device_node *np; /* The fixed mapping is only supported on axon machines */ np = <API key>(NULL, "axon"); of_node_put(np); if (!np) { pr_debug("iommu: fixed mapping disabled, no axons found\n"); return -1; } /* We must have dma-ranges properties for fixed mapping to work */ np = <API key>(NULL, "dma-ranges"); of_node_put(np); if (!np) { pr_debug("iommu: no dma-ranges found, no fixed mapping\n"); return -1; } /* The default setup is to have the fixed mapping sit after the * dynamic region, so find the top of the largest IOMMU window * on any axon, then add the size of RAM and that's our max value. * If that is > 32GB we have to do other shennanigans. 
*/ fbase = 0; <API key>(np, "axon") { <API key>(np, &dbase, &dsize); fbase = max(fbase, dbase + dsize); } fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT); fsize = <API key>(); if ((fbase + fsize) <= 0x800000000ul) hbase = 0; /* use the device tree window */ else { /* If we're over 32 GB we need to cheat. We can't map all of * RAM with the fixed mapping, and also fit the dynamic * region. So try to place the dynamic region where the hash * table sits, drivers never need to DMA to it, we don't * need a fixed mapping for that area. */ if (!htab_address) { pr_debug("iommu: htab is NULL, on LPAR? Huh?\n"); return -1; } hbase = __pa(htab_address); hend = hbase + htab_size_bytes; /* The window must start and end on a segment boundary */ if ((hbase != _ALIGN_UP(hbase, 1 << IO_SEGMENT_SHIFT)) || (hend != _ALIGN_UP(hend, 1 << IO_SEGMENT_SHIFT))) { pr_debug("iommu: hash window not segment aligned\n"); return -1; } /* Check the hash window fits inside the real DMA window */ <API key>(np, "axon") { <API key>(np, &dbase, &dsize); if (hbase < dbase || (hend > (dbase + dsize))) { pr_debug("iommu: hash window doesn't fit in" "real DMA window\n"); return -1; } } fbase = 0; } /* Setup the dynamic regions */ <API key>(np, "axon") { iommu = cell_iommu_alloc(np); BUG_ON(!iommu); if (hbase == 0) <API key>(np, &dbase, &dsize); else { dbase = hbase; dsize = htab_size_bytes; } printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx " "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase, dbase + dsize, fbase, fbase + fsize); <API key>(iommu, dbase, dsize, fbase, fsize); iommu->ptab = <API key>(iommu, dbase, dsize, 0, 0, IOMMU_PAGE_SHIFT_4K); <API key>(iommu, np, dbase, dsize, fbase, fsize); <API key>(iommu); <API key>(iommu, np, dbase, dsize, 0); } dma_iommu_ops.dma_supported = <API key>; set_pci_dma_ops(&dma_iommu_ops); return 0; } static int <API key>; static int __init setup_iommu_fixed(char *str) { struct device_node *pciep; if (strcmp(str, "off") == 0) <API key> = 1; /* If we can find 
a pcie-endpoint in the device tree assume that * we're on a triblade or a CAB so by default the fixed mapping * should be set to be weakly ordered; but only if the boot * option WASN'T set for strong ordering */ pciep = <API key>(NULL, "pcie-endpoint"); if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0)) iommu_fixed_is_weak = <API key>; of_node_put(pciep); return 1; } __setup("iommu_fixed=", setup_iommu_fixed); static u64 <API key>(struct device *dev) { const struct dma_map_ops *dma_ops; if (!dev->dma_mask) return 0; if (!<API key> && <API key>(dev) != OF_BAD_ADDR) return DMA_BIT_MASK(64); dma_ops = get_dma_ops(dev); if (dma_ops->get_required_mask) return dma_ops->get_required_mask(dev); WARN_ONCE(1, "no get_required_mask in %p ops", dma_ops); return DMA_BIT_MASK(64); } static int __init cell_iommu_init(void) { struct device_node *np; /* If IOMMU is disabled or we have little enough RAM to not need * to enable it, we setup a direct mapping. * * Note: should we make sure we have the IOMMU actually disabled ? */ if (iommu_is_off || (!iommu_force_on && <API key>() <= 0x80000000ull)) if (<API key>() == 0) goto bail; /* Setup various callbacks */ <API key>.dma_dev_setup = <API key>; ppc_md.<API key> = <API key>; if (!<API key> && <API key>() == 0) goto bail; /* Create an iommu for each /axon node. */ <API key>(np, "axon") { if (np->parent == NULL || np->parent->parent != NULL) continue; cell_iommu_init_one(np, 0); } /* Create an iommu for each toplevel /pci-internal node for * old hardware/firmware */ <API key>(np, "pci-internal") { if (np->parent == NULL || np->parent->parent != NULL) continue; cell_iommu_init_one(np, SPIDER_DMA_OFFSET); } /* Setup default PCI iommu ops */ set_pci_dma_ops(&dma_iommu_ops); bail: /* Register callbacks on OF platform device addition/removal * to handle linking them to the right DMA operations */ <API key>(&platform_bus_type, &<API key>); return 0; } <API key>(cell, cell_iommu_init);
/*! * @file km_adaptor.c * * @brief The Adaptor component provides an interface to the * driver for a kernel user. */ #include <adaptor.h> #include <sf_util.h> #include <sah_queue_manager.h> #include <sah_memory_mapper.h> #include <fsl_shw_keystore.h> #ifdef FSL_HAVE_SCC #include <linux/mxc_scc_driver.h> #elif defined (FSL_HAVE_SCC2) #include <linux/mxc_scc2_driver.h> #endif EXPORT_SYMBOL(<API key>); EXPORT_SYMBOL(sah_register); EXPORT_SYMBOL(sah_deregister); EXPORT_SYMBOL(sah_get_results); EXPORT_SYMBOL(fsl_shw_smalloc); EXPORT_SYMBOL(fsl_shw_sfree); EXPORT_SYMBOL(fsl_shw_sstatus); EXPORT_SYMBOL(<API key>); EXPORT_SYMBOL(<API key>); EXPORT_SYMBOL(<API key>); EXPORT_SYMBOL(<API key>); EXPORT_SYMBOL(<API key>); EXPORT_SYMBOL(<API key>); EXPORT_SYMBOL(<API key>); EXPORT_SYMBOL(<API key>); EXPORT_SYMBOL(<API key>); #if defined(DIAG_DRV_IF) || defined(DIAG_MEM) || defined(DIAG_ADAPTOR) #include <diagnostic.h> #endif #if defined(DIAG_DRV_IF) || defined(DIAG_MEM) || defined(DIAG_ADAPTOR) #define MAX_DUMP 16 #define DIAG_MSG_SIZE 300 static char Diag_msg[DIAG_MSG_SIZE]; #endif /* This is the wait queue to this mode of driver */ <API key>(Wait_queue_km); /*! This matches Sahara2 capabilities... */ fsl_shw_pco_t <API key> = { 1, 3, /* api version number - major & minor */ 1, 6, /* driver version number - major & minor */ { FSL_KEY_ALG_AES, FSL_KEY_ALG_DES, FSL_KEY_ALG_TDES, FSL_KEY_ALG_ARC4}, { FSL_SYM_MODE_STREAM, FSL_SYM_MODE_ECB, FSL_SYM_MODE_CBC, FSL_SYM_MODE_CTR}, { FSL_HASH_ALG_MD5, FSL_HASH_ALG_SHA1, FSL_HASH_ALG_SHA224, FSL_HASH_ALG_SHA256}, /* * The following table must be set to handle all values of key algorithm * and sym mode, and be in the correct order.. 
*/ { /* Stream, ECB, CBC, CTR */ {0, 0, 0, 0}, /* HMAC */ {0, 1, 1, 1}, /* AES */ {0, 1, 1, 0}, /* DES */ {0, 1, 1, 0}, /* 3DES */ {1, 0, 0, 0} /* ARC4 */ }, 0, 0, 0, 0, 0, {{0, 0}} }; #ifdef DIAG_ADAPTOR void km_Dump_Chain(const sah_Desc * chain); void km_Dump_Region(const char *prefix, const unsigned char *data, unsigned length); static void km_Dump_Link(const char *prefix, const sah_Link * link); void km_Dump_Words(const char *prefix, const unsigned *data, unsigned length); #endif static void *my_malloc(void *ref, size_t n) { register void *mem; #ifndef DIAG_MEM_ERRORS mem = os_alloc_memory(n, GFP_KERNEL); #else { uint32_t rand; /* are we feeling lucky ? */ os_get_random_bytes(&rand, sizeof(rand)); if ((rand % DIAG_MEM_CONST) == 0) { mem = 0; } else { mem = os_alloc_memory(n, GFP_ATOMIC); } } #endif /* DIAG_MEM_ERRORS */ #ifdef DIAG_MEM sprintf(Diag_msg, "API kmalloc: %p for %d\n", mem, n); LOG_KDIAG(Diag_msg); #endif ref = 0; /* unused param warning */ return mem; } static sah_Head_Desc *my_alloc_head_desc(void *ref) { register sah_Head_Desc *ptr; #ifndef DIAG_MEM_ERRORS ptr = <API key>(); #else { uint32_t rand; /* are we feeling lucky ? */ os_get_random_bytes(&rand, sizeof(rand)); if ((rand % DIAG_MEM_CONST) == 0) { ptr = 0; } else { ptr = <API key>(); } } #endif ref = 0; return ptr; } static sah_Desc *my_alloc_desc(void *ref) { register sah_Desc *ptr; #ifndef DIAG_MEM_ERRORS ptr = <API key>(); #else { uint32_t rand; /* are we feeling lucky ? */ os_get_random_bytes(&rand, sizeof(rand)); if ((rand % DIAG_MEM_CONST) == 0) { ptr = 0; } else { ptr = <API key>(); } } #endif ref = 0; return ptr; } static sah_Link *my_alloc_link(void *ref) { register sah_Link *ptr; #ifndef DIAG_MEM_ERRORS ptr = sah_Alloc_Link(); #else { uint32_t rand; /* are we feeling lucky ? 
*/ os_get_random_bytes(&rand, sizeof(rand)); if ((rand % DIAG_MEM_CONST) == 0) { ptr = 0; } else { ptr = sah_Alloc_Link(); } } #endif ref = 0; return ptr; } static void my_free(void *ref, void *ptr) { ref = 0; /* unused param warning */ #ifdef DIAG_MEM sprintf(Diag_msg, "API kfree: %p\n", ptr); LOG_KDIAG(Diag_msg); #endif os_free_memory(ptr); } static void my_free_head_desc(void *ref, sah_Head_Desc * ptr) { <API key>(ptr); } static void my_free_desc(void *ref, sah_Desc * ptr) { sah_Free_Descriptor(ptr); } static void my_free_link(void *ref, sah_Link * ptr) { sah_Free_Link(ptr); } static void *my_memcpy(void *ref, void *dest, const void *src, size_t n) { ref = 0; /* unused param warning */ return memcpy(dest, src, n); } static void *my_memset(void *ref, void *ptr, int ch, size_t n) { ref = 0; /* unused param warning */ return memset(ptr, ch, n); } /*! Standard memory manipulation routines for kernel API. */ static sah_Mem_Util <API key> = { .mu_ref = 0, .mu_malloc = my_malloc, .mu_alloc_head_desc = my_alloc_head_desc, .mu_alloc_desc = my_alloc_desc, .mu_alloc_link = my_alloc_link, .mu_free = my_free, .mu_free_head_desc = my_free_head_desc, .mu_free_desc = my_free_desc, .mu_free_link = my_free_link, .mu_memcpy = my_memcpy, .mu_memset = my_memset }; fsl_shw_return_t get_capabilities(fsl_shw_uco_t * user_ctx, fsl_shw_pco_t * capabilities) { scc_config_t *scc_capabilities; /* Fill in the Sahara2 capabilities. 
*/ memcpy(capabilities, &<API key>, sizeof(fsl_shw_pco_t)); /* Fill in the SCC portion of the capabilities object */ scc_capabilities = <API key>(); capabilities->scc_driver_major = scc_capabilities-><API key>; capabilities->scc_driver_minor = scc_capabilities-><API key>; capabilities->scm_version = scc_capabilities->scm_version; capabilities->smn_version = scc_capabilities->smn_version; capabilities->block_size_bytes = scc_capabilities->block_size_bytes; #ifdef FSL_HAVE_SCC capabilities->scc_info.<API key> = scc_capabilities-><API key>; capabilities->scc_info.red_ram_size_blocks = scc_capabilities->red_ram_size_blocks; #elif defined(FSL_HAVE_SCC2) capabilities->scc2_info.<API key> = scc_capabilities-><API key>; capabilities->scc2_info.partition_count = scc_capabilities->partition_count; #endif return FSL_RETURN_OK_S; } /*! * Sends a request to register this user * * @brief Sends a request to register this user * * @param[in,out] user_ctx part of the structure contains input parameters and * part is filled in by the driver * * @return A return code of type #fsl_shw_return_t. */ fsl_shw_return_t sah_register(fsl_shw_uco_t * user_ctx) { fsl_shw_return_t status; /* this field is used in user mode to indicate a file open has occured. * it is used here, in kernel mode, to indicate that the uco is registered */ user_ctx->sahara_openfd = 0; /* set to 'registered' */ user_ctx->mem_util = &<API key>; /* check that uco is valid */ status = sah_validate_uco(user_ctx); /* If life is good, register this user */ if (status == FSL_RETURN_OK_S) { status = <API key>(user_ctx); } if (status != FSL_RETURN_OK_S) { user_ctx->sahara_openfd = -1; /* set to 'not registered' */ } return status; } /*! * Sends a request to deregister this user * * @brief Sends a request to deregister this user * * @param[in,out] user_ctx Info on user being deregistered. * * @return A return code of type #fsl_shw_return_t. 
*/ fsl_shw_return_t sah_deregister(fsl_shw_uco_t * user_ctx) { fsl_shw_return_t status = FSL_RETURN_OK_S; if (user_ctx->sahara_openfd == 0) { status = <API key>(user_ctx); user_ctx->sahara_openfd = -1; /* set to 'no registered */ } return status; } /*! * Sends a request to get results for this user * * @brief Sends a request to get results for this user * * @param[in,out] arg Pointer to structure to collect results * @param uco User's context * * @return A return code of type #fsl_shw_return_t. */ fsl_shw_return_t sah_get_results(sah_results * arg, fsl_shw_uco_t * uco) { fsl_shw_return_t code = <API key>(uco, arg); if ((code == FSL_RETURN_OK_S) && (arg->actual != 0)) { <API key>(uco, arg); } return code; } /*! * This function writes the Descriptor Chain to the kernel driver. * * @brief Writes the Descriptor Chain to the kernel driver. * * @param dar A pointer to a Descriptor Chain of type sah_Head_Desc * @param uco The user context object * * @return A return code of type #fsl_shw_return_t. */ fsl_shw_return_t <API key>(sah_Head_Desc * dar, fsl_shw_uco_t * uco) { sah_Head_Desc *kernel_space_desc = NULL; fsl_shw_return_t code = FSL_RETURN_OK_S; int os_error_code = 0; unsigned blocking_mode = dar->uco_flags & <API key>; #ifdef DIAG_ADAPTOR km_Dump_Chain(&dar->desc); #endif dar->user_info = uco; dar->user_desc = dar; /* This code has been shamelessly copied from <API key>.c */ /* It needs to be moved somewhere common ... */ kernel_space_desc = <API key>(dar); if (kernel_space_desc == NULL) { /* We may have failed due to a -EFAULT as well, but we will return * -ENOMEM since either way it is a memory related failure. 
*/ code = <API key>; #ifdef DIAG_DRV_IF LOG_KDIAG("<API key>() failed\n"); #endif } else { if (blocking_mode) { #ifdef SAHARA_POLL_MODE os_error_code = sah_Handle_Poll(dar); #else os_error_code = sah_blocking_mode(dar); #endif if (os_error_code != 0) { code = FSL_RETURN_ERROR_S; } else { /* status of actual operation */ code = dar->result; } } else { #ifdef SAHARA_POLL_MODE sah_Handle_Poll(dar); #else /* just put someting in the DAR */ <API key>(dar); #endif /* SAHARA_POLL_MODE */ } } return code; } /* System keystore context, defined in <API key>.c */ extern fsl_shw_kso_t system_keystore; fsl_shw_return_t <API key>(fsl_shw_uco_t * user_ctx, uint32_t key_length, uint64_t ownerid, uint32_t * slot) { (void)user_ctx; return keystore_slot_alloc(&system_keystore, key_length, ownerid, slot); } fsl_shw_return_t <API key>(fsl_shw_uco_t * user_ctx, uint64_t ownerid, uint32_t slot) { (void)user_ctx; return <API key>(&system_keystore, ownerid, slot); } fsl_shw_return_t <API key>(fsl_shw_uco_t * user_ctx, uint64_t ownerid, uint32_t slot, const uint8_t * key, uint32_t key_length) { (void)user_ctx; return keystore_slot_load(&system_keystore, ownerid, slot, (void *)key, key_length); } fsl_shw_return_t <API key>(fsl_shw_uco_t * user_ctx, uint64_t ownerid, uint32_t slot, uint32_t key_length, const uint8_t * key) { (void)user_ctx; return keystore_slot_read(&system_keystore, ownerid, slot, key_length, (void *)key); } fsl_shw_return_t <API key>(fsl_shw_uco_t * user_ctx, uint64_t ownerid, uint32_t slot, uint32_t key_length, uint8_t * black_data) { (void)user_ctx; return <API key>(NULL, &system_keystore, ownerid, slot, key_length, black_data); } fsl_shw_return_t <API key>(fsl_shw_uco_t * user_ctx, uint64_t ownerid, uint32_t slot, uint32_t key_length, const uint8_t * black_data) { (void)user_ctx; return <API key>(NULL, &system_keystore, ownerid, slot, key_length, black_data); } void *fsl_shw_smalloc(fsl_shw_uco_t * user_ctx, uint32_t size, const uint8_t * UMID, uint32_t permissions) { 
#ifdef FSL_HAVE_SCC2 int part_no; void *part_base; uint32_t part_phys; scc_config_t *scc_configuration; /* Check that the memory size requested is correct */ scc_configuration = <API key>(); if (size != scc_configuration-><API key>) { return NULL; } /* Attempt to grab a partition. */ if (<API key>(0, &part_no, &part_base, &part_phys) != SCC_RET_OK) { return NULL; } printk(KERN_ALERT "In fsh_shw_smalloc (km): partition_base:%p " "partition_base_phys: %p\n", part_base, (void *)part_phys); /* these bits should be in a separate function */ printk(KERN_ALERT "writing UMID and MAP to secure the partition\n"); <API key>(part_base, UMID, permissions); (void)user_ctx; /* unused param warning */ return part_base; #else /* FSL_HAVE_SCC2 */ (void)user_ctx; (void)size; (void)UMID; (void)permissions; return NULL; #endif /* FSL_HAVE_SCC2 */ } fsl_shw_return_t fsl_shw_sfree(fsl_shw_uco_t * user_ctx, void *address) { (void)user_ctx; #ifdef FSL_HAVE_SCC2 if (<API key>(address) == SCC_RET_OK) { return FSL_RETURN_OK_S; } #endif return FSL_RETURN_ERROR_S; } fsl_shw_return_t fsl_shw_sstatus(fsl_shw_uco_t * user_ctx, void *address, <API key> * status) { (void)user_ctx; #ifdef FSL_HAVE_SCC2 *status = <API key>(address); return FSL_RETURN_OK_S; #endif return FSL_RETURN_ERROR_S; } fsl_shw_return_t <API key>(fsl_shw_uco_t * user_ctx, void *address, uint32_t permissions) { (void)user_ctx; /* unused parameter warning */ #ifdef FSL_HAVE_SCC2 if (<API key>(address, permissions) == SCC_RET_OK) { return FSL_RETURN_OK_S; } #endif return FSL_RETURN_ERROR_S; } /* * partition_base - physical address of the partition * offset - offset, in blocks, of the data from the start of the partition * length - length, in bytes, of the data to be encrypted (multiple of 4) * black_data - virtual address that the encrypted data should be stored at * Note that this virtual address must be translatable using the __virt_to_phys * macro; ie, it can't be a specially mapped address. 
To do encryption with those * addresses, use the scc_encrypt_region function directly. This is to make * this function compatible with the user mode declaration, which does not know * the physical addresses of the data it is using. */ fsl_shw_return_t <API key>(fsl_shw_uco_t * user_ctx, void *partition_base, uint32_t offset_bytes, uint32_t byte_count, uint8_t * black_data, uint32_t * IV, <API key> cypher_mode) { scc_return_t scc_ret; fsl_shw_return_t retval = FSL_RETURN_ERROR_S; #ifdef FSL_HAVE_SCC2 #ifdef DIAG_ADAPTOR uint32_t *owner_32 = (uint32_t *) & (owner_id); LOG_KDIAG_ARGS ("partition base: %p, offset: %i, count: %i, black data: %p\n", partition_base, offset_bytes, byte_count, (void *)black_data); #endif (void)user_ctx; <API key>(black_data, byte_count); scc_ret = scc_encrypt_region((uint32_t) partition_base, offset_bytes, byte_count, __virt_to_phys(black_data), IV, cypher_mode); if (scc_ret == SCC_RET_OK) { retval = FSL_RETURN_OK_S; } else { retval = FSL_RETURN_ERROR_S; } /* The SCC2 DMA engine should have written to the black ram, so we need to * invalidate that region of memory. Note that the red ram is not an * because it is mapped with the cache disabled. */ os_cache_inv_range(black_data, byte_count); #else (void)scc_ret; #endif /* FSL_HAVE_SCC2 */ return retval; } /*! 
* Call the proper function to decrypt a region of encrypted secure memory * * @brief * * @param user_ctx User context of the partition owner (NULL in kernel) * @param partition_base Base address (physical) of the partition * @param offset_bytes Offset from base address that the decrypted data * shall be placed * @param byte_count Length of the message (bytes) * @param black_data Pointer to where the encrypted data is stored * @param owner_id * * @return status */ fsl_shw_return_t <API key>(fsl_shw_uco_t * user_ctx, void *partition_base, uint32_t offset_bytes, uint32_t byte_count, const uint8_t * black_data, uint32_t * IV, <API key> cypher_mode) { scc_return_t scc_ret; fsl_shw_return_t retval = FSL_RETURN_ERROR_S; #ifdef FSL_HAVE_SCC2 #ifdef DIAG_ADAPTOR uint32_t *owner_32 = (uint32_t *) & (owner_id); LOG_KDIAG_ARGS ("partition base: %p, offset: %i, count: %i, black data: %p\n", partition_base, offset_bytes, byte_count, (void *)black_data); #endif (void)user_ctx; /* The SCC2 DMA engine will be reading from the black ram, so we need to * make sure that the data is pushed out of the cache. Note that the red * ram is not an issue because it is mapped with the cache disabled. */ <API key>(black_data, byte_count); scc_ret = scc_decrypt_region((uint32_t) partition_base, offset_bytes, byte_count, (uint8_t *) __virt_to_phys(black_data), IV, cypher_mode); if (scc_ret == SCC_RET_OK) { retval = FSL_RETURN_OK_S; } else { retval = FSL_RETURN_ERROR_S; } #else (void)scc_ret; #endif /* FSL_HAVE_SCC2 */ return retval; } #ifdef DIAG_ADAPTOR /*! * Dump chain of descriptors to the log. 
* * @brief Dump descriptor chain * * @param chain Kernel virtual address of start of chain of descriptors * * @return void */ void km_Dump_Chain(const sah_Desc * chain) { while (chain != NULL) { km_Dump_Words("Desc", (unsigned *)chain, 6 /*sizeof(*chain)/sizeof(unsigned) */ ); /* place this definition elsewhere */ if (chain->ptr1) { if (chain->header & SAH_HDR_LLO) { km_Dump_Region(" Data1", chain->ptr1, chain->len1); } else { km_Dump_Link(" Link1", chain->ptr1); } } if (chain->ptr2) { if (chain->header & SAH_HDR_LLO) { km_Dump_Region(" Data2", chain->ptr2, chain->len2); } else { km_Dump_Link(" Link2", chain->ptr2); } } chain = chain->next; } } /*! * Dump chain of links to the log. * * @brief Dump chain of links * * @param prefix Text to put in front of dumped data * @param link Kernel virtual address of start of chain of links * * @return void */ static void km_Dump_Link(const char *prefix, const sah_Link * link) { while (link != NULL) { km_Dump_Words(prefix, (unsigned *)link, 3 /* # words in h/w link */ ); if (link->flags & SAH_STORED_KEY_INFO) { #ifdef CAN_DUMP_SCC_DATA uint32_t len; #endif #ifdef CAN_DUMP_SCC_DATA { char buf[50]; scc_get_slot_info(link->ownerid, link->slot, (uint32_t *) & link->data, /* RED key address */ &len); /* key length */ sprintf(buf, " SCC slot %d: ", link->slot); km_Dump_Words(buf, (void *)IO_ADDRESS((uint32_t) link->data), link->len / 4); } #else sprintf(Diag_msg, " SCC slot %d", link->slot); LOG_KDIAG(Diag_msg); #endif } else if (link->data != NULL) { km_Dump_Region(" Data", link->data, link->len); } link = link->next; } } /*! * Dump given region of data to the log. 
* * @brief Dump data * * @param prefix Text to put in front of dumped data * @param data Kernel virtual address of start of region to dump * @param length Amount of data to dump * * @return void */ void km_Dump_Region(const char *prefix, const unsigned char *data, unsigned length) { unsigned count; char *output; unsigned data_len; sprintf(Diag_msg, "%s (%08X,%u):", prefix, (uint32_t) data, length); /* Restrict amount of data to dump */ if (length > MAX_DUMP) { data_len = MAX_DUMP; } else { data_len = length; } /* We've already printed some text in output buffer, skip over it */ output = Diag_msg + strlen(Diag_msg); for (count = 0; count < data_len; count++) { if (count % 4 == 0) { *output++ = ' '; } sprintf(output, "%02X", *data++); output += 2; } LOG_KDIAG(Diag_msg); } /*! * Dump given wors of data to the log. * * @brief Dump data * * @param prefix Text to put in front of dumped data * @param data Kernel virtual address of start of region to dump * @param word_count Amount of data to dump * * @return void */ void km_Dump_Words(const char *prefix, const unsigned *data, unsigned word_count) { char *output; sprintf(Diag_msg, "%s (%08X,%uw): ", prefix, (uint32_t) data, word_count); /* We've already printed some text in output buffer, skip over it */ output = Diag_msg + strlen(Diag_msg); while (word_count sprintf(output, "%08X ", *data++); output += 9; } LOG_KDIAG(Diag_msg); } #endif
! { dg-do run } ! { <API key> "-msse2" { target sse2_runtime } } ! { <API key> "-mavx" { target avx_runtime } } integer :: a(1024), b(1024), k, m, i, s, t k = 4 m = 2 t = 1 do i = 1, 1024 a(i) = i - 513 b(i) = modulo (i - 52, 39) if (i.lt.52.and.b(i).ne.0) b(i) = b(i) - 39 end do s = foo (b) do i = 1, 1024 if (a(i).ne.((i - 513) * b(i))) call abort if (i.lt.52.and.modulo (i - 52, 39).ne.0) then if (b(i).ne.(modulo (i - 52, 39) - 39)) call abort else if (b(i).ne.(modulo (i - 52, 39))) call abort end if a(i) = i - 513 end do if (k.ne.(4 + 3 * 1024).or.s.ne.1596127) call abort k = 4 m = 2 t = 1 s = bar (b) do i = 1, 1024 if (a(i).ne.((i - 513) * b(i))) call abort if (i.lt.52.and.modulo (i - 52, 39).ne.0) then if (b(i).ne.(modulo (i - 52, 39) - 39)) call abort else if (b(i).ne.(modulo (i - 52, 39))) call abort end if a(i) = i - 513 end do if (k.ne.(4 + 3 * 1024).or.s.ne.1596127) call abort k = 4 m = 2 t = 1 s = baz (b) do i = 1, 1024 if (a(i).ne.((i - 513) * b(i))) call abort if (i.lt.52.and.modulo (i - 52, 39).ne.0) then if (b(i).ne.(modulo (i - 52, 39) - 39)) call abort else if (b(i).ne.(modulo (i - 52, 39))) call abort end if end do if (k.ne.(4 + 3 * 1024).or.s.ne.1596127) call abort contains function foo (p) integer :: p(1024), u, v, i, s, foo s = 0 !$omp simd linear(k : m + 1) reduction(+: s) lastprivate(u, v) do i = 1, 1024 a(i) = a(i) * p(i) u = p(i) + k k = k + m + 1 v = p(i) + k s = s + p(i) + k end do !$omp end simd if (i.ne.1025) call abort if (u.ne.(36 + 4 + 3 * 1023).or.v.ne.(36 + 4 + 3 * 1024)) call abort foo = s end function foo function bar (p) integer :: p(1024), u, v, i, s, bar s = 0 !$omp simd linear(k : m + 1) reduction(+: s) lastprivate(u, v) do i = 1, 1024, t a(i) = a(i) * p(i) u = p(i) + k k = k + m + 1 v = p(i) + k s = s + p(i) + k end do !$omp end simd if (i.ne.1025) call abort if (u.ne.(36 + 4 + 3 * 1023).or.v.ne.(36 + 4 + 3 * 1024)) call abort bar = s end function bar function baz (p) integer :: p(1024), u, v, i, s, baz s = 0 !$omp simd 
linear(k : m + 1) reduction(+: s) lastprivate(u, v) & !$omp & linear(i : t) do i = 1, 1024, t a(i) = a(i) * p(i) u = p(i) + k k = k + m + 1 v = p(i) + k s = s + p(i) + k end do if (i.ne.1025) call abort if (u.ne.(36 + 4 + 3 * 1023).or.v.ne.(36 + 4 + 3 * 1024)) call abort baz = s end function baz end
/*
 * Deterministic replacement for the C library rand()/srand().
 * Overrides the stdlib versions via macros so that runs are reproducible
 * across platforms (a fixed linear congruential generator instead of the
 * platform's rand implementation).
 */

/* Classic LCG parameters (same multiplier/increment family as glibc's rand). */
#define MULTIPLIER 1103515245
#define OFFSET 12345

/* Generator state; srand() guarantees it is kept strictly positive. */
static int _seed = 1;

/* rand(): advance the LCG and mask to 31 bits so the result is non-negative. */
#undef rand
#define rand() ((_seed = MULTIPLIER * _seed + OFFSET) & 0x7FFFFFFF)

/* srand(): install the seed, clamping non-positive values to 1 so the
 * generator never gets stuck at 0. */
#undef srand
#define srand(seed) (_seed = int(seed), _seed = (_seed <= 0) ? 1 : _seed)

/* Largest value rand() above can produce (2^31 - 1). */
#undef RAND_MAX
#define RAND_MAX 2147483647

#if defined (_MSC_VER) && (_MSC_VER >= 1200)
/* On MSVC, replace time() with the millisecond tick counter. */
# include <windows.h>
# define time(dummy) (GetTickCount())
#endif

#ifdef __APPLE__
# include <cstdlib>
/* On macOS, let the RANDOM_SEED environment variable override time():
 * if set, its integer value is returned; otherwise fall back to the real
 * time(NULL). Note the macro below is defined AFTER this function, so the
 * time(NULL) call here still refers to the real libc time(). */
static time_t patchedTime(time_t *)
{
    char *str = getenv("RANDOM_SEED");
    if (str == NULL) {
        return time(NULL);
    } else {
        return atoi(str);
    }
}
# define time(dummy) patchedTime(dummy)
#endif
package networks import ( "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/pagination" ) // List returns a Pager that allows you to iterate over a collection of Network. func List(client *gophercloud.ServiceClient) pagination.Pager { return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { return NetworkPage{pagination.SinglePageBase(r)} }) } // Get returns data about a previously created Network. func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { _, r.Err = client.Get(getURL(client, id), &r.Body, nil) return }
/* Test the `vld4f32' ARM Neon intrinsic. */ /* This file was autogenerated by neon-testgen. */ /* { dg-do assemble } */ /* { <API key> arm_neon_ok } */ /* { dg-options "-save-temps -O0" } */ /* { dg-add-options arm_neon } */ #include "arm_neon.h" void test_vld4f32 (void) { float32x2x4_t out_float32x2x4_t; out_float32x2x4_t = vld4_f32 (0); } /* { dg-final { scan-assembler "vld4\.32\[ \]+\\\{((\[dD\]\[0-9\]+-\[dD\]\[0-9\]+)|(\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+))\\\}, \\\[\[rR\]\[0-9\]+\(:\[0-9\]+\)?\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */ /* { dg-final { cleanup-saved-temps } } */
#include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/usb/input.h> #define POWERMATE_VENDOR 0x077d /* Griffin Technology, Inc. */ #define <API key> 0x0410 /* Griffin PowerMate */ #define <API key> 0x04AA /* Griffin soundKnob */ #define CONTOUR_VENDOR 0x05f3 /* Contour Design, Inc. */ #define CONTOUR_JOG 0x0240 /* Jog and Shuttle */ /* these are the command codes we send to the device */ #define <API key> 0x01 #define SET_PULSE_ASLEEP 0x02 #define SET_PULSE_AWAKE 0x03 #define SET_PULSE_MODE 0x04 /* these refer to bits in the powermate_device's requires_update field. */ #define <API key> (1<<0) #define UPDATE_PULSE_ASLEEP (1<<1) #define UPDATE_PULSE_AWAKE (1<<2) #define UPDATE_PULSE_MODE (1<<3) /* at least two versions of the hardware exist, with differing payload sizes. the first three bytes always contain the "interesting" data in the relevant format. */ #define <API key> 6 #define <API key> 3 struct powermate_device { signed char *data; dma_addr_t data_dma; struct urb *irq, *config; struct usb_ctrlrequest *configcr; dma_addr_t configcr_dma; struct usb_device *udev; struct input_dev *input; spinlock_t lock; int static_brightness; int pulse_speed; int pulse_table; int pulse_asleep; int pulse_awake; int requires_update; // physical settings which are out of sync char phys[64]; }; static char pm_name_powermate[] = "Griffin PowerMate"; static char pm_name_soundknob[] = "Griffin SoundKnob"; static void <API key>(struct urb *urb); /* Callback for data arriving from the PowerMate over the USB interrupt pipe */ static void powermate_irq(struct urb *urb) { struct powermate_device *pm = urb->context; int retval; switch (urb->status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dbg("%s - urb shutting down with status: %d", __func__, urb->status); return; default: dbg("%s - nonzero urb status received: %d", 
__func__, urb->status); goto exit; } /* handle updates to device state */ input_report_key(pm->input, BTN_0, pm->data[0] & 0x01); input_report_rel(pm->input, REL_DIAL, pm->data[1]); input_sync(pm->input); exit: retval = usb_submit_urb (urb, GFP_ATOMIC); if (retval) err ("%s - usb_submit_urb failed with result %d", __func__, retval); } /* Decide if we need to issue a control message and do so. Must be called with pm->lock taken */ static void <API key>(struct powermate_device *pm) { if (pm->requires_update == 0) return; /* no updates are required */ if (pm->config->status == -EINPROGRESS) return; /* an update is already in progress; it'll issue this update when it completes */ if (pm->requires_update & UPDATE_PULSE_ASLEEP){ pm->configcr->wValue = cpu_to_le16( SET_PULSE_ASLEEP ); pm->configcr->wIndex = cpu_to_le16( pm->pulse_asleep ? 1 : 0 ); pm->requires_update &= ~UPDATE_PULSE_ASLEEP; }else if (pm->requires_update & UPDATE_PULSE_AWAKE){ pm->configcr->wValue = cpu_to_le16( SET_PULSE_AWAKE ); pm->configcr->wIndex = cpu_to_le16( pm->pulse_awake ? 1 : 0 ); pm->requires_update &= ~UPDATE_PULSE_AWAKE; }else if (pm->requires_update & UPDATE_PULSE_MODE){ int op, arg; /* the powermate takes an operation and an argument for its pulse algorithm. the operation can be: 0: divide the speed 1: pulse at normal speed 2: multiply the speed the argument only has an effect for operations 0 and 2, and ranges between 1 (least effect) to 255 (maximum effect). thus, several states are equivalent and are coalesced into one state. we map this onto a range from 0 to 510, with: 0 -- 254 -- use divide (0 = slowest) 255 -- use normal speed 256 -- 510 -- use multiple (510 = fastest). Only values of 'arg' quite close to 255 are particularly useful/spectacular. 
*/ if (pm->pulse_speed < 255) { op = 0; // divide arg = 255 - pm->pulse_speed; } else if (pm->pulse_speed > 255) { op = 2; // multiply arg = pm->pulse_speed - 255; } else { op = 1; // normal speed arg = 0; // can be any value } pm->configcr->wValue = cpu_to_le16( (pm->pulse_table << 8) | SET_PULSE_MODE ); pm->configcr->wIndex = cpu_to_le16( (arg << 8) | op ); pm->requires_update &= ~UPDATE_PULSE_MODE; } else if (pm->requires_update & <API key>) { pm->configcr->wValue = cpu_to_le16( <API key> ); pm->configcr->wIndex = cpu_to_le16( pm->static_brightness ); pm->requires_update &= ~<API key>; } else { printk(KERN_ERR "powermate: unknown update required"); pm->requires_update = 0; /* fudge the bug */ return; } /* printk("powermate: %04x %04x\n", pm->configcr->wValue, pm->configcr->wIndex); */ pm->configcr->bRequestType = 0x41; /* vendor request */ pm->configcr->bRequest = 0x01; pm->configcr->wLength = 0; <API key>(pm->config, pm->udev, usb_sndctrlpipe(pm->udev, 0), (void *) pm->configcr, NULL, 0, <API key>, pm); pm->config->setup_dma = pm->configcr_dma; pm->config->transfer_flags |= <API key>; if (usb_submit_urb(pm->config, GFP_ATOMIC)) printk(KERN_ERR "powermate: usb_submit_urb(config) failed"); } /* Called when our asynchronous control message completes. 
We may need to issue another immediately */ static void <API key>(struct urb *urb) { struct powermate_device *pm = urb->context; unsigned long flags; if (urb->status) printk(KERN_ERR "powermate: config urb returned %d\n", urb->status); spin_lock_irqsave(&pm->lock, flags); <API key>(pm); <API key>(&pm->lock, flags); } /* Set the LED up as described and begin the sync with the hardware if required */ static void powermate_pulse_led(struct powermate_device *pm, int static_brightness, int pulse_speed, int pulse_table, int pulse_asleep, int pulse_awake) { unsigned long flags; if (pulse_speed < 0) pulse_speed = 0; if (pulse_table < 0) pulse_table = 0; if (pulse_speed > 510) pulse_speed = 510; if (pulse_table > 2) pulse_table = 2; pulse_asleep = !!pulse_asleep; pulse_awake = !!pulse_awake; spin_lock_irqsave(&pm->lock, flags); /* mark state updates which are required */ if (static_brightness != pm->static_brightness) { pm->static_brightness = static_brightness; pm->requires_update |= <API key>; } if (pulse_asleep != pm->pulse_asleep) { pm->pulse_asleep = pulse_asleep; pm->requires_update |= (UPDATE_PULSE_ASLEEP | <API key>); } if (pulse_awake != pm->pulse_awake) { pm->pulse_awake = pulse_awake; pm->requires_update |= (UPDATE_PULSE_AWAKE | <API key>); } if (pulse_speed != pm->pulse_speed || pulse_table != pm->pulse_table) { pm->pulse_speed = pulse_speed; pm->pulse_table = pulse_table; pm->requires_update |= UPDATE_PULSE_MODE; } <API key>(pm); <API key>(&pm->lock, flags); } /* Callback from the Input layer when an event arrives from userspace to configure the LED */ static int <API key>(struct input_dev *dev, unsigned int type, unsigned int code, int _value) { unsigned int command = (unsigned int)_value; struct powermate_device *pm = input_get_drvdata(dev); if (type == EV_MSC && code == MSC_PULSELED){ /* bits 0- 7: 8 bits: LED brightness bits 8-16: 9 bits: pulsing speed modifier (0 ... 510); 0-254 = slower, 255 = standard, 256-510 = faster. 
bits 17-18: 2 bits: pulse table (0, 1, 2 valid) bit 19: 1 bit : pulse whilst asleep? bit 20: 1 bit : pulse constantly? */ int static_brightness = command & 0xFF; // bits 0-7 int pulse_speed = (command >> 8) & 0x1FF; // bits 8-16 int pulse_table = (command >> 17) & 0x3; // bits 17-18 int pulse_asleep = (command >> 19) & 0x1; // bit 19 int pulse_awake = (command >> 20) & 0x1; // bit 20 powermate_pulse_led(pm, static_brightness, pulse_speed, pulse_table, pulse_asleep, pulse_awake); } return 0; } static int <API key>(struct usb_device *udev, struct powermate_device *pm) { pm->data = usb_buffer_alloc(udev, <API key>, GFP_ATOMIC, &pm->data_dma); if (!pm->data) return -1; pm->configcr = usb_buffer_alloc(udev, sizeof(*(pm->configcr)), GFP_ATOMIC, &pm->configcr_dma); if (!pm->configcr) return -1; return 0; } static void <API key>(struct usb_device *udev, struct powermate_device *pm) { usb_buffer_free(udev, <API key>, pm->data, pm->data_dma); usb_buffer_free(udev, sizeof(*(pm->configcr)), pm->configcr, pm->configcr_dma); } /* Called whenever a USB device matching one in our supported devices table is connected */ static int powermate_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev (intf); struct usb_host_interface *interface; struct <API key> *endpoint; struct powermate_device *pm; struct input_dev *input_dev; int pipe, maxp; int error = -ENOMEM; interface = intf->cur_altsetting; endpoint = &interface->endpoint[0].desc; if (!<API key>(endpoint)) return -EIO; usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x0a, USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, interface->desc.bInterfaceNumber, NULL, 0, <API key>); pm = kzalloc(sizeof(struct powermate_device), GFP_KERNEL); input_dev = <API key>(); if (!pm || !input_dev) goto fail1; if (<API key>(udev, pm)) goto fail2; pm->irq = usb_alloc_urb(0, GFP_KERNEL); if (!pm->irq) goto fail2; pm->config = usb_alloc_urb(0, GFP_KERNEL); if (!pm->config) goto fail3; pm->udev = udev; 
pm->input = input_dev; usb_make_path(udev, pm->phys, sizeof(pm->phys)); strlcat(pm->phys, "/input0", sizeof(pm->phys)); spin_lock_init(&pm->lock); switch (le16_to_cpu(udev->descriptor.idProduct)) { case <API key>: input_dev->name = pm_name_powermate; break; case <API key>: input_dev->name = pm_name_soundknob; break; default: input_dev->name = pm_name_soundknob; printk(KERN_WARNING "powermate: unknown product id %04x\n", le16_to_cpu(udev->descriptor.idProduct)); } input_dev->phys = pm->phys; usb_to_input_id(udev, &input_dev->id); input_dev->dev.parent = &intf->dev; input_set_drvdata(input_dev, pm); input_dev->event = <API key>; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL) | BIT_MASK(EV_MSC); input_dev->keybit[BIT_WORD(BTN_0)] = BIT_MASK(BTN_0); input_dev->relbit[BIT_WORD(REL_DIAL)] = BIT_MASK(REL_DIAL); input_dev->mscbit[BIT_WORD(MSC_PULSELED)] = BIT_MASK(MSC_PULSELED); /* get a handle to the interrupt data pipe */ pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress); maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe)); if (maxp < <API key> || maxp > <API key>) { printk(KERN_WARNING "powermate: Expected payload of %d--%d bytes, found %d bytes!\n", <API key>, <API key>, maxp); maxp = <API key>; } usb_fill_int_urb(pm->irq, udev, pipe, pm->data, maxp, powermate_irq, pm, endpoint->bInterval); pm->irq->transfer_dma = pm->data_dma; pm->irq->transfer_flags |= <API key>; /* register our interrupt URB with the USB system */ if (usb_submit_urb(pm->irq, GFP_KERNEL)) { error = -EIO; goto fail4; } error = <API key>(pm->input); if (error) goto fail5; /* force an update of everything */ pm->requires_update = UPDATE_PULSE_ASLEEP | UPDATE_PULSE_AWAKE | UPDATE_PULSE_MODE | <API key>; powermate_pulse_led(pm, 0x80, 255, 0, 1, 0); // set default pulse parameters usb_set_intfdata(intf, pm); return 0; fail5: usb_kill_urb(pm->irq); fail4: usb_free_urb(pm->config); fail3: usb_free_urb(pm->irq); fail2: <API key>(udev, pm); fail1: input_free_device(input_dev); kfree(pm); 
return error; } /* Called when a USB device we've accepted ownership of is removed */ static void <API key>(struct usb_interface *intf) { struct powermate_device *pm = usb_get_intfdata (intf); usb_set_intfdata(intf, NULL); if (pm) { pm->requires_update = 0; usb_kill_urb(pm->irq); <API key>(pm->input); usb_free_urb(pm->irq); usb_free_urb(pm->config); <API key>(interface_to_usbdev(intf), pm); kfree(pm); } } static struct usb_device_id powermate_devices [] = { { USB_DEVICE(POWERMATE_VENDOR, <API key>) }, { USB_DEVICE(POWERMATE_VENDOR, <API key>) }, { USB_DEVICE(CONTOUR_VENDOR, CONTOUR_JOG) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, powermate_devices); static struct usb_driver powermate_driver = { .name = "powermate", .probe = powermate_probe, .disconnect = <API key>, .id_table = powermate_devices, }; static int __init powermate_init(void) { return usb_register(&powermate_driver); } static void __exit powermate_cleanup(void) { usb_deregister(&powermate_driver); } module_init(powermate_init); module_exit(powermate_cleanup); MODULE_AUTHOR( "William R Sowerbutts" ); MODULE_DESCRIPTION( "Griffin Technology, Inc PowerMate driver" ); MODULE_LICENSE("GPL");
/*
 * Highcharts theme (sunset palette). Works both as a CommonJS module
 * (exports the factory) and as a plain browser script (applies the theme
 * to the global Highcharts immediately).
 */
(function (factory) {
    if (typeof module === "object" && module.exports) {
        module.exports = factory;
    } else {
        factory(Highcharts);
    }
})(function (Highcharts) {
    Highcharts.theme = {
        colors: ["#FDD089", "#FF7F79", "#A0446E", "#251535"],
        colorAxis: {
            maxColor: "#60042E",
            minColor: "#FDD089"
        },
        plotOptions: {
            map: {
                nullColor: "#fefefc"
            }
        },
        navigator: {
            series: {
                color: "#FF7F79",
                lineColor: "#A0446E"
            }
        }
    };
    // Apply the theme globally.
    Highcharts.setOptions(Highcharts.theme);
});
/*! lazysizes - v1.2.3-rc1 */
/*
 * lazysizes "print" plugin (minified build).
 * When the page is about to be printed, force-unveils every element that is
 * still pending lazy load (elements carrying the configured lazyClass,
 * default "lazyload"), so printed output is not missing images.
 * Hooks the "beforeprint" event; on browsers without onbeforeprint it falls
 * back to a matchMedia("print") listener.
 * NOTE(review): the redacted call `document.<API key>(c)` presumably resolves
 * the class name to a live element collection (likely getElementsByClassName)
 * — confirm against the upstream lazysizes source.
 */
!function(a){"use strict";var b,c,d,e;a.addEventListener&&(b=a.lazySizes&&lazySizes.cfg||a.lazySizesConfig||{},c=b.lazyClass||"lazyload",d=function(){var b,d;if("string"==typeof c&&(c=document.<API key>(c)),a.lazySizes)for(b=0,d=c.length;d>b;b++)lazySizes.loader.unveil(c[b])},addEventListener("beforeprint",d,!1),!("onbeforeprint"in a)&&a.matchMedia&&(e=matchMedia("print"))&&e.addListener&&e.addListener(function(){e.matches&&d()}))}(window);
#include "G3D/platform.h"
#include "G3D/PhysicsFrame.h"
#include "G3D/BinaryInput.h"
#include "G3D/BinaryOutput.h"

namespace G3D {

// Identity frame: zero translation, identity rotation.
PhysicsFrame::PhysicsFrame() {
    translation = Vector3::zero();
    rotation    = Quat();
}

// Build from a rotation-matrix based frame; the rotation is converted
// to quaternion form.
PhysicsFrame::PhysicsFrame(const CoordinateFrame& coordinateFrame) {
    translation = coordinateFrame.translation;
    rotation    = Quat(coordinateFrame.rotation);
}

// Frame composition: rotations multiply, and the other frame's translation
// is rotated into this frame before being added.
PhysicsFrame PhysicsFrame::operator*(const PhysicsFrame& other) const {
    PhysicsFrame composed;

    composed.rotation    = rotation * other.rotation;
    composed.translation = translation +
        rotation.toRotationMatrix() * other.translation;

    return composed;
}

// Convert back to the matrix representation.
CoordinateFrame PhysicsFrame::toCoordinateFrame() const {
    CoordinateFrame frame;

    frame.translation = translation;
    frame.rotation    = rotation.toRotationMatrix();

    return frame;
}

// Interpolate between two frames: linear for translation, spherical
// (slerp) for rotation.
PhysicsFrame PhysicsFrame::lerp(const PhysicsFrame& other, float alpha) const {
    PhysicsFrame blended;

    blended.translation = translation.lerp(other.translation, alpha);
    blended.rotation    = rotation.slerp(other.rotation, alpha);

    return blended;
}

// Read translation then rotation from the stream (must mirror serialize).
void PhysicsFrame::deserialize(class BinaryInput& b) {
    translation.deserialize(b);
    rotation.deserialize(b);
}

// Write translation then rotation to the stream (must mirror deserialize).
void PhysicsFrame::serialize(class BinaryOutput& b) const {
    translation.serialize(b);
    rotation.serialize(b);
}

}; // namespace
/* HISTORY 02-Aug-10 L-05-28 $$1 pdeshmuk Created. */ function <API key> () { if (!pfcIsWindows()) netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect"); var session = pfcGetProESession (); var assembly = session.CurrentModel; if (assembly.Type != pfcCreate ("pfcModelType").MDL_ASSEMBLY) throw new Error (0, "Current model is not an assembly"); var simp_rep = assembly.GetActiveSimpRep(); var <API key> = simp_rep.GetInstructions(); var number_items = <API key>.Items.Count; document.getElementById("numItems").value = number_items; var <API key> = <API key>.Items.Item(number_items-1); <API key>.Action = null; simp_rep.SetInstructions(<API key>); number_items = simp_rep.GetInstructions().Items.Count; document.getElementById("numItems").value = number_items; return; } function addItemsInSimpRep () { if (!pfcIsWindows()) netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect"); var session = pfcGetProESession (); var assembly = session.CurrentModel; if (assembly.Type != pfcCreate ("pfcModelType").MDL_ASSEMBLY) throw new Error (0, "Current model is not an assembly"); var simp_rep = assembly.GetActiveSimpRep(); var <API key> = simp_rep.GetInstructions(); var number_items = <API key>.Items.Count; document.getElementById("numItems").value = number_items; var item_path = pfcCreate ("intseq"); selOptions = pfcCreate ("pfcSelectionOptions").Create ("feature"); selOptions.MaxNumSels = parseInt (1); var selections = void null; try { selections = session.Select (selOptions, void null); } catch (err) { if (pfcGetExceptionType (err) == "<API key>" || pfcGetExceptionType (err) == "<API key>") return (void null); else throw err; } if (selections.Count == 0) return (void null); var selection = selections.Item(0); var componentpath = selection.Path; var intseqIds = componentpath.ComponentIds; item_path.Append(intseqIds.Item(0)); <API key> = pfcCreate("<API key>").Create(item_path); simp_rep_item = pfcCreate("pfcSimpRepItem").Create(<API key>); 
simp_rep_action = pfcCreate("pfcSimpRepExclude").Create(); simp_rep_item.Action = simp_rep_action; <API key>.Items.Append(simp_rep_item); simp_rep.SetInstructions(<API key>); <API key> = simp_rep.GetInstructions(); number_items = <API key>.Items.Count; document.getElementById("numItems").value = number_items; return; }
/** * A date picker component which shows a Date Picker on the screen. This class extends from {@link Ext.picker.Picker} * and {@link Ext.Sheet} so it is a popup. * * This component has no required configurations. * * ## Examples * * @example miniphone preview * var datePicker = Ext.create('Ext.picker.Date'); * Ext.Viewport.add(datePicker); * datePicker.show(); * * You may want to adjust the {@link #yearFrom} and {@link #yearTo} properties: * * @example miniphone preview * var datePicker = Ext.create('Ext.picker.Date', { * yearFrom: 2000, * yearTo : 2015 * }); * Ext.Viewport.add(datePicker); * datePicker.show(); * * You can set the value of the {@link Ext.picker.Date} to the current date using `new Date()`: * * @example miniphone preview * var datePicker = Ext.create('Ext.picker.Date', { * value: new Date() * }); * Ext.Viewport.add(datePicker); * datePicker.show(); * * And you can hide the titles from each of the slots by using the {@link #useTitles} configuration: * * @example miniphone preview * var datePicker = Ext.create('Ext.picker.Date', { * useTitles: false * }); * Ext.Viewport.add(datePicker); * datePicker.show(); */ Ext.define('Ext.picker.Date', { extend: 'Ext.picker.Picker', xtype: 'datepicker', alternateClassName: 'Ext.DatePicker', requires: ['Ext.DateExtras', 'Ext.util.InputBlocker'], /** * @event change * Fired when the value of this picker has changed and the done button is pressed. * @param {Ext.picker.Date} this This Picker * @param {Date} value The date value */ config: { /** * @cfg {Number} yearFrom * The start year for the date picker. If {@link #yearFrom} is greater than * {@link #yearTo} then the order of years will be reversed. * @accessor */ yearFrom: 1980, /** * @cfg {Number} [yearTo=new Date().getFullYear()] * The last year for the date picker. If {@link #yearFrom} is greater than * {@link #yearTo} then the order of years will be reversed. 
* @accessor */ yearTo: new Date().getFullYear(), /** * @cfg {String} monthText * The label to show for the month column. * @accessor */ monthText: 'Month', /** * @cfg {String} dayText * The label to show for the day column. * @accessor */ dayText: 'Day', /** * @cfg {String} yearText * The label to show for the year column. * @accessor */ yearText: 'Year', /** * @cfg {Array} slotOrder * An array of strings that specifies the order of the slots. * @accessor */ slotOrder: ['month', 'day', 'year'], /** * @cfg {Object/Date} value * Default value for the field and the internal {@link Ext.picker.Date} component. Accepts an object of 'year', * 'month' and 'day' values, all of which should be numbers, or a {@link Date}. * * Examples: * * - `{year: 1989, day: 1, month: 5}` = 1st May 1989 * - `new Date()` = current date * @accessor */ /** * @cfg {Array} slots * @hide * @accessor */ /** * @cfg {String/Mixed} doneButton * Can be either: * * - A {String} text to be used on the Done button. * - An {Object} as config for {@link Ext.Button}. * - `false` or `null` to hide it. 
* @accessor */ doneButton: true }, platformConfig: [{ theme: ['Windows'], doneButton: { iconCls: 'check2', ui: 'round', text: '' } }], initialize: function() { this.callParent(); this.on({ scope: this, delegate: '> slot', slotpick: this.onSlotPick }); this.on({ scope: this, show: this.onSlotPick }); }, setValue: function(value, animated) { if (Ext.isDate(value)) { value = { day : value.getDate(), month: value.getMonth() + 1, year : value.getFullYear() }; } this.callParent([value, animated]); this.onSlotPick(); }, getValue: function(useDom) { var values = {}, items = this.getItems().items, ln = items.length, daysInMonth, day, month, year, item, i; for (i = 0; i < ln; i++) { item = items[i]; if (item instanceof Ext.picker.Slot) { values[item.getName()] = item.getValue(useDom); } } //if all the slots return null, we should not return a date if (values.year === null && values.month === null && values.day === null) { return null; } year = Ext.isNumber(values.year) ? values.year : 1; month = Ext.isNumber(values.month) ? values.month : 1; day = Ext.isNumber(values.day) ? values.day : 1; if (month && year && month && day) { daysInMonth = this.getDaysInMonth(month, year); } day = (daysInMonth) ? 
Math.min(day, daysInMonth): day; return new Date(year, month - 1, day); }, /** * Updates the yearFrom configuration */ updateYearFrom: function() { if (this.initialized) { this.createSlots(); } }, /** * Updates the yearTo configuration */ updateYearTo: function() { if (this.initialized) { this.createSlots(); } }, /** * Updates the monthText configuration */ updateMonthText: function(newMonthText, oldMonthText) { var innerItems = this.getInnerItems, ln = innerItems.length, item, i; //loop through each of the current items and set the title on the correct slice if (this.initialized) { for (i = 0; i < ln; i++) { item = innerItems[i]; if ((typeof item.title == "string" && item.title == oldMonthText) || (item.title.html == oldMonthText)) { item.setTitle(newMonthText); } } } }, /** * Updates the {@link #dayText} configuration. */ updateDayText: function(newDayText, oldDayText) { var innerItems = this.getInnerItems, ln = innerItems.length, item, i; //loop through each of the current items and set the title on the correct slice if (this.initialized) { for (i = 0; i < ln; i++) { item = innerItems[i]; if ((typeof item.title == "string" && item.title == oldDayText) || (item.title.html == oldDayText)) { item.setTitle(newDayText); } } } }, /** * Updates the yearText configuration */ updateYearText: function(yearText) { var innerItems = this.getInnerItems, ln = innerItems.length, item, i; //loop through each of the current items and set the title on the correct slice if (this.initialized) { for (i = 0; i < ln; i++) { item = innerItems[i]; if (item.title == this.yearText) { item.setTitle(yearText); } } } }, // @private constructor: function() { this.callParent(arguments); this.createSlots(); }, /** * Generates all slots for all years specified by this component, and then sets them on the component * @private */ createSlots: function() { var me = this, slotOrder = me.getSlotOrder(), yearsFrom = me.getYearFrom(), yearsTo = me.getYearTo(), years = [], days = [], months = [], reverse 
= yearsFrom > yearsTo, ln, i, daysInMonth; while (yearsFrom) { years.push({ text : yearsFrom, value : yearsFrom }); if (yearsFrom === yearsTo) { break; } if (reverse) { yearsFrom } else { yearsFrom++; } } daysInMonth = me.getDaysInMonth(1, new Date().getFullYear()); for (i = 0; i < daysInMonth; i++) { days.push({ text : i + 1, value : i + 1 }); } for (i = 0, ln = Ext.Date.monthNames.length; i < ln; i++) { months.push({ text : Ext.Date.monthNames[i], value : i + 1 }); } var slots = []; slotOrder.forEach(function (item) { slots.push(me.createSlot(item, days, months, years)); }); me.setSlots(slots); }, /** * Returns a slot config for a specified date. * @private */ createSlot: function(name, days, months, years) { switch (name) { case 'year': return { name: 'year', align: 'center', data: years, title: this.getYearText(), flex: 3 }; case 'month': return { name: name, align: 'right', data: months, title: this.getMonthText(), flex: 4 }; case 'day': return { name: 'day', align: 'center', data: days, title: this.getDayText(), flex: 2 }; } }, onSlotPick: function() { var value = this.getValue(true), slot = this.getDaySlot(), year = value.getFullYear(), month = value.getMonth(), days = [], daysInMonth, i; if (!value || !Ext.isDate(value) || !slot) { return; } this.callParent(arguments); //get the new days of the month for this new date daysInMonth = this.getDaysInMonth(month + 1, year); for (i = 0; i < daysInMonth; i++) { days.push({ text: i + 1, value: i + 1 }); } // We don't need to update the slot days unless it has changed if (slot.getStore().getCount() == days.length) { return; } slot.getStore().setData(days); // Now we have the correct amount of days for the day slot, lets update it var store = slot.getStore(), viewItems = slot.getViewItems(), valueField = slot.getValueField(), index, item; index = store.find(valueField, value.getDate()); if (index == -1) { return; } item = Ext.get(viewItems[index]); slot.selectedIndex = index; slot.scrollToItem(item); 
slot.setValue(slot.getValue(true)); }, getDaySlot: function() { var innerItems = this.getInnerItems(), ln = innerItems.length, i, slot; if (this.daySlot) { return this.daySlot; } for (i = 0; i < ln; i++) { slot = innerItems[i]; if (slot.isSlot && slot.getName() == "day") { this.daySlot = slot; return slot; } } return null; }, // @private getDaysInMonth: function(month, year) { var daysInMonth = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]; return month == 2 && this.isLeapYear(year) ? 29 : daysInMonth[month-1]; }, // @private isLeapYear: function(year) { return !!((year & 3) === 0 && (year % 100 || (year % 400 === 0 && year))); }, onDoneButtonTap: function() { var oldValue = this._value, newValue = this.getValue(true), testValue = newValue; if (Ext.isDate(newValue)) { testValue = newValue.toDateString(); } if (Ext.isDate(oldValue)) { oldValue = oldValue.toDateString(); } if (testValue != oldValue) { this.fireEvent('change', this, newValue); } this.hide(); Ext.util.InputBlocker.unblockInputs(); } });
function acosh (arg) {
  // Inverse hyperbolic cosine: acosh(x) = ln(x + sqrt(x^2 - 1)).
  // Defined for arg >= 1; smaller inputs yield NaN via the sqrt.
  // + original by: Onno Marsman
  // * example 1: acosh(8723321.4);
  // * returns 1: 16.674657798418625
  var root = Math.sqrt(arg * arg - 1);
  return Math.log(arg + root);
}
// @target: ES3 // @sourcemap: true interface I {} var x = 0;
#!/bin/bash

# This script:
# - assumes there are 2 network interfaces connected to $PUBLIC_SUBNET and $PRIVATE_SUBNET respectively
# - finds IP addresses from those interfaces using `ip` command
# - exports the IPs as variables

# Print the source IP of the routing-table entry matching the given subnet.
#   $1 - subnet to look up (as it appears in `ip r` output)
# Exits with status 1 when the parameter is missing.
# Fix vs. original: all expansions are quoted so values containing spaces
# (or an unset parameter) cannot break the test/grep invocations.
function getIpForSubnet {
    if [ -z "$1" ]; then
        echo Subnet parameter undefined
        exit 1
    else
        # Route entries look like "... src <ip> ..."; extract the address.
        ip r | grep "$1" | sed 's/.*src\ \([^\ ]*\).*/\1/'
    fi
}

# Fix vs. original: the fallback defaults are now exported too, matching the
# header comment ("exports the IPs as variables") and the non-default branches.
if [ -z "$PUBLIC_SUBNET" ]; then
    export PUBLIC_IP=0.0.0.0
    echo PUBLIC_SUBNET undefined. PUBLIC_IP defaults to $PUBLIC_IP
else
    export PUBLIC_IP=$(getIpForSubnet "$PUBLIC_SUBNET")
fi

if [ -z "$PRIVATE_SUBNET" ]; then
    export PRIVATE_IP=127.0.0.1
    echo PRIVATE_SUBNET undefined. PRIVATE_IP defaults to $PRIVATE_IP
else
    export PRIVATE_IP=$(getIpForSubnet "$PRIVATE_SUBNET")
fi

# Diagnostics: show what was detected.
echo Routing table:
ip r
echo PUBLIC_SUBNET=$PUBLIC_SUBNET
echo PRIVATE_SUBNET=$PRIVATE_SUBNET
echo PUBLIC_IP=$PUBLIC_IP
echo PRIVATE_IP=$PRIVATE_IP
package opts import ( "fmt" "testing" ) func TestParseHost(t *testing.T) { invalid := []string{ "anything", "something with spaces", ": "unknown: "tcp://:port", "tcp://invalid", "tcp://invalid:port", } valid := map[string]string{ "": DefaultHost, " ": DefaultHost, " ": DefaultHost, "fd: "fd://something": "fd://something", "tcp://host:": fmt.Sprintf("tcp://host:%d", DefaultHTTPPort), "tcp://": DefaultTCPHost, "tcp://:2375": fmt.Sprintf("tcp://%s:2375", DefaultHTTPHost), "tcp://:2376": fmt.Sprintf("tcp://%s:2376", DefaultHTTPHost), "tcp://0.0.0.0:8080": "tcp://0.0.0.0:8080", "tcp://192.168.0.0:12000": "tcp://192.168.0.0:12000", "tcp://192.168:8080": "tcp://192.168:8080", "tcp://0.0.0.0:1234567890": "tcp://0.0.0.0:1234567890", // yeah it's valid :P " tcp://:7777/path ": fmt.Sprintf("tcp://%s:7777/path", DefaultHTTPHost), "tcp://docker.com:2375": "tcp://docker.com:2375", "unix://": "unix://" + DefaultUnixSocket, "unix://path/to/socket": "unix://path/to/socket", "npipe://": "npipe://" + DefaultNamedPipe, "npipe:////./pipe/foo": "npipe:////./pipe/foo", } for _, value := range invalid { if _, err := ParseHost(false, value); err == nil { t.Errorf("Expected an error for %v, got [nil]", value) } } for value, expected := range valid { if actual, err := ParseHost(false, value); err != nil || actual != expected { t.Errorf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err) } } } func <API key>(t *testing.T) { invalids := map[string]string{ "0.0.0.0": "Invalid bind address format: 0.0.0.0", "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1", "udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375", "tcp://unix:///run/docker.sock": "Invalid bind address format: unix", " tcp://:7777/path ": "Invalid bind address format: tcp://:7777/path ", "tcp": "Invalid bind address format: tcp", "unix": "Invalid bind 
address format: unix", "fd": "Invalid bind address format: fd", "": "Invalid bind address format: ", } valids := map[string]string{ "0.0.0.1:": "tcp://0.0.0.1:2375", "0.0.0.1:5555": "tcp://0.0.0.1:5555", "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", "[::1]:": "tcp://[::1]:2375", "[::1]:5555/path": "tcp://[::1]:5555/path", "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2375", "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", ":6666": fmt.Sprintf("tcp://%s:6666", DefaultHTTPHost), ":6666/path": fmt.Sprintf("tcp://%s:6666/path", DefaultHTTPHost), "tcp://": DefaultTCPHost, "tcp://:7777": fmt.Sprintf("tcp://%s:7777", DefaultHTTPHost), "tcp://:7777/path": fmt.Sprintf("tcp://%s:7777/path", DefaultHTTPHost), "unix:///run/docker.sock": "unix:///run/docker.sock", "unix://": "unix://" + DefaultUnixSocket, "fd: "fd://something": "fd://something", "localhost:": "tcp://localhost:2375", "localhost:5555": "tcp://localhost:5555", "localhost:5555/path": "tcp://localhost:5555/path", } for invalidAddr, expectedError := range invalids { if addr, err := <API key>(invalidAddr); err == nil || err.Error() != expectedError { t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) } } for validAddr, expectedAddr := range valids { if addr, err := <API key>(validAddr); err != nil || addr != expectedAddr { t.Errorf("%v -> expected %v, got (%v) addr (%v)", validAddr, expectedAddr, err, addr) } } } func TestParseTCP(t *testing.T) { var ( defaultHTTPHost = "tcp://127.0.0.1:2376" ) invalids := map[string]string{ "0.0.0.0": "Invalid bind address format: 0.0.0.0", "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", "udp://127.0.0.1": "Invalid proto, expected tcp: udp://127.0.0.1", "udp://127.0.0.1:2375": "Invalid proto, expected tcp: udp://127.0.0.1:2375", } valids := map[string]string{ "": defaultHTTPHost, "tcp://": defaultHTTPHost, "0.0.0.1:": 
"tcp://0.0.0.1:2376", "0.0.0.1:5555": "tcp://0.0.0.1:5555", "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", ":6666": "tcp://127.0.0.1:6666", ":6666/path": "tcp://127.0.0.1:6666/path", "tcp://:7777": "tcp://127.0.0.1:7777", "tcp://:7777/path": "tcp://127.0.0.1:7777/path", "[::1]:": "tcp://[::1]:2376", "[::1]:5555": "tcp://[::1]:5555", "[::1]:5555/path": "tcp://[::1]:5555/path", "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2376", "[0:0:0:0:0:0:0:1]:5555": "tcp://[0:0:0:0:0:0:0:1]:5555", "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", "localhost:": "tcp://localhost:2376", "localhost:5555": "tcp://localhost:5555", "localhost:5555/path": "tcp://localhost:5555/path", } for invalidAddr, expectedError := range invalids { if addr, err := parseTCPAddr(invalidAddr, defaultHTTPHost); err == nil || err.Error() != expectedError { t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) } } for validAddr, expectedAddr := range valids { if addr, err := parseTCPAddr(validAddr, defaultHTTPHost); err != nil || addr != expectedAddr { t.Errorf("%v -> expected %v, got %v and addr %v", validAddr, expectedAddr, err, addr) } } } func <API key>(t *testing.T) { if _, err := <API key>("unix", "tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { t.Fatalf("Expected an error, got %v", err) } if _, err := <API key>("unix", "unix://tcp://127.0.0.1", "/var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { t.Fatalf("Expected an error, got %v", err) } if v, err := <API key>("unix", "", "/var/run/docker.sock"); err != nil || v != "unix:///var/run/docker.sock" { t.Fatalf("Expected an %v, got %v", v, "unix:///var/run/docker.sock") } }
/* * Authors: * Jerome Glisse <glisse@freedesktop.org> * Thomas Hellstrom <<API key>> * Dave Airlie */ #include <ttm/ttm_bo_api.h> #include <ttm/ttm_bo_driver.h> #include <ttm/ttm_placement.h> #include <ttm/ttm_module.h> #include <ttm/ttm_page_alloc.h> #include <drm/drmP.h> #include <drm/radeon_drm.h> #include <linux/seq_file.h> #include <linux/slab.h> #include "radeon_reg.h" #include "radeon.h" #define <API key> (0x100000000ULL >> PAGE_SHIFT) static int <API key>(struct radeon_device *rdev); static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) { struct radeon_mman *mman; struct radeon_device *rdev; mman = container_of(bdev, struct radeon_mman, bdev); rdev = container_of(mman, struct radeon_device, mman); return rdev; } /* * Global memory. */ static int <API key>(struct <API key> *ref) { return ttm_mem_global_init(ref->object); } static void <API key>(struct <API key> *ref) { <API key>(ref->object); } static int <API key>(struct radeon_device *rdev) { struct <API key> *global_ref; int r; rdev->mman.<API key> = false; global_ref = &rdev->mman.mem_global_ref; global_ref->global_type = DRM_GLOBAL_TTM_MEM; global_ref->size = sizeof(struct ttm_mem_global); global_ref->init = &<API key>; global_ref->release = &<API key>; r = drm_global_item_ref(global_ref); if (r != 0) { DRM_ERROR("Failed setting up TTM memory accounting " "subsystem.\n"); return r; } rdev->mman.bo_global_ref.mem_glob = rdev->mman.mem_global_ref.object; global_ref = &rdev->mman.bo_global_ref.ref; global_ref->global_type = DRM_GLOBAL_TTM_BO; global_ref->size = sizeof(struct ttm_bo_global); global_ref->init = &ttm_bo_global_init; global_ref->release = &<API key>; r = drm_global_item_ref(global_ref); if (r != 0) { DRM_ERROR("Failed setting up TTM BO subsystem.\n"); <API key>(&rdev->mman.mem_global_ref); return r; } rdev->mman.<API key> = true; return 0; } static void <API key>(struct radeon_device *rdev) { if (rdev->mman.<API key>) { <API key>(&rdev->mman.bo_global_ref.ref); <API 
key>(&rdev->mman.mem_global_ref); rdev->mman.<API key> = false; } } struct ttm_backend *<API key>(struct radeon_device *rdev); static struct ttm_backend* <API key>(struct ttm_bo_device *bdev) { struct radeon_device *rdev; rdev = radeon_get_rdev(bdev); #if __OS_HAS_AGP if (rdev->flags & RADEON_IS_AGP) { return <API key>(bdev, rdev->ddev->agp->bridge); } else #endif { return <API key>(rdev); } } static int <API key>(struct ttm_bo_device *bdev, uint32_t flags) { return 0; } static int <API key>(struct ttm_bo_device *bdev, uint32_t type, struct <API key> *man) { struct radeon_device *rdev; rdev = radeon_get_rdev(bdev); switch (type) { case TTM_PL_SYSTEM: /* System memory */ man->flags = <API key>; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; break; case TTM_PL_TT: man->func = &ttm_bo_manager_func; man->gpu_offset = rdev->mc.gtt_start; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; man->flags = <API key> | <API key>; #if __OS_HAS_AGP if (rdev->flags & RADEON_IS_AGP) { if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) { DRM_ERROR("AGP is not enabled for memory type %u\n", (unsigned)type); return -EINVAL; } if (!rdev->ddev->agp->cant_use_aperture) man->flags = <API key>; man->available_caching = <API key> | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; } #endif break; case TTM_PL_VRAM: /* "On-card" video ram */ man->func = &ttm_bo_manager_func; man->gpu_offset = rdev->mc.vram_start; man->flags = <API key> | <API key>; man->available_caching = <API key> | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; break; default: DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); return -EINVAL; } return 0; } static void radeon_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { struct radeon_bo *rbo; static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; if (!<API key>(bo)) { placement->fpfn = 0; placement->lpfn = 0; 
placement->placement = &placements; placement->busy_placement = &placements; placement->num_placement = 1; placement->num_busy_placement = 1; return; } rbo = container_of(bo, struct radeon_bo, tbo); switch (bo->mem.mem_type) { case TTM_PL_VRAM: if (rbo->rdev->cp.ready == false) <API key>(rbo, <API key>); else <API key>(rbo, <API key>); break; case TTM_PL_TT: default: <API key>(rbo, <API key>); } *placement = rbo->placement; } static int <API key>(struct ttm_buffer_object *bo, struct file *filp) { return 0; } static void radeon_move_null(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct ttm_mem_reg *old_mem = &bo->mem; BUG_ON(old_mem->mm_node != NULL); *old_mem = *new_mem; new_mem->mm_node = NULL; } static int radeon_move_blit(struct ttm_buffer_object *bo, bool evict, int no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem, struct ttm_mem_reg *old_mem) { struct radeon_device *rdev; uint64_t old_start, new_start; struct radeon_fence *fence; int r; rdev = radeon_get_rdev(bo->bdev); r = radeon_fence_create(rdev, &fence); if (unlikely(r)) { return r; } old_start = old_mem->start << PAGE_SHIFT; new_start = new_mem->start << PAGE_SHIFT; switch (old_mem->mem_type) { case TTM_PL_VRAM: old_start += rdev->mc.vram_start; break; case TTM_PL_TT: old_start += rdev->mc.gtt_start; break; default: DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); return -EINVAL; } switch (new_mem->mem_type) { case TTM_PL_VRAM: new_start += rdev->mc.vram_start; break; case TTM_PL_TT: new_start += rdev->mc.gtt_start; break; default: DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); return -EINVAL; } if (!rdev->cp.ready) { DRM_ERROR("Trying to move memory with CP turned off.\n"); return -EINVAL; } r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence); /* FIXME: handle copy error */ r = <API key>(bo, (void *)fence, NULL, evict, no_wait_reserve, no_wait_gpu, new_mem); radeon_fence_unref(&fence); return r; } static int <API key>(struct 
ttm_buffer_object *bo, bool evict, bool interruptible, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct radeon_device *rdev; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; u32 placements; struct ttm_placement placement; int r; rdev = radeon_get_rdev(bo->bdev); tmp_mem = *new_mem; tmp_mem.mm_node = NULL; placement.fpfn = 0; placement.lpfn = 0; placement.num_placement = 1; placement.placement = &placements; placement.num_busy_placement = 1; placement.busy_placement = &placements; placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu); if (unlikely(r)) { return r; } r = <API key>(bo->ttm, tmp_mem.placement); if (unlikely(r)) { goto out_cleanup; } r = ttm_tt_bind(bo->ttm, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem); if (unlikely(r)) { goto out_cleanup; } r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); out_cleanup: ttm_bo_mem_put(bo, &tmp_mem); return r; } static int <API key>(struct ttm_buffer_object *bo, bool evict, bool interruptible, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct radeon_device *rdev; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; struct ttm_placement placement; u32 placements; int r; rdev = radeon_get_rdev(bo->bdev); tmp_mem = *new_mem; tmp_mem.mm_node = NULL; placement.fpfn = 0; placement.lpfn = 0; placement.num_placement = 1; placement.placement = &placements; placement.num_busy_placement = 1; placement.busy_placement = &placements; placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu); if (unlikely(r)) { return r; } r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } r = radeon_move_blit(bo, true, 
no_wait_reserve, no_wait_gpu, new_mem, old_mem); if (unlikely(r)) { goto out_cleanup; } out_cleanup: ttm_bo_mem_put(bo, &tmp_mem); return r; } static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, bool interruptible, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct radeon_device *rdev; struct ttm_mem_reg *old_mem = &bo->mem; int r; rdev = radeon_get_rdev(bo->bdev); if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { radeon_move_null(bo, new_mem); return 0; } if ((old_mem->mem_type == TTM_PL_TT && new_mem->mem_type == TTM_PL_SYSTEM) || (old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_TT)) { /* bind is enough */ radeon_move_null(bo, new_mem); return 0; } if (!rdev->cp.ready || rdev->asic->copy == NULL) { /* use memcpy */ goto memcpy; } if (old_mem->mem_type == TTM_PL_VRAM && new_mem->mem_type == TTM_PL_SYSTEM) { r = <API key>(bo, evict, interruptible, no_wait_reserve, no_wait_gpu, new_mem); } else if (old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_VRAM) { r = <API key>(bo, evict, interruptible, no_wait_reserve, no_wait_gpu, new_mem); } else { r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem); } if (r) { memcpy: r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); } return r; } static int <API key>(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { struct <API key> *man = &bdev->man[mem->mem_type]; struct radeon_device *rdev = radeon_get_rdev(bdev); mem->bus.addr = NULL; mem->bus.offset = 0; mem->bus.size = mem->num_pages << PAGE_SHIFT; mem->bus.base = 0; mem->bus.is_iomem = false; if (!(man->flags & <API key>)) return -EINVAL; switch (mem->mem_type) { case TTM_PL_SYSTEM: /* system memory */ return 0; case TTM_PL_TT: #if __OS_HAS_AGP if (rdev->flags & RADEON_IS_AGP) { /* RADEON_IS_AGP is set only if AGP is active */ mem->bus.offset = mem->start << PAGE_SHIFT; mem->bus.base = rdev->mc.agp_base; mem->bus.is_iomem = 
!rdev->ddev->agp->cant_use_aperture; } #endif break; case TTM_PL_VRAM: mem->bus.offset = mem->start << PAGE_SHIFT; /* check if it's visible */ if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size) return -EINVAL; mem->bus.base = rdev->mc.aper_base; mem->bus.is_iomem = true; break; default: return -EINVAL; } return 0; } static void <API key>(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { } static int <API key>(void *sync_obj, void *sync_arg, bool lazy, bool interruptible) { return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible); } static int <API key>(void *sync_obj, void *sync_arg) { return 0; } static void <API key>(void **sync_obj) { radeon_fence_unref((struct radeon_fence **)sync_obj); } static void *radeon_sync_obj_ref(void *sync_obj) { return radeon_fence_ref((struct radeon_fence *)sync_obj); } static bool <API key>(void *sync_obj, void *sync_arg) { return <API key>((struct radeon_fence *)sync_obj); } static struct ttm_bo_driver radeon_bo_driver = { .<API key> = &<API key>, .invalidate_caches = &<API key>, .init_mem_type = &<API key>, .evict_flags = &radeon_evict_flags, .move = &radeon_bo_move, .verify_access = &<API key>, .sync_obj_signaled = &<API key>, .sync_obj_wait = &<API key>, .sync_obj_flush = &<API key>, .sync_obj_unref = &<API key>, .sync_obj_ref = &radeon_sync_obj_ref, .move_notify = &<API key>, .<API key> = &<API key>, .io_mem_reserve = &<API key>, .io_mem_free = &<API key>, }; int radeon_ttm_init(struct radeon_device *rdev) { int r; r = <API key>(rdev); if (r) { return r; } /* No others user of address space so set it to 0 */ r = ttm_bo_device_init(&rdev->mman.bdev, rdev->mman.bo_global_ref.ref.object, &radeon_bo_driver, <API key>, rdev->need_dma32); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); return r; } rdev->mman.initialized = true; r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, rdev->mc.real_vram_size >> PAGE_SHIFT); if (r) { DRM_ERROR("Failed initializing VRAM heap.\n"); 
return r; } r = radeon_bo_create(rdev, NULL, 256 * 1024, PAGE_SIZE, true, <API key>, &rdev->stollen_vga_memory); if (r) { return r; } r = radeon_bo_reserve(rdev->stollen_vga_memory, false); if (r) return r; r = radeon_bo_pin(rdev->stollen_vga_memory, <API key>, NULL); radeon_bo_unreserve(rdev->stollen_vga_memory); if (r) { radeon_bo_unref(&rdev->stollen_vga_memory); return r; } DRM_INFO("radeon: %uM of VRAM memory ready\n", (unsigned)rdev->mc.real_vram_size / (1024 * 1024)); r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, rdev->mc.gtt_size >> PAGE_SHIFT); if (r) { DRM_ERROR("Failed initializing GTT heap.\n"); return r; } DRM_INFO("radeon: %uM of GTT memory ready.\n", (unsigned)(rdev->mc.gtt_size / (1024 * 1024))); if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; } r = <API key>(rdev); if (r) { DRM_ERROR("Failed to init debugfs\n"); return r; } return 0; } void radeon_ttm_fini(struct radeon_device *rdev) { int r; if (!rdev->mman.initialized) return; if (rdev->stollen_vga_memory) { r = radeon_bo_reserve(rdev->stollen_vga_memory, false); if (r == 0) { radeon_bo_unpin(rdev->stollen_vga_memory); radeon_bo_unreserve(rdev->stollen_vga_memory); } radeon_bo_unref(&rdev->stollen_vga_memory); } ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM); ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT); <API key>(&rdev->mman.bdev); radeon_gart_fini(rdev); <API key>(rdev); rdev->mman.initialized = false; DRM_INFO("radeon: ttm finalized\n"); } /* this should only be called at bootup or when userspace * isn't running */ void <API key>(struct radeon_device *rdev, u64 size) { struct <API key> *man; if (!rdev->mman.initialized) return; man = &rdev->mman.bdev.man[TTM_PL_VRAM]; /* this just adjusts TTM size idea, which sets lpfn to the correct value */ man->size = size >> PAGE_SHIFT; } static struct <API key> radeon_ttm_vm_ops; static const struct <API key> *ttm_vm_ops = NULL; static int radeon_ttm_fault(struct vm_area_struct *vma, struct 
vm_fault *vmf) { struct ttm_buffer_object *bo; struct radeon_device *rdev; int r; bo = (struct ttm_buffer_object *)vma->vm_private_data; if (bo == NULL) { return VM_FAULT_NOPAGE; } rdev = radeon_get_rdev(bo->bdev); mutex_lock(&rdev->vram_mutex); r = ttm_vm_ops->fault(vma, vmf); mutex_unlock(&rdev->vram_mutex); return r; } int radeon_mmap(struct file *filp, struct vm_area_struct *vma) { struct drm_file *file_priv; struct radeon_device *rdev; int r; if (unlikely(vma->vm_pgoff < <API key>)) { return drm_mmap(filp, vma); } file_priv = filp->private_data; rdev = file_priv->minor->dev->dev_private; if (rdev == NULL) { return -EINVAL; } r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev); if (unlikely(r != 0)) { return r; } if (unlikely(ttm_vm_ops == NULL)) { ttm_vm_ops = vma->vm_ops; radeon_ttm_vm_ops = *ttm_vm_ops; radeon_ttm_vm_ops.fault = &radeon_ttm_fault; } vma->vm_ops = &radeon_ttm_vm_ops; return 0; } /* * TTM backend functions. */ struct radeon_ttm_backend { struct ttm_backend backend; struct radeon_device *rdev; unsigned long num_pages; struct page **pages; struct page *dummy_read_page; bool populated; bool bound; unsigned offset; }; static int <API key>(struct ttm_backend *backend, unsigned long num_pages, struct page **pages, struct page *dummy_read_page) { struct radeon_ttm_backend *gtt; gtt = container_of(backend, struct radeon_ttm_backend, backend); gtt->pages = pages; gtt->num_pages = num_pages; gtt->dummy_read_page = dummy_read_page; gtt->populated = true; return 0; } static void <API key>(struct ttm_backend *backend) { struct radeon_ttm_backend *gtt; gtt = container_of(backend, struct radeon_ttm_backend, backend); gtt->pages = NULL; gtt->num_pages = 0; gtt->dummy_read_page = NULL; gtt->populated = false; gtt->bound = false; } static int <API key>(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem) { struct radeon_ttm_backend *gtt; int r; gtt = container_of(backend, struct radeon_ttm_backend, backend); gtt->offset = bo_mem->start << PAGE_SHIFT; if 
(!gtt->num_pages) { WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend); } r = radeon_gart_bind(gtt->rdev, gtt->offset, gtt->num_pages, gtt->pages); if (r) { DRM_ERROR("failed to bind %lu pages at 0x%08X\n", gtt->num_pages, gtt->offset); return r; } gtt->bound = true; return 0; } static int <API key>(struct ttm_backend *backend) { struct radeon_ttm_backend *gtt; gtt = container_of(backend, struct radeon_ttm_backend, backend); radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages); gtt->bound = false; return 0; } static void <API key>(struct ttm_backend *backend) { struct radeon_ttm_backend *gtt; gtt = container_of(backend, struct radeon_ttm_backend, backend); if (gtt->bound) { <API key>(backend); } kfree(gtt); } static struct ttm_backend_func radeon_backend_func = { .populate = &<API key>, .clear = &<API key>, .bind = &<API key>, .unbind = &<API key>, .destroy = &<API key>, }; struct ttm_backend *<API key>(struct radeon_device *rdev) { struct radeon_ttm_backend *gtt; gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL); if (gtt == NULL) { return NULL; } gtt->backend.bdev = &rdev->mman.bdev; gtt->backend.flags = 0; gtt->backend.func = &radeon_backend_func; gtt->rdev = rdev; gtt->pages = NULL; gtt->num_pages = 0; gtt->dummy_read_page = NULL; gtt->populated = false; gtt->bound = false; return &gtt->backend; } #define <API key> 2 #if defined(CONFIG_DEBUG_FS) static int <API key>(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_mm *mm = (struct drm_mm *)node->info_ent->data; struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; int ret; struct ttm_bo_global *glob = rdev->mman.bdev.glob; spin_lock(&glob->lru_lock); ret = drm_mm_dump_table(m, mm); spin_unlock(&glob->lru_lock); return ret; } #endif static int <API key>(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) static struct drm_info_list <API key>[<API 
key>+1]; static char <API key>[<API key>+1][32]; unsigned i; for (i = 0; i < <API key>; i++) { if (i == 0) sprintf(<API key>[i], "radeon_vram_mm"); else sprintf(<API key>[i], "radeon_gtt_mm"); <API key>[i].name = <API key>[i]; <API key>[i].show = &<API key>; <API key>[i].driver_features = 0; if (i == 0) <API key>[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv; else <API key>[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv; } /* Add ttm page pool to debugfs */ sprintf(<API key>[i], "ttm_page_pool"); <API key>[i].name = <API key>[i]; <API key>[i].show = &<API key>; <API key>[i].driver_features = 0; <API key>[i].data = NULL; return <API key>(rdev, <API key>, <API key>+1); #endif return 0; }
YUI.add('router', function (Y, NAME) { /** Provides URL-based routing using HTML5 `pushState()` or the location hash. @module app @submodule router @since 3.4.0 **/ var HistoryHash = Y.HistoryHash, QS = Y.QueryString, YArray = Y.Array, YLang = Y.Lang, YObject = Y.Object, win = Y.config.win, // Holds all the active router instances. This supports the static // `dispatch()` method which causes all routers to dispatch. instances = [], // We have to queue up pushState calls to avoid race conditions, since the // popstate event doesn't actually provide any info on what URL it's // associated with. saveQueue = [], /** Fired when the router is ready to begin dispatching to route handlers. You shouldn't need to wait for this event unless you plan to implement some kind of custom dispatching logic. It's used internally in order to avoid dispatching to an initial route if a browser history change occurs first. @event ready @param {Boolean} dispatched `true` if routes have already been dispatched (most likely due to a history change). @fireOnce **/ EVT_READY = 'ready'; /** Provides URL-based routing using HTML5 `pushState()` or the location hash. This makes it easy to wire up route handlers for different application states while providing full back/forward navigation support and bookmarkable, shareable URLs. @class Router @param {Object} [config] Config properties. @param {Boolean} [config.html5] Overrides the default capability detection and forces this router to use (`true`) or not use (`false`) HTML5 history. @param {String} [config.root=''] Root path from which all routes should be evaluated. @param {Array} [config.routes=[]] Array of route definition objects. @constructor @extends Base @since 3.4.0 **/ function Router() { Router.superclass.constructor.apply(this, arguments); } Y.Router = Y.extend(Router, Y.Base, { /** Whether or not `_dispatch()` has been called since this router was instantiated. 
@property _dispatched @type Boolean @default undefined @protected **/ /** Whether or not we're currently in the process of dispatching to routes. @property _dispatching @type Boolean @default undefined @protected **/ /** History event handle for the `history:change` or `hashchange` event subscription. @property _historyEvents @type EventHandle @protected **/ /** Cached copy of the `html5` attribute for internal use. @property _html5 @type Boolean @protected **/ /** Map which holds the registered param handlers in the form: `name` -> RegExp | Function. @property _params @type Object @protected @since 3.12.0 **/ /** Whether or not the `ready` event has fired yet. @property _ready @type Boolean @default undefined @protected **/ /** Regex used to break up a URL string around the URL's path. Subpattern captures: 1. Origin, everything before the URL's path-part. 2. The URL's path-part. 3. The URL's query. 4. The URL's hash fragment. @property _regexURL @type RegExp @protected @since 3.5.0 **/ _regexURL: /^((?:[^\/ /** Regex used to match parameter placeholders in route paths. Subpattern captures: 1. Parameter prefix character. Either a `:` for subpath parameters that should only match a single level of a path, or `*` for splat parameters that should match any number of path levels. 2. Parameter name, if specified, otherwise it is a wildcard match. @property _regexPathParam @type RegExp @protected **/ _regexPathParam: /([:*])([\w\-]+)?/g, /** Regex that matches and captures the query portion of a URL, minus the preceding `?` character, and discarding the hash portion of the URL if any. @property _regexUrlQuery @type RegExp @protected **/ _regexUrlQuery: /\?([^ /** Regex that matches everything before the path portion of a URL (the origin). This will be used to strip this part of the URL from a string when we only want the path. @property _regexUrlOrigin @type RegExp @protected **/ _regexUrlOrigin: /^(?:[^\/ /** Collection of registered routes. 
@property _routes @type Array @protected **/ initializer: function (config) { var self = this; self._html5 = self.get('html5'); self._params = {}; self._routes = []; self._url = self._getURL(); // Necessary because setters don't run on init. self._setRoutes(config && config.routes ? config.routes : self.get('routes')); // Set up a history instance or hashchange listener. if (self._html5) { self._history = new Y.HistoryHTML5({force: true}); self._historyEvents = Y.after('history:change', self._afterHistoryChange, self); } else { self._historyEvents = Y.on('hashchange', self._afterHistoryChange, win, self); } // Fire a `ready` event once we're ready to route. We wait first for all // subclass initializers to finish, then for window.onload, and then an // additional 20ms to allow the browser to fire a useless initial // `popstate` event if it wants to (and Chrome always wants to). self.publish(EVT_READY, { defaultFn : self._defReadyFn, fireOnce : true, preventable: false }); self.once('initializedChange', function () { Y.once('load', function () { setTimeout(function () { self.fire(EVT_READY, {dispatched: !!self._dispatched}); }, 20); }); }); // Store this router in the collection of all active router instances. instances.push(this); }, destructor: function () { var instanceIndex = YArray.indexOf(instances, this); // Remove this router from the collection of active router instances. if (instanceIndex > -1) { instances.splice(instanceIndex, 1); } if (this._historyEvents) { this._historyEvents.detach(); } }, /** Dispatches to the first route handler that matches the current URL, if any. If `dispatch()` is called before the `ready` event has fired, it will automatically wait for the `ready` event before dispatching. Otherwise it will dispatch immediately. 
@method dispatch @chainable **/ dispatch: function () { this.once(EVT_READY, function () { var req, res; this._ready = true; if (!this.upgrade()) { req = this._getRequest('dispatch'); res = this._getResponse(req); this._dispatch(req, res); } }); return this; }, /** Gets the current route path. @method getPath @return {String} Current route path. **/ getPath: function () { return this._getPath(); }, /** Returns `true` if this router has at least one route that matches the specified URL, `false` otherwise. This also checks that any named `param` handlers also accept app param values in the `url`. This method enforces the same-origin security constraint on the specified `url`; any URL which is not from the same origin as the current URL will always return `false`. @method hasRoute @param {String} url URL to match. @return {Boolean} `true` if there's at least one matching route, `false` otherwise. **/ hasRoute: function (url) { var path, routePath, routes; if (!this._hasSameOrigin(url)) { return false; } if (!this._html5) { url = this._upgradeURL(url); } // Get just the path portion of the specified `url`. The `match()` // method does some special checking that the `path` is within the root. path = this.removeQuery(url.replace(this._regexUrlOrigin, '')); routes = this.match(path); if (!routes.length) { return false; } routePath = this.removeRoot(path); // Check that there's at least one route whose param handlers also // accept all the param values. return !!YArray.filter(routes, function (route) { // Get the param values for the route and path to see whether the // param handlers accept or reject the param values. Include any // route whose named param handlers accept *all* param values. This // will return `false` if a param handler rejects a param value. return this._getParamValues(route, routePath); }, this).length; }, /** Returns an array of route objects that match the specified URL path. 
If this router has a `root`, then the specified `path` _must_ be semantically within the `root` path to match any routes. This method is called internally to determine which routes match the current path whenever the URL changes. You may override it if you want to customize the route matching logic, although this usually shouldn't be necessary. Each returned route object has the following properties: * `callback`: A function or a string representing the name of a function this router that should be executed when the route is triggered. * `keys`: An array of strings representing the named parameters defined in the route's path specification, if any. * `path`: The route's path specification, which may be either a string or a regex. * `regex`: A regular expression version of the route's path specification. This regex is used to determine whether the route matches a given path. @example router.route('/foo', function () {}); router.match('/foo'); // => [{callback: ..., keys: [], path: '/foo', regex: ...}] @method match @param {String} path URL path to match. This should be an absolute path that starts with a slash: "/". @return {Object[]} Array of route objects that match the specified path. **/ match: function (path) { var root = this.get('root'); if (root) { // The `path` must be semantically within this router's `root` path // or mount point, if it's not then no routes should be considered a // match. if (!this._pathHasRoot(root, path)) { return []; } // Remove this router's `root` from the `path` before checking the // routes for any matches. path = this.removeRoot(path); } return YArray.filter(this._routes, function (route) { return path.search(route.regex) > -1; }); }, /** Adds a handler for a route param specified by _name_. Param handlers can be registered via this method and are used to validate/format values of named params in routes before dispatching to the route's handler functions. 
    Using param handlers allows routes to be defined using string paths (so
    `req.params` gets named params) while still applying extra validation or
    formatting to the param values parsed from the URL.

    If a param handler regex or function returns a value of `false`, `null`,
    `undefined`, or `NaN`, the current route will not match and will be
    skipped. All other return values will be used in place of the original
    param value parsed from the URL.

    @example
        router.param('postId', function (value) {
            return parseInt(value, 10);
        });

        router.param('username', /^\w+$/);

        router.route('/posts/:postId', function (req) { });

        router.route('/users/:username', function (req) {
            // `req.params.username` is an array because the result of calling
            // `exec()` on the regex is assigned as the param's value.
        });

        router.route('*', function () { });

        // URLs which match routes:
        router.save('/posts/1');     // => "Post: 1"
        router.save('/users/ericf'); // => "User: ericf"

        // URLs which do not match routes because params fail validation:
        router.save('/posts/a');            // => "Catch-all no routes matched!"
        router.save('/users/ericf,rgrove'); // => "Catch-all no routes matched!"

    @method param
    @param {String} name Name of the param used in route paths.
    @param {Function|RegExp} handler Function to invoke or regular expression
        to `exec()` during route dispatching; its return value replaces the
        param value. Returning `false`, `null`, `undefined`, or `NaN` causes
        the current route to be skipped. A function is invoked in the context
        of this instance with:
    @param {String} handler.value The current param value parsed from the URL.
    @param {String} handler.name The name of the param.
    @chainable
    @since 3.12.0
    **/
    param: function (name, handler) {
        this._params[name] = handler;
        return this;
    },

    /**
    Removes the `root` URL from the front of _url_ (if it's there) and returns
    the result. The returned path will always have a leading `/`.
@method removeRoot @param {String} url URL. @return {String} Rootless path. **/ removeRoot: function (url) { var root = this.get('root'), path; // Strip out the non-path part of the URL, if any (e.g. url = url.replace(this._regexUrlOrigin, ''); // Return the host-less URL if there's no `root` path to further remove. if (!root) { return url; } path = this.removeQuery(url); // Remove the `root` from the `url` if it's the same or its path is // semantically within the root path. if (path === root || this._pathHasRoot(root, path)) { url = url.substring(root.length); } return url.charAt(0) === '/' ? url : '/' + url; }, /** Removes a query string from the end of the _url_ (if one exists) and returns the result. @method removeQuery @param {String} url URL. @return {String} Queryless path. **/ removeQuery: function (url) { return url.replace(/\?.*$/, ''); }, replace: function (url) { return this._queue(url, true); }, * Route: `/file/*path` * URL: `/file/foo/bar/baz.txt`, params: `{path: 'foo/bar/baz.txt'}` * URL: `/file/foo`, params: `{path: 'foo'}` **Middleware**: Routes also support an arbitrary number of callback functions. This allows you to easily reuse parts of your route-handling code with different route. This method is liberal in how it processes the specified `callbacks`, you can specify them as separate arguments, or as arrays, or both. If multiple route match a given URL, they will be executed in the order they were added. The first route that was added will be the first to be executed. **Passing Control**: Invoking the `next()` function within a route callback will pass control to the next callback function (if any) or route handler (if any). If a value is passed to `next()`, it's assumed to be an error, therefore stopping the dispatch chain, unless that value is: `"route"`, which is special case and dispatching will skip to the next route handler. This allows middleware to skip any remaining middleware for a particular route. 
@example router.route('/photos/:tag/:page', function (req, res, next) { }); // Using middleware. router.findUser = function (req, res, next) { req.user = this.get('users').findById(req.params.user); next(); }; router.route('/users/:user', 'findUser', function (req, res, next) { // The `findUser` middleware puts the `user` object on the `req`. }); @method route @param {String|RegExp|Object} route Route to match. May be a string or a regular expression, or a route object. @param {Array|Function|String} callbacks* Callback functions to call whenever this route is triggered. These can be specified as separate arguments, or in arrays, or both. If a callback is specified as a string, the named function will be called on this router instance. @param {Object} callbacks.req Request object containing information about the request. It contains the following properties. @param {Array|Object} callbacks.req.params Captured parameters matched by the route path specification. If a string path was used and contained named parameters, then this will be a key/value hash mapping parameter names to their matched values. If a regex path was used, this will be an array of subpattern matches starting at index 0 for the full match, then 1 for the first subpattern match, and so on. @param {String} callbacks.req.path The current URL path. @param {Number} callbacks.req.pendingCallbacks Number of remaining callbacks the route handler has after this one in the dispatch chain. @param {Number} callbacks.req.pendingRoutes Number of matching routes after this one in the dispatch chain. @param {Object} callbacks.req.query Query hash representing the URL query string, if any. Parameter names are keys, and are mapped to parameter values. @param {Object} callbacks.req.route Reference to the current route object whose callbacks are being dispatched. @param {Object} callbacks.req.router Reference to this router instance. @param {String} callbacks.req.src What initiated the dispatch. 
In an HTML5 browser, when the back/forward buttons are used, this property will have a value of "popstate". When the `dispath()` method is called, the `src` will be `"dispatch"`. @param {String} callbacks.req.url The full URL. @param {Object} callbacks.res Response object containing methods and information that relate to responding to a request. It contains the following properties. @param {Object} callbacks.res.req Reference to the request object. @param {Function} callbacks.next Function to pass control to the next callback or the next matching route if no more callbacks (middleware) exist for the current route handler. If you don't call this function, then no further callbacks or route handlers will be executed, even if there are more that match. If you do call this function, then the next callback (if any) or matching route handler (if any) will be called. All of these functions will receive the same `req` and `res` objects that were passed to this route (so you can use these objects to pass data along to subsequent callbacks and routes). @param {String} [callbacks.next.err] Optional error which will stop the dispatch chaining for this `req`, unless the value is `"route"`, which is special cased to jump skip past any callbacks for the current route and pass control the next route handler. @chainable **/ route: function (route, callbacks) { // Grab callback functions from var-args. callbacks = YArray(arguments, 1, true); var keys, regex; // Supports both the `route(path, callbacks)` and `route(config)` call // signatures, allowing for fully-processed route configs to be passed. if (typeof route === 'string' || YLang.isRegExp(route)) { // Flatten `callbacks` into a single dimension array. 
callbacks = YArray.flatten(callbacks); keys = []; regex = this._getRegex(route, keys); route = { callbacks: callbacks, keys : keys, path : route, regex : regex }; } else { // Look for any configured `route.callbacks` and fallback to // `route.callback` for back-compat, append var-arg `callbacks`, // then flatten the entire collection to a single dimension array. callbacks = YArray.flatten( [route.callbacks || route.callback || []].concat(callbacks) ); // Check for previously generated regex, also fallback to `regexp` // for greater interop. keys = route.keys; regex = route.regex || route.regexp; // Generates the route's regex if it doesn't already have one. if (!regex) { keys = []; regex = this._getRegex(route.path, keys); } // Merge specified `route` config object with processed data. route = Y.merge(route, { callbacks: callbacks, keys : keys, path : route.path || regex, regex : regex }); } this._routes.push(route); return this; }, save: function (url) { return this._queue(url); }, /** Upgrades a hash-based URL to an HTML5 URL if necessary. In non-HTML5 browsers, this method is a noop. @method upgrade @return {Boolean} `true` if the URL was upgraded, `false` otherwise. **/ upgrade: function () { if (!this._html5) { return false; } // Get the resolve hash path. var hashPath = this._getHashPath(); if (hashPath) { // This is an HTML5 browser and we have a hash-based path in the // URL, so we need to upgrade the URL to a non-hash URL. This // will trigger a `history:change` event, which will in turn // trigger a dispatch. this.once(EVT_READY, function () { this.replace(hashPath); }); return true; } return false; }, /** Wrapper around `decodeURIComponent` that also converts `+` chars into spaces. @method _decode @param {String} string String to decode. @return {String} Decoded string. @protected **/ _decode: function (string) { return decodeURIComponent(string.replace(/\+/g, ' ')); }, /** Shifts the topmost `_save()` call off the queue and executes it. 
    Does nothing if the queue is empty.

    @method _dequeue
    @chainable
    @see _queue
    @protected
    **/
    _dequeue: function () {
        var self = this,
            fn;

        // If window.onload hasn't yet fired, wait until it has before
        // dequeueing. This will ensure that we don't call pushState() before
        // an initial popstate event has fired.
        if (!YUI.Env.windowLoaded) {
            Y.once('load', function () {
                self._dequeue();
            });

            return this;
        }

        fn = saveQueue.shift();
        return fn ? fn() : this;
    },

    /**
    Dispatches to the first route handler that matches the specified _path_.

    If called before the `ready` event has fired, the dispatch will be
    aborted. This ensures normalized behavior between Chrome (which fires a
    `popstate` event on every pageview) and other browsers (which do not).

    @method _dispatch
    @param {object} req Request object.
    @param {String} res Response object.
    @chainable
    @protected
    **/
    _dispatch: function (req, res) {
        var self      = this,
            routes    = self.match(req.path),
            callbacks = [],
            routePath, paramValues;

        self._dispatching = self._dispatched = true;

        // No matching routes: nothing to dispatch.
        if (!routes || !routes.length) {
            self._dispatching = false;
            return self;
        }

        routePath = self.removeRoot(req.path);

        // `next` drives the middleware/route chain: it consumes `callbacks`
        // for the current route first, then shifts to the next matching route.
        function next(err) {
            var callback, name, route;

            if (err) {
                // Special case "route" to skip to the next route handler
                // avoiding any additional callbacks for the current route.
                if (err === 'route') {
                    callbacks = [];
                    next();
                } else {
                    Y.error(err);
                }
            } else if ((callback = callbacks.shift())) {
                // String callbacks name a method on this router instance.
                if (typeof callback === 'string') {
                    name     = callback;
                    callback = self[name];

                    if (!callback) {
                        Y.error('Router: Callback not found: ' + name, null, 'router');
                    }
                }

                // Allow access to the number of remaining callbacks for the
                // route.
                req.pendingCallbacks = callbacks.length;

                callback.call(self, req, res, next);
            } else if ((route = routes.shift())) {
                paramValues = self._getParamValues(route, routePath);

                if (!paramValues) {
                    // Skip this route because one of the param handlers
                    // rejected a param value in the `routePath`.
                    next('route');
                    return;
                }

                // Expose the processed param values.
                req.params = paramValues;

                // Allow access to the current route and the number of
                // remaining routes for this request.
                req.route         = route;
                req.pendingRoutes = routes.length;

                // Make a copy of this route's `callbacks` so the original
                // array is preserved.
                callbacks = route.callbacks.concat();

                // Execute this route's `callbacks`.
                next();
            }
        }

        next();

        self._dispatching = false;
        return self._dequeue();
    },

    /**
    Returns the resolved path from the hash fragment, or an empty string if
    the hash is not path-like.

    @method _getHashPath
    @param {String} [hash] Hash fragment to resolve into a path. By default
        this will be the hash from the current URL.
    @return {String} Current hash path, or an empty string if the hash is
        empty.
    @protected
    **/
    _getHashPath: function (hash) {
        hash || (hash = HistoryHash.getHash());

        // Make sure the `hash` is path-like (starts with "/").
        if (hash && hash.charAt(0) === '/') {
            return this._joinURL(hash);
        }

        return '';
    },

    /**
    Returns the current URL's origin (protocol + host), preferring the
    native `location.origin` when available.

    @method _getOrigin
    @return {String} Origin of the current URL.
    @protected
    **/
    _getOrigin: function () {
        var location = Y.getLocation();
        return location.origin || (location.protocol + '//' + location.host);
    },

    /**
    Getter for the `params` attribute.

    @method _getParams
    @return {Object} Mapping of param handlers: `name` -> RegExp | Function.
    @protected
    @since 3.12.0
    **/
    _getParams: function () {
        return Y.merge(this._params);
    },

    /**
    Gets the param values for the specified `route` and `path`, suitable to
    use to form `req.params`.

    **Note:** This method will return `false` if a named param handler
    rejects a param value.

    @method _getParamValues
    @param {Object} route The route to get param values for.
    @param {String} path The route path (root removed) that provides the param
        values.
    @return {Boolean|Array|Object} The collection of processed param values.
        Either a hash of `name` -> `value` for named params processed by this
        router's param handlers, or an array of matches for a route with
        unnamed params. If a named param handler rejects a value, then `false`
        will be returned.
    @protected
    @since 3.16.0
    **/
    _getParamValues: function (route, path) {
        var matches, paramsMatch, paramValues;

        // Decode each of the path params so that any URL-encoded path
        // segments are decoded in the `req.params` object.
        matches = YArray.map(route.regex.exec(path) || [], function (match) {
            // Decode matches, or coerce `undefined` matches to an empty
            // string to match expectations of working with `req.params`
            // in the context of route dispatching, and normalize
            // browser differences in their handling of regex NPCGs.
            return (match && this._decode(match)) || '';
        }, this);

        // Simply return the array of decoded values when the route does *not*
        // use named parameters.
        if (matches.length - 1 !== route.keys.length) {
            return matches;
        }

        // Remove the first "match" from the param values, because it's just
        // the `path` processed by the route's regex, and map the values to
        // the keys to create the named params collection.
        paramValues = YArray.hash(route.keys, matches.slice(1));

        // Pass each named param value to its handler, if there is one, for
        // validation/processing. If a param value is rejected by a handler,
        // then the params don't match and a falsy value is returned.
        paramsMatch = YArray.every(route.keys, function (name) {
            var paramHandler = this._params[name],
                value        = paramValues[name];

            if (paramHandler && value && typeof value === 'string') {
                // Check if `paramHandler` is a RegExp, because this
                // is true in Android 2.3 and other browsers!
                value = YLang.isRegExp(paramHandler) ?
                        paramHandler.exec(value) :
                        paramHandler.call(this, value, name);

                if (value !== false && YLang.isValue(value)) {
                    // Update the named param to the value from the handler.
                    paramValues[name] = value;
                    return true;
                }

                // Consider the param value as rejected by the handler.
                return false;
            }

            return true;
        }, this);

        if (paramsMatch) {
            return paramValues;
        }

        // Signal that a param value was rejected by a named param handler.
        return false;
    },

    /**
    Gets the current route path.
@method _getPath @return {String} Current route path. @protected **/ _getPath: function () { var path = (!this._html5 && this._getHashPath()) || Y.getLocation().pathname; return this.removeQuery(path); }, /** Returns the current path root after popping off the last path segment, making it useful for resolving other URL paths against. The path root will always begin and end with a '/'. @method _getPathRoot @return {String} The URL's path root. @protected @since 3.5.0 **/ _getPathRoot: function () { var slash = '/', path = Y.getLocation().pathname, segments; if (path.charAt(path.length - 1) === slash) { return path; } segments = path.split(slash); segments.pop(); return segments.join(slash) + slash; }, /** Gets the current route query string. @method _getQuery @return {String} Current route query string. @protected **/ _getQuery: function () { var location = Y.getLocation(), hash, matches; if (this._html5) { return location.search.substring(1); } hash = HistoryHash.getHash(); matches = hash.match(this._regexUrlQuery); return hash && matches ? matches[1] : location.search.substring(1); }, /** Creates a regular expression from the given route specification. If _path_ is already a regex, it will be returned unmodified. @method _getRegex @param {String|RegExp} path Route path specification. @param {Array} keys Array reference to which route parameter names will be added. @return {RegExp} Route regex. @protected **/ _getRegex: function (path, keys) { if (YLang.isRegExp(path)) { return path; } // Special case for catchall paths. if (path === '*') { return (/.*/); } path = path.replace(this._regexPathParam, function (match, operator, key) { // Only `*` operators are supported for key-less matches to allowing /** Gets a request object that can be passed to a route handler. @method _getRequest @param {String} src What initiated the URL change and need for the request. @return {Object} Request object. 
@protected **/ _getRequest: function (src) { return { path : this._getPath(), query : this._parseQuery(this._getQuery()), url : this._getURL(), router: this, src : src }; }, /** Gets a response object that can be passed to a route handler. @method _getResponse @param {Object} req Request object. @return {Object} Response Object. @protected **/ _getResponse: function (req) { return {req: req}; }, /** Getter for the `routes` attribute. @method _getRoutes @return {Object[]} Array of route objects. @protected **/ _getRoutes: function () { return this._routes.concat(); }, /** Gets the current full URL. @method _getURL @return {String} URL. @protected **/ _getURL: function () { var url = Y.getLocation().toString(); if (!this._html5) { url = this._upgradeURL(url); } return url; }, /** Returns `true` when the specified `url` is from the same origin as the current URL; i.e., the protocol, host, and port of the URLs are the same. All host or path relative URLs are of the same origin. A scheme-relative URL is first prefixed with the current scheme before being evaluated. @method _hasSameOrigin @param {String} url URL to compare origin with the current URL. @return {Boolean} Whether the URL has the same origin of the current URL. @protected **/ _hasSameOrigin: function (url) { var origin = ((url && url.match(this._regexUrlOrigin)) || [])[0]; // Prepend current scheme to scheme-relative URLs. if (origin && origin.indexOf(' origin = Y.getLocation().protocol + origin; } return !origin || origin === this._getOrigin(); }, /** Joins the `root` URL to the specified _url_, normalizing leading/trailing `/` characters. @example router.set('root', '/foo'); router._joinURL('bar'); // => '/foo/bar' router._joinURL('/bar'); // => '/foo/bar' router.set('root', '/foo/'); router._joinURL('bar'); // => '/foo/bar' router._joinURL('/bar'); // => '/foo/bar' @method _joinURL @param {String} url URL to append to the `root` URL. @return {String} Joined URL. 
@protected **/ _joinURL: function (url) { var root = this.get('root'); // Causes `url` to _always_ begin with a "/". url = this.removeRoot(url); if (url.charAt(0) === '/') { url = url.substring(1); } return root && root.charAt(root.length - 1) === '/' ? root + url : root + '/' + url; }, /** Returns a normalized path, ridding it of any '..' segments and properly handling leading and trailing slashes. @method _normalizePath @param {String} path URL path to normalize. @return {String} Normalized path. @protected @since 3.5.0 **/ _normalizePath: function (path) { var dots = '..', slash = '/', i, len, normalized, segments, segment, stack; if (!path || path === slash) { return slash; } segments = path.split(slash); stack = []; for (i = 0, len = segments.length; i < len; ++i) { segment = segments[i]; if (segment === dots) { stack.pop(); } else if (segment) { stack.push(segment); } } normalized = slash + stack.join(slash); // Append trailing slash if necessary. if (normalized !== slash && path.charAt(path.length - 1) === slash) { normalized += slash; } return normalized; }, /** Parses a URL query string into a key/value hash. If `Y.QueryString.parse` is available, this method will be an alias to that. @method _parseQuery @param {String} query Query string to parse. @return {Object} Hash of key/value pairs for query parameters. @protected **/ _parseQuery: QS && QS.parse ? QS.parse : function (query) { var decode = this._decode, params = query.split('&'), i = 0, len = params.length, result = {}, param; for (; i < len; ++i) { param = params[i].split('='); if (param[0]) { result[decode(param[0])] = decode(param[1] || ''); } } return result; }, /** Returns `true` when the specified `path` is semantically within the specified `root` path. If the `root` does not end with a trailing slash ("/"), one will be added before the `path` is evaluated against the root path. 
@example this._pathHasRoot('/app', '/app/foo'); // => true this._pathHasRoot('/app/', '/app/foo'); // => true this._pathHasRoot('/app/', '/app/'); // => true this._pathHasRoot('/app', '/foo/bar'); // => false this._pathHasRoot('/app/', '/foo/bar'); // => false this._pathHasRoot('/app/', '/app'); // => false this._pathHasRoot('/app', '/app'); // => false @method _pathHasRoot @param {String} root Root path used to evaluate whether the specificed `path` is semantically within. A trailing slash ("/") will be added if it does not already end with one. @param {String} path Path to evaluate for containing the specified `root`. @return {Boolean} Whether or not the `path` is semantically within the `root` path. @protected @since 3.13.0 **/ _pathHasRoot: function (root, path) { var rootPath = root.charAt(root.length - 1) === '/' ? root : root + '/'; return path.indexOf(rootPath) === 0; }, /** Queues up a `_save()` call to run after all previously-queued calls have finished. This is necessary because if we make multiple `_save()` calls before the first call gets dispatched, then both calls will dispatch to the last call's URL. All arguments passed to `_queue()` will be passed on to `_save()` when the queued function is executed. @method _queue @chainable @see _dequeue @protected **/ _queue: function () { var args = arguments, self = this; saveQueue.push(function () { if (self._html5) { if (Y.UA.ios && Y.UA.ios < 5) { // iOS <5 has buggy HTML5 history support, and needs to be // synchronous. self._save.apply(self, args); } else { // Wrapped in a timeout to ensure that _save() calls are // always processed asynchronously. This ensures consistency // between HTML5- and hash-based history. setTimeout(function () { self._save.apply(self, args); }, 1); } } else { self._dispatching = true; // otherwise we'll dequeue too quickly self._save.apply(self, args); } return self; }); return !this._dispatching ? 
this._dequeue() : this; }, /** Returns the normalized result of resolving the `path` against the current path. Falsy values for `path` will return just the current path. @method _resolvePath @param {String} path URL path to resolve. @return {String} Resolved path. @protected @since 3.5.0 **/ _resolvePath: function (path) { if (!path) { return Y.getLocation().pathname; } if (path.charAt(0) !== '/') { path = this._getPathRoot() + path; } return this._normalizePath(path); }, /** Resolves the specified URL against the current URL. This method resolves URLs like a browser does and will always return an absolute URL. When the specified URL is already absolute, it is assumed to be fully resolved and is simply returned as is. Scheme-relative URLs are prefixed with the current protocol. Relative URLs are giving the current URL's origin and are resolved and normalized against the current path root. @method _resolveURL @param {String} url URL to resolve. @return {String} Resolved URL. @protected @since 3.5.0 **/ _resolveURL: function (url) { var parts = url && url.match(this._regexURL), origin, path, query, hash, resolved; if (!parts) { return Y.getLocation().toString(); } origin = parts[1]; path = parts[2]; query = parts[3]; hash = parts[4]; // Absolute and scheme-relative URLs are assumed to be fully-resolved. if (origin) { // Prepend the current scheme for scheme-relative URLs. if (origin.indexOf(' origin = Y.getLocation().protocol + origin; } return origin + (path || '/') + (query || '') + (hash || ''); } // Will default to the current origin and current path. resolved = this._getOrigin() + this._resolvePath(path); // A path or query for the specified URL trumps the current URL's. if (path || query) { return resolved + (query || '') + (hash || ''); } query = this._getQuery(); return resolved + (query ? ('?' + query) : '') + (hash || ''); }, /** Saves a history entry using either `pushState()` or the location hash. 
    This method enforces the same-origin security constraint; attempting to
    save a `url` that is not from the same origin as the current URL will
    result in an error.

    @method _save
    @param {String} [url] URL for the history entry.
    @param {Boolean} [replace=false] If `true`, the current history entry will
        be replaced instead of a new one being added.
    @chainable
    @protected
    **/
    _save: function (url, replace) {
        var urlIsString = typeof url === 'string',
            currentPath, root, hash;

        // Perform same-origin check on the specified URL.
        if (urlIsString && !this._hasSameOrigin(url)) {
            Y.error('Security error: The new URL must be of the same origin as the current URL.');
            return this;
        }

        // Joins the `url` with the `root`.
        if (urlIsString) {
            url = this._joinURL(url);
        }

        // Force _ready to true to ensure that the history change is handled
        // even if _save is called before the `ready` event fires.
        this._ready = true;

        if (this._html5) {
            this._history[replace ? 'replace' : 'add'](null, {url: url});
        } else {
            currentPath = Y.getLocation().pathname;
            root        = this.get('root');
            hash        = HistoryHash.getHash();

            if (!urlIsString) {
                url = hash;
            }

            // Determine if the `root` already exists in the current
            // location's `pathname`, and if it does then we can exclude it
            // from the hash-based path. No need to duplicate the info in the
            // URL.
            if (root === currentPath || root === this._getPathRoot()) {
                url = this.removeRoot(url);
            }

            // The `hashchange` event only fires when the new hash is actually
            // different. This makes sure we'll always dequeue and dispatch
            // _all_ router instances, mimicking the HTML5 behavior.
            if (url === hash) {
                Y.Router.dispatch();
            } else {
                HistoryHash[replace ? 'replaceHash' : 'setHash'](url);
            }
        }

        return this;
    },

    /**
    Setter for the `params` attribute.

    @method _setParams
    @param {Object} params Map in the form: `name` -> RegExp | Function.
    @return {Object} The map of params: `name` -> RegExp | Function.
@protected @since 3.12.0 **/ _setParams: function (params) { this._params = {}; YObject.each(params, function (regex, name) { this.param(name, regex); }, this); return Y.merge(this._params); }, /** Setter for the `routes` attribute. @method _setRoutes @param {Object[]} routes Array of route objects. @return {Object[]} Array of route objects. @protected **/ _setRoutes: function (routes) { this._routes = []; YArray.each(routes, function (route) { this.route(route); }, this); return this._routes.concat(); }, _upgradeURL: function (url) { // We should not try to upgrade paths for external URLs. if (!this._hasSameOrigin(url)) { return url; } var hash = (url.match(/ hashPrefix = Y.HistoryHash.hashPrefix, hashPath; // Strip any hash prefix, like hash-bangs. if (hashPrefix && hash.indexOf(hashPrefix) === 0) { hash = hash.replace(hashPrefix, ''); } // If the hash looks like a URL path, assume it is, and upgrade it! if (hash) { hashPath = this._getHashPath(hash); if (hashPath) { return this._resolveURL(hashPath); } } return url; }, /** Handles `history:change` and `hashchange` events. @method _afterHistoryChange @param {EventFacade} e @protected **/ _afterHistoryChange: function (e) { var self = this, src = e.src, prevURL = self._url, currentURL = self._getURL(), req, res; self._url = currentURL; // Handles the awkwardness that is the `popstate` event. HTML5 browsers // fire `popstate` right before they fire `hashchange`, and Chrome fires // `popstate` on page load. If this router is not ready or the previous // and current URLs only differ by their hash, then we want to ignore // this `popstate` event. if (src === 'popstate' && (!self._ready || prevURL.replace(/ return; } req = self._getRequest(src); res = self._getResponse(req); self._dispatch(req, res); }, /** Default handler for the `ready` event. 
@method _defReadyFn @param {EventFacade} e @protected **/ _defReadyFn: function (e) { this._ready = true; } }, { NAME: 'router', ATTRS: { /** Whether or not this browser is capable of using HTML5 history. Setting this to `false` will force the use of hash-based history even on HTML5 browsers, but please don't do this unless you understand the consequences. @attribute html5 @type Boolean @initOnly **/ html5: { // Android versions lower than 3.0 are buggy and don't update // window.location after a pushState() call, so we fall back to // hash-based history for them. valueFn: function () { return Y.Router.html5; }, writeOnce: 'initOnly' }, /** Map of params handlers in the form: `name` -> RegExp | Function. If a param handler regex or function returns a value of `false`, `null`, `undefined`, or `NaN`, the current route will not match and be skipped. All other return values will be used in place of the original param value parsed from the URL. This attribute is intended to be used to set params at init time, or to completely reset all params after init. To add params after init without resetting all existing params, use the `param()` method. @attribute params @type Object @default `{}` @see param @since 3.12.0 **/ params: { value : {}, getter: '_getParams', setter: '_setParams' }, root: { value: '' }, /** Array of route objects. Each item in the array must be an object with the following properties in order to be processed by the router: * `path`: String or regex representing the path to match. See the docs for the `route()` method for more details. * `callbacks`: Function or a string representing the name of a function on this router instance that should be called when the route is triggered. An array of functions and/or strings may also be provided. See the docs for the `route()` method for more details. If a route object contains a `regex` or `regexp` property, or if its `path` is a regular express, then the route will be considered to be fully-processed. 
Any fully-processed routes may contain the following properties: * `regex`: The regular expression representing the path to match, this property may also be named `regexp` for greater compatibility. * `keys`: Array of named path parameters used to populate `req.params` objects when dispatching to route handlers. Any additional data contained on these route objects will be retained. This is useful to store extra metadata about a route; e.g., a `name` to give routes logical names. This attribute is intended to be used to set routes at init time, or to completely reset all routes after init. To add routes after init without resetting all existing routes, use the `route()` method. @attribute routes @type Object[] @default `[]` @see route **/ routes: { value : [], getter: '_getRoutes', setter: '_setRoutes' } }, // Used as the default value for the `html5` attribute, and for testing. html5: Y.HistoryBase.html5 && (!Y.UA.android || Y.UA.android >= 3), // To make this testable. _instances: instances, /** Dispatches to the first route handler that matches the specified `path` for all active router instances. This provides a mechanism to cause all active router instances to dispatch to their route handlers without needing to change the URL or fire the `history:change` or `hashchange` event. @method dispatch @static @since 3.6.0 **/ dispatch: function () { var i, len, router, req, res; for (i = 0, len = instances.length; i < len; i += 1) { router = instances[i]; if (router) { req = router._getRequest('dispatch'); res = router._getResponse(req); router._dispatch(req, res); } } } }); /** The `Controller` class was deprecated in YUI 3.5.0 and is now an alias for the `Router` class. Use that class instead. This alias will be removed in a future version of YUI. @class Controller @constructor @extends Base @deprecated Use `Router` instead. 
@see Router **/ Y.Controller = Y.Router; }, '3.16.0', {"optional": ["querystring-parse"], "requires": ["array-extras", "base-build", "history"]});
## 2015-04-28 - Version 5.2.0

#### Summary

This release adds several new features for expanded configuration, support for SSL ciphers, several bugfixes, and improved tests.

#### Features
- New parameters to class `rabbitmq`:
  - `ssl_ciphers`
- New parameters to class `rabbitmq::config`:
  - `interface`
  - `ssl_interface`
- New parameters to type `rabbitmq_exchange`:
  - `internal`
  - `auto_delete`
  - `durable`
- Adds syncing with Modulesync
- Adds support for SSL ciphers
- Adds `file_limit` support for RedHat platforms

#### Bugfixes
- Will not create `rabbitmqadmin.conf` if admin is disabled
- Fixes `check_password`
- Fix to allow bindings and queues to be created when a non-default management port is being used by rabbitmq. (MODULES-1856)
- `rabbitmq_policy` converts known parameters to integers
- Updates apt key for full fingerprint compliance.
- Adds a missing `routing_key` param to the rabbitmqadmin absent binding call.

## 2015-03-10 - Version 5.1.0

#### Summary

This release adds several features for greater flexibility in the configuration of rabbitmq, includes a number of bug fixes, and bumps the minimum required version of puppetlabs-stdlib to 3.0.0.
#Changes to defaults - The default environment variables in `rabbitmq::config` have been renamed from `RABBITMQ_NODE_PORT` and `<API key>` to `NODE_PORT` and `NODE_IP_ADDRESS` (MODULES-1673) #Features - New parameters to class `rabbitmq` - `file_limit` - `interface` - `ldap_other_bind` - `<API key>` - `ssl_interface` - `ssl_versions` - `rabbitmq_group` - `rabbitmq_home` - `rabbitmq_user` - Add `rabbitmq_queue` and `rabbitmq_binding` types - Update the providers to be able to retry commands #Bugfixes - Cleans up the formatting for rabbitmq.conf for readability - Update tag splitting in the `rabbitmqctl` provider for `rabbitmq_user` to work with comma or space separated tags - Do not enforce the source value for the yum provider (MODULES-1631) - Fix conditional around `$pin` - Remove broken SSL option in rabbitmqadmin.conf (MODULES-1691) - Fix issues in `rabbitmq_user` with admin and no tags - Fix issues in `rabbitmq_user` with tags not being sorted - Fix broken check for existing exchanges in `rabbitmq_exchange` ## 2014-12-22 - Version 5.0.0 Summary This release fixes a longstanding security issue where the rabbitmq erlang cookie was exposed as a fact by managing the cookie with a provider. It also drops support for Puppet 2.7, adds many features and fixes several bugs. # <API key> Changes - Removed the <API key> fact and replaced the logic to manage that cookie with a provider. 
- Dropped official support for Puppet 2.7 (EOL 9/30/2014 https://groups.google.com/forum/#!topic/puppet-users/QLguMcLraLE ) - Changed the default value of $rabbitmq::params::<API key> to not contain a variable - Removed deprecated parameters: $rabbitmq::cluster_disk_nodes, $rabbitmq::server::manage_service, and $rabbitmq::server::<API key> # Features - Add tcp_keepalive parameter to enable TCP keepalive - Use https to download rabbitmqadmin tool when $rabbitmq::ssl is true - Add key_content parameter for offline Debian package installations - Use 16 character apt key to avoid potential collisions - Add rabbitmq_policy type, including support for rabbitmq <3.2.0 - Add rabbitmq::ensure_repo parameter - Add ability to change rabbitmq_user password - Allow disk as a valid cluster node type # Bugfixes - Avoid attempting to install rabbitmqadmin via a proxy (since it is downloaded from localhost) - Optimize check for RHEL GPG key - Configure ssl_listener in stomp only if using ssl - Use rpm as default package provider for RedHat, bringing the module in line with the documented instructions to manage erlang separately and allowing the default version and source parameters to become meaningful - Configure cacertfile only if verify_none is not set - Use -q flag for rabbitmqctl commands to avoid parsing inconsistent debug output - Use the -m flag for rabbitmqplugins commands, again to avoid parsing inconsistent debug output - Strip backslashes from the rabbitmqctl output to avoid parsing issues - Fix limitation where version parameter was ignored - Add /etc/rabbitmq/rabbitmqadmin.conf to fix rabbitmqadmin port usage when ssl is on - Fix linter errors and warnings - Add, update, and fix tests - Update docs ## 2014-08-20 - Version 4.1.0 Summary This release adds several new features, fixes bugs, and improves tests and documentation. 
# Features - Autorequire the rabbitmq-server service in the rabbitmq_vhost type - Add credentials to rabbitmqadmin URL - Added $ssl_only parameter to rabbitmq, rabbitmq::params, and rabbitmq::config - Added property tags to rabbitmq_user provider # Bugfixes - Fix erroneous commas in rabbitmq::config - Use correct ensure value for the rabbitmq_stomp rabbitmq_plugin - Set HOME env variable to nil when leveraging rabbitmq to remove type error from Python script - Fix location for rabbitmq-plugins for RHEL - Remove validation for package_source to allow it to be set to false - Allow LDAP auth configuration without configuring stomp - Added missing $ssl_verify and $<API key> to rabbitmq::config ## 2014-05-16 - Version 4.0.0 Summary This release includes many new features and bug fixes. With the exception of erlang management this should be backwards compatible with 3.1.0. # <API key> Changes - erlang_manage was removed. You will need to manage erlang separately. See the README for more information on how to configure this. # Features - Improved SSL support - Add LDAP support - Add ability to manage RabbitMQ repositories - Add ability to manage Erlang kernel configuration options - Improved handling of user tags - Use nanliu-staging module instead of hardcoded 'curl' - Switch to yum or zypper provider instead of rpm - Add ability to manage STOMP plugin installation. - Allow empty permission fields - Convert existing system tests to beaker acceptance tests. 
# Bugfixes - exchanges no longer recreated on each puppet run if non-default vhost is used - Allow port to be UNSET - Re-added rabbitmq::server class - Deprecated previously unused manage_service variable in favor of service_manage - Use correct key for rabbitmq apt::source - <API key> variable removed - It previously did nothing, will now at least throw a warning if you try to use it - Remove unnecessary dependency on Class['rabbitmq::repo::rhel'] in rabbitmq::install ## 2013-09-14 - Version 3.1.0 Summary This release focuses on a few small (but critical) bugfixes as well as extends the amount of custom RabbitMQ configuration you can do with the module. # Features - You can now change RabbitMQ 'Config Variables' via the parameter `config_variables`. - You can now change RabbitMQ 'Environment Variables' via the parameter `<API key>`. - ArchLinux support added. # Fixes - Make use of the user/password parameters in rabbitmq_exchange{} - Correct the read/write parameter order on set_permissions/list_permissions as they were reversed. - Make the module pull down 3.1.5 by default. ## 2013-07-18 3.0.0 Summary This release heavily refactors the RabbitMQ and changes functionality in several key ways. Please pay attention to the new README.md file for details of how to interact with the class now. Puppet 3 and RHEL are now fully supported. The default version of RabbitMQ has changed to a 3.x release. # Bugfixes - Improve travis testing options. - Stop reimporting the GPG key on every run on RHEL and Debian. - Fix documentation to make it clear you don't have to set provider => each time. - Reference the standard rabbitmq port in the documentation instead of a custom port. - Fixes to the README formatting. # Features - Refactor the module to fix RHEL support. All interaction with the module is now done through the main rabbitmq class. 
- Add support for mirrored queues (Only on Debian family distributions currently) - Add rabbitmq_exchange provider (using rabbitmqadmin) - Add new `rabbitmq` class parameters: - `manage_service`: Boolean to choose if Puppet should manage the service. (For pacemaker/HA setups) - Add SuSE support. # Incompatible Changes - Rabbitmq::server has been removed and is now rabbitmq::config. You should not use this class directly, only via the main rabbitmq class. ## 2013-04-11 2.1.0 - remove puppetversion from rabbitmq.config template - add cluster support - escape resource names in regexp ## 2012-07-31 Jeff McCune <jeff@puppetlabs.com> 2.0.2 - Re-release 2.0.1 with $EDITOR droppings cleaned up ## 2012-05-03 2.0.0 - added support for new-style admin users - added support for rabbitmq 2.7.1 ## 2011-06-14 Dan Bode <dan@Puppetlabs.com> 2.0.0rc1 - Massive refactor: - added native types for user/vhost/user_permissions - added apt support for vendor packages - added smoke tests ## 2011-04-08 Jeff McCune <jeff@puppetlabs.com> 1.0.4 - Update module for RabbitMQ 2.4.1 and <API key> package. ## 2011-03-24 1.0.3 - Initial release to the forge. Reviewed by Cody. Whitespace is good. ## 2011-03-22 1.0.2 - Whitespace only fix again... ack '\t' is my friend... ## 2011-03-22 1.0.1 - Whitespace only fix. ## 2011-03-22 1.0.0 - Initial Release. Manage the package, file and service.
#include <linux/init.h> #include <asm/page.h> #include <asm/sizes.h> #include <asm/mach/map.h> #include "hardware.h" #define <API key> 0x02020000 #define <API key> 0x021e8000 #define <API key> 0x021ec000 #define <API key> 0x021f0000 #define <API key> 0x021f4000 /* * <API key> is put in the middle to force the expansion * of IMX6Q_UART##n##_BASE_ADDR. */ #define <API key>(n) IMX6Q_UART##n##_BASE_ADDR #define IMX6Q_UART_BASE(n) <API key>(n) #define <API key> IMX6Q_UART_BASE(<API key>) static struct map_desc imx_lluart_desc = { #ifdef <API key> .virtual = IMX_IO_P2V(<API key>), .pfn = __phys_to_pfn(<API key>), .length = 0x4000, .type = MT_DEVICE, #endif }; void __init imx_lluart_map_io(void) { if (imx_lluart_desc.virtual) iotable_init(&imx_lluart_desc, 1); }
/** * List compiled by mystix on the extjs.com forums. * Thank you Mystix! */ /* Slovak Translation by Michal Thomka * 14 April 2007 */ Ext.UpdateManager.defaults.indicatorText = '<div class="loading-indicator">Nahrávam...</div>'; if(Ext.View){ Ext.View.prototype.emptyText = ""; } if(Ext.grid.GridPanel){ Ext.grid.GridPanel.prototype.ddText = "{0} označených riadkov"; } if(Ext.TabPanelItem){ Ext.TabPanelItem.prototype.closeText = "Zavrieť túto záložku"; } if(Ext.form.Field){ Ext.form.Field.prototype.invalidText = "Hodnota v tomto poli je nesprávna"; } if(Ext.LoadMask){ Ext.LoadMask.prototype.msg = "Nahrávam..."; } Date.monthNames = [ "Január", "Február", "Marec", "Apríl", "Máj", "Jún", "Júl", "August", "September", "Október", "November", "December" ]; Date.dayNames = [ "Nedeľa", "Pondelok", "Utorok", "Streda", "Štvrtok", "Piatok", "Sobota" ]; if(Ext.MessageBox){ Ext.MessageBox.buttonText = { ok : "OK", cancel : "Zrušiť", yes : "Áno", no : "Nie" }; } if(Ext.util.Format){ Ext.util.Format.date = function(v, format){ if(!v) return ""; if(!(v instanceof Date)) v = new Date(Date.parse(v)); return v.dateFormat(format || "d.m.Y"); }; } if(Ext.DatePicker){ Ext.apply(Ext.DatePicker.prototype, { todayText : "Dnes", minText : "Tento dátum je menší ako minimálny možný dátum", maxText : "Tento dátum je väčší ako maximálny možný dátum", disabledDaysText : "", disabledDatesText : "", monthNames : Date.monthNames, dayNames : Date.dayNames, nextText : 'Ďalší Mesiac (Control+Doprava)', prevText : 'Predch. Mesiac (Control+Doľava)', monthYearText : 'Vyberte Mesiac (Control+Hore/Dole pre posun rokov)', todayTip : "{0} (Medzerník)", format : "d.m.Y" }); } if(Ext.PagingToolbar){ Ext.apply(Ext.PagingToolbar.prototype, { beforePageText : "Strana", afterPageText : "z {0}", firstText : "Prvá Strana", prevText : "Predch. 
Strana", nextText : "Ďalšia Strana", lastText : "Posledná strana", refreshText : "Obnoviť", displayMsg : "Zobrazujem {0} - {1} z {2}", emptyMsg : 'Žiadne dáta' }); } if(Ext.form.TextField){ Ext.apply(Ext.form.TextField.prototype, { minLengthText : "Minimálna dĺžka pre toto pole je {0}", maxLengthText : "Maximálna dĺžka pre toto pole je {0}", blankText : "Toto pole je povinné", regexText : "", emptyText : null }); } if(Ext.form.NumberField){ Ext.apply(Ext.form.NumberField.prototype, { minText : "Minimálna hodnota pre toto pole je {0}", maxText : "Maximálna hodnota pre toto pole je {0}", nanText : "{0} je nesprávne číslo" }); } if(Ext.form.DateField){ Ext.apply(Ext.form.DateField.prototype, { disabledDaysText : "Zablokované", disabledDatesText : "Zablokované", minText : "Dátum v tomto poli musí byť až po {0}", maxText : "Dátum v tomto poli musí byť pred {0}", invalidText : "{0} nie je správny dátum - musí byť vo formáte {1}", format : "d.m.Y" }); } if(Ext.form.ComboBox){ Ext.apply(Ext.form.ComboBox.prototype, { loadingText : "Nahrávam...", valueNotFoundText : undefined }); } if(Ext.form.VTypes){ Ext.apply(Ext.form.VTypes, { emailText : 'Toto pole musí byť e-mailová adresa vo formáte "user@example.com"', urlText : 'Toto pole musí byť URL vo formáte "http:/'+'/www.example.com"', alphaText : 'Toto pole može obsahovať iba písmená a znak _', alphanumText : 'Toto pole može obsahovať iba písmená, čísla a znak _' }); } if(Ext.grid.GridView){ Ext.apply(Ext.grid.GridView.prototype, { sortAscText : "Zoradiť vzostupne", sortDescText : "Zoradiť zostupne", lockText : "Zamknúť stľpec", unlockText : "Odomknúť stľpec", columnsText : "Stľpce" }); } if(Ext.grid.PropertyColumnModel){ Ext.apply(Ext.grid.PropertyColumnModel.prototype, { nameText : "Názov", valueText : "Hodnota", dateFormat : "d.m.Y" }); } if(Ext.layout.BorderLayout && Ext.layout.BorderLayout.SplitRegion){ Ext.apply(Ext.layout.BorderLayout.SplitRegion.prototype, { splitTip : "Potiahnite pre zmenu rozmeru", 
collapsibleSplitTip : "Potiahnite pre zmenu rozmeru. Dvojklikom schováte." }); }
/* <API key>: GPL-2.0 */ /* * ATI Frame Buffer Device Driver Core Definitions */ #include <linux/spinlock.h> #include <linux/wait.h> /* * Elements of the hardware specific atyfb_par structure */ struct crtc { u32 vxres; u32 vyres; u32 xoffset; u32 yoffset; u32 bpp; u32 h_tot_disp; u32 h_sync_strt_wid; u32 v_tot_disp; u32 v_sync_strt_wid; u32 vline_crnt_vline; u32 off_pitch; u32 gen_cntl; u32 dp_pix_width; /* acceleration */ u32 dp_chain_mask; /* acceleration */ #ifdef <API key> u32 horz_stretching; u32 vert_stretching; u32 ext_vert_stretch; u32 shadow_h_tot_disp; u32 <API key>; u32 shadow_v_tot_disp; u32 <API key>; u32 lcd_gen_cntl; u32 lcd_config_panel; u32 lcd_index; #endif }; struct aty_interrupt { wait_queue_head_t wait; unsigned int count; int pan_display; }; struct pll_info { int pll_max; int pll_min; int sclk, mclk, mclk_pm, xclk; int ref_div; int ref_clk; int ecp_max; }; typedef struct { u16 unknown1; u16 PCLK_min_freq; u16 PCLK_max_freq; u16 unknown2; u16 ref_freq; u16 ref_divider; u16 unknown3; u16 MCLK_pwd; u16 MCLK_max_freq; u16 XCLK_max_freq; u16 SCLK_freq; } __attribute__ ((packed)) PLL_BLOCK_MACH64; struct pll_514 { u8 m; u8 n; }; struct pll_18818 { u32 program_bits; u32 locationAddr; u32 period_in_ps; u32 post_divider; }; struct pll_ct { u8 pll_ref_div; u8 pll_gen_cntl; u8 mclk_fb_div; u8 mclk_fb_mult; /* 2 ro 4 */ u8 sclk_fb_div; u8 pll_vclk_cntl; u8 vclk_post_div; u8 vclk_fb_div; u8 pll_ext_cntl; u8 ext_vpll_cntl; u8 spll_cntl2; u32 dsp_config; /* Mach64 GTB DSP */ u32 dsp_on_off; /* Mach64 GTB DSP */ u32 dsp_loop_latency; u32 fifo_size; u32 xclkpagefaultdelay; u32 xclkmaxrasdelay; u8 xclk_ref_div; u8 xclk_post_div; u8 mclk_post_div_real; u8 xclk_post_div_real; u8 vclk_post_div_real; u8 features; #ifdef <API key> u32 xres; /* use for LCD stretching/scaling */ #endif }; /* for pll_ct.features */ #define DONT_USE_SPLL 0x1 #define DONT_USE_XDLL 0x2 #define USE_CPUCLK 0x4 #define POWERDOWN_PLL 0x8 union aty_pll { struct pll_ct ct; struct pll_514 
ibm514; struct pll_18818 ics2595; }; /* * The hardware parameters for each card */ struct atyfb_par { u32 pseudo_palette[16]; struct { u8 red, green, blue; } palette[256]; const struct aty_dac_ops *dac_ops; const struct aty_pll_ops *pll_ops; void __iomem *ati_regbase; unsigned long clk_wr_offset; /* meaning overloaded, clock id by CT */ struct crtc crtc; union aty_pll pll; struct pll_info pll_limits; u32 features; u32 ref_clk_per; u32 pll_per; u32 mclk_per; u32 xclk_per; u8 bus_type; u8 ram_type; u8 mem_refresh_rate; u16 pci_id; u32 accel_flags; int blitter_may_be_busy; int asleep; int lock_blank; unsigned long res_start; unsigned long res_size; struct pci_dev *pdev; #ifdef __sparc__ struct pci_mmap_map *mmap_map; u8 mmaped; #endif int open; #ifdef <API key> unsigned long bios_base_phys; unsigned long bios_base; unsigned long lcd_table; u16 lcd_width; u16 lcd_height; u32 lcd_pixclock; u16 lcd_refreshrate; u16 lcd_htotal; u16 lcd_hdisp; u16 lcd_hsync_dly; u16 lcd_hsync_len; u16 lcd_vtotal; u16 lcd_vdisp; u16 lcd_vsync_len; u16 lcd_right_margin; u16 lcd_lower_margin; u16 lcd_hblank_len; u16 lcd_vblank_len; #endif unsigned long aux_start; /* auxiliary aperture */ unsigned long aux_size; struct aty_interrupt vblank; unsigned long irq_flags; unsigned int irq; spinlock_t int_lock; int wc_cookie; u32 mem_cntl; struct crtc saved_crtc; union aty_pll saved_pll; }; /* * ATI Mach64 features */ #define M64_HAS(feature) ((par)->features & (M64F_##feature)) #define M64F_RESET_3D 0x00000001 #define M64F_MAGIC_FIFO 0x00000002 #define M64F_GTB_DSP 0x00000004 #define M64F_FIFO_32 0x00000008 #define <API key> 0x00000010 #define M64F_MAGIC_POSTDIV 0x00000020 #define M64F_INTEGRATED 0x00000040 #define M64F_CT_BUS 0x00000080 #define M64F_VT_BUS 0x00000100 #define M64F_MOBIL_BUS 0x00000200 #define M64F_GX 0x00000400 #define M64F_CT 0x00000800 #define M64F_VT 0x00001000 #define M64F_GT 0x00002000 #define <API key> 0x00004000 #define M64F_G3_PB_1_1 0x00008000 #define M64F_G3_PB_1024x768 
0x00010000 #define M64F_EXTRA_BRIGHT 0x00020000 #define M64F_LT_LCD_REGS 0x00040000 #define M64F_XL_DLL 0x00080000 #define M64F_MFB_FORCE_4 0x00100000 #define M64F_HW_TRIPLE 0x00200000 #define M64F_XL_MEM 0x00400000 /* * Register access */ static inline u32 aty_ld_le32(int regindex, const struct atyfb_par *par) { /* Hack for bloc 1, should be cleanly optimized by compiler */ if (regindex >= 0x400) regindex -= 0x800; #ifdef CONFIG_ATARI return in_le32(par->ati_regbase + regindex); #else return readl(par->ati_regbase + regindex); #endif } static inline void aty_st_le32(int regindex, u32 val, const struct atyfb_par *par) { /* Hack for bloc 1, should be cleanly optimized by compiler */ if (regindex >= 0x400) regindex -= 0x800; #ifdef CONFIG_ATARI out_le32(par->ati_regbase + regindex, val); #else writel(val, par->ati_regbase + regindex); #endif } static inline void aty_st_le16(int regindex, u16 val, const struct atyfb_par *par) { /* Hack for bloc 1, should be cleanly optimized by compiler */ if (regindex >= 0x400) regindex -= 0x800; #ifdef CONFIG_ATARI out_le16(par->ati_regbase + regindex, val); #else writel(val, par->ati_regbase + regindex); #endif } static inline u8 aty_ld_8(int regindex, const struct atyfb_par *par) { /* Hack for bloc 1, should be cleanly optimized by compiler */ if (regindex >= 0x400) regindex -= 0x800; #ifdef CONFIG_ATARI return in_8(par->ati_regbase + regindex); #else return readb(par->ati_regbase + regindex); #endif } static inline void aty_st_8(int regindex, u8 val, const struct atyfb_par *par) { /* Hack for bloc 1, should be cleanly optimized by compiler */ if (regindex >= 0x400) regindex -= 0x800; #ifdef CONFIG_ATARI out_8(par->ati_regbase + regindex, val); #else writeb(val, par->ati_regbase + regindex); #endif } #if defined(CONFIG_PM) || defined(<API key>) || \ defined (<API key>) || defined (<API key>) extern void aty_st_lcd(int index, u32 val, const struct atyfb_par *par); extern u32 aty_ld_lcd(int index, const struct atyfb_par *par); 
#endif /* * DAC operations */ struct aty_dac_ops { int (*set_dac) (const struct fb_info * info, const union aty_pll * pll, u32 bpp, u32 accel); }; extern const struct aty_dac_ops aty_dac_ibm514; /* IBM RGB514 */ extern const struct aty_dac_ops aty_dac_ati68860b; /* ATI 68860-B */ extern const struct aty_dac_ops aty_dac_att21c498; /* AT&T 21C498 */ extern const struct aty_dac_ops aty_dac_unsupported; /* unsupported */ extern const struct aty_dac_ops aty_dac_ct; /* Integrated */ /* * Clock operations */ struct aty_pll_ops { int (*var_to_pll) (const struct fb_info * info, u32 vclk_per, u32 bpp, union aty_pll * pll); u32 (*pll_to_var) (const struct fb_info * info, const union aty_pll * pll); void (*set_pll) (const struct fb_info * info, const union aty_pll * pll); void (*get_pll) (const struct fb_info *info, union aty_pll * pll); int (*init_pll) (const struct fb_info * info, union aty_pll * pll); void (*resume_pll)(const struct fb_info *info, union aty_pll *pll); }; extern const struct aty_pll_ops aty_pll_ati18818_1; /* ATI 18818 */ extern const struct aty_pll_ops aty_pll_stg1703; /* STG 1703 */ extern const struct aty_pll_ops aty_pll_ch8398; /* Chrontel 8398 */ extern const struct aty_pll_ops aty_pll_att20c408; /* AT&T 20C408 */ extern const struct aty_pll_ops aty_pll_ibm514; /* IBM RGB514 */ extern const struct aty_pll_ops aty_pll_unsupported; /* unsupported */ extern const struct aty_pll_ops aty_pll_ct; /* Integrated */ extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll); extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par); /* * Hardware cursor support */ extern int aty_init_cursor(struct fb_info *info); /* * Hardware acceleration */ static inline void wait_for_fifo(u16 entries, const struct atyfb_par *par) { while ((aty_ld_le32(FIFO_STAT, par) & 0xffff) > ((u32) (0x8000 >> entries))); } static inline void wait_for_idle(struct atyfb_par *par) { wait_for_fifo(16, par); while ((aty_ld_le32(GUI_STAT, par) & 1) != 0); 
par->blitter_may_be_busy = 0; } extern void aty_reset_engine(const struct atyfb_par *par); extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info); extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par); void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area); void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect); void atyfb_imageblit(struct fb_info *info, const struct fb_image *image);
namespace System.Drawing.Drawing2D { public enum SmoothingMode { Invalid = QualityMode.Invalid, Default = QualityMode.Default, HighSpeed = QualityMode.Low, HighQuality = QualityMode.High, None, AntiAlias } }
namespace System.Drawing.Drawing2D { public enum InterpolationMode { Invalid = QualityMode.Invalid, Default = QualityMode.Default, Low = QualityMode.Low, High = QualityMode.High, Bilinear, Bicubic, NearestNeighbor, HighQualityBilinear, HighQualityBicubic } }
<?php // Moodle is free software: you can redistribute it and/or modify // (at your option) any later version. // Moodle is distributed in the hope that it will be useful, // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the namespace core_privacy\local\request; defined('MOODLE_INTERNAL') || die(); class approved_userlist extends userlist_base { /** * Create a new approved userlist. * * @param \context $context The context. * @param string $component the frankenstyle component name. * @param \int[] $userids The list of userids present in this list. */ public function __construct(\context $context, string $component, array $userids) { parent::__construct($context, $component); $this->set_userids($userids); } /** * Create an approved userlist from a userlist. * * @param userlist $userlist The source list * @return approved_userlist The newly created approved userlist. */ public static function <API key>(userlist $userlist) : approved_userlist { $newlist = new static($userlist->get_context(), $userlist->get_component(), $userlist->get_userids()); return $newlist; } }
<?php App::uses('Router', 'Routing'); App::uses('Hash', 'Utility'); App::uses('Inflector', 'Utility'); /** * Abstract base class for all other Helpers in CakePHP. * Provides common methods and features. * * @package Cake.View */ class Helper extends Object { /** * Settings for this helper. * * @var array */ public $settings = array(); /** * List of helpers used by this helper * * @var array */ public $helpers = array(); /** * A helper lookup table used to lazy load helper objects. * * @var array */ protected $_helperMap = array(); /** * The current theme name if any. * * @var string */ public $theme = null; /** * Request object * * @var CakeRequest */ public $request = null; /** * Plugin path * * @var string */ public $plugin = null; /** * Holds the fields array('field_name' => array('type' => 'string', 'length' => 100), * primaryKey and validates array('field_name') * * @var array */ public $fieldset = array(); /** * Holds tag templates. * * @var array */ public $tags = array(); /** * Holds the content to be cleaned. * * @var mixed */ protected $_tainted = null; /** * Holds the cleaned content. * * @var mixed */ protected $_cleaned = null; /** * The View instance this helper is attached to * * @var View */ protected $_View; /** * A list of strings that should be treated as suffixes, or * sub inputs for a parent input. This is used for date/time * inputs primarily. * * @var array */ protected $_fieldSuffixes = array( 'year', 'month', 'day', 'hour', 'min', 'second', 'meridian' ); /** * The name of the current model entities are in scope of. * * @see Helper::setEntity() * @var string */ protected $_modelScope; /** * The name of the current model association entities are in scope of. * * @see Helper::setEntity() * @var string */ protected $_association; /** * The dot separated list of elements the current field entity is for. 
* * @see Helper::setEntity() * @var string */ protected $_entityPath; /** * Minimized attributes * * @var array */ protected $<API key> = array( 'compact', 'checked', 'declare', 'readonly', 'disabled', 'selected', 'defer', 'ismap', 'nohref', 'noshade', 'nowrap', 'multiple', 'noresize', 'autoplay', 'controls', 'loop', 'muted', 'required', 'novalidate', 'formnovalidate' ); /** * Format to attribute * * @var string */ protected $_attributeFormat = '%s="%s"'; /** * Format to attribute * * @var string */ protected $<API key> = '%s="%s"'; /** * Default Constructor * * @param View $View The View this helper is being attached to. * @param array $settings Configuration settings for the helper. */ public function __construct(View $View, $settings = array()) { $this->_View = $View; $this->request = $View->request; if ($settings) { $this->settings = Hash::merge($this->settings, $settings); } if (!empty($this->helpers)) { $this->_helperMap = ObjectCollection::<API key>($this->helpers); } } /** * Provide non fatal errors on missing method calls. * * @param string $method Method to invoke * @param array $params Array of params for the method. * @return void */ public function __call($method, $params) { trigger_error(__d('cake_dev', 'Method %1$s::%2$s does not exist', get_class($this), $method), E_USER_WARNING); } /** * Lazy loads helpers. Provides access to deprecated request properties as well. * * @param string $name Name of the property being accessed. * @return mixed Helper or property found at $name * @deprecated Accessing request properties through this method is deprecated and will be removed in 3.0. 
*/
	public function __get($name) {
		// Lazily instantiate mapped helpers on first access.
		if (isset($this->_helperMap[$name]) && !isset($this->{$name})) {
			$settings = array('enabled' => false) + (array)$this->_helperMap[$name]['settings'];
			$this->{$name} = $this->_View->loadHelper($this->_helperMap[$name]['class'], $settings);
		}
		if (isset($this->{$name})) {
			return $this->{$name};
		}
		// Deprecated passthrough to request properties.
		switch ($name) {
			case 'base':
			case 'here':
			case 'webroot':
			case 'data':
				return $this->request->{$name};
			case 'action':
				return isset($this->request->params['action']) ? $this->request->params['action'] : '';
			case 'params':
				return $this->request;
		}
	}

/**
 * Provides backwards compatibility access for setting values to the request object.
 *
 * @param string $name Name of the property being accessed.
 * @param mixed $value Value to set.
 * @return void
 * @deprecated This method will be removed in 3.0
 */
	public function __set($name, $value) {
		switch ($name) {
			case 'base':
			case 'here':
			case 'webroot':
			case 'data':
				$this->request->{$name} = $value;
				return;
			case 'action':
				$this->request->params['action'] = $value;
				return;
		}
		// Anything else becomes a plain dynamic property.
		$this->{$name} = $value;
	}

/**
 * Returns an HTML-escaped URL for the given route.
 *
 * @param string|array $url A routing array or URL string.
 * @param boolean $full Whether to generate a full base URL.
 * @return string Escaped URL.
 */
	public function url($url = null, $full = false) {
		return h(Router::url($url, $full));
	}

/**
 * Checks if a file exists when theme is used, if no file is found default location is returned
 *
 * @param string $file The file to create a webroot path to.
 * @return string Web accessible path to file.
 */
	public function webroot($file) {
		// Split off any query string so only the path part is resolved.
		$asset = explode('?', $file);
		$asset[1] = isset($asset[1]) ? '?' . $asset[1] : null;
		$webPath = "{$this->request->webroot}" . $asset[0];
		$file = $asset[0];
		if (!empty($this->theme)) {
			$file = trim($file, '/');
			$theme = $this->theme . '/';
			if (DS === '\\') {
				$file = str_replace('/', '\\', $file);
			}
			// Prefer an app-level override in webroot/theme/<theme>/...
			if (file_exists(Configure::read('App.www_root') . 'theme' . DS . $this->theme . DS . $file)) {
				$webPath = "{$this->request->webroot}theme/" . $theme . $asset[0];
			} else {
				// ...otherwise fall back to the theme's own webroot directory.
				$themePath = App::themePath($this->theme);
				$path = $themePath . 'webroot' . DS . $file;
				if (file_exists($path)) {
					$webPath = "{$this->request->webroot}theme/" . $theme . $asset[0];
				}
			}
		}
		// Collapse accidental double slashes before re-appending the query string.
		if (strpos($webPath, '//') !== false) {
			return str_replace('//', '/', $webPath . $asset[1]);
		}
		return $webPath . $asset[1];
	}

/**
 * Generate URL for given asset file. Depending on options passed provides full URL with domain name.
 * Also calls Helper::assetTimestamp() to add timestamp to local files
 *
 * @param string|array $path Path string or URL array
 * @param array $options Options array. Possible keys:
 * `fullBase` Return full URL with domain name
 * `pathPrefix` Path prefix for relative URLs
 * `ext` Asset extension to append
 * `plugin` False value will prevent parsing path as a plugin
 * @return string Generated URL
 */
	public function assetUrl($path, $options = array()) {
		// Routing arrays go straight through Router.
		if (is_array($path)) {
			return $this->url($path, !empty($options['fullBase']));
		}
		// Absolute URLs (scheme://) are returned untouched.
		if (strpos($path, '://') !== false) {
			return $path;
		}
		if (!array_key_exists('plugin', $options) || $options['plugin'] !== false) {
			list($plugin, $path) = $this->_View->pluginSplit($path, false);
		}
		if (!empty($options['pathPrefix']) && $path[0] !== '/') {
			$path = $options['pathPrefix'] . $path;
		}
		// Append the extension unless a query string or the extension is already present.
		if (
			!empty($options['ext']) &&
			strpos($path, '?') === false &&
			substr($path, -strlen($options['ext'])) !== $options['ext']
		) {
			$path .= $options['ext'];
		}
		// Protocol-relative (//host/...) URLs are also left alone.
		if (preg_match('|^([a-z0-9]+:)?//|', $path)) {
			return $path;
		}
		if (isset($plugin)) {
			$path = Inflector::underscore($plugin) . '/' . $path;
		}
		$path = $this->_encodeUrl($this->assetTimestamp($this->webroot($path)));

		if (!empty($options['fullBase'])) {
			$path = rtrim(Router::fullBaseUrl(), '/') . '/' . ltrim($path, '/');
		}
		return $path;
	}

/**
 * Encodes a URL for use in HTML attributes.
 *
 * @param string $url The URL to encode.
 * @return string The URL encoded for both URL & HTML contexts.
*/
	protected function _encodeUrl($url) {
		// Percent-encode each path segment (decoding first so already-encoded
		// input is not double-encoded), splice the encoded path back into the
		// URL, then HTML-escape the whole thing via h().
		$path = parse_url($url, PHP_URL_PATH);
		$parts = array_map('rawurldecode', explode('/', $path));
		$parts = array_map('rawurlencode', $parts);
		$encoded = implode('/', $parts);
		return h(str_replace($path, $encoded, $url));
	}

/**
 * Adds a timestamp query string to a local asset path based on the
 * `Asset.timestamp` Configure value: applied when 'force', or when true
 * while debug > 0. Webroot, theme and plugin asset locations are checked
 * in that order.
 *
 * @param string $path The webroot-relative asset path to timestamp.
 * @return string Path with `?<mtime>` appended, or unchanged.
 */
	public function assetTimestamp($path) {
		$stamp = Configure::read('Asset.timestamp');
		// Timestamping is on when forced, or enabled while debugging.
		$timestampEnabled = $stamp === 'force' || ($stamp === true && Configure::read('debug') > 0);
		if ($timestampEnabled && strpos($path, '?') === false) {
			// Strip the webroot prefix so the file can be resolved on disk.
			$filepath = preg_replace(
				'/^' . preg_quote($this->request->webroot, '/') . '/',
				'',
				urldecode($path)
			);
			$webrootPath = WWW_ROOT . str_replace('/', DS, $filepath);
			if (file_exists($webrootPath)) {
				//@<API key>
				return $path . '?' . @filemtime($webrootPath);
				//@<API key>
			}
			// Not under the app webroot: try theme, then plugin webroot.
			$segments = explode('/', ltrim($filepath, '/'));
			if ($segments[0] === 'theme') {
				$theme = $segments[1];
				unset($segments[0], $segments[1]);
				$themePath = App::themePath($theme) . 'webroot' . DS . implode(DS, $segments);
				//@<API key>
				return $path . '?' . @filemtime($themePath);
				//@<API key>
			} else {
				$plugin = Inflector::camelize($segments[0]);
				if (CakePlugin::loaded($plugin)) {
					unset($segments[0]);
					$pluginPath = CakePlugin::path($plugin) . 'webroot' . DS . implode(DS, $segments);
					//@<API key>
					return $path . '?' . @filemtime($pluginPath);
					//@<API key>
				}
			}
		}
		return $path;
	}

/**
 * Used to remove harmful tags from content. Removes a number of well known XSS attacks
 * from content. However, is not guaranteed to remove all possibilities. Escaping
 * content is the best way to prevent all possible attacks.
 *
 * @param string|array $output Either an array of strings to clean or a single string to clean.
* @return string|array cleaned content for output
 * @deprecated This method will be removed in 3.0
 */
	public function clean($output) {
		// Reset _tainted/_cleaned state from any previous run.
		$this->_reset();
		if (empty($output)) {
			return null;
		}
		// Arrays are cleaned recursively, preserving key structure.
		if (is_array($output)) {
			foreach ($output as $key => $value) {
				$return[$key] = $this->clean($value);
			}
			return $return;
		}
		$this->_tainted = $output;
		$this->_clean();
		return $this->_cleaned;
	}

/**
 * Returns a space-delimited string with items of the $options array. If a key
 * of $options array happens to be one of those listed in `Helper::$<API key>`
 *
 * And its value is one of:
 *
 * - '1' (string)
 * - 1 (integer)
 * - true (boolean)
 * - 'true' (string)
 *
 * Then the value will be reset to be identical with key's name.
 * If the value is not one of these 3, the parameter is not output.
 *
 * 'escape' is a special option in that it controls the conversion of
 * attributes to their html-entity encoded equivalents. Set to false to disable html-encoding.
 *
 * If value for any option key is set to `null` or `false`, that option will be excluded from output.
 *
 * @param array $options Array of options.
 * @param array $exclude Array of options to be excluded, the options here will not be part of the return.
 * @param string $insertBefore String to be inserted before options.
 * @param string $insertAfter String to be inserted after options.
 * @return string Composed attributes.
* @deprecated This method will be moved to HtmlHelper in 3.0
 */
	protected function _parseAttributes($options, $exclude = null, $insertBefore = ' ', $insertAfter = null) {
		if (!is_string($options)) {
			$options = (array)$options + array('escape' => true);
			if (!is_array($exclude)) {
				$exclude = array();
			}
			// 'escape' is always excluded from the output; array_flip() turns
			// the exclude list into a hash for cheap lookups below.
			$exclude = array('escape' => true) + array_flip($exclude);
			$escape = $options['escape'];
			$attributes = array();

			foreach ($options as $key => $value) {
				// null/false values drop the attribute entirely.
				if (!isset($exclude[$key]) && $value !== false && $value !== null) {
					$attributes[] = $this->_formatAttribute($key, $value, $escape);
				}
			}
			$out = implode(' ', $attributes);
		} else {
			// A string is passed through untouched (already-composed attributes).
			$out = $options;
		}
		return $out ? $insertBefore . $out . $insertAfter : '';
	}

/**
 * Formats an individual attribute, and returns the string value of the composed attribute.
 * Works with minimized attributes that have the same value as their name such as 'disabled' and 'checked'
 *
 * @param string $key The name of the attribute to create
 * @param string $value The value of the attribute to create.
 * @param boolean $escape Define if the value must be escaped
 * @return string The composed attribute.
 * @deprecated This method will be moved to HtmlHelper in 3.0
 */
	protected function _formatAttribute($key, $value, $escape = true) {
		if (is_array($value)) {
			$value = implode(' ', $value);
		}
		// Numeric keys mean a bare value, e.g. array('disabled') => disabled="disabled".
		if (is_numeric($key)) {
			return sprintf($this-><API key>, $value, $value);
		}
		// Minimized (boolean) attributes render as key="key" for truthy values
		// and are omitted entirely otherwise.
		$truthy = array(1, '1', true, 'true', $key);
		$isMinimized = in_array($key, $this-><API key>);
		if ($isMinimized && in_array($value, $truthy, true)) {
			return sprintf($this-><API key>, $key, $key);
		}
		if ($isMinimized) {
			return '';
		}
		return sprintf($this->_attributeFormat, $key, ($escape ? h($value) : $value));
	}

/**
 * Returns a string to be used as onclick handler for confirm dialogs.
*
 * @param string $message Message to be displayed
 * @param string $okCode Code to be executed after user chose 'OK'
 * @param string $cancelCode Code to be executed after user chose 'Cancel'
 * @param array $options Array of options
 * @return string onclick JS code
 */
	protected function _confirm($message, $okCode, $cancelCode = '', $options = array()) {
		// json_encode() makes the message a safe JS string literal.
		$message = json_encode($message);
		$confirm = "if (confirm({$message})) { {$okCode} } {$cancelCode}";
		// NOTE(review): h() is applied when 'escape' === false, which reads
		// inverted but is the existing behavior — confirm before changing.
		if (isset($options['escape']) && $options['escape'] === false) {
			$confirm = h($confirm);
		}
		return $confirm;
	}

/**
 * Sets this helper's model and field properties to the dot-separated value-pair in $entity.
 *
 * @param string $entity A field name, like "ModelName.fieldName" or "ModelName.ID.fieldName"
 * @param boolean $setScope Sets the view scope to the model specified in $tagValue
 * @return void
 */
	public function setEntity($entity, $setScope = false) {
		if ($entity === null) {
			$this->_modelScope = false;
		}
		if ($setScope === true) {
			$this->_modelScope = $entity;
		}
		// Hash::filter() drops empty path segments.
		$parts = array_values(Hash::filter(explode('.', $entity)));
		if (empty($parts)) {
			return;
		}
		$count = count($parts);
		$lastPart = isset($parts[$count - 1]) ? $parts[$count - 1] : null;

		// Either 'body' or 'date.month' type inputs.
		if (
			($count === 1 && $this->_modelScope && !$setScope) ||
			(
				$count === 2 &&
				in_array($lastPart, $this->_fieldSuffixes) &&
				$this->_modelScope &&
				$parts[0] !== $this->_modelScope
			)
		) {
			$entity = $this->_modelScope . '.' . $entity;
		}

		// 0.name, 0.created.month style inputs. Excludes inputs with the modelScope in them.
		if (
			$count >= 2 &&
			is_numeric($parts[0]) &&
			!is_numeric($parts[1]) &&
			$this->_modelScope &&
			strpos($entity, $this->_modelScope) === false
		) {
			$entity = $this->_modelScope . '.' . $entity;
		}

		$this->_association = null;

		$isHabtm = (
			isset($this->fieldset[$this->_modelScope]['fields'][$parts[0]]['type']) &&
			$this->fieldset[$this->_modelScope]['fields'][$parts[0]]['type'] === 'multiple'
		);

		// habtm models are special
		if ($count === 1 && $isHabtm) {
			$this->_association = $parts[0];
			$entity = $parts[0] . '.' . $parts[0];
		} else {
			// check for associated model: the last upper-cased segment (other
			// than the first) is treated as the association name.
			$reversed = array_reverse($parts);
			foreach ($reversed as $i => $part) {
				if ($i > 0 && preg_match('/^[A-Z]/', $part)) {
					$this->_association = $part;
					break;
				}
			}
		}
		$this->_entityPath = $entity;
	}

/**
 * Returns the entity reference of the current context as an array of identity parts
 *
 * @return array An array containing the identity elements of an entity
 */
	public function entity() {
		return explode('.', $this->_entityPath);
	}

/**
 * Gets the currently-used model of the rendering context.
 *
 * @return string
 */
	public function model() {
		if ($this->_association) {
			return $this->_association;
		}
		return $this->_modelScope;
	}

/**
 * Gets the currently-used model field of the rendering context.
 * Strips off field suffixes such as year, month, day, hour, min, meridian
 * when the current entity is longer than 2 elements.
 *
 * @return string
 */
	public function field() {
		$entity = $this->entity();
		$count = count($entity);
		$last = $entity[$count - 1];
		// Drop date/time suffixes so e.g. Model.created.month yields 'created'.
		if ($count > 2 && in_array($last, $this->_fieldSuffixes)) {
			$last = isset($entity[$count - 2]) ? $entity[$count - 2] : null;
		}
		return $last;
	}

/**
 * Generates a DOM ID for the selected element, if one is not set.
 * Uses the current View::entity() settings to generate a CamelCased id attribute.
 *
 * @param array|string $options Either an array of html attributes to add $id into, or a string
 * with a view entity path to get a domId for.
 * @param string $id The name of the 'id' attribute.
 * @return mixed If $options was an array, an array will be returned with $id set. If a string
 * was supplied, a string will be returned.
*/
	public function domId($options = null, $id = 'id') {
		// An explicit null id means "suppress the id attribute".
		if (is_array($options) && array_key_exists($id, $options) && $options[$id] === null) {
			unset($options[$id]);
			return $options;
		} elseif (!is_array($options) && $options !== null) {
			// A string is an entity path: set it, then recurse to build the id.
			$this->setEntity($options);
			return $this->domId();
		}

		$entity = $this->entity();
		$model = array_shift($entity);
		// ModelName + CamelCased remaining path segments.
		$dom = $model . implode('', array_map(array('Inflector', 'camelize'), $entity));

		if (is_array($options) && !array_key_exists($id, $options)) {
			$options[$id] = $dom;
		} elseif ($options === null) {
			return $dom;
		}
		return $options;
	}

/**
 * Gets the input field name for the current tag. Creates input name attributes
 * using CakePHP's data[Model][field] formatting.
 *
 * @param array|string $options If an array, should be an array of attributes that $key needs to be added to.
 * If a string or null, will be used as the View entity.
 * @param string $field Field name.
 * @param string $key The name of the attribute to be set, defaults to 'name'
 * @return mixed If an array was given for $options, an array with $key set will be returned.
 * If a string was supplied a string will be returned.
 */
	protected function _name($options = array(), $field = null, $key = 'name') {
		if ($options === null) {
			$options = array();
		} elseif (is_string($options)) {
			// A string $options is actually the field name; normalize.
			$field = $options;
			$options = 0;
		}

		if (!empty($field)) {
			$this->setEntity($field);
		}

		// An existing $key wins; the caller supplied the name explicitly.
		if (is_array($options) && array_key_exists($key, $options)) {
			return $options;
		}

		switch ($field) {
			case '_method':
				$name = $field;
				break;
			default:
				$name = 'data[' . implode('][', $this->entity()) . ']';
		}

		if (is_array($options)) {
			$options[$key] = $name;
			return $options;
		}
		return $name;
	}

/**
 * Gets the data for the current tag
 *
 * @param array|string $options If an array, should be an array of attributes that $key needs to be added to.
 * If a string or null, will be used as the View entity.
 * @param string $field Field name.
* @param string $key The name of the attribute to be set, defaults to 'value'
 * @return mixed If an array was given for $options, an array with $key set will be returned.
 * If a string was supplied a string will be returned.
 */
	public function value($options = array(), $field = null, $key = 'value') {
		if ($options === null) {
			$options = array();
		} elseif (is_string($options)) {
			// A string $options is actually the field name; normalize.
			$field = $options;
			$options = 0;
		}

		// An explicit value in $options takes precedence over request data.
		if (is_array($options) && isset($options[$key])) {
			return $options;
		}

		if (!empty($field)) {
			$this->setEntity($field);
		}
		$result = null;
		$data = $this->request->data;

		$entity = $this->entity();
		if (!empty($data) && is_array($data) && !empty($entity)) {
			$result = Hash::get($data, implode('.', $entity));
		}

		// habtm fields post as data[Model][Model]; fall back to those shapes.
		$habtmKey = $this->field();
		if (empty($result) && isset($data[$habtmKey][$habtmKey]) && is_array($data[$habtmKey])) {
			$result = $data[$habtmKey][$habtmKey];
		} elseif (empty($result) && isset($data[$habtmKey]) && is_array($data[$habtmKey])) {
			if (ClassRegistry::isKeySet($habtmKey)) {
				$model = ClassRegistry::getObject($habtmKey);
				$result = $this->_selectedArray($data[$habtmKey], $model->primaryKey);
			}
		}

		if (is_array($options)) {
			// 'default' only applies when nothing was found; never emitted.
			if ($result === null && isset($options['default'])) {
				$result = $options['default'];
			}
			unset($options['default']);
		}
		if (is_array($options)) {
			$options[$key] = $result;
			return $options;
		}
		return $result;
	}

/**
 * Sets the defaults for an input tag. Will set the
 * name, value, and id attributes for an array of html attributes.
 *
 * @param string $field The field name to initialize.
 * @param array $options Array of options to use while initializing an input field.
 * @return array Array options for the form input.
*/
	protected function _initInputField($field, $options = array()) {
		// Point the helper at $field first so name/value/id all resolve
		// against the same entity path.
		if ($field !== null) {
			$this->setEntity($field);
		}
		// Pipeline: name => value => id, each stage augmenting the array.
		return $this->domId($this->value($this->_name((array)$options)));
	}

/**
 * Adds the given class to the element options
 *
 * @param array $options Array options/attributes to add a class to
 * @param string $class The class name being added.
 * @param string $key the key to use for class.
 * @return array Array of options with $key set.
 */
	public function addClass($options = array(), $class = null, $key = 'class') {
		// Append when a non-blank class string is already present; otherwise
		// start a fresh class attribute.
		$current = isset($options[$key]) ? $options[$key] : '';
		if (trim($current)) {
			$options[$key] = $current . ' ' . $class;
		} else {
			$options[$key] = $class;
		}
		return $options;
	}

/**
 * Returns a string generated by a helper method
 *
 * This method can be overridden in subclasses to do generalized output post-processing
 *
 * @param string $str String to be output.
 * @return string
 * @deprecated This method will be removed in future versions.
 */
	public function output($str) {
		// Identity by default; subclasses may post-process.
		return $str;
	}

/**
 * Before render callback. beforeRender is called before the view file is rendered.
 *
 * Overridden in subclasses.
 *
 * @param string $viewFile The view file that is going to be rendered
 * @return void
 */
	public function beforeRender($viewFile) {
	}

/**
 * After render callback. afterRender is called after the view file is rendered
 * but before the layout has been rendered.
 *
 * Overridden in subclasses.
 *
 * @param string $viewFile The view file that was rendered.
 * @return void
 */
	public function afterRender($viewFile) {
	}

/**
 * Before layout callback. beforeLayout is called before the layout is rendered.
 *
 * Overridden in subclasses.
 *
 * @param string $layoutFile The layout about to be rendered.
 * @return void
 */
	public function beforeLayout($layoutFile) {
	}

/**
 * After layout callback. afterLayout is called after the layout has rendered.
 *
 * Overridden in subclasses.
* * @param string $layoutFile The layout file that was rendered. * @return void */ public function afterLayout($layoutFile) { } /** * Before render file callback. * Called before any view fragment is rendered. * * Overridden in subclasses. * * @param string $viewFile The file about to be rendered. * @return void */ public function beforeRenderFile($viewFile) { } /** * After render file callback. * Called after any view fragment is rendered. * * Overridden in subclasses. * * @param string $viewFile The file just be rendered. * @param string $content The content that was rendered. * @return void */ public function afterRenderFile($viewFile, $content) { } /** * Transforms a recordset from a hasAndBelongsToMany association to a list of selected * options for a multiple select element * * @param string|array $data Data array or model name. * @param string $key Field name. * @return array */ protected function _selectedArray($data, $key = 'id') { if (!is_array($data)) { $model = $data; if (!empty($this->request->data[$model][$model])) { return $this->request->data[$model][$model]; } if (!empty($this->request->data[$model])) { $data = $this->request->data[$model]; } } $array = array(); if (!empty($data)) { foreach ($data as $row) { if (isset($row[$key])) { $array[$row[$key]] = $row[$key]; } } } return empty($array) ? 
null : $array; } /** * Resets the vars used by Helper::clean() to null * * @return void */ protected function _reset() { $this->_tainted = null; $this->_cleaned = null; } /** * Removes harmful content from output * * @return void */ protected function _clean() { if (<API key>()) { $this->_cleaned = stripslashes($this->_tainted); } else { $this->_cleaned = $this->_tainted; } $this->_cleaned = str_replace(array("&amp;", "&lt;", "&gt;"), array("&amp;amp;", "&amp;lt;", "&amp;gt;"), $this->_cleaned); $this->_cleaned = preg_replace('#(&\#*\w+)[\x00-\x20]+;#u', "$1;", $this->_cleaned); $this->_cleaned = preg_replace('#(&\#x*)([0-9A-F]+);*#iu', "$1$2;", $this->_cleaned); $this->_cleaned = html_entity_decode($this->_cleaned, ENT_COMPAT, "UTF-8"); $this->_cleaned = preg_replace('#(<[^>]+[\x00-\x20\"\'\/])(on|xmlns)[^>]*>#iUu', "$1>", $this->_cleaned); $this->_cleaned = preg_replace('#([a-z]*)[\x00-\x20]*=[\x00-\x20]*([\`\'\"]*)[\\x00-\x20]*j[\x00-\x20]*a[\x00-\x20]*v[\x00-\x20]*a[\x00-\x20]*s[\x00-\x20]*c[\x00-\x20]*r[\x00-\x20]*i[\x00-\x20]*p[\x00-\x20]*t[\x00-\x20]*:#iUu', '$1=$2nojavascript...', $this->_cleaned); $this->_cleaned = preg_replace('#([a-z]*)[\x00-\x20]*=([\'\"]*)[\x00-\x20]*v[\x00-\x20]*b[\x00-\x20]*s[\x00-\x20]*c[\x00-\x20]*r[\x00-\x20]*i[\x00-\x20]*p[\x00-\x20]*t[\x00-\x20]*:#iUu', '$1=$2novbscript...', $this->_cleaned); $this->_cleaned = preg_replace('#([a-z]*)[\x00-\x20]*=*([\'\"]*)[\x00-\x20]*-moz-binding[\x00-\x20]*:#iUu', '$1=$2nomozbinding...', $this->_cleaned); $this->_cleaned = preg_replace('#([a-z]*)[\x00-\x20]*=([\'\"]*)[\x00-\x20]*data[\x00-\x20]*:#Uu', '$1=$2nodata...', $this->_cleaned); $this->_cleaned = preg_replace('#(<[^>]+)style[\x00-\x20]*=[\x00-\x20]*([\`\'\"]*).*expression[\x00-\x20]*\([^>]*>#iU', "$1>", $this->_cleaned); $this->_cleaned = preg_replace('#(<[^>]+)style[\x00-\x20]*=[\x00-\x20]*([\`\'\"]*).*behaviour[\x00-\x20]*\([^>]*>#iU', "$1>", $this->_cleaned); $this->_cleaned = 
preg_replace('#(<[^>]+)style[\x00-\x20]*=[\x00-\x20]*([\`\'\"]*).*s[\x00-\x20]*c[\x00-\x20]*r[\x00-\x20]*i[\x00-\x20]*p[\x00-\x20]*t[\x00-\x20]*:*[^>]*>#iUu', "$1>", $this->_cleaned);
# ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY
# IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR
# PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT.

# Configures a local IIS server with a self-signed HTTPS binding on port 443
# for the StreamSocket sample. Run RemoveServer.ps1 to undo the changes.
# -Force skips the interactive confirmation prompt.
param(
[switch]$Force = $false
)

$scriptPath = $(Split-Path $MyInvocation.MyCommand.Path)
$firewallRuleName = "StreamSocketSample - HTTP 443"
$requiredFeatures = "IIS-WebServer", "IIS-WebServerRole"
# State file recording what this script changed, consumed by RemoveServer.ps1.
$settingsFile = "$scriptPath\<API key>.xml"
$settings = @{"featuresToEnable"=""; "<API key>"=""}

# Check if running as Administrator.
$windowsIdentity = [System.Security.Principal.WindowsIdentity]::GetCurrent()
$principal = New-Object System.Security.Principal.WindowsPrincipal($windowsIdentity)
$administratorRole = [System.Security.Principal.WindowsBuiltInRole]::Administrator
if ($principal.IsInRole($administratorRole) -eq $false)
{
    "Please run the script from elevated PowerShell."
    return
}

# Check if the config file was created. If yes, user should first run RemoveServer.ps1.
if (Test-Path $settingsFile)
{
    "The script has already been run. Please run RemoveServer.ps1 before running it again."
    return
}

if(!$Force)
{
    Write-Warning "This script will attempt to enable IIS and configure HTTPS."
    Write-Warning "The script may interfere with any existing IIS configuration on this machine."
    Write-Warning "Press N to abort if IIS is already configured on this PC."
    $answer = Read-Host '[Y] Yes [N] No (default is "N")'
    if ($answer -ne "Y")
    {
        Write-Host "Aborted by user."
        return
    }
}

# Get features that should be enabled.
# Only disabled features are recorded, so removal restores the prior state.
$featuresToEnable = @()
<API key> -Online | ?{ $_.State -eq [Microsoft.Dism.Commands.FeatureState]::Disabled -and $requiredFeatures -contains $_.FeatureName } | %{ $featuresToEnable += $_.FeatureName }

# Save enabled features to the config file.
$settings.featuresToEnable = $featuresToEnable
$settings | Export-Clixml -Path $settingsFile

# Enable features.
if ($featuresToEnable.Count -gt 0)
{
    "Enabling following features: $($featuresToEnable -join ", ")."
    <API key> -Online -FeatureName $featuresToEnable > $null
}

# Add firewall rule.
"Adding firewall rule `'$firewallRuleName`'."
New-NetFirewallRule -DisplayName $firewallRuleName -Direction Inbound -Protocol TCP -LocalPort 443 -Action Allow > $null

"Creating self-signed certificate."
$cert = <API key> -DnsName www.fabrikam.com -CertStoreLocation cert:\LocalMachine\My
# Record the thumbprint so RemoveServer.ps1 can delete the certificate later.
$settings.<API key> = $cert.Thumbprint
$settings | Export-Clixml -Path $settingsFile

"Creating SSL IIS binding"
New-WebBinding -Name "Default Web Site" -IP "*" -Port 443 -Protocol https

"Removing all existing SSL bindings"
Remove-Item IIS:\SslBindings\*

"Assigning certificate to the binding"
cd IIS:\SslBindings
$cert | New-Item 0.0.0.0!443 | out-null

"Done: https://localhost should now display an invalid-certificate web-page."
cd $scriptPath
#if !defined(<API key>)
#define <API key>

#include <boost/vmd/detail/setup.hpp>

#if BOOST_PP_VARIADICS

#include <boost/preprocessor/control/iif.hpp>
#include <boost/preprocessor/seq/to_list.hpp>
//
#include <boost/vmd/identity.hpp>
#include <boost/vmd/is_empty.hpp>

/*

  The succeeding comments in this file are in doxygen format.

*/

/** \file
*/

/** \def <API key>(seq)

    \brief converts a seq to a list.

    seq = seq to be converted.

    If the seq is an empty seq it is converted to an empty list (BOOST_PP_NIL).
    Otherwise the seq is converted to a list with the same number of elements as the seq.
*/

#if BOOST_VMD_MSVC
/* MSVC workaround: dispatch to helper macros instead of BOOST_VMD_IDENTITY,
   because of VC++ preprocessor expansion quirks. */
#define <API key>(seq) \
    BOOST_PP_IIF \
        ( \
        BOOST_VMD_IS_EMPTY(seq), \
        <API key>, \
        <API key> \
        ) \
    (seq) \
/**/
/* Empty seq: yield the empty list sentinel. */
#define <API key>(seq) BOOST_PP_NIL
/* Non-empty seq: delegate to the preprocessor seq-to-list conversion. */
#define <API key>(seq) <API key>(seq)
#else
#define <API key>(seq) \
    BOOST_PP_IIF \
        ( \
        BOOST_VMD_IS_EMPTY(seq), \
        BOOST_VMD_IDENTITY(BOOST_PP_NIL), \
        <API key> \
        ) \
    (seq) \
/**/
#endif

#endif /* BOOST_PP_VARIADICS */
#endif /* <API key> */
#include <linux/poll.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/uinput.h> #include <linux/input/mt.h> #include "../input-compat.h" static int uinput_dev_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { struct uinput_device *udev = input_get_drvdata(dev); udev->buff[udev->head].type = type; udev->buff[udev->head].code = code; udev->buff[udev->head].value = value; do_gettimeofday(&udev->buff[udev->head].time); udev->head = (udev->head + 1) % UINPUT_BUFFER_SIZE; <API key>(&udev->waitq); return 0; } /* Atomically allocate an ID for the given request. Returns 0 on success. */ static bool <API key>(struct uinput_device *udev, struct uinput_request *request) { unsigned int id; bool reserved = false; spin_lock(&udev->requests_lock); for (id = 0; id < UINPUT_NUM_REQUESTS; id++) { if (!udev->requests[id]) { request->id = id; udev->requests[id] = request; reserved = true; break; } } spin_unlock(&udev->requests_lock); return reserved; } static struct uinput_request *uinput_request_find(struct uinput_device *udev, unsigned int id) { /* Find an input request, by ID. Returns NULL if the ID isn't valid. */ if (id >= UINPUT_NUM_REQUESTS) return NULL; return udev->requests[id]; } static int <API key>(struct uinput_device *udev, struct uinput_request *request) { /* Allocate slot. If none are available right away, wait. 
*/ return <API key>(udev->requests_waitq, <API key>(udev, request)); } static void uinput_request_done(struct uinput_device *udev, struct uinput_request *request) { /* Mark slot as available */ udev->requests[request->id] = NULL; wake_up(&udev->requests_waitq); complete(&request->done); } static int uinput_request_send(struct uinput_device *udev, struct uinput_request *request) { int retval; retval = <API key>(&udev->mutex); if (retval) return retval; if (udev->state != UIST_CREATED) { retval = -ENODEV; goto out; } init_completion(&request->done); /* * Tell our userspace application about this new request * by queueing an input event. */ uinput_dev_event(udev->dev, EV_UINPUT, request->code, request->id); out: mutex_unlock(&udev->mutex); return retval; } static int <API key>(struct uinput_device *udev, struct uinput_request *request) { int error; error = <API key>(udev, request); if (error) return error; error = uinput_request_send(udev, request); if (error) { uinput_request_done(udev, request); return error; } wait_for_completion(&request->done); return request->retval; } /* * Fail all outstanding requests so handlers don't wait for the userspace * to finish processing them. 
*/
static void <API key>(struct uinput_device *udev)
{
	struct uinput_request *request;
	int i;

	/* Complete every outstanding request with -ENODEV so no handler
	 * keeps waiting after the device goes away. */
	spin_lock(&udev->requests_lock);

	for (i = 0; i < UINPUT_NUM_REQUESTS; i++) {
		request = udev->requests[i];
		if (request) {
			request->retval = -ENODEV;
			uinput_request_done(udev, request);
		}
	}

	spin_unlock(&udev->requests_lock);
}

/* Forward force-feedback gain changes to userspace as EV_FF/FF_GAIN. */
static void uinput_dev_set_gain(struct input_dev *dev, u16 gain)
{
	uinput_dev_event(dev, EV_FF, FF_GAIN, gain);
}

/* Forward autocenter changes to userspace as EV_FF/FF_AUTOCENTER. */
static void <API key>(struct input_dev *dev, u16 magnitude)
{
	uinput_dev_event(dev, EV_FF, FF_AUTOCENTER, magnitude);
}

/* Start/stop playback of @effect_id by queueing an EV_FF event. */
static int uinput_dev_playback(struct input_dev *dev, int effect_id, int value)
{
	return uinput_dev_event(dev, EV_FF, effect_id, value);
}

static int <API key>(struct input_dev *dev,
		struct ff_effect *effect, struct ff_effect *old)
{
	struct uinput_device *udev = input_get_drvdata(dev);
	struct uinput_request request;

	/*
	 * uinput driver does not currently support periodic effects with
	 * custom waveform since it does not have a way to pass buffer of
	 * samples (custom_data) to userspace. If ever there is a device
	 * supporting custom waveforms we would need to define an additional
	 * ioctl (UI_UPLOAD_SAMPLES) but for now we just bail out.
*/ if (effect->type == FF_PERIODIC && effect->u.periodic.waveform == FF_CUSTOM) return -EINVAL; request.code = UI_FF_UPLOAD; request.u.upload.effect = effect; request.u.upload.old = old; return <API key>(udev, &request); } static int <API key>(struct input_dev *dev, int effect_id) { struct uinput_device *udev = input_get_drvdata(dev); struct uinput_request request; if (!test_bit(EV_FF, dev->evbit)) return -ENOSYS; request.code = UI_FF_ERASE; request.u.effect_id = effect_id; return <API key>(udev, &request); } static void <API key>(struct uinput_device *udev) { const char *name, *phys; struct input_dev *dev = udev->dev; enum uinput_state old_state = udev->state; udev->state = UIST_NEW_DEVICE; if (dev) { name = dev->name; phys = dev->phys; if (old_state == UIST_CREATED) { <API key>(udev); <API key>(dev); } else { input_free_device(dev); } kfree(name); kfree(phys); udev->dev = NULL; } } static int <API key>(struct uinput_device *udev) { struct input_dev *dev = udev->dev; int error, nslot; if (udev->state != UIST_SETUP_COMPLETE) { printk(KERN_DEBUG "%s: write device info first\n", UINPUT_NAME); return -EINVAL; } if (test_bit(EV_ABS, dev->evbit)) { input_alloc_absinfo(dev); if (!dev->absinfo) { error = -EINVAL; goto fail1; } if (test_bit(ABS_MT_SLOT, dev->absbit)) { nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1; error = input_mt_init_slots(dev, nslot, 0); if (error) goto fail1; } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { <API key>(dev, 60); } } if (test_bit(EV_FF, dev->evbit) && !udev->ff_effects_max) { printk(KERN_DEBUG "%s: ff_effects_max should be non-zero when FF_BIT is set\n", UINPUT_NAME); error = -EINVAL; goto fail1; } if (udev->ff_effects_max) { error = input_ff_create(dev, udev->ff_effects_max); if (error) goto fail1; dev->ff->upload = <API key>; dev->ff->erase = <API key>; dev->ff->playback = uinput_dev_playback; dev->ff->set_gain = uinput_dev_set_gain; dev->ff->set_autocenter = <API key>; } error = <API key>(udev->dev); if (error) goto fail2; 
udev->state = UIST_CREATED; return 0; fail2: input_ff_destroy(dev); fail1: <API key>(udev); return error; } static int uinput_open(struct inode *inode, struct file *file) { struct uinput_device *newdev; newdev = kzalloc(sizeof(struct uinput_device), GFP_KERNEL); if (!newdev) return -ENOMEM; mutex_init(&newdev->mutex); spin_lock_init(&newdev->requests_lock); init_waitqueue_head(&newdev->requests_waitq); init_waitqueue_head(&newdev->waitq); newdev->state = UIST_NEW_DEVICE; file->private_data = newdev; nonseekable_open(inode, file); return 0; } static int <API key>(struct input_dev *dev, unsigned int code, const struct input_absinfo *abs) { int min, max; min = abs->minimum; max = abs->maximum; if ((min != 0 || max != 0) && max <= min) { printk(KERN_DEBUG "%s: invalid abs[%02x] min:%d max:%d\n", UINPUT_NAME, code, min, max); return -EINVAL; } if (abs->flat > max - min) { printk(KERN_DEBUG "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n", UINPUT_NAME, code, abs->flat, min, max); return -EINVAL; } return 0; } static int <API key>(struct input_dev *dev) { unsigned int cnt; int error; if (!test_bit(EV_ABS, dev->evbit)) return 0; /* * Check if absmin/absmax/absfuzz/absflat are sane. 
*/ for_each_set_bit(cnt, dev->absbit, ABS_CNT) { if (!dev->absinfo) return -EINVAL; error = <API key>(dev, cnt, &dev->absinfo[cnt]); if (error) return error; } return 0; } static int <API key>(struct uinput_device *udev) { udev->dev = <API key>(); if (!udev->dev) return -ENOMEM; udev->dev->event = uinput_dev_event; input_set_drvdata(udev->dev, udev); return 0; } static int uinput_dev_setup(struct uinput_device *udev, struct uinput_setup __user *arg) { struct uinput_setup setup; struct input_dev *dev; if (udev->state == UIST_CREATED) return -EINVAL; if (copy_from_user(&setup, arg, sizeof(setup))) return -EFAULT; if (!setup.name[0]) return -EINVAL; dev = udev->dev; dev->id = setup.id; udev->ff_effects_max = setup.ff_effects_max; kfree(dev->name); dev->name = kstrndup(setup.name, <API key>, GFP_KERNEL); if (!dev->name) return -ENOMEM; udev->state = UIST_SETUP_COMPLETE; return 0; } static int uinput_abs_setup(struct uinput_device *udev, struct uinput_setup __user *arg, size_t size) { struct uinput_abs_setup setup = {}; struct input_dev *dev; int error; if (size > sizeof(setup)) return -E2BIG; if (udev->state == UIST_CREATED) return -EINVAL; if (copy_from_user(&setup, arg, size)) return -EFAULT; if (setup.code > ABS_MAX) return -ERANGE; dev = udev->dev; error = <API key>(dev, setup.code, &setup.absinfo); if (error) return error; input_alloc_absinfo(dev); if (!dev->absinfo) return -ENOMEM; set_bit(setup.code, dev->absbit); dev->absinfo[setup.code] = setup.absinfo; return 0; } /* legacy setup via write() */ static int <API key>(struct uinput_device *udev, const char __user *buffer, size_t count) { struct uinput_user_dev *user_dev; struct input_dev *dev; int i; int retval; if (count != sizeof(struct uinput_user_dev)) return -EINVAL; if (!udev->dev) { retval = <API key>(udev); if (retval) return retval; } dev = udev->dev; user_dev = memdup_user(buffer, sizeof(struct uinput_user_dev)); if (IS_ERR(user_dev)) return PTR_ERR(user_dev); udev->ff_effects_max = 
user_dev->ff_effects_max; /* Ensure name is filled in */ if (!user_dev->name[0]) { retval = -EINVAL; goto exit; } kfree(dev->name); dev->name = kstrndup(user_dev->name, <API key>, GFP_KERNEL); if (!dev->name) { retval = -ENOMEM; goto exit; } dev->id.bustype = user_dev->id.bustype; dev->id.vendor = user_dev->id.vendor; dev->id.product = user_dev->id.product; dev->id.version = user_dev->id.version; for (i = 0; i < ABS_CNT; i++) { input_abs_set_max(dev, i, user_dev->absmax[i]); input_abs_set_min(dev, i, user_dev->absmin[i]); input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]); input_abs_set_flat(dev, i, user_dev->absflat[i]); } retval = <API key>(dev); if (retval < 0) goto exit; udev->state = UIST_SETUP_COMPLETE; retval = count; exit: kfree(user_dev); return retval; } static ssize_t <API key>(struct uinput_device *udev, const char __user *buffer, size_t count) { struct input_event ev; size_t bytes = 0; if (count != 0 && count < input_event_size()) return -EINVAL; while (bytes + input_event_size() <= count) { /* * Note that even if some events were fetched successfully * we are still going to return EFAULT instead of partial * count to let userspace know that it got it's buffers * all wrong. */ if (<API key>(buffer + bytes, &ev)) return -EFAULT; input_event(udev->dev, ev.type, ev.code, ev.value); bytes += input_event_size(); } return bytes; } static ssize_t uinput_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { struct uinput_device *udev = file->private_data; int retval; if (count == 0) return 0; retval = <API key>(&udev->mutex); if (retval) return retval; retval = udev->state == UIST_CREATED ? 
<API key>(udev, buffer, count) : <API key>(udev, buffer, count); mutex_unlock(&udev->mutex); return retval; } static bool <API key>(struct uinput_device *udev, struct input_event *event) { bool have_event; spin_lock_irq(&udev->dev->event_lock); have_event = udev->head != udev->tail; if (have_event) { *event = udev->buff[udev->tail]; udev->tail = (udev->tail + 1) % UINPUT_BUFFER_SIZE; } spin_unlock_irq(&udev->dev->event_lock); return have_event; } static ssize_t <API key>(struct uinput_device *udev, char __user *buffer, size_t count) { struct input_event event; size_t read = 0; while (read + input_event_size() <= count && <API key>(udev, &event)) { if (input_event_to_user(buffer + read, &event)) return -EFAULT; read += input_event_size(); } return read; } static ssize_t uinput_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct uinput_device *udev = file->private_data; ssize_t retval; if (count != 0 && count < input_event_size()) return -EINVAL; do { retval = <API key>(&udev->mutex); if (retval) return retval; if (udev->state != UIST_CREATED) retval = -ENODEV; else if (udev->head == udev->tail && (file->f_flags & O_NONBLOCK)) retval = -EAGAIN; else retval = <API key>(udev, buffer, count); mutex_unlock(&udev->mutex); if (retval || count == 0) break; if (!(file->f_flags & O_NONBLOCK)) retval = <API key>(udev->waitq, udev->head != udev->tail || udev->state != UIST_CREATED); } while (retval == 0); return retval; } static unsigned int uinput_poll(struct file *file, poll_table *wait) { struct uinput_device *udev = file->private_data; poll_wait(file, &udev->waitq, wait); if (udev->head != udev->tail) return POLLIN | POLLRDNORM; return 0; } static int uinput_release(struct inode *inode, struct file *file) { struct uinput_device *udev = file->private_data; <API key>(udev); kfree(udev); return 0; } #ifdef CONFIG_COMPAT struct <API key> { __u32 request_id; __s32 retval; struct ff_effect_compat effect; struct ff_effect_compat old; }; static int 
<API key>(char __user *buffer, const struct uinput_ff_upload *ff_up) { if (in_compat_syscall()) { struct <API key> ff_up_compat; ff_up_compat.request_id = ff_up->request_id; ff_up_compat.retval = ff_up->retval; /* * It so happens that the pointer that gives us the trouble * is the last field in the structure. Since we don't support * custom waveforms in uinput anyway we can just copy the whole * thing (to the compat size) and ignore the pointer. */ memcpy(&ff_up_compat.effect, &ff_up->effect, sizeof(struct ff_effect_compat)); memcpy(&ff_up_compat.old, &ff_up->old, sizeof(struct ff_effect_compat)); if (copy_to_user(buffer, &ff_up_compat, sizeof(struct <API key>))) return -EFAULT; } else { if (copy_to_user(buffer, ff_up, sizeof(struct uinput_ff_upload))) return -EFAULT; } return 0; } static int <API key>(const char __user *buffer, struct uinput_ff_upload *ff_up) { if (in_compat_syscall()) { struct <API key> ff_up_compat; if (copy_from_user(&ff_up_compat, buffer, sizeof(struct <API key>))) return -EFAULT; ff_up->request_id = ff_up_compat.request_id; ff_up->retval = ff_up_compat.retval; memcpy(&ff_up->effect, &ff_up_compat.effect, sizeof(struct ff_effect_compat)); memcpy(&ff_up->old, &ff_up_compat.old, sizeof(struct ff_effect_compat)); } else { if (copy_from_user(ff_up, buffer, sizeof(struct uinput_ff_upload))) return -EFAULT; } return 0; } #else static int <API key>(char __user *buffer, const struct uinput_ff_upload *ff_up) { if (copy_to_user(buffer, ff_up, sizeof(struct uinput_ff_upload))) return -EFAULT; return 0; } static int <API key>(const char __user *buffer, struct uinput_ff_upload *ff_up) { if (copy_from_user(ff_up, buffer, sizeof(struct uinput_ff_upload))) return -EFAULT; return 0; } #endif #define uinput_set_bit(_arg, _bit, _max) \ ({ \ int __ret = 0; \ if (udev->state == UIST_CREATED) \ __ret = -EINVAL; \ else if ((_arg) > (_max)) \ __ret = -EINVAL; \ else set_bit((_arg), udev->dev->_bit); \ __ret; \ }) static int uinput_str_to_user(void __user *dest, const 
char *str, unsigned int maxlen) { char __user *p = dest; int len, ret; if (!str) return -ENOENT; if (maxlen == 0) return -EINVAL; len = strlen(str) + 1; if (len > maxlen) len = maxlen; ret = copy_to_user(p, str, len); if (ret) return -EFAULT; /* force terminating '\0' */ ret = put_user(0, p + len - 1); return ret ? -EFAULT : len; } static long <API key>(struct file *file, unsigned int cmd, unsigned long arg, void __user *p) { int retval; struct uinput_device *udev = file->private_data; struct uinput_ff_upload ff_up; struct uinput_ff_erase ff_erase; struct uinput_request *req; char *phys; const char *name; unsigned int size; retval = <API key>(&udev->mutex); if (retval) return retval; if (!udev->dev) { retval = <API key>(udev); if (retval) goto out; } switch (cmd) { case UI_GET_VERSION: if (put_user(UINPUT_VERSION, (unsigned int __user *)p)) retval = -EFAULT; goto out; case UI_DEV_CREATE: retval = <API key>(udev); goto out; case UI_DEV_DESTROY: <API key>(udev); goto out; case UI_DEV_SETUP: retval = uinput_dev_setup(udev, p); goto out; /* UI_ABS_SETUP is handled in the variable size ioctls */ case UI_SET_EVBIT: retval = uinput_set_bit(arg, evbit, EV_MAX); goto out; case UI_SET_KEYBIT: retval = uinput_set_bit(arg, keybit, KEY_MAX); goto out; case UI_SET_RELBIT: retval = uinput_set_bit(arg, relbit, REL_MAX); goto out; case UI_SET_ABSBIT: retval = uinput_set_bit(arg, absbit, ABS_MAX); goto out; case UI_SET_MSCBIT: retval = uinput_set_bit(arg, mscbit, MSC_MAX); goto out; case UI_SET_LEDBIT: retval = uinput_set_bit(arg, ledbit, LED_MAX); goto out; case UI_SET_SNDBIT: retval = uinput_set_bit(arg, sndbit, SND_MAX); goto out; case UI_SET_FFBIT: retval = uinput_set_bit(arg, ffbit, FF_MAX); goto out; case UI_SET_SWBIT: retval = uinput_set_bit(arg, swbit, SW_MAX); goto out; case UI_SET_PROPBIT: retval = uinput_set_bit(arg, propbit, INPUT_PROP_MAX); goto out; case UI_SET_PHYS: if (udev->state == UIST_CREATED) { retval = -EINVAL; goto out; } phys = strndup_user(p, 1024); if 
(IS_ERR(phys)) { retval = PTR_ERR(phys); goto out; } kfree(udev->dev->phys); udev->dev->phys = phys; goto out; case UI_BEGIN_FF_UPLOAD: retval = <API key>(p, &ff_up); if (retval) goto out; req = uinput_request_find(udev, ff_up.request_id); if (!req || req->code != UI_FF_UPLOAD || !req->u.upload.effect) { retval = -EINVAL; goto out; } ff_up.retval = 0; ff_up.effect = *req->u.upload.effect; if (req->u.upload.old) ff_up.old = *req->u.upload.old; else memset(&ff_up.old, 0, sizeof(struct ff_effect)); retval = <API key>(p, &ff_up); goto out; case UI_BEGIN_FF_ERASE: if (copy_from_user(&ff_erase, p, sizeof(ff_erase))) { retval = -EFAULT; goto out; } req = uinput_request_find(udev, ff_erase.request_id); if (!req || req->code != UI_FF_ERASE) { retval = -EINVAL; goto out; } ff_erase.retval = 0; ff_erase.effect_id = req->u.effect_id; if (copy_to_user(p, &ff_erase, sizeof(ff_erase))) { retval = -EFAULT; goto out; } goto out; case UI_END_FF_UPLOAD: retval = <API key>(p, &ff_up); if (retval) goto out; req = uinput_request_find(udev, ff_up.request_id); if (!req || req->code != UI_FF_UPLOAD || !req->u.upload.effect) { retval = -EINVAL; goto out; } req->retval = ff_up.retval; uinput_request_done(udev, req); goto out; case UI_END_FF_ERASE: if (copy_from_user(&ff_erase, p, sizeof(ff_erase))) { retval = -EFAULT; goto out; } req = uinput_request_find(udev, ff_erase.request_id); if (!req || req->code != UI_FF_ERASE) { retval = -EINVAL; goto out; } req->retval = ff_erase.retval; uinput_request_done(udev, req); goto out; } size = _IOC_SIZE(cmd); /* Now check variable-length commands */ switch (cmd & ~IOCSIZE_MASK) { case UI_GET_SYSNAME(0): if (udev->state != UIST_CREATED) { retval = -ENOENT; goto out; } name = dev_name(&udev->dev->dev); retval = uinput_str_to_user(p, name, size); goto out; case UI_ABS_SETUP & ~IOCSIZE_MASK: retval = uinput_abs_setup(udev, p, size); goto out; } retval = -EINVAL; out: mutex_unlock(&udev->mutex); return retval; } static long uinput_ioctl(struct file *file, 
unsigned int cmd, unsigned long arg) { return <API key>(file, cmd, arg, (void __user *)arg); } #ifdef CONFIG_COMPAT #define UI_SET_PHYS_COMPAT _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t) static long uinput_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { if (cmd == UI_SET_PHYS_COMPAT) cmd = UI_SET_PHYS; return <API key>(file, cmd, arg, compat_ptr(arg)); } #endif static const struct file_operations uinput_fops = { .owner = THIS_MODULE, .open = uinput_open, .release = uinput_release, .read = uinput_read, .write = uinput_write, .poll = uinput_poll, .unlocked_ioctl = uinput_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = uinput_compat_ioctl, #endif .llseek = no_llseek, }; static struct miscdevice uinput_misc = { .fops = &uinput_fops, .minor = UINPUT_MINOR, .name = UINPUT_NAME, }; module_misc_device(uinput_misc); <API key>(UINPUT_MINOR); MODULE_ALIAS("devname:" UINPUT_NAME); MODULE_AUTHOR("Aristeu Sergio Rozanski Filho"); MODULE_DESCRIPTION("User level driver support for input subsystem"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.3");
<?php /** * Used for data cleanup and post-processing * * * This class can be overloaded with {@see SimplePie::set_sanitize_class()} * * @package SimplePie * @todo Move to using an actual HTML parser (this will allow tags to be properly stripped, and to switch between HTML and XHTML), this will also make it easier to shorten a string while preserving HTML tags */ class SimplePie_Sanitize { // Private vars var $base; // Options var $remove_div = true; var $image_handler = ''; var $strip_htmltags = array('base', 'blink', 'body', 'doctype', 'embed', 'font', 'form', 'frame', 'frameset', 'html', 'iframe', 'input', 'marquee', 'meta', 'noscript', 'object', 'param', 'script', 'style'); var $<API key> = false; var $strip_attributes = array('bgsound', 'class', 'expr', 'id', 'style', 'onclick', 'onerror', 'onfinish', 'onmouseover', 'onmouseout', 'onfocus', 'onblur', 'lowsrc', 'dynsrc'); var $strip_comments = false; var $output_encoding = 'UTF-8'; var $enable_cache = true; var $cache_location = './cache'; var $cache_name_function = 'md5'; var $timeout = 10; var $useragent = ''; var $force_fsockopen = false; var $<API key> = null; public function __construct() { // Set defaults $this-><API key>(null); } public function remove_div($enable = true) { $this->remove_div = (bool) $enable; } public function set_image_handler($page = false) { if ($page) { $this->image_handler = (string) $page; } else { $this->image_handler = false; } } public function set_registry(SimplePie_Registry $registry) { $this->registry = $registry; } public function pass_cache_data($enable_cache = true, $cache_location = './cache', $cache_name_function = 'md5', $cache_class = 'SimplePie_Cache') { if (isset($enable_cache)) { $this->enable_cache = (bool) $enable_cache; } if ($cache_location) { $this->cache_location = (string) $cache_location; } if ($cache_name_function) { $this->cache_name_function = (string) $cache_name_function; } } public function pass_file_data($file_class = 'SimplePie_File', $timeout = 10, 
$useragent = '', $force_fsockopen = false) { if ($timeout) { $this->timeout = (string) $timeout; } if ($useragent) { $this->useragent = (string) $useragent; } if ($force_fsockopen) { $this->force_fsockopen = (string) $force_fsockopen; } } public function strip_htmltags($tags = array('base', 'blink', 'body', 'doctype', 'embed', 'font', 'form', 'frame', 'frameset', 'html', 'iframe', 'input', 'marquee', 'meta', 'noscript', 'object', 'param', 'script', 'style')) { if ($tags) { if (is_array($tags)) { $this->strip_htmltags = $tags; } else { $this->strip_htmltags = explode(',', $tags); } } else { $this->strip_htmltags = false; } } public function <API key>($encode = false) { $this-><API key> = (bool) $encode; } public function strip_attributes($attribs = array('bgsound', 'class', 'expr', 'id', 'style', 'onclick', 'onerror', 'onfinish', 'onmouseover', 'onmouseout', 'onfocus', 'onblur', 'lowsrc', 'dynsrc')) { if ($attribs) { if (is_array($attribs)) { $this->strip_attributes = $attribs; } else { $this->strip_attributes = explode(',', $attribs); } } else { $this->strip_attributes = false; } } public function strip_comments($strip = false) { $this->strip_comments = (bool) $strip; } public function set_output_encoding($encoding = 'UTF-8') { $this->output_encoding = (string) $encoding; } /** * Set element/attribute key/value pairs of HTML attributes * containing URLs that need to be resolved relative to the feed * * Defaults to |a|@href, |area|@href, |blockquote|@cite, |del|@cite, * |form|@action, |img|@longdesc, |img|@src, |input|@src, |ins|@cite, * |q|@cite * * @since 1.0 * @param array|null $element_attribute Element/attribute key/value pairs, null for default */ public function <API key>($element_attribute = null) { if ($element_attribute === null) { $element_attribute = array( 'a' => 'href', 'area' => 'href', 'blockquote' => 'cite', 'del' => 'cite', 'form' => 'action', 'img' => array( 'longdesc', 'src' ), 'input' => 'src', 'ins' => 'cite', 'q' => 'cite' ); } $this-><API 
key> = (array) $element_attribute; } public function sanitize($data, $type, $base = '') { $data = trim($data); if ($data !== '' || $type & <API key>) { if ($type & <API key>) { if (preg_match('/(&(#(x[0-9a-fA-F]+|[0-9]+)|[a-zA-Z0-9]+)|<\/[A-Za-z][^\x09\x0A\x0B\x0C\x0D\x20\x2F\x3E]*' . <API key> . '>)/', $data)) { $type |= <API key>; } else { $type |= <API key>; } } if ($type & <API key>) { $data = base64_decode($data); } if ($type & (<API key> | <API key>)) { if (!class_exists('DOMDocument')) { $this->registry->call('Misc', 'error', array('DOMDocument not found, unable to use sanitizer', E_USER_WARNING, __FILE__, __LINE__)); return ''; } $document = new DOMDocument(); $document->encoding = 'UTF-8'; $data = $this->preprocess($data, $type); set_error_handler(array('SimplePie_Misc', 'silence_errors')); $document->loadHTML($data); <API key>(); // Strip comments if ($this->strip_comments) { $xpath = new DOMXPath($document); $comments = $xpath->query('//comment()'); foreach ($comments as $comment) { $comment->parentNode->removeChild($comment); } } // Strip out HTML tags and attributes that might cause various security problems. // Based on recommendations by Mark Pilgrim at: if ($this->strip_htmltags) { foreach ($this->strip_htmltags as $tag) { $this->strip_tag($tag, $document, $type); } } if ($this->strip_attributes) { foreach ($this->strip_attributes as $attrib) { $this->strip_attr($attrib, $document); } } // Replace relative URLs $this->base = $base; foreach ($this-><API key> as $element => $attributes) { $this->replace_urls($document, $element, $attributes); } // If image handling (caching, etc.) is enabled, cache and rewrite all the image tags. 
if (isset($this->image_handler) && ((string) $this->image_handler) !== '' && $this->enable_cache) { $images = $document-><API key>('img'); foreach ($images as $img) { if ($img->hasAttribute('src')) { $image_url = call_user_func($this->cache_name_function, $img->getAttribute('src')); $cache = $this->registry->call('Cache', 'get_handler', array($this->cache_location, $image_url, 'spi')); if ($cache->load()) { $img->setAttribute('src', $this->image_handler . $image_url); } else { $file = $this->registry->create('File', array($img->getAttribute('src'), $this->timeout, 5, array('X-FORWARDED-FOR' => $_SERVER['REMOTE_ADDR']), $this->useragent, $this->force_fsockopen)); $headers = $file->headers; if ($file->success && ($file->method & <API key> === 0 || ($file->status_code === 200 || $file->status_code > 206 && $file->status_code < 300))) { if ($cache->save(array('headers' => $file->headers, 'body' => $file->body))) { $img->setAttribute('src', $this->image_handler . $image_url); } else { trigger_error("$this->cache_location is not writeable. Make sure you've set the correct relative or absolute path, and that the location is server-writable.", E_USER_WARNING); } } } } } } // Remove the DOCTYPE // Seems to cause segfaulting if we don't do this if ($document->firstChild instanceof DOMDocumentType) { $document->removeChild($document->firstChild); } // Move everything from the body to the root $real_body = $document-><API key>('body')->item(0)->childNodes->item(0); $document->replaceChild($real_body, $document->firstChild); // Finally, convert to a HTML string $data = trim($document->saveHTML()); if ($this->remove_div) { $data = preg_replace('/^<div' . <API key> . '>/', '', $data); $data = preg_replace('/<\/div>$/', '', $data); } else { $data = preg_replace('/^<div' . <API key> . 
'>/', '<div>', $data); } } if ($type & <API key>) { $absolute = $this->registry->call('Misc', 'absolutize_url', array($data, $base)); if ($absolute !== false) { $data = $absolute; } } if ($type & (<API key> | <API key>)) { $data = htmlspecialchars($data, ENT_COMPAT, 'UTF-8'); } if ($this->output_encoding !== 'UTF-8') { $data = $this->registry->call('Misc', 'change_encoding', array($data, 'UTF-8', $this->output_encoding)); } } return $data; } protected function preprocess($html, $type) { $ret = ''; if ($type & ~<API key>) { // Atom XHTML constructs are wrapped with a div by default // Note: No protection if $html contains a stray </div>! $html = '<div>' . $html . '</div>'; $ret .= '<!DOCTYPE html>'; $content_type = 'text/html'; } else { $ret .= '<!DOCTYPE html PUBLIC "- $content_type = 'application/xhtml+xml'; } $ret .= '<html><head>'; $ret .= '<meta http-equiv="Content-Type" content="' . $content_type . '; charset=utf-8" />'; $ret .= '</head><body>' . $html . '</body></html>'; return $ret; } public function replace_urls($document, $tag, $attributes) { if (!is_array($attributes)) { $attributes = array($attributes); } if (!is_array($this->strip_htmltags) || !in_array($tag, $this->strip_htmltags)) { $elements = $document-><API key>($tag); foreach ($elements as $element) { foreach ($attributes as $attribute) { if ($element->hasAttribute($attribute)) { $value = $this->registry->call('Misc', 'absolutize_url', array($element->getAttribute($attribute), $this->base)); if ($value !== false) { $element->setAttribute($attribute, $value); } } } } } } public function do_strip_htmltags($match) { if ($this-><API key>) { if (isset($match[4]) && !in_array(strtolower($match[1]), array('script', 'style'))) { $match[1] = htmlspecialchars($match[1], ENT_COMPAT, 'UTF-8'); $match[2] = htmlspecialchars($match[2], ENT_COMPAT, 'UTF-8'); return "&lt;$match[1]$match[2]&gt;$match[3]&lt;/$match[1]&gt;"; } else { return htmlspecialchars($match[0], ENT_COMPAT, 'UTF-8'); } } elseif 
(isset($match[4]) && !in_array(strtolower($match[1]), array('script', 'style'))) { return $match[4]; } else { return ''; } } protected function strip_tag($tag, $document, $type) { $xpath = new DOMXPath($document); $elements = $xpath->query('body//' . $tag); if ($this-><API key>) { foreach ($elements as $element) { $fragment = $document-><API key>(); // For elements which aren't script or style, include the tag itself if (!in_array($tag, array('script', 'style'))) { $text = '<' . $tag; if ($element->hasAttributes()) { $attrs = array(); foreach ($element->attributes as $name => $attr) { $value = $attr->value; // In XHTML, empty values should never exist, so we repeat the value if (empty($value) && ($type & <API key>)) { $value = $name; } // For HTML, empty is fine elseif (empty($value) && ($type & <API key>)) { $attrs[] = $name; continue; } // Standard attribute text $attrs[] = $name . '="' . $attr->value . '"'; } $text .= ' ' . implode(' ', $attrs); } $text .= '>'; $fragment->appendChild(new DOMText($text)); } $number = $element->childNodes->length; for ($i = $number; $i > 0; $i { $child = $element->childNodes->item(0); $fragment->appendChild($child); } if (!in_array($tag, array('script', 'style'))) { $fragment->appendChild(new DOMText('</' . $tag . '>')); } $element->parentNode->replaceChild($fragment, $element); } return; } elseif (in_array($tag, array('script', 'style'))) { foreach ($elements as $element) { $element->parentNode->removeChild($element); } return; } else { foreach ($elements as $element) { $fragment = $document-><API key>(); $number = $element->childNodes->length; for ($i = $number; $i > 0; $i { $child = $element->childNodes->item(0); $fragment->appendChild($child); } $element->parentNode->replaceChild($fragment, $element); } } } protected function strip_attr($attrib, $document) { $xpath = new DOMXPath($document);
/* * CPU subsystem support */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/cpu.h> #include <linux/topology.h> #include <linux/device.h> #include <linux/node.h> #include <linux/gfp.h> #include <linux/slab.h> #include <linux/percpu.h> #include <linux/acpi.h> #include <linux/of.h> #include <linux/cpufeature.h> #include <linux/tick.h> #include "base.h" static DEFINE_PER_CPU(struct device *, cpu_sys_devices); static int cpu_subsys_match(struct device *dev, struct device_driver *drv) { /* ACPI style match is the only one that may succeed. */ if (<API key>(dev, drv)) return 1; return 0; } #ifdef CONFIG_HOTPLUG_CPU static void <API key>(struct cpu *cpu, unsigned int from_nid, unsigned int to_nid) { int cpuid = cpu->dev.id; <API key>(cpuid, from_nid); <API key>(cpuid, to_nid); cpu->node_id = to_nid; } static int cpu_subsys_online(struct device *dev) { struct cpu *cpu = container_of(dev, struct cpu, dev); int cpuid = dev->id; int from_nid, to_nid; int ret; from_nid = cpu_to_node(cpuid); if (from_nid == NUMA_NO_NODE) return -ENODEV; ret = cpu_up(cpuid); /* * When hot adding memory to memoryless node and enabling a cpu * on the node, node number of the cpu may internally change. 
*/ to_nid = cpu_to_node(cpuid); if (from_nid != to_nid) <API key>(cpu, from_nid, to_nid); return ret; } static int cpu_subsys_offline(struct device *dev) { return cpu_down(dev->id); } void unregister_cpu(struct cpu *cpu) { int logical_cpu = cpu->dev.id; <API key>(logical_cpu, cpu_to_node(logical_cpu)); device_unregister(&cpu->dev); per_cpu(cpu_sys_devices, logical_cpu) = NULL; return; } #ifdef <API key> static ssize_t cpu_probe_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { ssize_t cnt; int ret; ret = <API key>(); if (ret) return ret; cnt = arch_cpu_probe(buf, count); <API key>(); return cnt; } static ssize_t cpu_release_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { ssize_t cnt; int ret; ret = <API key>(); if (ret) return ret; cnt = arch_cpu_release(buf, count); <API key>(); return cnt; } static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store); static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store); #endif /* <API key> */ #endif /* CONFIG_HOTPLUG_CPU */ struct bus_type cpu_subsys = { .name = "cpu", .dev_name = "cpu", .match = cpu_subsys_match, #ifdef CONFIG_HOTPLUG_CPU .online = cpu_subsys_online, .offline = cpu_subsys_offline, #endif }; EXPORT_SYMBOL_GPL(cpu_subsys); #ifdef CONFIG_KEXEC #include <linux/kexec.h> static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr, char *buf) { struct cpu *cpu = container_of(dev, struct cpu, dev); ssize_t rc; unsigned long long addr; int cpunum; cpunum = cpu->dev.id; /* * Might be reading other cpu's data based on which cpu read thread * has been scheduled. But cpu data (memory) is allocated once during * boot up and this data does not change there after. Hence this * operation should be safe. No locking required. 
*/ addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum)); rc = sprintf(buf, "%Lx\n", addr); return rc; } static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL); static ssize_t <API key>(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t rc; rc = sprintf(buf, "%zu\n", sizeof(note_buf_t)); return rc; } static DEVICE_ATTR(crash_notes_size, 0400, <API key>, NULL); static struct attribute *<API key>[] = { &<API key>.attr, &<API key>.attr, NULL }; static struct attribute_group <API key> = { .attrs = <API key>, }; #endif static const struct attribute_group *<API key>[] = { #ifdef CONFIG_KEXEC &<API key>, #endif NULL }; static const struct attribute_group *<API key>[] = { #ifdef CONFIG_KEXEC &<API key>, #endif NULL }; /* * Print cpu online, possible, present, and system maps */ struct cpu_attr { struct device_attribute attr; const struct cpumask *const map; }; static ssize_t show_cpus_attr(struct device *dev, struct device_attribute *attr, char *buf) { struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr); return <API key>(true, buf, ca->map); } #define _CPU_ATTR(name, map) \ { __ATTR(name, 0444, show_cpus_attr, NULL), map } /* Keep in sync with cpu_subsys_attrs */ static struct cpu_attr cpu_attrs[] = { _CPU_ATTR(online, &__cpu_online_mask), _CPU_ATTR(possible, &__cpu_possible_mask), _CPU_ATTR(present, &__cpu_present_mask), }; /* * Print values for NR_CPUS and offlined cpus */ static ssize_t <API key>(struct device *dev, struct device_attribute *attr, char *buf) { int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1); return n; } static DEVICE_ATTR(kernel_max, 0444, <API key>, NULL); /* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */ unsigned int total_cpus; static ssize_t print_cpus_offline(struct device *dev, struct device_attribute *attr, char *buf) { int n = 0, len = PAGE_SIZE-2; cpumask_var_t offline; /* display offline cpus < nr_cpu_ids */ if (!alloc_cpumask_var(&offline, GFP_KERNEL)) return 
-ENOMEM; cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask); n = scnprintf(buf, len, "%*pbl", cpumask_pr_args(offline)); free_cpumask_var(offline); /* display offline cpus >= nr_cpu_ids */ if (total_cpus && nr_cpu_ids < total_cpus) { if (n && n < len) buf[n++] = ','; if (nr_cpu_ids == total_cpus-1) n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids); else n += snprintf(&buf[n], len - n, "%d-%d", nr_cpu_ids, total_cpus-1); } n += snprintf(&buf[n], len - n, "\n"); return n; } static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL); static ssize_t print_cpus_isolated(struct device *dev, struct device_attribute *attr, char *buf) { int n = 0, len = PAGE_SIZE-2; n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(cpu_isolated_map)); return n; } static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL); #ifdef CONFIG_NO_HZ_FULL static ssize_t <API key>(struct device *dev, struct device_attribute *attr, char *buf) { int n = 0, len = PAGE_SIZE-2; n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask)); return n; } static DEVICE_ATTR(nohz_full, 0444, <API key>, NULL); #endif static void cpu_device_release(struct device *dev) { /* * This is an empty function to prevent the driver core from spitting a * warning at us. Yes, I know this is directly opposite of what the * documentation for the driver core and kobjects say, and the author * of this code has already been publically ridiculed for doing * something as foolish as this. However, at this point in time, it is * the only way to handle the issue of statically allocated cpu * devices. The different architectures will have their cpu device * code reworked to properly handle this in the near future, so this * function will then be changed to correctly free up the memory held * by the cpu device. * * Never copy this way of doing things, or you too will be made fun of * on the linux-kernel list, you have been warned. 
*/ } #ifdef <API key> static ssize_t print_cpu_modalias(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t n; u32 i; n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:", CPU_FEATURE_TYPEVAL); for (i = 0; i < MAX_CPU_FEATURES; i++) if (cpu_have_feature(i)) { if (PAGE_SIZE < n + sizeof(",XXXX\n")) { WARN(1, "CPU features overflow page\n"); break; } n += sprintf(&buf[n], ",%04X", i); } buf[n++] = '\n'; return n; } static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env) { char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL); if (buf) { print_cpu_modalias(NULL, NULL, buf); add_uevent_var(env, "MODALIAS=%s", buf); kfree(buf); } return 0; } #endif /* * register_cpu - Setup a sysfs device for a CPU. * @cpu - cpu->hotpluggable field set to 1 will generate a control file in * sysfs for this CPU. * @num - CPU number to use when creating the device. * * Initialize and register the CPU device. */ int register_cpu(struct cpu *cpu, int num) { int error; cpu->node_id = cpu_to_node(num); memset(&cpu->dev, 0x00, sizeof(struct device)); cpu->dev.id = num; cpu->dev.bus = &cpu_subsys; cpu->dev.release = cpu_device_release; cpu->dev.offline_disabled = !cpu->hotpluggable; cpu->dev.offline = !cpu_online(num); cpu->dev.of_node = of_get_cpu_node(num, NULL); #ifdef <API key> cpu->dev.bus->uevent = cpu_uevent; #endif cpu->dev.groups = <API key>; if (cpu->hotpluggable) cpu->dev.groups = <API key>; error = device_register(&cpu->dev); if (error) return error; per_cpu(cpu_sys_devices, num) = &cpu->dev; <API key>(num, cpu_to_node(num)); return 0; } struct device *get_cpu_device(unsigned cpu) { if (cpu < nr_cpu_ids && cpu_possible(cpu)) return per_cpu(cpu_sys_devices, cpu); else return NULL; } EXPORT_SYMBOL_GPL(get_cpu_device); static void <API key>(struct device *dev) { kfree(dev); } static struct device * __cpu_device_create(struct device *parent, void *drvdata, const struct attribute_group **groups, const char *fmt, va_list args) { struct device *dev = NULL; 
int retval = -ENODEV; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { retval = -ENOMEM; goto error; } device_initialize(dev); dev->parent = parent; dev->groups = groups; dev->release = <API key>; dev_set_drvdata(dev, drvdata); retval = <API key>(&dev->kobj, fmt, args); if (retval) goto error; retval = device_add(dev); if (retval) goto error; return dev; error: put_device(dev); return ERR_PTR(retval); } struct device *cpu_device_create(struct device *parent, void *drvdata, const struct attribute_group **groups, const char *fmt, ...) { va_list vargs; struct device *dev; va_start(vargs, fmt); dev = __cpu_device_create(parent, drvdata, groups, fmt, vargs); va_end(vargs); return dev; } EXPORT_SYMBOL_GPL(cpu_device_create); #ifdef <API key> static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL); #endif static struct attribute *cpu_root_attrs[] = { #ifdef <API key> &dev_attr_probe.attr, &dev_attr_release.attr, #endif &cpu_attrs[0].attr.attr, &cpu_attrs[1].attr.attr, &cpu_attrs[2].attr.attr, &dev_attr_kernel_max.attr, &dev_attr_offline.attr, &dev_attr_isolated.attr, #ifdef CONFIG_NO_HZ_FULL &dev_attr_nohz_full.attr, #endif #ifdef <API key> &dev_attr_modalias.attr, #endif NULL }; static struct attribute_group cpu_root_attr_group = { .attrs = cpu_root_attrs, }; static const struct attribute_group *<API key>[] = { &cpu_root_attr_group, NULL, }; bool cpu_is_hotpluggable(unsigned cpu) { struct device *dev = get_cpu_device(cpu); return dev && container_of(dev, struct cpu, dev)->hotpluggable; } EXPORT_SYMBOL_GPL(cpu_is_hotpluggable); #ifdef <API key> static DEFINE_PER_CPU(struct cpu, cpu_devices); #endif static void __init <API key>(void) { #ifdef <API key> int i; <API key>(i) { if (register_cpu(&per_cpu(cpu_devices, i), i)) panic("Failed to register CPU device"); } #endif } void __init cpu_dev_init(void) { if (<API key>(&cpu_subsys, <API key>)) panic("Failed to register CPU subsystem"); <API key>(); }
<!DOCTYPE html>
<!-- DO NOT EDIT! Generated by referrer-policy/generic/tools/generate.py using referrer-policy/generic/template/test.release.html.template. -->
<!-- NOTE(review): auto-generated web-platform-tests case; change the template or
     generate.py rather than this file. "<API key>" tokens below are redaction
     placeholders left by the export pipeline, not part of the original markup. -->
<html>
<head>
<title>Referrer-Policy: Referrer Policy is set to 'origin-only'</title>
<!-- NOTE(review): "casses" in the description below looks like a typo for "cases",
     but it is generated content - fix it in the template, not here. -->
<meta name="description" content="Check that all subresources in all casses get only the origin portion of the referrer URL.">
<meta name="referrer" content="origin">
<link rel="author" title="Kristijan Burnik" href="burnik@chromium.org">
<link rel="help" href="https://w3c.github.io/webappsec/specs/referrer-policy/#<API key>">
<meta name="assert" content="The referrer URL is origin when a document served over http requires an http sub-resource via iframe-tag using the meta-referrer delivery method with <API key> and when the target request is same-origin.">
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<!-- TODO(kristijanburnik): Minify and merge both: -->
<script src="/referrer-policy/generic/common.js"></script>
<script src="/referrer-policy/generic/<API key>.js?pipe=sub"></script>
</head>
<body>
<script>
  // Drives one referrer-policy scenario: the (redacted) runner takes the
  // scenario-description object, the human-readable assertion text from the
  // meta[name=assert] tag, and a SanityChecker, then starts the test.
  <API key>(
    {
      "referrer_policy": "origin",
      "delivery_method": "meta-referrer",
      "redirection": "<API key>",
      "origin": "same-origin",
      "source_protocol": "http",
      "target_protocol": "http",
      "subresource": "iframe-tag",
      "subresource_path": "/referrer-policy/generic/subresource/document.py",
      "referrer_url": "origin"
    },
    document.querySelector("meta[name=assert]").content,
    new SanityChecker()
  ).start();
</script>
<div id="log"></div>
</body>
</html>
<!DOCTYPE html>
<!-- jQuery EasyUI demo page: DataList widget populated from a remote JSON source. -->
<html>
<head>
<meta charset="UTF-8">
<title>Binding to Remote Data - jQuery EasyUI Demo</title>
<!-- EasyUI theme, icon set, and shared demo stylesheets (relative to the demo tree). -->
<link rel="stylesheet" type="text/css" href="../../themes/default/easyui.css">
<link rel="stylesheet" type="text/css" href="../../themes/icon.css">
<link rel="stylesheet" type="text/css" href="../demo.css">
<script type="text/javascript" src="../../jquery.min.js"></script>
<script type="text/javascript" src="../../jquery.easyui.min.js"></script>
</head>
<body>
<h2>Binding to Remote Data</h2>
<p>The DataList is bound to a remote data.</p>
<div style="margin:20px 0"></div>
<!-- The easyui-datalist class triggers auto-initialization; data-options tells
     the widget to GET its rows from datalist_data1.json next to this page. -->
<div class="easyui-datalist" title="Remote Data" style="width:400px;height:250px" data-options=" url: 'datalist_data1.json', method: 'get' ">
</div>
</body>
</html>
<?php

/**
 * PHPUnit-style constraint that asserts a value contains valid JSON.
 *
 * NOTE(review): the class and parent-class names were redacted ("<API key>")
 * in this export; the original identifiers are unrecoverable from this view.
 */
class <API key> extends <API key>
{
    /**
     * Evaluates the constraint for parameter $other. Returns true if the
     * constraint is met, false otherwise.
     *
     * Validity is determined by decoding $other and inspecting
     * json_last_error(): any non-zero error code means invalid JSON.
     *
     * @param mixed $other Value or object to evaluate.
     * @return bool
     */
    protected function matches($other)
    {
        json_decode($other);
        if (json_last_error()) {
            return false;
        }

        return true;
    }

    /**
     * Returns the description of the failure
     *
     * The beginning of failure messages is "Failed asserting that" in most
     * cases. This method should return the second part of that sentence.
     *
     * @param mixed $other Evaluated value or object.
     * @return string
     */
    protected function failureDescription($other)
    {
        // Re-decode to repopulate json_last_error(), then map the error code
        // to a human-readable message via the (redacted) helper class.
        json_decode($other);
        $error = <API key>::determineJsonError(
            json_last_error()
        );

        return sprintf(
            '%s is valid JSON (%s)',
            $this->exporter->shortenedExport($other),
            $error
        );
    }

    /**
     * Returns a string representation of the constraint.
     *
     * @return string
     */
    public function toString()
    {
        return 'is valid JSON';
    }
}
# Homebrew formula for SSLyze, a Python-based SSL/TLS configuration scanner.
class Sslyze < Formula
  desc "SSL scanner"
  homepage "https://github.com/nabla-c0d3/sslyze"
  url "https://github.com/nabla-c0d3/sslyze/archive/release-0.11.tar.gz"
  sha256 "<SHA256-like>"
  version "0.11.0"

  bottle do
    cellar :any
    sha256 "<SHA256-like>" => :yosemite
    sha256 "<SHA256-like>" => :mavericks
    sha256 "<SHA256-like>" => :mountain_lion
  end

  # The vendored nassl build is 64-bit only, hence the arch restriction.
  depends_on :arch => :x86_64
  depends_on :python if MacOS.version <= :snow_leopard

  resource "nassl" do
    url "https://github.com/nabla-c0d3/nassl/archive/v0.11.tar.gz"
    sha256 "<SHA256-like>"
  end

  # NOTE(review): this URL was truncated to "https:" in this copy (the install
  # step below expects the openssl-1.0.2a source tarball) - restore the full
  # URL before using this formula.
  resource "openssl" do
    url "https:
    sha256 "<SHA256-like>"
  end

  resource "zlib" do
    url "http://zlib.net/zlib-1.2.8.tar.gz"
    sha256 "<SHA256-like>"
  end

  def install
    # openssl fails on parallel build. Related issues:
    # (issue links trimmed in this copy)
    ENV.deparallelize

    # Unpack the vendored openssl/zlib/nassl sources into the directory layout
    # that nassl's buildAll_unix.py expects.
    resource("openssl").stage do
      (buildpath/"nassl/openssl-1.0.2a").install Dir["*"]
    end
    resource("zlib").stage do
      (buildpath/"nassl/zlib-1.2.8").install Dir["*"]
    end
    resource("nassl").stage do
      (buildpath/"nassl").install Dir["*"]
    end

    cd "nassl" do
      system "python", "buildAll_unix.py"
      libexec.install "test/nassl"
    end

    # Install the scanner tree privately and expose a single "sslyze" symlink.
    libexec.install %w[plugins utils sslyze.py xml_out.xsd]
    bin.install_symlink libexec/"sslyze.py" => "sslyze"
  end

  test do
    assert_equal "0.11.0", shell_output("#{bin}/sslyze --version").strip
    # Network-dependent smoke test: scans google.com and expects completion banner.
    assert_match "SCAN COMPLETED", shell_output("#{bin}/sslyze --regular google.com")
  end
end
.btn-default, .btn-primary, .btn-success, .btn-info, .btn-warning, .btn-danger { text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2); -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075); box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075); } .btn-default:active, .btn-primary:active, .btn-success:active, .btn-info:active, .btn-warning:active, .btn-danger:active, .btn-default.active, .btn-primary.active, .btn-success.active, .btn-info.active, .btn-warning.active, .btn-danger.active { -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); } .btn:active, .btn.active { background-image: none; } .btn-default { text-shadow: 0 1px 0 #fff; background-image: -webkit-gradient(linear, left 0%, left 100%, from(#ffffff), to(#e0e0e0)); background-image: -<API key>(top, #ffffff 0%, #e0e0e0 100%); background-image: -moz-linear-gradient(top, #ffffff 0%, #e0e0e0 100%); background-image: linear-gradient(to bottom, #ffffff 0%, #e0e0e0 100%); background-repeat: repeat-x; border-color: #dbdbdb; border-color: #ccc; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled=false); } .btn-default:hover, .btn-default:focus { background-color: #e0e0e0; background-position: 0 -15px; } .btn-default:active, .btn-default.active { background-color: #e0e0e0; border-color: #dbdbdb; } .btn-primary { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#2d6ca2)); background-image: -<API key>(top, #428bca 0%, #2d6ca2 100%); background-image: -moz-linear-gradient(top, #428bca 0%, #2d6ca2 100%); background-image: linear-gradient(to bottom, #428bca 0%, #2d6ca2 100%); background-repeat: repeat-x; border-color: #2b669a; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff2d6ca2', GradientType=0); filter: 
progid:DXImageTransform.Microsoft.gradient(enabled=false); } .btn-primary:hover, .btn-primary:focus { background-color: #2d6ca2; background-position: 0 -15px; } .btn-primary:active, .btn-primary.active { background-color: #2d6ca2; border-color: #2b669a; } .btn-success { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#5cb85c), to(#419641)); background-image: -<API key>(top, #5cb85c 0%, #419641 100%); background-image: -moz-linear-gradient(top, #5cb85c 0%, #419641 100%); background-image: linear-gradient(to bottom, #5cb85c 0%, #419641 100%); background-repeat: repeat-x; border-color: #3e8f3e; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled=false); } .btn-success:hover, .btn-success:focus { background-color: #419641; background-position: 0 -15px; } .btn-success:active, .btn-success.active { background-color: #419641; border-color: #3e8f3e; } .btn-warning { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f0ad4e), to(#eb9316)); background-image: -<API key>(top, #f0ad4e 0%, #eb9316 100%); background-image: -moz-linear-gradient(top, #f0ad4e 0%, #eb9316 100%); background-image: linear-gradient(to bottom, #f0ad4e 0%, #eb9316 100%); background-repeat: repeat-x; border-color: #e38d13; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled=false); } .btn-warning:hover, .btn-warning:focus { background-color: #eb9316; background-position: 0 -15px; } .btn-warning:active, .btn-warning.active { background-color: #eb9316; border-color: #e38d13; } .btn-danger { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#d9534f), to(#c12e2a)); background-image: -<API key>(top, #d9534f 0%, #c12e2a 100%); background-image: -moz-linear-gradient(top, #d9534f 0%, #c12e2a 100%); 
background-image: linear-gradient(to bottom, #d9534f 0%, #c12e2a 100%); background-repeat: repeat-x; border-color: #b92c28; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled=false); } .btn-danger:hover, .btn-danger:focus { background-color: #c12e2a; background-position: 0 -15px; } .btn-danger:active, .btn-danger.active { background-color: #c12e2a; border-color: #b92c28; } .btn-info { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#5bc0de), to(#2aabd2)); background-image: -<API key>(top, #5bc0de 0%, #2aabd2 100%); background-image: -moz-linear-gradient(top, #5bc0de 0%, #2aabd2 100%); background-image: linear-gradient(to bottom, #5bc0de 0%, #2aabd2 100%); background-repeat: repeat-x; border-color: #28a4c9; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled=false); } .btn-info:hover, .btn-info:focus { background-color: #2aabd2; background-position: 0 -15px; } .btn-info:active, .btn-info.active { background-color: #2aabd2; border-color: #28a4c9; } .thumbnail, .img-thumbnail { -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075); box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075); } .dropdown-menu > li > a:hover, .dropdown-menu > li > a:focus { background-color: #e8e8e8; background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f5f5f5), to(#e8e8e8)); background-image: -<API key>(top, #f5f5f5 0%, #e8e8e8 100%); background-image: -moz-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%); background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%); background-repeat: repeat-x; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0); } .dropdown-menu > .active > a, .dropdown-menu > .active > a:hover, .dropdown-menu > .active > 
a:focus { background-color: #357ebd; background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#357ebd)); background-image: -<API key>(top, #428bca 0%, #357ebd 100%); background-image: -moz-linear-gradient(top, #428bca 0%, #357ebd 100%); background-image: linear-gradient(to bottom, #428bca 0%, #357ebd 100%); background-repeat: repeat-x; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0); } .navbar-default { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#ffffff), to(#f8f8f8)); background-image: -<API key>(top, #ffffff 0%, #f8f8f8 100%); background-image: -moz-linear-gradient(top, #ffffff 0%, #f8f8f8 100%); background-image: linear-gradient(to bottom, #ffffff 0%, #f8f8f8 100%); background-repeat: repeat-x; border-radius: 4px; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled=false); -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075); box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075); } .navbar-default .navbar-nav > .active > a { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#ebebeb), to(#f3f3f3)); background-image: -<API key>(top, #ebebeb 0%, #f3f3f3 100%); background-image: -moz-linear-gradient(top, #ebebeb 0%, #f3f3f3 100%); background-image: linear-gradient(to bottom, #ebebeb 0%, #f3f3f3 100%); background-repeat: repeat-x; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff3f3f3', GradientType=0); -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075); box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075); } .navbar-brand, .navbar-nav > li > a { text-shadow: 0 1px 0 rgba(255, 255, 255, 0.25); } .navbar-inverse { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#3c3c3c), 
to(#222222)); background-image: -<API key>(top, #3c3c3c 0%, #222222 100%); background-image: -moz-linear-gradient(top, #3c3c3c 0%, #222222 100%); background-image: linear-gradient(to bottom, #3c3c3c 0%, #222222 100%); background-repeat: repeat-x; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled=false); } .navbar-inverse .navbar-nav > .active > a { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#222222), to(#282828)); background-image: -<API key>(top, #222222 0%, #282828 100%); background-image: -moz-linear-gradient(top, #222222 0%, #282828 100%); background-image: linear-gradient(to bottom, #222222 0%, #282828 100%); background-repeat: repeat-x; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff222222', endColorstr='#ff282828', GradientType=0); -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25); box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25); } .navbar-inverse .navbar-brand, .navbar-inverse .navbar-nav > li > a { text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25); } .navbar-static-top, .navbar-fixed-top, .navbar-fixed-bottom { border-radius: 0; } .alert { text-shadow: 0 1px 0 rgba(255, 255, 255, 0.2); -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05); box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05); } .alert-success { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#dff0d8), to(#c8e5bc)); background-image: -<API key>(top, #dff0d8 0%, #c8e5bc 100%); background-image: -moz-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%); background-image: linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%); background-repeat: repeat-x; border-color: #b2dba1; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0); } .alert-info { background-image: 
-webkit-gradient(linear, left 0%, left 100%, from(#d9edf7), to(#b9def0)); background-image: -<API key>(top, #d9edf7 0%, #b9def0 100%); background-image: -moz-linear-gradient(top, #d9edf7 0%, #b9def0 100%); background-image: linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%); background-repeat: repeat-x; border-color: #9acfea; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0); } .alert-warning { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#fcf8e3), to(#f8efc0)); background-image: -<API key>(top, #fcf8e3 0%, #f8efc0 100%); background-image: -moz-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%); background-image: linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%); background-repeat: repeat-x; border-color: #f5e79e; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0); } .alert-danger { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f2dede), to(#e7c3c3)); background-image: -<API key>(top, #f2dede 0%, #e7c3c3 100%); background-image: -moz-linear-gradient(top, #f2dede 0%, #e7c3c3 100%); background-image: linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%); background-repeat: repeat-x; border-color: #dca7a7; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0); } .progress { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#ebebeb), to(#f5f5f5)); background-image: -<API key>(top, #ebebeb 0%, #f5f5f5 100%); background-image: -moz-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%); background-image: linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%); background-repeat: repeat-x; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0); } .progress-bar { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#3071a9)); 
background-image: -<API key>(top, #428bca 0%, #3071a9 100%); background-image: -moz-linear-gradient(top, #428bca 0%, #3071a9 100%); background-image: linear-gradient(to bottom, #428bca 0%, #3071a9 100%); background-repeat: repeat-x; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3071a9', GradientType=0); } .<API key> { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#5cb85c), to(#449d44)); background-image: -<API key>(top, #5cb85c 0%, #449d44 100%); background-image: -moz-linear-gradient(top, #5cb85c 0%, #449d44 100%); background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%); background-repeat: repeat-x; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0); } .progress-bar-info { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#5bc0de), to(#31b0d5)); background-image: -<API key>(top, #5bc0de 0%, #31b0d5 100%); background-image: -moz-linear-gradient(top, #5bc0de 0%, #31b0d5 100%); background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%); background-repeat: repeat-x; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0); } .<API key> { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f0ad4e), to(#ec971f)); background-image: -<API key>(top, #f0ad4e 0%, #ec971f 100%); background-image: -moz-linear-gradient(top, #f0ad4e 0%, #ec971f 100%); background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%); background-repeat: repeat-x; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0); } .progress-bar-danger { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#d9534f), to(#c9302c)); background-image: -<API key>(top, #d9534f 0%, #c9302c 100%); background-image: -moz-linear-gradient(top, #d9534f 0%, #c9302c 100%); 
background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%); background-repeat: repeat-x; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0); } .list-group { border-radius: 4px; -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075); box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075); } .list-group-item.active, .list-group-item.active:hover, .list-group-item.active:focus { text-shadow: 0 -1px 0 #3071a9; background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#3278b3)); background-image: -<API key>(top, #428bca 0%, #3278b3 100%); background-image: -moz-linear-gradient(top, #428bca 0%, #3278b3 100%); background-image: linear-gradient(to bottom, #428bca 0%, #3278b3 100%); background-repeat: repeat-x; border-color: #3278b3; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3278b3', GradientType=0); } .panel { -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05); box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05); } .panel-default > .panel-heading { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f5f5f5), to(#e8e8e8)); background-image: -<API key>(top, #f5f5f5 0%, #e8e8e8 100%); background-image: -moz-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%); background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%); background-repeat: repeat-x; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0); } .panel-primary > .panel-heading { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#428bca), to(#357ebd)); background-image: -<API key>(top, #428bca 0%, #357ebd 100%); background-image: -moz-linear-gradient(top, #428bca 0%, #357ebd 100%); background-image: linear-gradient(to bottom, #428bca 0%, #357ebd 100%); background-repeat: repeat-x; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', 
GradientType=0); } .panel-success > .panel-heading { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#dff0d8), to(#d0e9c6)); background-image: -<API key>(top, #dff0d8 0%, #d0e9c6 100%); background-image: -moz-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%); background-image: linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%); background-repeat: repeat-x; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0); } .panel-info > .panel-heading { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#d9edf7), to(#c4e3f3)); background-image: -<API key>(top, #d9edf7 0%, #c4e3f3 100%); background-image: -moz-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%); background-image: linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%); background-repeat: repeat-x; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0); } .panel-warning > .panel-heading { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#fcf8e3), to(#faf2cc)); background-image: -<API key>(top, #fcf8e3 0%, #faf2cc 100%); background-image: -moz-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%); background-image: linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%); background-repeat: repeat-x; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0); } .panel-danger > .panel-heading { background-image: -webkit-gradient(linear, left 0%, left 100%, from(#f2dede), to(#ebcccc)); background-image: -<API key>(top, #f2dede 0%, #ebcccc 100%); background-image: -moz-linear-gradient(top, #f2dede 0%, #ebcccc 100%); background-image: linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%); background-repeat: repeat-x; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0); } .well { background-image: -webkit-gradient(linear, left 0%, 
left 100%, from(#e8e8e8), to(#f5f5f5)); background-image: -<API key>(top, #e8e8e8 0%, #f5f5f5 100%); background-image: -moz-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%); background-image: linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%); background-repeat: repeat-x; border-color: #dcdcdc; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0); -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1); box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1); }
/* Test the `vqaddu32' ARM Neon intrinsic. */
/* This file was autogenerated by neon-testgen. */

/* NOTE(review): the DejaGnu directives below are load-bearing harness metadata
   and must not be edited; the "<API key>" token is a redaction placeholder
   (presumably the dg-require-effective-target keyword - confirm against the
   generator). */
/* { dg-do assemble } */
/* { <API key> arm_neon_ok } */
/* { dg-options "-save-temps -O0" } */
/* { dg-add-options arm_neon } */

#include "arm_neon.h"

/* Compile/assemble-only check: the arguments are deliberately left
   uninitialized; the dg-final scan-assembler directive below verifies only
   that vqadd_u32 emits a vqadd.u32 instruction. */
void test_vqaddu32 (void)
{
  uint32x2_t out_uint32x2_t;
  uint32x2_t arg0_uint32x2_t;
  uint32x2_t arg1_uint32x2_t;

  out_uint32x2_t = vqadd_u32 (arg0_uint32x2_t, arg1_uint32x2_t);
}

/* { dg-final { scan-assembler "vqadd\.u32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
/* { dg-final { cleanup-saved-temps } } */
<?php

/**
 * @see <API key>
 */
require_once 'Zend/Service/Yahoo/Result.php';

/**
 * Represents a single web-search result returned by the Yahoo search service.
 *
 * NOTE(review): the class and parent names are redaction placeholders
 * ("<API key>") in this export; by structure this extends the Result base
 * class pulled in by the require_once above.
 */
class <API key> extends <API key>
{
    /**
     * A summary of the result
     *
     * @var string
     */
    public $Summary;

    /**
     * The file type of the result (text, html, pdf, etc.)
     *
     * @var string
     */
    public $MimeType;

    /**
     * The modification time of the result (as a unix timestamp)
     *
     * @var string
     */
    public $ModificationDate;

    /**
     * The URL for the Yahoo cache of this page, if it exists
     *
     * @var string
     */
    public $CacheUrl;

    /**
     * The size of the cache entry
     *
     * @var int
     */
    public $CacheSize;

    /**
     * Web result namespace
     *
     * @var string
     */
    protected $_namespace = 'urn:yahoo:srch';

    /**
     * Initializes the web result
     *
     * Populates the simple fields via the parent constructor, then extracts
     * the optional Cache/Url and Cache/Size nodes with a namespace-aware
     * XPath query; CacheUrl/CacheSize remain unset when the Cache section is
     * absent from the response.
     *
     * @param  DOMElement $result
     * @return void
     */
    public function __construct(DOMElement $result)
    {
        $this->_fields = array('Summary', 'MimeType', 'ModificationDate');
        parent::__construct($result);

        $this->_xpath = new DOMXPath($result->ownerDocument);
        $this->_xpath->registerNamespace('yh', $this->_namespace);

        // check if the cache section exists
        $cacheUrl = $this->_xpath->query('./yh:Cache/yh:Url/text()', $result)->item(0);
        if ($cacheUrl instanceof DOMNode) {
            $this->CacheUrl = $cacheUrl->data;
        }

        $cacheSize = $this->_xpath->query('./yh:Cache/yh:Size/text()', $result)->item(0);
        if ($cacheSize instanceof DOMNode) {
            $this->CacheSize = (int) $cacheSize->data;
        }
    }
}
#if defined(CONFIG_ARCH_MSM8X60) #include <linux/msm_audio_8X60.h> #endif #ifndef __LINUX_MSM_AUDIO_H #define __LINUX_MSM_AUDIO_H #include <linux/types.h> #include <linux/ioctl.h> /* PCM Audio */ #define AUDIO_IOCTL_MAGIC 'a' #define AUDIO_START _IOW(AUDIO_IOCTL_MAGIC, 0, unsigned) #define AUDIO_STOP _IOW(AUDIO_IOCTL_MAGIC, 1, unsigned) #define AUDIO_FLUSH _IOW(AUDIO_IOCTL_MAGIC, 2, unsigned) #define AUDIO_GET_CONFIG _IOR(AUDIO_IOCTL_MAGIC, 3, unsigned) #define AUDIO_SET_CONFIG _IOW(AUDIO_IOCTL_MAGIC, 4, unsigned) #define AUDIO_GET_STATS _IOR(AUDIO_IOCTL_MAGIC, 5, unsigned) #define AUDIO_ENABLE_AUDPP _IOW(AUDIO_IOCTL_MAGIC, 6, unsigned) #define AUDIO_SET_ADRC _IOW(AUDIO_IOCTL_MAGIC, 7, unsigned) #define AUDIO_SET_EQ _IOW(AUDIO_IOCTL_MAGIC, 8, unsigned) #define AUDIO_SET_RX_IIR _IOW(AUDIO_IOCTL_MAGIC, 9, unsigned) #define AUDIO_SET_VOLUME _IOW(AUDIO_IOCTL_MAGIC, 10, unsigned) #define AUDIO_PAUSE _IOW(AUDIO_IOCTL_MAGIC, 11, unsigned) #define AUDIO_PLAY_DTMF _IOW(AUDIO_IOCTL_MAGIC, 12, unsigned) #define AUDIO_GET_EVENT _IOR(AUDIO_IOCTL_MAGIC, 13, unsigned) #define <API key> _IOW(AUDIO_IOCTL_MAGIC, 14, unsigned) #define AUDIO_REGISTER_PMEM _IOW(AUDIO_IOCTL_MAGIC, 15, unsigned) #define <API key> _IOW(AUDIO_IOCTL_MAGIC, 16, unsigned) #define AUDIO_ASYNC_WRITE _IOW(AUDIO_IOCTL_MAGIC, 17, unsigned) #define AUDIO_ASYNC_READ _IOW(AUDIO_IOCTL_MAGIC, 18, unsigned) #define AUDIO_SET_INCALL _IOW(AUDIO_IOCTL_MAGIC, 19, struct msm_voicerec_mode) #define <API key> _IOR(AUDIO_IOCTL_MAGIC, 20, unsigned) #define <API key> _IOWR(AUDIO_IOCTL_MAGIC, 21, \ struct msm_snd_device_list) #define <API key> _IOW(AUDIO_IOCTL_MAGIC, 22, unsigned) #define <API key> _IOW(AUDIO_IOCTL_MAGIC, 23, unsigned) #define AUDIO_ROUTE_STREAM _IOW(AUDIO_IOCTL_MAGIC, 24, \ struct <API key>) #define <API key> _IOR(AUDIO_IOCTL_MAGIC, 30, unsigned) #define <API key> _IOW(AUDIO_IOCTL_MAGIC, 31, unsigned) #define AUDIO_SWITCH_DEVICE _IOW(AUDIO_IOCTL_MAGIC, 32, unsigned) #define AUDIO_SET_MUTE _IOW(AUDIO_IOCTL_MAGIC, 
33, unsigned) #define AUDIO_UPDATE_ACDB _IOW(AUDIO_IOCTL_MAGIC, 34, unsigned) #define AUDIO_START_VOICE _IOW(AUDIO_IOCTL_MAGIC, 35, unsigned) #define AUDIO_STOP_VOICE _IOW(AUDIO_IOCTL_MAGIC, 36, unsigned) #define AUDIO_REINIT_ACDB _IOW(AUDIO_IOCTL_MAGIC, 39, unsigned) #define AUDIO_OUTPORT_FLUSH _IOW(AUDIO_IOCTL_MAGIC, 40, unsigned short) #define <API key> _IOW(AUDIO_IOCTL_MAGIC, 41, \ unsigned short) #define <API key> _IOR(AUDIO_IOCTL_MAGIC, 42, \ struct <API key>) /* Qualcomm extensions */ #define <API key> _IOW(AUDIO_IOCTL_MAGIC, 80, \ struct <API key>) #define <API key> _IOR(AUDIO_IOCTL_MAGIC, 81, \ struct <API key>) #define <API key> _IOR(AUDIO_IOCTL_MAGIC, 82, unsigned short) #define <API key> _IOR(AUDIO_IOCTL_MAGIC, 83, \ struct <API key>) #define AUDIO_SET_PAN _IOW(AUDIO_IOCTL_MAGIC, 84, unsigned) #define <API key> _IOW(AUDIO_IOCTL_MAGIC, 85, unsigned) #define AUDIO_SET_MBADRC _IOW(AUDIO_IOCTL_MAGIC, 86, unsigned) #define <API key> _IOW(AUDIO_IOCTL_MAGIC, 87, \ struct msm_vol_info) #define <API key> _IOW(AUDIO_IOCTL_MAGIC, 88, unsigned) #define AUDIO_ENABLE_AUDPRE _IOW(AUDIO_IOCTL_MAGIC, 89, unsigned) #define AUDIO_SET_AGC _IOW(AUDIO_IOCTL_MAGIC, 90, unsigned) #define AUDIO_SET_NS _IOW(AUDIO_IOCTL_MAGIC, 91, unsigned) #define AUDIO_SET_TX_IIR _IOW(AUDIO_IOCTL_MAGIC, 92, unsigned) #define AUDIO_GET_BUF_CFG _IOW(AUDIO_IOCTL_MAGIC, 93, \ struct msm_audio_buf_cfg) #define AUDIO_SET_BUF_CFG _IOW(AUDIO_IOCTL_MAGIC, 94, \ struct msm_audio_buf_cfg) #define AUDIO_SET_ACDB_BLK _IOW(AUDIO_IOCTL_MAGIC, 95, \ struct msm_acdb_cmd_device) #define AUDIO_GET_ACDB_BLK _IOW(AUDIO_IOCTL_MAGIC, 96, \ struct msm_acdb_cmd_device) #define <API key> 100 #define HANDSET_MIC 0x01 #define HANDSET_SPKR 0x02 #define HEADSET_MIC 0x03 #define HEADSET_SPKR_MONO 0x04 #define HEADSET_SPKR_STEREO 0x05 #define SPKR_PHONE_MIC 0x06 #define SPKR_PHONE_MONO 0x07 #define SPKR_PHONE_STEREO 0x08 #define BT_SCO_MIC 0x09 #define BT_SCO_SPKR 0x0A #define BT_A2DP_SPKR 0x0B #define TTY_HEADSET_MIC 0x0C 
#define TTY_HEADSET_SPKR 0x0D /* Default devices are not supported in a */ /* device switching context. Only supported */ /* for stream devices. */ /* DO NOT USE */ #define DEFAULT_TX 0x0E #define DEFAULT_RX 0x0F #define BT_A2DP_TX 0x10 #define <API key> 0x11 #define <API key> 0x12 #define <API key> 0x13 #define <API key> 0x14 #define I2S_RX 0x20 #define I2S_TX 0x21 #define ADRC_ENABLE 0x0001 #define EQ_ENABLE 0x0002 #define IIR_ENABLE 0x0004 #define <API key> 0x0008 #define MBADRC_ENABLE 0x0010 #define AGC_ENABLE 0x0001 #define NS_ENABLE 0x0002 #define TX_IIR_ENABLE 0x0004 #define FLUENCE_ENABLE 0x0008 #define VOC_REC_UPLINK 0x00 #define VOC_REC_DOWNLINK 0x01 #define VOC_REC_BOTH 0x02 struct msm_audio_config { uint32_t buffer_size; uint32_t buffer_count; uint32_t channel_count; uint32_t sample_rate; uint32_t type; uint32_t meta_field; uint32_t bits; uint32_t unused[3]; }; struct <API key> { uint32_t buffer_size; uint32_t buffer_count; }; struct msm_audio_buf_cfg{ uint32_t meta_info_enable; uint32_t frames_per_buf; }; struct msm_audio_stats { uint32_t byte_count; uint32_t sample_count; uint32_t unused[2]; }; struct msm_audio_pmem_info { int fd; void *vaddr; }; struct msm_audio_aio_buf { void *buf_addr; uint32_t buf_len; uint32_t data_len; void *private_data; unsigned short mfield_sz; /*only useful for data has meta field */ }; /* Audio routing */ #define SND_IOCTL_MAGIC 's' #define SND_MUTE_UNMUTED 0 #define SND_MUTE_MUTED 1 struct msm_mute_info { uint32_t mute; uint32_t path; }; struct msm_vol_info { uint32_t vol; uint32_t path; }; struct msm_voicerec_mode { uint32_t rec_mode; }; struct <API key> { uint32_t device; uint32_t ear_mute; uint32_t mic_mute; }; #define SND_SET_DEVICE _IOW(SND_IOCTL_MAGIC, 2, struct msm_device_config *) #define SND_METHOD_VOICE 0 struct <API key> { uint32_t device; uint32_t method; uint32_t volume; }; #define SND_SET_VOLUME _IOW(SND_IOCTL_MAGIC, 3, struct <API key> *) /* Returns the number of SND endpoints supported. 
*/ #define <API key> _IOR(SND_IOCTL_MAGIC, 4, unsigned *) struct msm_snd_endpoint { int id; /* input and output */ char name[64]; /* output only */ }; /* Takes an index between 0 and one less than the number returned by * <API key>, and returns the SND index and name of a * SND endpoint. On input, the .id field contains the number of the * endpoint, and on exit it contains the SND index, while .name contains * the description of the endpoint. */ #define SND_GET_ENDPOINT _IOWR(SND_IOCTL_MAGIC, 5, struct msm_snd_endpoint *) #define SND_AVC_CTL _IOW(SND_IOCTL_MAGIC, 6, unsigned *) #define SND_AGC_CTL _IOW(SND_IOCTL_MAGIC, 7, unsigned *) struct <API key> { uint32_t pcm_feedback; /* 0 - disable > 0 - enable */ uint32_t buffer_count; /* Number of buffers to allocate */ uint32_t buffer_size; /* Size of buffer for capturing of PCM samples */ }; #define AUDIO_EVENT_SUSPEND 0 #define AUDIO_EVENT_RESUME 1 #define <API key> 2 #define <API key> 3 #define <API key> 4 #define <API key> 5 #define <API key> 0 #define <API key> 1 struct <API key> { uint32_t codec_type; uint32_t chan_info; uint32_t sample_rate; uint32_t bit_stream_info; uint32_t bit_rate; uint32_t unused[3]; }; struct <API key> { uint32_t dec_id; uint32_t err_msg_indicator; uint32_t err_type; }; union <API key> { struct msm_audio_aio_buf aio_buf; struct <API key> stream_info; struct <API key> error_info; int reserved; }; struct msm_audio_event { int event_type; int timeout_ms; union <API key> event_payload; }; #define MSM_SNDDEV_CAP_RX 0x1 #define MSM_SNDDEV_CAP_TX 0x2 #define <API key> 0x4 struct msm_snd_device_info { uint32_t dev_id; uint32_t dev_cap; /* bitmask describe capability of device */ char dev_name[64]; }; struct msm_snd_device_list { uint32_t num_dev; /* Indicate number of device info to be retrieved */ struct msm_snd_device_info *list; }; struct msm_dtmf_config { uint16_t path; uint16_t dtmf_hi; uint16_t dtmf_low; uint16_t duration; uint16_t tx_gain; uint16_t rx_gain; uint16_t mixing; }; #define <API 
key> 0 #define <API key> 1 #define <API key> 2 #define <API key> 3 struct <API key> { uint32_t stream_type; uint32_t stream_id; uint32_t dev_id; }; #define AUDIO_MAX_EQ_BANDS 12 struct msm_audio_eq_band { uint16_t band_idx; /* The band index, 0 .. 11 */ uint32_t filter_type; /* Filter band type */ uint32_t center_freq_hz; /* Filter band center frequency */ uint32_t filter_gain; /* Filter band initial gain (dB) */ /* Range is +12 dB to -12 dB with 1dB increments. */ uint32_t q_factor; } __attribute__ ((packed)); struct <API key> { uint32_t enable; /* Number of consequtive bands specified */ uint32_t num_bands; struct msm_audio_eq_band eq_bands[AUDIO_MAX_EQ_BANDS]; } __attribute__ ((packed)); struct msm_acdb_cmd_device { uint32_t command_id; uint32_t device_id; uint32_t network_id; uint32_t sample_rate_id; /* Actual sample rate value */ uint32_t interface_id; /* See interface id's above */ uint32_t algorithm_block_id; /* See enumerations above */ uint32_t total_bytes; /* Length in bytes used by buffer */ uint32_t *phys_buf; /* Physical Address of data */ }; #endif
<?php _deprecated_file( __FILE__, '4.0', 'Tribe__View_Helpers.php' ); class <API key> extends Tribe__View_Helpers {}
var hat = require('../'); var assert = require('assert'); exports.rack = function () { var rack = hat.rack(4); var seen = {}; for (var i = 0; i < 8; i++) { var id = rack(); assert.ok(!seen[id], 'seen this id'); seen[id] = true; assert.ok(id.match(/^[0-9a-f]$/)); } assert.throws(function () { for (var i = 0; i < 10; i++) rack() }); }; exports.data = function () { var rack = hat.rack(64); var a = rack('a!'); var b = rack("it's a b!") var c = rack([ 'c', 'c', 'c' ]); assert.equal(rack.get(a), 'a!'); assert.equal(rack.get(b), "it's a b!"); assert.deepEqual(rack.get(c), [ 'c', 'c', 'c' ]); assert.equal(rack.hats[a], 'a!'); assert.equal(rack.hats[b], "it's a b!"); assert.deepEqual(rack.hats[c], [ 'c', 'c', 'c' ]); rack.set(a, 'AAA'); assert.equal(rack.get(a), 'AAA'); }; exports.expandBy = function () { var rack = hat.rack(4, 16, 4); var seen = {}; for (var i = 0; i < 8; i++) { var id = rack(); assert.ok(!seen[id], 'seen this id'); seen[id] = true; assert.ok(id.match(/^[0-9a-f]$/)); } for (var i = 0; i < 8; i++) { var id = rack(); assert.ok(!seen[id], 'seen this id'); seen[id] = true; assert.ok(id.match(/^[0-9a-f]{1,2}$/)); } for (var i = 0; i < 8; i++) { var id = rack(); assert.ok(!seen[id], 'seen this id'); seen[id] = true; assert.ok(id.match(/^[0-9a-f]{2}$/)); } };
/* ThemeRoller Humanity override style sheet for jQuery date picker v4.0.0. */
@import "ui.datepick.css";

.ui-widget-header a,
.ui-widget-header select {
	color: #ffffff; /* Set (.ui-widget-header a) colour from theme here */
}

.ui-widget-header a:hover {
	background-color: #f5f0e5; /* Set (.ui-state-hover) colours from theme here */
	color: #a46313;
}

.ui-widget-header select,
.ui-widget-header option {
	background-color: #cb842e; /* Set (.ui-widget-header) background colour from theme here */
}

.ui-state-highlight a {
	color: #060200; /* Set (.ui-state-highlight) colour from theme here */
}
#ifndef JSDOMWindowBase_h #define JSDOMWindowBase_h #include "JSDOMBinding.h" #include "JSDOMGlobalObject.h" #include <wtf/Forward.h> namespace WebCore { class DOMWindow; class Frame; class DOMWrapperWorld; class JSDOMWindow; class JSDOMWindowShell; class <API key>; class JSDOMWindowBase : public JSDOMGlobalObject { typedef JSDOMGlobalObject Base; protected: JSDOMWindowBase(JSC::VM&, JSC::Structure*, PassRefPtr<DOMWindow>, JSDOMWindowShell*); void finishCreation(JSC::VM&, JSDOMWindowShell*); static void destroy(JSCell*); public: void updateDocument(); DOMWindow* impl() const { return m_impl.get(); } <API key>* <API key>() const; // Called just before removing this window from the JSDOMWindowShell. void <API key>(); static const JSC::ClassInfo s_info; static JSC::Structure* createStructure(JSC::VM& vm, JSC::JSValue prototype) { return JSC::Structure::create(vm, 0, prototype, JSC::TypeInfo(JSC::GlobalObjectType, StructureFlags), &s_info); } static const JSC::<API key> <API key>; static bool supportsProfiling(const JSC::JSGlobalObject*); static bool <API key>(const JSC::JSGlobalObject*); static bool <API key>(const JSC::JSGlobalObject*); static bool <API key>(const JSC::JSGlobalObject*); void printErrorMessage(const String&) const; JSDOMWindowShell* shell() const; static JSC::VM* commonVM(); private: RefPtr<DOMWindow> m_impl; JSDOMWindowShell* m_shell; }; // Returns a JSDOMWindow or jsNull() // JSDOMGlobalObject* is ignored, accessing a window in any context will // use that DOMWindow's prototype chain. JSC::JSValue toJS(JSC::ExecState*, JSDOMGlobalObject*, DOMWindow*); JSC::JSValue toJS(JSC::ExecState*, DOMWindow*); // Returns JSDOMWindow or 0 JSDOMWindow* toJSDOMWindow(Frame*, DOMWrapperWorld*); JSDOMWindow* toJSDOMWindow(JSC::JSValue); } // namespace WebCore #endif // JSDOMWindowBase_h
#include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <mach/board.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/uaccess.h> #include <media/msm_gemini.h> #include "msm_gemini_sync.h" #include "msm_gemini_common.h" #define MSM_GEMINI_NAME "gemini" static int msm_gemini_open(struct inode *inode, struct file *filp) { int rc; struct msm_gemini_device *pgmn_dev = container_of(inode->i_cdev, struct msm_gemini_device, cdev); filp->private_data = pgmn_dev; GMN_DBG("%s:%d]\n", __func__, __LINE__); rc = __msm_gemini_open(pgmn_dev); GMN_DBG(KERN_INFO "%s:%d] %s open_count = %d\n", __func__, __LINE__, filp->f_path.dentry->d_name.name, pgmn_dev->open_count); return rc; } static int msm_gemini_release(struct inode *inode, struct file *filp) { int rc; struct msm_gemini_device *pgmn_dev = filp->private_data; GMN_DBG(KERN_INFO "%s:%d]\n", __func__, __LINE__); rc = <API key>(pgmn_dev); GMN_DBG(KERN_INFO "%s:%d] %s open_count = %d\n", __func__, __LINE__, filp->f_path.dentry->d_name.name, pgmn_dev->open_count); return rc; } static long msm_gemini_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int rc; struct msm_gemini_device *pgmn_dev = filp->private_data; GMN_DBG(KERN_INFO "%s:%d] cmd = %d\n", __func__, __LINE__, _IOC_NR(cmd)); rc = __msm_gemini_ioctl(pgmn_dev, cmd, arg); GMN_DBG("%s:%d]\n", __func__, __LINE__); return rc; } static const struct file_operations msm_gemini_fops = { .owner = THIS_MODULE, .open = msm_gemini_open, .release = msm_gemini_release, .unlocked_ioctl = msm_gemini_ioctl, }; static struct class *msm_gemini_class; static dev_t msm_gemini_devno; static struct msm_gemini_device *msm_gemini_device_p; static int msm_gemini_init(struct platform_device *pdev) { int rc = -1; struct device *dev; GMN_DBG("%s:%d]\n", __func__, __LINE__); msm_gemini_device_p = __msm_gemini_init(pdev); if (msm_gemini_device_p == NULL) { GMN_PR_ERR("%s: initialization 
failed\n", __func__); goto fail; } rc = alloc_chrdev_region(&msm_gemini_devno, 0, 1, MSM_GEMINI_NAME); if (rc < 0) { GMN_PR_ERR("%s: failed to allocate chrdev\n", __func__); goto fail_1; } if (!msm_gemini_class) { msm_gemini_class = class_create(THIS_MODULE, MSM_GEMINI_NAME); if (IS_ERR(msm_gemini_class)) { rc = PTR_ERR(msm_gemini_class); GMN_PR_ERR("%s: create device class failed\n", __func__); goto fail_2; } } dev = device_create(msm_gemini_class, NULL, MKDEV(MAJOR(msm_gemini_devno), MINOR(msm_gemini_devno)), NULL, "%s%d", MSM_GEMINI_NAME, 0); if (IS_ERR(dev)) { GMN_PR_ERR("%s: error creating device\n", __func__); rc = -ENODEV; goto fail_3; } cdev_init(&msm_gemini_device_p->cdev, &msm_gemini_fops); msm_gemini_device_p->cdev.owner = THIS_MODULE; msm_gemini_device_p->cdev.ops = (const struct file_operations *) &msm_gemini_fops; rc = cdev_add(&msm_gemini_device_p->cdev, msm_gemini_devno, 1); if (rc < 0) { GMN_PR_ERR("%s: error adding cdev\n", __func__); rc = -ENODEV; goto fail_4; } GMN_DBG("%s %s: success\n", __func__, MSM_GEMINI_NAME); return rc; fail_4: device_destroy(msm_gemini_class, msm_gemini_devno); fail_3: class_destroy(msm_gemini_class); fail_2: <API key>(msm_gemini_devno, 1); fail_1: __msm_gemini_exit(msm_gemini_device_p); fail: return rc; } static void msm_gemini_exit(void) { cdev_del(&msm_gemini_device_p->cdev); device_destroy(msm_gemini_class, msm_gemini_devno); class_destroy(msm_gemini_class); <API key>(msm_gemini_devno, 1); __msm_gemini_exit(msm_gemini_device_p); } static int __msm_gemini_probe(struct platform_device *pdev) { int rc; rc = msm_gemini_init(pdev); return rc; } static int __msm_gemini_remove(struct platform_device *pdev) { msm_gemini_exit(); return 0; } static struct platform_driver msm_gemini_driver = { .probe = __msm_gemini_probe, .remove = __msm_gemini_remove, .driver = { .name = "msm_gemini", .owner = THIS_MODULE, }, }; static int __init <API key>(void) { int rc; rc = <API key>(&msm_gemini_driver); return rc; } static void __exit <API 
key>(void) { <API key>(&msm_gemini_driver); } MODULE_DESCRIPTION("msm gemini jpeg driver"); MODULE_VERSION("msm gemini 0.1"); module_init(<API key>); module_exit(<API key>);
<?php namespace Zend\Feed\Writer\Extension\Threading\Renderer; use DOMDocument; use DOMElement; use Zend\Feed\Writer\Extension; class Entry extends Extension\AbstractRenderer { /** * Set to TRUE if a rendering method actually renders something. This * is used to prevent premature appending of a XML namespace declaration * until an element which requires it is actually appended. * * @var bool */ protected $called = false; /** * Render entry * * @return void */ public function render() { if (strtolower($this->getType()) == 'rss') { return; // Atom 1.0 only } $this->_setCommentLink($this->dom, $this->base); $this-><API key>($this->dom, $this->base); $this->_setCommentCount($this->dom, $this->base); if ($this->called) { $this->_appendNamespaces(); } } /** * Append entry namespaces * * @return void */ protected function _appendNamespaces() { $this->getRootElement()->setAttribute('xmlns:thr', 'http://purl.org/syndication/thread/1.0'); } /** * Set comment link * * @param DOMDocument $dom * @param DOMElement $root * @return void */ protected function _setCommentLink(DOMDocument $dom, DOMElement $root) { $link = $this->getDataContainer()->getCommentLink(); if (!$link) { return; } $clink = $this->dom->createElement('link'); $clink->setAttribute('rel', 'replies'); $clink->setAttribute('type', 'text/html'); $clink->setAttribute('href', $link); $count = $this->getDataContainer()->getCommentCount(); if ($count !== null) { $clink->setAttribute('thr:count', $count); } $root->appendChild($clink); $this->called = true; } /** * Set comment feed links * * @param DOMDocument $dom * @param DOMElement $root * @return void */ protected function <API key>(DOMDocument $dom, DOMElement $root) { $links = $this->getDataContainer()->getCommentFeedLinks(); if (!$links || empty($links)) { return; } foreach ($links as $link) { $flink = $this->dom->createElement('link'); $flink->setAttribute('rel', 'replies'); $flink->setAttribute('type', 'application/' . $link['type'] . 
'+xml'); $flink->setAttribute('href', $link['uri']); $count = $this->getDataContainer()->getCommentCount(); if ($count !== null) { $flink->setAttribute('thr:count', $count); } $root->appendChild($flink); $this->called = true; } } /** * Set entry comment count * * @param DOMDocument $dom * @param DOMElement $root * @return void */ protected function _setCommentCount(DOMDocument $dom, DOMElement $root) { $count = $this->getDataContainer()->getCommentCount(); if ($count === null) { return; } $tcount = $this->dom->createElement('thr:total'); $tcount->nodeValue = $count; $root->appendChild($tcount); $this->called = true; } }
import {Parser} from "./state"
import {SourceLocation} from "./locutil"

// An AST node. Only `type`, `start` and `end` are always present;
// `loc`, `sourceFile` and `range` are attached when the corresponding
// parser options are enabled.
export class Node {
  constructor(parser, pos, loc) {
    this.type = ""
    this.start = pos
    this.end = 0
    const opts = parser.options
    if (opts.locations)
      this.loc = new SourceLocation(parser, loc)
    if (opts.directSourceFile)
      this.sourceFile = opts.directSourceFile
    if (opts.ranges)
      this.range = [pos, 0]
  }
}

const pp = Parser.prototype

// Begin a node at the parser's current position.
pp.startNode = function() {
  return new Node(this, this.start, this.startLoc)
}

// Begin a node at an explicitly supplied position.
pp.startNodeAt = function(pos, loc) {
  return new Node(this, pos, loc)
}

// Shared finishing logic: stamp `type` and `end` onto the node and,
// when the relevant options are on, close out its location/range data.
// Called with the parser as `this`.
function sealNode(node, type, pos, loc) {
  node.type = type
  node.end = pos
  if (this.options.locations) node.loc.end = loc
  if (this.options.ranges) node.range[1] = pos
  return node
}

// Finish a node at the end of the last consumed token.
pp.finishNode = function(node, type) {
  return sealNode.call(this, node, type, this.lastTokEnd, this.lastTokEndLoc)
}

// Finish a node at an explicitly supplied position.
pp.finishNodeAt = function(node, type, pos, loc) {
  return sealNode.call(this, node, type, pos, loc)
}
"use strict";

class IgnorePlugin {
	constructor(resourceRegExp, contextRegExp) {
		this.resourceRegExp = resourceRegExp;
		this.contextRegExp = contextRegExp;
		// Bound once so the same reference can be attached to multiple hooks.
		this.checkIgnore = this.checkIgnore.bind(this);
	}

	/*
	 * Only returns true if a "resourceRegExp" exists
	 * and the resource given matches the regexp.
	 */
	checkResource(resource) {
		return this.resourceRegExp ? this.resourceRegExp.test(resource) : false;
	}

	/*
	 * Returns true if contextRegExp does not exist
	 * or if context matches the given regexp.
	 */
	checkContext(context) {
		return this.contextRegExp ? this.contextRegExp.test(context) : true;
	}

	/*
	 * Returns true if result should be ignored, false if it shouldn't.
	 * A missing result is treated as ignorable. Note that if
	 * "contextRegExp" is given, both the "resourceRegExp" and
	 * "contextRegExp" have to match.
	 */
	checkResult(result) {
		return !result ||
			(this.checkResource(result.request) && this.checkContext(result.context));
	}

	// before-resolve handler: swallow ignored results (callback with no
	// arguments), pass everything else through unchanged.
	checkIgnore(result, callback) {
		return this.checkResult(result) ? callback() : callback(null, result);
	}

	apply(compiler) {
		// NOTE(review): the two event names below appear redacted/corrupted in
		// this copy of the file ("<API key>"); reproduced verbatim — confirm
		// against the upstream source before relying on them.
		compiler.plugin("<API key>", (nmf) => {
			nmf.plugin("before-resolve", this.checkIgnore);
		});
		compiler.plugin("<API key>", (cmf) => {
			cmf.plugin("before-resolve", this.checkIgnore);
		});
	}
}

module.exports = IgnorePlugin;
#!/usr/bin/env python # Ansible is free software: you can redistribute it and/or modify # (at your option) any later version. # Ansible is distributed in the hope that it will be useful, # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # ansible-vault is a script that encrypts/decrypts YAML files. See from __future__ import (absolute_import, division, print_function) __metaclass__ = type import sys import time import os def main(args): path = os.path.abspath(args[1]) fo = open(path, 'r+') content = fo.readlines() content.append('faux editor added at %s\n' % time.time()) fo.seek(0) fo.write(''.join(content)) fo.close() return 0 if __name__ == '__main__': sys.exit(main(sys.argv[:]))
#include "xfs.h" #include "xfs_fs.h" #include "xfs_types.h" #include "xfs_bit.h" #include "xfs_inum.h" #include "xfs_log.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_mount.h" #include "xfs_bmap_btree.h" #include "xfs_alloc_btree.h" #include "xfs_ialloc_btree.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_inode_item.h" #include "xfs_btree.h" #include "xfs_error.h" #include "xfs_alloc.h" #include "xfs_ialloc.h" #include "xfs_fsops.h" #include "xfs_itable.h" #include "xfs_trans_space.h" #include "xfs_rtalloc.h" #include "xfs_rw.h" #include "xfs_filestream.h" #include "xfs_trace.h" /* * File system operations */ int xfs_fs_geometry( xfs_mount_t *mp, xfs_fsop_geom_t *geo, int new_version) { memset(geo, 0, sizeof(*geo)); geo->blocksize = mp->m_sb.sb_blocksize; geo->rtextsize = mp->m_sb.sb_rextsize; geo->agblocks = mp->m_sb.sb_agblocks; geo->agcount = mp->m_sb.sb_agcount; geo->logblocks = mp->m_sb.sb_logblocks; geo->sectsize = mp->m_sb.sb_sectsize; geo->inodesize = mp->m_sb.sb_inodesize; geo->imaxpct = mp->m_sb.sb_imax_pct; geo->datablocks = mp->m_sb.sb_dblocks; geo->rtblocks = mp->m_sb.sb_rblocks; geo->rtextents = mp->m_sb.sb_rextents; geo->logstart = mp->m_sb.sb_logstart; ASSERT(sizeof(geo->uuid)==sizeof(mp->m_sb.sb_uuid)); memcpy(geo->uuid, &mp->m_sb.sb_uuid, sizeof(mp->m_sb.sb_uuid)); if (new_version >= 2) { geo->sunit = mp->m_sb.sb_unit; geo->swidth = mp->m_sb.sb_width; } if (new_version >= 3) { geo->version = <API key>; geo->flags = (<API key>(&mp->m_sb) ? <API key> : 0) | (<API key>(&mp->m_sb) ? <API key> : 0) | (<API key>(&mp->m_sb) ? <API key> : 0) | (<API key>(&mp->m_sb) ? <API key> : 0) | (<API key>(&mp->m_sb) ? <API key> : 0) | (<API key>(&mp->m_sb) ? <API key> : 0) | (<API key>(&mp->m_sb) ? <API key> : 0) | (<API key>(&mp->m_sb) ? <API key> : 0) | (<API key>(&mp->m_sb) ? <API key> : 0) | (<API key>(&mp->m_sb) ? <API key> : 0) | (<API key>(&mp->m_sb) ? <API key> : 0) | (<API key>(&mp->m_sb) ? 
<API key> : 0); geo->logsectsize = <API key>(&mp->m_sb) ? mp->m_sb.sb_logsectsize : BBSIZE; geo->rtsectsize = mp->m_sb.sb_blocksize; geo->dirblocksize = mp->m_dirblksize; } if (new_version >= 4) { geo->flags |= (<API key>(&mp->m_sb) ? <API key> : 0); geo->logsunit = mp->m_sb.sb_logsunit; } return 0; } static int <API key>( xfs_mount_t *mp, /* mount point for filesystem */ xfs_growfs_data_t *in) /* growfs data input struct */ { xfs_agf_t *agf; xfs_agi_t *agi; xfs_agnumber_t agno; xfs_extlen_t agsize; xfs_extlen_t tmpsize; xfs_alloc_rec_t *arec; struct xfs_btree_block *block; xfs_buf_t *bp; int bucket; int dpct; int error; xfs_agnumber_t nagcount; xfs_agnumber_t nagimax = 0; xfs_rfsblock_t nb, nb_mod; xfs_rfsblock_t new; xfs_rfsblock_t nfree; xfs_agnumber_t oagcount; int pct; xfs_trans_t *tp; nb = in->newblocks; pct = in->imaxpct; if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100) return XFS_ERROR(EINVAL); if ((error = <API key>(&mp->m_sb, nb))) return error; dpct = pct - mp->m_sb.sb_imax_pct; bp = <API key>(mp, mp->m_ddev_targp, XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1), BBTOB(XFS_FSS_TO_BB(mp, 1)), 0); if (!bp) return EIO; xfs_buf_relse(bp); new = nb; /* use new as a temporary here */ nb_mod = do_div(new, mp->m_sb.sb_agblocks); nagcount = new + (nb_mod != 0); if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) { nagcount nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks; if (nb < mp->m_sb.sb_dblocks) return XFS_ERROR(EINVAL); } new = nb - mp->m_sb.sb_dblocks; oagcount = mp->m_sb.sb_agcount; /* allocate the new per-ag structures */ if (nagcount > oagcount) { error = <API key>(mp, nagcount, &nagimax); if (error) return error; } tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS); tp->t_flags |= XFS_TRANS_RESERVE; if ((error = xfs_trans_reserve(tp, <API key>(mp), <API key>(mp), 0, 0, 0))) { xfs_trans_cancel(tp, 0); return error; } /* * Write new AG headers to disk. 
Non-transactional, but written * synchronously so they are completed prior to the growfs transaction * being logged. */ nfree = 0; for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) { /* * AG freelist header block */ bp = xfs_buf_get(mp->m_ddev_targp, XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)), XFS_FSS_TO_BB(mp, 1), XBF_LOCK | XBF_MAPPED); agf = XFS_BUF_TO_AGF(bp); memset(agf, 0, mp->m_sb.sb_sectsize); agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC); agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION); agf->agf_seqno = cpu_to_be32(agno); if (agno == nagcount - 1) agsize = nb - (agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks); else agsize = mp->m_sb.sb_agblocks; agf->agf_length = cpu_to_be32(agsize); agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp)); agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp)); agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1); agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1); agf->agf_flfirst = 0; agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1); agf->agf_flcount = 0; tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp); agf->agf_freeblks = cpu_to_be32(tmpsize); agf->agf_longest = cpu_to_be32(tmpsize); error = xfs_bwrite(mp, bp); if (error) { goto error0; } /* * AG inode header block */ bp = xfs_buf_get(mp->m_ddev_targp, XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), XFS_FSS_TO_BB(mp, 1), XBF_LOCK | XBF_MAPPED); agi = XFS_BUF_TO_AGI(bp); memset(agi, 0, mp->m_sb.sb_sectsize); agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC); agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION); agi->agi_seqno = cpu_to_be32(agno); agi->agi_length = cpu_to_be32(agsize); agi->agi_count = 0; agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp)); agi->agi_level = cpu_to_be32(1); agi->agi_freecount = 0; agi->agi_newino = cpu_to_be32(NULLAGINO); agi->agi_dirino = cpu_to_be32(NULLAGINO); for (bucket = 0; bucket < <API key>; bucket++) agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); error = xfs_bwrite(mp, bp); if (error) { goto error0; } /* * BNO 
btree root block */ bp = xfs_buf_get(mp->m_ddev_targp, XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)), BTOBB(mp->m_sb.sb_blocksize), XBF_LOCK | XBF_MAPPED); block = XFS_BUF_TO_BLOCK(bp); memset(block, 0, mp->m_sb.sb_blocksize); block->bb_magic = cpu_to_be32(XFS_ABTB_MAGIC); block->bb_level = 0; block->bb_numrecs = cpu_to_be16(1); block->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK); block->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK); arec = XFS_ALLOC_REC_ADDR(mp, block, 1); arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp)); arec->ar_blockcount = cpu_to_be32( agsize - be32_to_cpu(arec->ar_startblock)); error = xfs_bwrite(mp, bp); if (error) { goto error0; } /* * CNT btree root block */ bp = xfs_buf_get(mp->m_ddev_targp, XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)), BTOBB(mp->m_sb.sb_blocksize), XBF_LOCK | XBF_MAPPED); block = XFS_BUF_TO_BLOCK(bp); memset(block, 0, mp->m_sb.sb_blocksize); block->bb_magic = cpu_to_be32(XFS_ABTC_MAGIC); block->bb_level = 0; block->bb_numrecs = cpu_to_be16(1); block->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK); block->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK); arec = XFS_ALLOC_REC_ADDR(mp, block, 1); arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp)); arec->ar_blockcount = cpu_to_be32( agsize - be32_to_cpu(arec->ar_startblock)); nfree += be32_to_cpu(arec->ar_blockcount); error = xfs_bwrite(mp, bp); if (error) { goto error0; } /* * INO btree root block */ bp = xfs_buf_get(mp->m_ddev_targp, XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)), BTOBB(mp->m_sb.sb_blocksize), XBF_LOCK | XBF_MAPPED); block = XFS_BUF_TO_BLOCK(bp); memset(block, 0, mp->m_sb.sb_blocksize); block->bb_magic = cpu_to_be32(XFS_IBT_MAGIC); block->bb_level = 0; block->bb_numrecs = 0; block->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK); block->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK); error = xfs_bwrite(mp, bp); if (error) { goto error0; } } <API key>(tp, nfree); /* * There are new blocks in the old last a.g. 
*/ if (new) { /* * Change the agi length. */ error = xfs_ialloc_read_agi(mp, tp, agno, &bp); if (error) { goto error0; } ASSERT(bp); agi = XFS_BUF_TO_AGI(bp); be32_add_cpu(&agi->agi_length, new); ASSERT(nagcount == oagcount || be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks); xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH); /* * Change agf length. */ error = xfs_alloc_read_agf(mp, tp, agno, 0, &bp); if (error) { goto error0; } ASSERT(bp); agf = XFS_BUF_TO_AGF(bp); be32_add_cpu(&agf->agf_length, new); ASSERT(be32_to_cpu(agf->agf_length) == be32_to_cpu(agi->agi_length)); xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH); /* * Free the new space. */ error = xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, agno, be32_to_cpu(agf->agf_length) - new), new); if (error) { goto error0; } } /* * Update changed superblock fields transactionally. These are not * seen by the rest of the world until the transaction commit applies * them atomically to the superblock. */ if (nagcount > oagcount) xfs_trans_mod_sb(tp, <API key>, nagcount - oagcount); if (nb > mp->m_sb.sb_dblocks) xfs_trans_mod_sb(tp, <API key>, nb - mp->m_sb.sb_dblocks); if (nfree) xfs_trans_mod_sb(tp, <API key>, nfree); if (dpct) xfs_trans_mod_sb(tp, <API key>, dpct); error = xfs_trans_commit(tp, 0); if (error) return error; /* New allocation groups fully initialized, so update mount struct */ if (nagimax) mp->m_maxagi = nagimax; if (mp->m_sb.sb_imax_pct) { __uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct; do_div(icount, 100); mp->m_maxicount = icount << mp->m_sb.sb_inopblog; } else mp->m_maxicount = 0; <API key>(mp); /* update secondary superblocks. 
*/ for (agno = 1; agno < nagcount; agno++) { error = xfs_read_buf(mp, mp->m_ddev_targp, XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)), XFS_FSS_TO_BB(mp, 1), 0, &bp); if (error) { xfs_warn(mp, "error %d reading secondary superblock for ag %d", error, agno); break; } xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, XFS_SB_ALL_BITS); /* * If we get an error writing out the alternate superblocks, * just issue a warning and continue. The real work is * already done and committed. */ if (!(error = xfs_bwrite(mp, bp))) { continue; } else { xfs_warn(mp, "write error %d updating secondary superblock for ag %d", error, agno); break; /* no point in continuing */ } } return 0; error0: xfs_trans_cancel(tp, XFS_TRANS_ABORT); return error; } static int <API key>( xfs_mount_t *mp, /* mount point for filesystem */ xfs_growfs_log_t *in) /* growfs log input struct */ { xfs_extlen_t nb; nb = in->newblocks; if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES)) return XFS_ERROR(EINVAL); if (nb == mp->m_sb.sb_logblocks && in->isint == (mp->m_sb.sb_logstart != 0)) return XFS_ERROR(EINVAL); /* * Moving the log is hard, need new interfaces to sync * the log first, hold off all activity while moving it. * Can have shorter or longer log in the same space, * or transform internal to external log or vice versa. 
*/ return XFS_ERROR(ENOSYS); } /* * protected versions of growfs function acquire and release locks on the mount * point - exported through ioctls: <API key>, XFS_IOC_FSGROWFSLOG, * XFS_IOC_FSGROWFSRT */ int xfs_growfs_data( xfs_mount_t *mp, xfs_growfs_data_t *in) { int error; if (!capable(CAP_SYS_ADMIN)) return XFS_ERROR(EPERM); if (!mutex_trylock(&mp->m_growlock)) return XFS_ERROR(EWOULDBLOCK); error = <API key>(mp, in); mutex_unlock(&mp->m_growlock); return error; } int xfs_growfs_log( xfs_mount_t *mp, xfs_growfs_log_t *in) { int error; if (!capable(CAP_SYS_ADMIN)) return XFS_ERROR(EPERM); if (!mutex_trylock(&mp->m_growlock)) return XFS_ERROR(EWOULDBLOCK); error = <API key>(mp, in); mutex_unlock(&mp->m_growlock); return error; } /* * exported through ioctl XFS_IOC_FSCOUNTS */ int xfs_fs_counts( xfs_mount_t *mp, xfs_fsop_counts_t *cnt) { <API key>(mp, XFS_ICSB_LAZY_COUNT); spin_lock(&mp->m_sb_lock); cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); cnt->freertx = mp->m_sb.sb_frextents; cnt->freeino = mp->m_sb.sb_ifree; cnt->allocino = mp->m_sb.sb_icount; spin_unlock(&mp->m_sb_lock); return 0; } /* * exported through ioctl XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS * * xfs_reserve_blocks is called to set m_resblks * in the in-core mount table. The number of unused reserved blocks * is kept in m_resblks_avail. * * Reserve the requested number of blocks if available. Otherwise return * as many as possible to satisfy the request. The actual number * reserved are returned in outval * * A null inval pointer indicates that only the current reserved blocks * available should be returned no settings are changed. 
*/ int xfs_reserve_blocks( xfs_mount_t *mp, __uint64_t *inval, xfs_fsop_resblks_t *outval) { __int64_t lcounter, delta, fdblks_delta; __uint64_t request; /* If inval is null, report current values and return */ if (inval == (__uint64_t *)NULL) { if (!outval) return EINVAL; outval->resblks = mp->m_resblks; outval->resblks_avail = mp->m_resblks_avail; return 0; } request = *inval; /* * With per-cpu counters, this becomes an interesting * problem. we needto work out if we are freeing or allocation * blocks first, then we can do the modification as necessary. * * We do this under the m_sb_lock so that if we are near * ENOSPC, we will hold out any changes while we work out * what to do. This means that the amount of free space can * change while we do this, so we need to retry if we end up * trying to reserve more space than is available. * * We also use the xfs_mod_incore_sb() interface so that we * don't have to care about whether per cpu counter are * enabled, disabled or even compiled in.... */ retry: spin_lock(&mp->m_sb_lock); <API key>(mp, 0); /* * If our previous reservation was larger than the current value, * then move any unused blocks back to the free pool. 
*/ fdblks_delta = 0; if (mp->m_resblks > request) { lcounter = mp->m_resblks_avail - request; if (lcounter > 0) { /* release unused blocks */ fdblks_delta = lcounter; mp->m_resblks_avail -= lcounter; } mp->m_resblks = request; } else { __int64_t free; free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); if (!free) goto out; /* ENOSPC and fdblks_delta = 0 */ delta = request - mp->m_resblks; lcounter = free - delta; if (lcounter < 0) { /* We can't satisfy the request, just get what we can */ mp->m_resblks += free; mp->m_resblks_avail += free; fdblks_delta = -free; } else { fdblks_delta = -delta; mp->m_resblks = request; mp->m_resblks_avail += delta; } } out: if (outval) { outval->resblks = mp->m_resblks; outval->resblks_avail = mp->m_resblks_avail; } spin_unlock(&mp->m_sb_lock); if (fdblks_delta) { /* * If we are putting blocks back here, m_resblks_avail is * already at its max so this will put it in the free pool. * * If we need space, we'll either succeed in getting it * from the free block count or we'll get an enospc. If * we get a ENOSPC, it means things changed while we were * calculating fdblks_delta and so we should try again to * see if there is anything left to reserve. * * Don't set the reserved flag here - we don't want to reserve * the extra reserve blocks from the reserve..... */ int error; error = <API key>(mp, XFS_SBS_FDBLOCKS, fdblks_delta, 0); if (error == ENOSPC) goto retry; } return 0; } /* * Dump a transaction into the log that contains no real change. This is needed * to be able to make the log dirty or stamp the current tail LSN into the log * during the covering operation. * * We cannot use an inode here for this - that will push dirty state back up * into the VFS and then periodic inode flushing will prevent log covering from * making progress. Hence we log a field in the superblock instead and use a * synchronous transaction to ensure the superblock is immediately unpinned * and can be written back. 
*/ int xfs_fs_log_dummy( xfs_mount_t *mp) { xfs_trans_t *tp; int error; tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP); error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0, <API key>); if (error) { xfs_trans_cancel(tp, 0); return error; } /* log the UUID because it is an unchanging field */ xfs_mod_sb(tp, XFS_SB_UUID); xfs_trans_set_sync(tp); return xfs_trans_commit(tp, 0); } int xfs_fs_goingdown( xfs_mount_t *mp, __uint32_t inflags) { switch (inflags) { case <API key>: { struct super_block *sb = freeze_bdev(mp->m_super->s_bdev); if (sb && !IS_ERR(sb)) { xfs_force_shutdown(mp, <API key>); thaw_bdev(sb->s_bdev, sb); } break; } case <API key>: xfs_force_shutdown(mp, <API key>); break; case <API key>: xfs_force_shutdown(mp, <API key> | <API key>); break; default: return XFS_ERROR(EINVAL); } return 0; }
#include <linux/init.h> /* miseon.kim 2012.06.21 FC8150 Device Release */ #include <mach/board_lge.h> /* miseon.kim 2012.06.21 FC8150 Device Release */ #include "fci_types.h" #include "fci_oal.h" #include "fci_tun.h" #include "fc8150_regs.h" #include "fci_hal.h" #define FC8150_FREQ_XTAL BBM_XTAL_FREQ //32MHZ static int fc8150_write(HANDLE hDevice, u8 addr, u8 data) { int res; u8 tmp; tmp = data; res = tuner_i2c_write(hDevice, addr, 1,&tmp, 1); return res; } static int fc8150_read(HANDLE hDevice, u8 addr, u8 *data) { int res; res = tuner_i2c_read(hDevice, addr, 1,data, 1); return res; } static int fc8150_bb_read(HANDLE hDevice, u16 addr, u8 *data) { int res; res = bbm_read(hDevice, addr, data); return res; } #if 0 static int fc8150_bb_write(HANDLE hDevice, u16 addr, u8 data) { int res; res = bbm_write(hDevice, addr, data); return res; } #endif static int fc8150_set_filter(HANDLE hDevice) { int i; u8 cal_mon = 0; #if (FC8150_FREQ_XTAL == 16000) fc8150_write(hDevice, 0x3B, 0x01); fc8150_write(hDevice, 0x3D, 0x20); fc8150_write(hDevice, 0x3B, 0x00); #elif (FC8150_FREQ_XTAL == 16384) fc8150_write(hDevice, 0x3B, 0x01); fc8150_write(hDevice, 0x3D, 0x21); fc8150_write(hDevice, 0x3B, 0x00); #elif (FC8150_FREQ_XTAL == 18000) fc8150_write(hDevice, 0x3B, 0x01); fc8150_write(hDevice, 0x3D, 0x24); fc8150_write(hDevice, 0x3B, 0x00); #elif (FC8150_FREQ_XTAL == 19200) fc8150_write(hDevice, 0x3B, 0x01); fc8150_write(hDevice, 0x3D, 0x26); fc8150_write(hDevice, 0x3B, 0x00); #elif (FC8150_FREQ_XTAL == 24000) fc8150_write(hDevice, 0x3B, 0x01); fc8150_write(hDevice, 0x3D, 0x30); fc8150_write(hDevice, 0x3B, 0x00); #elif (FC8150_FREQ_XTAL == 24576) fc8150_write(hDevice, 0x3B, 0x01); fc8150_write(hDevice, 0x3D, 0x31); fc8150_write(hDevice, 0x3B, 0x00); #elif (FC8150_FREQ_XTAL == 26000) fc8150_write(hDevice, 0x3B, 0x01); fc8150_write(hDevice, 0x3D, 0x34); fc8150_write(hDevice, 0x3B, 0x00); #elif (FC8150_FREQ_XTAL == 27000) fc8150_write(hDevice, 0x3B, 0x01); fc8150_write(hDevice, 0x3D, 0x36); 
fc8150_write(hDevice, 0x3B, 0x00); #elif (FC8150_FREQ_XTAL == 27120) fc8150_write(hDevice, 0x3B, 0x01); fc8150_write(hDevice, 0x3D, 0x36); fc8150_write(hDevice, 0x3B, 0x00); #elif (FC8150_FREQ_XTAL == 32000) fc8150_write(hDevice, 0x3B, 0x01); fc8150_write(hDevice, 0x3D, 0x40); fc8150_write(hDevice, 0x3B, 0x00); #elif (FC8150_FREQ_XTAL == 37400) fc8150_write(hDevice, 0x3B, 0x01); fc8150_write(hDevice, 0x3D, 0x4B); fc8150_write(hDevice, 0x3B, 0x00); #elif (FC8150_FREQ_XTAL == 38400) fc8150_write(hDevice, 0x3B, 0x01); fc8150_write(hDevice, 0x3D, 0x4D); fc8150_write(hDevice, 0x3B, 0x00); #else return BBM_NOK; #endif for(i=0; i<10; i++) { msWait(5); fc8150_read(hDevice, 0x33, &cal_mon); if( (cal_mon & 0xC0) == 0xC0) break; fc8150_write(hDevice, 0x32, 0x01); fc8150_write(hDevice, 0x32, 0x09); } fc8150_write(hDevice, 0x32, 0x01); return BBM_OK; } int fc8150_tuner_init(HANDLE hDevice, u32 band) { PRINTF(hDevice, "fc8150_init\n"); /* miseon.kim 2012.06.21 FC8150 Device Release */ if(lge_get_board_revno() >= HW_REV_C) { int i; int n_RFAGC_PD1_AVG,n_RFAGC_PD2_AVG; u8 RFPD_REF; u8 RFAGC_PD2[6],RFAGC_PD2_AVG,RFAGC_PD2_MAX,RFAGC_PD2_MIN; u8 RFAGC_PD1[6],RFAGC_PD1_AVG,RFAGC_PD1_MAX,RFAGC_PD1_MIN; int res = BBM_OK; fc8150_write(hDevice, 0x00, 0x00); fc8150_write(hDevice, 0x02, 0x81); fc8150_write(hDevice, 0x15, 0x02); fc8150_write(hDevice, 0x20, 0x33); fc8150_write(hDevice, 0x28, 0x62); fc8150_write(hDevice, 0x35, 0xAA); fc8150_write(hDevice, 0x38, 0x28); fc8150_write(hDevice, 0x3B, 0x01); fc8150_set_filter(hDevice); fc8150_write(hDevice, 0x3B, 0x00); fc8150_write(hDevice, 0x56, 0x01); fc8150_write(hDevice, 0x57, 0x86); fc8150_write(hDevice, 0x58, 0xA7); fc8150_write(hDevice, 0x59, 0x4D); fc8150_write(hDevice, 0x80, 0x17); fc8150_write(hDevice, 0xAB, 0x48); fc8150_write(hDevice, 0xA0, 0xC0); fc8150_write(hDevice, 0xD0, 0x00); fc8150_write(hDevice, 0xA5, 0x65); RFAGC_PD1[0] = 0; RFAGC_PD1[1] = 0; RFAGC_PD1[2] = 0; RFAGC_PD1[3] = 0; RFAGC_PD1[4] = 0; RFAGC_PD1[5] = 0; RFAGC_PD1_MAX 
= 0; RFAGC_PD1_MIN = 255; for (i = 0 ; i<6 ; i++){ fc8150_read(hDevice, 0xD8 , &RFAGC_PD1[i] ); if( RFAGC_PD1[i] >= RFAGC_PD1_MAX) RFAGC_PD1_MAX = RFAGC_PD1[i]; if( RFAGC_PD1[i] <= RFAGC_PD1_MIN) RFAGC_PD1_MIN = RFAGC_PD1[i]; } n_RFAGC_PD1_AVG = (RFAGC_PD1[0] + RFAGC_PD1[1] + RFAGC_PD1[2] + RFAGC_PD1[3] + RFAGC_PD1[4] + RFAGC_PD1[5] - RFAGC_PD1_MAX - RFAGC_PD1_MIN) /4; RFAGC_PD1_AVG = (unsigned char) n_RFAGC_PD1_AVG; fc8150_write(hDevice, 0x7F , RFAGC_PD1_AVG); RFAGC_PD2[0] = 0; RFAGC_PD2[1] = 0; RFAGC_PD2[2] = 0; RFAGC_PD2[3] = 0; RFAGC_PD2[4] = 0; RFAGC_PD2[5] = 0; RFAGC_PD2_MAX = 0; RFAGC_PD2_MIN = 255; for (i = 0 ; i<6 ; i++){ fc8150_read(hDevice, 0xD6 , &RFAGC_PD2[i] ); if( RFAGC_PD2[i] >= RFAGC_PD2_MAX) RFAGC_PD2_MAX = RFAGC_PD2[i]; if( RFAGC_PD2[i] <= RFAGC_PD2_MIN) RFAGC_PD2_MIN = RFAGC_PD2[i]; } n_RFAGC_PD2_AVG = (RFAGC_PD2[0] + RFAGC_PD2[1] + RFAGC_PD2[2] + RFAGC_PD2[3] + RFAGC_PD2[4] + RFAGC_PD2[5] - RFAGC_PD2_MAX - RFAGC_PD2_MIN) /4; RFAGC_PD2_AVG = (unsigned char) n_RFAGC_PD2_AVG; fc8150_write(hDevice, 0x7E , RFAGC_PD2_AVG); res = fc8150_read(hDevice, 0xD6, &RFPD_REF); if(0x86<=RFPD_REF) fc8150_write(hDevice, 0x7B, 0x8F); else if (RFPD_REF<0x86) fc8150_write(hDevice, 0x7B, 0x88); fc8150_write(hDevice, 0x79, 0x32); fc8150_write(hDevice, 0x7A, 0x2C); fc8150_write(hDevice, 0x7C, 0x10); fc8150_write(hDevice, 0x7D, 0x0C); fc8150_write(hDevice, 0x81, 0x0A); fc8150_write(hDevice, 0x84, 0x00); fc8150_write(hDevice, 0x02, 0x81); } else { u8 RFPD_REF, MIXPD_REF; fc8150_write(hDevice, 0x00, 0x00); fc8150_write(hDevice, 0x02, 0x81); fc8150_write(hDevice, 0x13, 0xF4); fc8150_write(hDevice, 0x30, 0x0A); fc8150_write(hDevice, 0x3B, 0x01); fc8150_set_filter(hDevice); fc8150_write(hDevice, 0x3B, 0x00); fc8150_write(hDevice, 0x34, 0x68); fc8150_write(hDevice, 0x36, 0xFF); fc8150_write(hDevice, 0x37, 0xFF); fc8150_write(hDevice, 0x39, 0x11); fc8150_write(hDevice, 0x3A, 0x00); fc8150_write(hDevice, 0x52, 0x20); fc8150_write(hDevice, 0x53, 0x5F); fc8150_write(hDevice, 
0x54, 0x00); fc8150_write(hDevice, 0x5E, 0x00); fc8150_write(hDevice, 0x63, 0x30); fc8150_write(hDevice, 0x56, 0x0F); fc8150_write(hDevice, 0x57, 0x1F); fc8150_write(hDevice, 0x58, 0x09); fc8150_write(hDevice, 0x59, 0x5E); fc8150_write(hDevice, 0x29, 0x00); fc8150_write(hDevice, 0x94, 0x00); fc8150_write(hDevice, 0x95, 0x01); fc8150_write(hDevice, 0x96, 0x11); fc8150_write(hDevice, 0x97, 0x21); fc8150_write(hDevice, 0x98, 0x31); fc8150_write(hDevice, 0x99, 0x32); fc8150_write(hDevice, 0x9A, 0x42); fc8150_write(hDevice, 0x9B, 0x52); fc8150_write(hDevice, 0x9C, 0x53); fc8150_write(hDevice, 0x9D, 0x63); fc8150_write(hDevice, 0x9E, 0x63); fc8150_write(hDevice, 0x9F, 0x63); fc8150_write(hDevice, 0x79, 0x2A); fc8150_write(hDevice, 0x7A, 0x24); fc8150_write(hDevice, 0x7B, 0xFF); fc8150_write(hDevice, 0x7C, 0x1B); fc8150_write(hDevice, 0x7D, 0x17); fc8150_write(hDevice, 0x84, 0x00); fc8150_write(hDevice, 0x85, 0x08); fc8150_write(hDevice, 0x86, 0x00); fc8150_write(hDevice, 0x87, 0x08); fc8150_write(hDevice, 0x88, 0x00); fc8150_write(hDevice, 0x89, 0x08); fc8150_write(hDevice, 0x8A, 0x00); fc8150_write(hDevice, 0x8B, 0x08); fc8150_write(hDevice, 0x8C, 0x00); fc8150_write(hDevice, 0x8D, 0x1D); fc8150_write(hDevice, 0x8E, 0x13); fc8150_write(hDevice, 0x8F, 0x1D); fc8150_write(hDevice, 0x90, 0x13); fc8150_write(hDevice, 0x91, 0x1D); fc8150_write(hDevice, 0x92, 0x13); fc8150_write(hDevice, 0x93, 0x1D); fc8150_write(hDevice, 0x80, 0x1F); fc8150_write(hDevice, 0x81, 0x0A); fc8150_write(hDevice, 0x82, 0x40); fc8150_write(hDevice, 0x83, 0x0A); fc8150_write(hDevice, 0xA0, 0xC0); fc8150_write(hDevice, 0x7E, 0x7F); fc8150_write(hDevice, 0x7F, 0x7F); fc8150_write(hDevice, 0xD0, 0x0A); fc8150_write(hDevice, 0xD2, 0x28); fc8150_write(hDevice, 0xD4, 0x28); fc8150_write(hDevice, 0xA0, 0x17); fc8150_write(hDevice, 0xD0, 0x00); fc8150_write(hDevice, 0xA1, 0x1D); msWait(100); fc8150_read(hDevice, 0xD6, &RFPD_REF); fc8150_read(hDevice, 0xD8, &MIXPD_REF); fc8150_write(hDevice, 0xA0, 0xD7); 
fc8150_write(hDevice, 0xD0, 0x0A); fc8150_write(hDevice, 0x7E, RFPD_REF); fc8150_write(hDevice, 0x7F, MIXPD_REF); fc8150_write(hDevice, 0xA0, 0xC0); fc8150_write(hDevice, 0xA1, 0x00); } return BBM_OK; } int fc8150_set_freq(HANDLE hDevice, band_type band, u32 rf_kHz) { int n_captune = 0; unsigned long f_diff, f_diff_shifted, n_val, k_val; unsigned long f_vco, f_comp; unsigned char r_val, data_0x56; unsigned char pre_shift_bits = 4; f_vco = (rf_kHz) << 2; if(f_vco < FC8150_FREQ_XTAL*40) r_val = 2; else r_val = 1; f_comp = FC8150_FREQ_XTAL / r_val; n_val = f_vco / f_comp; f_diff = f_vco - f_comp * n_val; f_diff_shifted = f_diff << (20 - pre_shift_bits); /* miseon.kim 2012.06.21 FC8150 Device Release */ if(lge_get_board_revno() >= HW_REV_C) { k_val = (f_diff_shifted ) / (f_comp >> pre_shift_bits); k_val = (k_val | 1); if(470000<rf_kHz && rf_kHz<=473143){ fc8150_write(hDevice, 0x1E, 0x04); fc8150_write(hDevice, 0x1F, 0x36); fc8150_write(hDevice, 0x14, 0x84); } else if(473143<rf_kHz && rf_kHz<=485143){ fc8150_write(hDevice, 0x1E, 0x03); fc8150_write(hDevice, 0x1F, 0x3E); fc8150_write(hDevice, 0x14, 0x84); } else if(485143<rf_kHz && rf_kHz<=551143){ fc8150_write(hDevice, 0x1E, 0x04); fc8150_write(hDevice, 0x1F, 0x36); fc8150_write(hDevice, 0x14, 0x84); } else if(551143<rf_kHz && rf_kHz<=563143){ fc8150_write(hDevice, 0x1E, 0x03); fc8150_write(hDevice, 0x1F, 0x3E); fc8150_write(hDevice, 0x14, 0xC4); } else if(563143<rf_kHz && rf_kHz<=593143){ fc8150_write(hDevice, 0x1E, 0x02); fc8150_write(hDevice, 0x1F, 0x3E); fc8150_write(hDevice, 0x14, 0xC4); } else if(593143<rf_kHz && rf_kHz<=659143){ fc8150_write(hDevice, 0x1E, 0x02); fc8150_write(hDevice, 0x1F, 0x36); fc8150_write(hDevice, 0x14, 0x84); } else if(659143<rf_kHz && rf_kHz<=767143){ fc8150_write(hDevice, 0x1E, 0x01); fc8150_write(hDevice, 0x1F, 0x36); fc8150_write(hDevice, 0x14, 0x84); } else if(767143<rf_kHz){ fc8150_write(hDevice, 0x1E, 0x00); fc8150_write(hDevice, 0x1F, 0x36); fc8150_write(hDevice, 0x14, 0x84); } 
else{ fc8150_write(hDevice, 0x1E, 0x05); fc8150_write(hDevice, 0x1F, 0x36); fc8150_write(hDevice, 0x14, 0x84); } data_0x56 = ((r_val==1)? 0 : 0x10) + (unsigned char)(k_val>>16); fc8150_write(hDevice, 0x56, data_0x56); fc8150_write(hDevice, 0x57, (unsigned char)((k_val>>8)&0xFF)); fc8150_write(hDevice, 0x58, (unsigned char)(((k_val)&0xFF))); fc8150_write(hDevice, 0x59, (unsigned char) n_val); if(rf_kHz<525000){ fc8150_write(hDevice, 0x55, 0x0E); } else if (525000<=rf_kHz && rf_kHz<600000){ fc8150_write(hDevice, 0x55, 0x0C); } else if (600000<=rf_kHz && rf_kHz<700000){ fc8150_write(hDevice, 0x55, 0x08); } else if (700000<rf_kHz){ fc8150_write(hDevice, 0x55, 0x06); } if(rf_kHz<=491143){ fc8150_write(hDevice, 0x79, 0x28); fc8150_write(hDevice, 0x7A, 0x24); } else if (491143<rf_kHz && rf_kHz<=659143){ fc8150_write(hDevice, 0x79, 0x2A); fc8150_write(hDevice, 0x7A, 0x26); } else if (659143<rf_kHz && rf_kHz<=773143){ fc8150_write(hDevice, 0x79, 0x2C); fc8150_write(hDevice, 0x7A, 0x28); } else if(773143<rf_kHz){ fc8150_write(hDevice, 0x79, 0x2F); fc8150_write(hDevice, 0x7A, 0x2B); } if(rf_kHz<=707143){ fc8150_write(hDevice, 0x54, 0x00); fc8150_write(hDevice, 0x53, 0x5F); } else if (707143<rf_kHz){ fc8150_write(hDevice, 0x54, 0x04); fc8150_write(hDevice, 0x53, 0x9F); } } else { k_val = (f_diff_shifted + (f_comp >> (pre_shift_bits+1))) / (f_comp >> pre_shift_bits); k_val = (k_val | 1); if(470000<rf_kHz && rf_kHz<=505000){ n_captune = 4; } else if(505000<rf_kHz && rf_kHz<=545000){ n_captune = 3; } else if(545000<rf_kHz && rf_kHz<=610000){ n_captune = 2; } else if(610000<rf_kHz && rf_kHz<=695000){ n_captune = 1; } else if(695000<rf_kHz){ n_captune = 0; } fc8150_write(hDevice, 0x1E, (unsigned char)n_captune); data_0x56 = ((r_val==1)? 
0 : 0x10) + (unsigned char)(k_val>>16); fc8150_write(hDevice, 0x56, data_0x56); fc8150_write(hDevice, 0x57, (unsigned char)((k_val>>8)&0xFF)); fc8150_write(hDevice, 0x58, (unsigned char)(((k_val)&0xFF))); fc8150_write(hDevice, 0x59, (unsigned char) n_val); if(rf_kHz<=600000){ fc8150_write(hDevice, 0x55, 0x07); } else { fc8150_write(hDevice, 0x55, 0x05); } if((490000<rf_kHz)&& (560000>=rf_kHz)){ fc8150_write(hDevice, 0x1F, 0x0E); } else { fc8150_write(hDevice, 0x1F, 0x06); } } return BBM_OK; } int fc8150_get_rssi(HANDLE hDevice, int *rssi) { int res = BBM_OK; u8 LNA, RFVGA, CSF,PREAMP_PGA = 0x00; int K = -101; int PGA = 0; res |= fc8150_read(hDevice, 0xA3, &LNA); res |= fc8150_read(hDevice, 0xA4, &RFVGA); res |= fc8150_read(hDevice, 0xA8, &CSF); res |= fc8150_bb_read(hDevice, 0x106E, &PREAMP_PGA); if(res != BBM_OK) return res; if (127<PREAMP_PGA) PGA = -1*((256-PREAMP_PGA)+1); else if(PREAMP_PGA<=127) PGA = PREAMP_PGA;
jQuery(function(e){e.datepicker.regional.he={closeText:"סגור",prevText:"&
// Package trust maintains an on-disk store of libtrust grant statements
// ("*.json" files under a base path) and the collapsed in-memory trust
// graph derived from them, optionally refetching base graphs over HTTP.
package trust

import (
	"crypto/x509"
	"errors"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"sync"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/libtrust/trustgraph"
)

// TrustStore holds the trust graph state.  The embedded RWMutex guards all
// mutable fields; reload() and fetch() both take the write lock.
type TrustStore struct {
	path          string            // directory containing the "*.json" grant files
	caPool        *x509.CertPool    // CA pool passed to LoadStatement when fetching (nil = no verification pool)
	graph         trustgraph.TrustGraph
	expiration    time.Time         // expiration of the currently loaded graph
	fetcher       *time.Timer       // pending fetch timer, nil when no fetch is scheduled
	fetchTime     time.Duration     // current backoff delay between fetch attempts
	autofetch     bool              // when true, reload/fetch keep rescheduling themselves
	httpClient    *http.Client
	baseEndpoints map[string]*url.URL
	sync.RWMutex
}

// defaultFetchtime represents the starting duration to wait between
// fetching sections of the graph. Unsuccessful fetches should
// increase time between fetching.
const defaultFetchtime = 45 * time.Second

var baseEndpoints = map[string]string{"official": "https://dvjy3tqbc323p.cloudfront.net/trust/official.json"}

// NewTrustStore builds a TrustStore rooted at path (made absolute), parses
// the built-in base endpoints, and performs an initial reload of any grant
// files already on disk.
func NewTrustStore(path string) (*TrustStore, error) {
	abspath, err := filepath.Abs(path)
	if err != nil {
		return nil, err
	}

	// Create base graph url map
	endpoints := map[string]*url.URL{}
	for name, endpoint := range baseEndpoints {
		u, err := url.Parse(endpoint)
		if err != nil {
			return nil, err
		}
		endpoints[name] = u
	}

	// Load grant files.  fetchTime starts at ~0 (one millisecond) so that,
	// with autofetch enabled and no grants on disk, the first fetch fires
	// almost immediately.
	t := &TrustStore{
		path:          abspath,
		caPool:        nil,
		httpClient:    &http.Client{},
		fetchTime:     time.Millisecond,
		baseEndpoints: endpoints,
	}

	if err := t.reload(); err != nil {
		return nil, err
	}

	return t, nil
}

// reload re-reads every "*.json" statement under t.path, collapses them into
// a fresh in-memory graph, and (when autofetch is on) schedules the next
// fetch for ~80% of the time remaining until the graph expires.
func (t *TrustStore) reload() error {
	t.Lock()
	defer t.Unlock()

	matches, err := filepath.Glob(filepath.Join(t.path, "*.json"))
	if err != nil {
		return err
	}
	statements := make([]*trustgraph.Statement, len(matches))
	for i, match := range matches {
		f, err := os.Open(match)
		if err != nil {
			return err
		}
		// Local statements are loaded without a CA pool (nil): they are
		// trusted because they are already on disk.
		statements[i], err = trustgraph.LoadStatement(f, nil)
		if err != nil {
			f.Close()
			return err
		}
		f.Close()
	}
	if len(statements) == 0 {
		if t.autofetch {
			logrus.Debugf("No grants, fetching")
			t.fetcher = time.AfterFunc(t.fetchTime, t.fetch)
		}
		return nil
	}

	grants, expiration, err := trustgraph.CollapseStatements(statements, true)
	if err != nil {
		return err
	}

	t.expiration = expiration
	t.graph = trustgraph.NewMemoryGraph(grants)
	logrus.Debugf("Reloaded graph with %d grants expiring at %s", len(grants), expiration)

	if t.autofetch {
		// Refresh before expiry: at 80% of the remaining lifetime, or after
		// the default interval if the graph has already expired.
		nextFetch := expiration.Sub(time.Now())
		if nextFetch < 0 {
			nextFetch = defaultFetchtime
		} else {
			nextFetch = time.Duration(0.8 * (float64)(nextFetch))
		}
		t.fetcher = time.AfterFunc(nextFetch, t.fetch)
	}

	return nil
}

// fetchBaseGraph downloads and parses a single base-graph statement from u,
// verifying it against t.caPool.  A 404 is reported as a distinct error.
func (t *TrustStore) fetchBaseGraph(u *url.URL) (*trustgraph.Statement, error) {
	req := &http.Request{
		Method:     "GET",
		URL:        u,
		Proto:      "HTTP/1.1",
		ProtoMajor: 1,
		ProtoMinor: 1,
		Header:     make(http.Header),
		Body:       nil,
		Host:       u.Host,
	}

	resp, err := t.httpClient.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == 404 {
		return nil, errors.New("base graph does not exist")
	}
	defer resp.Body.Close()

	return trustgraph.LoadStatement(resp.Body, t.caPool)
}

// fetch retrieves updated base graphs. This function cannot error, it
// should only log errors
func (t *TrustStore) fetch() {
	t.Lock()
	defer t.Unlock()

	if t.autofetch && t.fetcher == nil {
		// Do nothing ??
		return
	}

	fetchCount := 0
	for bg, ep := range t.baseEndpoints {
		statement, err := t.fetchBaseGraph(ep)
		if err != nil {
			logrus.Infof("Trust graph fetch failed: %s", err)
			continue
		}
		b, err := statement.Bytes()
		if err != nil {
			logrus.Infof("Bad trust graph statement: %s", err)
			continue
		}
		// TODO check if value differs
		if err := ioutil.WriteFile(path.Join(t.path, bg+".json"), b, 0600); err != nil {
			logrus.Infof("Error writing trust graph statement: %s", err)
		}
		fetchCount++
	}
	logrus.Debugf("Fetched %d base graphs at %s", fetchCount, time.Now())

	if fetchCount > 0 {
		// At least one graph landed on disk: reload asynchronously (reload
		// takes the same lock we hold, hence the goroutine) and reset the
		// backoff.
		go func() {
			if err := t.reload(); err != nil {
				logrus.Infof("Reload of trust graph failed: %s", err)
			}
		}()
		t.fetchTime = defaultFetchtime
		t.fetcher = nil
	} else if t.autofetch {
		// Nothing fetched: back off by 1.5x (capped) and try again later.
		maxTime := 10 * defaultFetchtime
		t.fetchTime = time.Duration(1.5 * (float64)(t.fetchTime+time.Second))
		if t.fetchTime > maxTime {
			t.fetchTime = maxTime
		}
		t.fetcher = time.AfterFunc(t.fetchTime, t.fetch)
	}
}
/** * Module dependencies. */ var utils = require('./../utils') , cookie = require('cookie'); /** * Cookie parser: * * Parse _Cookie_ header and populate `req.cookies` * with an object keyed by the cookie names. Optionally * you may enabled signed cookie support by passing * a `secret` string, which assigns `req.secret` so * it may be used by other middleware. * * Examples: * * connect() * .use(connect.cookieParser('optional secret string')) * .use(function(req, res, next){ * res.end(JSON.stringify(req.cookies)); * }) * * @param {String} secret * @return {Function} * @api public */ module.exports = function cookieParser(secret){ return function cookieParser(req, res, next) { if (req.cookies) return next(); var cookies = req.headers.cookie; req.secret = secret; req.cookies = {}; req.signedCookies = {}; if (cookies) { try { req.cookies = cookie.parse(cookies); if (secret) { req.signedCookies = utils.parseSignedCookies(req.cookies, secret); req.signedCookies = utils.parseJSONCookies(req.signedCookies); } req.cookies = utils.parseJSONCookies(req.cookies); } catch (err) { err.status = 400; return next(err); } } next(); }; };
/*
 * YUI language resource bundle: Spanish (Ecuador, "es-EC") strings —
 * abbreviated/full weekday and month names, AM/PM markers, and
 * strftime-style default date/time format patterns.
 *
 * NOTE(review): the module and bundle identifiers were redacted to the
 * placeholder "<API key>" by an upstream scrubbing pass; the real ids
 * (presumably a "lang/datatype-date-format_es-EC"-style name — TODO
 * confirm) must be restored for YUI.Intl to resolve this bundle.
 */
YUI.add("lang/<API key>",function(a){a.Intl.add("<API key>","es-EC",{"a":["dom","lun","mar","mié","jue","vie","sáb"],"A":["domingo","lunes","martes","miércoles","jueves","viernes","sábado"],"b":["ene","feb","mar","abr","may","jun","jul","ago","sep","oct","nov","dic"],"B":["enero","febrero","marzo","abril","mayo","junio","julio","agosto","septiembre","octubre","noviembre","diciembre"],"c":"%a, %d %b %Y %H:%M:%S %Z","p":["A.M.","P.M."],"P":["a.m.","p.m."],"x":"%d/%m/%y","X":"%H:%M:%S"});},"@VERSION@");
package com.facebook.datasource; /** * Base implementation of {@link DataSubscriber} that ensures that the data source is closed when * the subscriber has finished with it. * <p> * Sample usage: * <pre> * <code> * dataSource.subscribe( * new BaseDataSubscriber() { * {@literal @}Override * public void onNewResultImpl(DataSource dataSource) { * // Store image ref to be released later. * mCloseableImageRef = dataSource.getResult(); * // Use the image. * updateImage(mCloseableImageRef); * // No need to do any cleanup of the data source. * } * * {@literal @}Override * public void onFailureImpl(DataSource dataSource) { * // No cleanup of the data source required here. * } * }); * </code> * </pre> */ public abstract class BaseDataSubscriber<T> implements DataSubscriber<T> { @Override public void onNewResult(DataSource<T> dataSource) { try { onNewResultImpl(dataSource); } finally { if (dataSource.isFinished()) { dataSource.close(); } } } @Override public void onFailure(DataSource<T> dataSource) { try { onFailureImpl(dataSource); } finally { dataSource.close(); } } @Override public void onCancellation(DataSource<T> dataSource) { } @Override public void onProgressUpdate(DataSource<T> dataSource) { } protected abstract void onNewResultImpl(DataSource<T> dataSource); protected abstract void onFailureImpl(DataSource<T> dataSource); }
/* * Authors: Thomas Hellstrom <<API key>> */ #include "ttm/ttm_module.h" #include "ttm/ttm_bo_driver.h" #include "ttm/ttm_placement.h" #include <linux/jiffies.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/file.h> #include <linux/module.h> #include <linux/atomic.h> #define TTM_ASSERT_LOCKED(param) #define TTM_DEBUG(fmt, arg...) #define TTM_BO_HASH_ORDER 13 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo); static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); static void <API key>(struct kobject *kobj); static struct attribute ttm_bo_count = { .name = "bo_count", .mode = S_IRUGO }; static inline int <API key>(uint32_t flags, uint32_t *mem_type) { int i; for (i = 0; i <= TTM_PL_PRIV5; i++) if (flags & (1 << i)) { *mem_type = i; return 0; } return -EINVAL; } static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) { struct <API key> *man = &bdev->man[mem_type]; printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type); printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type); printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags); printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset); printk(KERN_ERR TTM_PFX " size: %llu\n", man->size); printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n", man->available_caching); printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n", man->default_caching); if (mem_type != TTM_PL_SYSTEM) (*man->func->debug)(man, TTM_PFX); } static void <API key>(struct ttm_buffer_object *bo, struct ttm_placement *placement) { int i, ret, mem_type; printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n", bo, bo->mem.num_pages, bo->mem.size >> 10, bo->mem.size >> 20); for (i = 0; i < placement->num_placement; i++) { ret = <API key>(placement->placement[i], &mem_type); if (ret) return; printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n", i, placement->placement[i], mem_type); ttm_mem_type_debug(bo->bdev, mem_type); } } static ssize_t 
ttm_bo_global_show(struct kobject *kobj, struct attribute *attr, char *buffer) { struct ttm_bo_global *glob = container_of(kobj, struct ttm_bo_global, kobj); return snprintf(buffer, PAGE_SIZE, "%lu\n", (unsigned long) atomic_read(&glob->bo_count)); } static struct attribute *ttm_bo_global_attrs[] = { &ttm_bo_count, NULL }; static const struct sysfs_ops ttm_bo_global_ops = { .show = &ttm_bo_global_show }; static struct kobj_type <API key> = { .release = &<API key>, .sysfs_ops = &ttm_bo_global_ops, .default_attrs = ttm_bo_global_attrs }; static inline uint32_t ttm_bo_type_flags(unsigned type) { return 1 << (type); } static void ttm_bo_release_list(struct kref *list_kref) { struct ttm_buffer_object *bo = container_of(list_kref, struct ttm_buffer_object, list_kref); struct ttm_bo_device *bdev = bo->bdev; BUG_ON(atomic_read(&bo->list_kref.refcount)); BUG_ON(atomic_read(&bo->kref.refcount)); BUG_ON(atomic_read(&bo->cpu_writers)); BUG_ON(bo->sync_obj != NULL); BUG_ON(bo->mem.mm_node != NULL); BUG_ON(!list_empty(&bo->lru)); BUG_ON(!list_empty(&bo->ddestroy)); if (bo->ttm) ttm_tt_destroy(bo->ttm); atomic_dec(&bo->glob->bo_count); if (bo->destroy) bo->destroy(bo); else { ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size); kfree(bo); } } int <API key>(struct ttm_buffer_object *bo, bool interruptible) { if (interruptible) { return <API key>(bo->event_queue, atomic_read(&bo->reserved) == 0); } else { wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0); return 0; } } EXPORT_SYMBOL(<API key>); void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; struct <API key> *man; BUG_ON(!atomic_read(&bo->reserved)); if (!(bo->mem.placement & <API key>)) { BUG_ON(!list_empty(&bo->lru)); man = &bdev->man[bo->mem.mem_type]; list_add_tail(&bo->lru, &man->lru); kref_get(&bo->list_kref); if (bo->ttm != NULL) { list_add_tail(&bo->swap, &bo->glob->swap_lru); kref_get(&bo->list_kref); } } } int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) { 
int put_count = 0; if (!list_empty(&bo->swap)) { list_del_init(&bo->swap); ++put_count; } if (!list_empty(&bo->lru)) { list_del_init(&bo->lru); ++put_count; } /* * TODO: Add a driver hook to delete from * driver-specific LRU's here. */ return put_count; } int <API key>(struct ttm_buffer_object *bo, bool interruptible, bool no_wait, bool use_sequence, uint32_t sequence) { struct ttm_bo_global *glob = bo->glob; int ret; while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { /** * Deadlock avoidance for multi-bo reserving. */ if (use_sequence && bo->seq_valid) { /** * We've already reserved this one. */ if (unlikely(sequence == bo->val_seq)) return -EDEADLK; /** * Already reserved by a thread that will not back * off for us. We need to back off. */ if (unlikely(sequence - bo->val_seq < (1 << 31))) return -EAGAIN; } if (no_wait) return -EBUSY; spin_unlock(&glob->lru_lock); ret = <API key>(bo, interruptible); spin_lock(&glob->lru_lock); if (unlikely(ret)) return ret; } if (use_sequence) { /** * Wake up waiters that may need to recheck for deadlock, * if we decreased the sequence number. */ if (unlikely((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)) wake_up_all(&bo->event_queue); bo->val_seq = sequence; bo->seq_valid = true; } else { bo->seq_valid = false; } return 0; } EXPORT_SYMBOL(ttm_bo_reserve); static void ttm_bo_ref_bug(struct kref *list_kref) { BUG(); } void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, bool never_free) { kref_sub(&bo->list_kref, count, (never_free) ? 
ttm_bo_ref_bug : ttm_bo_release_list); } int ttm_bo_reserve(struct ttm_buffer_object *bo, bool interruptible, bool no_wait, bool use_sequence, uint32_t sequence) { struct ttm_bo_global *glob = bo->glob; int put_count = 0; int ret; spin_lock(&glob->lru_lock); ret = <API key>(bo, interruptible, no_wait, use_sequence, sequence); if (likely(ret == 0)) put_count = ttm_bo_del_from_lru(bo); spin_unlock(&glob->lru_lock); ttm_bo_list_ref_sub(bo, put_count, true); return ret; } void <API key>(struct ttm_buffer_object *bo) { ttm_bo_add_to_lru(bo); atomic_set(&bo->reserved, 0); wake_up_all(&bo->event_queue); } void ttm_bo_unreserve(struct ttm_buffer_object *bo) { struct ttm_bo_global *glob = bo->glob; spin_lock(&glob->lru_lock); <API key>(bo); spin_unlock(&glob->lru_lock); } EXPORT_SYMBOL(ttm_bo_unreserve); /* * Call bo->mutex locked. */ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_global *glob = bo->glob; int ret = 0; uint32_t page_flags = 0; TTM_ASSERT_LOCKED(&bo->mutex); bo->ttm = NULL; if (bdev->need_dma32) page_flags |= TTM_PAGE_FLAG_DMA32; switch (bo->type) { case ttm_bo_type_device: if (zero_alloc) page_flags |= <API key>; case ttm_bo_type_kernel: bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, page_flags, glob->dummy_read_page); if (unlikely(bo->ttm == NULL)) ret = -ENOMEM; break; case ttm_bo_type_user: bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, page_flags | TTM_PAGE_FLAG_USER, glob->dummy_read_page); if (unlikely(bo->ttm == NULL)) { ret = -ENOMEM; break; } ret = ttm_tt_set_user(bo->ttm, current, bo->buffer_start, bo->num_pages); if (unlikely(ret != 0)) { ttm_tt_destroy(bo->ttm); bo->ttm = NULL; } break; default: printk(KERN_ERR TTM_PFX "Illegal buffer object type\n"); ret = -EINVAL; break; } return ret; } static int <API key>(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem, bool evict, bool interruptible, bool no_wait_reserve, bool no_wait_gpu) { struct 
ttm_bo_device *bdev = bo->bdev; bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem); bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem); struct <API key> *old_man = &bdev->man[bo->mem.mem_type]; struct <API key> *new_man = &bdev->man[mem->mem_type]; int ret = 0; if (old_is_pci || new_is_pci || ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) { ret = ttm_mem_io_lock(old_man, true); if (unlikely(ret != 0)) goto out_err; <API key>(bo); ttm_mem_io_unlock(old_man); } /* * Create and bind a ttm if required. */ if (!(new_man->flags & <API key>)) { if (bo->ttm == NULL) { bool zero = !(old_man->flags & <API key>); ret = ttm_bo_add_ttm(bo, zero); if (ret) goto out_err; } ret = <API key>(bo->ttm, mem->placement); if (ret) goto out_err; if (mem->mem_type != TTM_PL_SYSTEM) { ret = ttm_tt_bind(bo->ttm, mem); if (ret) goto out_err; } if (bo->mem.mem_type == TTM_PL_SYSTEM) { if (bdev->driver->move_notify) bdev->driver->move_notify(bo, mem); bo->mem = *mem; mem->mm_node = NULL; goto moved; } } if (bdev->driver->move_notify) bdev->driver->move_notify(bo, mem); if (!(old_man->flags & <API key>) && !(new_man->flags & <API key>)) ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem); else if (bdev->driver->move) ret = bdev->driver->move(bo, evict, interruptible, no_wait_reserve, no_wait_gpu, mem); else ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem); if (ret) goto out_err; moved: if (bo->evicted) { ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); if (ret) printk(KERN_ERR TTM_PFX "Can not flush read caches\n"); bo->evicted = false; } if (bo->mem.mm_node) { bo->offset = (bo->mem.start << PAGE_SHIFT) + bdev->man[bo->mem.mem_type].gpu_offset; bo->cur_placement = bo->mem.placement; } else bo->offset = 0; return 0; out_err: new_man = &bdev->man[bo->mem.mem_type]; if ((new_man->flags & <API key>) && bo->ttm) { ttm_tt_unbind(bo->ttm); ttm_tt_destroy(bo->ttm); bo->ttm = NULL; } return ret; } /** * Call bo::reserved. 
* Will release GPU memory type usage on destruction. * This is the place to put in driver specific hooks to release * driver private resources. * Will release the bo::reserved lock. */ static void <API key>(struct ttm_buffer_object *bo) { if (bo->ttm) { ttm_tt_unbind(bo->ttm); ttm_tt_destroy(bo->ttm); bo->ttm = NULL; } ttm_bo_mem_put(bo, &bo->mem); atomic_set(&bo->reserved, 0); /* * Make processes trying to reserve really pick it up. */ <API key>(); wake_up_all(&bo->event_queue); } static void <API key>(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_global *glob = bo->glob; struct ttm_bo_driver *driver; void *sync_obj = NULL; void *sync_obj_arg; int put_count; int ret; spin_lock(&bdev->fence_lock); (void) ttm_bo_wait(bo, false, false, true); if (!bo->sync_obj) { spin_lock(&glob->lru_lock); /** * Lock inversion between bo:reserve and bdev::fence_lock here, * but that's OK, since we're only trylocking. */ ret = <API key>(bo, false, true, false, 0); if (unlikely(ret == -EBUSY)) goto queue; spin_unlock(&bdev->fence_lock); put_count = ttm_bo_del_from_lru(bo); spin_unlock(&glob->lru_lock); <API key>(bo); ttm_bo_list_ref_sub(bo, put_count, true); return; } else { spin_lock(&glob->lru_lock); } queue: driver = bdev->driver; if (bo->sync_obj) sync_obj = driver->sync_obj_ref(bo->sync_obj); sync_obj_arg = bo->sync_obj_arg; kref_get(&bo->list_kref); list_add_tail(&bo->ddestroy, &bdev->ddestroy); spin_unlock(&glob->lru_lock); spin_unlock(&bdev->fence_lock); if (sync_obj) { driver->sync_obj_flush(sync_obj, sync_obj_arg); driver->sync_obj_unref(&sync_obj); } <API key>(&bdev->wq, ((HZ / 100) < 1) ? 1 : HZ / 100); } /** * function ttm_bo_cleanup_refs * If bo idle, remove from delayed- and lru lists, and unref. * If not idle, do nothing. * * @interruptible Any sleeps should occur interruptibly. * @no_wait_reserve Never wait for reserve. Return -EBUSY instead. * @no_wait_gpu Never wait for gpu. Return -EBUSY instead. 
*/ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool interruptible, bool no_wait_reserve, bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_global *glob = bo->glob; int put_count; int ret = 0; retry: spin_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); spin_unlock(&bdev->fence_lock); if (unlikely(ret != 0)) return ret; spin_lock(&glob->lru_lock); ret = <API key>(bo, interruptible, no_wait_reserve, false, 0); if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) { spin_unlock(&glob->lru_lock); return ret; } /** * We can re-check for sync object without taking * the bo::lock since setting the sync object requires * also bo::reserved. A busy object at this point may * be caused by another thread recently starting an accelerated * eviction. */ if (unlikely(bo->sync_obj)) { atomic_set(&bo->reserved, 0); wake_up_all(&bo->event_queue); spin_unlock(&glob->lru_lock); goto retry; } put_count = ttm_bo_del_from_lru(bo); list_del_init(&bo->ddestroy); ++put_count; spin_unlock(&glob->lru_lock); <API key>(bo); ttm_bo_list_ref_sub(bo, put_count, true); return 0; } /** * Traverse the delayed list, and call ttm_bo_cleanup_refs on all * encountered buffers. 
*/ static int <API key>(struct ttm_bo_device *bdev, bool remove_all) { struct ttm_bo_global *glob = bdev->glob; struct ttm_buffer_object *entry = NULL; int ret = 0; spin_lock(&glob->lru_lock); if (list_empty(&bdev->ddestroy)) goto out_unlock; entry = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object, ddestroy); kref_get(&entry->list_kref); for (;;) { struct ttm_buffer_object *nentry = NULL; if (entry->ddestroy.next != &bdev->ddestroy) { nentry = list_first_entry(&entry->ddestroy, struct ttm_buffer_object, ddestroy); kref_get(&nentry->list_kref); } spin_unlock(&glob->lru_lock); ret = ttm_bo_cleanup_refs(entry, false, !remove_all, !remove_all); kref_put(&entry->list_kref, ttm_bo_release_list); entry = nentry; if (ret || !entry) goto out; spin_lock(&glob->lru_lock); if (list_empty(&entry->ddestroy)) break; } out_unlock: spin_unlock(&glob->lru_lock); out: if (entry) kref_put(&entry->list_kref, ttm_bo_release_list); return ret; } static void <API key>(struct work_struct *work) { struct ttm_bo_device *bdev = container_of(work, struct ttm_bo_device, wq.work); if (<API key>(bdev, false)) { <API key>(&bdev->wq, ((HZ / 100) < 1) ? 
1 : HZ / 100); } } static void ttm_bo_release(struct kref *kref) { struct ttm_buffer_object *bo = container_of(kref, struct ttm_buffer_object, kref); struct ttm_bo_device *bdev = bo->bdev; struct <API key> *man = &bdev->man[bo->mem.mem_type]; if (likely(bo->vm_node != NULL)) { rb_erase(&bo->vm_rb, &bdev->addr_space_rb); drm_mm_put_block(bo->vm_node); bo->vm_node = NULL; } write_unlock(&bdev->vm_lock); ttm_mem_io_lock(man, false); ttm_mem_io_free_vm(bo); ttm_mem_io_unlock(man); <API key>(bo); kref_put(&bo->list_kref, ttm_bo_release_list); write_lock(&bdev->vm_lock); } void ttm_bo_unref(struct ttm_buffer_object **p_bo) { struct ttm_buffer_object *bo = *p_bo; struct ttm_bo_device *bdev = bo->bdev; *p_bo = NULL; write_lock(&bdev->vm_lock); kref_put(&bo->kref, ttm_bo_release); write_unlock(&bdev->vm_lock); } EXPORT_SYMBOL(ttm_bo_unref); int <API key>(struct ttm_bo_device *bdev) { return <API key>(&bdev->wq); } EXPORT_SYMBOL(<API key>); void <API key>(struct ttm_bo_device *bdev, int resched) { if (resched) <API key>(&bdev->wq, ((HZ / 100) < 1) ? 
1 : HZ / 100); } EXPORT_SYMBOL(<API key>); static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, bool no_wait_reserve, bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_reg evict_mem; struct ttm_placement placement; int ret = 0; spin_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); spin_unlock(&bdev->fence_lock); if (unlikely(ret != 0)) { if (ret != -ERESTARTSYS) { printk(KERN_ERR TTM_PFX "Failed to expire sync object before " "buffer eviction.\n"); } goto out; } BUG_ON(!atomic_read(&bo->reserved)); evict_mem = bo->mem; evict_mem.mm_node = NULL; evict_mem.bus.io_reserved_vm = false; evict_mem.bus.io_reserved_count = 0; placement.fpfn = 0; placement.lpfn = 0; placement.num_placement = 0; placement.num_busy_placement = 0; bdev->driver->evict_flags(bo, &placement); ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible, no_wait_reserve, no_wait_gpu); if (ret) { if (ret != -ERESTARTSYS) { printk(KERN_ERR TTM_PFX "Failed to find memory space for " "buffer 0x%p eviction.\n", bo); <API key>(bo, &placement); } goto out; } ret = <API key>(bo, &evict_mem, true, interruptible, no_wait_reserve, no_wait_gpu); if (ret) { if (ret != -ERESTARTSYS) printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); ttm_bo_mem_put(bo, &evict_mem); goto out; } bo->evicted = true; out: return ret; } static int ttm_mem_evict_first(struct ttm_bo_device *bdev, uint32_t mem_type, bool interruptible, bool no_wait_reserve, bool no_wait_gpu) { struct ttm_bo_global *glob = bdev->glob; struct <API key> *man = &bdev->man[mem_type]; struct ttm_buffer_object *bo; int ret, put_count = 0; retry: spin_lock(&glob->lru_lock); if (list_empty(&man->lru)) { spin_unlock(&glob->lru_lock); return -EBUSY; } bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru); kref_get(&bo->list_kref); if (!list_empty(&bo->ddestroy)) { spin_unlock(&glob->lru_lock); ret = ttm_bo_cleanup_refs(bo, interruptible, no_wait_reserve, no_wait_gpu); 
kref_put(&bo->list_kref, ttm_bo_release_list); if (likely(ret == 0 || ret == -ERESTARTSYS)) return ret; goto retry; } ret = <API key>(bo, false, no_wait_reserve, false, 0); if (unlikely(ret == -EBUSY)) { spin_unlock(&glob->lru_lock); if (likely(!no_wait_gpu)) ret = <API key>(bo, interruptible); kref_put(&bo->list_kref, ttm_bo_release_list); /** * We *need* to retry after releasing the lru lock. */ if (unlikely(ret != 0)) return ret; goto retry; } put_count = ttm_bo_del_from_lru(bo); spin_unlock(&glob->lru_lock); BUG_ON(ret != 0); ttm_bo_list_ref_sub(bo, put_count, true); ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu); ttm_bo_unreserve(bo); kref_put(&bo->list_kref, ttm_bo_release_list); return ret; } void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) { struct <API key> *man = &bo->bdev->man[mem->mem_type]; if (mem->mm_node) (*man->func->put_node)(man, mem); } EXPORT_SYMBOL(ttm_bo_mem_put); /** * Repeatedly evict memory from the LRU for @mem_type until we create enough * space, or we've evicted everything and there isn't enough space. */ static int <API key>(struct ttm_buffer_object *bo, uint32_t mem_type, struct ttm_placement *placement, struct ttm_mem_reg *mem, bool interruptible, bool no_wait_reserve, bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; struct <API key> *man = &bdev->man[mem_type]; int ret; do { ret = (*man->func->get_node)(man, bo, placement, mem); if (unlikely(ret != 0)) return ret; if (mem->mm_node) break; ret = ttm_mem_evict_first(bdev, mem_type, interruptible, no_wait_reserve, no_wait_gpu); if (unlikely(ret != 0)) return ret; } while (1); if (mem->mm_node == NULL) return -ENOMEM; mem->mem_type = mem_type; return 0; } static uint32_t <API key>(struct <API key> *man, uint32_t cur_placement, uint32_t proposed_placement) { uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING; uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING; /** * Keep current caching if possible. 
*/ if ((cur_placement & caching) != 0) result |= (cur_placement & caching); else if ((man->default_caching & caching) != 0) result |= man->default_caching; else if ((TTM_PL_FLAG_CACHED & caching) != 0) result |= TTM_PL_FLAG_CACHED; else if ((TTM_PL_FLAG_WC & caching) != 0) result |= TTM_PL_FLAG_WC; else if ((<API key> & caching) != 0) result |= <API key>; return result; } static bool <API key>(struct <API key> *man, bool disallow_fixed, uint32_t mem_type, uint32_t proposed_placement, uint32_t *masked_placement) { uint32_t cur_flags = ttm_bo_type_flags(mem_type); if ((man->flags & <API key>) && disallow_fixed) return false; if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0) return false; if ((proposed_placement & man->available_caching) == 0) return false; cur_flags |= (proposed_placement & man->available_caching); *masked_placement = cur_flags; return true; } /** * Creates space for memory region @mem according to its type. * * This function first searches for free space in compatible memory types in * the priority order defined by the driver. If free space isn't found, then * <API key> is attempted in priority order to evict and find * space. 
*/ int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_mem_reg *mem, bool interruptible, bool no_wait_reserve, bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; struct <API key> *man; uint32_t mem_type = TTM_PL_SYSTEM; uint32_t cur_flags = 0; bool type_found = false; bool type_ok = false; bool has_erestartsys = false; int i, ret; mem->mm_node = NULL; for (i = 0; i < placement->num_placement; ++i) { ret = <API key>(placement->placement[i], &mem_type); if (ret) return ret; man = &bdev->man[mem_type]; type_ok = <API key>(man, bo->type == ttm_bo_type_user, mem_type, placement->placement[i], &cur_flags); if (!type_ok) continue; cur_flags = <API key>(man, bo->mem.placement, cur_flags); /* * Use the access and other non-mapping-related flag bits from * the memory placement flags to the current flags */ ttm_flag_masked(&cur_flags, placement->placement[i], ~TTM_PL_MASK_MEMTYPE); if (mem_type == TTM_PL_SYSTEM) break; if (man->has_type && man->use_type) { type_found = true; ret = (*man->func->get_node)(man, bo, placement, mem); if (unlikely(ret)) return ret; } if (mem->mm_node) break; } if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) { mem->mem_type = mem_type; mem->placement = cur_flags; return 0; } if (!type_found) return -EINVAL; for (i = 0; i < placement->num_busy_placement; ++i) { ret = <API key>(placement->busy_placement[i], &mem_type); if (ret) return ret; man = &bdev->man[mem_type]; if (!man->has_type) continue; if (!<API key>(man, bo->type == ttm_bo_type_user, mem_type, placement->busy_placement[i], &cur_flags)) continue; cur_flags = <API key>(man, bo->mem.placement, cur_flags); /* * Use the access and other non-mapping-related flag bits from * the memory placement flags to the current flags */ ttm_flag_masked(&cur_flags, placement->busy_placement[i], ~TTM_PL_MASK_MEMTYPE); if (mem_type == TTM_PL_SYSTEM) { mem->mem_type = mem_type; mem->placement = cur_flags; mem->mm_node = NULL; return 0; } ret = <API 
key>(bo, mem_type, placement, mem, interruptible, no_wait_reserve, no_wait_gpu); if (ret == 0 && mem->mm_node) { mem->placement = cur_flags; return 0; } if (ret == -ERESTARTSYS) has_erestartsys = true; } ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM; return ret; } EXPORT_SYMBOL(ttm_bo_mem_space); int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait) { if ((atomic_read(&bo->cpu_writers) > 0) && no_wait) return -EBUSY; return <API key>(bo->event_queue, atomic_read(&bo->cpu_writers) == 0); } EXPORT_SYMBOL(ttm_bo_wait_cpu); int ttm_bo_move_buffer(struct ttm_buffer_object *bo, struct ttm_placement *placement, bool interruptible, bool no_wait_reserve, bool no_wait_gpu) { int ret = 0; struct ttm_mem_reg mem; struct ttm_bo_device *bdev = bo->bdev; BUG_ON(!atomic_read(&bo->reserved)); /* * FIXME: It's possible to pipeline buffer moves. * Have the driver move function wait for idle when necessary, * instead of doing it here. */ spin_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); spin_unlock(&bdev->fence_lock); if (ret) return ret; mem.num_pages = bo->num_pages; mem.size = mem.num_pages << PAGE_SHIFT; mem.page_alignment = bo->mem.page_alignment; mem.bus.io_reserved_vm = false; mem.bus.io_reserved_count = 0; /* * Determine where to move the buffer. 
*/ ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu); if (ret) goto out_unlock; ret = <API key>(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu); out_unlock: if (ret && mem.mm_node) ttm_bo_mem_put(bo, &mem); return ret; } static int ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem) { int i; if (mem->mm_node && placement->lpfn != 0 && (mem->start < placement->fpfn || mem->start + mem->num_pages > placement->lpfn)) return -1; for (i = 0; i < placement->num_placement; i++) { if ((placement->placement[i] & mem->placement & TTM_PL_MASK_CACHING) && (placement->placement[i] & mem->placement & TTM_PL_MASK_MEM)) return i; } return -1; } int ttm_bo_validate(struct ttm_buffer_object *bo, struct ttm_placement *placement, bool interruptible, bool no_wait_reserve, bool no_wait_gpu) { int ret; BUG_ON(!atomic_read(&bo->reserved)); /* Check that range is valid */ if (placement->lpfn || placement->fpfn) if (placement->fpfn > placement->lpfn || (placement->lpfn - placement->fpfn) < bo->num_pages) return -EINVAL; /* * Check whether we need to move buffer. */ ret = ttm_bo_mem_compat(placement, &bo->mem); if (ret < 0) { ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu); if (ret) return ret; } else { /* * Use the access and other non-mapping-related flag bits from * the compatible memory placement flags to the active flags */ ttm_flag_masked(&bo->mem.placement, placement->placement[ret], ~TTM_PL_MASK_MEMTYPE); } /* * We might need to add a TTM. 
*/ if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { ret = ttm_bo_add_ttm(bo, true); if (ret) return ret; } return 0; } EXPORT_SYMBOL(ttm_bo_validate); int <API key>(struct ttm_buffer_object *bo, struct ttm_placement *placement) { BUG_ON((placement->fpfn || placement->lpfn) && (bo->mem.num_pages > (placement->lpfn - placement->fpfn))); return 0; } int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo, unsigned long size, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, unsigned long buffer_start, bool interruptible, struct file *<API key>, size_t acc_size, void (*destroy) (struct ttm_buffer_object *)) { int ret = 0; unsigned long num_pages; size += buffer_start & ~PAGE_MASK; num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; if (num_pages == 0) { printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n"); if (destroy) (*destroy)(bo); else kfree(bo); return -EINVAL; } bo->destroy = destroy; kref_init(&bo->kref); kref_init(&bo->list_kref); atomic_set(&bo->cpu_writers, 0); atomic_set(&bo->reserved, 1); init_waitqueue_head(&bo->event_queue); INIT_LIST_HEAD(&bo->lru); INIT_LIST_HEAD(&bo->ddestroy); INIT_LIST_HEAD(&bo->swap); INIT_LIST_HEAD(&bo->io_reserve_lru); bo->bdev = bdev; bo->glob = bdev->glob; bo->type = type; bo->num_pages = num_pages; bo->mem.size = num_pages << PAGE_SHIFT; bo->mem.mem_type = TTM_PL_SYSTEM; bo->mem.num_pages = bo->num_pages; bo->mem.mm_node = NULL; bo->mem.page_alignment = page_alignment; bo->mem.bus.io_reserved_vm = false; bo->mem.bus.io_reserved_count = 0; bo->buffer_start = buffer_start & PAGE_MASK; bo->priv_flags = 0; bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); bo->seq_valid = false; bo-><API key> = <API key>; bo->acc_size = acc_size; atomic_inc(&bo->glob->bo_count); ret = <API key>(bo, placement); if (unlikely(ret != 0)) goto out_err; /* * For ttm_bo_type_device buffers, allocate * address space from the device. 
*/ if (bo->type == ttm_bo_type_device) { ret = ttm_bo_setup_vm(bo); if (ret) goto out_err; } ret = ttm_bo_validate(bo, placement, interruptible, false, false); if (ret) goto out_err; ttm_bo_unreserve(bo); return 0; out_err: ttm_bo_unreserve(bo); ttm_bo_unref(&bo); return ret; } EXPORT_SYMBOL(ttm_bo_init); static inline size_t ttm_bo_size(struct ttm_bo_global *glob, unsigned long num_pages) { size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK; return glob->ttm_bo_size + 2 * page_array_size; } int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, unsigned long buffer_start, bool interruptible, struct file *<API key>, struct ttm_buffer_object **p_bo) { struct ttm_buffer_object *bo; struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; int ret; size_t acc_size = ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT); ret = <API key>(mem_glob, acc_size, false, false); if (unlikely(ret != 0)) return ret; bo = kzalloc(sizeof(*bo), GFP_KERNEL); if (unlikely(bo == NULL)) { ttm_mem_global_free(mem_glob, acc_size); return -ENOMEM; } ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, buffer_start, interruptible, <API key>, acc_size, NULL); if (likely(ret == 0)) *p_bo = bo; return ret; } static int <API key>(struct ttm_bo_device *bdev, unsigned mem_type, bool allow_errors) { struct <API key> *man = &bdev->man[mem_type]; struct ttm_bo_global *glob = bdev->glob; int ret; /* * Can't use standard list traversal since we're unlocking. 
*/ spin_lock(&glob->lru_lock); while (!list_empty(&man->lru)) { spin_unlock(&glob->lru_lock); ret = ttm_mem_evict_first(bdev, mem_type, false, false, false); if (ret) { if (allow_errors) { return ret; } else { printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n"); } } spin_lock(&glob->lru_lock); } spin_unlock(&glob->lru_lock); return 0; } int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) { struct <API key> *man; int ret = -EINVAL; if (mem_type >= TTM_NUM_MEM_TYPES) { printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type); return ret; } man = &bdev->man[mem_type]; if (!man->has_type) { printk(KERN_ERR TTM_PFX "Trying to take down uninitialized " "memory manager type %u\n", mem_type); return ret; } man->use_type = false; man->has_type = false; ret = 0; if (mem_type > 0) { <API key>(bdev, mem_type, false); ret = (*man->func->takedown)(man); } return ret; } EXPORT_SYMBOL(ttm_bo_clean_mm); int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) { struct <API key> *man = &bdev->man[mem_type]; if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) { printk(KERN_ERR TTM_PFX "Illegal memory manager memory type %u.\n", mem_type); return -EINVAL; } if (!man->has_type) { printk(KERN_ERR TTM_PFX "Memory type %u has not been initialized.\n", mem_type); return 0; } return <API key>(bdev, mem_type, true); } EXPORT_SYMBOL(ttm_bo_evict_mm); int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, unsigned long p_size) { int ret = -EINVAL; struct <API key> *man; BUG_ON(type >= TTM_NUM_MEM_TYPES); man = &bdev->man[type]; BUG_ON(man->has_type); man->io_reserve_fastpath = true; man->use_io_reserve_lru = false; mutex_init(&man->io_reserve_mutex); INIT_LIST_HEAD(&man->io_reserve_lru); ret = bdev->driver->init_mem_type(bdev, type, man); if (ret) return ret; man->bdev = bdev; ret = 0; if (type != TTM_PL_SYSTEM) { ret = (*man->func->init)(man, p_size); if (ret) return ret; } man->has_type = true; man->use_type = true; man->size = p_size; 
INIT_LIST_HEAD(&man->lru); return 0; } EXPORT_SYMBOL(ttm_bo_init_mm); static void <API key>(struct kobject *kobj) { struct ttm_bo_global *glob = container_of(kobj, struct ttm_bo_global, kobj); <API key>(glob->mem_glob, &glob->shrink); __free_page(glob->dummy_read_page); kfree(glob); } void <API key>(struct <API key> *ref) { struct ttm_bo_global *glob = ref->object; kobject_del(&glob->kobj); kobject_put(&glob->kobj); } EXPORT_SYMBOL(<API key>); int ttm_bo_global_init(struct <API key> *ref) { struct ttm_bo_global_ref *bo_ref = container_of(ref, struct ttm_bo_global_ref, ref); struct ttm_bo_global *glob = ref->object; int ret; mutex_init(&glob->device_list_mutex); spin_lock_init(&glob->lru_lock); glob->mem_glob = bo_ref->mem_glob; glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32); if (unlikely(glob->dummy_read_page == NULL)) { ret = -ENOMEM; goto out_no_drp; } INIT_LIST_HEAD(&glob->swap_lru); INIT_LIST_HEAD(&glob->device_list); ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout); ret = <API key>(glob->mem_glob, &glob->shrink); if (unlikely(ret != 0)) { printk(KERN_ERR TTM_PFX "Could not register buffer object swapout.\n"); goto out_no_shrink; } glob->ttm_bo_extra_size = ttm_round_pot(sizeof(struct ttm_tt)) + ttm_round_pot(sizeof(struct ttm_backend)); glob->ttm_bo_size = glob->ttm_bo_extra_size + ttm_round_pot(sizeof(struct ttm_buffer_object)); atomic_set(&glob->bo_count, 0); ret = <API key>( &glob->kobj, &<API key>, ttm_get_kobj(), "buffer_objects"); if (unlikely(ret != 0)) kobject_put(&glob->kobj); return ret; out_no_shrink: __free_page(glob->dummy_read_page); out_no_drp: kfree(glob); return ret; } EXPORT_SYMBOL(ttm_bo_global_init); int <API key>(struct ttm_bo_device *bdev) { int ret = 0; unsigned i = TTM_NUM_MEM_TYPES; struct <API key> *man; struct ttm_bo_global *glob = bdev->glob; while (i man = &bdev->man[i]; if (man->has_type) { man->use_type = false; if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) { ret = -EBUSY; printk(KERN_ERR TTM_PFX "DRM memory 
manager type %d " "is not clean.\n", i); } man->has_type = false; } } mutex_lock(&glob->device_list_mutex); list_del(&bdev->device_list); mutex_unlock(&glob->device_list_mutex); <API key>(&bdev->wq); while (<API key>(bdev, true)) ; spin_lock(&glob->lru_lock); if (list_empty(&bdev->ddestroy)) TTM_DEBUG("Delayed destroy list was clean\n"); if (list_empty(&bdev->man[0].lru)) TTM_DEBUG("Swap list was clean\n"); spin_unlock(&glob->lru_lock); BUG_ON(!drm_mm_clean(&bdev->addr_space_mm)); write_lock(&bdev->vm_lock); drm_mm_takedown(&bdev->addr_space_mm); write_unlock(&bdev->vm_lock); return ret; } EXPORT_SYMBOL(<API key>); int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob, struct ttm_bo_driver *driver, uint64_t file_page_offset, bool need_dma32) { int ret = -EINVAL; rwlock_init(&bdev->vm_lock); bdev->driver = driver; memset(bdev->man, 0, sizeof(bdev->man)); /* * Initialize the system memory buffer type. * Other types need to be driver / IOCTL initialized. */ ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0); if (unlikely(ret != 0)) goto out_no_sys; bdev->addr_space_rb = RB_ROOT; ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000); if (unlikely(ret != 0)) goto out_no_addr_mm; INIT_DELAYED_WORK(&bdev->wq, <API key>); bdev->nice_mode = true; INIT_LIST_HEAD(&bdev->ddestroy); bdev->dev_mapping = NULL; bdev->glob = glob; bdev->need_dma32 = need_dma32; bdev->val_seq = 0; spin_lock_init(&bdev->fence_lock); mutex_lock(&glob->device_list_mutex); list_add_tail(&bdev->device_list, &glob->device_list); mutex_unlock(&glob->device_list_mutex); return 0; out_no_addr_mm: ttm_bo_clean_mm(bdev, 0); out_no_sys: return ret; } EXPORT_SYMBOL(ttm_bo_device_init); /* * buffer object vm functions. 
*/ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { struct <API key> *man = &bdev->man[mem->mem_type]; if (!(man->flags & <API key>)) { if (mem->mem_type == TTM_PL_SYSTEM) return false; if (man->flags & <API key>) return false; if (mem->placement & TTM_PL_FLAG_CACHED) return false; } return true; } void <API key>(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; loff_t offset = (loff_t) bo->addr_space_offset; loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT; if (!bdev->dev_mapping) return; unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); ttm_mem_io_free_vm(bo); } void <API key>(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; struct <API key> *man = &bdev->man[bo->mem.mem_type]; ttm_mem_io_lock(man, false); <API key>(bo); ttm_mem_io_unlock(man); } EXPORT_SYMBOL(<API key>); static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; struct rb_node **cur = &bdev->addr_space_rb.rb_node; struct rb_node *parent = NULL; struct ttm_buffer_object *cur_bo; unsigned long offset = bo->vm_node->start; unsigned long cur_offset; while (*cur) { parent = *cur; cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb); cur_offset = cur_bo->vm_node->start; if (offset < cur_offset) cur = &parent->rb_left; else if (offset > cur_offset) cur = &parent->rb_right; else BUG(); } rb_link_node(&bo->vm_rb, parent, cur); rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb); } /** * ttm_bo_setup_vm: * * @bo: the buffer to allocate address space for * * Allocate address space in the drm device so that applications * can mmap the buffer and access the contents. This only * applies to ttm_bo_type_device objects as others are not * placed in the drm device address space. 
*/ static int ttm_bo_setup_vm(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; int ret; retry_pre_get: ret = drm_mm_pre_get(&bdev->addr_space_mm); if (unlikely(ret != 0)) return ret; write_lock(&bdev->vm_lock); bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm, bo->mem.num_pages, 0, 0); if (unlikely(bo->vm_node == NULL)) { ret = -ENOMEM; goto out_unlock; } bo->vm_node = <API key>(bo->vm_node, bo->mem.num_pages, 0); if (unlikely(bo->vm_node == NULL)) { write_unlock(&bdev->vm_lock); goto retry_pre_get; } ttm_bo_vm_insert_rb(bo); write_unlock(&bdev->vm_lock); bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT; return 0; out_unlock: write_unlock(&bdev->vm_lock); return ret; } int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, bool interruptible, bool no_wait) { struct ttm_bo_driver *driver = bo->bdev->driver; struct ttm_bo_device *bdev = bo->bdev; void *sync_obj; void *sync_obj_arg; int ret = 0; if (likely(bo->sync_obj == NULL)) return 0; while (bo->sync_obj) { if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) { void *tmp_obj = bo->sync_obj; bo->sync_obj = NULL; clear_bit(<API key>, &bo->priv_flags); spin_unlock(&bdev->fence_lock); driver->sync_obj_unref(&tmp_obj); spin_lock(&bdev->fence_lock); continue; } if (no_wait) return -EBUSY; sync_obj = driver->sync_obj_ref(bo->sync_obj); sync_obj_arg = bo->sync_obj_arg; spin_unlock(&bdev->fence_lock); ret = driver->sync_obj_wait(sync_obj, sync_obj_arg, lazy, interruptible); if (unlikely(ret != 0)) { driver->sync_obj_unref(&sync_obj); spin_lock(&bdev->fence_lock); return ret; } spin_lock(&bdev->fence_lock); if (likely(bo->sync_obj == sync_obj && bo->sync_obj_arg == sync_obj_arg)) { void *tmp_obj = bo->sync_obj; bo->sync_obj = NULL; clear_bit(<API key>, &bo->priv_flags); spin_unlock(&bdev->fence_lock); driver->sync_obj_unref(&sync_obj); driver->sync_obj_unref(&tmp_obj); spin_lock(&bdev->fence_lock); } else { spin_unlock(&bdev->fence_lock); 
driver->sync_obj_unref(&sync_obj); spin_lock(&bdev->fence_lock); } } return 0; } EXPORT_SYMBOL(ttm_bo_wait); int <API key>(struct ttm_buffer_object *bo, bool no_wait) { struct ttm_bo_device *bdev = bo->bdev; int ret = 0; /* * Using ttm_bo_reserve makes sure the lru lists are updated. */ ret = ttm_bo_reserve(bo, true, no_wait, false, 0); if (unlikely(ret != 0)) return ret; spin_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, true, no_wait); spin_unlock(&bdev->fence_lock); if (likely(ret == 0)) atomic_inc(&bo->cpu_writers); ttm_bo_unreserve(bo); return ret; } EXPORT_SYMBOL(<API key>); void <API key>(struct ttm_buffer_object *bo) { if (atomic_dec_and_test(&bo->cpu_writers)) wake_up_all(&bo->event_queue); } EXPORT_SYMBOL(<API key>); /** * A buffer object shrink method that tries to swap out the first * buffer object on the bo_global::swap_lru list. */ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) { struct ttm_bo_global *glob = container_of(shrink, struct ttm_bo_global, shrink); struct ttm_buffer_object *bo; int ret = -EBUSY; int put_count; uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM); spin_lock(&glob->lru_lock); while (ret == -EBUSY) { if (unlikely(list_empty(&glob->swap_lru))) { spin_unlock(&glob->lru_lock); return -EBUSY; } bo = list_first_entry(&glob->swap_lru, struct ttm_buffer_object, swap); kref_get(&bo->list_kref); if (!list_empty(&bo->ddestroy)) { spin_unlock(&glob->lru_lock); (void) ttm_bo_cleanup_refs(bo, false, false, false); kref_put(&bo->list_kref, ttm_bo_release_list); continue; } /** * Reserve buffer. Since we unlock while sleeping, we need * to re-check that nobody removed us from the swap-list while * we slept. 
*/ ret = <API key>(bo, false, true, false, 0); if (unlikely(ret == -EBUSY)) { spin_unlock(&glob->lru_lock); <API key>(bo, false); kref_put(&bo->list_kref, ttm_bo_release_list); spin_lock(&glob->lru_lock); } } BUG_ON(ret != 0); put_count = ttm_bo_del_from_lru(bo); spin_unlock(&glob->lru_lock); ttm_bo_list_ref_sub(bo, put_count, true); /** * Wait for GPU, then move to system cached. */ spin_lock(&bo->bdev->fence_lock); ret = ttm_bo_wait(bo, false, false, false); spin_unlock(&bo->bdev->fence_lock); if (unlikely(ret != 0)) goto out; if ((bo->mem.placement & swap_placement) != swap_placement) { struct ttm_mem_reg evict_mem; evict_mem = bo->mem; evict_mem.mm_node = NULL; evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; evict_mem.mem_type = TTM_PL_SYSTEM; ret = <API key>(bo, &evict_mem, true, false, false, false); if (unlikely(ret != 0)) goto out; } <API key>(bo); /** * Swap out. Buffer will be swapped in again as soon as * anyone tries to access a ttm page. */ if (bo->bdev->driver->swap_notify) bo->bdev->driver->swap_notify(bo); ret = ttm_tt_swapout(bo->ttm, bo-><API key>); out: /** * * Unreserve without putting on LRU to avoid swapping out an * already swapped buffer. */ atomic_set(&bo->reserved, 0); wake_up_all(&bo->event_queue); kref_put(&bo->list_kref, ttm_bo_release_list); return ret; } void ttm_bo_swapout_all(struct ttm_bo_device *bdev) { while (ttm_bo_swapout(&bdev->glob->shrink) == 0) ; } EXPORT_SYMBOL(ttm_bo_swapout_all);
'use strict'; angular.module("ngLocale", [], ["$provide", function($provide) { var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"}; function getDecimals(n) { n = n + ''; var i = n.indexOf('.'); return (i == -1) ? 0 : n.length - i - 1; } function getVF(n, opt_precision) { var v = opt_precision; if (undefined === v) { v = Math.min(getDecimals(n), 3); } var base = Math.pow(10, v); var f = ((n * base) | 0) % base; return {v: v, f: f}; } $provide.value("$locale", { "DATETIME_FORMATS": { "AMPMS": [ "OD", "OT" ], "DAY": [ "Jumapil", "Wuok Tich", "Tich Ariyo", "Tich Adek", "Tich Ang\u2019wen", "Tich Abich", "Ngeso" ], "ERANAMES": [ "Kapok Kristo obiro", "Ka Kristo osebiro" ], "ERAS": [ "BC", "AD" ], "FIRSTDAYOFWEEK": 0, "MONTH": [ "Dwe mar Achiel", "Dwe mar Ariyo", "Dwe mar Adek", "Dwe mar Ang\u2019wen", "Dwe mar Abich", "Dwe mar Auchiel", "Dwe mar Abiriyo", "Dwe mar Aboro", "Dwe mar Ochiko", "Dwe mar Apar", "Dwe mar gi achiel", "Dwe mar Apar gi ariyo" ], "SHORTDAY": [ "JMP", "WUT", "TAR", "TAD", "TAN", "TAB", "NGS" ], "SHORTMONTH": [ "DAC", "DAR", "DAD", "DAN", "DAH", "DAU", "DAO", "DAB", "DOC", "DAP", "DGI", "DAG" ], "STANDALONEMONTH": [ "Dwe mar Achiel", "Dwe mar Ariyo", "Dwe mar Adek", "Dwe mar Ang\u2019wen", "Dwe mar Abich", "Dwe mar Auchiel", "Dwe mar Abiriyo", "Dwe mar Aboro", "Dwe mar Ochiko", "Dwe mar Apar", "Dwe mar gi achiel", "Dwe mar Apar gi ariyo" ], "WEEKENDRANGE": [ 5, 6 ], "fullDate": "EEEE, d MMMM y", "longDate": "d MMMM y", "medium": "d MMM y h:mm:ss a", "mediumDate": "d MMM y", "mediumTime": "h:mm:ss a", "short": "dd/MM/y h:mm a", "shortDate": "dd/MM/y", "shortTime": "h:mm a" }, "NUMBER_FORMATS": { "CURRENCY_SYM": "Ksh", "DECIMAL_SEP": ".", "GROUP_SEP": ",", "PATTERNS": [ { "gSize": 3, "lgSize": 3, "maxFrac": 3, "minFrac": 0, "minInt": 1, "negPre": "-", "negSuf": "", "posPre": "", "posSuf": "" }, { "gSize": 3, "lgSize": 3, "maxFrac": 2, "minFrac": 2, "minInt": 1, "negPre": "-", "negSuf": "\u00a4", 
"posPre": "", "posSuf": "\u00a4" } ] }, "id": "luo", "localeID": "luo", "pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;} }); }]);
package com.googlecode.javacv; import static com.googlecode.javacv.cpp.cvkernels.*; import static com.googlecode.javacv.cpp.opencv_calib3d.*; import static com.googlecode.javacv.cpp.opencv_core.*; import static com.googlecode.javacv.cpp.opencv_imgproc.*; /** * * @author Samuel Audet */ public class ProCamTransformer implements ImageTransformer { public ProCamTransformer(double[] referencePoints, CameraDevice camera, ProjectorDevice projector) { this(referencePoints, camera, projector, null); } public ProCamTransformer(double[] referencePoints, CameraDevice camera, ProjectorDevice projector, CvMat n) { this.camera = camera; this.projector = projector; if (referencePoints != null) { this.surfaceTransformer = new <API key>( camera.cameraMatrix, camera.cameraMatrix, null, null, n, referencePoints, null, null, 3, 0); } double[] referencePoints1 = { 0, 0, camera.imageWidth/2, camera.imageHeight, camera.imageWidth, 0 }; double[] referencePoints2 = { 0, 0, projector.imageWidth/2, projector.imageHeight, projector.imageWidth, 0 }; if (n != null) { invCameraMatrix = CvMat.create(3, 3); cvInvert(camera.cameraMatrix, invCameraMatrix); JavaCV.<API key>(referencePoints2, referencePoints1, invCameraMatrix, projector.cameraMatrix, projector.R, projector.T, n, true); } this.<API key> = new <API key>( camera.cameraMatrix, projector.cameraMatrix, projector.R, projector.T, null, referencePoints1, referencePoints2, projector.colorMixingMatrix, /*surfaceTransformer == null ? 
3 : */1, 3); // CvMat n2 = createParameters().getN(); if (referencePoints != null && n != null) { frontoParallelH = camera.getFrontoParallelH(referencePoints, n, CvMat.create(3, 3)); invFrontoParallelH = frontoParallelH.clone(); cvInvert(frontoParallelH, invFrontoParallelH); } } protected CameraDevice camera = null; protected ProjectorDevice projector = null; protected <API key> surfaceTransformer = null; protected <API key> <API key> = null; protected IplImage[] projectorImage = null, surfaceImage = null; protected CvScalar fillColor = cvScalar(0.0, 0.0, 0.0, 1.0); protected CvRect roi = new CvRect(); protected CvMat frontoParallelH = null, invFrontoParallelH = null; protected CvMat invCameraMatrix = null; protected KernelData kernelData = null; protected CvMat[] H1 = null; protected CvMat[] H2 = null; protected CvMat[] X = null; public int getNumGains() { return <API key>.getNumGains(); } public int getNumBiases() { return <API key>.getNumBiases(); } public CvScalar getFillColor() { return fillColor; } public void setFillColor(CvScalar fillColor) { this.fillColor = fillColor; } public <API key> <API key>() { return surfaceTransformer; } public <API key> <API key>() { return <API key>; } public IplImage getProjectorImage(int pyramidLevel) { return projectorImage[pyramidLevel]; } public void setProjectorImage(IplImage projectorImage0, int minLevel, int maxLevel) { setProjectorImage(projectorImage0, minLevel, maxLevel, true); } public void setProjectorImage(IplImage projectorImage0, int minLevel, int maxLevel, boolean convertToFloat) { if (projectorImage == null || projectorImage.length != maxLevel+1) { projectorImage = new IplImage[maxLevel+1]; } if (projectorImage0.depth() == IPL_DEPTH_32F || !convertToFloat) { projectorImage[minLevel] = projectorImage0; } else { if (projectorImage[minLevel] == null) { projectorImage[minLevel] = IplImage.create(projectorImage0.width(), projectorImage0.height(), IPL_DEPTH_32F, projectorImage0.nChannels(), projectorImage0.origin()); 
} IplROI ir = projectorImage0.roi(); if (ir != null) { int align = 1<<(maxLevel+1); roi.x(Math.max(0, (int)Math.floor((double)ir.xOffset()/align)*align)); roi.y(Math.max(0, (int)Math.floor((double)ir.yOffset()/align)*align)); roi.width (Math.min(projectorImage0.width(), (int)Math.ceil((double)ir.width() /align)*align)); roi.height(Math.min(projectorImage0.height(), (int)Math.ceil((double)ir.height()/align)*align)); cvSetImageROI(projectorImage0, roi); cvSetImageROI(projectorImage[minLevel], roi); } else { cvResetImageROI(projectorImage0); cvResetImageROI(projectorImage[minLevel]); } cvConvertScale(projectorImage0, projectorImage[minLevel], 1.0/255.0, 0); } // CvScalar.ByValue average = cvAvg(projectorImage[0], null); // cvSubS(projectorImage[0], average, projectorImage[0], null); for (int i = minLevel+1; i <= maxLevel; i++) { int w = projectorImage[i-1].width()/2; int h = projectorImage[i-1].height()/2; int d = projectorImage[i-1].depth(); int c = projectorImage[i-1].nChannels(); int o = projectorImage[i-1].origin(); if (projectorImage[i] == null) { projectorImage[i] = IplImage.create(w, h, d, c, o); } IplROI ir = projectorImage[i-1].roi(); if (ir != null) { roi.x(ir.xOffset()/2); roi.width (ir.width() /2); roi.y(ir.yOffset()/2); roi.height(ir.height()/2); cvSetImageROI(projectorImage[i], roi); } else { cvResetImageROI(projectorImage[i]); } cvPyrDown(projectorImage[i-1], projectorImage[i], CV_GAUSSIAN_5x5); cvResetImageROI(projectorImage[i-1]); } } public IplImage getSurfaceImage(int pyramidLevel) { return surfaceImage[pyramidLevel]; } public void setSurfaceImage(IplImage surfaceImage0, int pyramidLevels) { if (surfaceImage == null || surfaceImage.length != pyramidLevels) { surfaceImage = new IplImage[pyramidLevels]; } surfaceImage[0] = surfaceImage0; cvResetImageROI(surfaceImage0); for (int i = 1; i < pyramidLevels; i++) { int w = surfaceImage[i-1].width()/2; int h = surfaceImage[i-1].height()/2; int d = surfaceImage[i-1].depth(); int c = 
surfaceImage[i-1].nChannels(); int o = surfaceImage[i-1].origin(); if (surfaceImage[i] == null) { surfaceImage[i] = IplImage.create(w, h, d, c, o); } else { cvResetImageROI(surfaceImage[i]); } cvPyrDown(surfaceImage[i-1], surfaceImage[i], CV_GAUSSIAN_5x5); } } protected void prepareTransforms(CvMat H1, CvMat H2, CvMat X, int pyramidLevel, Parameters p) { <API key>.Parameters cameraParameters = p.<API key>(); <API key>.Parameters projectorParameters = p.<API key>(); if (surfaceTransformer != null) { cvInvert(cameraParameters.getH(), H1); } cvInvert(projectorParameters.getH(), H2); // adjust the scale of the transformation based on the pyramid level if (pyramidLevel > 0) { int scale = 1<<pyramidLevel; if (surfaceTransformer != null) { H1.put(2, H1.get(2)/scale); H1.put(5, H1.get(5)/scale); H1.put(6, H1.get(6)*scale); H1.put(7, H1.get(7)*scale); } H2.put(2, H2.get(2)/scale); H2.put(5, H2.get(5)/scale); H2.put(6, H2.get(6)*scale); H2.put(7, H2.get(7)*scale); } double[] x = projector.colorMixingMatrix.get(); double[] a = projectorParameters.getColorParameters(); double a2 = a[0]; X.put(a2*x[0], a2*x[1], a2*x[2], a[1], a2*x[3], a2*x[4], a2*x[5], a[2], a2*x[6], a2*x[7], a2*x[8], a[3], 0, 0, 0, 1); } public void transform(final IplImage srcImage, final IplImage dstImage, final CvRect roi, final int pyramidLevel, final ImageTransformer.Parameters parameters, final boolean inverse) { if (inverse) { throw new <API key>("Inverse transform not supported."); } final Parameters p = ((Parameters)parameters); final <API key>.Parameters cameraParameters = p.<API key>(); final <API key>.Parameters projectorParameters = p.<API key>(); if (p.tempImage == null || p.tempImage.length <= pyramidLevel) { p.tempImage = new IplImage[pyramidLevel+1]; } p.tempImage[pyramidLevel] = IplImage.<API key>(p.tempImage[pyramidLevel], dstImage); if (roi == null) { cvResetImageROI(p.tempImage[pyramidLevel]); } else { cvSetImageROI(p.tempImage[pyramidLevel], roi); } // Parallel.run(new Runnable() { public 
void run() { // warp the template image if (surfaceTransformer != null) { surfaceTransformer.transform(srcImage, p.tempImage[pyramidLevel], roi, pyramidLevel, cameraParameters, false); } // }}, new Runnable() { public void run() { // warp the projector image <API key>.transform(projectorImage[pyramidLevel], dstImage, roi, pyramidLevel, projectorParameters, false); // multiply projector image with template image if (surfaceTransformer != null) { cvMul(dstImage, p.tempImage[pyramidLevel], dstImage, 1/dstImage.highValue()); } else { cvCopy(p.tempImage[pyramidLevel], dstImage); } } public void transform(CvMat srcPts, CvMat dstPts, ImageTransformer.Parameters parameters, boolean inverse) { if (surfaceTransformer != null) { surfaceTransformer.transform(srcPts, dstPts, ((Parameters)parameters).surfaceParameters, inverse); } else if (dstPts != srcPts) { dstPts.put(srcPts); } } public void transform(Data[] data, CvRect roi, ImageTransformer.Parameters[] parameters, boolean[] inverses) { assert data.length == parameters.length; if (kernelData == null || kernelData.capacity() < data.length) { kernelData = new KernelData(data.length); } if ((H1 == null || H1.length < data.length) && surfaceTransformer != null) { H1 = new CvMat[data.length]; for (int i = 0; i < H1.length; i++) { H1[i] = CvMat.create(3, 3); } } if (H2 == null || H2.length < data.length) { H2 = new CvMat[data.length]; for (int i = 0; i < H2.length; i++) { H2[i] = CvMat.create(3, 3); } } if (X == null || X.length < data.length) { X = new CvMat[data.length]; for (int i = 0; i < X.length; i++) { X[i] = CvMat.create(4, 4); } } for (int i = 0; i < data.length; i++) { kernelData.position(i); kernelData.srcImg(projectorImage[data[i].pyramidLevel]); kernelData.srcImg2(surfaceTransformer == null ? 
null : data[i].srcImg); kernelData.subImg(data[i].subImg); kernelData.srcDotImg(data[i].srcDotImg); kernelData.mask(data[i].mask); kernelData.zeroThreshold(data[i].zeroThreshold); kernelData.outlierThreshold(data[i].outlierThreshold); if (inverses != null && inverses[i]) { throw new <API key>("Inverse transform not supported."); } prepareTransforms(surfaceTransformer == null ? null : H1[i], H2[i], X[i], data[i].pyramidLevel, (Parameters)parameters[i]); kernelData.H1(H2[i]); kernelData.H2(surfaceTransformer == null ? null : H1[i]); kernelData.X (X [i]); kernelData.transImg(data[i].transImg); kernelData.dstImg(data[i].dstImg); kernelData.dstDstDot(data[i].dstDstDot); } int fullCapacity = kernelData.capacity(); kernelData.capacity(data.length); <API key>(kernelData, roi, getFillColor()); kernelData.capacity(fullCapacity); for (int i = 0; i < data.length; i++) { kernelData.position(i); data[i].dstCount = kernelData.dstCount(); data[i].dstCountZero = kernelData.dstCountZero(); data[i].dstCountOutlier = kernelData.dstCountOutlier(); data[i].srcDstDot = kernelData.srcDstDot(); } // if (data[0].dstCountZero > 0) { // System.err.println(data[0].dstCountZero + " out of " + data[0].dstCount // + " are zero = " + 100*data[0].dstCountZero/data[0].dstCount + "%"); } public Parameters createParameters() { return new Parameters(); } public class Parameters implements ImageTransformer.Parameters { protected Parameters() { reset(false); } protected Parameters(<API key>.Parameters surfaceParameters, <API key>.Parameters projectorParameters) { reset(surfaceParameters, projectorParameters); } private <API key>.Parameters surfaceParameters = null; private <API key>.Parameters projectorParameters = null; private IplImage[] tempImage = null; private CvMat H = CvMat.create(3, 3), R = CvMat.create(3, 3), n = CvMat.create(3, 1), t = CvMat.create(3, 1); public <API key>.Parameters <API key>() { return surfaceParameters; } public <API key>.Parameters <API key>() { return projectorParameters; } 
private int getSizeForSurface() { return surfaceTransformer == null ? 0 : surfaceParameters.size() - surfaceTransformer.getNumGains() - surfaceTransformer.getNumBiases(); } private int getSizeForProjector() { return projectorParameters.size(); } public int size() { return getSizeForSurface() + getSizeForProjector(); } public double[] get() { double[] p = new double[size()]; for (int i = 0; i < p.length; i++) { p[i] = get(i); } return p; } public double get(int i) { if (i < getSizeForSurface()) { return surfaceParameters.get(i); } else { return projectorParameters.get(i-getSizeForSurface()); } } public void set(double ... p) { for (int i = 0; i < p.length; i++) { set(i, p[i]); } } public void set(int i, double p) { if (i < getSizeForSurface()) { surfaceParameters.set(i, p); } else { projectorParameters.set(i-getSizeForSurface(), p); } } public void set(ImageTransformer.Parameters p) { Parameters pcp = (Parameters)p; if (surfaceTransformer != null) { surfaceParameters.set(pcp.<API key>()); surfaceParameters.resetColor(false); } projectorParameters.set(pcp.<API key>()); } public void reset(boolean asIdentity) { reset(null, null); } public void reset(<API key>.Parameters surfaceParameters, <API key>.Parameters projectorParameters) { if (surfaceParameters == null && surfaceTransformer != null) { surfaceParameters = surfaceTransformer.createParameters(); } if (projectorParameters == null) { projectorParameters = <API key>.createParameters(); } this.surfaceParameters = surfaceParameters; this.projectorParameters = projectorParameters; setSubspace(getSubspace()); } // public boolean addDelta(int i) { // return addDelta(i, 1); // public boolean addDelta(int i, double scale) { // // gradient varies linearly with intensity, so // // the increment value is not very important, but // // <API key> is good only for the value 1, // // so let's use that // if (i < getSizeForSurface()) { // surfaceParameters.addDelta(i, scale); // projectorParameters.setUpdateNeeded(true); // } else 
{ // projectorParameters.addDelta(i-getSizeForSurface(), scale); // return false; public double getConstraintError() { double error = surfaceTransformer == null ? 0 : surfaceParameters.getConstraintError(); projectorParameters.update(); return error; } public void compose(ImageTransformer.Parameters p1, boolean inverse1, ImageTransformer.Parameters p2, boolean inverse2) { throw new <API key>("Compose operation not supported."); } public boolean preoptimize() { double[] p = setSubspaceInternal(getSubspaceInternal()); if (p != null) { set(8, p[8]); set(9, p[9]); set(10, p[10]); return true; } return false; } public void setSubspace(double ... p) { double[] dst = setSubspaceInternal(p); if (dst != null) { set(dst); } } public double[] getSubspace() { return getSubspaceInternal(); } private double[] setSubspaceInternal(double ... p) { if (invFrontoParallelH == null) { return null; } double[] dst = new double[8+3]; t.put(p[0], p[1], p[2]); cvRodrigues2(t, R, null); t.put(p[3], p[4], p[5]); // compute new H H.put(R.get(0), R.get(1), t.get(0), R.get(3), R.get(4), t.get(1), R.get(6), R.get(7), t.get(2)); cvMatMul(H, invFrontoParallelH, H); cvMatMul(surfaceTransformer.getK2(), H, H); cvMatMul(H, surfaceTransformer.getInvK1(), H); // compute new n, rotation from the z-axis cvGEMM(R, t, 1, null, 0, t, CV_GEMM_A_T); double scale = 1/t.get(2); n.put(0.0, 0.0, 1.0); cvGEMM(R, n, scale, null, 0, n, 0); // compute and set new three points double[] src = <API key>.getReferencePoints2(); JavaCV.<API key>(src, dst, <API key>.getInvK1(),<API key>.getK2(), <API key>.getR(), <API key>.getT(), n, true); dst[8] = dst[0]; dst[9] = dst[2]; dst[10] = dst[4]; // compute and set new four points JavaCV.<API key>(surfaceTransformer.getReferencePoints1(), dst, H); return dst; } private double[] getSubspaceInternal() { if (frontoParallelH == null) { return null; } cvMatMul(surfaceTransformer.getK1(), frontoParallelH, H); cvMatMul(surfaceParameters .getH(), H, H); 
cvMatMul(surfaceTransformer.getInvK2(), H, H); JavaCV.HtoRt(H, R, t); cvRodrigues2(R, n, null); double[] p = { n.get(0), n.get(1), n.get(2), t.get(0), t.get(1), t.get(2) }; return p; } public CvMat getN() { double[] src = <API key>.getReferencePoints2(); double[] dst = <API key>.getReferencePoints1().clone(); dst[0] = projectorParameters.get(0); dst[2] = projectorParameters.get(1); dst[4] = projectorParameters.get(2); // get plane parameters n, but since we model the target to be // the camera, we have to inverse everything before calling // getPlaneParameters() and reframe the n it returns cvTranspose(<API key>.getR(), R); cvGEMM(R, <API key>.getT(), -1, null, 0, t, 0); JavaCV.getPlaneParameters(src, dst, <API key>.getInvK2(), <API key>.getK1(), R, t, n); double d = 1 + cvDotProduct(n, <API key>.getT()); cvGEMM(R, n, 1/d, null, 0, n, 0); return n; } public CvMat getN0() { n = getN(); if (surfaceTransformer == null) { return n; } // remove projective effect of the current n, // leaving only the effect of n0 camera.getFrontoParallelH(surfaceParameters.get(), n, R); cvInvert(surfaceParameters.getH(), H); cvMatMul(H, surfaceTransformer.getK2(), H); cvMatMul(H, R, H); cvMatMul(surfaceTransformer.getInvK1(), H, H); JavaCV.HtoRt(H, R, t); // compute n0, as a rotation from the z-axis cvGEMM(R, t, 1, null, 0, t, CV_GEMM_A_T); double scale = 1/t.get(2); n.put(0.0, 0.0, 1.0); cvGEMM(R, n, scale, null, 0, n, 0); return n; } @Override public Parameters clone() { Parameters p = new Parameters(); p.surfaceParameters = surfaceParameters == null ? null : surfaceParameters.clone(); p.projectorParameters = projectorParameters.clone(); return p; } @Override public String toString() { if (surfaceParameters != null) { return surfaceParameters.toString() + projectorParameters.toString(); } else { return projectorParameters.toString(); } } } }
<?php

require_once 'Zend/Service/Rackspace/Files.php';

/**
 * Represents a single object (file) stored in a Rackspace Cloud Files container.
 *
 * NOTE(review): the class/exception/service identifiers in this file had been
 * destroyed by an automated redaction pass; they were restored from the
 * require_once paths and ZF1 naming conventions -- confirm against upstream.
 */
class Zend_Service_Rackspace_Files_Object
{
    /**
     * The service that has created the object
     *
     * @var Zend_Service_Rackspace_Files
     */
    protected $service;
    /**
     * Name of the object
     *
     * @var string
     */
    protected $name;
    /**
     * MD5 value of the object's content
     *
     * @var string
     */
    protected $hash;
    /**
     * Size in bytes of the object's content
     *
     * @var integer
     */
    protected $size;
    /**
     * Content type of the object's content
     *
     * @var string
     */
    protected $contentType;
    /**
     * Date of the last modified of the object
     *
     * @var string
     */
    protected $lastModified;
    /**
     * Object content
     *
     * @var string
     */
    protected $content;
    /**
     * Name of the container where the object is stored
     *
     * @var string
     */
    protected $container;

    /**
     * Constructor
     *
     * You must pass the Zend_Service_Rackspace_Files object of the caller and
     * an associative array with the keys "name", "container", "hash", "bytes",
     * "content_type", "last_modified" and optionally "content".
     *
     * @param  Zend_Service_Rackspace_Files $service
     * @param  array                        $data
     * @throws Zend_Service_Rackspace_Files_Exception when a required key is missing
     */
    public function __construct($service, $data)
    {
        if (!($service instanceof Zend_Service_Rackspace_Files) || !is_array($data)) {
            require_once 'Zend/Service/Rackspace/Files/Exception.php';
            throw new Zend_Service_Rackspace_Files_Exception("You must pass a RackspaceFiles and an array");
        }
        // Required keys, each mapped to the exact message raised when it is missing.
        $required = array(
            'name'          => "You must pass the name of the object in the array (name)",
            'container'     => "You must pass the container of the object in the array (container)",
            'hash'          => "You must pass the hash of the object in the array (hash)",
            'bytes'         => "You must pass the byte size of the object in the array (bytes)",
            'content_type'  => "You must pass the content type of the object in the array (content_type)",
            'last_modified' => "You must pass the last modified data of the object in the array (last_modified)",
        );
        foreach ($required as $key => $message) {
            if (!array_key_exists($key, $data)) {
                require_once 'Zend/Service/Rackspace/Files/Exception.php';
                throw new Zend_Service_Rackspace_Files_Exception($message);
            }
        }
        $this->name         = $data['name'];
        $this->container    = $data['container'];
        $this->hash         = $data['hash'];
        $this->size         = $data['bytes'];
        $this->contentType  = $data['content_type'];
        $this->lastModified = $data['last_modified'];
        if (!empty($data['content'])) {
            $this->content = $data['content'];
        }
        $this->service = $service;
    }

    /**
     * Get name
     *
     * @return string
     */
    public function getName()
    {
        return $this->name;
    }

    /**
     * Get the name of the container
     *
     * @return string
     */
    public function getContainer()
    {
        return $this->container;
    }

    /**
     * Get the MD5 of the object's content
     *
     * @return string|boolean
     */
    public function getHash()
    {
        return $this->hash;
    }

    /**
     * Get the size (in bytes) of the object's content
     *
     * @return integer|boolean
     */
    public function getSize()
    {
        return $this->size;
    }

    /**
     * Get the content type of the object's content
     *
     * @return string
     */
    public function getContentType()
    {
        return $this->contentType;
    }

    /**
     * Get the date of the last modification of the object
     *
     * @return string
     */
    public function getLastModified()
    {
        return $this->lastModified;
    }

    /**
     * Get the content of the object
     *
     * @return string
     */
    public function getContent()
    {
        return $this->content;
    }

    /**
     * Get the metadata of the object.
     * If you don't pass $key it returns the entire array of metadata values.
     *
     * @param  string $key
     * @return string|array|boolean false when the key (or any metadata) is absent
     */
    public function getMetadata($key = null)
    {
        $result = $this->service->getMetadataObject($this->container, $this->name);
        if (!empty($result)) {
            if (empty($key)) {
                return $result['metadata'];
            }
            if (isset($result['metadata'][$key])) {
                return $result['metadata'][$key];
            }
        }
        return false;
    }

    /**
     * Set the metadata values; the old metadata is replaced with the new one.
     *
     * @param  array $metadata
     * @return boolean
     */
    public function setMetadata($metadata)
    {
        return $this->service->setMetadataObject($this->container, $this->name, $metadata);
    }

    /**
     * Copy the object to another container. You can add metadata information
     * to the destination object, change the content_type and the name.
     *
     * @param  string $container_dest
     * @param  string $name_dest
     * @param  array  $metadata
     * @param  string $content_type
     * @return boolean
     */
    public function copyTo($container_dest, $name_dest, $metadata = array(), $content_type = null)
    {
        return $this->service->copyObject($this->container, $this->name, $container_dest, $name_dest, $metadata, $content_type);
    }

    /**
     * Get the CDN URL of the object
     *
     * @return string|boolean false when the container is not CDN-enabled
     */
    public function getCdnUrl()
    {
        $result = $this->service->getInfoCdnContainer($this->container);
        if ($result !== false) {
            if ($result['cdn_enabled']) {
                return $result['cdn_uri'] . '/' . $this->name;
            }
        }
        return false;
    }

    /**
     * Get the CDN SSL URL of the object
     *
     * @return string|boolean false when the container is not CDN-enabled
     */
    public function getCdnUrlSsl()
    {
        $result = $this->service->getInfoCdnContainer($this->container);
        if ($result !== false) {
            if ($result['cdn_enabled']) {
                return $result['cdn_uri_ssl'] . '/' . $this->name;
            }
        }
        return false;
    }
}
// <API key>: GPL-2.0 #include <linux/bug.h> #include <linux/kernel.h> #include <linux/bitops.h> #include <linux/math64.h> #include <linux/log2.h> #include <linux/err.h> #include <linux/module.h> #include "qcom-vadc-common.h" /* Voltage to temperature */ static const struct vadc_map_pt <API key>[] = { {1758, -40}, {1742, -35}, {1719, -30}, {1691, -25}, {1654, -20}, {1608, -15}, {1551, -10}, {1483, -5}, {1404, 0}, {1315, 5}, {1218, 10}, {1114, 15}, {1007, 20}, {900, 25}, {795, 30}, {696, 35}, {605, 40}, {522, 45}, {448, 50}, {383, 55}, {327, 60}, {278, 65}, {237, 70}, {202, 75}, {172, 80}, {146, 85}, {125, 90}, {107, 95}, {92, 100}, {79, 105}, {68, 110}, {59, 115}, {51, 120}, {44, 125} }; /* * Voltage to temperature table for 100k pull up for NTCG104EF104 with * 1.875V reference. */ static const struct vadc_map_pt <API key>[] = { { 1831, -40000 }, { 1814, -35000 }, { 1791, -30000 }, { 1761, -25000 }, { 1723, -20000 }, { 1675, -15000 }, { 1616, -10000 }, { 1545, -5000 }, { 1463, 0 }, { 1370, 5000 }, { 1268, 10000 }, { 1160, 15000 }, { 1049, 20000 }, { 937, 25000 }, { 828, 30000 }, { 726, 35000 }, { 630, 40000 }, { 544, 45000 }, { 467, 50000 }, { 399, 55000 }, { 340, 60000 }, { 290, 65000 }, { 247, 70000 }, { 209, 75000 }, { 179, 80000 }, { 153, 85000 }, { 130, 90000 }, { 112, 95000 }, { 96, 100000 }, { 82, 105000 }, { 71, 110000 }, { 62, 115000 }, { 53, 120000 }, { 46, 125000 }, }; static int <API key>( const struct vadc_prescale_ratio *prescale, const struct adc5_data *data, u16 adc_code, int *result_uv); static int <API key>( const struct vadc_prescale_ratio *prescale, const struct adc5_data *data, u16 adc_code, int *result_mdec); static int <API key>( const struct vadc_prescale_ratio *prescale, const struct adc5_data *data, u16 adc_code, int *result_mdec); static int <API key>( const struct vadc_prescale_ratio *prescale, const struct adc5_data *data, u16 adc_code, int *result_mdec); static int <API key>( const struct vadc_prescale_ratio *prescale, const struct 
adc5_data *data, u16 adc_code, int *result_mdec); static struct <API key> scale_adc5_fn[] = { [<API key>] = {<API key>}, [<API key>] = {<API key>}, [<API key>] = {<API key>}, [<API key>] = {<API key>}, [<API key>] = {<API key>}, [<API key>] = {<API key>}, }; static int <API key>(const struct vadc_map_pt *pts, u32 tablesize, s32 input, int *output) { bool descending = 1; u32 i = 0; if (!pts) return -EINVAL; /* Check if table is descending or ascending */ if (tablesize > 1) { if (pts[0].x < pts[1].x) descending = 0; } while (i < tablesize) { if ((descending) && (pts[i].x < input)) { /* table entry is less than measured*/ /* value and table is descending, stop */ break; } else if ((!descending) && (pts[i].x > input)) { /* table entry is greater than measured*/ /*value and table is ascending, stop */ break; } i++; } if (i == 0) { *output = pts[0].y; } else if (i == tablesize) { *output = pts[tablesize - 1].y; } else { /* result is between search_index and search_index-1 */ /* interpolate linearly */ *output = (((s32)((pts[i].y - pts[i - 1].y) * (input - pts[i - 1].x)) / (pts[i].x - pts[i - 1].x)) + pts[i - 1].y); } return 0; } static void <API key>(const struct vadc_linear_graph *calib_graph, u16 adc_code, bool absolute, s64 *scale_voltage) { *scale_voltage = (adc_code - calib_graph->gnd); *scale_voltage *= calib_graph->dx; *scale_voltage = div64_s64(*scale_voltage, calib_graph->dy); if (absolute) *scale_voltage += calib_graph->dx; if (*scale_voltage < 0) *scale_voltage = 0; } static int <API key>(const struct vadc_linear_graph *calib_graph, const struct vadc_prescale_ratio *prescale, bool absolute, u16 adc_code, int *result_uv) { s64 voltage = 0, result = 0; <API key>(calib_graph, adc_code, absolute, &voltage); voltage = voltage * prescale->den; result = div64_s64(voltage, prescale->num); *result_uv = result; return 0; } static int <API key>(const struct vadc_linear_graph *calib_graph, const struct vadc_prescale_ratio *prescale, bool absolute, u16 adc_code, int 
*result_mdec) { s64 voltage = 0; int ret; <API key>(calib_graph, adc_code, absolute, &voltage); if (absolute) voltage = div64_s64(voltage, 1000); ret = <API key>(<API key>, ARRAY_SIZE(<API key>), voltage, result_mdec); if (ret) return ret; *result_mdec *= 1000; return 0; } static int <API key>(const struct vadc_linear_graph *calib_graph, const struct vadc_prescale_ratio *prescale, bool absolute, u16 adc_code, int *result_mdec) { s64 voltage = 0; u64 temp; /* Temporary variable for do_div */ <API key>(calib_graph, adc_code, absolute, &voltage); if (voltage > 0) { temp = voltage * prescale->den; do_div(temp, prescale->num * 2); voltage = temp; } else { voltage = 0; } voltage -= <API key>; *result_mdec = voltage; return 0; } static int <API key>(const struct vadc_linear_graph *calib_graph, const struct vadc_prescale_ratio *prescale, bool absolute, u16 adc_code, int *result_mdec) { s64 voltage = 0, result = 0; <API key>(calib_graph, adc_code, absolute, &voltage); voltage = voltage * prescale->den; voltage = div64_s64(voltage, prescale->num); voltage = ((PMI_CHG_SCALE_1) * (voltage * 2)); voltage = (voltage + PMI_CHG_SCALE_2); result = div64_s64(voltage, 1000000); *result_mdec = result; return 0; } static int <API key>(u16 adc_code, const struct vadc_prescale_ratio *prescale, const struct adc5_data *data, unsigned int factor) { s64 voltage, temp, adc_vdd_ref_mv = 1875; /* * The normal data range is between 0V to 1.875V. 
On cases where * we read low voltage values, the ADC code can go beyond the * range and the scale result is incorrect so we clamp the values * for the cases where the code represents a value below 0V */ if (adc_code > VADC5_MAX_CODE) adc_code = 0; /* (ADC code * vref_vadc (1.875V)) / full_scale_code */ voltage = (s64) adc_code * adc_vdd_ref_mv * 1000; voltage = div64_s64(voltage, data-><API key>); if (voltage > 0) { voltage *= prescale->den; temp = prescale->num * factor; voltage = div64_s64(voltage, temp); } else { voltage = 0; } return (int) voltage; } static int <API key>( const struct vadc_prescale_ratio *prescale, const struct adc5_data *data, u16 adc_code, int *result_uv) { *result_uv = <API key>(adc_code, prescale, data, 1); return 0; } static int <API key>( const struct vadc_prescale_ratio *prescale, const struct adc5_data *data, u16 adc_code, int *result_mdec) { int voltage; voltage = <API key>(adc_code, prescale, data, 1000); /* Map voltage to temperature from look-up table */ return <API key>(<API key>, ARRAY_SIZE(<API key>), voltage, result_mdec); } static int <API key>( const struct vadc_prescale_ratio *prescale, const struct adc5_data *data, u16 adc_code, int *result_mdec) { *result_mdec = <API key>(adc_code, prescale, data, 2); *result_mdec -= <API key>; return 0; } static int <API key>( const struct vadc_prescale_ratio *prescale, const struct adc5_data *data, u16 adc_code, int *result_mdec) { *result_mdec = <API key>(adc_code * 100, prescale, data, <API key>); *result_mdec = <API key> - *result_mdec; return 0; } static int <API key>( const struct vadc_prescale_ratio *prescale, const struct adc5_data *data, u16 adc_code, int *result_mdec) { *result_mdec = <API key>(adc_code, prescale, data, 4); *result_mdec = <API key> - *result_mdec; return 0; } int qcom_vadc_scale(enum vadc_scale_fn_type scaletype, const struct vadc_linear_graph *calib_graph, const struct vadc_prescale_ratio *prescale, bool absolute, u16 adc_code, int *result) { switch (scaletype) 
{ case SCALE_DEFAULT: return <API key>(calib_graph, prescale, absolute, adc_code, result); case <API key>: case SCALE_XOTHERM: return <API key>(calib_graph, prescale, absolute, adc_code, result); case SCALE_PMIC_THERM: return <API key>(calib_graph, prescale, absolute, adc_code, result); case SCALE_PMI_CHG_TEMP: return <API key>(calib_graph, prescale, absolute, adc_code, result); default: return -EINVAL; } } EXPORT_SYMBOL(qcom_vadc_scale); int qcom_adc5_hw_scale(enum vadc_scale_fn_type scaletype, const struct vadc_prescale_ratio *prescale, const struct adc5_data *data, u16 adc_code, int *result) { if (!(scaletype >= <API key> && scaletype < <API key>)) { pr_err("Invalid scale type %d\n", scaletype); return -EINVAL; } return scale_adc5_fn[scaletype].scale_fn(prescale, data, adc_code, result); } EXPORT_SYMBOL(qcom_adc5_hw_scale); int <API key>(u32 value) { if (!is_power_of_2(value) || value < VADC_DECIMATION_MIN || value > VADC_DECIMATION_MAX) return -EINVAL; return __ffs64(value / VADC_DECIMATION_MIN); } EXPORT_SYMBOL(<API key>); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Qualcomm ADC common functionality");
#ifndef __RTL92E_HW_H__ #define __RTL92E_HW_H__ void rtl92ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); void <API key>(struct ieee80211_hw *hw); void <API key>(struct ieee80211_hw *hw, struct rtl_int *int_vec); int rtl92ee_hw_init(struct ieee80211_hw *hw); void <API key>(struct ieee80211_hw *hw); void <API key>(struct ieee80211_hw *hw); void <API key>(struct ieee80211_hw *hw); int <API key>(struct ieee80211_hw *hw, enum nl80211_iftype type); void <API key>(struct ieee80211_hw *hw, bool check_bssid); void rtl92ee_set_qos(struct ieee80211_hw *hw, int aci); void <API key>(struct ieee80211_hw *hw); void <API key>(struct ieee80211_hw *hw); void <API key>(struct ieee80211_hw *hw, u32 add_msr, u32 rm_msr); void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); void <API key>(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u8 rssi_level, bool update_bw); void <API key>(struct ieee80211_hw *hw); bool <API key>(struct ieee80211_hw *hw, u8 *valid); void <API key>(struct ieee80211_hw *hw); void rtl92ee_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr, bool is_group, u8 enc_algo, bool is_wepkey, bool clear_all); void <API key>(struct ieee80211_hw *hw, bool autoload_fail, u8 *hwinfo); void rtl92ee_bt_reg_init(struct ieee80211_hw *hw); void rtl92ee_bt_hw_init(struct ieee80211_hw *hw); void rtl92ee_suspend(struct ieee80211_hw *hw); void rtl92ee_resume(struct ieee80211_hw *hw); void <API key>(struct ieee80211_hw *hw, bool allow_all_da, bool write_into_reg); void <API key>(unsigned long data); #endif
<?php
// Joomla entry-point guard: block direct access to this file.
defined('_JEXEC') or die('@-_-@');

jimport('joomla.form.formfield');

/*
 * Form field that displays the read-only OAuth callback URL for a
 * SLogin provider plugin.
 *
 * NOTE(review): the class name was redacted in the source we received
 * (shown as "<API key>"); by Joomla convention it should be
 * JFormField<Type> for $type = 'CallbackUrl' — confirm against the repo.
 */
class <API key> extends JFormField
{
	/**
	 * The form field type.
	 *
	 * @var string
	 * @since 1.6
	 */
	public $type = 'CallbackUrl';

	/**
	 * Method to get the field input markup.
	 *
	 * @return string The field input markup.
	 * @since 1.6
	 */
	protected function getInput()
	{
		// Build the SLogin "check" task query string when the XML element
		// carries a plugin name in its value attribute.
		$task = !empty($this->element['value']) ? '?option=com_slogin&task=check&plugin=' . (string) $this->element['value'] : '';
		// Honour an optional readonly="true" attribute on the field element.
		$readonly = ((string) $this->element['readonly'] == 'true') ? ' readonly="readonly"' : '';
		// Optional CSS class attribute.
		$class = $this->element['class'] ? ' class="' . (string) $this->element['class'] . '"' : '';
		$CallbackUrl = JURI::root().$task;
		// Strip a trailing slash so the displayed URL has no dangling '/'
		// (JURI::root() ends with one when $task is empty).
		if(substr($CallbackUrl, -1, 1) == '/'){
			$CallbackUrl = substr($CallbackUrl, 0, -1);
		}
		$html = '<input type="text" name="' . $this->name . '" id="' . $this->id . '"' . ' value="'.$CallbackUrl.'" size="70%" '. $class . $readonly .' />';
		return $html;
	}
}
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "ixgb_hw.h" #include "ixgb_ee.h" /* Local prototypes */ static u16 ixgb_shift_in_bits(struct ixgb_hw *hw); static void ixgb_shift_out_bits(struct ixgb_hw *hw, u16 data, u16 count); static void ixgb_standby_eeprom(struct ixgb_hw *hw); static bool <API key>(struct ixgb_hw *hw); static void ixgb_cleanup_eeprom(struct ixgb_hw *hw); static void ixgb_raise_clock(struct ixgb_hw *hw, u32 *eecd_reg) { /* Raise the clock input to the EEPROM (by setting the SK bit), and then * wait 50 microseconds. */ *eecd_reg = *eecd_reg | IXGB_EECD_SK; IXGB_WRITE_REG(hw, EECD, *eecd_reg); IXGB_WRITE_FLUSH(hw); udelay(50); } static void ixgb_lower_clock(struct ixgb_hw *hw, u32 *eecd_reg) { /* Lower the clock input to the EEPROM (by clearing the SK bit), and then * wait 50 microseconds. */ *eecd_reg = *eecd_reg & ~IXGB_EECD_SK; IXGB_WRITE_REG(hw, EECD, *eecd_reg); IXGB_WRITE_FLUSH(hw); udelay(50); } static void ixgb_shift_out_bits(struct ixgb_hw *hw, u16 data, u16 count) { u32 eecd_reg; u32 mask; /* We need to shift "count" bits out to the EEPROM. So, value in the * "data" parameter will be shifted out to the EEPROM one bit at a time. * In order to do this, "data" must be broken down into bits. */ mask = 0x01 << (count - 1); eecd_reg = IXGB_READ_REG(hw, EECD); eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI); do { /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1", * and then raising and then lowering the clock (the SK bit controls * the clock input to the EEPROM). A "0" is shifted out to the EEPROM * by setting "DI" to "0" and then raising and then lowering the clock. */ eecd_reg &= ~IXGB_EECD_DI; if (data & mask) eecd_reg |= IXGB_EECD_DI; IXGB_WRITE_REG(hw, EECD, eecd_reg); IXGB_WRITE_FLUSH(hw); udelay(50); ixgb_raise_clock(hw, &eecd_reg); ixgb_lower_clock(hw, &eecd_reg); mask = mask >> 1; } while (mask); /* We leave the "DI" bit set to "0" when we leave this routine. 
*/ eecd_reg &= ~IXGB_EECD_DI; IXGB_WRITE_REG(hw, EECD, eecd_reg); } static u16 ixgb_shift_in_bits(struct ixgb_hw *hw) { u32 eecd_reg; u32 i; u16 data; /* In order to read a register from the EEPROM, we need to shift 16 bits * in from the EEPROM. Bits are "shifted in" by raising the clock input to * the EEPROM (setting the SK bit), and then reading the value of the "DO" * bit. During this "shifting in" process the "DI" bit should always be * clear.. */ eecd_reg = IXGB_READ_REG(hw, EECD); eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI); data = 0; for (i = 0; i < 16; i++) { data = data << 1; ixgb_raise_clock(hw, &eecd_reg); eecd_reg = IXGB_READ_REG(hw, EECD); eecd_reg &= ~(IXGB_EECD_DI); if (eecd_reg & IXGB_EECD_DO) data |= 1; ixgb_lower_clock(hw, &eecd_reg); } return data; } static void ixgb_setup_eeprom(struct ixgb_hw *hw) { u32 eecd_reg; eecd_reg = IXGB_READ_REG(hw, EECD); /* Clear SK and DI */ eecd_reg &= ~(IXGB_EECD_SK | IXGB_EECD_DI); IXGB_WRITE_REG(hw, EECD, eecd_reg); /* Set CS */ eecd_reg |= IXGB_EECD_CS; IXGB_WRITE_REG(hw, EECD, eecd_reg); } static void ixgb_standby_eeprom(struct ixgb_hw *hw) { u32 eecd_reg; eecd_reg = IXGB_READ_REG(hw, EECD); /* Deselect EEPROM */ eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK); IXGB_WRITE_REG(hw, EECD, eecd_reg); IXGB_WRITE_FLUSH(hw); udelay(50); /* Clock high */ eecd_reg |= IXGB_EECD_SK; IXGB_WRITE_REG(hw, EECD, eecd_reg); IXGB_WRITE_FLUSH(hw); udelay(50); /* Select EEPROM */ eecd_reg |= IXGB_EECD_CS; IXGB_WRITE_REG(hw, EECD, eecd_reg); IXGB_WRITE_FLUSH(hw); udelay(50); /* Clock low */ eecd_reg &= ~IXGB_EECD_SK; IXGB_WRITE_REG(hw, EECD, eecd_reg); IXGB_WRITE_FLUSH(hw); udelay(50); } static void ixgb_clock_eeprom(struct ixgb_hw *hw) { u32 eecd_reg; eecd_reg = IXGB_READ_REG(hw, EECD); /* Rising edge of clock */ eecd_reg |= IXGB_EECD_SK; IXGB_WRITE_REG(hw, EECD, eecd_reg); IXGB_WRITE_FLUSH(hw); udelay(50); /* Falling edge of clock */ eecd_reg &= ~IXGB_EECD_SK; IXGB_WRITE_REG(hw, EECD, eecd_reg); IXGB_WRITE_FLUSH(hw); udelay(50); } 
static void ixgb_cleanup_eeprom(struct ixgb_hw *hw) { u32 eecd_reg; eecd_reg = IXGB_READ_REG(hw, EECD); eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_DI); IXGB_WRITE_REG(hw, EECD, eecd_reg); ixgb_clock_eeprom(hw); } static bool <API key>(struct ixgb_hw *hw) { u32 eecd_reg; u32 i; /* Toggle the CS line. This in effect tells to EEPROM to actually execute * the command in question. */ ixgb_standby_eeprom(hw); /* Now read DO repeatedly until is high (equal to '1'). The EEPROM will * signal that the command has been completed by raising the DO signal. * If DO does not go high in 10 milliseconds, then error out. */ for (i = 0; i < 200; i++) { eecd_reg = IXGB_READ_REG(hw, EECD); if (eecd_reg & IXGB_EECD_DO) return true; udelay(50); } ASSERT(0); return false; } bool <API key>(struct ixgb_hw *hw) { u16 checksum = 0; u16 i; for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) checksum += ixgb_read_eeprom(hw, i); if (checksum == (u16) EEPROM_SUM) return true; else return false; } void <API key>(struct ixgb_hw *hw) { u16 checksum = 0; u16 i; for (i = 0; i < EEPROM_CHECKSUM_REG; i++) checksum += ixgb_read_eeprom(hw, i); checksum = (u16) EEPROM_SUM - checksum; ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum); } void ixgb_write_eeprom(struct ixgb_hw *hw, u16 offset, u16 data) { struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; /* Prepare the EEPROM for writing */ ixgb_setup_eeprom(hw); /* Send the 9-bit EWEN (write enable) command to the EEPROM (5-bit opcode * plus 4-bit dummy). This puts the EEPROM into write/erase mode. 
*/ ixgb_shift_out_bits(hw, EEPROM_EWEN_OPCODE, 5); ixgb_shift_out_bits(hw, 0, 4); /* Prepare the EEPROM */ ixgb_standby_eeprom(hw); /* Send the Write command (3-bit opcode + 6-bit addr) */ ixgb_shift_out_bits(hw, EEPROM_WRITE_OPCODE, 3); ixgb_shift_out_bits(hw, offset, 6); /* Send the data */ ixgb_shift_out_bits(hw, data, 16); <API key>(hw); /* Recover from write */ ixgb_standby_eeprom(hw); /* Send the 9-bit EWDS (write disable) command to the EEPROM (5-bit * opcode plus 4-bit dummy). This takes the EEPROM out of write/erase * mode. */ ixgb_shift_out_bits(hw, EEPROM_EWDS_OPCODE, 5); ixgb_shift_out_bits(hw, 0, 4); /* Done with writing */ ixgb_cleanup_eeprom(hw); /* clear the init_ctrl_reg_1 to signify that the cache is invalidated */ ee_map->init_ctrl_reg_1 = cpu_to_le16(<API key>); } u16 ixgb_read_eeprom(struct ixgb_hw *hw, u16 offset) { u16 data; /* Prepare the EEPROM for reading */ ixgb_setup_eeprom(hw); /* Send the READ command (opcode + addr) */ ixgb_shift_out_bits(hw, EEPROM_READ_OPCODE, 3); /* * We have a 64 word EEPROM, there are 6 address bits */ ixgb_shift_out_bits(hw, offset, 6); /* Read the data */ data = ixgb_shift_in_bits(hw); /* End this read operation */ ixgb_standby_eeprom(hw); return data; } bool <API key>(struct ixgb_hw *hw) { u16 i; u16 checksum = 0; struct ixgb_ee_map_type *ee_map; ENTER(); ee_map = (struct ixgb_ee_map_type *)hw->eeprom; pr_debug("Reading eeprom data\n"); for (i = 0; i < IXGB_EEPROM_SIZE ; i++) { u16 ee_data; ee_data = ixgb_read_eeprom(hw, i); checksum += ee_data; hw->eeprom[i] = cpu_to_le16(ee_data); } if (checksum != (u16) EEPROM_SUM) { pr_debug("Checksum invalid\n"); /* clear the init_ctrl_reg_1 to signify that the cache is * invalidated */ ee_map->init_ctrl_reg_1 = cpu_to_le16(<API key>); return false; } if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(<API key>)) != cpu_to_le16(<API key>)) { pr_debug("Signature invalid\n"); return false; } return true; } static bool <API key> (struct ixgb_hw* hw) { struct ixgb_ee_map_type *ee_map 
= (struct ixgb_ee_map_type *)hw->eeprom; if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(<API key>)) == cpu_to_le16(<API key>)) { return true; } else { return <API key>(hw); } } __le16 <API key>(struct ixgb_hw *hw, u16 index) { if ((index < IXGB_EEPROM_SIZE) && (<API key>(hw) == true)) { return hw->eeprom[index]; } return 0; } void <API key>(struct ixgb_hw *hw, u8 *mac_addr) { int i; struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; ENTER(); if (<API key>(hw) == true) { for (i = 0; i < <API key>; i++) { mac_addr[i] = ee_map->mac_addr[i]; } pr_debug("eeprom mac address = %pM\n", mac_addr); } } u32 <API key>(struct ixgb_hw *hw) { if (<API key>(hw) == true) return le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG]) | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16); return 0; } u16 <API key>(struct ixgb_hw *hw) { struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; if (<API key>(hw) == true) return le16_to_cpu(ee_map->device_id); return 0; }
# $Id: admonitions.py 7681 2013-07-12 07:52:27Z milde $ """ Admonition directives. """ __docformat__ = 'reStructuredText' from docutils.parsers.rst import Directive from docutils.parsers.rst import states, directives from docutils.parsers.rst.roles import set_classes from docutils import nodes class BaseAdmonition(Directive): <API key> = True option_spec = {'class': directives.class_option, 'name': directives.unchanged} has_content = True node_class = None """Subclasses must set this to the appropriate admonition node class.""" def run(self): set_classes(self.options) self.assert_has_content() text = '\n'.join(self.content) admonition_node = self.node_class(text, **self.options) self.add_name(admonition_node) if self.node_class is nodes.admonition: title_text = self.arguments[0] textnodes, messages = self.state.inline_text(title_text, self.lineno) title = nodes.title(title_text, '', *textnodes) title.source, title.line = ( self.state_machine.get_source_and_line(self.lineno)) admonition_node += title admonition_node += messages if not 'classes' in self.options: admonition_node['classes'] += ['admonition-' + nodes.make_id(title_text)] self.state.nested_parse(self.content, self.content_offset, admonition_node) return [admonition_node] class Admonition(BaseAdmonition): required_arguments = 1 node_class = nodes.admonition class Attention(BaseAdmonition): node_class = nodes.attention class Caution(BaseAdmonition): node_class = nodes.caution class Danger(BaseAdmonition): node_class = nodes.danger class Error(BaseAdmonition): node_class = nodes.error class Hint(BaseAdmonition): node_class = nodes.hint class Important(BaseAdmonition): node_class = nodes.important class Note(BaseAdmonition): node_class = nodes.note class Tip(BaseAdmonition): node_class = nodes.tip class Warning(BaseAdmonition): node_class = nodes.warning
# Homebrew formula for BlazeBlogger, a command-line CMS.
class Blazeblogger < Formula
  desc "CMS for the command-line"
  homepage "http://blaze.blackened.cz/"
  url "https://blazeblogger.googlecode.com/files/blazeblogger-1.2.0.tar.gz"
  sha1 "<SHA1-like>"

  bottle do
    cellar :any
    sha1 "<SHA1-like>" => :yosemite
    sha1 "<SHA1-like>" => :mavericks
    sha1 "<SHA1-like>" => :mountain_lion
  end

  def install
    # The project's Makefile is not parallel-safe.
    ENV.deparallelize
    system "make", "prefix=#{prefix}", "compdir=#{prefix}", "install"
  end

  test do
    # Smoke test: initialise a blog, set a config value, build the site,
    # then check that the expected output artefacts exist.
    system bin/"blaze", "init"
    system bin/"blaze", "config", "blog.title", "Homebrew!"
    system bin/"blaze", "make"
    assert File.exist? "default.css"
    assert File.read(".blaze/config").include?("Homebrew!")
  end
end
require 'formula'

# Homebrew formula for libmad, the MPEG audio decoder library.
class Mad < Formula
  desc "MPEG audio decoder"
  # NOTE(review): the homepage URL was truncated when this source was
  # captured; the fragment below is kept verbatim and will not parse —
  # restore the full upstream homepage URL before shipping.
  homepage 'http: url 'https://downloads.sourceforge.net/project/mad/libmad/0.15.1b/libmad-0.15.1b.tar.gz' sha1 '<SHA1-like>'

  bottle do
    cellar :any
    revision 1
    sha1 "<SHA1-like>" => :yosemite
    sha1 "<SHA1-like>" => :mavericks
    sha1 "<SHA1-like>" => :mountain_lion
  end

  def install
    # Pick the fixed-point math implementation matching the CPU word size.
    fpm = MacOS.prefer_64_bit? ? '64bit': 'intel'
    system "./configure", "--disable-debugging", "--enable-fpm=#{fpm}", "--prefix=#{prefix}"
    system "make", "CFLAGS=#{ENV.cflags}", "LDFLAGS=#{ENV.ldflags}", "install"
    # libmad ships no pkg-config file; generate one so dependents can
    # discover the library.
    (lib+'pkgconfig/mad.pc').write pc_file
  end

  # Contents of the generated pkg-config file.
  # NOTE(review): the heredoc's original line breaks were destroyed by the
  # capture; reconstructed in conventional undented form — confirm against
  # the repository.
  def pc_file; <<-EOS.undent
    prefix=#{opt_prefix}
    exec_prefix=${prefix}
    libdir=${exec_prefix}/lib
    includedir=${prefix}/include

    Name: mad
    Description: MPEG Audio Decoder
    Version: #{version}
    Requires:
    Conflicts:
    Libs: -L${libdir} -lmad -lm
    Cflags: -I${includedir}
    EOS
  end
end
#include <linux/kernel.h> #include "ieee754.h" #define DP_EBIAS 1023 #define DP_EMIN (-1022) #define DP_EMAX 1023 #define DP_FBITS 52 #define SP_EBIAS 127 #define SP_EMIN (-126) #define SP_EMAX 127 #define SP_FBITS 23 #define DP_MBIT(x) ((u64)1 << (x)) #define DP_HIDDEN_BIT DP_MBIT(DP_FBITS) #define DP_SIGN_BIT DP_MBIT(63) #define SP_MBIT(x) ((u32)1 << (x)) #define SP_HIDDEN_BIT SP_MBIT(SP_FBITS) #define SP_SIGN_BIT SP_MBIT(31) #define SPSIGN(sp) (sp.parts.sign) #define SPBEXP(sp) (sp.parts.bexp) #define SPMANT(sp) (sp.parts.mant) #define DPSIGN(dp) (dp.parts.sign) #define DPBEXP(dp) (dp.parts.bexp) #define DPMANT(dp) (dp.parts.mant) ieee754dp ieee754dp_dump(char *m, ieee754dp x) { int i; printk("%s", m); printk("<%08x,%08x>\n", (unsigned) (x.bits >> 32), (unsigned) x.bits); printk("\t="); switch (ieee754dp_class(x)) { case IEEE754_CLASS_QNAN: case IEEE754_CLASS_SNAN: printk("Nan %c", DPSIGN(x) ? '-' : '+'); for (i = DP_FBITS - 1; i >= 0; i printk("%c", DPMANT(x) & DP_MBIT(i) ? '1' : '0'); break; case IEEE754_CLASS_INF: printk("%cInfinity", DPSIGN(x) ? '-' : '+'); break; case IEEE754_CLASS_ZERO: printk("%cZero", DPSIGN(x) ? '-' : '+'); break; case IEEE754_CLASS_DNORM: printk("%c0.", DPSIGN(x) ? '-' : '+'); for (i = DP_FBITS - 1; i >= 0; i printk("%c", DPMANT(x) & DP_MBIT(i) ? '1' : '0'); printk("e%d", DPBEXP(x) - DP_EBIAS); break; case IEEE754_CLASS_NORM: printk("%c1.", DPSIGN(x) ? '-' : '+'); for (i = DP_FBITS - 1; i >= 0; i printk("%c", DPMANT(x) & DP_MBIT(i) ? '1' : '0'); printk("e%d", DPBEXP(x) - DP_EBIAS); break; default: printk("Illegal/Unknown IEEE754 value class"); } printk("\n"); return x; } ieee754sp ieee754sp_dump(char *m, ieee754sp x) { int i; printk("%s=", m); printk("<%08x>\n", (unsigned) x.bits); printk("\t="); switch (ieee754sp_class(x)) { case IEEE754_CLASS_QNAN: case IEEE754_CLASS_SNAN: printk("Nan %c", SPSIGN(x) ? '-' : '+'); for (i = SP_FBITS - 1; i >= 0; i printk("%c", SPMANT(x) & SP_MBIT(i) ? 
'1' : '0'); break; case IEEE754_CLASS_INF: printk("%cInfinity", SPSIGN(x) ? '-' : '+'); break; case IEEE754_CLASS_ZERO: printk("%cZero", SPSIGN(x) ? '-' : '+'); break; case IEEE754_CLASS_DNORM: printk("%c0.", SPSIGN(x) ? '-' : '+'); for (i = SP_FBITS - 1; i >= 0; i printk("%c", SPMANT(x) & SP_MBIT(i) ? '1' : '0'); printk("e%d", SPBEXP(x) - SP_EBIAS); break; case IEEE754_CLASS_NORM: printk("%c1.", SPSIGN(x) ? '-' : '+'); for (i = SP_FBITS - 1; i >= 0; i printk("%c", SPMANT(x) & SP_MBIT(i) ? '1' : '0'); printk("e%d", SPBEXP(x) - SP_EBIAS); break; default: printk("Illegal/Unknown IEEE754 value class"); } printk("\n"); return x; }
jQuery(function(e){e.datepicker.regional.ro={closeText:"Închide",prevText:"&
#include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/gpio.h> #include <linux/input.h> #include <linux/input/matrix_keypad.h> #include <linux/leds.h> #include <linux/interrupt.h> #include <linux/spi/spi.h> #include <linux/spi/ads7846.h> #include <linux/i2c/twl.h> #include <linux/usb/otg.h> #include <linux/smsc911x.h> #include <linux/regulator/machine.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <plat/board.h> #include <plat/usb.h> #include <plat/common.h> #include <plat/mcspi.h> #include <plat/display.h> #include "mux.h" #include "<API key>.h" #include "hsmmc.h" #define OMAP3_EVM_TS_GPIO 175 #define OMAP3_EVM_EHCI_VBUS 22 #define <API key> 61 #define OMAP3EVM_ETHR_START 0x2c000000 #define OMAP3EVM_ETHR_SIZE 1024 #define <API key> 0x50 #define <API key> 176 #define <API key> 5 static u8 omap3_evm_version; u8 get_omap3_evm_rev(void) { return omap3_evm_version; } EXPORT_SYMBOL(get_omap3_evm_rev); static void __init <API key>(void) { void __iomem *ioaddr; unsigned int smsc_id; /* Ethernet PHY ID is stored at ID_REV register */ ioaddr = ioremap_nocache(OMAP3EVM_ETHR_START, SZ_1K); if (!ioaddr) return; smsc_id = readl(ioaddr + <API key>) & 0xFFFF0000; iounmap(ioaddr); switch (smsc_id) { /*SMSC9115 chipset*/ case 0x01150000: omap3_evm_version = <API key>; break; /*SMSC 9220 chipset*/ case 0x92200000: default: omap3_evm_version = <API key>; } } #if defined(CONFIG_SMSC911X) || defined(<API key>) static struct resource <API key>[] = { [0] = { .start = OMAP3EVM_ETHR_START, .end = (OMAP3EVM_ETHR_START + OMAP3EVM_ETHR_SIZE - 1), .flags = IORESOURCE_MEM, }, [1] = { .start = OMAP_GPIO_IRQ(<API key>), .end = OMAP_GPIO_IRQ(<API key>), .flags = (IORESOURCE_IRQ | IRQF_TRIGGER_LOW), }, }; static struct <API key> smsc911x_config = { .phy_interface = <API key>, .irq_polarity = <API key>, .irq_type 
= <API key>, .flags = (SMSC911X_USE_32BIT | <API key>), }; static struct platform_device <API key> = { .name = "smsc911x", .id = -1, .num_resources = ARRAY_SIZE(<API key>), .resource = &<API key>[0], .dev = { .platform_data = &smsc911x_config, }, }; static inline void __init <API key>(void) { int eth_cs; struct clk *l3ck; unsigned int rate; eth_cs = <API key>; l3ck = clk_get(NULL, "l3_ck"); if (IS_ERR(l3ck)) rate = 100000000; else rate = clk_get_rate(l3ck); if (gpio_request(<API key>, "SMSC911x irq") < 0) { printk(KERN_ERR "Failed to request GPIO%d for smsc911x IRQ\n", <API key>); return; } <API key>(<API key>); <API key>(&<API key>); } #else static inline void __init <API key>(void) { return; } #endif /* * OMAP3EVM LCD Panel control signals */ #define <API key> 2 #define <API key> 3 #define <API key> 152 #define <API key> 153 #define <API key> 154 #define <API key> 155 #define <API key> 210 #define <API key> 199 static int lcd_enabled; static int dvi_enabled; static void __init <API key>(void) { int r; r = gpio_request(<API key>, "lcd_panel_resb"); if (r) { printk(KERN_ERR "failed to get lcd_panel_resb\n"); return; } <API key>(<API key>, 1); r = gpio_request(<API key>, "lcd_panel_ini"); if (r) { printk(KERN_ERR "failed to get lcd_panel_ini\n"); goto err_1; } <API key>(<API key>, 1); r = gpio_request(<API key>, "lcd_panel_qvga"); if (r) { printk(KERN_ERR "failed to get lcd_panel_qvga\n"); goto err_2; } <API key>(<API key>, 0); r = gpio_request(<API key>, "lcd_panel_lr"); if (r) { printk(KERN_ERR "failed to get lcd_panel_lr\n"); goto err_3; } <API key>(<API key>, 1); r = gpio_request(<API key>, "lcd_panel_ud"); if (r) { printk(KERN_ERR "failed to get lcd_panel_ud\n"); goto err_4; } <API key>(<API key>, 1); r = gpio_request(<API key>, "lcd_panel_envdd"); if (r) { printk(KERN_ERR "failed to get lcd_panel_envdd\n"); goto err_5; } <API key>(<API key>, 0); return; err_5: gpio_free(<API key>); err_4: gpio_free(<API key>); err_3: gpio_free(<API key>); err_2: gpio_free(<API 
key>); err_1: gpio_free(<API key>); } static int <API key>(struct omap_dss_device *dssdev) { if (dvi_enabled) { printk(KERN_ERR "cannot enable LCD, DVI is enabled\n"); return -EINVAL; } gpio_set_value(<API key>, 0); if (get_omap3_evm_rev() >= <API key>) gpio_set_value(<API key>, 0); else gpio_set_value(<API key>, 1); lcd_enabled = 1; return 0; } static void <API key>(struct omap_dss_device *dssdev) { gpio_set_value(<API key>, 1); if (get_omap3_evm_rev() >= <API key>) gpio_set_value(<API key>, 1); else gpio_set_value(<API key>, 0); lcd_enabled = 0; } static struct omap_dss_device <API key> = { .name = "lcd", .driver_name = "sharp_ls_panel", .type = <API key>, .phy.dpi.data_lines = 18, .platform_enable = <API key>, .platform_disable = <API key>, }; static int omap3_evm_enable_tv(struct omap_dss_device *dssdev) { return 0; } static void <API key>(struct omap_dss_device *dssdev) { } static struct omap_dss_device omap3_evm_tv_device = { .name = "tv", .driver_name = "venc", .type = <API key>, .phy.venc.type = <API key>, .platform_enable = omap3_evm_enable_tv, .platform_disable = <API key>, }; static int <API key>(struct omap_dss_device *dssdev) { if (lcd_enabled) { printk(KERN_ERR "cannot enable DVI, LCD is enabled\n"); return -EINVAL; } gpio_set_value(<API key>, 1); dvi_enabled = 1; return 0; } static void <API key>(struct omap_dss_device *dssdev) { gpio_set_value(<API key>, 0); dvi_enabled = 0; } static struct omap_dss_device <API key> = { .name = "dvi", .driver_name = "generic_panel", .type = <API key>, .phy.dpi.data_lines = 24, .platform_enable = <API key>, .platform_disable = <API key>, }; static struct omap_dss_device *<API key>[] = { &<API key>, &omap3_evm_tv_device, &<API key>, }; static struct omap_dss_board_info omap3_evm_dss_data = { .num_devices = ARRAY_SIZE(<API key>), .devices = <API key>, .default_device = &<API key>, }; static struct platform_device <API key> = { .name = "omapdss", .id = -1, .dev = { .platform_data = &omap3_evm_dss_data, }, }; static 
struct <API key> <API key> = { .supply = "vmmc", }; static struct <API key> <API key> = { .supply = "vmmc_aux", }; /* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */ static struct regulator_init_data omap3evm_vmmc1 = { .constraints = { .min_uV = 1850000, .max_uV = 3150000, .valid_modes_mask = <API key> | <API key>, .valid_ops_mask = <API key> | <API key> | <API key>, }, .<API key> = 1, .consumer_supplies = &<API key>, }; /* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */ static struct regulator_init_data omap3evm_vsim = { .constraints = { .min_uV = 1800000, .max_uV = 3000000, .valid_modes_mask = <API key> | <API key>, .valid_ops_mask = <API key> | <API key> | <API key>, }, .<API key> = 1, .consumer_supplies = &<API key>, }; static struct omap2_hsmmc_info mmc[] = { { .mmc = 1, .wires = 4, .gpio_cd = -EINVAL, .gpio_wp = 63, }, {} /* Terminator */ }; static struct gpio_led gpio_leds[] = { { .name = "omap3evm::ledb", /* normally not visible (board underside) */ .default_trigger = "default-on", .gpio = -EINVAL, /* gets replaced */ .active_low = true, }, }; static struct <API key> gpio_led_info = { .leds = gpio_leds, .num_leds = ARRAY_SIZE(gpio_leds), }; static struct platform_device leds_gpio = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &gpio_led_info, }, }; static int <API key>(struct device *dev, unsigned gpio, unsigned ngpio) { /* gpio + 0 is "mmc0_cd" (input/IRQ) */ omap_mux_init_gpio(63, OMAP_PIN_INPUT); mmc[0].gpio_cd = gpio + 0; omap2_hsmmc_init(mmc); /* link regulators to MMC adapters */ <API key>.dev = mmc[0].dev; <API key>.dev = mmc[0].dev; /* * Most GPIOs are for USB OTG. Some are mostly sent to * the P2 connector; notably LEDA for the LCD backlight. 
*/ /* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */ gpio_request(gpio + TWL4030_GPIO_MAX, "EN_LCD_BKL"); <API key>(gpio + TWL4030_GPIO_MAX, 0); /* gpio + 7 == DVI Enable */ gpio_request(gpio + 7, "EN_DVI"); <API key>(gpio + 7, 0); /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */ gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; <API key>(&leds_gpio); return 0; } static struct <API key> omap3evm_gpio_data = { .gpio_base = OMAP_MAX_GPIO_LINES, .irq_base = <API key>, .irq_end = <API key>, .use_leds = true, .setup = <API key>, }; static struct twl4030_usb_data omap3evm_usb_data = { .usb_mode = T2_USB_MODE_ULPI, }; static int board_keymap[] = { KEY(0, 0, KEY_LEFT), KEY(0, 1, KEY_DOWN), KEY(0, 2, KEY_ENTER), KEY(0, 3, KEY_M), KEY(1, 0, KEY_RIGHT), KEY(1, 1, KEY_UP), KEY(1, 2, KEY_I), KEY(1, 3, KEY_N), KEY(2, 0, KEY_A), KEY(2, 1, KEY_E), KEY(2, 2, KEY_J), KEY(2, 3, KEY_O), KEY(3, 0, KEY_B), KEY(3, 1, KEY_F), KEY(3, 2, KEY_K), KEY(3, 3, KEY_P) }; static struct matrix_keymap_data board_map_data = { .keymap = board_keymap, .keymap_size = ARRAY_SIZE(board_keymap), }; static struct twl4030_keypad_data omap3evm_kp_data = { .keymap_data = &board_map_data, .rows = 4, .cols = 4, .rep = 1, }; static struct <API key> omap3evm_madc_data = { .irq_line = 1, }; static struct <API key> omap3evm_audio_data = { .audio_mclk = 26000000, }; static struct twl4030_codec_data omap3evm_codec_data = { .audio_mclk = 26000000, .audio = &omap3evm_audio_data, }; static struct <API key> <API key> = { .supply = "vdda_dac", .dev = &<API key>.dev, }; /* VDAC for DSS driving S-Video */ static struct regulator_init_data omap3_evm_vdac = { .constraints = { .min_uV = 1800000, .max_uV = 1800000, .apply_uV = true, .valid_modes_mask = <API key> | <API key>, .valid_ops_mask = <API key> | <API key>, }, .<API key> = 1, .consumer_supplies = &<API key>, }; /* VPLL2 for digital video outputs */ static struct <API key> <API key> = REGULATOR_SUPPLY("vdds_dsi", "omapdss"); static struct 
regulator_init_data omap3_evm_vpll2 = { .constraints = { .min_uV = 1800000, .max_uV = 1800000, .apply_uV = true, .valid_modes_mask = <API key> | <API key>, .valid_ops_mask = <API key> | <API key>, }, .<API key> = 1, .consumer_supplies = &<API key>, }; static struct <API key> omap3evm_twldata = { .irq_base = TWL4030_IRQ_BASE, .irq_end = TWL4030_IRQ_END, /* platform_data for children goes here */ .keypad = &omap3evm_kp_data, .madc = &omap3evm_madc_data, .usb = &omap3evm_usb_data, .gpio = &omap3evm_gpio_data, .codec = &omap3evm_codec_data, .vdac = &omap3_evm_vdac, .vpll2 = &omap3_evm_vpll2, }; static struct i2c_board_info __initdata <API key>[] = { { I2C_BOARD_INFO("twl4030", 0x48), .flags = I2C_CLIENT_WAKE, .irq = INT_34XX_SYS_NIRQ, .platform_data = &omap3evm_twldata, }, }; static int __init omap3_evm_i2c_init(void) { /* * REVISIT: These entries can be set in omap3evm_twl_data * after a merge with MFD tree */ omap3evm_twldata.vmmc1 = &omap3evm_vmmc1; omap3evm_twldata.vsim = &omap3evm_vsim; <API key>(1, 2600, <API key>, ARRAY_SIZE(<API key>)); <API key>(2, 400, NULL, 0); <API key>(3, 400, NULL, 0); return 0; } static void ads7846_dev_init(void) { if (gpio_request(OMAP3_EVM_TS_GPIO, "ADS7846 pendown") < 0) printk(KERN_ERR "can't get ads7846 pen down GPIO\n"); <API key>(OMAP3_EVM_TS_GPIO); gpio_set_debounce(OMAP3_EVM_TS_GPIO, 310); } static int <API key>(void) { return !gpio_get_value(OMAP3_EVM_TS_GPIO); } struct <API key> ads7846_config = { .x_max = 0x0fff, .y_max = 0x0fff, .x_plate_ohms = 180, .pressure_max = 255, .debounce_max = 10, .debounce_tol = 3, .debounce_rep = 1, .get_pendown_state = <API key>, .keep_vref_on = 1, .settle_delay_usecs = 150, .wakeup = true, }; static struct <API key> <API key> = { .turbo_mode = 0, .single_channel = 1, /* 0: slave, 1: master */ }; struct spi_board_info <API key>[] = { [0] = { .modalias = "ads7846", .bus_num = 1, .chip_select = 0, .max_speed_hz = 1500000, .controller_data = &<API key>, .irq = OMAP_GPIO_IRQ(OMAP3_EVM_TS_GPIO), 
.platform_data = &ads7846_config, }, }; static struct <API key> omap3_evm_config[] __initdata = { }; static void __init omap3_evm_init_irq(void) { omap_board_config = omap3_evm_config; <API key> = ARRAY_SIZE(omap3_evm_config); <API key>(<API key>, NULL); omap_init_irq(); omap_gpio_init(); } static struct platform_device *omap3_evm_devices[] __initdata = { &<API key>, }; static struct <API key> ehci_pdata __initdata = { .port_mode[0] = <API key>, .port_mode[1] = <API key>, .port_mode[2] = <API key>, .phy_reset = true, /* PHY reset GPIO will be runtime programmed based on EVM version */ .reset_gpio_port[0] = -EINVAL, .reset_gpio_port[1] = -EINVAL, .reset_gpio_port[2] = -EINVAL }; #ifdef CONFIG_OMAP_MUX static struct omap_board_mux board_mux[] __initdata = { OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | <API key> | <API key> | <API key> | <API key>), OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE4 | <API key> | <API key> | <API key>), { .reg_offset = OMAP_MUX_TERMINATOR }, }; #else #define board_mux NULL #endif static struct <API key> musb_board_data = { .interface_type = MUSB_INTERFACE_ULPI, .mode = MUSB_OTG, .power = 100, }; static void __init omap3_evm_init(void) { <API key>(); omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); omap3_evm_i2c_init(); <API key>(omap3_evm_devices, ARRAY_SIZE(omap3_evm_devices)); <API key>(<API key>, ARRAY_SIZE(<API key>)); omap_serial_init(); /* OMAP3EVM uses ISP1504 phy and so register nop transceiver */ <API key>(); if (get_omap3_evm_rev() >= <API key>) { /* enable EHCI VBUS using GPIO22 */ omap_mux_init_gpio(22, <API key>); gpio_request(OMAP3_EVM_EHCI_VBUS, "enable EHCI VBUS"); <API key>(OMAP3_EVM_EHCI_VBUS, 0); gpio_set_value(OMAP3_EVM_EHCI_VBUS, 1); /* Select EHCI port on main board */ omap_mux_init_gpio(61, <API key>); gpio_request(<API key>, "select EHCI port"); <API key>(<API key>, 0); gpio_set_value(<API key>, 0); /* setup EHCI phy reset config */ omap_mux_init_gpio(21, <API key>); ehci_pdata.reset_gpio_port[1] = 21; /* EVM REV >= E can supply 500mA 
with EXTVBUS programming */ musb_board_data.power = 500; musb_board_data.extvbus = 1; } else { /* setup EHCI phy reset on MDC */ omap_mux_init_gpio(135, OMAP_PIN_OUTPUT); ehci_pdata.reset_gpio_port[1] = 135; } usb_musb_init(&musb_board_data); usb_ehci_init(&ehci_pdata); ads7846_dev_init(); <API key>(); <API key>(); } MACHINE_START(OMAP3EVM, "OMAP3 EVM") /* Maintainer: Syed Mohammed Khasim - Texas Instruments */ .phys_io = 0x48000000, .io_pg_offst = ((0xfa000000) >> 18) & 0xfffc, .boot_params = 0x80000100, .map_io = omap3_map_io, .reserve = omap_reserve, .init_irq = omap3_evm_init_irq, .init_machine = omap3_evm_init, .timer = &omap_timer, MACHINE_END
<!DOCTYPE html> <html lang='en'> <head> <title><API key>.svg</title> <meta charset='utf-8'> </head> <body> <h1>Source SVG: <API key>.svg</h1> <svg id="svg-root" width="100%" height="100%" viewBox="0 0 480 360" xmlns="http: xmlns:xlink="http: <!--= Institute of Technology, European Research Consortium for =--> <!--= Informatics and Mathematics (ERCIM), Keio University). =--> <title id="test-title">$RCSfile: <API key>.svg,v $</title> <defs> <font-face font-family="SVGFreeSansASCII" unicode-range="U+0-7F"> <font-face-src> <font-face-uri xlink:href="../resources/SVGFreeSans.svg#ascii"/> </font-face-src> </font-face> </defs> <g id="test-body-content" font-family="SVGFreeSansASCII,sans-serif" font-size="18"> <defs> <pattern id="pattern1" patternUnits="userSpaceOnUse" x="0" y="0" width="100" height="100" viewBox="0 0 10 10"> <circle cx="5" cy="5" r="1.7" fill="red" /> </pattern> <pattern id="pattern2" xlink:href="#invalidlink" patternUnits="userSpaceOnUse" x="0" y="0" width="100" height="100" viewBox="0 0 10 10"> <circle cx="5" cy="5" r="2" fill="lime" /> </pattern> </defs> <rect fill="url(#pattern1)" stroke="none" x="1" y="1" width="200" height="200" /> <rect fill="url(#pattern2)" stroke="none" x="1" y="1" width="200" height="200" /> </g> <g font-family="SVGFreeSansASCII,sans-serif" font-size="32"> <text id="revision" x="10" y="340" stroke="none" fill="black">$Revision: 1.2 $</text> </g> <rect id="test-frame" x="1" y="1" width="478" height="358" fill="none" stroke="#000"/> <!-- comment out this watermark once the test is approved --> <g id="draft-watermark"> <rect x="1" y="1" width="478" height="20" fill="red" stroke="black" stroke-width="1"/> <text font-family="SVGFreeSansASCII,sans-serif" font-weight="bold" font-size="20" x="240" text-anchor="middle" y="18" stroke-width="0.5" stroke="black" fill="white">DRAFT</text> </g> </svg> </body> </html>
// Re-export the `constant` helper from the shared functional-programming
// utilities as this module's entire export surface.  `export =` (CommonJS
// interop form) is used so consumers can `require()` the function directly.
import { constant } from "../fp";
export = constant;
/* orig : i386 init_task.c */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

/* Statically-initialized bootstrap instances of the per-process state
 * shared by the first task; the INIT_* macros (from <linux/init_task.h>
 * and friends) fill in the fields. */
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);

/* The initial address space for the boot task. */
struct mm_struct init_mm = INIT_MM(init_mm);

EXPORT_SYMBOL(init_mm);

/*
 * Initial thread structure.
 *
 * We need to make sure that this is 8192-byte aligned due to the
 * way process stacks are handled. This is done by having a special
 * "init_task" linker map entry..
 */
union thread_union init_thread_union
	__attribute__((__section__(".data.init_task"))) =
		{ INIT_THREAD_INFO(init_task) };

/*
 * Initial task structure.
 *
 * All other task structs will be allocated on slabs in fork.c
 */
struct task_struct init_task = INIT_TASK(init_task);

EXPORT_SYMBOL(init_task);
/** * @module Ink.UI.DatePicker_1 * @version 1 * Date selector */ Ink.createModule('Ink.UI.DatePicker', '1', ['Ink.UI.Common_1','Ink.Dom.Event_1','Ink.Dom.Css_1','Ink.Dom.Element_1','Ink.Dom.Selector_1','Ink.Util.Array_1','Ink.Util.Date_1', 'Ink.Dom.Browser_1'], function(Common, Event, Css, InkElement, Selector, InkArray, InkDate ) { 'use strict'; // Repeat a string. Long version of (new Array(n)).join(str); function strRepeat(n, str) { var ret = ''; for (var i = 0; i < n; i++) { ret += str; } return ret; } // Clamp a number into a min/max limit function clamp(n, min, max) { if (n > max) { n = max; } if (n < min) { n = min; } return n; } function <API key>(YMD) { var split = YMD.split('-'); return dateishFromYMD(+split[0], +split[1] - 1, +split[2]); } function dateishFromYMD(year, month, day) { return {_year: year, _month: month, _day: day}; } function dateishFromDate(date) { return {_year: date.getFullYear(), _month: date.getMonth(), _day: date.getDate()}; } var DatePicker = function(selector, options) { this._element = selector && Common.elOrSelector(selector, '[Ink.UI.DatePicker_1]: selector argument'); this._options = Common.options('Ink.UI.DatePicker_1', { autoOpen: ['Boolean', false], cleanText: ['String', 'Clear'], closeText: ['String', 'Close'], containerElement:['Element', null], cssClass: ['String', 'ink-calendar bottom'], dateRange: ['String', null], // use this in a <select> displayInSelect: ['Boolean', false], dayField: ['Element', null], monthField: ['Element', null], yearField: ['Element', null], format: ['String', 'yyyy-mm-dd'], instance: ['String', 'scdp_' + Math.round(99999 * Math.random())], nextLinkText: ['String', '»'], ofText: ['String', ' de '], onFocus: ['Boolean', true], onMonthSelected: ['Function', null], onSetDate: ['Function', null], onYearSelected: ['Function', null], position: ['String', 'right'], prevLinkText: ['String', '«'], showClean: ['Boolean', true], showClose: ['Boolean', true], shy: ['Boolean', true], startDate: ['String', 
null], // format yyyy-mm-dd, startWeekDay: ['Number', 1], // Validation validDayFn: ['Function', null], validMonthFn: ['Function', null], validYearFn: ['Function', null], nextValidDateFn: ['Function', null], prevValidDateFn: ['Function', null], yearRange: ['String', null], // Text month: ['Object', { 1:'January', 2:'February', 3:'March', 4:'April', 5:'May', 6:'June', 7:'July', 8:'August', 9:'September', 10:'October', 11:'November', 12:'December' }], wDay: ['Object', { 0:'Sunday', 1:'Monday', 2:'Tuesday', 3:'Wednesday', 4:'Thursday', 5:'Friday', 6:'Saturday' }] }, options || {}, this._element); this._options.format = this._dateParsers[ this._options.format ] || this._options.format; this._hoverPicker = false; this._picker = this._options.pickerField && Common.elOrSelector(this._options.pickerField, 'pickerField'); this._setMinMax( this._options.dateRange || this._options.yearRange ); if(this._options.startDate) { this.setDate( this._options.startDate ); } else if (this._element && this._element.value) { this.setDate( this._element.value ); } else { var today = new Date(); this._day = today.getDate( ); this._month = today.getMonth( ); this._year = today.getFullYear( ); } if (this._options.startWeekDay < 0 || this._options.startWeekDay > 6) { Ink.warn('Ink.UI.DatePicker_1: option "startWeekDay" must be between 0 (sunday) and 6 (saturday)'); this._options.startWeekDay = clamp(this._options.startWeekDay, 0, 6); } if(this._options.displayInSelect && !(this._options.dayField && this._options.monthField && this._options.yearField)){ throw new Error( 'Ink.UI.DatePicker: displayInSelect option enabled.'+ 'Please specify dayField, monthField and yearField selectors.'); } this._init(); }; DatePicker.prototype = { version: '0.1', /** * Initialization function. Called by the constructor and receives the same parameters. 
* * @method _init * @private */ _init: function(){ Ink.extendObj(this._options,this._lang || {}); this._render(); this.<API key>(); Common.registerInstance(this, this._containerObject, 'datePicker'); }, /** * Renders the DatePicker's markup. * * @method _render * @private */ _render: function() { this._containerObject = document.createElement('div'); this._containerObject.id = this._options.instance; this._containerObject.className = this._options.cssClass + ' <API key> hide-all'; this._renderSuperTopBar(); var calendarTop = document.createElement("div"); calendarTop.className = 'ink-calendar-top'; this._monthDescContainer = document.createElement("div"); this._monthDescContainer.className = '<API key>'; this._monthPrev = document.createElement('div'); this._monthPrev.className = 'ink-calendar-prev'; this._monthPrev.innerHTML ='<a href="#prev" class="change_month_prev">' + this._options.prevLinkText + '</a>'; this._monthNext = document.createElement('div'); this._monthNext.className = 'ink-calendar-next'; this._monthNext.innerHTML ='<a href="#next" class="change_month_next">' + this._options.nextLinkText + '</a>'; calendarTop.appendChild(this._monthPrev); calendarTop.appendChild(this._monthDescContainer); calendarTop.appendChild(this._monthNext); this._monthContainer = document.createElement("div"); this._monthContainer.className = 'ink-calendar-month'; this._containerObject.appendChild(calendarTop); this._containerObject.appendChild(this._monthContainer); this._monthSelector = this.<API key>(); this._containerObject.appendChild(this._monthSelector); this._yearSelector = document.createElement('ul'); this._yearSelector.className = '<API key>'; this._containerObject.appendChild(this._yearSelector); if(!this._options.onFocus || this._options.displayInSelect){ if(!this._options.pickerField){ this._picker = document.createElement('a'); this._picker.href = '#open_cal'; this._picker.innerHTML = 'open'; this._element.parentNode.appendChild(this._picker); 
this._picker.className = '<API key>'; } else { this._picker = Common.elOrSelector(this._options.pickerField, 'pickerField'); } } this.<API key>(); this._renderMonth(); this._monthChanger = document.createElement('a'); this._monthChanger.href = '#monthchanger'; this._monthChanger.className = '<API key>'; this._monthChanger.innerHTML = this._options.month[this._month + 1]; this._ofText = document.createElement('span'); this._ofText.innerHTML = this._options.ofText; this._yearChanger = document.createElement('a'); this._yearChanger.href = '#yearchanger'; this._yearChanger.className = '<API key>'; this._yearChanger.innerHTML = this._year; this._monthDescContainer.innerHTML = ''; this._monthDescContainer.appendChild(this._monthChanger); this._monthDescContainer.appendChild(this._ofText); this._monthDescContainer.appendChild(this._yearChanger); if (!this._options.inline) { this._addOpenCloseEvents(); } else { this.show(); } this.<API key>(); }, <API key>: function () { var fields = this._element; if (this._options.displayInSelect) { fields = [ this._options.dayField, this._options.monthField, this._options.yearField]; } Event.observeMulti(fields ,'change', Ink.bindEvent(function(){ this._updateDate( ); this._showDefaultView( ); this.setDate( ); if ( !this._inline && !this._hoverPicker ) { this._hide(true); } },this)); }, /** * Shows the calendar. * * @method show **/ show: function () { this._updateDate(); this._renderMonth(); Css.removeClassName(this._containerObject, 'hide-all'); }, _addOpenCloseEvents: function () { var opener = this._picker || this._element; Event.observe(opener, 'click', Ink.bindEvent(function(e){ Event.stop(e); this.show(); },this)); if (this._options.autoOpen) { this.show(); } if(!this._options.displayInSelect){ Event.observe(opener, 'blur', Ink.bindEvent(function() { if ( !this._hoverPicker ) { this._hide(true); } },this)); } if (this._options.shy) { // Close the picker when clicking elsewhere. 
Event.observe(document,'click',Ink.bindEvent(function(e){ var target = Event.element(e); // "elsewhere" is outside any of these elements: var cannotBe = [ this._options.dayField, this._options.monthField, this._options.yearField, this._picker, this._element ]; for (var i = 0, len = cannotBe.length; i < len; i++) { if (cannotBe[i] && InkElement.descendantOf(cannotBe[i], target)) { return; } } this._hide(true); },this)); } }, /** * Creates the markup of the view with months. * * @method <API key> * @private */ <API key>: function () { var selector = document.createElement('ul'); selector.className = '<API key>'; var ulSelector = document.createElement('ul'); for(var mon=1; mon<=12; mon++){ ulSelector.appendChild(this._renderMonthButton(mon)); if (mon % 4 === 0) { selector.appendChild(ulSelector); ulSelector = document.createElement('ul'); } } return selector; }, /** * Renders a single month button. */ _renderMonthButton: function (mon) { var liMonth = document.createElement('li'); var aMonth = document.createElement('a'); aMonth.setAttribute('data-cal-month', mon); aMonth.innerHTML = this._options.month[mon].substring(0,3); liMonth.appendChild(aMonth); return liMonth; }, <API key>: function () { if(this._options.containerElement) { var appendTarget = Ink.i(this._options.containerElement) || // [2.3.0] maybe id; small backwards compatibility thing Common.elOrSelector(this._options.containerElement); appendTarget.appendChild(this._containerObject); } if (InkElement.<API key>(this._element, '.ink-form .control-group .control') === this._element.parentNode) { // [3.0.0] Check if the <input> must be a direct child of .control, and if not, remove this block. 
this._wrapper = this._element.parentNode; this._wrapperIsControl = true; } else { this._wrapper = InkElement.create('div', { className: '<API key>' }); InkElement.wrap(this._element, this._wrapper); } InkElement.insertAfter(this._containerObject, this._element); }, /** * Render the topmost bar with the "close" and "clear" buttons. */ _renderSuperTopBar: function () { if((!this._options.showClose) || (!this._options.showClean)){ return; } this._superTopBar = document.createElement("div"); this._superTopBar.className = '<API key>'; if(this._options.showClean){ this._superTopBar.appendChild(InkElement.create('a', { className: 'clean', setHTML: this._options.cleanText })); } if(this._options.showClose){ this._superTopBar.appendChild(InkElement.create('a', { className: 'close', setHTML: this._options.closeText })); } this._containerObject.appendChild(this._superTopBar); }, <API key>: function () { Event.observe(this._containerObject,'mouseover',Ink.bindEvent(function(e){ Event.stop( e ); this._hoverPicker = true; },this)); Event.observe(this._containerObject,'mouseout',Ink.bindEvent(function(e){ Event.stop( e ); this._hoverPicker = false; },this)); Event.observe(this._containerObject,'click',Ink.bindEvent(this._onClick, this)); }, _onClick: function(e){ var elem = Event.element(e); if (Css.hasClassName(elem, 'ink-calendar-off')) { Event.stopDefault(e); return null; } Event.stop(e); // Relative changers this.<API key>(elem); // Absolute changers this.<API key>(elem); // Mode changers if (Css.hasClassName(elem, '<API key>')) { this._showMonthSelector(); } else if (Css.hasClassName(elem, '<API key>')) { this._showYearSelector(); } else if(Css.hasClassName(elem, 'clean')){ this._clean(); } else if(Css.hasClassName(elem, 'close')){ this._hide(false); } this._updateDescription(); }, <API key>: function (elem) { var changeYear = { change_year_next: 1, change_year_prev: -1 }; var changeMonth = { change_month_next: 1, change_month_prev: -1 }; if( elem.className in changeMonth ) 
{ this._updateCal(changeMonth[elem.className]); } else if( elem.className in changeYear ) { this._showYearSelector(changeYear[elem.className]); } }, /** * Handles click events on an atom-changer (day button, month button, year button) * * @method <API key> * @private */ <API key>: function (elem) { var elemData = InkElement.data(elem); if( Number(elemData.calDay) ){ this.setDate( [this._year, this._month + 1, elemData.calDay].join('-') ); this._hide(); } else if( Number(elemData.calMonth) ) { this._month = Number(elemData.calMonth) - 1; this._showDefaultView(); this._updateCal(); } else if( Number(elemData.calYear) ){ this._changeYear(Number(elemData.calYear)); } }, _changeYear: function (year) { year = +year; if(year){ this._year = year; if( typeof this._options.onYearSelected === 'function' ){ this._options.onYearSelected(this, { 'year': this._year }); } this._showMonthSelector(); } }, _clean: function () { if(this._options.displayInSelect){ this._options.yearField.selectedIndex = 0; this._options.monthField.selectedIndex = 0; this._options.dayField.selectedIndex = 0; } else { this._element.value = ''; } }, /** * Hides the DatePicker. * If the component is shy (options.shy), behaves differently. * * @method _hide * @param {Boolean} [blur] If false, forces hiding even if the component is shy. */ _hide: function(blur) { blur = blur === undefined ? true : blur; if (blur === false || (blur && this._options.shy)) { Css.addClassName(this._containerObject, 'hide-all'); } }, /** * Sets the range of dates allowed to be selected in the Date Picker * * @method _setMinMax * @param {String} dateRange Two dates separated by a ':'. 
Example: 2013-01-01:2013-12-12 * @private */ _setMinMax: function( dateRange ) { var self = this; var noMinLimit = { _year: -Number.MAX_VALUE, _month: 0, _day: 1 }; var noMaxLimit = { _year: Number.MAX_VALUE, _month: 11, _day: 31 }; function noLimits() { self._min = noMinLimit; self._max = noMaxLimit; } if (!dateRange) { return noLimits(); } var dates = dateRange.split( ':' ); var rDate = /^(\d{4})((\-)(\d{1,2})((\-)(\d{1,2}))?)?$/; InkArray.each([ {name: '_min', date: dates[0], noLim: noMinLimit}, {name: '_max', date: dates[1], noLim: noMaxLimit} ], Ink.bind(function (data) { var lim = data.noLim; if ( data.date.toUpperCase() === 'NOW' ) { var now = new Date(); lim = dateishFromDate(now); } else if (data.date.toUpperCase() === 'EVER') { lim = data.noLim; } else if ( rDate.test( data.date ) ) { lim = <API key>(data.date); lim._month = clamp(lim._month, 0, 11); lim._day = clamp(lim._day, 1, this._daysInMonth( lim._year, lim._month + 1 )); } this[data.name] = lim; }, this)); // Should be equal, or min should be smaller var valid = this._dateCmp(this._max, this._min) !== -1; if (!valid) { noLimits(); } }, /** * Checks if a date is between the valid range. * Starts by checking if the date passed is valid. If not, will fallback to the 'today' date. * Then checks if the all params are inside of the date range specified. If not, it will fallback to the nearest valid date (either Min or Max). * * @method _fitDateToRange * @param {Number} year Year with 4 digits (yyyy) * @param {Number} month Month * @param {Number} day Day * @return {Array} Array with the final processed date. * @private */ _fitDateToRange: function( date ) { if ( !this._isValidDate( date ) ) { date = dateishFromDate(new Date()); } if (this._dateCmp(date, this._min) === -1) { return Ink.extendObj({}, this._min); } else if (this._dateCmp(date, this._max) === 1) { return Ink.extendObj({}, this._max); } return Ink.extendObj({}, date); // date is okay already, just copy it. 
}, /** * Checks whether a date is within the valid date range * @method _dateWithinRange * @param year * @param month * @param day * @return {Boolean} * @private */ _dateWithinRange: function (date) { if (!arguments.length) { date = this; } return (!this._dateAboveMax(date) && (!this._dateBelowMin(date))); }, _dateAboveMax: function (date) { return this._dateCmp(date, this._max) === 1; }, _dateBelowMin: function (date) { return this._dateCmp(date, this._min) === -1; }, _dateCmp: function (self, oth) { return this._dateCmpUntil(self, oth, '_day'); }, _dateCmpUntil: function (self, oth, depth) { var props = ['_year', '_month', '_day']; var i = -1; do { i++; if (self[props[i]] > oth[props[i]]) { return 1; } else if (self[props[i]] < oth[props[i]]) { return -1; } } while (props[i] !== depth && self[props[i + 1]] !== undefined && oth[props[i + 1]] !== undefined); return 0; }, /** * Sets the markup in the default view mode (showing the days). * Also disables the previous and next buttons in case they don't meet the range requirements. 
* * @method _showDefaultView * @private */ _showDefaultView: function(){ this._yearSelector.style.display = 'none'; this._monthSelector.style.display = 'none'; this._monthPrev.childNodes[0].className = 'change_month_prev'; this._monthNext.childNodes[0].className = 'change_month_next'; if ( !this._getPrevMonth() ) { this._monthPrev.childNodes[0].className = 'action_inactive'; } if ( !this._getNextMonth() ) { this._monthNext.childNodes[0].className = 'action_inactive'; } this._monthContainer.style.display = 'block'; }, /** * Updates the date shown on the datepicker * * @method _updateDate * @private */ _updateDate: function(){ var dataParsed; if(!this._options.displayInSelect && this._element.value){ dataParsed = this._parseDate(this._element.value); } else if (this._options.displayInSelect) { dataParsed = { _year: this._options.yearField[this._options.yearField.selectedIndex].value, _month: this._options.monthField[this._options.monthField.selectedIndex].value - 1, _day: this._options.dayField[this._options.dayField.selectedIndex].value }; } if (dataParsed) { dataParsed = this._fitDateToRange(dataParsed); this._year = dataParsed._year; this._month = dataParsed._month; this._day = dataParsed._day; } this.setDate(); this._updateDescription(); this._renderMonth(); }, /** * Updates the date description shown at the top of the datepicker * * EG "12 de November" * * @method _updateDescription * @private */ _updateDescription: function(){ this._monthChanger.innerHTML = this._options.month[ this._month + 1 ]; this._ofText.innerHTML = this._options.ofText; this._yearChanger.innerHTML = this._year; }, /** * Renders the year selector view of the datepicker * * @method _showYearSelector * @private */ _showYearSelector: function(inc){ this.<API key>(inc); var firstYear = this._year - (this._year % 10); var thisYear = firstYear - 1; var str = "<li><ul>"; if (thisYear > this._min._year) { str += '<li><a href="#year_prev" class="change_year_prev">' + this._options.prevLinkText + 
'</a></li>'; } else { str += '<li>&nbsp;</li>'; } for (var i=1; i < 11; i++){ if (i % 4 === 0){ str+='</ul><ul>'; } thisYear = firstYear + i - 1; str += this._getYearButtonHtml(thisYear); } if( thisYear < this._max._year){ str += '<li><a href="#year_next" class="change_year_next">' + this._options.nextLinkText + '</a></li>'; } else { str += '<li>&nbsp;</li>'; } str += "</ul></li>"; this._yearSelector.innerHTML = str; this._monthPrev.childNodes[0].className = 'action_inactive'; this._monthNext.childNodes[0].className = 'action_inactive'; this._monthSelector.style.display = 'none'; this._monthContainer.style.display = 'none'; this._yearSelector.style.display = 'block'; }, /** * For the year selector. * * Update this._year, to find the next decade or use nextValidDateFn to find it. */ <API key>: function (inc) { if (!inc) { return; } var year = +this._year + inc*10; year = year - year % 10; if ( year > this._max._year || year + 9 < this._min._year){ return; } this._year = +this._year + inc*10; }, _getYearButtonHtml: function (thisYear) { if ( this._acceptableYear({_year: thisYear}) ){ var className = (thisYear === this._year) ? ' class="ink-calendar-on"' : ''; return '<li><a href="#" data-cal-year="' + thisYear + '"' + className + '>' + thisYear +'</a></li>'; } else { return '<li><a href="#" class="ink-calendar-off">' + thisYear +'</a></li>'; } }, /** * Show the month selector (happens when you click a year, or the "month" link. * @method _showMonthSelector * @private */ _showMonthSelector: function () { this._yearSelector.style.display = 'none'; this._monthContainer.style.display = 'none'; this._monthPrev.childNodes[0].className = 'action_inactive'; this._monthNext.childNodes[0].className = 'action_inactive'; this._addMonthClassNames(); this._monthSelector.style.display = 'block'; }, /** * This function returns the given date in the dateish format * * @method _parseDate * @param {String} dateStr A date on a string. 
* @private */ _parseDate: function(dateStr){ var date = InkDate.set( this._options.format , dateStr ); if (date) { return dateishFromDate(date); } return null; }, /** * Checks if a date is valid * * @method _isValidDate * @param {Dateish} date * @private * @return {Boolean} True if the date is valid, false otherwise */ _isValidDate: function(date){ var yearRegExp = /^\d{4}$/; var validOneOrTwo = /^\d{1,2}$/; return ( yearRegExp.test(date._year) && validOneOrTwo.test(date._month) && validOneOrTwo.test(date._day) && +date._month + 1 >= 1 && +date._month + 1 <= 12 && +date._day >= 1 && +date._day <= this._daysInMonth(date._year, date._month + 1) ); }, /** * Checks if a given date is an valid format. * * @method _isDate * @param {String} format A date format. * @param {String} dateStr A date on a string. * @private * @return {Boolean} True if the given date is valid according to the given format */ _isDate: function(format, dateStr){ try { if (typeof format === 'undefined'){ return false; } var date = InkDate.set( format , dateStr ); if( date && this._isValidDate( dateishFromDate(date) )) { return true; } } catch (ex) {} return false; }, _acceptableDay: function (date) { return this.<API key>(date, 'validDayFn'); }, _acceptableMonth: function (date) { return this.<API key>(date, 'validMonthFn'); }, _acceptableYear: function (date) { return this.<API key>(date, 'validYearFn'); }, /** DRY base for the above 2 functions */ <API key>: function (date, userCb) { if (this._options[userCb]) { return this.<API key>(this._options[userCb], date); } else { return this._dateWithinRange(date); } }, /** * This method returns the date written with the format specified on the options * * @method _writeDateInFormat * @private * @return {String} Returns the current date of the object in the specified format */ _writeDateInFormat:function(){ return InkDate.get( this._options.format , this.getDate()); }, /** * This method allows the user to set the DatePicker's date on run-time. 
* * @method setDate * @param {String} dateString A date string in yyyy-mm-dd format. * @public */ setDate: function( dateString ) { if ( /\d{4}-\d{1,2}-\d{1,2}/.test( dateString ) ) { var auxDate = dateString.split( '-' ); this._year = +auxDate[ 0 ]; this._month = +auxDate[ 1 ] - 1; this._day = +auxDate[ 2 ]; } this._setDate( ); }, /** * Gets the current date as a JavaScript date. * * @method getDate */ getDate: function () { if (!this._day) { throw 'Ink.UI.DatePicker: Still picking a date. Cannot getDate now!'; } return new Date(this._year, this._month, this._day); }, /** * Sets the chosen date on the target input field * * @method _setDate * @param {DOMElement} objClicked Clicked object inside the DatePicker's calendar. * @private */ _setDate : function( objClicked ) { if (objClicked) { var data = InkElement.data(objClicked); this._day = (+data.calDay) || this._day; } var dt = this._fitDateToRange(this); this._year = dt._year; this._month = dt._month; this._day = dt._day; if(!this._options.displayInSelect){ this._element.value = this._writeDateInFormat(); } else { this._options.dayField.value = this._day; this._options.monthField.value = this._month + 1; this._options.yearField.value = this._year; } if(this._options.onSetDate) { this._options.onSetDate( this , { date : this.getDate() } ); } }, /** * Makes the necessary work to update the calendar * when choosing a different month * * @method _updateCal * @param {Number} inc Indicates previous or next month * @private */ _updateCal: function(inc){ if( typeof this._options.onMonthSelected === 'function' ){ this._options.onMonthSelected(this, { 'year': this._year, 'month' : this._month }); } if (inc && this._updateMonth(inc) === null) { return; } this._renderMonth(); }, /** * Function that returns the number of days on a given month on a given year * * @method _daysInMonth * @param {Number} _y - year * @param {Number} _m - month * @private * @return {Number} The number of days on a given month on a given year */ 
_daysInMonth: function(_y,_m){ var exceptions = { 2: ((_y % 400 === 0) || (_y % 4 === 0 && _y % 100 !== 0)) ? 29 : 28, 4: 30, 6: 30, 9: 30, 11: 30 }; return exceptions[_m] || 31; }, /** * Updates the calendar when a different month is chosen * * @method _updateMonth * @param {Number} incValue - indicates previous or next month * @private */ _updateMonth: function(incValue){ var date; if (incValue > 0) { date = this._getNextMonth(); } else if (incValue < 0) { date = this._getPrevMonth(); } if (!date) { return null; } this._year = date._year; this._month = date._month; this._day = date._day; }, /** * Get the next month we can show. */ _getNextMonth: function (date) { return this._tryLeap( date, 'Month', 'next', function (d) { d._month += 1; if (d._month > 11) { d._month = 0; d._year += 1; } return d; }); }, /** * Get the previous month we can show. */ _getPrevMonth: function (date) { return this._tryLeap( date, 'Month', 'prev', function (d) { d._month -= 1; if (d._month < 0) { d._month = 11; d._year -= 1; } return d; }); }, /** * Get the next year we can show. */ _getPrevYear: function (date) { return this._tryLeap( date, 'Year', 'prev', function (d) { d._year -= 1; return d; }); }, /** * Get the next year we can show. */ _getNextYear: function (date) { return this._tryLeap( date, 'Year', 'next', function (d) { d._year += 1; return d; }); }, /** * DRY base for a function which tries to get the next or previous valid year or month. * * It checks if we can go forward by using _dateCmp with atomic * precision (this means, {_year} for leaping years, and * {_year, month} for leaping months), then it tries to get the * result from the user-supplied callback (nextDateFn or prevDateFn), * and when this is not present, advance the date forward using the * `advancer` callback. */ _tryLeap: function (date, atomName, directionName, advancer) { date = date || { _year: this._year, _month: this._month, _day: this._day }; var maxOrMin = directionName === 'prev' ? 
'_min' : '_max'; var boundary = this[maxOrMin]; // Check if we're by the boundary of min/max year/month if (this._dateCmpUntil(date, boundary, atomName) === 0) { return null; // We're already at the boundary. Bail. } var leapUserCb = this._options[directionName + 'ValidDateFn']; if (leapUserCb) { return this.<API key>(leapUserCb, date); } else { date = advancer(date); } date = this._fitDateToRange(date); return this['_acceptable' + atomName](date) ? date : null; }, _getNextDecade: function (date) { date = date || { _year: this._year, _month: this._month, _day: this._day }; var decade = this._getCurrentDecade(date); if (decade + 10 > this._max._year) { return null; } return decade + 10; }, _getPrevDecade: function (date) { date = date || { _year: this._year, _month: this._month, _day: this._day }; var decade = this._getCurrentDecade(date); if (decade - 10 < this._min._year) { return null; } return decade - 10; }, /** Returns the decade given a date or year*/ _getCurrentDecade: function (year) { year = year ? (year._year || year) : this._year; return Math.floor(year / 10) * 10; // Round to first place }, <API key>: function (cb, date) { return cb.call(this, date._year, date._month + 1, date._day); }, <API key>: function (cb, date) { return !!this.<API key>(cb, date); }, <API key>: function (cb, date) { var ret = this.<API key>(cb, date); return ret ? 
dateishFromDate(ret) : null; }, /** * Key-value object that (for a given key) points to the correct parsing format for the DatePicker * @property _dateParsers * @type {Object} * @readOnly */ _dateParsers: { 'yyyy-mm-dd' : 'Y-m-d' , 'yyyy/mm/dd' : 'Y/m/d' , 'yy-mm-dd' : 'y-m-d' , 'yy/mm/dd' : 'y/m/d' , 'dd-mm-yyyy' : 'd-m-Y' , 'dd/mm/yyyy' : 'd/m/Y' , 'dd-mm-yy' : 'd-m-y' , 'dd/mm/yy' : 'd/m/y' , 'mm/dd/yyyy' : 'm/d/Y' , 'mm-dd-yyyy' : 'm-d-Y' }, /** * Renders the current month * * @method _renderMonth * @private */ _renderMonth: function(){ var month = this._month; var year = this._year; this._showDefaultView(); var html = ''; html += this.<API key>(this._options.startWeekDay); var counter = 0; html+='<ul>'; var emptyHtml = '<li class="ink-calendar-empty">&nbsp;</li>'; var firstDayIndex = this._getFirstDayIndex(year, month); // Add padding if the first day of the month is not monday. if(firstDayIndex > 0) { counter += firstDayIndex; html += strRepeat(firstDayIndex, emptyHtml); } html += this._getDayButtonsHtml(year, month); html += '</ul>'; this._monthContainer.innerHTML = html; }, _getFirstDayIndex: function (year, month) { var wDayFirst = (new Date( year , month , 1 )).getDay(); // Sunday=0 var startWeekDay = this._options.startWeekDay || 0; // Sunday=0 var result = wDayFirst - startWeekDay; result %= 7; if (result < 0) { result += 6; } return result; }, _getDayButtonsHtml: function (year, month) { var counter = this._getFirstDayIndex(year, month); var daysInMonth = this._daysInMonth(year, month + 1); var ret = ''; for (var day = 1; day <= daysInMonth; day++) { if (counter === 7){ // new week counter=0; ret += '<ul>'; } ret += this._getDayButtonHtml(year, month, day); counter++; if(counter === 7){ ret += '</ul>'; } } return ret; }, /** * Get the HTML markup for a single day in month view, given year, month, day. 
* * @method _getDayButtonHtml * @private */ _getDayButtonHtml: function (year, month, day) { var attrs = ' '; var date = dateishFromYMD(year, month, day); if (!this._acceptableDay(date)) { attrs += 'class="ink-calendar-off"'; } else { attrs += 'data-cal-day="' + day + '"'; } if (this._day && this._dateCmp(date, this) === 0) { attrs += 'class="ink-calendar-on" data-cal-day="' + day + '"'; } return '<li><a href="#" ' + attrs + '>' + day + '</a></li>'; }, /** Write the top bar of the calendar (M T W T F S S) */ <API key>: function (startWeekDay) { var ret = '<ul class="ink-calendar-header">'; var wDay; for(var i=0; i<7; i++){ wDay = (startWeekDay + i) % 7; ret += '<li>' + this._options.wDay[wDay].substring(0,1) + '</li>'; } return ret + '</ul>'; }, /** * This method adds class names to month buttons, to visually distinguish. * * @method _addMonthClassNames * @param {DOMElement} parent DOMElement where all the months are. * @private */ _addMonthClassNames: function(parent){ InkArray.forEach( (parent || this._monthSelector).<API key>('a'), Ink.bindMethod(this, '<API key>')); }, /** * Add the ink-calendar-on className if the given button is the current month, * otherwise add the ink-calendar-off className if the given button refers to * an unacceptable month (given dateRange and validMonthFn) */ <API key>: function (btn) { var data = InkElement.data(btn); if (!data.calMonth) { throw 'not a calendar month button!'; } var month = +data.calMonth - 1; if ( month === this._month ) { Css.addClassName( btn, 'ink-calendar-on' ); // This month Css.removeClassName( btn, 'ink-calendar-off' ); } else { Css.removeClassName( btn, 'ink-calendar-on' ); // Not this month var toDisable = !this._acceptableMonth({_year: this._year, _month: month}); Css.addRemoveClassName( btn, 'ink-calendar-off', toDisable); } }, /** * Prototype's method to allow the 'i18n files' to change all objects' language at once. * @param {Object} options Object with the texts' configuration. 
* @param {String} options.closeText Text of the close anchor * @param {String} options.cleanText Text of the clean text anchor * @param {String} options.prevLinkText "Previous" link's text * @param {String} options.nextLinkText "Next" link's text * @param {String} options.ofText The text "of", present in 'May of 2013' * @param {Object} options.month An object with keys from 1 to 12 for the full months' names * @param {Object} options.wDay An object with keys from 0 to 6 for the full weekdays' names * @public */ lang: function( options ){ this._lang = options; }, /** * This calls the rendering of the selected month. (Deprecated: use show() instead) * */ showMonth: function(){ this._renderMonth(); }, /** * Checks if the calendar screen is in 'select day' mode * * @return {Boolean} True if the calendar screen is in 'select day' mode * @public */ isMonthRendered: function(){ var header = Selector.select('.ink-calendar-header', this._containerObject)[0]; return ((Css.getStyle(header.parentNode,'display') !== 'none') && (Css.getStyle(header.parentNode.parentNode,'display') !== 'none') ); }, /** * Destroys this datepicker, removing it from the page. * * @public **/ destroy: function () { InkElement.unwrap(this._element); InkElement.remove(this._wrapper); InkElement.remove(this._containerObject); Common.unregisterInstance.call(this); } }; return DatePicker; });
#include <string.h> #if !defined(<API key>) && !defined(__OPTIMIZE_SIZE__) # define RETURN_TYPE void * # define AVAILABLE(h, h_l, j, n_l) ((j) <= (h_l) - (n_l)) # include "str-two-way.h" #endif void * _DEFUN (memmem, (haystack_start, haystack_len, needle_start, needle_len), const void *haystack_start _AND size_t haystack_len _AND const void *needle_start _AND size_t needle_len) { /* Abstract memory is considered to be an array of 'unsigned char' values, not an array of 'char' values. See ISO C 99 section 6.2.6.1. */ const unsigned char *haystack = (const unsigned char *) haystack_start; const unsigned char *needle = (const unsigned char *) needle_start; if (needle_len == 0) /* The first occurrence of the empty string is deemed to occur at the beginning of the string. */ return (void *) haystack; #if defined(<API key>) || defined(__OPTIMIZE_SIZE__) /* Less code size, but quadratic performance in the worst case. */ while (needle_len <= haystack_len) { if (!memcmp (haystack, needle, needle_len)) return (void *) haystack; haystack++; haystack_len } return NULL; #else /* compilation for speed */ /* Larger code size, but guaranteed linear performance. */ /* Sanity check, otherwise the loop might search through the whole memory. */ if (haystack_len < needle_len) return NULL; /* Use optimizations in memchr when possible, to reduce the search size of haystack using a linear algorithm with a smaller coefficient. However, avoid memchr for long needles, since we can often achieve sublinear performance. */ if (needle_len < <API key>) { haystack = memchr (haystack, *needle, haystack_len); if (!haystack || needle_len == 1) return (void *) haystack; haystack_len -= haystack - (const unsigned char *) haystack_start; if (haystack_len < needle_len) return NULL; return <API key> (haystack, haystack_len, needle, needle_len); } return two_way_long_needle (haystack, haystack_len, needle, needle_len); #endif /* compilation for speed */ }