code
stringlengths
3
1.05M
repo_name
stringlengths
4
116
path
stringlengths
4
991
language
stringclasses
9 values
license
stringclasses
15 values
size
int32
3
1.05M
/*!
 * Aloha Editor
 * Author & Copyright (c) 2010 Gentics Software GmbH
 * aloha-sales@gentics.com
 * Licensed under the terms of http://www.aloha-editor.com/license.html
 */
/**
 * @name block.editor
 * @namespace Block attribute editors
 */
define(['aloha/jquery', 'aloha/observable'], function(jQuery, Observable) {

    /**
     * @name block.editor.AbstractEditor
     * @class An abstract editor
     */
    var AbstractEditor = Class.extend(Observable, /** @lends block.editor.AbstractEditor */ {

        /**
         * Schema for the attribute being edited (at least { label: String }).
         * @type Object
         */
        schema: null,

        /**
         * @constructor
         * @param {Object} schema attribute schema for this editor
         */
        _constructor: function(schema) {
            this.schema = schema;
        },

        /**
         * Template method to render the editor elements
         * @return {jQuery}
         */
        render: function() {
            // Implement in subclass!
        },

        /**
         * Template method to get the editor values
         */
        getValue: function() {
            // Implement in subclass!
        },

        /**
         * We do not throw any change event here, as we need to break the loop "Block" -> "Editor" -> "Block"
         *
         * @param {String} value
         */
        setValue: function(value) {
            // Implement in subclass!
        },

        /**
         * Destroy the editor elements and unbind events
         */
        destroy: function() {
            // Implement in subclass!
        },

        /**
         * On deactivating, we still need to trigger a change event if the value has been modified.
         *
         * @private
         */
        _deactivate: function() {
            this.trigger('change', this.getValue());
            this.destroy();
        }
    });

    /**
     * @name block.editor.AbstractFormElementEditor
     * @class An abstract form editor with label
     * @extends block.editor.AbstractEditor
     */
    var AbstractFormElementEditor = AbstractEditor.extend(/** @lends block.editor.AbstractFormElementEditor */ {

        /**
         * Input element HTML definition
         * @type String
         *
         * @private
         */
        formInputElementDefinition: null,

        /**
         * @type jQuery
         */
        _$formInputElement: null,

        /**
         * Render the label and form element.
         *
         * @return {jQuery}
         */
        render: function() {
            var $wrapper = jQuery('<div class="aloha-block-editor" />');
            var guid = GENTICS.Utils.guid();
            // BUGFIX: previously both the label and the input element were given
            // the same id, producing duplicate DOM ids. The label must point at
            // the input through its "for" attribute instead, so that clicking
            // the label focuses the associated form field.
            $wrapper.append(this.renderLabel().attr('for', guid));
            $wrapper.append(this.renderFormElement().attr('id', guid));
            return $wrapper;
        },

        /**
         * Render the label for the editor
         * @return {jQuery}
         */
        renderLabel: function() {
            var element = jQuery('<label />');
            // NOTE(review): schema.label is injected with .html(); if labels can
            // ever contain user-supplied text this should be .text() -- confirm.
            element.html(this.schema.label);
            return element;
        },

        /**
         * Render the form input element and wire its change event through to
         * the editor's own 'change' event.
         *
         * @return {jQuery}
         */
        renderFormElement: function() {
            var that = this;
            this._$formInputElement = jQuery(this.formInputElementDefinition);

            this._$formInputElement.change(function() {
                that.trigger('change', that.getValue());
            });

            return this._$formInputElement;
        },

        /**
         * @return {String}
         */
        getValue: function() {
            return this._$formInputElement.val();
        },

        /**
         * We do not throw any change event here, as we need to break the loop "Block" -> "Editor" -> "Block"
         */
        setValue: function(value) {
            this._$formInputElement.val(value);
        },

        /**
         * Cleanup and remove the input element
         */
        destroy: function() {
            this._$formInputElement.remove();
        }
    });

    /**
     * @name block.editor.StringEditor
     * @class An editor for string input
     * @extends block.editor.AbstractFormElementEditor
     */
    var StringEditor = AbstractFormElementEditor.extend(/** @lends block.editor.StringEditor */ {
        formInputElementDefinition: '<input type="text" />'
    });

    /**
     * @name block.editor.NumberEditor
     * @class An editor for numbers
     * @extends block.editor.AbstractFormElementEditor
     */
    var NumberEditor = AbstractFormElementEditor.extend(/** @lends block.editor.NumberEditor */ {
        // TODO Range should be an option
        formInputElementDefinition: '<input type="range" />'
    });

    /**
     * @name block.editor.UrlEditor
     * @class An editor for URLs
     * @extends block.editor.AbstractFormElementEditor
     */
    var UrlEditor = AbstractFormElementEditor.extend(/** @lends block.editor.UrlEditor */ {
        formInputElementDefinition: '<input type="url" />'
    });

    /**
     * @name block.editor.EmailEditor
     * @class An editor for email addresses
     * @extends block.editor.AbstractFormElementEditor
     */
    var EmailEditor = AbstractFormElementEditor.extend(/** @lends block.editor.EmailEditor */ {
        formInputElementDefinition: '<input type="email" />'
    });

    return {
        AbstractEditor: AbstractEditor,
        AbstractFormElementEditor: AbstractFormElementEditor,
        StringEditor: StringEditor,
        NumberEditor: NumberEditor,
        UrlEditor: UrlEditor,
        EmailEditor: EmailEditor
    };
});
berkmancenter/spectacle
web/js/lib/plugins/common/block/lib/editor.js
JavaScript
gpl-2.0
4,571
#define PNG_SKIP_SETJMP_CHECK
#include <png.h>
#include <fcntl.h>
#include <lib/gdi/picload.h>
#include <lib/gdi/picexif.h>
extern "C" {
#include <jpeglib.h>
#include <gif_lib.h>
}

extern const uint32_t crc32_table[256];

DEFINE_REF(ePicLoad);

// Return the size of `file` as a human-readable "<n> kB" string,
// or "" if the file cannot be stat'ed.
static std::string getSize(const char* file)
{
    struct stat64 s;
    if (stat64(file, &s) < 0) return "";
    char tmp[20];
    snprintf(tmp, 20, "%ld kB", (long)s.st_size / 1024);
    return tmp;
}

// Nearest-neighbour resize of a 24bpp RGB buffer from ox*oy to dx*dy.
// Takes ownership of `orgin` (frees it) and returns a freshly allocated buffer.
// NOTE(review): `new` throws std::bad_alloc by default, so the NULL check below
// is dead code unless the project builds with nothrow new -- confirm.
static unsigned char *simple_resize_24(unsigned char *orgin, int ox, int oy, int dx, int dy)
{
    unsigned char *cr = new unsigned char[dx * dy * 3];
    if (cr == NULL)
    {
        eDebug("[Picload] Error malloc");
        return orgin;
    }
    unsigned char* k = cr;
    for (int j = 0; j < dy; ++j)
    {
        const unsigned char* p = orgin + (j * oy / dy * ox) * 3;
        for (int i = 0; i < dx; i++)
        {
            const unsigned char* ip = p + (i * ox / dx) * 3;
            *k++ = ip[0];
            *k++ = ip[1];
            *k++ = ip[2];
        }
    }
    delete [] orgin;
    return cr;
}

// Nearest-neighbour resize of an 8bpp (palette-indexed) buffer.
// Same ownership contract as simple_resize_24.
static unsigned char *simple_resize_8(unsigned char *orgin, int ox, int oy, int dx, int dy)
{
    unsigned char* cr = new unsigned char[dx * dy];
    if (cr == NULL)
    {
        eDebug("[Picload] Error malloc");
        return(orgin);
    }
    unsigned char* k = cr;
    for (int j = 0; j < dy; ++j)
    {
        const unsigned char* p = orgin + (j * oy / dy * ox);
        for (int i = 0; i < dx; i++)
        {
            *k++ = p[i * ox / dx];
        }
    }
    delete [] orgin;
    return cr;
}

// Box-filter (averaging) resize of a 24bpp RGB buffer -- higher quality than
// simple_resize_24. Takes ownership of `orgin` and returns a new buffer.
static unsigned char *color_resize(unsigned char * orgin, int ox, int oy, int dx, int dy)
{
    unsigned char* cr = new unsigned char[dx * dy * 3];
    if (cr == NULL)
    {
        eDebug("[Picload] Error malloc");
        return orgin;
    }
    unsigned char* p = cr;
    for (int j = 0; j < dy; j++)
    {
        // [ya, yb] is the inclusive source-row span averaged into dest row j
        int ya = j * oy / dy;
        int yb = (j + 1) * oy / dy;
        if (yb >= oy) yb = oy - 1;
        for (int i = 0; i < dx; i++, p += 3)
        {
            int xa = i * ox / dx;
            int xb = (i + 1) * ox / dx;
            if (xb >= ox) xb = ox - 1;
            int r = 0;
            int g = 0;
            int b = 0;
            int sq = 0; // number of source pixels accumulated (always >= 1)
            for (int l = ya; l <= yb; l++)
            {
                const unsigned char* q = orgin + ((l * ox + xa) * 3);
                for (int k = xa; k <= xb; k++, q += 3, sq++)
                {
                    r += q[0];
                    g += q[1];
                    b += q[2];
                }
            }
            p[0] = r / sq;
            p[1] = g / sq;
            p[2] = b / sq;
        }
    }
    delete [] orgin;
    return cr;
}

//---------------------------------------------------------------------------------------------

// Byte offsets of fields in the BMP header, and row padding helper
// (BMP rows are padded to 4-byte multiples).
#define BMP_TORASTER_OFFSET 10
#define BMP_SIZE_OFFSET 18
#define BMP_BPP_OFFSET 28
#define BMP_RLE_OFFSET 30
#define BMP_COLOR_OFFSET 54
#define fill4B(a) ((4 - ((a) % 4 )) & 0x03)

struct color
{
    unsigned char red;
    unsigned char green;
    unsigned char blue;
};

// Read `count` BGRX palette entries from the BMP color table into `pallete`.
// NOTE(review): read() results are unchecked here and throughout bmp_load.
static void fetch_pallete(int fd, struct color pallete[], int count)
{
    unsigned char buff[4];
    lseek(fd, BMP_COLOR_OFFSET, SEEK_SET);
    for (int i = 0; i < count; i++)
    {
        read(fd, buff, 4);
        pallete[i].red = buff[2];
        pallete[i].green = buff[1];
        pallete[i].blue = buff[0];
    }
}

// Decode a 4/8/24 bpp uncompressed BMP into a newly allocated 24bpp RGB
// buffer (rows flipped from BMP's bottom-up order). Writes dimensions to *x/*y.
// Returns NULL on failure.
// NOTE(review): the early `return NULL` paths after a failed lseek leak `fd`,
// `tbuffer == NULL` paths leak both `fd` and `pic_buffer`, and the two
// `delete tbuffer` calls should be `delete [] tbuffer` (array delete) -- confirm
// and fix upstream.
static unsigned char *bmp_load(const char *file, int *x, int *y)
{
    unsigned char buff[4];
    struct color pallete[256];

    int fd = open(file, O_RDONLY);
    if (fd == -1) return NULL;
    if (lseek(fd, BMP_SIZE_OFFSET, SEEK_SET) == -1) return NULL;
    read(fd, buff, 4);
    *x = buff[0] + (buff[1] << 8) + (buff[2] << 16) + (buff[3] << 24);
    read(fd, buff, 4);
    *y = buff[0] + (buff[1] << 8) + (buff[2] << 16) + (buff[3] << 24);
    if (lseek(fd, BMP_TORASTER_OFFSET, SEEK_SET) == -1) return NULL;
    read(fd, buff, 4);
    int raster = buff[0] + (buff[1] << 8) + (buff[2] << 16) + (buff[3] << 24);
    if (lseek(fd, BMP_BPP_OFFSET, SEEK_SET) == -1) return NULL;
    read(fd, buff, 2);
    int bpp = buff[0] + (buff[1] << 8);

    unsigned char *pic_buffer = new unsigned char[(*x) * (*y) * 3];
    // BMP stores rows bottom-up: start writing at the last output row.
    unsigned char *wr_buffer = pic_buffer + (*x) * ((*y) - 1) * 3;

    switch (bpp)
    {
        case 4:
        {
            int skip = fill4B((*x) / 2 + (*x) % 2);
            fetch_pallete(fd, pallete, 16);
            lseek(fd, raster, SEEK_SET);
            unsigned char * tbuffer = new unsigned char[*x / 2 + 1];
            if (tbuffer == NULL)
                return NULL;
            for (int i = 0; i < *y; i++)
            {
                read(fd, tbuffer, (*x) / 2 + *x % 2);
                int j;
                // Each byte packs two 4-bit palette indices (high nibble first).
                for (j = 0; j < (*x) / 2; j++)
                {
                    unsigned char c1 = tbuffer[j] >> 4;
                    unsigned char c2 = tbuffer[j] & 0x0f;
                    *wr_buffer++ = pallete[c1].red;
                    *wr_buffer++ = pallete[c1].green;
                    *wr_buffer++ = pallete[c1].blue;
                    *wr_buffer++ = pallete[c2].red;
                    *wr_buffer++ = pallete[c2].green;
                    *wr_buffer++ = pallete[c2].blue;
                }
                if ((*x) % 2)
                {
                    // Odd width: last byte carries only one pixel in its high nibble.
                    unsigned char c1 = tbuffer[j] >> 4;
                    *wr_buffer++ = pallete[c1].red;
                    *wr_buffer++ = pallete[c1].green;
                    *wr_buffer++ = pallete[c1].blue;
                }
                if (skip)
                    read(fd, buff, skip);
                // Move back two rows: one row was just written forward (+x*3),
                // so -x*6 lands at the start of the previous row.
                wr_buffer -= (*x) * 6;
            }
            delete tbuffer;
            break;
        }
        case 8:
        {
            int skip = fill4B(*x);
            fetch_pallete(fd, pallete, 256);
            lseek(fd, raster, SEEK_SET);
            unsigned char * tbuffer = new unsigned char[*x];
            if (tbuffer == NULL)
                return NULL;
            for (int i = 0; i < *y; i++)
            {
                read(fd, tbuffer, *x);
                for (int j = 0; j < *x; j++)
                {
                    wr_buffer[j * 3] = pallete[tbuffer[j]].red;
                    wr_buffer[j * 3 + 1] = pallete[tbuffer[j]].green;
                    wr_buffer[j * 3 + 2] = pallete[tbuffer[j]].blue;
                }
                if (skip)
                    read(fd, buff, skip);
                wr_buffer -= (*x) * 3;
            }
            delete tbuffer;
            break;
        }
        case 24:
        {
            int skip = fill4B((*x) * 3);
            lseek(fd, raster, SEEK_SET);
            for (int i = 0; i < (*y); i++)
            {
                read(fd, wr_buffer, (*x) * 3);
                // BMP stores BGR; swap to RGB in place.
                for (int j = 0; j < (*x) * 3 ; j = j + 3)
                {
                    unsigned char c = wr_buffer[j];
                    wr_buffer[j] = wr_buffer[j + 2];
                    wr_buffer[j + 2] = c;
                }
                if (skip)
                    read(fd, buff, skip);
                wr_buffer -= (*x) * 3;
            }
            break;
        }
        default:
            close(fd);
            return NULL;
    }

    close(fd);
    return(pic_buffer);
}

//---------------------------------------------------------------------

// Decode a PNG into `filepara`. Palette/gray images are kept as 8-bit indexed
// data (filepara->bits = 8, palette filled in); everything else is converted
// to 24bpp RGB, with alpha composited against `background` (0xRRGGBB).
static void png_load(Cfilepara* filepara, int background)
{
    png_uint_32 width, height;
    unsigned int i;
    int bit_depth, color_type, interlace_type;
    png_byte *fbptr;

    FILE *fh = fopen(filepara->file, "rb");
    if (fh == NULL) return;

    png_structp png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    if (png_ptr == NULL)
    {
        fclose(fh);
        return;
    }
    png_infop info_ptr = png_create_info_struct(png_ptr);
    if (info_ptr == NULL)
    {
        png_destroy_read_struct(&png_ptr, (png_infopp)NULL, (png_infopp)NULL);
        fclose(fh);
        return;
    }
    // libpng error handling: any png_* error below longjmps back here.
    if (setjmp(png_jmpbuf(png_ptr)))
    {
        png_destroy_read_struct(&png_ptr, &info_ptr, (png_infopp)NULL);
        fclose(fh);
        return;
    }
    png_init_io(png_ptr, fh);
    png_read_info(png_ptr, info_ptr);
    png_get_IHDR(png_ptr, info_ptr, &width, &height, &bit_depth, &color_type, &interlace_type, NULL, NULL);

    if (color_type == PNG_COLOR_TYPE_GRAY || color_type & PNG_COLOR_MASK_PALETTE)
    {
        // Indexed / grayscale path: keep one byte per pixel plus a palette.
        if (bit_depth < 8)
        {
            png_set_packing(png_ptr);
            bit_depth = 8;
        }
        unsigned char *pic_buffer = new unsigned char[height * width];
        filepara->ox = width;
        filepara->oy = height;
        filepara->pic_buffer = pic_buffer;
        filepara->bits = 8;

        png_bytep *rowptr=new png_bytep[height];
        for (unsigned int i=0; i!=height; i++)
        {
            rowptr[i]=(png_byte*)pic_buffer;
            pic_buffer += width;
        }
        png_read_rows(png_ptr, rowptr, 0, height);
        delete [] rowptr;

        if (png_get_valid(png_ptr, info_ptr, PNG_INFO_PLTE))
        {
            png_color *palette;
            int num_palette;
            png_get_PLTE(png_ptr, info_ptr, &palette, &num_palette);
            filepara->palette_size = num_palette;
            if (num_palette)
                filepara->palette = new gRGB[num_palette];
            for (int i=0; i<num_palette; i++)
            {
                filepara->palette[i].a=0;
                filepara->palette[i].r=palette[i].red;
                filepara->palette[i].g=palette[i].green;
                filepara->palette[i].b=palette[i].blue;
            }
            if (png_get_valid(png_ptr, info_ptr, PNG_INFO_tRNS))
            {
                // Map tRNS transparency entries to the palette's alpha channel.
                png_byte *trans;
                png_get_tRNS(png_ptr, info_ptr, &trans, &num_palette, 0);
                for (int i=0; i<num_palette; i++)
                    filepara->palette[i].a=255-trans[i];
            }
        }
    }
    else
    {
        // RGB(A) / gray-alpha path: normalize everything to 8-bit RGB.
        if (png_get_valid(png_ptr, info_ptr, PNG_INFO_tRNS))
            png_set_expand(png_ptr);
        if (bit_depth == 16)
            png_set_strip_16(png_ptr);
        if (color_type == PNG_COLOR_TYPE_GRAY || color_type == PNG_COLOR_TYPE_GRAY_ALPHA)
            png_set_gray_to_rgb(png_ptr);
        if ((color_type == PNG_COLOR_TYPE_RGB_ALPHA) || (color_type == PNG_COLOR_TYPE_GRAY_ALPHA))
        {
            // Composite the alpha channel against the configured background color.
            png_set_strip_alpha(png_ptr);
            png_color_16 bg;
            bg.red = (background >> 16) & 0xFF;
            bg.green = (background >> 8) & 0xFF;
            bg.blue = (background) & 0xFF;
            bg.gray = bg.green;
            bg.index = 0;
            png_set_background(png_ptr, &bg, PNG_BACKGROUND_GAMMA_SCREEN, 0, 1.0);
        }
        int number_passes = png_set_interlace_handling(png_ptr);
        png_read_update_info(png_ptr, info_ptr);

        if (width * 3 != png_get_rowbytes(png_ptr, info_ptr))
        {
            eDebug("[Picload] Error processing (did not get RGB data from PNG file)");
            png_destroy_read_struct(&png_ptr, &info_ptr, (png_infopp)NULL);
            fclose(fh);
            return;
        }

        unsigned char *pic_buffer = new unsigned char[height * width * 3];
        filepara->ox = width;
        filepara->oy = height;
        filepara->pic_buffer = pic_buffer;

        for(int pass = 0; pass < number_passes; pass++)
        {
            fbptr = (png_byte *)pic_buffer;
            for (i = 0; i < height; i++, fbptr += width * 3)
                png_read_row(png_ptr, fbptr, NULL);
        }
        png_read_end(png_ptr, info_ptr);
    }
    png_destroy_read_struct(&png_ptr, &info_ptr, (png_infopp)NULL);
    fclose(fh);
}

//-------------------------------------------------------------------

// libjpeg error manager extended with a jump buffer so fatal decode errors
// unwind back into jpeg_load instead of calling exit().
struct r_jpeg_error_mgr
{
    struct jpeg_error_mgr pub;
    jmp_buf envbuffer;
};

// libjpeg error callback: print the message, then longjmp out of the decoder.
void jpeg_cb_error_exit(j_common_ptr cinfo)
{
    struct r_jpeg_error_mgr *mptr;
    mptr = (struct r_jpeg_error_mgr *) cinfo->err;
    (*cinfo->err->output_message) (cinfo);
    longjmp(mptr->envbuffer, 1);
}

// Decode a JPEG into a newly allocated 24bpp RGB buffer; *ox/*oy receive the
// (possibly downscaled) output dimensions. libjpeg's 1/2, 1/4, 1/8 DCT scaling
// is used to avoid decoding at full size when the target is much smaller.
// Returns NULL on failure (caller owns the returned buffer).
static unsigned char *jpeg_load(const char *file, int *ox, int *oy, unsigned int max_x, unsigned int max_y)
{
    struct jpeg_decompress_struct cinfo;
    struct jpeg_decompress_struct *ciptr = &cinfo;
    struct r_jpeg_error_mgr emgr;
    FILE *fh;
    unsigned char *pic_buffer=NULL;

    if (!(fh = fopen(file, "rb")))
        return NULL;

    ciptr->err = jpeg_std_error(&emgr.pub);
    emgr.pub.error_exit = jpeg_cb_error_exit;
    if (setjmp(emgr.envbuffer) == 1)
    {
        // Fatal decode error: clean up and bail.
        // NOTE(review): if pic_buffer was already allocated before the error,
        // it leaks here -- confirm.
        jpeg_destroy_decompress(ciptr);
        fclose(fh);
        return NULL;
    }

    jpeg_create_decompress(ciptr);
    jpeg_stdio_src(ciptr, fh);
    jpeg_read_header(ciptr, TRUE);
    ciptr->out_color_space = JCS_RGB;

    // Pick the largest denominator s in {8,4,2,1} that still keeps the scaled
    // image at or above the requested max_x/max_y target.
    int s = 8;
    if (max_x == 0)
        max_x = 1280; // sensible default
    if (max_y == 0)
        max_y = 720;
    while (s != 1)
    {
        if ((ciptr->image_width >= (s * max_x)) || (ciptr->image_height >= (s * max_y)))
            break;
        s /= 2;
    }
    ciptr->scale_num = 1;
    ciptr->scale_denom = s;

    jpeg_start_decompress(ciptr);

    *ox=ciptr->output_width;
    *oy=ciptr->output_height;
    // eDebug("jpeg_read ox=%d oy=%d w=%d (%d), h=%d (%d) scale=%d rec_outbuf_height=%d", ciptr->output_width, ciptr->output_height, ciptr->image_width, max_x, ciptr->image_height, max_y, ciptr->scale_denom, ciptr->rec_outbuf_height);

    if(ciptr->output_components == 3)
    {
        unsigned int stride = ciptr->output_width * ciptr->output_components;
        pic_buffer = new unsigned char[ciptr->output_height * stride];
        unsigned char *bp = pic_buffer;

        while (ciptr->output_scanline < ciptr->output_height)
        {
            JDIMENSION lines = jpeg_read_scanlines(ciptr, &bp, ciptr->rec_outbuf_height);
            bp += stride * lines;
        }
    }
    jpeg_finish_decompress(ciptr);
    jpeg_destroy_decompress(ciptr);
    fclose(fh);
    return(pic_buffer);
}

// Encode a 24bpp RGB buffer to `filename` as JPEG (quality 70).
// Returns 0 on success, 1 if the output file cannot be opened.
static int jpeg_save(const char * filename, int ox, int oy, unsigned char *pic_buffer)
{
    struct jpeg_compress_struct cinfo;
    struct jpeg_error_mgr jerr;
    FILE * outfile;
    JSAMPROW row_pointer[1];
    int row_stride;

    cinfo.err = jpeg_std_error(&jerr);
    jpeg_create_compress(&cinfo);
    if ((outfile = fopen(filename, "wb")) == NULL)
    {
        eDebug("[Picload] jpeg can't open %s", filename);
        return 1;
    }
    eDebug("[Picload] save Thumbnail... %s",filename);

    jpeg_stdio_dest(&cinfo, outfile);
    cinfo.image_width = ox;
    cinfo.image_height = oy;
    cinfo.input_components = 3;
    cinfo.in_color_space = JCS_RGB;
    jpeg_set_defaults(&cinfo);
    jpeg_set_quality(&cinfo, 70, TRUE );
    jpeg_start_compress(&cinfo, TRUE);
    row_stride = ox * 3;
    while (cinfo.next_scanline < cinfo.image_height)
    {
        row_pointer[0] = & pic_buffer[cinfo.next_scanline * row_stride];
        (void) jpeg_write_scanlines(&cinfo, row_pointer, 1);
    }
    jpeg_finish_compress(&cinfo);
    fclose(outfile);
    jpeg_destroy_compress(&cinfo);
    return 0;
}

//-------------------------------------------------------------------

// Expand `l` palette indices from `cmb` into an RGB byte stream at `rgbb`
// using GIF color map `cm`. (`s` is unused here.)
inline void m_rend_gif_decodecolormap(unsigned char *cmb, unsigned char *rgbb, ColorMapObject *cm, int s, int l)
{
    GifColorType *cmentry;
    int i;
    for (i = 0; i < l; i++)
    {
        cmentry = &cm->Colors[cmb[i]];
        *(rgbb++) = cmentry->Red;
        *(rgbb++) = cmentry->Green;
        *(rgbb++) = cmentry->Blue;
    }
}

// Decode the first image of a GIF into `filepara` as 8-bit indexed data with
// palette (filepara->bits = 8).
static void gif_load(Cfilepara* filepara)
{
    unsigned char *pic_buffer = NULL;
    int px, py, i, j;
    unsigned char *fbptr;
    unsigned char *slb=NULL;
    GifFileType *gft;
    GifRecordType rt;
    GifByteType *extension;
    ColorMapObject *cmap;
    int cmaps;
    int extcode;

    gft = DGifOpenFileName(filepara->file);
    if (gft == NULL) return;
    do
    {
        if (DGifGetRecordType(gft, &rt) == GIF_ERROR)
            goto ERROR_R;
        switch(rt)
        {
            case IMAGE_DESC_RECORD_TYPE:
                if (DGifGetImageDesc(gft) == GIF_ERROR)
                    goto ERROR_R;
                filepara->ox = px = gft->Image.Width;
                filepara->oy = py = gft->Image.Height;
                pic_buffer = new unsigned char[px * py];
                filepara->pic_buffer = pic_buffer;
                filepara->bits = 8;
                slb = pic_buffer;
                if (pic_buffer != NULL)
                {
                    // Prefer the image-local color map, fall back to the screen map.
                    cmap = (gft->Image.ColorMap ? gft->Image.ColorMap : gft->SColorMap);
                    cmaps = cmap->ColorCount;
                    filepara->palette_size = cmaps;
                    filepara->palette = new gRGB[cmaps];
                    for (i = 0; i != cmaps; ++i)
                    {
                        filepara->palette[i].a = 0;
                        filepara->palette[i].r = cmap->Colors[i].Red;
                        filepara->palette[i].g = cmap->Colors[i].Green;
                        filepara->palette[i].b = cmap->Colors[i].Blue;
                    }
                    fbptr = pic_buffer;
                    if (!(gft->Image.Interlace))
                    {
                        // NOTE(review): fbptr advances by px*3 per row but the
                        // buffer is 1 byte/pixel and fbptr is never read -- looks
                        // like leftover code; confirm.
                        for (i = 0; i < py; i++, fbptr += px * 3)
                        {
                            if (DGifGetLine(gft, slb, px) == GIF_ERROR)
                                goto ERROR_R;
                            slb += px;
                        }
                    }
                    else
                    {
                        // NOTE(review): all 4 interlace passes write the same
                        // sequential rows (no pass-specific row offsets/strides),
                        // so interlaced GIFs likely decode scrambled -- confirm.
                        for (j = 0; j < 4; j++)
                        {
                            slb = pic_buffer;
                            for (i = 0; i < py; i++)
                            {
                                if (DGifGetLine(gft, slb, px) == GIF_ERROR)
                                    goto ERROR_R;
                                slb += px;
                            }
                        }
                    }
                }
                break;
            case EXTENSION_RECORD_TYPE:
                // Skip extension blocks entirely.
                if (DGifGetExtension(gft, &extcode, &extension) == GIF_ERROR)
                    goto ERROR_R;
                while (extension != NULL)
                    if (DGifGetExtensionNext(gft, &extension) == GIF_ERROR)
                        goto ERROR_R;
                break;
            default:
                break;
        }
    }
    while (rt != TERMINATE_RECORD_TYPE);
    DGifCloseFile(gft);
    return;
ERROR_R:
    eDebug("[Picload] <Error gif>");
    DGifCloseFile(gft);
}

//---------------------------------------------------------------------------------------------

ePicLoad::ePicLoad():
    m_filepara(NULL),
    threadrunning(false),
    m_conf(),
    msg_thread(this,1),
    msg_main(eApp,1)
{
    CONNECT(msg_thread.recv_msg, ePicLoad::gotMessage);
    CONNECT(msg_main.recv_msg, ePicLoad::gotMessage);
}

ePicLoad::PConf::PConf():
    max_x(0),
    max_y(0),
    aspect_ratio(1.066400), //4:3
    background(0),
    resizetype(1),
    usecache(false),
    thumbnailsize(180)
{
}

// Ask the decode thread to quit and join it.
void ePicLoad::waitFinished()
{
    msg_thread.send(Message(Message::quit));
    kill();
}

ePicLoad::~ePicLoad()
{
    if (threadrunning)
        waitFinished();
    if(m_filepara != NULL)
        delete m_filepara;
}

void ePicLoad::thread_finished()
{
    threadrunning=false;
}

// Decode thread entry point: lower priority, then process messages.
void ePicLoad::thread()
{
    hasStarted();
    threadrunning=true;
    nice(4);
    runLoop();
}

// Decode the file described by m_filepara at full quality, then scale it
// to the configured target size.
void ePicLoad::decodePic()
{
    eDebug("[Picload] decode picture... %s",m_filepara->file);

    switch(m_filepara->id)
    {
        case F_PNG: png_load(m_filepara, m_conf.background); break;
        case F_JPEG: m_filepara->pic_buffer = jpeg_load(m_filepara->file, &m_filepara->ox, &m_filepara->oy, m_filepara->max_x, m_filepara->max_y); break;
        case F_BMP: m_filepara->pic_buffer = bmp_load(m_filepara->file, &m_filepara->ox, &m_filepara->oy); break;
        case F_GIF: gif_load(m_filepara); break;
    }

    if(m_filepara->pic_buffer != NULL)
        resizePic();
}

// Produce a thumbnail: prefer an embedded EXIF thumbnail, then a cached
// thumbnail on disk (keyed by a CRC32 over the first ~100kB of the file);
// otherwise decode, downscale, and (optionally) write the cache file.
void ePicLoad::decodeThumb()
{
    eDebug("[Picload] get Thumbnail... %s",m_filepara->file);

    bool exif_thumbnail = false;
    bool cachefile_found = false;
    std::string cachefile = "";
    std::string cachedir = "/.Thumbnails";

    if(m_filepara->id == F_JPEG)
    {
        // Try the EXIF-embedded thumbnail first; if found it is extracted to
        // THUMBNAILTMPFILE and m_filepara->file is redirected to it.
        Cexif *exif = new Cexif;
        if(exif->DecodeExif(m_filepara->file, 1))
        {
            if(exif->m_exifinfo->IsExif)
            {
                if(exif->m_exifinfo->Thumnailstate==2)
                {
                    free(m_filepara->file);
                    m_filepara->file = strdup(THUMBNAILTMPFILE);
                    exif_thumbnail = true;
                    eDebug("[Picload] Exif Thumbnail found");
                }
                m_filepara->addExifInfo(exif->m_exifinfo->CameraMake);
                m_filepara->addExifInfo(exif->m_exifinfo->CameraModel);
                m_filepara->addExifInfo(exif->m_exifinfo->DateTime);
                char buf[20];
                snprintf(buf, 20, "%d x %d", exif->m_exifinfo->Width, exif->m_exifinfo->Height);
                m_filepara->addExifInfo(buf);
            }
            exif->ClearExif();
        }
        delete exif;
    }

    if((! exif_thumbnail) && m_conf.usecache)
    {
        // Build the cache key: CRC32 over (at most) the first 100kB of the file.
        if(FILE *f=fopen(m_filepara->file, "rb"))
        {
            int c;
            int count = 1024*100;
            unsigned long crc32 = 0;
            char crcstr[9];*crcstr=0;

            while ((c=getc(f))!=EOF)
            {
                crc32 = crc32_table[((crc32) ^ (c)) & 0xFF] ^ ((crc32) >> 8);
                if(--count < 0) break;
            }

            fclose(f);
            crc32 = ~crc32;
            sprintf(crcstr, "%08lX", crc32);

            cachedir = m_filepara->file;
            // NOTE(review): find_last_of returns std::string::size_type; storing
            // it in `unsigned int` can break the npos comparison on LP64 -- confirm.
            unsigned int pos = cachedir.find_last_of("/");
            if (pos != std::string::npos)
                cachedir = cachedir.substr(0, pos) + "/.Thumbnails";

            cachefile = cachedir + std::string("/pc_") + crcstr;
            if(!access(cachefile.c_str(), R_OK))
            {
                cachefile_found = true;
                free(m_filepara->file);
                m_filepara->file = strdup(cachefile.c_str());
                m_filepara->id = F_JPEG;
                eDebug("[Picload] Cache File found");
            }
        }
    }

    switch(m_filepara->id)
    {
        case F_PNG: png_load(m_filepara, m_conf.background); break;
        case F_JPEG: m_filepara->pic_buffer = jpeg_load(m_filepara->file, &m_filepara->ox, &m_filepara->oy, m_filepara->max_x, m_filepara->max_y); break;
        case F_BMP: m_filepara->pic_buffer = bmp_load(m_filepara->file, &m_filepara->ox, &m_filepara->oy); break;
        case F_GIF: gif_load(m_filepara); break;
    }

    if(exif_thumbnail)
        ::unlink(THUMBNAILTMPFILE);

    if(m_filepara->pic_buffer != NULL)
    {
        //save cachefile
        if(m_conf.usecache && (! exif_thumbnail) && (! cachefile_found))
        {
            if(access(cachedir.c_str(), R_OK))
                ::mkdir(cachedir.c_str(), 0755);

            //resize for Thumbnail
            int imx, imy;
            if (m_filepara->ox <= m_filepara->oy)
            {
                imy = m_conf.thumbnailsize;
                imx = (int)( (m_conf.thumbnailsize * ((double)m_filepara->ox)) / ((double)m_filepara->oy) );
            }
            else
            {
                imx = m_conf.thumbnailsize;
                imy = (int)( (m_conf.thumbnailsize * ((double)m_filepara->oy)) / ((double)m_filepara->ox) );
            }

            m_filepara->pic_buffer = color_resize(m_filepara->pic_buffer, m_filepara->ox, m_filepara->oy, imx, imy);
            m_filepara->ox = imx;
            m_filepara->oy = imy;

            if(jpeg_save(cachefile.c_str(), m_filepara->ox, m_filepara->oy, m_filepara->pic_buffer))
                eDebug("[Picload] error saving cachefile");
        }

        resizePic();
    }
}

// Scale m_filepara->pic_buffer to the configured max_x/max_y, honoring the
// configured aspect ratio and resize algorithm; updates ox/oy.
void ePicLoad::resizePic()
{
    int imx, imy;

    if (m_conf.aspect_ratio == 0) // do not keep aspect ratio but just fill the destination area
    {
        imx = m_filepara->max_x;
        imy = m_filepara->max_y;
    }
    else if ((m_conf.aspect_ratio * m_filepara->oy * m_filepara->max_x / m_filepara->ox) <= m_filepara->max_y)
    {
        imx = m_filepara->max_x;
        imy = (int)(m_conf.aspect_ratio * m_filepara->oy * m_filepara->max_x / m_filepara->ox);
    }
    else
    {
        imx = (int)((1.0/m_conf.aspect_ratio) * m_filepara->ox * m_filepara->max_y / m_filepara->oy);
        imy = m_filepara->max_y;
    }

    // 8-bit (indexed) data can only be nearest-neighbour resized; for RGB
    // data, resizetype selects box-filter (color_resize) vs nearest-neighbour.
    if (m_filepara->bits == 8)
        m_filepara->pic_buffer = simple_resize_8(m_filepara->pic_buffer, m_filepara->ox, m_filepara->oy, imx, imy);
    else if (m_conf.resizetype)
        m_filepara->pic_buffer = color_resize(m_filepara->pic_buffer, m_filepara->ox, m_filepara->oy, imx, imy);
    else
        m_filepara->pic_buffer = simple_resize_24(m_filepara->pic_buffer, m_filepara->ox, m_filepara->oy, imx, imy);

    m_filepara->ox = imx;
    m_filepara->oy = imy;
}

// Message pump shared by the decode thread and the main thread.
void ePicLoad::gotMessage(const Message &msg)
{
    switch (msg.type)
    {
        case Message::decode_Pic:
            decodePic();
            msg_main.send(Message(Message::decode_finished));
            break;
        case Message::decode_Thumb:
            decodeThumb();
            msg_main.send(Message(Message::decode_finished));
            break;
        case Message::quit: // called from decode thread
            eDebug("[Picload] decode thread ... got quit msg");
            quit(0);
            break;
        case Message::decode_finished: // called from main thread
            //eDebug("[Picload] decode finished... %s", m_filepara->file);
            if(m_filepara->callback)
                PictureData(m_filepara->picinfo.c_str());
            else
            {
                if(m_filepara != NULL)
                {
                    delete m_filepara;
                    m_filepara = NULL;
                }
            }
            break;
        default:
            eDebug("unhandled thread message");
    }
}

// Detect the file format from its magic bytes, set up m_filepara and kick off
// decoding (what==1: full picture, otherwise thumbnail), synchronously or on
// the decode thread. Returns 0 on success, 1 on error/busy.
int ePicLoad::startThread(int what, const char *file, int x, int y, bool async)
{
    if(async && threadrunning && m_filepara != NULL)
    {
        eDebug("[Picload] thread running");
        m_filepara->callback = false;
        return 1;
    }

    if(m_filepara != NULL)
    {
        delete m_filepara;
        m_filepara = NULL;
    }

    int file_id = -1;
    unsigned char id[10];
    int fd = ::open(file, O_RDONLY);
    if (fd == -1) return 1;
    ::read(fd, id, 10);
    ::close(fd);

    if(id[1] == 'P' && id[2] == 'N' && id[3] == 'G') file_id = F_PNG;
    else if(id[6] == 'J' && id[7] == 'F' && id[8] == 'I' && id[9] == 'F') file_id = F_JPEG;
    else if(id[0] == 0xff && id[1] == 0xd8 && id[2] == 0xff) file_id = F_JPEG;
    else if(id[0] == 'B' && id[1] == 'M' ) file_id = F_BMP;
    else if(id[0] == 'G' && id[1] == 'I' && id[2] == 'F') file_id = F_GIF;

    if(file_id < 0)
    {
        eDebug("[Picload] <format not supported>");
        return 1;
    }

    m_filepara = new Cfilepara(file, file_id, getSize(file));
    m_filepara->max_x = x > 0 ? x : m_conf.max_x;
    // NOTE(review): condition tests `x > 0` but selects `y` -- looks like a
    // copy-paste bug; should presumably be `y > 0 ? y : m_conf.max_y`. Confirm.
    m_filepara->max_y = x > 0 ? y : m_conf.max_y;

    if(m_filepara->max_x <= 0 || m_filepara->max_y <= 0)
    {
        delete m_filepara;
        m_filepara = NULL;
        eDebug("[Picload] <error in Para>");
        return 1;
    }

    if (async)
    {
        if(what==1)
            msg_thread.send(Message(Message::decode_Pic));
        else
            msg_thread.send(Message(Message::decode_Thumb));
        run();
    }
    else if (what == 1)
        decodePic();
    else
        decodeThumb();
    return 0;
}

RESULT ePicLoad::startDecode(const char *file, int x, int y, bool async)
{
    return startThread(1, file, x, y, async);
}

RESULT ePicLoad::getThumbnail(const char *file, int x, int y, bool async)
{
    return startThread(0, file, x, y, async);
}

// Return a Python list of EXIF info strings for `filename`: 23 entries on
// success, otherwise [filename, last-error].
PyObject *ePicLoad::getInfo(const char *filename)
{
    ePyObject list;

    Cexif *exif = new Cexif;
    if(exif->DecodeExif(filename))
    {
        if(exif->m_exifinfo->IsExif)
        {
            char tmp[256];
            int pos=0;
            list = PyList_New(23);
            PyList_SET_ITEM(list, pos++, PyString_FromString(filename));
            PyList_SET_ITEM(list, pos++, PyString_FromString(exif->m_exifinfo->Version));
            PyList_SET_ITEM(list, pos++, PyString_FromString(exif->m_exifinfo->CameraMake));
            PyList_SET_ITEM(list, pos++, PyString_FromString(exif->m_exifinfo->CameraModel));
            PyList_SET_ITEM(list, pos++, PyString_FromString(exif->m_exifinfo->DateTime));
            PyList_SET_ITEM(list, pos++, PyString_FromFormat("%d x %d", exif->m_exifinfo->Width, exif->m_exifinfo->Height));
            PyList_SET_ITEM(list, pos++, PyString_FromString(exif->m_exifinfo->FlashUsed));
            PyList_SET_ITEM(list, pos++, PyString_FromString(exif->m_exifinfo->Orientation));
            PyList_SET_ITEM(list, pos++, PyString_FromString(exif->m_exifinfo->Comments));
            PyList_SET_ITEM(list, pos++, PyString_FromString(exif->m_exifinfo->MeteringMode));
            PyList_SET_ITEM(list, pos++, PyString_FromString(exif->m_exifinfo->ExposureProgram));
            PyList_SET_ITEM(list, pos++, PyString_FromString(exif->m_exifinfo->LightSource));
            PyList_SET_ITEM(list, pos++, PyString_FromFormat("%d", exif->m_exifinfo->CompressionLevel));
            PyList_SET_ITEM(list, pos++, PyString_FromFormat("%d", exif->m_exifinfo->ISOequivalent));
            sprintf(tmp, "%.2f", exif->m_exifinfo->Xresolution);
            PyList_SET_ITEM(list, pos++, PyString_FromString(tmp));
            sprintf(tmp, "%.2f", exif->m_exifinfo->Yresolution);
            PyList_SET_ITEM(list, pos++, PyString_FromString(tmp));
            PyList_SET_ITEM(list, pos++, PyString_FromString(exif->m_exifinfo->ResolutionUnit));
            sprintf(tmp, "%.2f", exif->m_exifinfo->Brightness);
            PyList_SET_ITEM(list, pos++, PyString_FromString(tmp));
            sprintf(tmp, "%.5f sec.", exif->m_exifinfo->ExposureTime);
            PyList_SET_ITEM(list, pos++, PyString_FromString(tmp));
            sprintf(tmp, "%.5f", exif->m_exifinfo->ExposureBias);
            PyList_SET_ITEM(list, pos++, PyString_FromString(tmp));
            sprintf(tmp, "%.5f", exif->m_exifinfo->Distance);
            PyList_SET_ITEM(list, pos++, PyString_FromString(tmp));
            sprintf(tmp, "%.5f", exif->m_exifinfo->CCDWidth);
            PyList_SET_ITEM(list, pos++, PyString_FromString(tmp));
            sprintf(tmp, "%.2f", exif->m_exifinfo->ApertureFNumber);
            PyList_SET_ITEM(list, pos++, PyString_FromString(tmp));
        }
        else
        {
            list = PyList_New(2);
            PyList_SET_ITEM(list, 0, PyString_FromString(filename));
            PyList_SET_ITEM(list, 1, PyString_FromString(exif->m_szLastError));
        }
        exif->ClearExif();
    }
    else
    {
        list = PyList_New(2);
        PyList_SET_ITEM(list, 0, PyString_FromString(filename));
        PyList_SET_ITEM(list, 1, PyString_FromString(exif->m_szLastError));
    }
    delete exif;

    return list ? (PyObject*)list : (PyObject*)PyList_New(0);
}

// Copy the decoded picture into a new gPixmap of max_x*max_y, centering it
// and padding borders with the configured background color. Consumes and
// frees m_filepara. Returns 0 on success (also when there was no picture),
// 1 when nothing was being decoded.
int ePicLoad::getData(ePtr<gPixmap> &result)
{
    result = 0;
    if (m_filepara == NULL)
    {
        eDebug("picload - Weird situation, I wasn't decoding anything!");
        return 1;
    }
    if(m_filepara->pic_buffer == NULL)
    {
        delete m_filepara;
        m_filepara = NULL;
        return 0;
    }
    if (m_filepara->bits == 8)
    {
        // Indexed path: 8-bit pixmap, palette ownership moves to the surface.
        result=new gPixmap(m_filepara->max_x, m_filepara->max_y, 8, NULL, gPixmap::accelAlways);
        gUnmanagedSurface *surface = result->surface;
        surface->clut.data = m_filepara->palette;
        surface->clut.colors = m_filepara->palette_size;
        m_filepara->palette = NULL; // transfer ownership
        // o_y/u_y: top/bottom padding rows; v_x/h_x: left/right padding columns.
        int o_y=0, u_y=0, v_x=0, h_x=0;
        int extra_stride = surface->stride - surface->x;
        unsigned char *tmp_buffer=((unsigned char *)(surface->data));
        unsigned char *origin = m_filepara->pic_buffer;
        if(m_filepara->oy < m_filepara->max_y)
        {
            o_y = (m_filepara->max_y - m_filepara->oy) / 2;
            u_y = m_filepara->max_y - m_filepara->oy - o_y;
        }
        if(m_filepara->ox < m_filepara->max_x)
        {
            v_x = (m_filepara->max_x - m_filepara->ox) / 2;
            h_x = m_filepara->max_x - m_filepara->ox - v_x;
        }
        // Map the configured background RGB to the nearest palette index.
        int background;
        gRGB bg(m_conf.background);
        background = surface->clut.findColor(bg);
        if(m_filepara->oy < m_filepara->max_y)
        {
            memset(tmp_buffer, background, o_y * surface->stride);
            tmp_buffer += o_y * surface->stride;
        }
        for(int a = m_filepara->oy; a > 0; --a)
        {
            if(m_filepara->ox < m_filepara->max_x)
            {
                memset(tmp_buffer, background, v_x);
                tmp_buffer += v_x;
            }
            memcpy(tmp_buffer, origin, m_filepara->ox);
            tmp_buffer += m_filepara->ox;
            origin += m_filepara->ox;
            if(m_filepara->ox < m_filepara->max_x)
            {
                memset(tmp_buffer, background, h_x);
                tmp_buffer += h_x;
            }
            tmp_buffer += extra_stride;
        }
        if(m_filepara->oy < m_filepara->max_y)
        {
            memset(tmp_buffer, background, u_y * surface->stride);
        }
    }
    else
    {
        // RGB path: 32-bit pixmap, pixels written as BGRA with opaque alpha.
        result=new gPixmap(m_filepara->max_x, m_filepara->max_y, 32, NULL, gPixmap::accelAuto);
        gUnmanagedSurface *surface = result->surface;
        int o_y=0, u_y=0, v_x=0, h_x=0;
        unsigned char *tmp_buffer=((unsigned char *)(surface->data));
        unsigned char *origin = m_filepara->pic_buffer;
        int extra_stride = surface->stride - (surface->x * surface->bypp);
        if(m_filepara->oy < m_filepara->max_y)
        {
            o_y = (m_filepara->max_y - m_filepara->oy) / 2;
            u_y = m_filepara->max_y - m_filepara->oy - o_y;
        }
        if(m_filepara->ox < m_filepara->max_x)
        {
            v_x = (m_filepara->max_x - m_filepara->ox) / 2;
            h_x = m_filepara->max_x - m_filepara->ox - v_x;
        }
        int background = m_conf.background;
        if(m_filepara->oy < m_filepara->max_y)
        {
            // NOTE(review): padding rows fill only ox pixels, not the full
            // max_x row width -- looks like a bug; confirm.
            for (int y = o_y; y != 0; --y)
            {
                int* row_buffer = (int*)tmp_buffer;
                for (int x = m_filepara->ox; x !=0; --x)
                    *row_buffer++ = background;
                tmp_buffer += surface->stride;
            }
        }
        for(int a = m_filepara->oy; a > 0; --a)
        {
            if(m_filepara->ox < m_filepara->max_x)
            {
                for(int b = v_x; b != 0; --b)
                {
                    *(int*)tmp_buffer = background;
                    tmp_buffer += 4;
                }
            }
            for(int b = m_filepara->ox; b != 0; --b)
            {
                // RGB source -> BGRA destination, fully opaque.
                tmp_buffer[2] = *origin;
                ++origin;
                tmp_buffer[1] = *origin;
                ++origin;
                tmp_buffer[0] = *origin;
                ++origin;
                tmp_buffer[3] = 0xFF; // alpha
                tmp_buffer += 4;
            }
            if(m_filepara->ox < m_filepara->max_x)
            {
                for(int b = h_x; b != 0; --b)
                {
                    *(int*)tmp_buffer = background;
                    tmp_buffer += 4;
                }
            }
            tmp_buffer += extra_stride;
        }
        if(m_filepara->oy < m_filepara->max_y)
        {
            for (int y = u_y; y != 0; --y)
            {
                int* row_buffer = (int*)tmp_buffer;
                for (int x = m_filepara->ox; x !=0; --x)
                    *row_buffer++ = background;
                tmp_buffer += surface->stride;
            }
        }
    }
    delete m_filepara;
    m_filepara = NULL;
    return 0;
}

// Python-facing setPara: expects a sequence of at least 7 items
// (width, height, aspectRatio, as, useCache, resizeType, "#AARRGGBB").
RESULT ePicLoad::setPara(PyObject *val)
{
    if (!PySequence_Check(val))
        return 0;
    if (PySequence_Size(val) < 7)
        return 0;
    else
    {
        ePyObject fast = PySequence_Fast(val, "");
        int width = PyInt_AsLong(PySequence_Fast_GET_ITEM(fast, 0));
        int height = PyInt_AsLong(PySequence_Fast_GET_ITEM(fast, 1));
        // NOTE(review): aspectRatio is read with PyInt_AsLong, truncating any
        // fractional Python value before the double conversion -- confirm intent.
        double aspectRatio = PyInt_AsLong(PySequence_Fast_GET_ITEM(fast, 2));
        int as = PyInt_AsLong(PySequence_Fast_GET_ITEM(fast, 3));
        bool useCache = PyInt_AsLong(PySequence_Fast_GET_ITEM(fast, 4));
        int resizeType = PyInt_AsLong(PySequence_Fast_GET_ITEM(fast, 5));
        const char *bg_str = PyString_AsString(PySequence_Fast_GET_ITEM(fast, 6));
        return setPara(width, height, aspectRatio, as, useCache, resizeType, bg_str);
    }
    return 1;
}

// Apply decode/scale configuration. bg_str must look like "#AARRGGBB" to be
// parsed; aspect_ratio becomes aspectRatio/as (0 disables aspect preservation).
RESULT ePicLoad::setPara(int width, int height, double aspectRatio, int as, bool useCache, int resizeType, const char *bg_str)
{
    m_conf.max_x = width;
    m_conf.max_y = height;
    m_conf.aspect_ratio = as == 0 ? 0.0 : aspectRatio / as;
    m_conf.usecache = useCache;
    m_conf.resizetype = resizeType;
    if(bg_str[0] == '#' && strlen(bg_str)==9)
        m_conf.background = strtoul(bg_str+1, NULL, 16);
    eDebug("[Picload] setPara max-X=%d max-Y=%d aspect_ratio=%lf cache=%d resize=%d bg=#%08X", m_conf.max_x, m_conf.max_y, m_conf.aspect_ratio, (int)m_conf.usecache, (int)m_conf.resizetype, m_conf.background);
    return 1;
}

//------------------------------------------------------------------------------------

//for old plugins
SWIG_VOID(int) loadPic(ePtr<gPixmap> &result, std::string filename, int x, int y, int aspect, int resize_mode, int rotate, int background, std::string cachefile)
{
    long asp1, asp2;
    result = 0;
    eDebug("deprecated loadPic function used!!! please use the non blocking version! you can see demo code in Pictureplayer plugin... 
this function is removed in the near future!"); ePicLoad mPL; switch(aspect) { case 1: asp1 = 16*576, asp2 = 9*720; break; //16:9 case 2: asp1 = 16*576, asp2 = 10*720; break; //16:10 case 3: asp1 = 5*576, asp2 = 4*720; break; //5:4 default: asp1 = 4*576, asp2 = 3*720; break; //4:3 } ePyObject tuple = PyTuple_New(7); PyTuple_SET_ITEM(tuple, 0, PyLong_FromLong(x)); PyTuple_SET_ITEM(tuple, 1, PyLong_FromLong(y)); PyTuple_SET_ITEM(tuple, 2, PyLong_FromLong(asp1)); PyTuple_SET_ITEM(tuple, 3, PyLong_FromLong(asp2)); PyTuple_SET_ITEM(tuple, 4, PyLong_FromLong(0)); PyTuple_SET_ITEM(tuple, 5, PyLong_FromLong(resize_mode)); if(background) PyTuple_SET_ITEM(tuple, 6, PyString_FromString("#ff000000")); else PyTuple_SET_ITEM(tuple, 6, PyString_FromString("#00000000")); mPL.setPara(tuple); if(!mPL.startDecode(filename.c_str(), 0, 0, false)) mPL.getData(result); return 0; }
vit2/vit-e2
lib/gdi/picload.cpp
C++
gpl-2.0
32,254
<?php /* vim: set expandtab tabstop=4 shiftwidth=4 softtabstop=4: */ /** * Image_Graph - PEAR PHP OO Graph Rendering Utility. * * PHP version 5 * * LICENSE: This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2.1 of the License, or (at your * option) any later version. This library is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser * General Public License for more details. You should have received a copy of * the GNU Lesser General Public License along with this library; if not, write * to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307 USA * * @category Images * @package Image_Graph * @subpackage Text * @author Jesper Veggerby <pear.nosey@veggerby.dk> * @author Stefan Neufeind <pear.neufeind@speedpartner.de> * @copyright 2003-2009 The PHP Group * @license http://www.gnu.org/copyleft/lesser.html LGPL License 2.1 * @version SVN: $Id: Font.php 291170 2009-11-23 03:50:22Z neufeind $ * @link http://pear.php.net/package/Image_Graph */ /** * Include file Image/Graph/Common.php */ require_once 'Image/Graph/Common.php'; /** * A font. 
* * @category Images * @package Image_Graph * @subpackage Text * @author Jesper Veggerby <pear.nosey@veggerby.dk> * @author Stefan Neufeind <pear.neufeind@speedpartner.de> * @copyright 2003-2009 The PHP Group * @license http://www.gnu.org/copyleft/lesser.html LGPL License 2.1 * @version Release: @package_version@ * @link http://pear.php.net/package/Image_Graph * @abstract */ class Image_Graph_Font extends Image_Graph_Common { /** * The name of the font * @var string * @access private */ var $_name = false; /** * The angle of the output * @var int * @access private */ var $_angle = false; /** * The size of the font * @var int * @access private */ var $_size = 11; /** * The color of the font * @var Color * @access private */ var $_color = 'black'; /** * Image_Graph_Font [Constructor] * * @param string $name Font name * @param int $size Font size */ function Image_Graph_Font($name = false, $size = false) { parent::__construct(); if ($name !== false) { $this->_name = $name; } if ($size !== false) { $this->_size = $size; } } /** * Set the color of the font * * @param mixed $color The color object of the Font * * @return void */ function setColor($color) { $this->_color = $color; } /** * Set the angle slope of the output font. 
* * 0 = normal, 90 = bottom and up, 180 = upside down, 270 = top and down * * @param int $angle The angle in degrees to slope the text * * @return void */ function setAngle($angle) { $this->_angle = $angle; } /** * Set the size of the font * * @param int $size The size in pixels of the font * * @return void */ function setSize($size) { $this->_size = $size; } /** * Get the font 'array' * * @param array $options Font options (optional) * * @return array The font 'summary' to pass to the canvas * @access private */ function _getFont($options = false) { if ($options === false) { $options = array(); } if ($this->_name !== false) { $options['name'] = $this->_name; } if (!isset($options['color'])) { $options['color'] = $this->_color; } if (!isset($options['size'])) { $options['size'] = $this->_size; } if ((!isset($options['angle'])) && ($this->_angle !== false)) { $options['angle'] = $this->_angle; } return $options; } } ?>
eireford/mahara
htdocs/lib/pear/Image/Graph/Font.php
PHP
gpl-3.0
4,265
/* * Copyright (C) 2013 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package android.hardware.location; import android.content.Context; import android.content.pm.PackageManager; import android.location.IFusedGeofenceHardware; import android.location.IGpsGeofenceHardware; import android.location.Location; import android.os.Handler; import android.os.IBinder; import android.os.IInterface; import android.os.Message; import android.os.PowerManager; import android.os.RemoteException; import android.util.Log; import android.util.SparseArray; import java.util.ArrayList; import java.util.Iterator; /** * This class manages the geofences which are handled by hardware. 
* * @hide */ public final class GeofenceHardwareImpl { private static final String TAG = "GeofenceHardwareImpl"; private static final boolean DEBUG = Log.isLoggable(TAG, Log.DEBUG); private static final int FIRST_VERSION_WITH_CAPABILITIES = 2; private final Context mContext; private static GeofenceHardwareImpl sInstance; private PowerManager.WakeLock mWakeLock; private final SparseArray<IGeofenceHardwareCallback> mGeofences = new SparseArray<IGeofenceHardwareCallback>(); private final ArrayList<IGeofenceHardwareMonitorCallback>[] mCallbacks = new ArrayList[GeofenceHardware.NUM_MONITORS]; private final ArrayList<Reaper> mReapers = new ArrayList<Reaper>(); private IFusedGeofenceHardware mFusedService; private IGpsGeofenceHardware mGpsService; private int mCapabilities; private int mVersion = 1; private int[] mSupportedMonitorTypes = new int[GeofenceHardware.NUM_MONITORS]; // mGeofenceHandler message types private static final int GEOFENCE_TRANSITION_CALLBACK = 1; private static final int ADD_GEOFENCE_CALLBACK = 2; private static final int REMOVE_GEOFENCE_CALLBACK = 3; private static final int PAUSE_GEOFENCE_CALLBACK = 4; private static final int RESUME_GEOFENCE_CALLBACK = 5; private static final int GEOFENCE_CALLBACK_BINDER_DIED = 6; // mCallbacksHandler message types private static final int GEOFENCE_STATUS = 1; private static final int CALLBACK_ADD = 2; private static final int CALLBACK_REMOVE = 3; private static final int MONITOR_CALLBACK_BINDER_DIED = 4; // mReaperHandler message types private static final int REAPER_GEOFENCE_ADDED = 1; private static final int REAPER_MONITOR_CALLBACK_ADDED = 2; private static final int REAPER_REMOVED = 3; // The following constants need to match GpsLocationFlags enum in gps.h private static final int LOCATION_INVALID = 0; private static final int LOCATION_HAS_LAT_LONG = 1; private static final int LOCATION_HAS_ALTITUDE = 2; private static final int LOCATION_HAS_SPEED = 4; private static final int LOCATION_HAS_BEARING = 8; 
private static final int LOCATION_HAS_ACCURACY = 16; // Resolution level constants used for permission checks. // These constants must be in increasing order of finer resolution. private static final int RESOLUTION_LEVEL_NONE = 1; private static final int RESOLUTION_LEVEL_COARSE = 2; private static final int RESOLUTION_LEVEL_FINE = 3; // Capability constant corresponding to fused_location.h entry when geofencing supports GNNS. private static final int CAPABILITY_GNSS = 1; public synchronized static GeofenceHardwareImpl getInstance(Context context) { if (sInstance == null) { sInstance = new GeofenceHardwareImpl(context); } return sInstance; } private GeofenceHardwareImpl(Context context) { mContext = context; // Init everything to unsupported. setMonitorAvailability(GeofenceHardware.MONITORING_TYPE_GPS_HARDWARE, GeofenceHardware.MONITOR_UNSUPPORTED); setMonitorAvailability( GeofenceHardware.MONITORING_TYPE_FUSED_HARDWARE, GeofenceHardware.MONITOR_UNSUPPORTED); } private void acquireWakeLock() { if (mWakeLock == null) { PowerManager powerManager = (PowerManager) mContext.getSystemService(Context.POWER_SERVICE); mWakeLock = powerManager.newWakeLock(PowerManager.PARTIAL_WAKE_LOCK, TAG); } mWakeLock.acquire(); } private void releaseWakeLock() { if (mWakeLock.isHeld()) mWakeLock.release(); } private void updateGpsHardwareAvailability() { //Check which monitors are available. boolean gpsSupported; try { gpsSupported = mGpsService.isHardwareGeofenceSupported(); } catch (RemoteException e) { Log.e(TAG, "Remote Exception calling LocationManagerService"); gpsSupported = false; } if (gpsSupported) { // Its assumed currently available at startup. // native layer will update later. 
setMonitorAvailability(GeofenceHardware.MONITORING_TYPE_GPS_HARDWARE, GeofenceHardware.MONITOR_CURRENTLY_AVAILABLE); } } private void updateFusedHardwareAvailability() { boolean fusedSupported; try { final boolean hasGnnsCapabilities = (mVersion < FIRST_VERSION_WITH_CAPABILITIES) || (mCapabilities & CAPABILITY_GNSS) != 0; fusedSupported = (mFusedService != null ? mFusedService.isSupported() && hasGnnsCapabilities : false); } catch (RemoteException e) { Log.e(TAG, "RemoteException calling LocationManagerService"); fusedSupported = false; } if(fusedSupported) { setMonitorAvailability( GeofenceHardware.MONITORING_TYPE_FUSED_HARDWARE, GeofenceHardware.MONITOR_CURRENTLY_AVAILABLE); } } public void setGpsHardwareGeofence(IGpsGeofenceHardware service) { if (mGpsService == null) { mGpsService = service; updateGpsHardwareAvailability(); } else if (service == null) { mGpsService = null; Log.w(TAG, "GPS Geofence Hardware service seems to have crashed"); } else { Log.e(TAG, "Error: GpsService being set again."); } } public void onCapabilities(int capabilities) { mCapabilities = capabilities; updateFusedHardwareAvailability(); } public void setVersion(int version) { mVersion = version; updateFusedHardwareAvailability(); } public void setFusedGeofenceHardware(IFusedGeofenceHardware service) { if(mFusedService == null) { mFusedService = service; updateFusedHardwareAvailability(); } else if(service == null) { mFusedService = null; Log.w(TAG, "Fused Geofence Hardware service seems to have crashed"); } else { Log.e(TAG, "Error: FusedService being set again"); } } public int[] getMonitoringTypes() { boolean gpsSupported; boolean fusedSupported; synchronized (mSupportedMonitorTypes) { gpsSupported = mSupportedMonitorTypes[GeofenceHardware.MONITORING_TYPE_GPS_HARDWARE] != GeofenceHardware.MONITOR_UNSUPPORTED; fusedSupported = mSupportedMonitorTypes[GeofenceHardware.MONITORING_TYPE_FUSED_HARDWARE] != GeofenceHardware.MONITOR_UNSUPPORTED; } if(gpsSupported) { if(fusedSupported) { return 
new int[] { GeofenceHardware.MONITORING_TYPE_GPS_HARDWARE, GeofenceHardware.MONITORING_TYPE_FUSED_HARDWARE }; } else { return new int[] { GeofenceHardware.MONITORING_TYPE_GPS_HARDWARE }; } } else if (fusedSupported) { return new int[] { GeofenceHardware.MONITORING_TYPE_FUSED_HARDWARE }; } else { return new int[0]; } } public int getStatusOfMonitoringType(int monitoringType) { synchronized (mSupportedMonitorTypes) { if (monitoringType >= mSupportedMonitorTypes.length || monitoringType < 0) { throw new IllegalArgumentException("Unknown monitoring type"); } return mSupportedMonitorTypes[monitoringType]; } } public int getCapabilitiesForMonitoringType(int monitoringType) { switch (mSupportedMonitorTypes[monitoringType]) { case GeofenceHardware.MONITOR_CURRENTLY_AVAILABLE: switch (monitoringType) { case GeofenceHardware.MONITORING_TYPE_GPS_HARDWARE: return CAPABILITY_GNSS; case GeofenceHardware.MONITORING_TYPE_FUSED_HARDWARE: if (mVersion >= FIRST_VERSION_WITH_CAPABILITIES) { return mCapabilities; } // This was the implied capability on old FLP HAL versions that didn't // have the capability callback. return CAPABILITY_GNSS; } break; } return 0; } public boolean addCircularFence( int monitoringType, GeofenceHardwareRequestParcelable request, IGeofenceHardwareCallback callback) { int geofenceId = request.getId(); // This API is not thread safe. Operations on the same geofence need to be serialized // by upper layers if (DEBUG) { String message = String.format( "addCircularFence: monitoringType=%d, %s", monitoringType, request); Log.d(TAG, message); } boolean result; // The callback must be added before addCircularHardwareGeofence is called otherwise the // callback might not be called after the geofence is added in the geofence hardware. // This also means that the callback must be removed if the addCircularHardwareGeofence // operations is not called or fails. 
synchronized (mGeofences) { mGeofences.put(geofenceId, callback); } switch (monitoringType) { case GeofenceHardware.MONITORING_TYPE_GPS_HARDWARE: if (mGpsService == null) return false; try { result = mGpsService.addCircularHardwareGeofence( request.getId(), request.getLatitude(), request.getLongitude(), request.getRadius(), request.getLastTransition(), request.getMonitorTransitions(), request.getNotificationResponsiveness(), request.getUnknownTimer()); } catch (RemoteException e) { Log.e(TAG, "AddGeofence: Remote Exception calling LocationManagerService"); result = false; } break; case GeofenceHardware.MONITORING_TYPE_FUSED_HARDWARE: if(mFusedService == null) { return false; } try { mFusedService.addGeofences( new GeofenceHardwareRequestParcelable[] { request }); result = true; } catch(RemoteException e) { Log.e(TAG, "AddGeofence: RemoteException calling LocationManagerService"); result = false; } break; default: result = false; } if (result) { Message m = mReaperHandler.obtainMessage(REAPER_GEOFENCE_ADDED, callback); m.arg1 = monitoringType; mReaperHandler.sendMessage(m); } else { synchronized (mGeofences) { mGeofences.remove(geofenceId); } } if (DEBUG) Log.d(TAG, "addCircularFence: Result is: " + result); return result; } public boolean removeGeofence(int geofenceId, int monitoringType) { // This API is not thread safe. 
Operations on the same geofence need to be serialized // by upper layers if (DEBUG) Log.d(TAG, "Remove Geofence: GeofenceId: " + geofenceId); boolean result = false; synchronized (mGeofences) { if (mGeofences.get(geofenceId) == null) { throw new IllegalArgumentException("Geofence " + geofenceId + " not registered."); } } switch (monitoringType) { case GeofenceHardware.MONITORING_TYPE_GPS_HARDWARE: if (mGpsService == null) return false; try { result = mGpsService.removeHardwareGeofence(geofenceId); } catch (RemoteException e) { Log.e(TAG, "RemoveGeofence: Remote Exception calling LocationManagerService"); result = false; } break; case GeofenceHardware.MONITORING_TYPE_FUSED_HARDWARE: if(mFusedService == null) { return false; } try { mFusedService.removeGeofences(new int[] { geofenceId }); result = true; } catch(RemoteException e) { Log.e(TAG, "RemoveGeofence: RemoteException calling LocationManagerService"); result = false; } break; default: result = false; } if (DEBUG) Log.d(TAG, "removeGeofence: Result is: " + result); return result; } public boolean pauseGeofence(int geofenceId, int monitoringType) { // This API is not thread safe. 
Operations on the same geofence need to be serialized // by upper layers if (DEBUG) Log.d(TAG, "Pause Geofence: GeofenceId: " + geofenceId); boolean result; synchronized (mGeofences) { if (mGeofences.get(geofenceId) == null) { throw new IllegalArgumentException("Geofence " + geofenceId + " not registered."); } } switch (monitoringType) { case GeofenceHardware.MONITORING_TYPE_GPS_HARDWARE: if (mGpsService == null) return false; try { result = mGpsService.pauseHardwareGeofence(geofenceId); } catch (RemoteException e) { Log.e(TAG, "PauseGeofence: Remote Exception calling LocationManagerService"); result = false; } break; case GeofenceHardware.MONITORING_TYPE_FUSED_HARDWARE: if(mFusedService == null) { return false; } try { mFusedService.pauseMonitoringGeofence(geofenceId); result = true; } catch(RemoteException e) { Log.e(TAG, "PauseGeofence: RemoteException calling LocationManagerService"); result = false; } break; default: result = false; } if (DEBUG) Log.d(TAG, "pauseGeofence: Result is: " + result); return result; } public boolean resumeGeofence(int geofenceId, int monitoringType, int monitorTransition) { // This API is not thread safe. 
Operations on the same geofence need to be serialized // by upper layers if (DEBUG) Log.d(TAG, "Resume Geofence: GeofenceId: " + geofenceId); boolean result; synchronized (mGeofences) { if (mGeofences.get(geofenceId) == null) { throw new IllegalArgumentException("Geofence " + geofenceId + " not registered."); } } switch (monitoringType) { case GeofenceHardware.MONITORING_TYPE_GPS_HARDWARE: if (mGpsService == null) return false; try { result = mGpsService.resumeHardwareGeofence(geofenceId, monitorTransition); } catch (RemoteException e) { Log.e(TAG, "ResumeGeofence: Remote Exception calling LocationManagerService"); result = false; } break; case GeofenceHardware.MONITORING_TYPE_FUSED_HARDWARE: if(mFusedService == null) { return false; } try { mFusedService.resumeMonitoringGeofence(geofenceId, monitorTransition); result = true; } catch(RemoteException e) { Log.e(TAG, "ResumeGeofence: RemoteException calling LocationManagerService"); result = false; } break; default: result = false; } if (DEBUG) Log.d(TAG, "resumeGeofence: Result is: " + result); return result; } public boolean registerForMonitorStateChangeCallback(int monitoringType, IGeofenceHardwareMonitorCallback callback) { Message reaperMessage = mReaperHandler.obtainMessage(REAPER_MONITOR_CALLBACK_ADDED, callback); reaperMessage.arg1 = monitoringType; mReaperHandler.sendMessage(reaperMessage); Message m = mCallbacksHandler.obtainMessage(CALLBACK_ADD, callback); m.arg1 = monitoringType; mCallbacksHandler.sendMessage(m); return true; } public boolean unregisterForMonitorStateChangeCallback(int monitoringType, IGeofenceHardwareMonitorCallback callback) { Message m = mCallbacksHandler.obtainMessage(CALLBACK_REMOVE, callback); m.arg1 = monitoringType; mCallbacksHandler.sendMessage(m); return true; } /** * Used to report geofence transitions */ public void reportGeofenceTransition( int geofenceId, Location location, int transition, long transitionTimestamp, int monitoringType, int sourcesUsed) { if(location == null) 
{ Log.e(TAG, String.format("Invalid Geofence Transition: location=null")); return; } if(DEBUG) { Log.d( TAG, "GeofenceTransition| " + location + ", transition:" + transition + ", transitionTimestamp:" + transitionTimestamp + ", monitoringType:" + monitoringType + ", sourcesUsed:" + sourcesUsed); } GeofenceTransition geofenceTransition = new GeofenceTransition( geofenceId, transition, transitionTimestamp, location, monitoringType, sourcesUsed); acquireWakeLock(); Message message = mGeofenceHandler.obtainMessage( GEOFENCE_TRANSITION_CALLBACK, geofenceTransition); message.sendToTarget(); } /** * Used to report Monitor status changes. */ public void reportGeofenceMonitorStatus( int monitoringType, int monitoringStatus, Location location, int source) { setMonitorAvailability(monitoringType, monitoringStatus); acquireWakeLock(); GeofenceHardwareMonitorEvent event = new GeofenceHardwareMonitorEvent( monitoringType, monitoringStatus, source, location); Message message = mCallbacksHandler.obtainMessage(GEOFENCE_STATUS, event); message.sendToTarget(); } /** * Internal generic status report function for Geofence operations. * * @param operation The operation to be reported as defined internally. * @param geofenceId The id of the geofence the operation is related to. * @param operationStatus The status of the operation as defined in GeofenceHardware class. This * status is independent of the statuses reported by different HALs. */ private void reportGeofenceOperationStatus(int operation, int geofenceId, int operationStatus) { acquireWakeLock(); Message message = mGeofenceHandler.obtainMessage(operation); message.arg1 = geofenceId; message.arg2 = operationStatus; message.sendToTarget(); } /** * Used to report the status of a Geofence Add operation. 
*/ public void reportGeofenceAddStatus(int geofenceId, int status) { if(DEBUG) Log.d(TAG, "AddCallback| id:" + geofenceId + ", status:" + status); reportGeofenceOperationStatus(ADD_GEOFENCE_CALLBACK, geofenceId, status); } /** * Used to report the status of a Geofence Remove operation. */ public void reportGeofenceRemoveStatus(int geofenceId, int status) { if(DEBUG) Log.d(TAG, "RemoveCallback| id:" + geofenceId + ", status:" + status); reportGeofenceOperationStatus(REMOVE_GEOFENCE_CALLBACK, geofenceId, status); } /** * Used to report the status of a Geofence Pause operation. */ public void reportGeofencePauseStatus(int geofenceId, int status) { if(DEBUG) Log.d(TAG, "PauseCallbac| id:" + geofenceId + ", status" + status); reportGeofenceOperationStatus(PAUSE_GEOFENCE_CALLBACK, geofenceId, status); } /** * Used to report the status of a Geofence Resume operation. */ public void reportGeofenceResumeStatus(int geofenceId, int status) { if(DEBUG) Log.d(TAG, "ResumeCallback| id:" + geofenceId + ", status:" + status); reportGeofenceOperationStatus(RESUME_GEOFENCE_CALLBACK, geofenceId, status); } // All operations on mGeofences private Handler mGeofenceHandler = new Handler() { @Override public void handleMessage(Message msg) { int geofenceId; int status; IGeofenceHardwareCallback callback; switch (msg.what) { case ADD_GEOFENCE_CALLBACK: geofenceId = msg.arg1; synchronized (mGeofences) { callback = mGeofences.get(geofenceId); } if (callback != null) { try { callback.onGeofenceAdd(geofenceId, msg.arg2); } catch (RemoteException e) {Log.i(TAG, "Remote Exception:" + e);} } releaseWakeLock(); break; case REMOVE_GEOFENCE_CALLBACK: geofenceId = msg.arg1; synchronized (mGeofences) { callback = mGeofences.get(geofenceId); } if (callback != null) { try { callback.onGeofenceRemove(geofenceId, msg.arg2); } catch (RemoteException e) {} IBinder callbackBinder = callback.asBinder(); boolean callbackInUse = false; synchronized (mGeofences) { mGeofences.remove(geofenceId); // Check if the 
underlying binder is still useful for other geofences, // if no, unlink the DeathRecipient to avoid memory leak. for (int i = 0; i < mGeofences.size(); i++) { if (mGeofences.valueAt(i).asBinder() == callbackBinder) { callbackInUse = true; break; } } } // Remove the reaper associated with this binder. if (!callbackInUse) { for (Iterator<Reaper> iterator = mReapers.iterator(); iterator.hasNext();) { Reaper reaper = iterator.next(); if (reaper.mCallback != null && reaper.mCallback.asBinder() == callbackBinder) { iterator.remove(); reaper.unlinkToDeath(); if (DEBUG) Log.d(TAG, String.format("Removed reaper %s " + "because binder %s is no longer needed.", reaper, callbackBinder)); } } } } releaseWakeLock(); break; case PAUSE_GEOFENCE_CALLBACK: geofenceId = msg.arg1; synchronized (mGeofences) { callback = mGeofences.get(geofenceId); } if (callback != null) { try { callback.onGeofencePause(geofenceId, msg.arg2); } catch (RemoteException e) {} } releaseWakeLock(); break; case RESUME_GEOFENCE_CALLBACK: geofenceId = msg.arg1; synchronized (mGeofences) { callback = mGeofences.get(geofenceId); } if (callback != null) { try { callback.onGeofenceResume(geofenceId, msg.arg2); } catch (RemoteException e) {} } releaseWakeLock(); break; case GEOFENCE_TRANSITION_CALLBACK: GeofenceTransition geofenceTransition = (GeofenceTransition)(msg.obj); synchronized (mGeofences) { callback = mGeofences.get(geofenceTransition.mGeofenceId); // need to keep access to mGeofences synchronized at all times if (DEBUG) Log.d(TAG, "GeofenceTransistionCallback: GPS : GeofenceId: " + geofenceTransition.mGeofenceId + " Transition: " + geofenceTransition.mTransition + " Location: " + geofenceTransition.mLocation + ":" + mGeofences); } if (callback != null) { try { callback.onGeofenceTransition( geofenceTransition.mGeofenceId, geofenceTransition.mTransition, geofenceTransition.mLocation, geofenceTransition.mTimestamp, geofenceTransition.mMonitoringType); } catch (RemoteException e) {} } releaseWakeLock(); 
break; case GEOFENCE_CALLBACK_BINDER_DIED: // Find all geofences associated with this callback and remove them. callback = (IGeofenceHardwareCallback) (msg.obj); if (DEBUG) Log.d(TAG, "Geofence callback reaped:" + callback); int monitoringType = msg.arg1; synchronized (mGeofences) { for (int i = 0; i < mGeofences.size(); i++) { if (mGeofences.valueAt(i).equals(callback)) { geofenceId = mGeofences.keyAt(i); removeGeofence(mGeofences.keyAt(i), monitoringType); mGeofences.remove(geofenceId); } } } } } }; // All operations on mCallbacks private Handler mCallbacksHandler = new Handler() { @Override public void handleMessage(Message msg) { int monitoringType; ArrayList<IGeofenceHardwareMonitorCallback> callbackList; IGeofenceHardwareMonitorCallback callback; switch (msg.what) { case GEOFENCE_STATUS: GeofenceHardwareMonitorEvent event = (GeofenceHardwareMonitorEvent) msg.obj; callbackList = mCallbacks[event.getMonitoringType()]; if (callbackList != null) { if (DEBUG) Log.d(TAG, "MonitoringSystemChangeCallback: " + event); for (IGeofenceHardwareMonitorCallback c : callbackList) { try { c.onMonitoringSystemChange(event); } catch (RemoteException e) { Log.d(TAG, "Error reporting onMonitoringSystemChange.", e); } } } releaseWakeLock(); break; case CALLBACK_ADD: monitoringType = msg.arg1; callback = (IGeofenceHardwareMonitorCallback) msg.obj; callbackList = mCallbacks[monitoringType]; if (callbackList == null) { callbackList = new ArrayList<IGeofenceHardwareMonitorCallback>(); mCallbacks[monitoringType] = callbackList; } if (!callbackList.contains(callback)) callbackList.add(callback); break; case CALLBACK_REMOVE: monitoringType = msg.arg1; callback = (IGeofenceHardwareMonitorCallback) msg.obj; callbackList = mCallbacks[monitoringType]; if (callbackList != null) { callbackList.remove(callback); } break; case MONITOR_CALLBACK_BINDER_DIED: callback = (IGeofenceHardwareMonitorCallback) msg.obj; if (DEBUG) Log.d(TAG, "Monitor callback reaped:" + callback); callbackList = 
mCallbacks[msg.arg1]; if (callbackList != null && callbackList.contains(callback)) { callbackList.remove(callback); } } } }; // All operations on mReaper private Handler mReaperHandler = new Handler() { @Override public void handleMessage(Message msg) { Reaper r; IGeofenceHardwareCallback callback; IGeofenceHardwareMonitorCallback monitorCallback; int monitoringType; switch (msg.what) { case REAPER_GEOFENCE_ADDED: callback = (IGeofenceHardwareCallback) msg.obj; monitoringType = msg.arg1; r = new Reaper(callback, monitoringType); if (!mReapers.contains(r)) { mReapers.add(r); IBinder b = callback.asBinder(); try { b.linkToDeath(r, 0); } catch (RemoteException e) {} } break; case REAPER_MONITOR_CALLBACK_ADDED: monitorCallback = (IGeofenceHardwareMonitorCallback) msg.obj; monitoringType = msg.arg1; r = new Reaper(monitorCallback, monitoringType); if (!mReapers.contains(r)) { mReapers.add(r); IBinder b = monitorCallback.asBinder(); try { b.linkToDeath(r, 0); } catch (RemoteException e) {} } break; case REAPER_REMOVED: r = (Reaper) msg.obj; mReapers.remove(r); } } }; private class GeofenceTransition { private int mGeofenceId, mTransition; private long mTimestamp; private Location mLocation; private int mMonitoringType; private int mSourcesUsed; GeofenceTransition( int geofenceId, int transition, long timestamp, Location location, int monitoringType, int sourcesUsed) { mGeofenceId = geofenceId; mTransition = transition; mTimestamp = timestamp; mLocation = location; mMonitoringType = monitoringType; mSourcesUsed = sourcesUsed; } } private void setMonitorAvailability(int monitor, int val) { synchronized (mSupportedMonitorTypes) { mSupportedMonitorTypes[monitor] = val; } } int getMonitoringResolutionLevel(int monitoringType) { switch (monitoringType) { case GeofenceHardware.MONITORING_TYPE_GPS_HARDWARE: return RESOLUTION_LEVEL_FINE; case GeofenceHardware.MONITORING_TYPE_FUSED_HARDWARE: return RESOLUTION_LEVEL_FINE; } return RESOLUTION_LEVEL_NONE; } class Reaper implements 
IBinder.DeathRecipient { private IGeofenceHardwareMonitorCallback mMonitorCallback; private IGeofenceHardwareCallback mCallback; private int mMonitoringType; Reaper(IGeofenceHardwareCallback c, int monitoringType) { mCallback = c; mMonitoringType = monitoringType; } Reaper(IGeofenceHardwareMonitorCallback c, int monitoringType) { mMonitorCallback = c; mMonitoringType = monitoringType; } @Override public void binderDied() { Message m; if (mCallback != null) { m = mGeofenceHandler.obtainMessage(GEOFENCE_CALLBACK_BINDER_DIED, mCallback); m.arg1 = mMonitoringType; mGeofenceHandler.sendMessage(m); } else if (mMonitorCallback != null) { m = mCallbacksHandler.obtainMessage(MONITOR_CALLBACK_BINDER_DIED, mMonitorCallback); m.arg1 = mMonitoringType; mCallbacksHandler.sendMessage(m); } Message reaperMessage = mReaperHandler.obtainMessage(REAPER_REMOVED, this); mReaperHandler.sendMessage(reaperMessage); } @Override public int hashCode() { int result = 17; result = 31 * result + (mCallback != null ? mCallback.asBinder().hashCode() : 0); result = 31 * result + (mMonitorCallback != null ? mMonitorCallback.asBinder().hashCode() : 0); result = 31 * result + mMonitoringType; return result; } @Override public boolean equals(Object obj) { if (obj == null) return false; if (obj == this) return true; Reaper rhs = (Reaper) obj; return binderEquals(rhs.mCallback, mCallback) && binderEquals(rhs.mMonitorCallback, mMonitorCallback) && rhs.mMonitoringType == mMonitoringType; } /** * Compares the underlying Binder of the given two IInterface objects and returns true if * they equals. null values are accepted. */ private boolean binderEquals(IInterface left, IInterface right) { if (left == null) { return right == null; } else { return right == null ? false : left.asBinder() == right.asBinder(); } } /** * Unlinks this DeathRecipient. 
*/ private boolean unlinkToDeath() { if (mMonitorCallback != null) { return mMonitorCallback.asBinder().unlinkToDeath(this, 0); } else if (mCallback != null) { return mCallback.asBinder().unlinkToDeath(this, 0); } return true; } private boolean callbackEquals(IGeofenceHardwareCallback cb) { return mCallback != null && mCallback.asBinder() == cb.asBinder(); } } int getAllowedResolutionLevel(int pid, int uid) { if (mContext.checkPermission(android.Manifest.permission.ACCESS_FINE_LOCATION, pid, uid) == PackageManager.PERMISSION_GRANTED) { return RESOLUTION_LEVEL_FINE; } else if (mContext.checkPermission(android.Manifest.permission.ACCESS_COARSE_LOCATION, pid, uid) == PackageManager.PERMISSION_GRANTED) { return RESOLUTION_LEVEL_COARSE; } else { return RESOLUTION_LEVEL_NONE; } } }
syslover33/ctank
java/android-sdk-linux_r24.4.1_src/sources/android-23/android/hardware/location/GeofenceHardwareImpl.java
Java
gpl-3.0
37,828
package net.minecraft.server; import java.awt.GraphicsEnvironment; import java.awt.image.BufferedImage; import java.io.File; import java.net.Proxy; import java.security.KeyPair; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Date; import java.util.Iterator; import java.util.List; import java.util.Random; import java.util.UUID; import java.util.concurrent.Callable; import javax.imageio.ImageIO; import net.minecraft.util.com.google.common.base.Charsets; import net.minecraft.util.com.mojang.authlib.GameProfile; import net.minecraft.util.com.mojang.authlib.GameProfileRepository; import net.minecraft.util.com.mojang.authlib.minecraft.MinecraftSessionService; import net.minecraft.util.com.mojang.authlib.yggdrasil.YggdrasilAuthenticationService; import net.minecraft.util.io.netty.buffer.ByteBuf; import net.minecraft.util.io.netty.buffer.ByteBufOutputStream; import net.minecraft.util.io.netty.buffer.Unpooled; import net.minecraft.util.io.netty.handler.codec.base64.Base64; import net.minecraft.util.org.apache.commons.lang3.Validate; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; // CraftBukkit start import java.io.IOException; import jline.console.ConsoleReader; import joptsimple.OptionSet; import org.bukkit.World.Environment; import org.bukkit.craftbukkit.util.Waitable; import org.bukkit.event.server.RemoteServerCommandEvent; import org.bukkit.event.world.WorldSaveEvent; // CraftBukkit end public abstract class MinecraftServer implements ICommandListener, Runnable, IMojangStatistics { private static final Logger i = LogManager.getLogger(); private static final File a = new File("usercache.json"); private static MinecraftServer j; public Convertable convertable; // CraftBukkit - private final -> public private final MojangStatisticsGenerator l = new MojangStatisticsGenerator("server", this, ar()); public File universe; // CraftBukkit - private final 
-> public private final List n = new ArrayList(); private final ICommandHandler o; public final MethodProfiler methodProfiler = new MethodProfiler(); private final ServerConnection p; private final ServerPing q = new ServerPing(); private final Random r = new Random(); private String serverIp; private int t = -1; public WorldServer[] worldServer; private PlayerList u; private boolean isRunning = true; private boolean isStopped; private int ticks; protected final Proxy d; public String e; public int f; private boolean onlineMode; private boolean spawnAnimals; private boolean spawnNPCs; private boolean pvpMode; private boolean allowFlight; private String motd; private int E; private int F = 0; public final long[] g = new long[100]; public long[][] h; private KeyPair G; private String H; private String I; private boolean demoMode; private boolean L; private boolean M; private String N = ""; private boolean O; private long P; private String Q; private boolean R; private boolean S; private final YggdrasilAuthenticationService T; private final MinecraftSessionService U; private long V = 0L; private final GameProfileRepository W; private final UserCache X; // CraftBukkit start - add fields public List<WorldServer> worlds = new ArrayList<WorldServer>(); public org.bukkit.craftbukkit.CraftServer server; public OptionSet options; public org.bukkit.command.ConsoleCommandSender console; public org.bukkit.command.RemoteConsoleCommandSender remoteConsole; public ConsoleReader reader; public static int currentTick = (int) (System.currentTimeMillis() / 50); public final Thread primaryThread; public java.util.Queue<Runnable> processQueue = new java.util.concurrent.ConcurrentLinkedQueue<Runnable>(); public int autosavePeriod; // CraftBukkit end public MinecraftServer(OptionSet options, Proxy proxy) { // CraftBukkit - signature file -> OptionSet this.X = new UserCache(this, a); j = this; this.d = proxy; // this.universe = file1; // CraftBukkit this.p = new ServerConnection(this); 
this.o = new CommandDispatcher(); // this.convertable = new WorldLoaderServer(file1); // CraftBukkit - moved to DedicatedServer.init this.T = new YggdrasilAuthenticationService(proxy, UUID.randomUUID().toString()); this.U = this.T.createMinecraftSessionService(); this.W = this.T.createProfileRepository(); // CraftBukkit start this.options = options; // Try to see if we're actually running in a terminal, disable jline if not if (System.console() == null) { System.setProperty("jline.terminal", "jline.UnsupportedTerminal"); org.bukkit.craftbukkit.Main.useJline = false; } try { this.reader = new ConsoleReader(System.in, System.out); this.reader.setExpandEvents(false); // Avoid parsing exceptions for uncommonly used event designators } catch (Throwable e) { try { // Try again with jline disabled for Windows users without C++ 2008 Redistributable System.setProperty("jline.terminal", "jline.UnsupportedTerminal"); System.setProperty("user.language", "en"); org.bukkit.craftbukkit.Main.useJline = false; this.reader = new ConsoleReader(System.in, System.out); this.reader.setExpandEvents(false); } catch (IOException ex) { i.warn((String) null, ex); } } Runtime.getRuntime().addShutdownHook(new org.bukkit.craftbukkit.util.ServerShutdownThread(this)); primaryThread = new ThreadServerApplication(this, "Server thread"); // Moved from main } public abstract PropertyManager getPropertyManager(); // CraftBukkit end protected abstract boolean init() throws java.net.UnknownHostException; // CraftBukkit - throws UnknownHostException protected void a(String s) { if (this.getConvertable().isConvertable(s)) { i.info("Converting map!"); this.b("menu.convertingLevel"); this.getConvertable().convert(s, new ConvertProgressUpdater(this)); } } protected synchronized void b(String s) { this.Q = s; } protected void a(String s, String s1, long i, WorldType worldtype, String s2) { this.a(s); this.b("menu.loadingLevel"); this.worldServer = new WorldServer[3]; // this.h = new 
long[this.worldServer.length][100]; // CraftBukkit - Removed ticktime arrays // IDataManager idatamanager = this.convertable.a(s, true); // WorldData worlddata = idatamanager.getWorldData(); /* CraftBukkit start - Removed worldsettings WorldSettings worldsettings; if (worlddata == null) { worldsettings = new WorldSettings(i, this.getGamemode(), this.getGenerateStructures(), this.isHardcore(), worldtype); worldsettings.a(s2); } else { worldsettings = new WorldSettings(worlddata); } if (this.L) { worldsettings.a(); } // */ int worldCount = 3; for (int j = 0; j < worldCount; ++j) { WorldServer world; int dimension = 0; if (j == 1) { if (this.getAllowNether()) { dimension = -1; } else { continue; } } if (j == 2) { if (this.server.getAllowEnd()) { dimension = 1; } else { continue; } } String worldType = Environment.getEnvironment(dimension).toString().toLowerCase(); String name = (dimension == 0) ? s : s + "_" + worldType; org.bukkit.generator.ChunkGenerator gen = this.server.getGenerator(name); WorldSettings worldsettings = new WorldSettings(i, this.getGamemode(), this.getGenerateStructures(), this.isHardcore(), worldtype); worldsettings.a(s2); if (j == 0) { IDataManager idatamanager = new ServerNBTManager(server.getWorldContainer(), s1, true); if (this.R()) { world = new DemoWorldServer(this, idatamanager, s1, dimension, this.methodProfiler); } else { // world =, b0 to dimension, added Environment and gen world = new WorldServer(this, idatamanager, s1, dimension, worldsettings, this.methodProfiler, Environment.getEnvironment(dimension), gen); } this.server.scoreboardManager = new org.bukkit.craftbukkit.scoreboard.CraftScoreboardManager(this, world.getScoreboard()); } else { String dim = "DIM" + dimension; File newWorld = new File(new File(name), dim); File oldWorld = new File(new File(s), dim); if ((!newWorld.isDirectory()) && (oldWorld.isDirectory())) { MinecraftServer.i.info("---- Migration of old " + worldType + " folder required ----"); 
MinecraftServer.i.info("Unfortunately due to the way that Minecraft implemented multiworld support in 1.6, Bukkit requires that you move your " + worldType + " folder to a new location in order to operate correctly."); MinecraftServer.i.info("We will move this folder for you, but it will mean that you need to move it back should you wish to stop using Bukkit in the future."); MinecraftServer.i.info("Attempting to move " + oldWorld + " to " + newWorld + "..."); if (newWorld.exists()) { MinecraftServer.i.warn("A file or folder already exists at " + newWorld + "!"); MinecraftServer.i.info("---- Migration of old " + worldType + " folder failed ----"); } else if (newWorld.getParentFile().mkdirs()) { if (oldWorld.renameTo(newWorld)) { MinecraftServer.i.info("Success! To restore " + worldType + " in the future, simply move " + newWorld + " to " + oldWorld); // Migrate world data too. try { com.google.common.io.Files.copy(new File(new File(s), "level.dat"), new File(new File(name), "level.dat")); } catch (IOException exception) { MinecraftServer.i.warn("Unable to migrate world data."); } MinecraftServer.i.info("---- Migration of old " + worldType + " folder complete ----"); } else { MinecraftServer.i.warn("Could not move folder " + oldWorld + " to " + newWorld + "!"); MinecraftServer.i.info("---- Migration of old " + worldType + " folder failed ----"); } } else { MinecraftServer.i.warn("Could not create path for " + newWorld + "!"); MinecraftServer.i.info("---- Migration of old " + worldType + " folder failed ----"); } } IDataManager idatamanager = new ServerNBTManager(server.getWorldContainer(), name, true); // world =, b0 to dimension, s1 to name, added Environment and gen world = new SecondaryWorldServer(this, idatamanager, name, dimension, worldsettings, this.worlds.get(0), this.methodProfiler, Environment.getEnvironment(dimension), gen); } if (gen != null) { world.getWorld().getPopulators().addAll(gen.getDefaultPopulators(world.getWorld())); } 
this.server.getPluginManager().callEvent(new org.bukkit.event.world.WorldInitEvent(world.getWorld())); world.addIWorldAccess(new WorldManager(this, world)); if (!this.N()) { world.getWorldData().setGameType(this.getGamemode()); } this.worlds.add(world); this.u.setPlayerFileData(this.worlds.toArray(new WorldServer[this.worlds.size()])); // CraftBukkit end } this.a(this.getDifficulty()); this.g(); } protected void g() { boolean flag = true; boolean flag1 = true; boolean flag2 = true; boolean flag3 = true; int i = 0; this.b("menu.generatingTerrain"); byte b0 = 0; // CraftBukkit start - fire WorldLoadEvent and handle whether or not to keep the spawn in memory for (int m = 0; m < this.worlds.size(); ++m) { WorldServer worldserver = this.worlds.get(m); MinecraftServer.i.info("Preparing start region for level " + m + " (Seed: " + worldserver.getSeed() + ")"); if (!worldserver.getWorld().getKeepSpawnInMemory()) { continue; } ChunkCoordinates chunkcoordinates = worldserver.getSpawn(); long j = ar(); i = 0; for (int k = -192; k <= 192 && this.isRunning(); k += 16) { for (int l = -192; l <= 192 && this.isRunning(); l += 16) { long i1 = ar(); if (i1 - j > 1000L) { this.a_("Preparing spawn area", i * 100 / 625); j = i1; } ++i; worldserver.chunkProviderServer.getChunkAt(chunkcoordinates.x + k >> 4, chunkcoordinates.z + l >> 4); } } } for (WorldServer world : this.worlds) { this.server.getPluginManager().callEvent(new org.bukkit.event.world.WorldLoadEvent(world.getWorld())); } // CraftBukkit end this.n(); } public abstract boolean getGenerateStructures(); public abstract EnumGamemode getGamemode(); public abstract EnumDifficulty getDifficulty(); public abstract boolean isHardcore(); public abstract int l(); public abstract boolean m(); protected void a_(String s, int i) { this.e = s; this.f = i; // CraftBukkit - Use FQN to work around decompiler issue MinecraftServer.i.info(s + ": " + i + "%"); } protected void n() { this.e = null; this.f = 0; 
this.server.enablePlugins(org.bukkit.plugin.PluginLoadOrder.POSTWORLD); // CraftBukkit } protected void saveChunks(boolean flag) throws ExceptionWorldConflict { // CraftBukkit - added throws if (!this.M) { // CraftBukkit start - fire WorldSaveEvent // WorldServer[] aworldserver = this.worldServer; int i = this.worlds.size(); for (int j = 0; j < i; ++j) { WorldServer worldserver = this.worlds.get(j); if (worldserver != null) { if (!flag) { MinecraftServer.i.info("Saving chunks for level \'" + worldserver.getWorldData().getName() + "\'/" + worldserver.worldProvider.getName()); } worldserver.save(true, (IProgressUpdate) null); worldserver.saveLevel(); WorldSaveEvent event = new WorldSaveEvent(worldserver.getWorld()); this.server.getPluginManager().callEvent(event); // CraftBukkit end } } } } public void stop() throws ExceptionWorldConflict { // CraftBukkit - added throws if (!this.M) { i.info("Stopping server"); // CraftBukkit start if (this.server != null) { this.server.disablePlugins(); } // CraftBukkit end if (this.ai() != null) { this.ai().b(); } if (this.u != null) { i.info("Saving players"); this.u.savePlayers(); this.u.u(); } if (this.worldServer != null) { i.info("Saving worlds"); this.saveChunks(false); /* CraftBukkit start - Handled in saveChunks for (int i = 0; i < this.worldServer.length; ++i) { WorldServer worldserver = this.worldServer[i]; worldserver.saveLevel(); } // CraftBukkit end */ } if (this.l.d()) { this.l.e(); } } } public String getServerIp() { return this.serverIp; } public void c(String s) { this.serverIp = s; } public boolean isRunning() { return this.isRunning; } public void safeShutdown() { this.isRunning = false; } public void run() { try { if (this.init()) { long i = ar(); long j = 0L; this.q.setMOTD(new ChatComponentText(this.motd)); this.q.setServerInfo(new ServerPingServerData("1.7.10", 5)); this.a(this.q); while (this.isRunning) { long k = ar(); long l = k - i; if (l > 2000L && i - this.P >= 15000L) { if 
(this.server.getWarnOnOverload()) // CraftBukkit - Added option to suppress warning messages MinecraftServer.i.warn("Can\'t keep up! Did the system time change, or is the server overloaded? Running {}ms behind, skipping {} tick(s)", new Object[] { Long.valueOf(l), Long.valueOf(l / 50L)}); l = 2000L; this.P = i; } if (l < 0L) { MinecraftServer.i.warn("Time ran backwards! Did the system time change?"); l = 0L; } j += l; i = k; if (this.worlds.get(0).everyoneDeeplySleeping()) { // CraftBukkit this.u(); j = 0L; } else { while (j > 50L) { MinecraftServer.currentTick = (int) (System.currentTimeMillis() / 50); // CraftBukkit j -= 50L; this.u(); } } Thread.sleep(Math.max(1L, 50L - j)); this.O = true; } } else { this.a((CrashReport) null); } } catch (Throwable throwable) { i.error("Encountered an unexpected exception", throwable); CrashReport crashreport = null; if (throwable instanceof ReportedException) { crashreport = this.b(((ReportedException) throwable).a()); } else { crashreport = this.b(new CrashReport("Exception in server tick loop", throwable)); } File file1 = new File(new File(this.s(), "crash-reports"), "crash-" + (new SimpleDateFormat("yyyy-MM-dd_HH.mm.ss")).format(new Date()) + "-server.txt"); if (crashreport.a(file1)) { i.error("This crash report has been saved to: " + file1.getAbsolutePath()); } else { i.error("We were unable to save this crash report to disk."); } this.a(crashreport); } finally { try { this.stop(); this.isStopped = true; } catch (Throwable throwable1) { i.error("Exception stopping the server", throwable1); } finally { // CraftBukkit start - Restore terminal to original settings try { this.reader.getTerminal().restore(); } catch (Exception e) { } // CraftBukkit end this.t(); } } } private void a(ServerPing serverping) { File file1 = this.d("server-icon.png"); if (file1.isFile()) { ByteBuf bytebuf = Unpooled.buffer(); try { BufferedImage bufferedimage = ImageIO.read(file1); Validate.validState(bufferedimage.getWidth() == 64, "Must be 64 
pixels wide", new Object[0]); Validate.validState(bufferedimage.getHeight() == 64, "Must be 64 pixels high", new Object[0]); ImageIO.write(bufferedimage, "PNG", new ByteBufOutputStream(bytebuf)); ByteBuf bytebuf1 = Base64.encode(bytebuf); serverping.setFavicon("data:image/png;base64," + bytebuf1.toString(Charsets.UTF_8)); } catch (Exception exception) { i.error("Couldn\'t load server icon", exception); } finally { bytebuf.release(); } } } protected File s() { return new File("."); } protected void a(CrashReport crashreport) {} protected void t() {} protected void u() throws ExceptionWorldConflict { // CraftBukkit - added throws long i = System.nanoTime(); ++this.ticks; if (this.R) { this.R = false; this.methodProfiler.a = true; this.methodProfiler.a(); } this.methodProfiler.a("root"); this.v(); if (i - this.V >= 5000000000L) { this.V = i; this.q.setPlayerSample(new ServerPingPlayerSample(this.D(), this.C())); GameProfile[] agameprofile = new GameProfile[Math.min(this.C(), 12)]; int j = MathHelper.nextInt(this.r, 0, this.C() - agameprofile.length); for (int k = 0; k < agameprofile.length; ++k) { agameprofile[k] = ((EntityPlayer) this.u.players.get(j + k)).getProfile(); } Collections.shuffle(Arrays.asList(agameprofile)); this.q.b().a(agameprofile); } if ((this.autosavePeriod > 0) && ((this.ticks % this.autosavePeriod) == 0)) { // CraftBukkit this.methodProfiler.a("save"); this.u.savePlayers(); this.saveChunks(true); this.methodProfiler.b(); } this.methodProfiler.a("tallying"); this.g[this.ticks % 100] = System.nanoTime() - i; this.methodProfiler.b(); this.methodProfiler.a("snooper"); if (!this.l.d() && this.ticks > 100) { this.l.a(); } if (this.ticks % 6000 == 0) { this.l.b(); } this.methodProfiler.b(); this.methodProfiler.b(); } public void v() { this.methodProfiler.a("levels"); // CraftBukkit start this.server.getScheduler().mainThreadHeartbeat(this.ticks); // Run tasks that are waiting on processing while (!processQueue.isEmpty()) { processQueue.remove().run(); } 
org.bukkit.craftbukkit.chunkio.ChunkIOExecutor.tick(); // Send time updates to everyone, it will get the right time from the world the player is in. if (this.ticks % 20 == 0) { for (int i = 0; i < this.getPlayerList().players.size(); ++i) { EntityPlayer entityplayer = (EntityPlayer) this.getPlayerList().players.get(i); entityplayer.playerConnection.sendPacket(new PacketPlayOutUpdateTime(entityplayer.world.getTime(), entityplayer.getPlayerTime(), entityplayer.world.getGameRules().getBoolean("doDaylightCycle"))); // Add support for per player time } } int i; for (i = 0; i < this.worlds.size(); ++i) { long j = System.nanoTime(); // if (i == 0 || this.getAllowNether()) { WorldServer worldserver = this.worlds.get(i); this.methodProfiler.a(worldserver.getWorldData().getName()); this.methodProfiler.a("pools"); this.methodProfiler.b(); /* Drop global time updates if (this.ticks % 20 == 0) { this.methodProfiler.a("timeSync"); this.t.a(new PacketPlayOutUpdateTime(worldserver.getTime(), worldserver.getDayTime(), worldserver.getGameRules().getBoolean("doDaylightCycle")), worldserver.worldProvider.dimension); this.methodProfiler.b(); } // CraftBukkit end */ this.methodProfiler.a("tick"); CrashReport crashreport; try { worldserver.doTick(); } catch (Throwable throwable) { crashreport = CrashReport.a(throwable, "Exception ticking world"); worldserver.a(crashreport); throw new ReportedException(crashreport); } try { worldserver.tickEntities(); } catch (Throwable throwable1) { crashreport = CrashReport.a(throwable1, "Exception ticking world entities"); worldserver.a(crashreport); throw new ReportedException(crashreport); } this.methodProfiler.b(); this.methodProfiler.a("tracker"); worldserver.getTracker().updatePlayers(); this.methodProfiler.b(); this.methodProfiler.b(); // } // CraftBukkit // this.h[i][this.ticks % 100] = System.nanoTime() - j; // CraftBukkit } this.methodProfiler.c("connection"); this.ai().c(); this.methodProfiler.c("players"); this.u.tick(); 
this.methodProfiler.c("tickables"); for (i = 0; i < this.n.size(); ++i) { ((IUpdatePlayerListBox) this.n.get(i)).a(); } this.methodProfiler.b(); } public boolean getAllowNether() { return true; } public void a(IUpdatePlayerListBox iupdateplayerlistbox) { this.n.add(iupdateplayerlistbox); } public static void main(final OptionSet options) { // CraftBukkit - replaces main(String[] astring) DispenserRegistry.b(); try { /* CraftBukkit start - Replace everything boolean flag = true; String s = null; String s1 = "."; String s2 = null; boolean flag1 = false; boolean flag2 = false; int i = -1; for (int j = 0; j < astring.length; ++j) { String s3 = astring[j]; String s4 = j == astring.length - 1 ? null : astring[j + 1]; boolean flag3 = false; if (!s3.equals("nogui") && !s3.equals("--nogui")) { if (s3.equals("--port") && s4 != null) { flag3 = true; try { i = Integer.parseInt(s4); } catch (NumberFormatException numberformatexception) { ; } } else if (s3.equals("--singleplayer") && s4 != null) { flag3 = true; s = s4; } else if (s3.equals("--universe") && s4 != null) { flag3 = true; s1 = s4; } else if (s3.equals("--world") && s4 != null) { flag3 = true; s2 = s4; } else if (s3.equals("--demo")) { flag1 = true; } else if (s3.equals("--bonusChest")) { flag2 = true; } } else { flag = false; } if (flag3) { ++j; } } DedicatedServer dedicatedserver = new DedicatedServer(new File(s1)); if (s != null) { dedicatedserver.j(s); } if (s2 != null) { dedicatedserver.k(s2); } if (i >= 0) { dedicatedserver.setPort(i); } if (flag1) { dedicatedserver.b(true); } if (flag2) { dedicatedserver.c(true); } if (flag) { dedicatedserver.aD(); } // */ DedicatedServer dedicatedserver = new DedicatedServer(options); if (options.has("port")) { int port = (Integer) options.valueOf("port"); if (port > 0) { dedicatedserver.setPort(port); } } if (options.has("universe")) { dedicatedserver.universe = (File) options.valueOf("universe"); } if (options.has("world")) { dedicatedserver.k((String) 
options.valueOf("world")); } dedicatedserver.primaryThread.start(); // Runtime.getRuntime().addShutdownHook(new ThreadShutdown("Server Shutdown Thread", dedicatedserver)); // CraftBukkit end } catch (Exception exception) { i.fatal("Failed to start the minecraft server", exception); } } public void x() { // (new ThreadServerApplication(this, "Server thread")).start(); // CraftBukkit - prevent abuse } public File d(String s) { return new File(this.s(), s); } public void info(String s) { i.info(s); } public void warning(String s) { i.warn(s); } public WorldServer getWorldServer(int i) { // CraftBukkit start for (WorldServer world : this.worlds) { if (world.dimension == i) { return world; } } return this.worlds.get(0); // CraftBukkit end } public String y() { return this.serverIp; } public int z() { return this.t; } public String A() { return this.motd; } public String getVersion() { return "1.7.10"; } public int C() { return this.u.getPlayerCount(); } public int D() { return this.u.getMaxPlayers(); } public String[] getPlayers() { return this.u.f(); } public GameProfile[] F() { return this.u.g(); } public String getPlugins() { // CraftBukkit start - Whole method StringBuilder result = new StringBuilder(); org.bukkit.plugin.Plugin[] plugins = server.getPluginManager().getPlugins(); result.append(server.getName()); result.append(" on Bukkit "); result.append(server.getBukkitVersion()); if (plugins.length > 0 && this.server.getQueryPlugins()) { result.append(": "); for (int i = 0; i < plugins.length; i++) { if (i > 0) { result.append("; "); } result.append(plugins[i].getDescription().getName()); result.append(" "); result.append(plugins[i].getDescription().getVersion().replaceAll(";", ",")); } } return result.toString(); // CraftBukkit end } // CraftBukkit start - fire RemoteServerCommandEvent public String g(final String s) { // final parameter Waitable<String> waitable = new Waitable<String>() { @Override protected String evaluate() { 
RemoteControlCommandListener.instance.e(); // Event changes start RemoteServerCommandEvent event = new RemoteServerCommandEvent(MinecraftServer.this.remoteConsole, s); MinecraftServer.this.server.getPluginManager().callEvent(event); // Event changes end ServerCommand servercommand = new ServerCommand(event.getCommand(), RemoteControlCommandListener.instance); MinecraftServer.this.server.dispatchServerCommand(MinecraftServer.this.remoteConsole, servercommand); // CraftBukkit // this.o.a(RemoteControlCommandListener.instance, s); return RemoteControlCommandListener.instance.f(); }}; processQueue.add(waitable); try { return waitable.get(); } catch (java.util.concurrent.ExecutionException e) { throw new RuntimeException("Exception processing rcon command " + s, e.getCause()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); // Maintain interrupted state throw new RuntimeException("Interrupted processing rcon command " + s, e); } // CraftBukkit end } public boolean isDebugging() { return this.getPropertyManager().getBoolean("debug", false); // CraftBukkit - don't hardcode } public void h(String s) { i.error(s); } public void i(String s) { if (this.isDebugging()) { i.info(s); } } public String getServerModName() { return server.getName(); // CraftBukkit - cb > vanilla! 
} public CrashReport b(CrashReport crashreport) { crashreport.g().a("Profiler Position", (Callable) (new CrashReportProfilerPosition(this))); if (this.worlds != null && this.worlds.size() > 0 && this.worlds.get(0) != null) { // CraftBukkit crashreport.g().a("Vec3 Pool Size", (Callable) (new CrashReportVec3DPoolSize(this))); } if (this.u != null) { crashreport.g().a("Player Count", (Callable) (new CrashReportPlayerCount(this))); } return crashreport; } public List a(ICommandListener icommandlistener, String s) { // CraftBukkit start - Allow tab-completion of Bukkit commands /* ArrayList arraylist = new ArrayList(); if (s.startsWith("/")) { s = s.substring(1); boolean flag = !s.contains(" "); List list = this.o.b(icommandlistener, s); if (list != null) { Iterator iterator = list.iterator(); while (iterator.hasNext()) { String s1 = (String) iterator.next(); if (flag) { arraylist.add("/" + s1); } else { arraylist.add(s1); } } } return arraylist; } else { String[] astring = s.split(" ", -1); String s2 = astring[astring.length - 1]; String[] astring1 = this.u.f(); int i = astring1.length; for (int j = 0; j < i; ++j) { String s3 = astring1[j]; if (CommandAbstract.a(s2, s3)) { arraylist.add(s3); } } return arraylist; } */ return this.server.tabComplete(icommandlistener, s); // CraftBukkit end } public static MinecraftServer getServer() { return j; } public String getName() { return "Server"; } public void sendMessage(IChatBaseComponent ichatbasecomponent) { i.info(ichatbasecomponent.c()); } public boolean a(int i, String s) { return true; } public ICommandHandler getCommandHandler() { return this.o; } public KeyPair K() { return this.G; } public int L() { return this.t; } public void setPort(int i) { this.t = i; } public String M() { return this.H; } public void j(String s) { this.H = s; } public boolean N() { return this.H != null; } public String O() { return this.I; } public void k(String s) { this.I = s; } public void a(KeyPair keypair) { this.G = keypair; } public 
void a(EnumDifficulty enumdifficulty) { // CraftBukkit start - Use worlds list for iteration for (int j = 0; j < this.worlds.size(); ++j) { WorldServer worldserver = this.worlds.get(j); // CraftBukkit end if (worldserver != null) { if (worldserver.getWorldData().isHardcore()) { worldserver.difficulty = EnumDifficulty.HARD; worldserver.setSpawnFlags(true, true); } else if (this.N()) { worldserver.difficulty = enumdifficulty; worldserver.setSpawnFlags(worldserver.difficulty != EnumDifficulty.PEACEFUL, true); } else { worldserver.difficulty = enumdifficulty; worldserver.setSpawnFlags(this.getSpawnMonsters(), this.spawnAnimals); } } } } protected boolean getSpawnMonsters() { return true; } public boolean R() { return this.demoMode; } public void b(boolean flag) { this.demoMode = flag; } public void c(boolean flag) { this.L = flag; } public Convertable getConvertable() { return this.convertable; } public void U() { this.M = true; this.getConvertable().d(); // CraftBukkit start for (int i = 0; i < this.worlds.size(); ++i) { WorldServer worldserver = this.worlds.get(i); // CraftBukkit end if (worldserver != null) { worldserver.saveLevel(); } } this.getConvertable().e(this.worlds.get(0).getDataManager().g()); // CraftBukkit this.safeShutdown(); } public String getResourcePack() { return this.N; } public void setTexturePack(String s) { this.N = s; } public void a(MojangStatisticsGenerator mojangstatisticsgenerator) { mojangstatisticsgenerator.a("whitelist_enabled", Boolean.valueOf(false)); mojangstatisticsgenerator.a("whitelist_count", Integer.valueOf(0)); mojangstatisticsgenerator.a("players_current", Integer.valueOf(this.C())); mojangstatisticsgenerator.a("players_max", Integer.valueOf(this.D())); mojangstatisticsgenerator.a("players_seen", Integer.valueOf(this.u.getSeenPlayers().length)); mojangstatisticsgenerator.a("uses_auth", Boolean.valueOf(this.onlineMode)); mojangstatisticsgenerator.a("gui_state", this.ak() ? 
"enabled" : "disabled"); mojangstatisticsgenerator.a("run_time", Long.valueOf((ar() - mojangstatisticsgenerator.g()) / 60L * 1000L)); mojangstatisticsgenerator.a("avg_tick_ms", Integer.valueOf((int) (MathHelper.a(this.g) * 1.0E-6D))); int i = 0; // CraftBukkit start - use worlds list for iteration for (int j = 0; j < this.worlds.size(); ++j) { WorldServer worldserver = this.worlds.get(j); if (worldServer != null) { // CraftBukkit end WorldData worlddata = worldserver.getWorldData(); mojangstatisticsgenerator.a("world[" + i + "][dimension]", Integer.valueOf(worldserver.worldProvider.dimension)); mojangstatisticsgenerator.a("world[" + i + "][mode]", worlddata.getGameType()); mojangstatisticsgenerator.a("world[" + i + "][difficulty]", worldserver.difficulty); mojangstatisticsgenerator.a("world[" + i + "][hardcore]", Boolean.valueOf(worlddata.isHardcore())); mojangstatisticsgenerator.a("world[" + i + "][generator_name]", worlddata.getType().name()); mojangstatisticsgenerator.a("world[" + i + "][generator_version]", Integer.valueOf(worlddata.getType().getVersion())); mojangstatisticsgenerator.a("world[" + i + "][height]", Integer.valueOf(this.E)); mojangstatisticsgenerator.a("world[" + i + "][chunks_loaded]", Integer.valueOf(worldserver.L().getLoadedChunks())); ++i; } } mojangstatisticsgenerator.a("worlds", Integer.valueOf(i)); } public void b(MojangStatisticsGenerator mojangstatisticsgenerator) { mojangstatisticsgenerator.b("singleplayer", Boolean.valueOf(this.N())); mojangstatisticsgenerator.b("server_brand", this.getServerModName()); mojangstatisticsgenerator.b("gui_supported", GraphicsEnvironment.isHeadless() ? 
"headless" : "supported"); mojangstatisticsgenerator.b("dedicated", Boolean.valueOf(this.X())); } public boolean getSnooperEnabled() { return true; } public abstract boolean X(); public boolean getOnlineMode() { return this.server.getOnlineMode(); // CraftBukkit } public void setOnlineMode(boolean flag) { this.onlineMode = flag; } public boolean getSpawnAnimals() { return this.spawnAnimals; } public void setSpawnAnimals(boolean flag) { this.spawnAnimals = flag; } public boolean getSpawnNPCs() { return this.spawnNPCs; } public void setSpawnNPCs(boolean flag) { this.spawnNPCs = flag; } public boolean getPvP() { return this.pvpMode; } public void setPvP(boolean flag) { this.pvpMode = flag; } public boolean getAllowFlight() { return this.allowFlight; } public void setAllowFlight(boolean flag) { this.allowFlight = flag; } public abstract boolean getEnableCommandBlock(); public String getMotd() { return this.motd; } public void setMotd(String s) { this.motd = s; } public int getMaxBuildHeight() { return this.E; } public void c(int i) { this.E = i; } public boolean isStopped() { return this.isStopped; } public PlayerList getPlayerList() { return this.u; } public void a(PlayerList playerlist) { this.u = playerlist; } public void a(EnumGamemode enumgamemode) { // CraftBukkit start - use worlds list for iteration for (int i = 0; i < this.worlds.size(); ++i) { getServer().worlds.get(i).getWorldData().setGameType(enumgamemode); // CraftBukkit end } } public ServerConnection ai() { return this.p; } public boolean ak() { return false; } public abstract String a(EnumGamemode enumgamemode, boolean flag); public int al() { return this.ticks; } public void am() { this.R = true; } public ChunkCoordinates getChunkCoordinates() { return new ChunkCoordinates(0, 0, 0); } public World getWorld() { return this.worlds.get(0); // CraftBukkit } public int getSpawnProtection() { return 16; } public boolean a(World world, int i, int j, int k, EntityHuman entityhuman) { return false; } public 
void setForceGamemode(boolean flag) { this.S = flag; } public boolean getForceGamemode() { return this.S; } public Proxy aq() { return this.d; } public static long ar() { return System.currentTimeMillis(); } public int getIdleTimeout() { return this.F; } public void setIdleTimeout(int i) { this.F = i; } public IChatBaseComponent getScoreboardDisplayName() { return new ChatComponentText(this.getName()); } public boolean at() { return true; } public MinecraftSessionService av() { return this.U; } public GameProfileRepository getGameProfileRepository() { return this.W; } public UserCache getUserCache() { return this.X; } public ServerPing ay() { return this.q; } public void az() { this.V = 0L; } public static Logger getLogger() { return i; } public static PlayerList a(MinecraftServer minecraftserver) { return minecraftserver.u; } }
OvercastNetwork/CraftBukkit
src/main/java/net/minecraft/server/MinecraftServer.java
Java
gpl-3.0
44,601
<?php
// Installer step: show the product license and require explicit acceptance
// before the user may continue to the next setup page (configFile.php).
require('header.php');

if($_SERVER['REQUEST_METHOD']=='POST'){
	// checkRequired() is truthy when all fields marked 'required' were
	// submitted (here: the "agree" checkbox rendered below) — presumably it
	// registers an error message otherwise; TODO confirm in GO\Base\Html\Error.
	if(\GO\Base\Html\Error::checkRequired())
		redirect("configFile.php");
}

printHead();
?>
<h1>License terms</h1>
<p>The following license applies to this product:</p>
<div class="cmd">
<?php
// Render the bundled plain-text license as HTML.
echo \GO\Base\Util\StringHelper::text_to_html(file_get_contents('../LICENSE.TXT'));
?>
</div>
<?php
// Mandatory acceptance checkbox; the POST handler above re-validates it.
\GO\Base\Html\Checkbox::render(array(
	'required'=>true,
	'name'=>'agree',
	'value'=>1,
	'label'=>'I agree to the terms of the above license.'
));

continueButton();
printFoot();
deependhulla/powermail-debian9
files/rootdir/usr/local/src/groupoffice-6.2/groupoffice-6.2-setup-www/install_disabled/license.php
PHP
gpl-3.0
548
# Package version string, exposed as a PEP 396 style ``__version__``
# attribute so callers can introspect the installed release.
__version__ = '0.17'
etherkit/OpenBeacon2
macos/venv/lib/python3.8/site-packages/PyInstaller/lib/modulegraph/__init__.py
Python
gpl-3.0
21
#!/usr/bin/python # # (c) 2015 Peter Sprygada, <psprygada@ansible.com> # Copyright (c) 2017 Dell Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = """ --- module: dellos10_command version_added: "2.2" author: "Senthil Kumar Ganesan (@skg-net)" short_description: Run commands on remote devices running Dell OS10 description: - Sends arbitrary commands to a Dell OS10 node and returns the results read from the device. This module includes an argument that will cause the module to wait for a specific condition before returning or timing out if the condition is not met. - This module does not support running commands in configuration mode. Please use M(dellos10_config) to configure Dell OS10 devices. extends_documentation_fragment: dellos10 options: commands: description: - List of commands to send to the remote dellos10 device over the configured provider. The resulting output from the command is returned. If the I(wait_for) argument is provided, the module is not returned until the condition is satisfied or the number of retries has expired. required: true wait_for: description: - List of conditions to evaluate against the output of the command. The task will wait for each condition to be true before moving forward. If the conditional is not true within the configured number of I(retries), the task fails. See examples. required: false default: null retries: description: - Specifies the number of retries a command should by tried before it is considered failed. The command is run on the target device every retry and evaluated against the I(wait_for) conditions. required: false default: 10 interval: description: - Configures the interval in seconds to wait between retries of the command. 
If the command does not pass the specified conditions, the interval indicates how long to wait before trying the command again. required: false default: 1 """ EXAMPLES = """ # Note: examples below use the following provider dict to handle # transport and authentication to the node. vars: cli: host: "{{ inventory_hostname }}" username: admin password: admin transport: cli tasks: - name: run show version on remote devices dellos10_command: commands: show version provider: "{{ cli }}" - name: run show version and check to see if output contains OS10 dellos10_command: commands: show version wait_for: result[0] contains OS10 provider: "{{ cli }}" - name: run multiple commands on remote nodes dellos10_command: commands: - show version - show interface provider: "{{ cli }}" - name: run multiple commands and evaluate the output dellos10_command: commands: - show version - show interface wait_for: - result[0] contains OS10 - result[1] contains Ethernet provider: "{{ cli }}" """ RETURN = """ stdout: description: The set of responses from the commands returned: always apart from low level errors (such as action plugin) type: list sample: ['...', '...'] stdout_lines: description: The value of stdout split into a list returned: always apart from low level errors (such as action plugin) type: list sample: [['...', '...'], ['...'], ['...']] failed_conditions: description: The list of conditionals that have failed returned: failed type: list sample: ['...', '...'] warnings: description: The list of warnings (if any) generated by module based on arguments returned: always type: list sample: ['...', '...'] """ import time from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network.dellos10.dellos10 import run_commands from ansible.module_utils.network.dellos10.dellos10 import dellos10_argument_spec, check_args from ansible.module_utils.network.common.utils import ComplexList from ansible.module_utils.network.common.parsing import Conditional from 
ansible.module_utils.six import string_types def to_lines(stdout): for item in stdout: if isinstance(item, string_types): item = str(item).split('\n') yield item def parse_commands(module, warnings): command = ComplexList(dict( command=dict(key=True), prompt=dict(), answer=dict() ), module) commands = command(module.params['commands']) for index, item in enumerate(commands): if module.check_mode and not item['command'].startswith('show'): warnings.append( 'only show commands are supported when using check mode, not ' 'executing `%s`' % item['command'] ) elif item['command'].startswith('conf'): module.fail_json( msg='dellos10_command does not support running config mode ' 'commands. Please use dellos10_config instead' ) return commands def main(): """main entry point for module execution """ argument_spec = dict( # { command: <str>, prompt: <str>, response: <str> } commands=dict(type='list', required=True), wait_for=dict(type='list'), match=dict(default='all', choices=['all', 'any']), retries=dict(default=10, type='int'), interval=dict(default=1, type='int') ) argument_spec.update(dellos10_argument_spec) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) result = {'changed': False} warnings = list() check_args(module, warnings) commands = parse_commands(module, warnings) result['warnings'] = warnings wait_for = module.params['wait_for'] or list() conditionals = [Conditional(c) for c in wait_for] retries = module.params['retries'] interval = module.params['interval'] match = module.params['match'] while retries > 0: responses = run_commands(module, commands) for item in list(conditionals): if item(responses): if match == 'any': conditionals = list() break conditionals.remove(item) if not conditionals: break time.sleep(interval) retries -= 1 if conditionals: failed_conditions = [item.raw for item in conditionals] msg = 'One or more conditional statements have not be satisfied' module.fail_json(msg=msg, failed_conditions=failed_conditions) 
result = { 'changed': False, 'stdout': responses, 'stdout_lines': list(to_lines(responses)) } module.exit_json(**result) if __name__ == '__main__': main()
haad/ansible
lib/ansible/modules/network/dellos10/dellos10_command.py
Python
gpl-3.0
7,132
<?php
// This file is part of Moodle - http://moodle.org/
//
// Moodle is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Moodle is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Moodle. If not, see <http://www.gnu.org/licenses/>.

/**
 * Event observers used in forum.
 *
 * @package    mod_forum
 * @copyright  2013 Rajesh Taneja <rajesh@moodle.com>
 * @license    http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later
 */

defined('MOODLE_INTERNAL') || die();

/**
 * Event observer for mod_forum.
 */
class mod_forum_observer {

    /**
     * Triggered via user_enrolment_deleted event.
     *
     * Removes the user's per-forum records (digest preferences, subscriptions,
     * tracking preferences, read records) for all forums in the course, but
     * only once the user's last enrolment in that course is gone.
     *
     * @param \core\event\user_enrolment_deleted $event
     */
    public static function user_enrolment_deleted(\core\event\user_enrolment_deleted $event) {
        global $DB;

        // NOTE: this has to be as fast as possible.
        // Get user enrolment info from event.
        $cp = (object)$event->other['userenrolment'];
        if ($cp->lastenrol) {
            $params = array('userid' => $cp->userid, 'courseid' => $cp->courseid);
            // Subquery limiting the purge to forums that belong to this course.
            $forumselect = "IN (SELECT f.id FROM {forum} f WHERE f.course = :courseid)";

            $DB->delete_records_select('forum_digests', 'userid = :userid AND forum '.$forumselect, $params);
            $DB->delete_records_select('forum_subscriptions', 'userid = :userid AND forum '.$forumselect, $params);
            $DB->delete_records_select('forum_track_prefs', 'userid = :userid AND forumid '.$forumselect, $params);
            $DB->delete_records_select('forum_read', 'userid = :userid AND forumid '.$forumselect, $params);
        }
    }

    /**
     * Observer for role_assigned event.
     *
     * Auto-subscribes the user to every force-subscribe ("initial
     * subscription") forum of the course they were just given a role in,
     * unless they are already subscribed.
     *
     * @param \core\event\role_assigned $event
     * @return void
     */
    public static function role_assigned(\core\event\role_assigned $event) {
        global $CFG, $DB;

        $context = context::instance_by_id($event->contextid, MUST_EXIST);

        // If contextlevel is course then only subscribe user. Role assignment
        // at course level means user is enroled in course and can subscribe to forum.
        if ($context->contextlevel != CONTEXT_COURSE) {
            return;
        }

        // Forum lib required for the constant used below.
        require_once($CFG->dirroot . '/mod/forum/lib.php');

        $userid = $event->relateduserid;

        // Find FORUM_INITIALSUBSCRIBE forums in this course that the user is
        // not yet subscribed to (LEFT JOIN + fs.id IS NULL).
        $sql = "SELECT f.id, cm.id AS cmid, f.forcesubscribe
                  FROM {forum} f
                  JOIN {course_modules} cm ON (cm.instance = f.id)
                  JOIN {modules} m ON (m.id = cm.module)
             LEFT JOIN {forum_subscriptions} fs ON (fs.forum = f.id AND fs.userid = :userid)
                 WHERE f.course = :courseid
                   AND f.forcesubscribe = :initial
                   AND m.name = 'forum'
                   AND fs.id IS NULL";
        $params = array('courseid' => $context->instanceid, 'userid' => $userid, 'initial' => FORUM_INITIALSUBSCRIBE);

        $forums = $DB->get_records_sql($sql, $params);
        foreach ($forums as $forum) {
            // If user doesn't have allowforcesubscribe capability then don't subscribe.
            $modcontext = context_module::instance($forum->cmid);
            if (has_capability('mod/forum:allowforcesubscribe', $modcontext, $userid)) {
                \mod_forum\subscriptions::subscribe_user($userid, $forum, $modcontext);
            }
        }
    }

    /**
     * Observer for \core\event\course_module_created event.
     *
     * Runs the forum-specific post-creation hook when a forum course
     * module is created.
     *
     * @param \core\event\course_module_created $event
     * @return void
     */
    public static function course_module_created(\core\event\course_module_created $event) {
        global $CFG;

        if ($event->other['modulename'] === 'forum') {
            // Include the forum library to make use of the forum_instance_created function.
            require_once($CFG->dirroot . '/mod/forum/lib.php');

            $forum = $event->get_record_snapshot('forum', $event->other['instanceid']);
            forum_instance_created($event->get_context(), $forum);
        }
    }
}
Jinelle/moodle
mod/forum/classes/observer.php
PHP
gpl-3.0
4,511
<?php
/* Copyright (C) 2016 Laurent Destailleur <eldy@users.sourceforge.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/**
 *  \file       htdocs/fichinter/stats/index.php
 *  \ingroup    fichinter
 *  \brief      Page with interventions statistics
 */

require '../../main.inc.php';
require_once DOL_DOCUMENT_ROOT.'/fichinter/class/fichinter.class.php';
require_once DOL_DOCUMENT_ROOT.'/fichinter/class/fichinterstats.class.php';
require_once DOL_DOCUMENT_ROOT.'/core/class/dolgraph.class.php';

$WIDTH=DolGraph::getDefaultGraphSizeForStats('width');
$HEIGHT=DolGraph::getDefaultGraphSizeForStats('height');

// Only the 'customer' mode is wired up in this page; the permission check
// guards read access to interventions.
$mode='customer';
if ($mode == 'customer' && ! $user->rights->ficheinter->lire) accessforbidden();

$userid=GETPOST('userid','int');
$socid=GETPOST('socid','int');
// Security check: external users are locked onto their own third party.
if ($user->societe_id > 0)
{
    $action = '';
    $socid = $user->societe_id;
}

$nowyear=strftime("%Y", dol_now());
$year = GETPOST('year')>0?GETPOST('year'):$nowyear;
//$startyear=$year-2;
$startyear=$year-1;
$endyear=$year;
$object_status=GETPOST('object_status');

$langs->load('interventions');
$langs->load('companies');
$langs->load('other');
$langs->load('suppliers');


/*
 * View
 */

$form=new Form($db);

$objectstatic=new FichInter($db);

if ($mode == 'customer')
{
    $title=$langs->trans("InterventionStatistics");
    $dir=$conf->ficheinter->dir_temp;
}

llxHeader('', $title);

print load_fiche_titre($title,'','title_commercial.png');

// Graph images are cached in the module temp directory.
dol_mkdir($dir);

$stats = new FichinterStats($db, $socid, $mode, ($userid>0?$userid:0));
// Optional status filter appended to the stats SQL WHERE clause.
if ($object_status != '' && $object_status > -1) $stats->where .= ' AND c.fk_statut IN ('.$db->escape($object_status).')';

// Build graphic number of object
$data = $stats->getNbByMonthWithPrevYear($endyear,$startyear);
//var_dump($data);
// $data = array(array('Lib',val1,val2,val3),...)

// Per-user image names when the user may only see his own third parties.
if (!$user->rights->societe->client->voir || $user->societe_id)
{
    $filenamenb = $dir.'/interventionsnbinyear-'.$user->id.'-'.$year.'.png';
    if ($mode == 'customer') $fileurlnb = DOL_URL_ROOT.'/viewimage.php?modulepart=interventionstats&file=interventionsnbinyear-'.$user->id.'-'.$year.'.png';
}
else
{
    $filenamenb = $dir.'/interventionsnbinyear-'.$year.'.png';
    if ($mode == 'customer') $fileurlnb = DOL_URL_ROOT.'/viewimage.php?modulepart=interventionstats&file=interventionsnbinyear-'.$year.'.png';
}

$px1 = new DolGraph();
$mesg = $px1->isGraphKo();
if (! $mesg)
{
    $px1->SetData($data);
    $px1->SetPrecisionY(0);
    $i=$startyear;$legend=array();
    while ($i <= $endyear)
    {
        $legend[]=$i;
        $i++;
    }
    $px1->SetLegend($legend);
    $px1->SetMaxValue($px1->GetCeilMaxValue());
    $px1->SetMinValue(min(0,$px1->GetFloorMinValue()));
    $px1->SetWidth($WIDTH);
    $px1->SetHeight($HEIGHT);
    $px1->SetYLabel($langs->trans("NbOfIntervention"));
    $px1->SetShading(3);
    $px1->SetHorizTickIncrement(1);
    $px1->SetPrecisionY(0);
    $px1->mode='depth';
    $px1->SetTitle($langs->trans("NumberOfInterventionsByMonth"));

    $px1->draw($filenamenb,$fileurlnb);
}

// Build graphic amount of object
$data = $stats->getAmountByMonthWithPrevYear($endyear,$startyear);
//var_dump($data);
// $data = array(array('Lib',val1,val2,val3),...)

if (!$user->rights->societe->client->voir || $user->societe_id)
{
    $filenameamount = $dir.'/interventionsamountinyear-'.$user->id.'-'.$year.'.png';
    if ($mode == 'customer') $fileurlamount = DOL_URL_ROOT.'/viewimage.php?modulepart=interventionstats&file=interventionsamountinyear-'.$user->id.'-'.$year.'.png';
    if ($mode == 'supplier') $fileurlamount = DOL_URL_ROOT.'/viewimage.php?modulepart=interventionstatssupplier&file=interventionsamountinyear-'.$user->id.'-'.$year.'.png';
}
else
{
    $filenameamount = $dir.'/interventionsamountinyear-'.$year.'.png';
    if ($mode == 'customer') $fileurlamount = DOL_URL_ROOT.'/viewimage.php?modulepart=interventionstats&file=interventionsamountinyear-'.$year.'.png';
    if ($mode == 'supplier') $fileurlamount = DOL_URL_ROOT.'/viewimage.php?modulepart=interventionstatssupplier&file=interventionsamountinyear-'.$year.'.png';
}

$px2 = new DolGraph();
$mesg = $px2->isGraphKo();
if (! $mesg)
{
    $px2->SetData($data);
    $i=$startyear;$legend=array();
    while ($i <= $endyear)
    {
        $legend[]=$i;
        $i++;
    }
    $px2->SetLegend($legend);
    $px2->SetMaxValue($px2->GetCeilMaxValue());
    $px2->SetMinValue(min(0,$px2->GetFloorMinValue()));
    $px2->SetWidth($WIDTH);
    $px2->SetHeight($HEIGHT);
    $px2->SetYLabel($langs->trans("AmountOfinterventions"));
    $px2->SetShading(3);
    $px2->SetHorizTickIncrement(1);
    $px2->SetPrecisionY(0);
    $px2->mode='depth';
    $px2->SetTitle($langs->trans("AmountOfinterventionsByMonthHT"));

    $px2->draw($filenameamount,$fileurlamount);
}

// Build graphic average amount of object
$data = $stats->getAverageByMonthWithPrevYear($endyear, $startyear);

if (!$user->rights->societe->client->voir || $user->societe_id)
{
    $filename_avg = $dir.'/interventionsaverage-'.$user->id.'-'.$year.'.png';
    if ($mode == 'customer') $fileurl_avg = DOL_URL_ROOT.'/viewimage.php?modulepart=interventionstats&file=interventionsaverage-'.$user->id.'-'.$year.'.png';
    if ($mode == 'supplier') $fileurl_avg = DOL_URL_ROOT.'/viewimage.php?modulepart=interventionstatssupplier&file=interventionsaverage-'.$user->id.'-'.$year.'.png';
}
else
{
    $filename_avg = $dir.'/interventionsaverage-'.$year.'.png';
    if ($mode == 'customer') $fileurl_avg = DOL_URL_ROOT.'/viewimage.php?modulepart=interventionstats&file=interventionsaverage-'.$year.'.png';
    if ($mode == 'supplier') $fileurl_avg = DOL_URL_ROOT.'/viewimage.php?modulepart=interventionstatssupplier&file=interventionsaverage-'.$year.'.png';
}

$px3 = new DolGraph();
$mesg = $px3->isGraphKo();
if (! $mesg)
{
    $px3->SetData($data);
    $i=$startyear;$legend=array();
    while ($i <= $endyear)
    {
        $legend[]=$i;
        $i++;
    }
    $px3->SetLegend($legend);
    $px3->SetYLabel($langs->trans("AmountAverage"));
    $px3->SetMaxValue($px3->GetCeilMaxValue());
    $px3->SetMinValue($px3->GetFloorMinValue());
    $px3->SetWidth($WIDTH);
    $px3->SetHeight($HEIGHT);
    $px3->SetShading(3);
    $px3->SetHorizTickIncrement(1);
    $px3->SetPrecisionY(0);
    $px3->mode='depth';
    $px3->SetTitle($langs->trans("AmountAverage"));

    $px3->draw($filename_avg,$fileurl_avg);
}

// Show array
$data = $stats->getAllByYear();
$arrayyears=array();
foreach($data as $val) {
    if (! empty($val['year'])) {
        $arrayyears[$val['year']]=$val['year'];
    }
}
if (! count($arrayyears)) $arrayyears[$nowyear]=$nowyear;

// Tab head for the statistics page.
$h=0;
$head = array();
$head[$h][0] = DOL_URL_ROOT . '/commande/stats/index.php?mode='.$mode;
$head[$h][1] = $langs->trans("ByMonthYear");
$head[$h][2] = 'byyear';
$h++;

if ($mode == 'customer') $type='order_stats';
if ($mode == 'supplier') $type='supplier_order_stats';

complete_head_from_modules($conf,$langs,null,$head,$h,$type);

dol_fiche_head($head, 'byyear', $langs->trans("Statistics"), -1);

print '<div class="fichecenter"><div class="fichethirdleft">';

//if (empty($socid))
//{
    // Show filter box
    print '<form name="stats" method="POST" action="'.$_SERVER["PHP_SELF"].'">';
    print '<input type="hidden" name="mode" value="'.$mode.'">';

    print '<table class="noborder" width="100%">';
    print '<tr class="liste_titre"><td class="liste_titre" colspan="2">'.$langs->trans("Filter").'</td></tr>';
    // Company
    print '<tr><td align="left">'.$langs->trans("ThirdParty").'</td><td align="left">';
    if ($mode == 'customer') $filter='s.client in (1,2,3)';
    if ($mode == 'supplier') $filter='s.fournisseur = 1';
    print $form->select_company($socid,'socid',$filter,1,0,0,array(),0,'','style="width: 95%"');
    print '</td></tr>';
    // User
    print '<tr><td align="left">'.$langs->trans("CreatedBy").'</td><td align="left">';
    print $form->select_dolusers($userid, 'userid', 1, '', 0, '', '', 0, 0, 0, '', 0, '', 'maxwidth300');
    // NOTE(review): the "CreatedBy" row is never closed here (no
    // print '</td></tr>';) — browsers recover, but confirm against upstream.
    // Status
    print '<tr><td align="left">'.$langs->trans("Status").'</td><td align="left">';
    $liststatus=$objectstatic->statuts_short;
    if (empty($conf->global->FICHINTER_CLASSIFY_BILLED)) unset($liststatus[2]);     // Option deprecated. In a future, billed must be managed with a dedicated field to 0 or 1
    print $form->selectarray('object_status', $liststatus, $object_status, 1, 0, 0, '', 1);
    print '</td></tr>';
    // Year
    print '<tr><td align="left">'.$langs->trans("Year").'</td><td align="left">';
    if (! in_array($year,$arrayyears)) $arrayyears[$year]=$year;
    if (! in_array($nowyear,$arrayyears)) $arrayyears[$nowyear]=$nowyear;
    arsort($arrayyears);
    print $form->selectarray('year',$arrayyears,$year,0);
    print '</td></tr>';
    print '<tr><td align="center" colspan="2"><input type="submit" name="submit" class="button" value="'.$langs->trans("Refresh").'"></td></tr>';
    print '</table>';
    print '</form>';
    print '<br><br>';
//}

// Summary table, one row per year (with empty-year filler rows).
print '<table class="noborder" width="100%">';
print '<tr class="liste_titre" height="24">';
print '<td align="center">'.$langs->trans("Year").'</td>';
print '<td align="right">'.$langs->trans("NbOfinterventions").'</td>';
print '<td align="right">%</td>';
print '<td align="right">'.$langs->trans("AmountTotal").'</td>';
print '<td align="right">%</td>';
print '<td align="right">'.$langs->trans("AmountAverage").'</td>';
print '<td align="right">%</td>';
print '</tr>';

$oldyear=0;
$var=true;
foreach ($data as $val)
{
    $year = $val['year'];
    while (! empty($year) && $oldyear > $year+1)
    {   // If we have empty year
        $oldyear--;
        print '<tr '.$bc[$var].' height="24">';
        print '<td align="center"><a href="'.$_SERVER["PHP_SELF"].'?year='.$oldyear.'&amp;mode='.$mode.($socid>0?'&socid='.$socid:'').($userid>0?'&userid='.$userid:'').'">'.$oldyear.'</a></td>';
        print '<td align="right">0</td>';
        print '<td align="right"></td>';
        print '<td align="right">0</td>';
        print '<td align="right"></td>';
        print '<td align="right">0</td>';
        print '<td align="right"></td>';
        print '</tr>';
    }
    print '<tr '.$bc[$var].' height="24">';
    print '<td align="center"><a href="'.$_SERVER["PHP_SELF"].'?year='.$year.'&amp;mode='.$mode.($socid>0?'&socid='.$socid:'').($userid>0?'&userid='.$userid:'').'">'.$year.'</a></td>';
    print '<td align="right">'.$val['nb'].'</td>';
    print '<td align="right" style="'.(($val['nb_diff'] >= 0) ? 'color: green;':'color: red;').'">'.round($val['nb_diff']).'</td>';
    print '<td align="right">'.price(price2num($val['total'],'MT'),1).'</td>';
    print '<td align="right" style="'.(($val['total_diff'] >= 0) ? 'color: green;':'color: red;').'">'.round($val['total_diff']).'</td>';
    print '<td align="right">'.price(price2num($val['avg'],'MT'),1).'</td>';
    print '<td align="right" style="'.(($val['avg_diff'] >= 0) ? 'color: green;':'color: red;').'">'.round($val['avg_diff']).'</td>';
    print '</tr>';
    $oldyear=$year;
}

print '</table>';

print '</div><div class="fichetwothirdright"><div class="ficheaddleft">';

// Show graphs
print '<table class="border" width="100%"><tr valign="top"><td align="center">';
if ($mesg) { print $mesg; }
else {
    print $px1->show();
    /*print "<br>\n";
    print $px2->show();
    print "<br>\n";
    print $px3->show();*/
}
print '</td></tr></table>';

print '</div></div></div>';
print '<div style="clear:both"></div>';

dol_fiche_end();

llxFooter();

$db->close();
guerrierk/dolibarr
htdocs/fichinter/stats/index.php
PHP
gpl-3.0
11,914
<?php
// This file is part of Moodle - http://moodle.org/
//
// Moodle is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Moodle is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Moodle. If not, see <http://www.gnu.org/licenses/>.

/**
 * Boost.
 *
 * @package theme_boost
 * @copyright 2016 Frédéric Massart
 * @license http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later
 */

defined('MOODLE_INTERNAL') || die();

$plugin->version = 2019052000;          // Plugin release version (date-based: YYYYMMDDXX).
$plugin->requires = 2019051100;         // Minimum Moodle core version this theme needs.
$plugin->component = 'theme_boost';     // Full frankenstyle component name of the plugin.
zeduardu/moodle
theme/boost/version.php
PHP
gpl-3.0
990
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2017 Google # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # ---------------------------------------------------------------------------- # # *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** # # ---------------------------------------------------------------------------- # # This file is automatically generated by Magic Modules and manual # changes will be clobbered when the file is regenerated. # # Please read more about how to change this file at # https://www.github.com/GoogleCloudPlatform/magic-modules # # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function __metaclass__ = type ################################################################################ # Documentation ################################################################################ ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gcp_cloudscheduler_job description: - A scheduled job that can publish a pubsub message or a http request every X interval of time, using crontab format string. - To use Cloud Scheduler your project must contain an App Engine app that is located in one of the supported regions. If your project does not have an App Engine app, you must create one. short_description: Creates a GCP Job version_added: 2.9 author: Google Inc. (@googlecloudplatform) requirements: - python >= 2.6 - requests >= 2.18.4 - google-auth >= 1.3.0 options: state: description: - Whether the given object should exist in GCP choices: - present - absent default: present type: str name: description: - The name of the job. required: true type: str description: description: - A human-readable description for the job. This string must not contain more than 500 characters. 
required: false type: str schedule: description: - Describes the schedule on which the job will be executed. required: false type: str time_zone: description: - Specifies the time zone to be used in interpreting schedule. - The value of this field must be a time zone name from the tz database. required: false default: Etc/UTC type: str retry_config: description: - By default, if a job does not complete successfully, meaning that an acknowledgement is not received from the handler, then it will be retried with exponential backoff according to the settings . required: false type: dict suboptions: retry_count: description: - The number of attempts that the system will make to run a job using the exponential backoff procedure described by maxDoublings. - Values greater than 5 and negative values are not allowed. required: false type: int max_retry_duration: description: - The time limit for retrying a failed job, measured from time when an execution was first attempted. If specified with retryCount, the job will be retried until both limits are reached. - A duration in seconds with up to nine fractional digits, terminated by 's'. required: false type: str min_backoff_duration: description: - The minimum amount of time to wait before retrying a job after it fails. - A duration in seconds with up to nine fractional digits, terminated by 's'. required: false type: str max_backoff_duration: description: - The maximum amount of time to wait before retrying a job after it fails. - A duration in seconds with up to nine fractional digits, terminated by 's'. required: false type: str max_doublings: description: - The time between retries will double maxDoublings times. - A job's retry interval starts at minBackoffDuration, then doubles maxDoublings times, then increases linearly, and finally retries retries at intervals of maxBackoffDuration up to retryCount times. 
required: false type: int pubsub_target: description: - Pub/Sub target If the job providers a Pub/Sub target the cron will publish a message to the provided topic . required: false type: dict suboptions: topic_name: description: - The name of the Cloud Pub/Sub topic to which messages will be published when a job is delivered. The topic name must be in the same format as required by PubSub's PublishRequest.name, for example projects/PROJECT_ID/topics/TOPIC_ID. required: true type: str data: description: - The message payload for PubsubMessage. - Pubsub message must contain either non-empty data, or at least one attribute. required: false type: str attributes: description: - Attributes for PubsubMessage. - Pubsub message must contain either non-empty data, or at least one attribute. required: false type: dict app_engine_http_target: description: - App Engine HTTP target. - If the job providers a App Engine HTTP target the cron will send a request to the service instance . required: false type: dict suboptions: http_method: description: - Which HTTP method to use for the request. required: false type: str app_engine_routing: description: - App Engine Routing setting for the job. required: false type: dict suboptions: service: description: - App service. - By default, the job is sent to the service which is the default service when the job is attempted. required: false type: str version: description: - App version. - By default, the job is sent to the version which is the default version when the job is attempted. required: false type: str instance: description: - App instance. - By default, the job is sent to an instance which is available when the job is attempted. required: false type: str relative_uri: description: - The relative URI. required: true type: str body: description: - HTTP request body. A request body is allowed only if the HTTP method is POST or PUT. It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. 
required: false type: str headers: description: - HTTP request headers. - This map contains the header field names and values. Headers can be set when the job is created. required: false type: dict http_target: description: - HTTP target. - If the job providers a http_target the cron will send a request to the targeted url . required: false type: dict suboptions: uri: description: - The full URI path that the request will be sent to. required: true type: str http_method: description: - Which HTTP method to use for the request. required: false type: str body: description: - HTTP request body. A request body is allowed only if the HTTP method is POST, PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod. required: false type: str headers: description: - This map contains the header field names and values. Repeated headers are not supported, but a header value can contain commas. required: false type: dict oauth_token: description: - Contains information needed for generating an OAuth token. - This type of authorization should be used when sending requests to a GCP endpoint. required: false type: dict suboptions: service_account_email: description: - Service account email to be used for generating OAuth token. - The service account must be within the same project as the job. required: false type: str scope: description: - OAuth scope to be used for generating OAuth access token. If not specified, "U(https://www.googleapis.com/auth/cloud-platform") will be used. required: false type: str oidc_token: description: - Contains information needed for generating an OpenID Connect token. - This type of authorization should be used when sending requests to third party endpoints or Cloud Run. required: false type: dict suboptions: service_account_email: description: - Service account email to be used for generating OAuth token. - The service account must be within the same project as the job. 
required: false type: str audience: description: - Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. required: false type: str region: description: - Region where the scheduler job resides . required: true type: str extends_documentation_fragment: gcp notes: - 'API Reference: U(https://cloud.google.com/scheduler/docs/reference/rest/)' - 'Official Documentation: U(https://cloud.google.com/scheduler/)' ''' EXAMPLES = ''' - name: create a job gcp_cloudscheduler_job: name: job region: us-central1 schedule: "*/4 * * * *" description: test app engine job time_zone: Europe/London app_engine_http_target: http_method: POST app_engine_routing: service: web version: prod instance: my-instance-001 relative_uri: "/ping" project: test_project auth_kind: serviceaccount service_account_file: "/tmp/auth.pem" state: present ''' RETURN = ''' name: description: - The name of the job. returned: success type: str description: description: - A human-readable description for the job. This string must not contain more than 500 characters. returned: success type: str schedule: description: - Describes the schedule on which the job will be executed. returned: success type: str timeZone: description: - Specifies the time zone to be used in interpreting schedule. - The value of this field must be a time zone name from the tz database. returned: success type: str retryConfig: description: - By default, if a job does not complete successfully, meaning that an acknowledgement is not received from the handler, then it will be retried with exponential backoff according to the settings . returned: success type: complex contains: retryCount: description: - The number of attempts that the system will make to run a job using the exponential backoff procedure described by maxDoublings. - Values greater than 5 and negative values are not allowed. 
returned: success type: int maxRetryDuration: description: - The time limit for retrying a failed job, measured from time when an execution was first attempted. If specified with retryCount, the job will be retried until both limits are reached. - A duration in seconds with up to nine fractional digits, terminated by 's'. returned: success type: str minBackoffDuration: description: - The minimum amount of time to wait before retrying a job after it fails. - A duration in seconds with up to nine fractional digits, terminated by 's'. returned: success type: str maxBackoffDuration: description: - The maximum amount of time to wait before retrying a job after it fails. - A duration in seconds with up to nine fractional digits, terminated by 's'. returned: success type: str maxDoublings: description: - The time between retries will double maxDoublings times. - A job's retry interval starts at minBackoffDuration, then doubles maxDoublings times, then increases linearly, and finally retries retries at intervals of maxBackoffDuration up to retryCount times. returned: success type: int pubsubTarget: description: - Pub/Sub target If the job providers a Pub/Sub target the cron will publish a message to the provided topic . returned: success type: complex contains: topicName: description: - The name of the Cloud Pub/Sub topic to which messages will be published when a job is delivered. The topic name must be in the same format as required by PubSub's PublishRequest.name, for example projects/PROJECT_ID/topics/TOPIC_ID. returned: success type: str data: description: - The message payload for PubsubMessage. - Pubsub message must contain either non-empty data, or at least one attribute. returned: success type: str attributes: description: - Attributes for PubsubMessage. - Pubsub message must contain either non-empty data, or at least one attribute. returned: success type: dict appEngineHttpTarget: description: - App Engine HTTP target. 
- If the job providers a App Engine HTTP target the cron will send a request to the service instance . returned: success type: complex contains: httpMethod: description: - Which HTTP method to use for the request. returned: success type: str appEngineRouting: description: - App Engine Routing setting for the job. returned: success type: complex contains: service: description: - App service. - By default, the job is sent to the service which is the default service when the job is attempted. returned: success type: str version: description: - App version. - By default, the job is sent to the version which is the default version when the job is attempted. returned: success type: str instance: description: - App instance. - By default, the job is sent to an instance which is available when the job is attempted. returned: success type: str relativeUri: description: - The relative URI. returned: success type: str body: description: - HTTP request body. A request body is allowed only if the HTTP method is POST or PUT. It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. returned: success type: str headers: description: - HTTP request headers. - This map contains the header field names and values. Headers can be set when the job is created. returned: success type: dict httpTarget: description: - HTTP target. - If the job providers a http_target the cron will send a request to the targeted url . returned: success type: complex contains: uri: description: - The full URI path that the request will be sent to. returned: success type: str httpMethod: description: - Which HTTP method to use for the request. returned: success type: str body: description: - HTTP request body. A request body is allowed only if the HTTP method is POST, PUT, or PATCH. It is an error to set body on a job with an incompatible HttpMethod. returned: success type: str headers: description: - This map contains the header field names and values. 
Repeated headers are not supported, but a header value can contain commas. returned: success type: dict oauthToken: description: - Contains information needed for generating an OAuth token. - This type of authorization should be used when sending requests to a GCP endpoint. returned: success type: complex contains: serviceAccountEmail: description: - Service account email to be used for generating OAuth token. - The service account must be within the same project as the job. returned: success type: str scope: description: - OAuth scope to be used for generating OAuth access token. If not specified, "U(https://www.googleapis.com/auth/cloud-platform") will be used. returned: success type: str oidcToken: description: - Contains information needed for generating an OpenID Connect token. - This type of authorization should be used when sending requests to third party endpoints or Cloud Run. returned: success type: complex contains: serviceAccountEmail: description: - Service account email to be used for generating OAuth token. - The service account must be within the same project as the job. returned: success type: str audience: description: - Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. returned: success type: str region: description: - Region where the scheduler job resides . 
returned: success type: str ''' ################################################################################ # Imports ################################################################################ from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict import json ################################################################################ # Main ################################################################################ def main(): """Main function""" module = GcpModule( argument_spec=dict( state=dict(default='present', choices=['present', 'absent'], type='str'), name=dict(required=True, type='str'), description=dict(type='str'), schedule=dict(type='str'), time_zone=dict(default='Etc/UTC', type='str'), retry_config=dict( type='dict', options=dict( retry_count=dict(type='int'), max_retry_duration=dict(type='str'), min_backoff_duration=dict(type='str'), max_backoff_duration=dict(type='str'), max_doublings=dict(type='int'), ), ), pubsub_target=dict(type='dict', options=dict(topic_name=dict(required=True, type='str'), data=dict(type='str'), attributes=dict(type='dict'))), app_engine_http_target=dict( type='dict', options=dict( http_method=dict(type='str'), app_engine_routing=dict(type='dict', options=dict(service=dict(type='str'), version=dict(type='str'), instance=dict(type='str'))), relative_uri=dict(required=True, type='str'), body=dict(type='str'), headers=dict(type='dict'), ), ), http_target=dict( type='dict', options=dict( uri=dict(required=True, type='str'), http_method=dict(type='str'), body=dict(type='str'), headers=dict(type='dict'), oauth_token=dict(type='dict', options=dict(service_account_email=dict(type='str'), scope=dict(type='str'))), oidc_token=dict(type='dict', options=dict(service_account_email=dict(type='str'), audience=dict(type='str'))), ), ), region=dict(required=True, type='str'), ), mutually_exclusive=[['app_engine_http_target', 'http_target', 
'pubsub_target']], ) if not module.params['scopes']: module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform'] state = module.params['state'] fetch = fetch_resource(module, self_link(module)) changed = False if fetch: if state == 'present': if is_different(module, fetch): update(module, self_link(module)) fetch = fetch_resource(module, self_link(module)) changed = True else: delete(module, self_link(module)) fetch = {} changed = True else: if state == 'present': fetch = create(module, collection(module)) changed = True else: fetch = {} fetch.update({'changed': changed}) module.exit_json(**fetch) def create(module, link): auth = GcpSession(module, 'cloudscheduler') return return_if_object(module, auth.post(link, resource_to_request(module))) def update(module, link): delete(module, self_link(module)) create(module, collection(module)) def delete(module, link): auth = GcpSession(module, 'cloudscheduler') return return_if_object(module, auth.delete(link)) def resource_to_request(module): request = { u'name': module.params.get('name'), u'description': module.params.get('description'), u'schedule': module.params.get('schedule'), u'timeZone': module.params.get('time_zone'), u'retryConfig': JobRetryconfig(module.params.get('retry_config', {}), module).to_request(), u'pubsubTarget': JobPubsubtarget(module.params.get('pubsub_target', {}), module).to_request(), u'appEngineHttpTarget': JobAppenginehttptarget(module.params.get('app_engine_http_target', {}), module).to_request(), u'httpTarget': JobHttptarget(module.params.get('http_target', {}), module).to_request(), } request = encode_request(request, module) return_vals = {} for k, v in request.items(): if v or v is False: return_vals[k] = v return return_vals def fetch_resource(module, link, allow_not_found=True): auth = GcpSession(module, 'cloudscheduler') return return_if_object(module, auth.get(link), allow_not_found) def self_link(module): return 
"https://cloudscheduler.googleapis.com/v1/projects/{project}/locations/{region}/jobs/{name}".format(**module.params) def collection(module): return "https://cloudscheduler.googleapis.com/v1/projects/{project}/locations/{region}/jobs".format(**module.params) def return_if_object(module, response, allow_not_found=False): # If not found, return nothing. if allow_not_found and response.status_code == 404: return None # If no content, return nothing. if response.status_code == 204: return None try: module.raise_for_status(response) result = response.json() except getattr(json.decoder, 'JSONDecodeError', ValueError): module.fail_json(msg="Invalid JSON response with error: %s" % response.text) result = decode_request(result, module) if navigate_hash(result, ['error', 'errors']): module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) return result def is_different(module, response): request = resource_to_request(module) response = response_to_hash(module, response) request = decode_request(request, module) # Remove all output-only from response. response_vals = {} for k, v in response.items(): if k in request: response_vals[k] = v request_vals = {} for k, v in request.items(): if k in response: request_vals[k] = v return GcpRequest(request_vals) != GcpRequest(response_vals) # Remove unnecessary properties from the response. # This is for doing comparisons with Ansible's current parameters. 
def response_to_hash(module, response): return { u'name': module.params.get('name'), u'description': module.params.get('description'), u'schedule': module.params.get('schedule'), u'timeZone': module.params.get('time_zone'), u'retryConfig': JobRetryconfig(module.params.get('retry_config', {}), module).to_request(), u'pubsubTarget': JobPubsubtarget(module.params.get('pubsub_target', {}), module).to_request(), u'appEngineHttpTarget': JobAppenginehttptarget(module.params.get('app_engine_http_target', {}), module).to_request(), u'httpTarget': JobHttptarget(module.params.get('http_target', {}), module).to_request(), } def encode_request(request, module): request['name'] = "projects/%s/locations/%s/jobs/%s" % (module.params['project'], module.params['region'], module.params['name']) return request def decode_request(response, module): if 'name' in response: response['name'] = response['name'].split('/')[-1] return response class JobRetryconfig(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict( { u'retryCount': self.request.get('retry_count'), u'maxRetryDuration': self.request.get('max_retry_duration'), u'minBackoffDuration': self.request.get('min_backoff_duration'), u'maxBackoffDuration': self.request.get('max_backoff_duration'), u'maxDoublings': self.request.get('max_doublings'), } ) def from_response(self): return remove_nones_from_dict( { u'retryCount': self.module.params.get('retry_count'), u'maxRetryDuration': self.module.params.get('max_retry_duration'), u'minBackoffDuration': self.module.params.get('min_backoff_duration'), u'maxBackoffDuration': self.module.params.get('max_backoff_duration'), u'maxDoublings': self.module.params.get('max_doublings'), } ) class JobPubsubtarget(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return 
remove_nones_from_dict( {u'topicName': self.request.get('topic_name'), u'data': self.request.get('data'), u'attributes': self.request.get('attributes')} ) def from_response(self): return remove_nones_from_dict( {u'topicName': self.module.params.get('topic_name'), u'data': self.module.params.get('data'), u'attributes': self.module.params.get('attributes')} ) class JobAppenginehttptarget(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict( { u'httpMethod': self.request.get('http_method'), u'appEngineRouting': JobAppenginerouting(self.request.get('app_engine_routing', {}), self.module).to_request(), u'relativeUri': self.request.get('relative_uri'), u'body': self.request.get('body'), u'headers': self.request.get('headers'), } ) def from_response(self): return remove_nones_from_dict( { u'httpMethod': self.module.params.get('http_method'), u'appEngineRouting': JobAppenginerouting(self.module.params.get('app_engine_routing', {}), self.module).to_request(), u'relativeUri': self.request.get(u'relativeUri'), u'body': self.module.params.get('body'), u'headers': self.module.params.get('headers'), } ) class JobAppenginerouting(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict( {u'service': self.request.get('service'), u'version': self.request.get('version'), u'instance': self.request.get('instance')} ) def from_response(self): return remove_nones_from_dict( {u'service': self.module.params.get('service'), u'version': self.module.params.get('version'), u'instance': self.module.params.get('instance')} ) class JobHttptarget(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict( { u'uri': 
self.request.get('uri'), u'httpMethod': self.request.get('http_method'), u'body': self.request.get('body'), u'headers': self.request.get('headers'), u'oauthToken': JobOauthtoken(self.request.get('oauth_token', {}), self.module).to_request(), u'oidcToken': JobOidctoken(self.request.get('oidc_token', {}), self.module).to_request(), } ) def from_response(self): return remove_nones_from_dict( { u'uri': self.request.get(u'uri'), u'httpMethod': self.request.get(u'httpMethod'), u'body': self.request.get(u'body'), u'headers': self.request.get(u'headers'), u'oauthToken': JobOauthtoken(self.module.params.get('oauth_token', {}), self.module).to_request(), u'oidcToken': JobOidctoken(self.module.params.get('oidc_token', {}), self.module).to_request(), } ) class JobOauthtoken(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict({u'serviceAccountEmail': self.request.get('service_account_email'), u'scope': self.request.get('scope')}) def from_response(self): return remove_nones_from_dict({u'serviceAccountEmail': self.request.get(u'serviceAccountEmail'), u'scope': self.request.get(u'scope')}) class JobOidctoken(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict({u'serviceAccountEmail': self.request.get('service_account_email'), u'audience': self.request.get('audience')}) def from_response(self): return remove_nones_from_dict({u'serviceAccountEmail': self.request.get(u'serviceAccountEmail'), u'audience': self.request.get(u'audience')}) if __name__ == '__main__': main()
resmo/ansible
lib/ansible/modules/cloud/google/gcp_cloudscheduler_job.py
Python
gpl-3.0
31,832
define([ 'angular' , './instance-controller' , './instance-directive' , '../../glyph/glyph' ], function( angular , Controller , directive , glyphModule ) { "use strict"; return angular.module('mtk.instance', [glyphModule.name]) .controller('InstanceController', Controller) .directive('mtkInstance', directive) ; });
jeroenbreen/metapolator
app/lib/ui/metapolator/instance-panel/instance/instance.js
JavaScript
gpl-3.0
378
#region Copyright & License Information /* * Copyright 2007-2021 The OpenRA Developers (see AUTHORS) * This file is part of OpenRA, which is free software. It is made * available to you under the terms of the GNU General Public License * as published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. For more * information, see COPYING. */ #endregion using System.Collections.Generic; using System.Linq; namespace OpenRA.Mods.Common.UpdateRules.Rules { public class ReplaceShadowPalette : UpdateRule { public override string Name => "Removed ShadowPalette from WithShadow and projectiles."; public override string Description => "The ShadowPalette field has been replaced by ShadowColor on projectiles.\n" + "The Palette field on WithShadow and ShadowPalette on WithParachute have similarly been replaced with ShadowColor."; readonly List<string> locations = new List<string>(); public override IEnumerable<string> AfterUpdate(ModData modData) { if (locations.Any()) yield return "The shadow palette overrides have been removed from the following locations:\n" + UpdateUtils.FormatMessageList(locations) + "\n\n" + "You may wish to inspect and change these."; locations.Clear(); } public override IEnumerable<string> UpdateWeaponNode(ModData modData, MiniYamlNode weaponNode) { foreach (var projectileNode in weaponNode.ChildrenMatching("Projectile")) if (projectileNode.RemoveNodes("ShadowPalette") > 0) locations.Add($"{weaponNode.Key}: {weaponNode.Key} ({weaponNode.Location.Filename})"); yield break; } public override IEnumerable<string> UpdateActorNode(ModData modData, MiniYamlNode actorNode) { foreach (var node in actorNode.ChildrenMatching("WithShadow")) if (node.RemoveNodes("Palette") > 0) locations.Add($"{actorNode.Key}: {node.Key} ({actorNode.Location.Filename})"); foreach (var node in actorNode.ChildrenMatching("WithParachute")) if (node.RemoveNodes("ShadowPalette") > 0) locations.Add($"{actorNode.Key}: {node.Key} 
({actorNode.Location.Filename})"); yield break; } } }
MustaphaTR/OpenRA
OpenRA.Mods.Common/UpdateRules/Rules/20201213/ReplaceShadowPalette.cs
C#
gpl-3.0
2,162
/** * The MIT License * Copyright (c) 2012 Graylog, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ package org.graylog2.plugin; /** * @author Dennis Oelkers <dennis@torch.sh> */ public interface Stoppable { public void stop(); }
berkeleydave/graylog2-server
graylog2-plugin-interfaces/src/main/java/org/graylog2/plugin/Stoppable.java
Java
gpl-3.0
1,275
/* * Copyright (C) 2008 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package dot.junit.opcodes.sub_double.d; public class T_sub_double_3 { public double run(long a, double b) { return 0; } }
s20121035/rk3288_android5.1_repo
cts/tools/vm-tests-tf/src/dot/junit/opcodes/sub_double/d/T_sub_double_3.java
Java
gpl-3.0
761
// Copyright 2015 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. package core import ( "fmt" "math/big" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/pow" ) /* * TODO: move this to another package. */ // MakeChainConfig returns a new ChainConfig with the ethereum default chain settings. func MakeChainConfig() *ChainConfig { return &ChainConfig{HomesteadBlock: big.NewInt(0)} } // FakePow is a non-validating proof of work implementation. // It returns true from Verify for any block. type FakePow struct{} func (f FakePow) Search(block pow.Block, stop <-chan struct{}, index int) (uint64, []byte) { return 0, nil } func (f FakePow) Verify(block pow.Block) bool { return true } func (f FakePow) GetHashrate() int64 { return 0 } func (f FakePow) Turbo(bool) {} // So we can deterministically seed different blockchains var ( canonicalSeed = 1 forkSeed = 2 ) // BlockGen creates blocks for testing. // See GenerateChain for a detailed explanation. 
type BlockGen struct { i int parent *types.Block chain []*types.Block header *types.Header statedb *state.StateDB gasPool *GasPool txs []*types.Transaction receipts []*types.Receipt uncles []*types.Header } // SetCoinbase sets the coinbase of the generated block. // It can be called at most once. func (b *BlockGen) SetCoinbase(addr common.Address) { if b.gasPool != nil { if len(b.txs) > 0 { panic("coinbase must be set before adding transactions") } panic("coinbase can only be set once") } b.header.Coinbase = addr b.gasPool = new(GasPool).AddGas(b.header.GasLimit) } // SetExtra sets the extra data field of the generated block. func (b *BlockGen) SetExtra(data []byte) { b.header.Extra = data } // AddTx adds a transaction to the generated block. If no coinbase has // been set, the block's coinbase is set to the zero address. // // AddTx panics if the transaction cannot be executed. In addition to // the protocol-imposed limitations (gas limit, etc.), there are some // further limitations on the content of transactions that can be // added. Notably, contract code relying on the BLOCKHASH instruction // will panic during execution. func (b *BlockGen) AddTx(tx *types.Transaction) { if b.gasPool == nil { b.SetCoinbase(common.Address{}) } b.statedb.StartRecord(tx.Hash(), common.Hash{}, len(b.txs)) receipt, _, _, err := ApplyTransaction(MakeChainConfig(), nil, b.gasPool, b.statedb, b.header, tx, b.header.GasUsed, vm.Config{}) if err != nil { panic(err) } b.txs = append(b.txs, tx) b.receipts = append(b.receipts, receipt) } // Number returns the block number of the block being generated. func (b *BlockGen) Number() *big.Int { return new(big.Int).Set(b.header.Number) } // AddUncheckedReceipts forcefully adds a receipts to the block without a // backing transaction. // // AddUncheckedReceipts will cause consensus failures when used during real // chain processing. This is best used in conjunction with raw block insertion. 
func (b *BlockGen) AddUncheckedReceipt(receipt *types.Receipt) { b.receipts = append(b.receipts, receipt) } // TxNonce returns the next valid transaction nonce for the // account at addr. It panics if the account does not exist. func (b *BlockGen) TxNonce(addr common.Address) uint64 { if !b.statedb.HasAccount(addr) { panic("account does not exist") } return b.statedb.GetNonce(addr) } // AddUncle adds an uncle header to the generated block. func (b *BlockGen) AddUncle(h *types.Header) { b.uncles = append(b.uncles, h) } // PrevBlock returns a previously generated block by number. It panics if // num is greater or equal to the number of the block being generated. // For index -1, PrevBlock returns the parent block given to GenerateChain. func (b *BlockGen) PrevBlock(index int) *types.Block { if index >= b.i { panic("block index out of range") } if index == -1 { return b.parent } return b.chain[index] } // OffsetTime modifies the time instance of a block, implicitly changing its // associated difficulty. It's useful to test scenarios where forking is not // tied to chain length directly. func (b *BlockGen) OffsetTime(seconds int64) { b.header.Time.Add(b.header.Time, new(big.Int).SetInt64(seconds)) if b.header.Time.Cmp(b.parent.Header().Time) <= 0 { panic("block time out of range") } b.header.Difficulty = CalcDifficulty(MakeChainConfig(), b.header.Time.Uint64(), b.parent.Time().Uint64(), b.parent.Number(), b.parent.Difficulty()) } // GenerateChain creates a chain of n blocks. The first block's // parent will be the provided parent. db is used to store // intermediate states and should contain the parent's state trie. // // The generator function is called with a new block generator for // every block. Any transactions and uncles added to the generator // become part of the block. If gen is nil, the blocks will be empty // and their coinbase will be the zero address. // // Blocks created by GenerateChain do not contain valid proof of work // values. 
Inserting them into BlockChain requires use of FakePow or // a similar non-validating proof of work implementation. func GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts) { blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n) genblock := func(i int, h *types.Header, statedb *state.StateDB) (*types.Block, types.Receipts) { b := &BlockGen{parent: parent, i: i, chain: blocks, header: h, statedb: statedb} if gen != nil { gen(i, b) } AccumulateRewards(statedb, h, b.uncles) root, err := statedb.Commit() if err != nil { panic(fmt.Sprintf("state write error: %v", err)) } h.Root = root return types.NewBlock(h, b.txs, b.uncles, b.receipts), b.receipts } for i := 0; i < n; i++ { statedb, err := state.New(parent.Root(), db) if err != nil { panic(err) } header := makeHeader(parent, statedb) block, receipt := genblock(i, header, statedb) blocks[i] = block receipts[i] = receipt parent = block } return blocks, receipts } func makeHeader(parent *types.Block, state *state.StateDB) *types.Header { var time *big.Int if parent.Time() == nil { time = big.NewInt(10) } else { time = new(big.Int).Add(parent.Time(), big.NewInt(10)) // block time is fixed at 10 seconds } return &types.Header{ Root: state.IntermediateRoot(), ParentHash: parent.Hash(), Coinbase: parent.Coinbase(), Difficulty: CalcDifficulty(MakeChainConfig(), time.Uint64(), new(big.Int).Sub(time, big.NewInt(10)).Uint64(), parent.Number(), parent.Difficulty()), GasLimit: CalcGasLimit(parent), GasUsed: new(big.Int), Number: new(big.Int).Add(parent.Number(), common.Big1), Time: time, } } // newCanonical creates a chain database, and injects a deterministic canonical // chain. Depending on the full flag, if creates either a full block chain or a // header only chain. 
func newCanonical(n int, full bool) (ethdb.Database, *BlockChain, error) {
	// Create the new chain database
	db, _ := ethdb.NewMemDatabase()
	evmux := &event.TypeMux{}

	// Initialize a fresh chain with only a genesis block
	genesis, _ := WriteTestNetGenesisBlock(db)
	blockchain, _ := NewBlockChain(db, MakeChainConfig(), FakePow{}, evmux)
	// Create and inject the requested chain
	if n == 0 {
		return db, blockchain, nil
	}
	if full {
		// Full block-chain requested
		blocks := makeBlockChain(genesis, n, db, canonicalSeed)
		_, err := blockchain.InsertChain(blocks)
		return db, blockchain, err
	}
	// Header-only chain requested
	headers := makeHeaderChain(genesis.Header(), n, db, canonicalSeed)
	_, err := blockchain.InsertHeaderChain(headers, 1)
	return db, blockchain, err
}

// makeHeaderChain creates a deterministic chain of headers rooted at parent.
func makeHeaderChain(parent *types.Header, n int, db ethdb.Database, seed int) []*types.Header {
	// Generate full blocks and strip them down to their headers.
	blocks := makeBlockChain(types.NewBlockWithHeader(parent), n, db, seed)
	headers := make([]*types.Header, len(blocks))
	for i, block := range blocks {
		headers[i] = block.Header()
	}
	return headers
}

// makeBlockChain creates a deterministic chain of blocks rooted at parent.
func makeBlockChain(parent *types.Block, n int, db ethdb.Database, seed int) []*types.Block {
	// The coinbase encodes the seed in byte 0 and the block index in byte 19,
	// making the generated chain deterministic per seed.
	blocks, _ := GenerateChain(parent, db, n, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})
	})
	return blocks
}
gophergala2016/etherapis
vendor/github.com/ethereum/go-ethereum/core/chain_makers.go
GO
gpl-3.0
9,385
/*************************************************************************
 * Copyright 2009-2013 Eucalyptus Systems, Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 3 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see http://www.gnu.org/licenses/.
 *
 * Please contact Eucalyptus Systems, Inc., 6755 Hollister Ave., Goleta
 * CA 93117, USA or visit http://www.eucalyptus.com/licenses/ if you need
 * additional information or have any questions.
 ************************************************************************/
package com.eucalyptus.auth.tokens;

import com.eucalyptus.auth.AuthException;

/**
 * Authentication exception raised when a security token request fails
 * validation. Carries only a human-readable message describing the
 * validation failure.
 */
public class SecurityTokenValidationException extends AuthException {
  private static final long serialVersionUID = 1L;

  /**
   * @param message Description of the validation failure.
   */
  public SecurityTokenValidationException( final String message ) {
    super( message );
  }
}
eethomas/eucalyptus
clc/modules/msgs/src/main/java/com/eucalyptus/auth/tokens/SecurityTokenValidationException.java
Java
gpl-3.0
1,303
# -*- coding: utf-8 -*- """ *==LICENSE==* CyanWorlds.com Engine - MMOG client, server and tools Copyright (C) 2011 Cyan Worlds, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. Additional permissions under GNU GPL version 3 section 7 If you modify this Program, or any covered work, by linking or combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK, NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK (or a modified version of those libraries), containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA, PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the licensors of this Program grant you additional permission to convey the resulting work. Corresponding Source for a non-source form of such a combination shall include the source code for the parts of OpenSSL and IJG JPEG Library used as well as that of the covered work. You can contact Cyan Worlds, Inc. by email legal@cyan.com or by snail mail at: Cyan Worlds, Inc. 
14617 N Newport Hwy Mead, WA 99021 *==LICENSE==* """ """ Module: ahnyTrees Age: Ahnonay Date: April, 2007 Author: Derek Odell Ahnonay Quab control """ from Plasma import * from PlasmaTypes import * # define the attributes that will be entered in max rgnTrees = ptAttribActivator(1, "act: Tree Detector") respTreeAnims = ptAttribResponderList(2, "resp: Tree Anims", byObject=1) objTrees = ptAttribSceneobjectList(3, "obj: Tree Meshs") SDLTrees = ptAttribString(4, "str: SDL Trees (optional)") # globals respTreeAnimsList = [] objTreeList = [] #==================================== class ahnyTrees(ptModifier): ########################### def __init__(self): ptModifier.__init__(self) self.id = 5948 version = 1 self.version = version print "__init__ahnyTrees v%d " % (version) ########################### def OnFirstUpdate(self): global respTreeAnimsList global objTreeList try: ageSDL = PtGetAgeSDL() ageSDL[SDLTrees.value][0] except: print "ahnyTrees.OnServerInitComplete(): ERROR --- Cannot find the Ahnonay Age SDL" ageSDL[SDLTrees.value] = (1,1,1,1,1,1,1,1,1,1,1,1,1,1,1) ageSDL.setFlags(SDLTrees.value,1,1) ageSDL.sendToClients(SDLTrees.value) ageSDL.setNotify(self.key,SDLTrees.value,0.0) for responder in respTreeAnims.value: thisResp = responder.getName() respTreeAnimsList.append(thisResp) for object in objTrees.value: thisObj = object.getName() objTreeList.append(thisObj) ageSDL = PtGetAgeSDL() idx = 0 for visible in ageSDL[SDLTrees.value]: if not visible: respTreeAnims.run(self.key, objectName=respTreeAnimsList[idx], fastforward=1) idx += 1 ########################### def OnNotify(self,state,id,events): global respTreeAnimsList global objTreeList print "ahnyTrees.OnNotify: state=%s id=%d events=" % (state, id), events if id == rgnTrees.id: for event in events: if event[0] == kCollisionEvent and self.sceneobject.isLocallyOwned() : region = event[3] regName = region.getName() for object in objTreeList: if object == regName: ageSDL = PtGetAgeSDL() treeSDL = 
list(ageSDL[SDLTrees.value]) index = objTreeList.index(object) if treeSDL[index]: respTreeAnims.run(self.key, objectName=respTreeAnimsList[index], netForce = 1) treeSDL[index] = 0 ageSDL[SDLTrees.value] = tuple(treeSDL) print "ahnyTrees.OnNotify: Tree knocked down"
TOC-Shard/moul-scripts
Python/ahnyTrees.py
Python
gpl-3.0
4,774
# Green (cooperative) drop-in for the stdlib ``urllib`` module: the original
# module is re-executed here with its blocking dependencies replaced by
# eventlet's green equivalents.
from eventlet import patcher
from eventlet.green import socket
from eventlet.green import time
from eventlet.green import httplib
from eventlet.green import ftplib
from eventlet.support import six

if six.PY2:
    # Modules to substitute while urllib's code is injected into this namespace.
    to_patch = [('socket', socket), ('httplib', httplib), ('time', time), ('ftplib', ftplib)]
    try:
        from eventlet.green import ssl
        to_patch.append(('ssl', ssl))
    except ImportError:
        # ssl support is optional; urllib still works without https.
        pass

    patcher.inject('urllib', globals(), *to_patch)
    try:
        URLopener
    except NameError:
        # Fallback: the name lives in urllib.request on newer layouts.
        patcher.inject('urllib.request', globals(), *to_patch)

    # patch a bunch of things that have imports inside the
    # function body; this is lame and hacky but I don't feel
    # too bad because urllib is a hacky pile of junk that no
    # one should be using anyhow
    URLopener.open_http = patcher.patch_function(URLopener.open_http, ('httplib', httplib))
    if hasattr(URLopener, 'open_https'):
        URLopener.open_https = patcher.patch_function(URLopener.open_https, ('httplib', httplib))

    URLopener.open_ftp = patcher.patch_function(URLopener.open_ftp, ('ftplib', ftplib))
    ftpwrapper.init = patcher.patch_function(ftpwrapper.init, ('ftplib', ftplib))
    ftpwrapper.retrfile = patcher.patch_function(ftpwrapper.retrfile, ('ftplib', ftplib))

    # Keep the injected namespace clean of the patching machinery.
    del patcher

    # Run test program when run as a script
    if __name__ == '__main__':
        main()
pbaesse/Sissens
lib/python2.7/site-packages/eventlet/green/urllib/__init__.py
Python
gpl-3.0
1,423
// This code was generated by Barbara Worwood using IMS Development Environment (version 1.80 build 5007.25751)
// Copyright (C) 1995-2014 IMS MAXIMS. All rights reserved.
// WARNING: DO NOT MODIFY the content of this file

package ims.RefMan.vo;

/**
 * Linked to core.clinical.PatientProcedure business object (ID: 1003100017).
 *
 * Generated value object: code below is byte-identical to the generator
 * output; only explanatory comments have been added for reviewers.
 */
public class PatientProcedureForClinicalNotesVo extends ims.core.clinical.vo.PatientProcedureRefVo implements ims.vo.ImsCloneable, Comparable
{
	private static final long serialVersionUID = 1L;

	public PatientProcedureForClinicalNotesVo()
	{
	}
	public PatientProcedureForClinicalNotesVo(Integer id, int version)
	{
		super(id, version);
	}
	// Builds the value object from its serializable bean (no identity map).
	public PatientProcedureForClinicalNotesVo(ims.RefMan.vo.beans.PatientProcedureForClinicalNotesVoBean bean)
	{
		this.id = bean.getId();
		this.version = bean.getVersion();
		this.procedure = bean.getProcedure() == null ? null : bean.getProcedure().buildVo();
		this.proceduredescription = bean.getProcedureDescription();
		this.infosource = bean.getInfoSource() == null ? null : ims.core.vo.lookups.SourceofInformation.buildLookup(bean.getInfoSource());
		this.carecontext = bean.getCareContext() == null ? null : new ims.core.admin.vo.CareContextRefVo(new Integer(bean.getCareContext().getId()), bean.getCareContext().getVersion());
		this.episodeofcare = bean.getEpisodeOfCare() == null ? null : new ims.core.admin.vo.EpisodeOfCareRefVo(new Integer(bean.getEpisodeOfCare().getId()), bean.getEpisodeOfCare().getVersion());
		this.authoringinformation = bean.getAuthoringInformation() == null ? null : bean.getAuthoringInformation().buildVo();
		this.procdate = bean.getProcDate() == null ? null : bean.getProcDate().buildPartialDate();
		this.procedurestatus = bean.getProcedureStatus() == null ? null : ims.core.vo.lookups.PatientProcedureStatus.buildLookup(bean.getProcedureStatus());
		this.incompletereason = bean.getIncompleteReason() == null ? null : ims.core.vo.lookups.ProcedureIncompleteReason.buildLookup(bean.getIncompleteReason());
	}
	// Re-populates this value object from a bean, resolving shared references
	// through the supplied identity map (prevents duplicate VO instances).
	public void populate(ims.vo.ValueObjectBeanMap map, ims.RefMan.vo.beans.PatientProcedureForClinicalNotesVoBean bean)
	{
		this.id = bean.getId();
		this.version = bean.getVersion();
		this.procedure = bean.getProcedure() == null ? null : bean.getProcedure().buildVo(map);
		this.proceduredescription = bean.getProcedureDescription();
		this.infosource = bean.getInfoSource() == null ? null : ims.core.vo.lookups.SourceofInformation.buildLookup(bean.getInfoSource());
		this.carecontext = bean.getCareContext() == null ? null : new ims.core.admin.vo.CareContextRefVo(new Integer(bean.getCareContext().getId()), bean.getCareContext().getVersion());
		this.episodeofcare = bean.getEpisodeOfCare() == null ? null : new ims.core.admin.vo.EpisodeOfCareRefVo(new Integer(bean.getEpisodeOfCare().getId()), bean.getEpisodeOfCare().getVersion());
		this.authoringinformation = bean.getAuthoringInformation() == null ? null : bean.getAuthoringInformation().buildVo(map);
		this.procdate = bean.getProcDate() == null ? null : bean.getProcDate().buildPartialDate();
		this.procedurestatus = bean.getProcedureStatus() == null ? null : ims.core.vo.lookups.PatientProcedureStatus.buildLookup(bean.getProcedureStatus());
		this.incompletereason = bean.getIncompleteReason() == null ? null : ims.core.vo.lookups.ProcedureIncompleteReason.buildLookup(bean.getIncompleteReason());
	}
	public ims.vo.ValueObjectBean getBean()
	{
		return this.getBean(new ims.vo.ValueObjectBeanMap());
	}
	// Converts this VO to its bean form, reusing a bean from the identity map
	// if this VO has already been converted within the same graph.
	public ims.vo.ValueObjectBean getBean(ims.vo.ValueObjectBeanMap map)
	{
		ims.RefMan.vo.beans.PatientProcedureForClinicalNotesVoBean bean = null;
		if(map != null)
			bean = (ims.RefMan.vo.beans.PatientProcedureForClinicalNotesVoBean)map.getValueObjectBean(this);
		if (bean == null)
		{
			bean = new ims.RefMan.vo.beans.PatientProcedureForClinicalNotesVoBean();
			map.addValueObjectBean(this, bean);
			bean.populate(map, this);
		}
		return bean;
	}
	// Reflective accessor keyed by (case-insensitive) field name; delegates
	// unknown names to the superclass.
	public Object getFieldValueByFieldName(String fieldName)
	{
		if(fieldName == null)
			throw new ims.framework.exceptions.CodingRuntimeException("Invalid field name");
		fieldName = fieldName.toUpperCase();
		if(fieldName.equals("PROCEDURE"))
			return getProcedure();
		if(fieldName.equals("PROCEDUREDESCRIPTION"))
			return getProcedureDescription();
		if(fieldName.equals("INFOSOURCE"))
			return getInfoSource();
		if(fieldName.equals("CARECONTEXT"))
			return getCareContext();
		if(fieldName.equals("EPISODEOFCARE"))
			return getEpisodeOfCare();
		if(fieldName.equals("AUTHORINGINFORMATION"))
			return getAuthoringInformation();
		if(fieldName.equals("PROCDATE"))
			return getProcDate();
		if(fieldName.equals("PROCEDURESTATUS"))
			return getProcedureStatus();
		if(fieldName.equals("INCOMPLETEREASON"))
			return getIncompleteReason();
		return super.getFieldValueByFieldName(fieldName);
	}
	// --- Property accessors (setters reset the validation flag) ---
	public boolean getProcedureIsNotNull()
	{
		return this.procedure != null;
	}
	public ims.core.vo.ProcedureLiteVo getProcedure()
	{
		return this.procedure;
	}
	public void setProcedure(ims.core.vo.ProcedureLiteVo value)
	{
		this.isValidated = false;
		this.procedure = value;
	}
	public boolean getProcedureDescriptionIsNotNull()
	{
		return this.proceduredescription != null;
	}
	public String getProcedureDescription()
	{
		return this.proceduredescription;
	}
	public static int getProcedureDescriptionMaxLength()
	{
		return 255;
	}
	public void setProcedureDescription(String value)
	{
		this.isValidated = false;
		this.proceduredescription = value;
	}
	public boolean getInfoSourceIsNotNull()
	{
		return this.infosource != null;
	}
	public ims.core.vo.lookups.SourceofInformation getInfoSource()
	{
		return this.infosource;
	}
	public void setInfoSource(ims.core.vo.lookups.SourceofInformation value)
	{
		this.isValidated = false;
		this.infosource = value;
	}
	public boolean getCareContextIsNotNull()
	{
		return this.carecontext != null;
	}
	public ims.core.admin.vo.CareContextRefVo getCareContext()
	{
		return this.carecontext;
	}
	public void setCareContext(ims.core.admin.vo.CareContextRefVo value)
	{
		this.isValidated = false;
		this.carecontext = value;
	}
	public boolean getEpisodeOfCareIsNotNull()
	{
		return this.episodeofcare != null;
	}
	public ims.core.admin.vo.EpisodeOfCareRefVo getEpisodeOfCare()
	{
		return this.episodeofcare;
	}
	public void setEpisodeOfCare(ims.core.admin.vo.EpisodeOfCareRefVo value)
	{
		this.isValidated = false;
		this.episodeofcare = value;
	}
	public boolean getAuthoringInformationIsNotNull()
	{
		return this.authoringinformation != null;
	}
	public ims.core.vo.AuthoringInformationVo getAuthoringInformation()
	{
		return this.authoringinformation;
	}
	public void setAuthoringInformation(ims.core.vo.AuthoringInformationVo value)
	{
		this.isValidated = false;
		this.authoringinformation = value;
	}
	public boolean getProcDateIsNotNull()
	{
		return this.procdate != null;
	}
	public ims.framework.utils.PartialDate getProcDate()
	{
		return this.procdate;
	}
	public void setProcDate(ims.framework.utils.PartialDate value)
	{
		this.isValidated = false;
		this.procdate = value;
	}
	public boolean getProcedureStatusIsNotNull()
	{
		return this.procedurestatus != null;
	}
	public ims.core.vo.lookups.PatientProcedureStatus getProcedureStatus()
	{
		return this.procedurestatus;
	}
	public void setProcedureStatus(ims.core.vo.lookups.PatientProcedureStatus value)
	{
		this.isValidated = false;
		this.procedurestatus = value;
	}
	public boolean getIncompleteReasonIsNotNull()
	{
		return this.incompletereason != null;
	}
	public ims.core.vo.lookups.ProcedureIncompleteReason getIncompleteReason()
	{
		return this.incompletereason;
	}
	public void setIncompleteReason(ims.core.vo.lookups.ProcedureIncompleteReason value)
	{
		this.isValidated = false;
		this.incompletereason = value;
	}
	// isBusy guards against infinite recursion on cyclic VO graphs.
	public boolean isValidated()
	{
		if(this.isBusy)
			return true;
		this.isBusy = true;

		if(!this.isValidated)
		{
			this.isBusy = false;
			return false;
		}
		if(this.authoringinformation != null)
		{
			if(!this.authoringinformation.isValidated())
			{
				this.isBusy = false;
				return false;
			}
		}
		this.isBusy = false;
		return true;
	}
	public String[] validate()
	{
		return validate(null);
	}
	// Returns null when valid; otherwise the accumulated error messages.
	public String[] validate(String[] existingErrors)
	{
		if(this.isBusy)
			return null;
		this.isBusy = true;

		java.util.ArrayList<String> listOfErrors = new java.util.ArrayList<String>();
		if(existingErrors != null)
		{
			for(int x = 0; x < existingErrors.length; x++)
			{
				listOfErrors.add(existingErrors[x]);
			}
		}
		if(this.proceduredescription == null || this.proceduredescription.length() == 0)
			listOfErrors.add("ProcedureDescription is mandatory");
		else if(this.proceduredescription.length() > 255)
			listOfErrors.add("The length of the field [proceduredescription] in the value object [ims.RefMan.vo.PatientProcedureForClinicalNotesVo] is too big. It should be less or equal to 255");
		if(this.infosource == null)
			listOfErrors.add("InfoSource is mandatory");
		if(this.episodeofcare == null)
			listOfErrors.add("EpisodeOfCare is mandatory");
		if(this.authoringinformation != null)
		{
			String[] listOfOtherErrors = this.authoringinformation.validate();
			if(listOfOtherErrors != null)
			{
				for(int x = 0; x < listOfOtherErrors.length; x++)
				{
					listOfErrors.add(listOfOtherErrors[x]);
				}
			}
		}
		int errorCount = listOfErrors.size();
		if(errorCount == 0)
		{
			this.isBusy = false;
			this.isValidated = true;
			return null;
		}
		String[] result = new String[errorCount];
		for(int x = 0; x < errorCount; x++)
			result[x] = (String)listOfErrors.get(x);
		this.isBusy = false;
		this.isValidated = false;
		return result;
	}
	public void clearIDAndVersion()
	{
		this.id = null;
		this.version = 0;
	}
	// Deep-copies owned values; plain Ref VOs (carecontext, episodeofcare)
	// are shared by reference.
	public Object clone()
	{
		if(this.isBusy)
			return this;
		this.isBusy = true;

		PatientProcedureForClinicalNotesVo clone = new PatientProcedureForClinicalNotesVo(this.id, this.version);

		if(this.procedure == null)
			clone.procedure = null;
		else
			clone.procedure = (ims.core.vo.ProcedureLiteVo)this.procedure.clone();
		clone.proceduredescription = this.proceduredescription;
		if(this.infosource == null)
			clone.infosource = null;
		else
			clone.infosource = (ims.core.vo.lookups.SourceofInformation)this.infosource.clone();
		clone.carecontext = this.carecontext;
		clone.episodeofcare = this.episodeofcare;
		if(this.authoringinformation == null)
			clone.authoringinformation = null;
		else
			clone.authoringinformation = (ims.core.vo.AuthoringInformationVo)this.authoringinformation.clone();
		if(this.procdate == null)
			clone.procdate = null;
		else
			clone.procdate = (ims.framework.utils.PartialDate)this.procdate.clone();
		if(this.procedurestatus == null)
			clone.procedurestatus = null;
		else
			clone.procedurestatus = (ims.core.vo.lookups.PatientProcedureStatus)this.procedurestatus.clone();
		if(this.incompletereason == null)
			clone.incompletereason = null;
		else
			clone.incompletereason = (ims.core.vo.lookups.ProcedureIncompleteReason)this.incompletereason.clone();
		clone.isValidated = this.isValidated;

		this.isBusy = false;
		return clone;
	}
	public int compareTo(Object obj)
	{
		return compareTo(obj, true);
	}
	// Ordering is by business-object id only; nulls sort after non-nulls.
	public int compareTo(Object obj, boolean caseInsensitive)
	{
		if (obj == null)
		{
			return -1;
		}
		if(caseInsensitive); // this is to avoid eclipse warning only.
		if (!(PatientProcedureForClinicalNotesVo.class.isAssignableFrom(obj.getClass())))
		{
			throw new ClassCastException("A PatientProcedureForClinicalNotesVo object cannot be compared an Object of type " + obj.getClass().getName());
		}
		if (this.id == null)
			return 1;
		if (((PatientProcedureForClinicalNotesVo)obj).getBoId() == null)
			return -1;
		return this.id.compareTo(((PatientProcedureForClinicalNotesVo)obj).getBoId());
	}
	public synchronized static int generateValueObjectUniqueID()
	{
		return ims.vo.ValueObject.generateUniqueID();
	}
	public int countFieldsWithValue()
	{
		int count = 0;
		if(this.procedure != null)
			count++;
		if(this.proceduredescription != null)
			count++;
		if(this.infosource != null)
			count++;
		if(this.carecontext != null)
			count++;
		if(this.episodeofcare != null)
			count++;
		if(this.authoringinformation != null)
			count++;
		if(this.procdate != null)
			count++;
		if(this.procedurestatus != null)
			count++;
		if(this.incompletereason != null)
			count++;
		return count;
	}
	public int countValueObjectFields()
	{
		return 9;
	}
	protected ims.core.vo.ProcedureLiteVo procedure;
	protected String proceduredescription;
	protected ims.core.vo.lookups.SourceofInformation infosource;
	protected ims.core.admin.vo.CareContextRefVo carecontext;
	protected ims.core.admin.vo.EpisodeOfCareRefVo episodeofcare;
	protected ims.core.vo.AuthoringInformationVo authoringinformation;
	protected ims.framework.utils.PartialDate procdate;
	protected ims.core.vo.lookups.PatientProcedureStatus procedurestatus;
	protected ims.core.vo.lookups.ProcedureIncompleteReason incompletereason;
	private boolean isValidated = false;
	private boolean isBusy = false;
}
open-health-hub/openMAXIMS
openmaxims_workspace/ValueObjects/src/ims/RefMan/vo/PatientProcedureForClinicalNotesVo.java
Java
agpl-3.0
13,124
/* * * Copyright 2014 Telefonica Investigacion y Desarrollo, S.A.U * * This file is part of Orion Context Broker. * * Orion Context Broker is free software: you can redistribute it and/or * modify it under the terms of the GNU Affero General Public License as * published by the Free Software Foundation, either version 3 of the * License, or (at your option) any later version. * * Orion Context Broker is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero * General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with Orion Context Broker. If not, see http://www.gnu.org/licenses/. * * For those usages not covered by this license please contact with * iot_support at tid dot es * * Author: Ken Zangelin */ #include <string> #include <vector> #include "logMsg/logMsg.h" #include "logMsg/traceLevels.h" #include "ngsi/ParseData.h" #include "rest/ConnectionInfo.h" #include "rest/EntityTypeInfo.h" #include "rest/uriParamNames.h" #include "serviceRoutines/postDiscoverContextAvailability.h" #include "serviceRoutines/getEntityByIdAttributeByName.h" /* **************************************************************************** * * getEntityByIdAttributeByNameWithTypeAndId - * * GET /v1/registry/contextEntities/type/{entity::type}/id/{entity::id}/attributes/{attribute::name} * * Payload In: None * Payload Out: DiscoverContextAvailabilityResponse * * URI parameters: * - attributesFormat=object * - entity::type=XXX (must coincide with entity::type in URL) * - !exist=entity::type (if set - error -- entity::type cannot be empty) * - exist=entity::type (not supported - ok if present, ok if not present ...) * * 01. Get values from URL (entityId::type, exist, !exist) * 02. Check validity of URI params * 03. Fill in DiscoverContextAvailabilityRequest * 04. 
Call standard operation discoverContextAvailability * 05. Cleanup and return result */ std::string getEntityByIdAttributeByNameWithTypeAndId ( ConnectionInfo* ciP, int components, std::vector<std::string>& compV, ParseData* parseDataP ) { std::string entityType = compV[4]; std::string entityId = compV[6]; std::string attributeName = compV[8]; std::string entityTypeFromUriParam = ciP->uriParam[URI_PARAM_ENTITY_TYPE]; EntityTypeInfo typeInfo = EntityTypeEmptyOrNotEmpty; std::string answer; DiscoverContextAvailabilityResponse response; // 01. Get values from URL (entityId::type, exist, !exist) if (ciP->uriParam[URI_PARAM_NOT_EXIST] == URI_PARAM_ENTITY_TYPE) { typeInfo = EntityTypeEmpty; } else if (ciP->uriParam[URI_PARAM_EXIST] == URI_PARAM_ENTITY_TYPE) { typeInfo = EntityTypeNotEmpty; } // // 02. Check validity of URI params ... // and if OK: // 03. Fill in DiscoverContextAvailabilityRequest // 04. Call standard operation discoverContextAvailability // if (typeInfo == EntityTypeEmpty) { parseDataP->dcars.res.errorCode.fill(SccBadRequest, "entity::type cannot be empty for this request"); LM_W(("Bad Input (entity::type cannot be empty for this request)")); answer = parseDataP->dcars.res.render(IndividualContextEntityAttributeWithTypeAndId, ciP->outFormat, ""); } else if ((entityTypeFromUriParam != entityType) && (entityTypeFromUriParam != "")) { parseDataP->dcars.res.errorCode.fill(SccBadRequest, "non-matching entity::types in URL"); LM_W(("Bad Input non-matching entity::types in URL")); answer = parseDataP->dcars.res.render(IndividualContextEntityAttributeWithTypeAndId, ciP->outFormat, ""); } else { // 03. Fill in DiscoverContextAvailabilityRequest parseDataP->dcar.res.fill(entityId, entityType, typeInfo, attributeName); // 04. Call standard operation answer = postDiscoverContextAvailability(ciP, components, compV, parseDataP); } // 05. Cleanup and return result parseDataP->dcar.res.release(); return answer; }
j1fig/fiware-orion
src/lib/serviceRoutines/getEntityByIdAttributeByNameWithTypeAndId.cpp
C++
agpl-3.0
4,317
// French locale for calendar_date_select.
// Weekday initials depend on which day the week starts on (Monday vs Sunday).
if (Date.first_day_of_week == 1) {
  Date.weekdays = $w("L Ma Me J V S D");
} else {
  Date.weekdays = $w("D L Ma Me J V S");
}

Date.months = $w('Janvier Février Mars Avril Mai Juin Juillet Août Septembre Octobre Novembre Décembre');

_translations = {
  "OK": "OK",
  "Now": "Maintenant",
  "Today": "Aujourd'hui",
  // BUGFIX: "Clear" was mistranslated as "Claire" (the adjective/name);
  // the correct French for the clear/erase action is "Effacer".
  "Clear": "Effacer"
};
wesbillman/projects
public/javascripts/calendar_date_select/locale/fr.js
JavaScript
agpl-3.0
349
# This migration comes from bookyt_salary (originally 20120201104444)
class SwitchToTranslatedSaldoInclusionFlags < ActiveRecord::Migration
  # Machine-readable flag names are replaced by their translated (German)
  # display names, both on the tags and on the booking templates.
  def up
    renames = {
      'ahv'                 => 'AHV',
      'ktg'                 => 'KTG',
      'gross_income'        => 'Bruttolohn',
      'net_income'          => 'Nettolohn',
      'payment'             => 'Auszahlung',
      'uvg'                 => 'UVG',
      'uvgz'                => 'UVGZ',
      'deduction_at_source' => 'Quellensteuer'
    }

    # First rename the tags themselves...
    renames.each do |old_name, new_name|
      ActsAsTaggableOn::Tag.where(:name => old_name).update_all(:name => new_name)
    end

    # ...then the references held by the salary booking templates.
    renames.each do |old_name, new_name|
      SalaryBookingTemplate.where(:amount_relates_to => old_name).update_all(:amount_relates_to => new_name)
    end
  end
end
xuewenfei/bookyt
db/migrate/20120201112805_switch_to_translated_saldo_inclusion_flags.bookyt_salary.rb
Ruby
agpl-3.0
1,704
// This code was generated by Barbara Worwood using IMS Development Environment (version 1.80 build 5007.25751)
// Copyright (C) 1995-2014 IMS MAXIMS. All rights reserved.
// WARNING: DO NOT MODIFY the content of this file

package ims.RefMan.vo.beans;

/**
 * Generated serializable bean counterpart of
 * {@link ims.RefMan.vo.PatientElectiveListTCIForCancelationVo}. Code is
 * byte-identical to the generator output; only comments have been added.
 */
public class PatientElectiveListTCIForCancelationVoBean extends ims.vo.ValueObjectBean
{
	public PatientElectiveListTCIForCancelationVoBean()
	{
	}
	// Builds the bean from its value object (no identity map).
	public PatientElectiveListTCIForCancelationVoBean(ims.RefMan.vo.PatientElectiveListTCIForCancelationVo vo)
	{
		this.id = vo.getBoId();
		this.version = vo.getBoVersion();
		this.currentoutcome = vo.getCurrentOutcome() == null ? null : (ims.RefMan.vo.beans.TCIOutcomeForPatientElectiveListVoBean)vo.getCurrentOutcome().getBean();
		this.outcomehistory = vo.getOutcomeHistory() == null ? null : vo.getOutcomeHistory().getBeanCollection();
	}
	// Re-populates this bean, resolving shared references through the map.
	public void populate(ims.vo.ValueObjectBeanMap map, ims.RefMan.vo.PatientElectiveListTCIForCancelationVo vo)
	{
		this.id = vo.getBoId();
		this.version = vo.getBoVersion();
		this.currentoutcome = vo.getCurrentOutcome() == null ? null : (ims.RefMan.vo.beans.TCIOutcomeForPatientElectiveListVoBean)vo.getCurrentOutcome().getBean(map);
		this.outcomehistory = vo.getOutcomeHistory() == null ? null : vo.getOutcomeHistory().getBeanCollection();
	}
	public ims.RefMan.vo.PatientElectiveListTCIForCancelationVo buildVo()
	{
		return this.buildVo(new ims.vo.ValueObjectBeanMap());
	}
	// Converts the bean back to its value object, reusing a VO from the
	// identity map if this bean was already converted within the same graph.
	public ims.RefMan.vo.PatientElectiveListTCIForCancelationVo buildVo(ims.vo.ValueObjectBeanMap map)
	{
		ims.RefMan.vo.PatientElectiveListTCIForCancelationVo vo = null;
		if(map != null)
			vo = (ims.RefMan.vo.PatientElectiveListTCIForCancelationVo)map.getValueObject(this);
		if(vo == null)
		{
			vo = new ims.RefMan.vo.PatientElectiveListTCIForCancelationVo();
			map.addValueObject(this, vo);
			vo.populate(map, this);
		}
		return vo;
	}
	public Integer getId()
	{
		return this.id;
	}
	public void setId(Integer value)
	{
		this.id = value;
	}
	public int getVersion()
	{
		return this.version;
	}
	public void setVersion(int value)
	{
		this.version = value;
	}
	public ims.RefMan.vo.beans.TCIOutcomeForPatientElectiveListVoBean getCurrentOutcome()
	{
		return this.currentoutcome;
	}
	public void setCurrentOutcome(ims.RefMan.vo.beans.TCIOutcomeForPatientElectiveListVoBean value)
	{
		this.currentoutcome = value;
	}
	public ims.RefMan.vo.beans.TCIOutcomeForPatientElectiveListVoBean[] getOutcomeHistory()
	{
		return this.outcomehistory;
	}
	public void setOutcomeHistory(ims.RefMan.vo.beans.TCIOutcomeForPatientElectiveListVoBean[] value)
	{
		this.outcomehistory = value;
	}

	private Integer id;
	private int version;
	private ims.RefMan.vo.beans.TCIOutcomeForPatientElectiveListVoBean currentoutcome;
	private ims.RefMan.vo.beans.TCIOutcomeForPatientElectiveListVoBean[] outcomehistory;
}
open-health-hub/openMAXIMS
openmaxims_workspace/ValueObjects/src/ims/RefMan/vo/beans/PatientElectiveListTCIForCancelationVoBean.java
Java
agpl-3.0
2,832
module ElasticsearchHelper def searchable_types { :all => _("All results"), :text_article => _("Articles"), :uploaded_file => _("Files"), :community => _("Communities"), :event => _("Events"), :person => _("People") } end def sort_types sorts = { :relevance => _("Relevance"), :lexical => _("Alphabetical"), :more_recent => _("More recent") } selected_type = (params[:selected_type] || nil) if selected_type and selected_type.to_sym != :all klass = selected_type.to_s.classify.constantize sorts.update klass.specific_sort if klass.respond_to? :specific_sort end sorts end def process_results selected_type = (params[:selected_type].presence|| :all).to_sym selected_type == :all ? search_from_all_models : search_from_model(selected_type) end private def search_from_all_models begin filter = (params[:filter] || "").to_sym query = get_query params[:query], sort_by: get_sort_by(filter), categories: params[:categories] Elasticsearch::Model.search(query,searchable_models, size: default_per_page(params[:per_page])).page(params[:page]).records rescue [] end end def search_from_model(model) begin klass = model.to_s.classify.constantize filter = (params[:filter] || "").to_sym query = get_query params[:query], klass: klass, sort_by: get_sort_by(filter ,klass), categories: params[:categories] klass.search(query, size: default_per_page(params[:per_page])).page(params[:page]).records rescue [] end end def default_per_page(per_page=nil) per_page || 10 end def get_sort_by(sort_by, klass=nil) case sort_by when :lexical {"name.raw" => {"order" => "asc"}} when :more_recent {"created_at" => {"order" => "desc"}} else (klass and klass.respond_to?(:get_sort_by)) ? 
klass.get_sort_by(sort_by) : nil end end def searchable_models searchable_types.except(:all).keys.map {|model| model.to_s.classify.constantize} end def query_string(expression="", models=[]) return { match_all: {} } if not expression { query_string: { query: "*"+expression.downcase.split.join('* *')+"*", fields: fields_from_models(models), tie_breaker: 0.4, minimum_should_match: "100%" } } end def query_method(expression="", models=[], categories=[]) query = {} current_user ||= nil query[:query] = { filtered: { query: query_string(expression, models), filter: { bool: {} } } } query[:query][:filtered][:filter][:bool] = { should: models.map {|model| model.filter(environment: @environment.id, user: current_user )} } unless categories.blank? query[:query][:filtered][:filter][:bool][:must] = models.first.filter_category(categories) end query end def get_query(text="", options={}) klass = options[:klass] sort_by = options[:sort_by] categories = (options[:categories] || "").split(",") categories = categories.map(&:to_i) models = (klass.nil?) ? searchable_models : [klass] query = query_method(text, models, categories) query[:sort] = sort_by if sort_by query end def fields_from_models(klasses) fields = Set.new klasses.each do |klass| klass::SEARCHABLE_FIELDS.map do |key, value| if value and value[:weight] fields.add "#{key}^#{value[:weight]}" else fields.add "#{key}" end end end fields.to_a end end
AlessandroCaetano/noosfero
plugins/elasticsearch/helpers/elasticsearch_helper.rb
Ruby
agpl-3.0
3,705
/** * Copyright 2007-2015, Kaazing Corporation. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kaazing.k3po.lang.internal.ast; public final class AstAbortNode extends AstCommandNode { @Override public <R, P> R accept(Visitor<R, P> visitor, P parameter) throws Exception { return visitor.visit(this, parameter); } @Override protected int hashTo() { return getClass().hashCode(); } @Override protected boolean equalTo(AstRegion that) { return that instanceof AstAbortNode; } @Override protected void describe(StringBuilder buf) { super.describe(buf); buf.append("abort\n"); } }
mgherghe/k3po
lang/src/main/java/org/kaazing/k3po/lang/internal/ast/AstAbortNode.java
Java
agpl-3.0
1,214
package dr.inferencexml.operators; import dr.inference.model.Parameter; import dr.inference.operators.*; import dr.xml.*; /** */ public class UpDownOperatorParser extends AbstractXMLObjectParser { public static final String UP_DOWN_OPERATOR = "upDownOperator"; public static final String UP = "up"; public static final String DOWN = "down"; public static final String SCALE_FACTOR = ScaleOperatorParser.SCALE_FACTOR; public String getParserName() { return UP_DOWN_OPERATOR; } private Scalable[] getArgs(final XMLObject list) throws XMLParseException { Scalable[] args = new Scalable[list.getChildCount()]; for (int k = 0; k < list.getChildCount(); ++k) { final Object child = list.getChild(k); if (child instanceof Parameter) { args[k] = new Scalable.Default((Parameter) child); } else if (child instanceof Scalable) { args[k] = (Scalable) child; } else { XMLObject xo = (XMLObject) child; if (xo.hasAttribute("count")) { final int count = xo.getIntegerAttribute("count"); final Scalable s = (Scalable) xo.getChild(Scalable.class); args[k] = new Scalable() { public int scale(double factor, int nDims) throws OperatorFailedException { return s.scale(factor, count); } public String getName() { return s.getName() + "(" + count + ")"; } }; } else if (xo.hasAttribute("df")) { final int df = xo.getIntegerAttribute("df"); final Scalable s = (Scalable) xo.getChild(Scalable.class); args[k] = new Scalable() { public int scale(double factor, int nDims) throws OperatorFailedException { s.scale(factor, -1); return df; } public String getName() { return s.getName() + "[df=" + df + "]"; } }; } } } return args; } public Object parseXMLObject(XMLObject xo) throws XMLParseException { final double scaleFactor = xo.getDoubleAttribute(SCALE_FACTOR); final double weight = xo.getDoubleAttribute(MCMCOperator.WEIGHT); final CoercionMode mode = CoercionMode.parseMode(xo); final Scalable[] upArgs = getArgs(xo.getChild(UP)); final Scalable[] dnArgs = getArgs(xo.getChild(DOWN)); return new 
UpDownOperator(upArgs, dnArgs, scaleFactor, weight, mode); } public String getParserDescription() { return "This element represents an operator that scales two parameters in different directions. " + "Each operation involves selecting a scale uniformly at random between scaleFactor and 1/scaleFactor. " + "The up parameter is multipled by this scale and the down parameter is divided by this scale."; } public Class getReturnType() { return UpDownOperator.class; } public XMLSyntaxRule[] getSyntaxRules() { return rules; } private final XMLSyntaxRule[] ee = { new ElementRule(Scalable.class, true), new ElementRule(Parameter.class, true), new ElementRule("scale", new XMLSyntaxRule[]{ AttributeRule.newIntegerRule("count", true), AttributeRule.newIntegerRule("df", true), new ElementRule(Scalable.class), }, true), }; private final XMLSyntaxRule[] rules = { AttributeRule.newDoubleRule(SCALE_FACTOR), AttributeRule.newDoubleRule(MCMCOperator.WEIGHT), AttributeRule.newBooleanRule(CoercableMCMCOperator.AUTO_OPTIMIZE, true), // Allow an arbitrary number of Parameters or Scalable in up or down new ElementRule(UP, ee, 1, Integer.MAX_VALUE), new ElementRule(DOWN, ee, 1, Integer.MAX_VALUE), }; }
danieljue/beast-mcmc
src/dr/inferencexml/operators/UpDownOperatorParser.java
Java
lgpl-2.1
4,368
/* * JBoss, Home of Professional Open Source. * Copyright 2010, Red Hat, Inc., and individual contributors * as indicated by the @author tags. See the copyright.txt file in the * distribution for a full listing of individual contributors. * * This is free software; you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this software; if not, write to the Free * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA, or see the FSF site: http://www.fsf.org. */ package org.jboss.as.server.deployment.annotation; import java.lang.ref.Reference; import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import org.jboss.as.server.deployment.AttachmentList; import org.jboss.as.server.deployment.Attachments; import org.jboss.as.server.deployment.DeploymentPhaseContext; import org.jboss.as.server.deployment.DeploymentUnit; import org.jboss.as.server.deployment.DeploymentUnitProcessingException; import org.jboss.as.server.deployment.DeploymentUnitProcessor; import org.jboss.as.server.deployment.DeploymentUtils; import org.jboss.as.server.deployment.SubDeploymentMarker; import org.jboss.as.server.deployment.module.AdditionalModuleSpecification; import org.jboss.as.server.deployment.module.ModuleDependency; import org.jboss.as.server.deployment.module.ModuleRootMarker; import 
org.jboss.as.server.deployment.module.ResourceRoot; import org.jboss.jandex.Index; import org.jboss.modules.ModuleIdentifier; import org.jboss.modules.ModuleLoader; /** * Processor responsible for creating and attaching a {@link CompositeIndex} for a deployment. * <p/> * This must run after the {@link org.jboss.as.server.deployment.module.ManifestDependencyProcessor} * * @author John Bailey * @author Stuart Douglas */ public class CompositeIndexProcessor implements DeploymentUnitProcessor { public void deploy(DeploymentPhaseContext phaseContext) throws DeploymentUnitProcessingException { final DeploymentUnit deploymentUnit = phaseContext.getDeploymentUnit(); final ModuleLoader moduleLoader = deploymentUnit.getAttachment(Attachments.SERVICE_MODULE_LOADER); final Reference<AnnotationIndexSupport> indexSupportRef = deploymentUnit.getAttachment(Attachments.ANNOTATION_INDEX_SUPPORT); assert indexSupportRef != null; final Boolean computeCompositeIndex = deploymentUnit.getAttachment(Attachments.COMPUTE_COMPOSITE_ANNOTATION_INDEX); if (computeCompositeIndex != null && !computeCompositeIndex) { return; } DeploymentUnit top = deploymentUnit.getParent() == null ? 
deploymentUnit : deploymentUnit.getParent(); Map<ModuleIdentifier, AdditionalModuleSpecification> additionalModuleSpecificationMap = new HashMap<>(); for(AdditionalModuleSpecification i : top.getAttachmentList(Attachments.ADDITIONAL_MODULES)) { additionalModuleSpecificationMap.put(i.getModuleIdentifier(), i); } Map<ModuleIdentifier, CompositeIndex> additionalAnnotationIndexes = new HashMap<ModuleIdentifier, CompositeIndex>(); final List<ModuleIdentifier> additionalModuleIndexes = deploymentUnit.getAttachmentList(Attachments.ADDITIONAL_ANNOTATION_INDEXES); final List<Index> indexes = new ArrayList<Index>(); Map<ModuleIdentifier, DeploymentUnit> subdeploymentDependencies = buildSubdeploymentDependencyMap(deploymentUnit); for (final ModuleIdentifier moduleIdentifier : additionalModuleIndexes) { AdditionalModuleSpecification additional = additionalModuleSpecificationMap.get(moduleIdentifier); if(additional != null) { // This module id refers to a deployment-specific module created based on a MANIFEST.MF Class-Path entry // or jboss-deployment-structure.xml or equivalent jboss-all.xml content. Obtain indexes from its resources. final List<Index> moduleIndexes = new ArrayList<>(); for(ResourceRoot resource : additional.getResourceRoots()) { ResourceRootIndexer.indexResourceRoot(resource); Index indexAttachment = resource.getAttachment(Attachments.ANNOTATION_INDEX); if(indexAttachment != null) { indexes.add(indexAttachment); moduleIndexes.add(indexAttachment); } } if (!moduleIndexes.isEmpty()) { additionalAnnotationIndexes.put(moduleIdentifier, new CompositeIndex(moduleIndexes)); } } else if (subdeploymentDependencies.containsKey(moduleIdentifier)) { // This module id refers to a subdeployment. Find the indices for its resources. 
List<ResourceRoot> resourceRoots = subdeploymentDependencies.get(moduleIdentifier).getAttachment(Attachments.RESOURCE_ROOTS); final List<ResourceRoot> allResourceRoots = new ArrayList<>(); if (resourceRoots != null) { allResourceRoots.addAll(resourceRoots); } final ResourceRoot deploymentRoot = subdeploymentDependencies.get(moduleIdentifier).getAttachment(Attachments.DEPLOYMENT_ROOT); if (ModuleRootMarker.isModuleRoot(deploymentRoot)) { allResourceRoots.add(deploymentRoot); } final List<Index> moduleIndexes = new ArrayList<>(); for (ResourceRoot resourceRoot : allResourceRoots) { Index index = resourceRoot.getAttachment(Attachments.ANNOTATION_INDEX); if (index != null) { indexes.add(index); moduleIndexes.add(index); } } if (!moduleIndexes.isEmpty()) { additionalAnnotationIndexes.put(moduleIdentifier, new CompositeIndex(moduleIndexes)); } } else { // This module id refers to a module external to the deployment. Get the indices from the support object. CompositeIndex externalModuleIndexes; AnnotationIndexSupport annotationIndexSupport = indexSupportRef.get(); if (annotationIndexSupport != null) { externalModuleIndexes = annotationIndexSupport.getAnnotationIndices(moduleIdentifier.toString(), moduleLoader); } else { // This implies the DeploymentUnitService was restarted after the original operation that held // the strong ref to the AnnotationIndexSupport. So we can't benefit from caching. Just calculate // the indices without worrying about caching. 
externalModuleIndexes = AnnotationIndexSupport.indexModule(moduleIdentifier.toString(), moduleLoader); } indexes.addAll(externalModuleIndexes.indexes); additionalAnnotationIndexes.put(moduleIdentifier, externalModuleIndexes); } } deploymentUnit.putAttachment(Attachments.ADDITIONAL_ANNOTATION_INDEXES_BY_MODULE, additionalAnnotationIndexes); final List<ResourceRoot> allResourceRoots = new ArrayList<ResourceRoot>(); final List<ResourceRoot> resourceRoots = deploymentUnit.getAttachmentList(Attachments.RESOURCE_ROOTS); for (ResourceRoot resourceRoot : resourceRoots) { // do not add child sub deployments to the composite index if (!SubDeploymentMarker.isSubDeployment(resourceRoot) && ModuleRootMarker.isModuleRoot(resourceRoot)) { allResourceRoots.add(resourceRoot); } } //we merge all Class-Path annotation indexes into the deployments composite index //this means that if component defining annotations (e.g. @Stateless) are specified in a Class-Path //entry references by two sub deployments this component will be created twice. 
//the spec expects this behaviour, and explicitly warns not to put component defining annotations //in Class-Path items allResourceRoots.addAll(handleClassPathItems(deploymentUnit)); final ResourceRoot deploymentRoot = deploymentUnit.getAttachment(Attachments.DEPLOYMENT_ROOT); if (ModuleRootMarker.isModuleRoot(deploymentRoot)) { allResourceRoots.add(deploymentRoot); } for (ResourceRoot resourceRoot : allResourceRoots) { Index index = resourceRoot.getAttachment(Attachments.ANNOTATION_INDEX); if (index != null) { indexes.add(index); } } deploymentUnit.putAttachment(Attachments.COMPOSITE_ANNOTATION_INDEX, new CompositeIndex(indexes)); } private Map<ModuleIdentifier, DeploymentUnit> buildSubdeploymentDependencyMap(DeploymentUnit deploymentUnit) { Set<ModuleIdentifier> depModuleIdentifiers = new HashSet<>(); for (ModuleDependency dep: deploymentUnit.getAttachment(Attachments.MODULE_SPECIFICATION).getAllDependencies()) { depModuleIdentifiers.add(dep.getIdentifier()); } DeploymentUnit top = deploymentUnit.getParent()==null?deploymentUnit:deploymentUnit.getParent(); Map<ModuleIdentifier, DeploymentUnit> res = new HashMap<>(); AttachmentList<DeploymentUnit> subDeployments = top.getAttachment(Attachments.SUB_DEPLOYMENTS); if (subDeployments != null) { for (DeploymentUnit subDeployment : subDeployments) { ModuleIdentifier moduleIdentifier = subDeployment.getAttachment(Attachments.MODULE_IDENTIFIER); if (depModuleIdentifiers.contains(moduleIdentifier)) { res.put(moduleIdentifier, subDeployment); } } } return res; } /** * Loops through all resource roots that have been made available transitively via Class-Path entries, and * adds them to the list of roots to be processed. */ private Collection<? 
extends ResourceRoot> handleClassPathItems(final DeploymentUnit deploymentUnit) { final Set<ResourceRoot> additionalRoots = new HashSet<ResourceRoot>(); final ArrayDeque<ResourceRoot> toProcess = new ArrayDeque<ResourceRoot>(); final List<ResourceRoot> resourceRoots = DeploymentUtils.allResourceRoots(deploymentUnit); toProcess.addAll(resourceRoots); final Set<ResourceRoot> processed = new HashSet<ResourceRoot>(resourceRoots); while (!toProcess.isEmpty()) { final ResourceRoot root = toProcess.pop(); final List<ResourceRoot> classPathRoots = root.getAttachmentList(Attachments.CLASS_PATH_RESOURCE_ROOTS); for(ResourceRoot cpRoot : classPathRoots) { if(!processed.contains(cpRoot)) { additionalRoots.add(cpRoot); toProcess.add(cpRoot); processed.add(cpRoot); } } } return additionalRoots; } public void undeploy(DeploymentUnit deploymentUnit) { deploymentUnit.removeAttachment(Attachments.COMPOSITE_ANNOTATION_INDEX); } }
yersan/wildfly-core
server/src/main/java/org/jboss/as/server/deployment/annotation/CompositeIndexProcessor.java
Java
lgpl-2.1
11,861
#include <osgWidget/VncClient> #include <osgDB/Registry> #include <osgViewer/Viewer> #include <osgViewer/ViewerEventHandlers> class EscapeHandler : public osgGA::GUIEventHandler { public: EscapeHandler() {} bool handle(const osgGA::GUIEventAdapter& ea,osgGA::GUIActionAdapter& aa) { if (ea.getHandled()) return false; switch(ea.getEventType()) { case(osgGA::GUIEventAdapter::KEYUP): { if (ea.getKey()==osgGA::GUIEventAdapter::KEY_Escape) { osgViewer::View* view = dynamic_cast<osgViewer::View*>(&aa); if (view) view->getViewerBase()->setDone(true); return true; } break; } default: return false; } return false; } }; int main(int argc,char** argv) { osg::ArgumentParser arguments(&argc, argv); arguments.getApplicationUsage()->addCommandLineOption("--login <url> <username> <password>", "Provide authentication information for http file access."); arguments.getApplicationUsage()->addCommandLineOption("--password <password>", "Provide password for any vnc url on command line not mentioned in --login."); osgViewer::Viewer viewer(arguments); osgWidget::GeometryHints hints(osg::Vec3(0.0f,0.0f,0.0f), osg::Vec3(1.0f,0.0f,0.0f), osg::Vec3(0.0f,0.0f,1.0f), osg::Vec4(1.0f,1.0f,1.0f,1.0f), osgWidget::GeometryHints::RESIZE_HEIGHT_TO_MAINTAINCE_ASPECT_RATIO); osg::ref_ptr<osg::Group> group = new osg::Group; std::string password; while(arguments.read("--password",password)) { } std::string url, username; while (arguments.read("--login", url, username, password)) { osgDB::Registry::instance()->getOrCreateAuthenticationMap()->addAuthenticationDetails( url, new osgDB::AuthenticationDetails(username, password) ); } for(int i=1; i<arguments.argc(); ++i) { if (!arguments.isOption(i)) { std::string hostname = arguments[i]; if (!password.empty()) { osgDB::AuthenticationMap* authenticationMap = osgDB::Registry::instance()->getOrCreateAuthenticationMap(); const osgDB::AuthenticationDetails* details = authenticationMap->getAuthenticationDetails(hostname); if (details == NULL) { 
authenticationMap->addAuthenticationDetails(hostname, new osgDB::AuthenticationDetails("", password)); } } osg::ref_ptr<osgWidget::VncClient> vncClient = new osgWidget::VncClient; if (vncClient->connect(arguments[i], hints)) { group->addChild(vncClient.get()); hints.position.x() += 1.1f; } } } viewer.setSceneData(group.get()); viewer.addEventHandler(new osgViewer::StatsHandler); // add a custom escape handler, but disable the standard viewer one to enable the vnc images to handle // the escape without it getting caught by the viewer. viewer.addEventHandler(new EscapeHandler); viewer.setKeyEventSetsDone(0); return viewer.run(); }
openscenegraph/osg
examples/osgvnc/osgvnc.cpp
C++
lgpl-2.1
3,495
package com.griddynamics.jagger.webclient.client; import com.google.gwt.core.client.GWT; import com.google.gwt.user.client.rpc.RemoteService; import com.google.gwt.user.client.rpc.RemoteServiceRelativePath; import com.griddynamics.jagger.webclient.client.dto.WebClientStartProperties; import java.util.Map; import java.util.Set; @RemoteServiceRelativePath("rpc/CommonDataService") public interface CommonDataService extends RemoteService { public WebClientStartProperties getWebClientStartProperties(); public static class Async { private static final CommonDataServiceAsync ourInstance = (CommonDataServiceAsync) GWT.create(CommonDataService.class); public static CommonDataServiceAsync getInstance() { return ourInstance; } } }
vladimir-bukhtoyarov/jagger
webclient/src/main/java/com/griddynamics/jagger/webclient/client/CommonDataService.java
Java
lgpl-2.1
785
/** * * Copyright (c) 2014, the Railo Company Ltd. All rights reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library. If not, see <http://www.gnu.org/licenses/>. * **/ package lucee.transformer.bytecode.statement.tag; import lucee.transformer.bytecode.BytecodeContext; import lucee.transformer.bytecode.BytecodeException; import lucee.transformer.bytecode.Position; import lucee.transformer.bytecode.visitor.IfVisitor; public final class TagJavaScript extends TagBaseNoFinal { public TagJavaScript(Position start,Position end) { super(start,end); } /** * @see lucee.transformer.bytecode.statement.StatementBase#_writeOut(org.objectweb.asm.commons.GeneratorAdapter) */ public void _writeOut(BytecodeContext bc) throws BytecodeException { IfVisitor ifv=new IfVisitor(); ifv.visitBeforeExpression(); bc.getAdapter().push(true); ifv.visitAfterExpressionBeforeBody(bc); getBody().writeOut(bc); ifv.visitAfterBody(bc); } }
paulklinkenberg/Lucee4
lucee-java/lucee-core/src/lucee/transformer/bytecode/statement/tag/TagJavaScript.java
Java
lgpl-2.1
1,538
/* * A Gradle plugin for the creation of Minecraft mods and MinecraftForge plugins. * Copyright (C) 2013 Minecraft Forge * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 * USA */ package net.minecraftforge.gradle.util.json.fgversion; import java.lang.reflect.Type; import java.util.List; import com.google.gson.JsonDeserializationContext; import com.google.gson.JsonDeserializer; import com.google.gson.JsonElement; import com.google.gson.JsonParseException; import com.google.gson.reflect.TypeToken; public class FGVersionDeserializer implements JsonDeserializer<FGVersionWrapper> { @Override public FGVersionWrapper deserialize(JsonElement json, Type typeOfT, JsonDeserializationContext context) throws JsonParseException { FGVersionWrapper wrapper = new FGVersionWrapper(); List<FGVersion> versions = context.deserialize(json, new TypeToken<List<FGVersion>>() {}.getType()); for (int i = 0; i < versions.size(); i++) { FGVersion v = versions.get(i); v.index = i; wrapper.versions.add(v.version); wrapper.versionObjects.put(v.version, v); } return wrapper; } }
killjoy1221/ForgeGradle
src/main/java/net/minecraftforge/gradle/util/json/fgversion/FGVersionDeserializer.java
Java
lgpl-2.1
1,875
"""pure-Python sugar wrappers for core 0MQ objects.""" #----------------------------------------------------------------------------- # Copyright (C) 2013 Brian Granger, Min Ragan-Kelley # # This file is part of pyzmq # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from zmq.sugar import ( constants, context, frame, poll, socket, tracker, version ) from zmq import error __all__ = ['constants'] for submod in ( constants, context, error, frame, poll, socket, tracker, version ): __all__.extend(submod.__all__) from zmq.error import * from zmq.sugar.context import * from zmq.sugar.tracker import * from zmq.sugar.socket import * from zmq.sugar.constants import * from zmq.sugar.frame import * from zmq.sugar.poll import * # from zmq.sugar.stopwatch import * # from zmq.sugar._device import * from zmq.sugar.version import *
IsCoolEntertainment/debpkg_python-pyzmq
zmq/sugar/__init__.py
Python
lgpl-3.0
1,187
namespace StockSharp.Algo.Strategies
{
	using System;
	using System.Collections.Generic;
	using System.ComponentModel;

	using Ecng.Common;
	using Ecng.Serialization;

	/// <summary>
	/// The strategy parameter.
	/// </summary>
	public interface IStrategyParam : IPersistable
	{
		/// <summary>
		/// Parameter name.
		/// </summary>
		string Name { get; }

		/// <summary>
		/// Parameter value.
		/// </summary>
		object Value { get; set; }

		/// <summary>
		/// The From value at optimization.
		/// </summary>
		object OptimizeFrom { get; set; }

		/// <summary>
		/// The To value at optimization.
		/// </summary>
		object OptimizeTo { get; set; }

		/// <summary>
		/// The Step value at optimization.
		/// </summary>
		object OptimizeStep { get; set; }
	}

	/// <summary>
	/// Wrapper for typified access to the strategy parameter.
	/// </summary>
	/// <typeparam name="T">The type of the parameter value.</typeparam>
	public class StrategyParam<T> : IStrategyParam
	{
		private readonly Strategy _strategy;

		/// <summary>
		/// Create <see cref="StrategyParam{T}"/>.
		/// </summary>
		/// <param name="strategy">Strategy.</param>
		/// <param name="name">Parameter name.</param>
		public StrategyParam(Strategy strategy, string name)
			: this(strategy, name, default(T))
		{
		}

		/// <summary>
		/// Create <see cref="StrategyParam{T}"/>.
		/// </summary>
		/// <param name="strategy">Strategy.</param>
		/// <param name="name">Parameter name.</param>
		/// <param name="initialValue">The initial value.</param>
		public StrategyParam(Strategy strategy, string name, T initialValue)
		{
			if (strategy == null)
				throw new ArgumentNullException("strategy");

			if (name.IsEmpty())
				throw new ArgumentNullException("name");

			_strategy = strategy;
			Name = name;
			_value = initialValue;

			// The parameter registers itself with the owning strategy.
			_strategy.Parameters.Add(this);
		}

		/// <summary>
		/// Parameter name.
		/// </summary>
		public string Name { get; private set; }

		private bool _allowNull = typeof(T).IsNullable();

		/// <summary>
		/// Whether <see cref="Value"/> can accept a value of <see langword="null"/>.
		/// Defaults to <see langword="true"/> for nullable types.
		/// </summary>
		public bool AllowNull
		{
			get { return _allowNull; }
			set { _allowNull = value; }
		}

		private T _value;

		/// <summary>
		/// Parameter value. Assigning an equal value is a no-op; otherwise the
		/// strategy is notified via <c>RaiseParametersChanged</c>, and inner
		/// property changes of the value (when it implements
		/// <see cref="INotifyPropertyChanged"/>) are also forwarded as parameter changes.
		/// </summary>
		public T Value
		{
			get { return _value; }
			set
			{
				if (!AllowNull && value.IsNull())
					throw new ArgumentNullException("value");

				if (EqualityComparer<T>.Default.Equals(_value, value))
					return;

				// Stop tracking inner state changes of the previous value.
				var propChange = _value as INotifyPropertyChanged;
				if (propChange != null)
					propChange.PropertyChanged -= OnValueInnerStateChanged;

				_value = value;
				_strategy.RaiseParametersChanged(Name);

				// Begin tracking inner state changes of the new value.
				propChange = _value as INotifyPropertyChanged;
				if (propChange != null)
					propChange.PropertyChanged += OnValueInnerStateChanged;
			}
		}

		/// <summary>
		/// The From value at optimization.
		/// </summary>
		public object OptimizeFrom { get; set; }

		/// <summary>
		/// The To value at optimization.
		/// </summary>
		public object OptimizeTo { get; set; }

		/// <summary>
		/// The Step value at optimization.
		/// </summary>
		public object OptimizeStep { get; set; }

		private void OnValueInnerStateChanged(object sender, PropertyChangedEventArgs e)
		{
			_strategy.RaiseParametersChanged(Name);
		}

		object IStrategyParam.Value
		{
			get { return Value; }
			set { Value = (T)value; }
		}

		/// <summary>
		/// Load settings.
		/// </summary>
		/// <param name="storage">Settings storage.</param>
		public void Load(SettingsStorage storage)
		{
			Name = storage.GetValue<string>("Name");
			Value = storage.GetValue<T>("Value");
			OptimizeFrom = storage.GetValue<T>("OptimizeFrom");
			OptimizeTo = storage.GetValue<T>("OptimizeTo");
			// FIX: was GetValue<object>, inconsistent with OptimizeFrom/OptimizeTo above;
			// read with the same typed accessor so all three round-trip identically.
			OptimizeStep = storage.GetValue<T>("OptimizeStep");
		}

		/// <summary>
		/// Save settings.
		/// </summary>
		/// <param name="storage">Settings storage.</param>
		public void Save(SettingsStorage storage)
		{
			storage.SetValue("Name", Name);
			storage.SetValue("Value", Value);
			storage.SetValue("OptimizeFrom", OptimizeFrom);
			storage.SetValue("OptimizeTo", OptimizeTo);
			storage.SetValue("OptimizeStep", OptimizeStep);
		}
	}

	/// <summary>
	/// Helper class for <see cref="StrategyParam{T}"/>.
	/// </summary>
	public static class StrategyParamHelper
	{
		/// <summary>
		/// Create <see cref="StrategyParam{T}"/>.
		/// </summary>
		/// <typeparam name="T">The type of the parameter value.</typeparam>
		/// <param name="strategy">Strategy.</param>
		/// <param name="name">Parameter name.</param>
		/// <param name="initialValue">The initial value.</param>
		/// <returns>The strategy parameter.</returns>
		public static StrategyParam<T> Param<T>(this Strategy strategy, string name, T initialValue = default(T))
		{
			return new StrategyParam<T>(strategy, name, initialValue);
		}

		/// <summary>
		/// Fill the optimization range of a <see cref="StrategyParam{T}"/>.
		/// </summary>
		/// <typeparam name="T">The type of the parameter value.</typeparam>
		/// <param name="param">The strategy parameter.</param>
		/// <param name="optimizeFrom">The From value at optimization.</param>
		/// <param name="optimizeTo">The To value at optimization.</param>
		/// <param name="optimizeStep">The Step value at optimization.</param>
		/// <returns>The strategy parameter.</returns>
		public static StrategyParam<T> Optimize<T>(this StrategyParam<T> param, T optimizeFrom = default(T), T optimizeTo = default(T), T optimizeStep = default(T))
		{
			if (param == null)
				throw new ArgumentNullException("param");

			param.OptimizeFrom = optimizeFrom;
			param.OptimizeTo = optimizeTo;
			param.OptimizeStep = optimizeStep;

			return param;
		}
	}
}
donaldlee2008/StockSharp
Algo/Strategies/StrategyParam.cs
C#
lgpl-3.0
5,771
/* * Copyright 2008 Novamente LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package relex; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; import java.util.HashSet; import java.util.Map; import relex.corpus.DocSplitter; import relex.corpus.DocSplitterFactory; import relex.output.CompactView; /** * The WebFormat class provides the central processing point for parsing * sentences and extracting semantic relationships from them. The main() * proceedure is usable as a stand-alone document analyzer; it supports * several flags modifying the displayed output. * * The primary output format generated by this class is the so-called * "compact file format". This format is usefule for the long-term * storage of parse results. This format is intended to serve as input * to later text processing stages -- in this way, if the later stages * are modified, one does not have to re-parse the original English input. * That is, reading in the "compact file format" is hundreds/thousands of * times less CPU-expensive than the original English-language parse. * * The primary interface is the processSentence() method, * which accepts one sentence at a time, parses it, and extracts * relationships from it. 
 */
public class WebFormat extends RelationExtractor
{
	/**
	 * Main entry point.
	 *
	 * Reads sentences from stdin (terminated by EOF or a line equal to
	 * "END."), parses each one, and prints the compact-view output to
	 * stdout, framed by cv.header()/cv.footer().  Parse statistics are
	 * written to stderr every 20 sentences.
	 */
	public static void main(String[] args)
	{
		String callString = "WebFormat" +
			" [-h (show this help)]" +
			" [-l (do not show parse links)]" +
			" [-m (do not show parse metadata)]" +
			" [-n max number of parses to display]" +
			" [-t (do not show constituent tree)]" +
			" [--url source URL]" +
			" [--maxParseSeconds N]";
		HashSet<String> flags = new HashSet<String>();
		flags.add("-h");
		flags.add("-l");
		flags.add("-m");
		flags.add("-t");
		HashSet<String> opts = new HashSet<String>();
		opts.add("-n");
		opts.add("--maxParseSeconds");
		opts.add("--url");
		Map<String,String> commandMap = CommandLineArgParser.parse(args, opts, flags);

		String url = null;
		String sentence = null;
		// NOTE(review): this initializer is dead in the normal path — the try
		// block below unconditionally overwrites maxParses (falling back to 1,
		// not 30, when -n is absent).  Confirm which default is intended.
		int maxParses = 30;
		int maxParseSeconds = 60;

		CompactView cv = new CompactView();

		// Display flags: each option *disables* the corresponding output section.
		if (commandMap.get("-l") != null) cv.showLinks(false);
		if (commandMap.get("-m") != null) cv.showMetadata(false);
		if (commandMap.get("-t") != null) cv.showConstituents(false);

		// Check for optional command line arguments.
		try
		{
			maxParses = commandMap.get("-n") != null ?
				Integer.parseInt(commandMap.get("-n").toString()) : 1;
			maxParseSeconds = commandMap.get("--maxParseSeconds") != null ?
				Integer.parseInt(commandMap.get("--maxParseSeconds").toString()) : 60;
			url = commandMap.get("--url");
		}
		catch (Exception e)
		{
			System.err.println("Unrecognized parameter.");
			System.err.println(callString);
			e.printStackTrace();
			return;
		}

		if (commandMap.get("-h") != null)
		{
			System.err.println(callString);
			return;
		}

		cv.setMaxParses(maxParses);
		cv.setSourceURL(url);

		WebFormat re = new WebFormat();
		re.setAllowSkippedWords(true);
		re.setMaxParses(maxParses);
		re.setMaxParseSeconds(maxParseSeconds);

		// Pass along the version string.
		cv.setVersion(re.getVersion());

		// If sentence is not passed at command line, read from standard input:
		BufferedReader stdin = new BufferedReader(new InputStreamReader(System.in));
		DocSplitter ds = DocSplitterFactory.create();

		// Collect statistics
		int sentence_count = 0;
		ParseStats stats = new ParseStats();

		System.out.println(cv.header());
		while(true)
		{
			// Read text from stdin.
			while (sentence == null)
			{
				try
				{
					sentence = stdin.readLine();
					// EOF or the explicit "END." sentinel terminates the run.
					if ((sentence == null) || "END.".equals(sentence))
					{
						System.out.println(cv.footer());
						return;
					}
				}
				catch (IOException e)
				{
					// NOTE(review): on an IOException, sentence stays null and
					// the concatenation below feeds the literal string "null "
					// into the splitter; the loop then retries the failed stream.
					System.err.println("Error reading sentence from the standard input!");
				}

				// Buffer up input text, and wait for a whole,
				// complete sentence before continuing.
				ds.addText(sentence + " ");
				sentence = ds.getNextSentence();
			}

			// Drain every complete sentence the splitter has buffered.
			while (sentence != null)
			{
				Sentence sntc = re.processSentence(sentence);

				// Print output
				System.out.println (cv.toString(sntc));

				// Collect statistics
				sentence_count ++;
				stats.bin(sntc);
				if (sentence_count%20 == 0)
				{
					System.err.println ("\n" + stats.toString());
				}

				sentence = ds.getNextSentence();
			}
		}
	}
}

/* ============================ END OF FILE ====================== */
virneo/relex
src/java/relex/WebFormat.java
Java
apache-2.0
5,001
/** * Copyright 2010-2016 Boxfuse GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Private API. No compatibility guarantees provided. */ package org.flywaydb.core.internal.dbsupport.hsql;
nathanvick/flyway
flyway-core/src/main/java/org/flywaydb/core/internal/dbsupport/hsql/package-info.java
Java
apache-2.0
718
package ca.uhn.fhir.jpa.entity;

/*
 * #%L
 * HAPI FHIR JPA Server
 * %%
 * Copyright (C) 2014 - 2015 University Health Network
 * %%
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */

import java.math.BigDecimal;

import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.Table;

/**
 * JPA entity holding one quantity-typed search-parameter index row
 * (table HFJ_SPIDX_QUANTITY): a numeric value plus optional unit
 * system/code, indexed together with the resource type and parameter name.
 */
//@formatter:off
@Entity
@Table(name = "HFJ_SPIDX_QUANTITY" /*, indexes= {@Index(name="IDX_SP_NUMBER", columnList="SP_VALUE")}*/ )
@org.hibernate.annotations.Table(appliesTo = "HFJ_SPIDX_QUANTITY", indexes= {
	@org.hibernate.annotations.Index(name="IDX_SP_QUANTITY", columnNames= {"RES_TYPE", "SP_NAME", "SP_SYSTEM", "SP_UNITS", "SP_VALUE"} )})
//@formatter:on
public class ResourceIndexedSearchParamQuantity extends BaseResourceIndexedSearchParam {

	private static final long serialVersionUID = 1L;

	// NOTE(review): these columns are declared public, unlike the usual
	// private-with-accessors pattern; verify nothing relies on direct
	// field access before tightening visibility.

	// Code system URI qualifying the unit (e.g. UCUM), nullable.
	@Column(name = "SP_SYSTEM", nullable = true, length = 100)
	public String mySystem;

	// Human/coded unit string, nullable.
	@Column(name = "SP_UNITS", nullable = true, length = 100)
	public String myUnits;

	// The numeric quantity value being indexed.
	@Column(name = "SP_VALUE", nullable = true)
	public BigDecimal myValue;

	/**
	 * Default constructor, required by JPA.
	 */
	public ResourceIndexedSearchParamQuantity() {
		//nothing
	}

	/**
	 * Convenience constructor populating all indexed columns.
	 */
	public ResourceIndexedSearchParamQuantity(String theParamName, BigDecimal theValue, String theSystem, String theUnits) {
		setParamName(theParamName);
		setSystem(theSystem);
		setValue(theValue);
		setUnits(theUnits);
	}

	public String getSystem() {
		return mySystem;
	}

	public String getUnits() {
		return myUnits;
	}

	public BigDecimal getValue() {
		return myValue;
	}

	public void setSystem(String theSystem) {
		mySystem = theSystem;
	}

	public void setUnits(String theUnits) {
		myUnits = theUnits;
	}

	public void setValue(BigDecimal theValue) {
		myValue = theValue;
	}

}
dhf0820/hapi-fhir-1.2
hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/entity/ResourceIndexedSearchParamQuantity.java
Java
apache-2.0
2,237
/* * IzPack - Copyright 2001-2013 Julien Ponge, All Rights Reserved. * * http://www.izforge.com/izpack/ http://izpack.codehaus.org/ * * Copyright 2013 Tim Anderson * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package com.izforge.izpack.panels.shortcut; import static com.izforge.izpack.api.handler.Prompt.Option.NO; import static com.izforge.izpack.api.handler.Prompt.Option.YES; import static com.izforge.izpack.api.handler.Prompt.Options.YES_NO; import static com.izforge.izpack.api.handler.Prompt.Type.INFORMATION; import static com.izforge.izpack.api.handler.Prompt.Type.QUESTION; import java.util.Properties; import java.util.logging.Level; import java.util.logging.Logger; import com.izforge.izpack.api.adaptator.IXMLElement; import com.izforge.izpack.api.data.InstallData; import com.izforge.izpack.api.handler.Prompt; import com.izforge.izpack.api.resource.Messages; import com.izforge.izpack.api.resource.Resources; import com.izforge.izpack.installer.console.AbstractConsolePanel; import com.izforge.izpack.installer.console.ConsolePanel; import com.izforge.izpack.installer.data.UninstallData; import com.izforge.izpack.installer.event.InstallerListeners; import com.izforge.izpack.installer.panel.PanelView; import com.izforge.izpack.util.Console; import com.izforge.izpack.util.Housekeeper; import com.izforge.izpack.util.PlatformModelMatcher; import com.izforge.izpack.util.TargetFactory; import com.izforge.izpack.util.os.Shortcut; /** * Console implementation of the {@link 
ShortcutPanel}.
 *
 * @author Tim Anderson
 */
public class ShortcutConsolePanel extends AbstractConsolePanel
{
    private final Prompt prompt;

    private final InstallData installData;

    /** May be {@code null} if ShortcutPanelLogic construction failed in the constructor. */
    private final ShortcutPanelLogic shortcutPanelLogic;

    private static final Logger logger = Logger.getLogger(ShortcutConsolePanel.class.getName());

    /**
     * Constructs a {@code ShortcutConsolePanel}.
     *
     * @param installData   the installation data
     * @param resources     the resources
     * @param uninstallData the uninstallation data
     * @param housekeeper   the housekeeper
     * @param factory       the target factory
     * @param listeners     the installation listeners
     * @param matcher       the platform-model matcher
     * @param prompt        the prompt
     * @param panel         the parent panel/view
     */
    public ShortcutConsolePanel(InstallData installData, Resources resources, UninstallData uninstallData,
                                Housekeeper housekeeper, TargetFactory factory, InstallerListeners listeners,
                                PlatformModelMatcher matcher, Prompt prompt, PanelView<ConsolePanel> panel)
    {
        super(panel);
        ShortcutPanelLogic shortcutPanelLogic = null;
        try
        {
            shortcutPanelLogic = new ShortcutPanelLogic(
                    installData, resources, uninstallData, housekeeper, factory, listeners, matcher);
        }
        catch (Exception exception)
        {
            // Shortcut support is best-effort: remember the failure (field stays null)
            // and let the panel degrade gracefully at run time.
            logger.log(Level.WARNING, "Failed to initialise shortcuts: " + exception.getMessage(), exception);
        }
        this.prompt = prompt;
        this.installData = installData;
        this.shortcutPanelLogic = shortcutPanelLogic;
    }

    /**
     * Runs the panel using the supplied properties.
     *
     * @param installData the installation data
     * @param properties  the properties
     * @return {@code true} if the installation is successful, otherwise {@code false}
     */
    @Override
    public boolean run(InstallData installData, Properties properties)
    {
        boolean result = false;
        if (shortcutPanelLogic != null)
        {
            if (shortcutPanelLogic.isSupported())
            {
                // NOTE(review): intentionally(?) a no-op — when shortcuts ARE
                // supported, the automated run creates nothing and reports
                // failure. Confirm whether shortcut creation was meant here.
            }
            else if (shortcutPanelLogic.skipIfNotSupported())
            {
                result = true;
            }
        }
        return result;
    }

    /**
     * Runs the panel using the specified console.
     *
     * @param installData the installation data
     * @param console     the console
     * @return {@code true} if the panel ran successfully, otherwise {@code false}
     */
    @Override
    public boolean run(InstallData installData, Console console)
    {
        boolean result = true;
        // FIX: the null check must precede any use of shortcutPanelLogic —
        // the constructor leaves it null when ShortcutPanelLogic construction
        // throws, and the original code dereferenced it before checking.
        if (shortcutPanelLogic == null)
        {
            return result;
        }
        try
        {
            shortcutPanelLogic.refreshShortcutData();
        }
        catch (Exception e)
        {
            // Keep the original best-effort behaviour: treat a refresh
            // failure as "nothing to do" rather than aborting the install.
            return result;
        }
        if (shortcutPanelLogic.canCreateShortcuts())
        {
            if (shortcutPanelLogic.isSupported())
            {
                chooseShortcutLocations();
                chooseEffectedUsers();
                chooseProgramGroup(console);
                if (shortcutPanelLogic.isCreateShortcutsImmediately())
                {
                    try
                    {
                        shortcutPanelLogic.createAndRegisterShortcuts();
                    }
                    catch (Exception e)
                    {
                        logger.log(Level.WARNING, e.getMessage(), e);
                    }
                }
                return true;
            }
            else if (!shortcutPanelLogic.skipIfNotSupported())
            {
                Messages messages = installData.getMessages();
                String message = messages.get("ShortcutPanel.alternate.apology");
                prompt.message(INFORMATION, message);
            }
        }
        return result;
    }

    /**
     * Prompt user where the shortcuts should be placed.
     */
    private void chooseShortcutLocations()
    {
        Prompt.Option createMenuShortcuts = prompt.confirm(QUESTION, shortcutPanelLogic.getCreateShortcutsPrompt(),
                                                           YES_NO);
        shortcutPanelLogic.setCreateMenuShortcuts(createMenuShortcuts == YES);

        if (shortcutPanelLogic.hasDesktopShortcuts())
        {
            // Pre-select the prompt default from the panel's checkbox state.
            boolean selected = shortcutPanelLogic.isDesktopShortcutCheckboxSelected();
            Prompt.Option createDesktopShortcuts = prompt.confirm(QUESTION,
                                                                  shortcutPanelLogic.getCreateDesktopShortcutsPrompt(),
                                                                  YES_NO, selected ? YES : NO);
            shortcutPanelLogic.setCreateDesktopShortcuts(createDesktopShortcuts == YES);
        }
        if (shortcutPanelLogic.hasStartupShortcuts())
        {
            boolean selected = shortcutPanelLogic.isStartupShortcutCheckboxSelected();
            Prompt.Option createStartupShortcuts = prompt.confirm(QUESTION,
                                                                  shortcutPanelLogic.getCreateStartupShortcutsPrompt(),
                                                                  YES_NO, selected ? YES : NO);
            shortcutPanelLogic.setCreateStartupShortcuts(createStartupShortcuts == YES);
        }
    }

    /**
     * Choose for which user's the shortcuts should be created for
     */
    private void chooseEffectedUsers()
    {
        boolean isAdmin = shortcutPanelLogic.initUserType();
        if (isAdmin && shortcutPanelLogic.isSupportingMultipleUsers())
        {
            boolean selected = !shortcutPanelLogic.isDefaultCurrentUserFlag();
            String message = shortcutPanelLogic.getCreateForUserPrompt() + " "
                    + shortcutPanelLogic.getCreateForAllUsersPrompt();
            Prompt.Option allUsers = prompt.confirm(QUESTION, message, YES_NO, selected ? YES : NO);
            shortcutPanelLogic.setUserType(allUsers == YES ? Shortcut.ALL_USERS : Shortcut.CURRENT_USER);
        }
    }

    /**
     * Choose under which program group to place the shortcuts.
     *
     * @param console the console used to prompt for a group name
     */
    private void chooseProgramGroup(Console console)
    {
        String programGroup = shortcutPanelLogic.getSuggestedProgramGroup();
        // NOTE(review): this only prompts when the suggested group is the
        // empty string (non-null); a null suggestion is passed through
        // unprompted. Verify the condition is not meant to be
        // (programGroup == null || "".equals(programGroup)).
        if (programGroup != null && "".equals(programGroup))
        {
            programGroup = console.prompt(installData.getMessages().get("ShortcutPanel.regular.list"), "");
        }
        shortcutPanelLogic.setGroupName(programGroup);
    }

    @Override
    public void createInstallationRecord(IXMLElement panelRoot)
    {
        try
        {
            new ShortcutPanelAutomationHelper(shortcutPanelLogic).createInstallationRecord(installData, panelRoot);
        }
        catch (Exception e)
        {
            // FIX: log message previously read "Could generate ..." — corrected wording.
            logger.log(Level.WARNING, "Could not generate automatic installer description for shortcuts.");
        }
    }
}
codehaus/izpack
izpack-panel/src/main/java/com/izforge/izpack/panels/shortcut/ShortcutConsolePanel.java
Java
apache-2.0
8,720
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.cassandra.thrift; import java.net.SocketAddress; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Encapsulates the current client state (session). * * We rely on the Thrift server to tell us what socket it is * executing a request for via setCurrentSocket, after which currentSession can do its job anywhere. 
*/ public class ThriftSessionManager { private static final Logger logger = LoggerFactory.getLogger(ThriftSessionManager.class); public final static ThriftSessionManager instance = new ThriftSessionManager(); private final ThreadLocal<SocketAddress> remoteSocket = new ThreadLocal<SocketAddress>(); private final Map<SocketAddress, ThriftClientState> activeSocketSessions = new ConcurrentHashMap<SocketAddress, ThriftClientState>(); /** * @param socket the address on which the current thread will work on requests for until further notice */ public void setCurrentSocket(SocketAddress socket) { remoteSocket.set(socket); } /** * @return the current session for the most recently given socket on this thread */ public ThriftClientState currentSession() { SocketAddress socket = remoteSocket.get(); assert socket != null; ThriftClientState cState = activeSocketSessions.get(socket); if (cState == null) { cState = new ThriftClientState(); activeSocketSessions.put(socket, cState); } return cState; } /** * The connection associated with @param socket is permanently finished. */ public void connectionComplete(SocketAddress socket) { assert socket != null; activeSocketSessions.remove(socket); if (logger.isTraceEnabled()) logger.trace("ClientState removed for socket addr {}", socket); } public int getConnectedClients() { return activeSocketSessions.size(); } }
jackliu8722/cassandra-1.2.16
src/java/org/apache/cassandra/thrift/ThriftSessionManager.java
Java
apache-2.0
2,836
/* * Copyright 2016-present Open Networking Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Package for MQ utility. */ package org.onosproject.rabbitmq.util;
gkatsikas/onos
apps/rabbitmq/src/main/java/org/onosproject/rabbitmq/util/package-info.java
Java
apache-2.0
693
<?php

/*
 * This file is part of the Respect\Rest package.
 *
 * For the full copyright and license information, please view the LICENSE
 * file that was distributed with this source code.
 */

namespace Respect\Rest\Routines;

use Respect\Rest\Request;

/** Routine that runs before the route matching */
interface ProxyableWhen
{
    /**
     * Executed to check if the route matches.
     *
     * @param Request $request the current request being dispatched
     * @param mixed   $params  the parameters extracted for the route
     */
    public function when(Request $request, $params);
}
googlecodelabs/cloud-lamp-migration
web/core/vendor/respect/rest/library/Respect/Rest/Routines/ProxyableWhen.php
PHP
apache-2.0
436
package ca.uhn.fhir.model.dstu3.composite;

import java.util.List;

import ca.uhn.fhir.model.api.ICompositeDatatype;
import ca.uhn.fhir.model.api.IElement;
import ca.uhn.fhir.model.api.annotation.Child;
import ca.uhn.fhir.model.api.annotation.DatatypeDef;
import ca.uhn.fhir.model.api.annotation.Description;
import ca.uhn.fhir.model.base.composite.BaseCodingDt;
import ca.uhn.fhir.model.primitive.BooleanDt;
import ca.uhn.fhir.model.primitive.CodeDt;
import ca.uhn.fhir.model.primitive.StringDt;
import ca.uhn.fhir.model.primitive.UriDt;

/**
 * HAPI/FHIR <b>CodingDt</b> Datatype
 * ()
 *
 * <p>
 * <b>Definition:</b>
 * A reference to a code defined by a terminology system
 * </p>
 *
 * <p>
 * <b>Requirements:</b>
 * References to codes are very common in healthcare models
 * </p>
 *
 * <p>
 * NOTE(review): this class follows the generated hapi-tinder accessor pattern —
 * every {@code get*Element()} lazily creates its child element, so element
 * getters never return <code>null</code>; all setters return {@code this} for chaining.
 * </p>
 */
@DatatypeDef(name="CodingDt")
public class CodingDt extends BaseCodingDt implements ICompositeDatatype, org.hl7.fhir.instance.model.api.IBaseCoding {

    /**
     * Constructor
     */
    public CodingDt() {
        // nothing
    }

    /**
     * Creates a new Coding with the given system and code
     */
    public CodingDt(String theSystem, String theCode) {
        setSystem(theSystem);
        setCode(theCode);
    }

    /**
     * Copy constructor: Creates a new Coding with the system and code copied out of the
     * given coding. Note: version, display and userSelected are intentionally NOT copied.
     */
    public CodingDt(BaseCodingDt theCoding) {
        this(theCoding.getSystemElement().getValueAsString(), theCoding.getCodeElement().getValue());
    }

    // Identity of the code system (e.g. a terminology URI).
    @Child(name="system", type=UriDt.class, order=0, min=0, max=1, summary=true, modifier=false)
    @Description(
        shortDefinition="",
        formalDefinition="The identification of the code system that defines the meaning of the symbol in the code."
    )
    private UriDt mySystem;

    // Code-system version used when the code was chosen.
    @Child(name="version", type=StringDt.class, order=1, min=0, max=1, summary=true, modifier=false)
    @Description(
        shortDefinition="",
        formalDefinition="The version of the code system which was used when choosing this code. Note that a well-maintained code system does not need the version reported, because the meaning of codes is consistent across versions. However this cannot consistently be assured. and when the meaning is not guaranteed to be consistent, the version SHOULD be exchanged"
    )
    private StringDt myVersion;

    // The symbol/expression within the system.
    @Child(name="code", type=CodeDt.class, order=2, min=0, max=1, summary=true, modifier=false)
    @Description(
        shortDefinition="",
        formalDefinition="A symbol in syntax defined by the system. The symbol may be a predefined code or an expression in a syntax defined by the coding system (e.g. post-coordination)"
    )
    private CodeDt myCode;

    // Human-readable rendering of the code.
    @Child(name="display", type=StringDt.class, order=3, min=0, max=1, summary=true, modifier=false)
    @Description(
        shortDefinition="",
        formalDefinition="A representation of the meaning of the code in the system, following the rules of the system"
    )
    private StringDt myDisplay;

    // Whether the user picked this coding directly (vs. derived/inferred).
    @Child(name="userSelected", type=BooleanDt.class, order=4, min=0, max=1, summary=true, modifier=false)
    @Description(
        shortDefinition="",
        formalDefinition="Indicates that this coding was chosen by a user directly - i.e. off a pick list of available items (codes or displays)"
    )
    private BooleanDt myUserSelected;

    /** @return true when the base element and every child element here are empty. */
    @Override
    public boolean isEmpty() {
        return super.isBaseEmpty() && ca.uhn.fhir.util.ElementUtil.isEmpty( mySystem, myVersion, myCode, myDisplay, myUserSelected);
    }

    /** Collects all populated child elements assignable to {@code theType}. */
    @Override
    public <T extends IElement> List<T> getAllPopulatedChildElementsOfType(Class<T> theType) {
        return ca.uhn.fhir.util.ElementUtil.allPopulatedChildElements(theType, mySystem, myVersion, myCode, myDisplay, myUserSelected);
    }

    /**
     * Gets the element for <b>system</b> (the identification of the code system that
     * defines the meaning of the symbol in the code), creating it if it does not exist.
     * Will not return <code>null</code>.
     */
    public UriDt getSystemElement() {
        if (mySystem == null) {
            mySystem = new UriDt();
        }
        return mySystem;
    }

    /**
     * Gets the value for <b>system</b>; may be <code>null</code> when unset.
     */
    public String getSystem() {
        return getSystemElement().getValue();
    }

    /**
     * Sets the element for <b>system</b>.
     */
    public CodingDt setSystem(UriDt theValue) {
        mySystem = theValue;
        return this;
    }

    /**
     * Sets the value for <b>system</b> from a plain string URI.
     */
    public CodingDt setSystem( String theUri) {
        mySystem = new UriDt(theUri);
        return this;
    }

    /**
     * Gets the element for <b>version</b> (the code-system version used when choosing
     * this code), creating it if it does not exist. Will not return <code>null</code>.
     */
    public StringDt getVersionElement() {
        if (myVersion == null) {
            myVersion = new StringDt();
        }
        return myVersion;
    }

    /**
     * Gets the value for <b>version</b>; may be <code>null</code> when unset.
     */
    public String getVersion() {
        return getVersionElement().getValue();
    }

    /**
     * Sets the element for <b>version</b>.
     */
    public CodingDt setVersion(StringDt theValue) {
        myVersion = theValue;
        return this;
    }

    /**
     * Sets the value for <b>version</b> from a plain string.
     */
    public CodingDt setVersion( String theString) {
        myVersion = new StringDt(theString);
        return this;
    }

    /**
     * Gets the element for <b>code</b> (a symbol in syntax defined by the system),
     * creating it if it does not exist. Will not return <code>null</code>.
     */
    public CodeDt getCodeElement() {
        if (myCode == null) {
            myCode = new CodeDt();
        }
        return myCode;
    }

    /**
     * Gets the value for <b>code</b>; may be <code>null</code> when unset.
     */
    public String getCode() {
        return getCodeElement().getValue();
    }

    /**
     * Sets the element for <b>code</b>.
     */
    public CodingDt setCode(CodeDt theValue) {
        myCode = theValue;
        return this;
    }

    /**
     * Sets the value for <b>code</b> from a plain string.
     */
    public CodingDt setCode( String theCode) {
        myCode = new CodeDt(theCode);
        return this;
    }

    /**
     * Gets the element for <b>display</b> (a representation of the meaning of the code
     * in the system), creating it if it does not exist. Will not return <code>null</code>.
     */
    public StringDt getDisplayElement() {
        if (myDisplay == null) {
            myDisplay = new StringDt();
        }
        return myDisplay;
    }

    /**
     * Gets the value for <b>display</b>; may be <code>null</code> when unset.
     */
    public String getDisplay() {
        return getDisplayElement().getValue();
    }

    /**
     * Sets the element for <b>display</b>.
     */
    public CodingDt setDisplay(StringDt theValue) {
        myDisplay = theValue;
        return this;
    }

    /**
     * Sets the value for <b>display</b> from a plain string.
     */
    public CodingDt setDisplay( String theString) {
        myDisplay = new StringDt(theString);
        return this;
    }

    /**
     * Gets the element for <b>userSelected</b> (whether this coding was chosen by a user
     * directly), creating it if it does not exist. Will not return <code>null</code>.
     */
    public BooleanDt getUserSelectedElement() {
        if (myUserSelected == null) {
            myUserSelected = new BooleanDt();
        }
        return myUserSelected;
    }

    /**
     * Gets the value for <b>userSelected</b>; may be <code>null</code> when unset.
     */
    public Boolean getUserSelected() {
        return getUserSelectedElement().getValue();
    }

    /**
     * Sets the element for <b>userSelected</b>.
     */
    public CodingDt setUserSelected(BooleanDt theValue) {
        myUserSelected = theValue;
        return this;
    }

    /**
     * Sets the value for <b>userSelected</b> from a primitive boolean.
     */
    public CodingDt setUserSelected( boolean theBoolean) {
        myUserSelected = new BooleanDt(theBoolean);
        return this;
    }

}
eug48/hapi-fhir
hapi-tinder-plugin/src/main/java/ca/uhn/fhir/model/dstu3/composite/CodingDt.java
Java
apache-2.0
11,977
/*
 * Licensed to DuraSpace under one or more contributor license agreements.
 * See the NOTICE file distributed with this work for additional information
 * regarding copyright ownership.
 *
 * DuraSpace licenses this file to you under the Apache License,
 * Version 2.0 (the "License"); you may not use this file except in
 * compliance with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.fcrepo.kernel.api.exception;

/**
 * Raised when a requested transaction cannot be located.
 *
 * @author bbpennel
 */
public class TransactionNotFoundException extends TransactionRuntimeException {

    private static final long serialVersionUID = 1L;

    /**
     * Construct with a detail message only.
     *
     * @param msg the message
     */
    public TransactionNotFoundException(final String msg) {
        super(msg);
    }

    /**
     * Construct with an underlying cause only.
     *
     * @param rootCause the root cause
     */
    public TransactionNotFoundException(final Throwable rootCause) {
        super(rootCause);
    }

    /**
     * Construct with both a detail message and an underlying cause.
     *
     * @param msg the message
     * @param rootCause the root cause
     */
    public TransactionNotFoundException(final String msg, final Throwable rootCause) {
        super(msg, rootCause);
    }
}
dbernstein/fcrepo4
fcrepo-kernel-api/src/main/java/org/fcrepo/kernel/api/exception/TransactionNotFoundException.java
Java
apache-2.0
1,665
// Node path helper (nothing new here)
var path = require('path')
// Load config/index.js
var config = require('../config')
// Small shared build utilities
var utils = require('./utils')
// Load webpack itself
var webpack = require('webpack')
// Load the webpack configuration merge tool
var merge = require('webpack-merge')
// Load webpack.base.conf.js
var baseWebpackConfig = require('./webpack.base.conf')
// A webpack extension that extracts some code and separates it from the bundle.
// If we want webpack to emit css and js as separate files, we need this plugin.
var ExtractTextPlugin = require('extract-text-webpack-plugin')
// A plugin that injects assets into html and creates new .html files
var HtmlWebpackPlugin = require('html-webpack-plugin')

var env = config.build.env

// Merge with webpack.base.conf.js
var webpackConfig = merge(baseWebpackConfig, {
  // Loaders to use
  module: {
    loaders: utils.styleLoaders({ sourceMap: config.build.productionSourceMap, extract: true })
  },
  // Whether to use the #source-map devtool (see the config flag above)
  devtool: config.build.productionSourceMap ? '#source-map' : false,
  output: {
    // Build output directory
    path: config.build.assetsRoot,
    // Output file name for named chunks
    // We can append e.g. :6 after "hash" to choose how many hash digits to use
    filename: utils.assetsPath('js/[name].[chunkhash].js'),
    // Output file name for chunks with no explicit name
    chunkFilename: utils.assetsPath('js/[id].[chunkhash].js')
  },
  vue: {
    // Loaders used when compiling .vue files
    loaders: utils.cssLoaders({
      sourceMap: config.build.productionSourceMap,
      extract: true
    })
  },
  // Plugins in use
  plugins: [
    // http://vuejs.github.io/vue-loader/en/workflow/production.html
    // DefinePlugin substitutes raw strings into the code, so you can
    // write JS-expression strings here if needed
    new webpack.DefinePlugin({
      'process.env': env
    }),
    // Minify js (can also minify css)
    new webpack.optimize.UglifyJsPlugin({
      compress: {
        warnings: false
      }
    }),
    new webpack.optimize.OccurrenceOrderPlugin(),
    // Extract css into its own file
    new ExtractTextPlugin(utils.assetsPath('css/[name].[contenthash].css')),
    // Input/output .html file handling
    new HtmlWebpackPlugin({
      filename: config.build.index,
      template: 'index.html',
      // Whether to inject assets into the html
      inject: true,
      // How to minify
      minify: {
        removeComments: true,
        collapseWhitespace: true,
        removeAttributeQuotes: true
        // more options:
        // https://github.com/kangax/html-minifier#options-quick-reference
      },
      // necessary to consistently work with multiple chunks via CommonsChunkPlugin
      chunksSortMode: 'dependency'
    }),
    // Split third-party code into a "vendor" chunk
    new webpack.optimize.CommonsChunkPlugin({
      name: 'vendor',
      minChunks: function (module, count) {
        // any required modules inside node_modules are extracted to vendor
        return (
          module.resource &&
          /\.js$/.test(module.resource) &&
          module.resource.indexOf(
            path.join(__dirname, '../node_modules')
          ) === 0
        )
      }
    }),
    // extract webpack runtime and module manifest to its own file in order to
    // prevent vendor hash from being updated whenever app bundle is updated
    new webpack.optimize.CommonsChunkPlugin({
      name: 'manifest',
      chunks: ['vendor']
    })
  ]
})

// Apply the configuration below when gzip is enabled
if (config.build.productionGzip) {
  // Load the compression-webpack-plugin
  var CompressionWebpackPlugin = require('compression-webpack-plugin')
  // Push the plugin below onto webpackConfig.plugins
  webpackConfig.plugins.push(
    // Compress assets with compression-webpack-plugin
    new CompressionWebpackPlugin({
      asset: '[path].gz[query]',
      algorithm: 'gzip',
      test: new RegExp(
        '\\.(' +
        config.build.productionGzipExtensions.join('|') +
        ')$'
      ),
      threshold: 10240,
      minRatio: 0.8
    })
  )
}

module.exports = webpackConfig
huang303513/WebBasicCommonDemos
vue-cli2.0的webpack配置分析/lessproject/build/webpack.prod.conf.js
JavaScript
apache-2.0
4,179
// Example repl is a simple REPL (read-eval-print loop) for GO using // http://github.com/0xfaded/eval to the heavy lifting to implement // the eval() part. // // The intent here is to show how more to use the library, rather than // be a full-featured REPL. // // A more complete REPL including command history, tab completion and // readline editing is available as a separate package: // http://github.com/rocky/go-fish // // (rocky) My intent here is also to have something that I can debug in // the ssa-debugger tortoise/gub.sh. Right now that can't handle the // unsafe package, pointers, and calls to C code. So that let's out // go-gnureadline and lineedit. package main import ( "bufio" "fmt" "go/parser" "io" "os" "reflect" "strings" "github.com/mailgun/godebug/Godeps/_workspace/src/github.com/0xfaded/eval" ) // Simple replacement for GNU readline func readline(prompt string, in *bufio.Reader) (string, error) { fmt.Printf(prompt) line, err := in.ReadString('\n') if err == nil { line = strings.TrimRight(line, "\r\n") } return line, err } func intro_text() { fmt.Printf(`=== A simple Go eval REPL === Results of expression are stored in variable slice "results". The environment is stored in global variable "env". Enter expressions to be evaluated at the "go>" prompt. To see all results, type: "results". To quit, enter: "quit" or Ctrl-D (EOF). `) } // REPL is the a read, eval, and print loop. 
// REPL reads expressions from stdin, evaluates them against env, and prints
// each result until EOF or the literal input "quit". Every successful result
// is appended to the "results" slice, which is also published into env so the
// user can inspect it from within the REPL.
func REPL(env *eval.SimpleEnv) {

	var err error

	// A place to store result values of expressions entered
	// interactively
	results := make([]interface{}, 0, 10)
	env.Vars["results"] = reflect.ValueOf(&results)

	exprs := 0
	in := bufio.NewReader(os.Stdin)
	line, err := readline("go> ", in)
	for line != "quit" {
		if err != nil {
			// EOF ends the session cleanly; any other read error is fatal.
			if err == io.EOF {
				break
			}
			panic(err)
		}
		// Pipeline: parse -> type-check -> evaluate; each stage reports
		// its own class of error and skips the rest on failure.
		if expr, err := parser.ParseExpr(line); err != nil {
			if pair := eval.FormatErrorPos(line, err.Error()); len(pair) == 2 {
				fmt.Println(pair[0])
				fmt.Println(pair[1])
			}
			fmt.Printf("parse error: %s\n", err)
		} else if cexpr, errs := eval.CheckExpr(expr, env); len(errs) != 0 {
			for _, cerr := range errs {
				fmt.Printf("check error: %v\n", cerr)
			}
		} else if vals, err := eval.EvalExpr(cexpr, env); err != nil {
			fmt.Printf("panic: %s\n", err)
		} else if len(vals) == 0 {
			// Expression produced no values (e.g. a void call).
			fmt.Printf("Kind=Slice\nvoid\n")
		} else if len(vals) == 1 {
			// Single value: print its kind/type, record it in results.
			value := (vals)[0]
			if value.IsValid() {
				kind := value.Kind().String()
				typ := value.Type().String()
				if typ != kind {
					fmt.Printf("Kind = %v\n", kind)
					fmt.Printf("Type = %v\n", typ)
				} else {
					fmt.Printf("Kind = Type = %v\n", kind)
				}
				fmt.Printf("results[%d] = %s\n", exprs, eval.Inspect(value))
				exprs += 1
				results = append(results, (vals)[0].Interface())
			} else {
				fmt.Printf("%s\n", value)
			}
		} else {
			// Multiple return values: print comma-separated, store the slice.
			fmt.Printf("Kind = Multi-Value\n")
			size := len(vals)
			for i, v := range vals {
				fmt.Printf("%s", eval.Inspect(v))
				if i < size-1 {
					fmt.Printf(", ")
				}
			}
			fmt.Printf("\n")
			exprs += 1
			results = append(results, vals)
		}
		line, err = readline("go> ", in)
	}
}

// Marker interfaces/types used only to demonstrate interface values in the
// demo environment below. Note XI and ZI have identical method sets.
type XI interface {
	x()
}
type YI interface {
	y()
}
type ZI interface {
	x()
}
type X int
type Y int
type Z int

func (X) x() {}
func (Y) y() {}
func (Z) x() {}

// Create an eval.Env environment to use in evaluation.
// This is a bit ugly here, because we are rolling everything by hand, but
// we want some sort of environment to show off in demo'ing.
// The artificial environment we create here consists of
// fmt:
//   fns: fmt.Println, fmt.Printf
// os:
//   types: MyInt
//   vars: Stdout, Args
// main:
//   type Alice
//   var alice, aliceptr
//
// (REPL also adds var results to main)
//
// See make_env in github.com/rocky/go-fish for an automated way to
// create more complete environment from a starting import.
func makeBogusEnv() *eval.SimpleEnv {

	// A couple of things from the fmt package.
	var fmt_funcs map[string]reflect.Value = make(map[string]reflect.Value)
	fmt_funcs["Println"] = reflect.ValueOf(fmt.Println)
	fmt_funcs["Printf"] = reflect.ValueOf(fmt.Printf)

	// A simple type for demo
	type MyInt int

	// A stripped down package environment. See
	// http://github.com/rocky/go-fish and repl_imports.go for a more
	// complete environment.
	pkgs := map[string]eval.Env{
		"fmt": &eval.SimpleEnv{
			Vars:   make(map[string]reflect.Value),
			Consts: make(map[string]reflect.Value),
			Funcs:  fmt_funcs,
			Types:  make(map[string]reflect.Type),
			Pkgs:   nil,
		},
		"os": &eval.SimpleEnv{
			// Vars hold pointers so assignments made in the REPL are visible.
			Vars: map[string]reflect.Value{
				"Stdout": reflect.ValueOf(&os.Stdout),
				"Args":   reflect.ValueOf(&os.Args)},
			Consts: make(map[string]reflect.Value),
			Funcs:  make(map[string]reflect.Value),
			Types: map[string]reflect.Type{
				"MyInt": reflect.TypeOf(*new(MyInt))},
			Pkgs: nil,
		},
	}
	mainEnv := eval.MakeSimpleEnv()
	mainEnv.Pkgs = pkgs

	// Some "alice" things for testing
	type Alice struct {
		Bob    int
		Secret string
	}
	type R rune

	alice := Alice{1, "shhh"}
	alicePtr := &alice
	foo := 10
	ints := []int{1, 2, 3, 4}
	add := func(a, b int) int { return a + b }
	sum := func(as ...int) int {
		r := 0
		for _, a := range as {
			r += a
		}
		return r
	}

	mainEnv.Vars["alice"] = reflect.ValueOf(&alice)
	mainEnv.Vars["alicePtr"] = reflect.ValueOf(&alicePtr)
	mainEnv.Vars["foo"] = reflect.ValueOf(&foo)
	mainEnv.Vars["ints"] = reflect.ValueOf(&ints)
	mainEnv.Consts["bar"] = reflect.ValueOf(eval.NewConstInt64(5))
	mainEnv.Funcs["add"] = reflect.ValueOf(add)
	mainEnv.Funcs["sum"] = reflect.ValueOf(sum)
	mainEnv.Types["Alice"] = reflect.TypeOf(Alice{})
	mainEnv.Types["R"] = reflect.TypeOf(R(0))

	// Interface types can't be reflected directly from a value; take the
	// type of a *pointer* to the interface and use Elem() to get the
	// interface type itself.
	var xi *XI = new(XI)
	var yi *YI = new(YI)
	var zi *ZI = new(ZI)
	*xi = XI(X(0))
	*yi = YI(Y(0))
	*zi = ZI(Z(0))
	mainEnv.Types["XI"] = reflect.TypeOf(xi).Elem()
	mainEnv.Types["YI"] = reflect.TypeOf(yi).Elem()
	mainEnv.Types["ZI"] = reflect.TypeOf(zi).Elem()
	mainEnv.Types["X"] = reflect.TypeOf(X(0))
	mainEnv.Types["Y"] = reflect.TypeOf(Y(0))
	mainEnv.Types["Z"] = reflect.TypeOf(Z(0))

	return mainEnv
}

// main wires it together: build the demo environment, print the banner,
// and run the read-eval-print loop until EOF or "quit".
func main() {
	env := makeBogusEnv()
	intro_text()
	REPL(env)
}
CodyGuo/godebug
Godeps/_workspace/src/github.com/0xfaded/eval/demo/repl.go
GO
apache-2.0
6,199
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with this * work for additional information regarding copyright ownership. The ASF * licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package org.apache.hadoop.hbase.io.encoding; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ObjectIntPair; import org.apache.yetus.audience.InterfaceAudience; /** * Compress using: * - store size of common prefix * - save column family once, it is same within HFile * - use integer compression for key, value and prefix (7-bit encoding) * - use bits to avoid duplication key length, value length * and type if it same as previous * - store in 3 bits length of timestamp field * - allow diff in timestamp instead of actual value * * Format: * - 1 byte: flag * - 1-5 bytes: key length (only if FLAG_SAME_KEY_LENGTH is not set in flag) * - 1-5 bytes: value length (only if FLAG_SAME_VALUE_LENGTH is not set in flag) * - 1-5 bytes: prefix length * - ... 
bytes: rest of the row (if prefix length is small enough) * - ... bytes: qualifier (or suffix depending on prefix length) * - 1-8 bytes: timestamp or diff * - 1 byte: type (only if FLAG_SAME_TYPE is not set in the flag) * - ... bytes: value */ @InterfaceAudience.Private public class DiffKeyDeltaEncoder extends BufferedDataBlockEncoder { static final int FLAG_SAME_KEY_LENGTH = 1; static final int FLAG_SAME_VALUE_LENGTH = 1 << 1; static final int FLAG_SAME_TYPE = 1 << 2; static final int FLAG_TIMESTAMP_IS_DIFF = 1 << 3; static final int MASK_TIMESTAMP_LENGTH = (1 << 4) | (1 << 5) | (1 << 6); static final int SHIFT_TIMESTAMP_LENGTH = 4; static final int FLAG_TIMESTAMP_SIGN = 1 << 7; protected static class DiffCompressionState extends CompressionState { long timestamp; byte[] familyNameWithSize; @Override protected void readTimestamp(ByteBuffer in) { timestamp = in.getLong(); } @Override void copyFrom(CompressionState state) { super.copyFrom(state); DiffCompressionState state2 = (DiffCompressionState) state; timestamp = state2.timestamp; } } private void uncompressSingleKeyValue(DataInputStream source, ByteBuffer buffer, DiffCompressionState state) throws IOException, EncoderBufferTooSmallException { // read the column family at the beginning if (state.isFirst()) { state.familyLength = source.readByte(); state.familyNameWithSize = new byte[(state.familyLength & 0xff) + KeyValue.FAMILY_LENGTH_SIZE]; state.familyNameWithSize[0] = state.familyLength; int read = source.read(state.familyNameWithSize, KeyValue.FAMILY_LENGTH_SIZE, state.familyLength); assert read == state.familyLength; } // read flag byte flag = source.readByte(); // read key/value/common lengths int keyLength; int valueLength; if ((flag & FLAG_SAME_KEY_LENGTH) != 0) { keyLength = state.keyLength; } else { keyLength = ByteBufferUtils.readCompressedInt(source); } if ((flag & FLAG_SAME_VALUE_LENGTH) != 0) { valueLength = state.valueLength; } else { valueLength = ByteBufferUtils.readCompressedInt(source); } int 
commonPrefix = ByteBufferUtils.readCompressedInt(source); // create KeyValue buffer and fill it prefix int keyOffset = buffer.position(); ensureSpace(buffer, keyLength + valueLength + KeyValue.ROW_OFFSET); buffer.putInt(keyLength); buffer.putInt(valueLength); // copy common from previous key if (commonPrefix > 0) { ByteBufferUtils.copyFromBufferToBuffer(buffer, buffer, state.prevOffset + KeyValue.ROW_OFFSET, commonPrefix); } // copy the rest of the key from the buffer int keyRestLength; if (state.isFirst() || commonPrefix < state.rowLength + KeyValue.ROW_LENGTH_SIZE) { // omit the family part of the key, it is always the same short rowLength; int rowRestLength; // check length of row if (commonPrefix < KeyValue.ROW_LENGTH_SIZE) { // not yet copied, do it now ByteBufferUtils.copyFromStreamToBuffer(buffer, source, KeyValue.ROW_LENGTH_SIZE - commonPrefix); ByteBufferUtils.skip(buffer, -KeyValue.ROW_LENGTH_SIZE); rowLength = buffer.getShort(); rowRestLength = rowLength; } else { // already in buffer, just read it rowLength = buffer.getShort(keyOffset + KeyValue.ROW_OFFSET); rowRestLength = rowLength + KeyValue.ROW_LENGTH_SIZE - commonPrefix; } // copy the rest of row ByteBufferUtils.copyFromStreamToBuffer(buffer, source, rowRestLength); state.rowLength = rowLength; // copy the column family buffer.put(state.familyNameWithSize); keyRestLength = keyLength - rowLength - state.familyNameWithSize.length - (KeyValue.ROW_LENGTH_SIZE + KeyValue.TIMESTAMP_TYPE_SIZE); } else { // prevRowWithSizeLength is the same as on previous row keyRestLength = keyLength - commonPrefix - KeyValue.TIMESTAMP_TYPE_SIZE; } // copy the rest of the key, after column family -> column qualifier ByteBufferUtils.copyFromStreamToBuffer(buffer, source, keyRestLength); // handle timestamp int timestampFitsInBytes = ((flag & MASK_TIMESTAMP_LENGTH) >>> SHIFT_TIMESTAMP_LENGTH) + 1; long timestamp = ByteBufferUtils.readLong(source, timestampFitsInBytes); if ((flag & FLAG_TIMESTAMP_SIGN) != 0) { timestamp = 
-timestamp; } if ((flag & FLAG_TIMESTAMP_IS_DIFF) != 0) { timestamp = state.timestamp - timestamp; } buffer.putLong(timestamp); // copy the type field byte type; if ((flag & FLAG_SAME_TYPE) != 0) { type = state.type; } else { type = source.readByte(); } buffer.put(type); // copy value part ByteBufferUtils.copyFromStreamToBuffer(buffer, source, valueLength); state.keyLength = keyLength; state.valueLength = valueLength; state.prevOffset = keyOffset; state.timestamp = timestamp; state.type = type; // state.qualifier is unused } @Override public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingContext, DataOutputStream out) throws IOException { EncodingState state = encodingContext.getEncodingState(); int size = compressSingleKeyValue(out, cell, state.prevCell); size += afterEncodingKeyValue(cell, out, encodingContext); state.prevCell = cell; return size; } private int compressSingleKeyValue(DataOutputStream out, Cell cell, Cell prevCell) throws IOException { int flag = 0; // Do not use more bits that can fit into a byte int kLength = KeyValueUtil.keyLength(cell); int vLength = cell.getValueLength(); long timestamp; long diffTimestamp = 0; int diffTimestampFitsInBytes = 0; int timestampFitsInBytes; int commonPrefix = 0; if (prevCell == null) { timestamp = cell.getTimestamp(); if (timestamp < 0) { flag |= FLAG_TIMESTAMP_SIGN; timestamp = -timestamp; } timestampFitsInBytes = ByteBufferUtils.longFitsIn(timestamp); flag |= (timestampFitsInBytes - 1) << SHIFT_TIMESTAMP_LENGTH; // put column family byte familyLength = cell.getFamilyLength(); out.write(familyLength); PrivateCellUtil.writeFamily(out, cell, familyLength); } else { // Finding common prefix int preKeyLength = KeyValueUtil.keyLength(prevCell); commonPrefix = PrivateCellUtil.findCommonPrefixInFlatKey(cell, prevCell, true, false); if (kLength == preKeyLength) { flag |= FLAG_SAME_KEY_LENGTH; } if (vLength == prevCell.getValueLength()) { flag |= FLAG_SAME_VALUE_LENGTH; } if (cell.getTypeByte() == 
prevCell.getTypeByte()) { flag |= FLAG_SAME_TYPE; } // don't compress timestamp and type using prefix encode timestamp timestamp = cell.getTimestamp(); diffTimestamp = prevCell.getTimestamp() - timestamp; boolean negativeTimestamp = timestamp < 0; if (negativeTimestamp) { timestamp = -timestamp; } timestampFitsInBytes = ByteBufferUtils.longFitsIn(timestamp); boolean minusDiffTimestamp = diffTimestamp < 0; if (minusDiffTimestamp) { diffTimestamp = -diffTimestamp; } diffTimestampFitsInBytes = ByteBufferUtils.longFitsIn(diffTimestamp); if (diffTimestampFitsInBytes < timestampFitsInBytes) { flag |= (diffTimestampFitsInBytes - 1) << SHIFT_TIMESTAMP_LENGTH; flag |= FLAG_TIMESTAMP_IS_DIFF; if (minusDiffTimestamp) { flag |= FLAG_TIMESTAMP_SIGN; } } else { flag |= (timestampFitsInBytes - 1) << SHIFT_TIMESTAMP_LENGTH; if (negativeTimestamp) { flag |= FLAG_TIMESTAMP_SIGN; } } } out.write(flag); if ((flag & FLAG_SAME_KEY_LENGTH) == 0) { ByteBufferUtils.putCompressedInt(out, kLength); } if ((flag & FLAG_SAME_VALUE_LENGTH) == 0) { ByteBufferUtils.putCompressedInt(out, vLength); } ByteBufferUtils.putCompressedInt(out, commonPrefix); short rLen = cell.getRowLength(); if (commonPrefix < rLen + KeyValue.ROW_LENGTH_SIZE) { // Previous and current rows are different. Copy the differing part of // the row, skip the column family, and copy the qualifier. PrivateCellUtil.writeRowKeyExcludingCommon(cell, rLen, commonPrefix, out); PrivateCellUtil.writeQualifier(out, cell, cell.getQualifierLength()); } else { // The common part includes the whole row. As the column family is the // same across the whole file, it will automatically be included in the // common prefix, so we need not special-case it here. 
// What we write here is the non common part of the qualifier int commonQualPrefix = commonPrefix - (rLen + KeyValue.ROW_LENGTH_SIZE) - (cell.getFamilyLength() + KeyValue.FAMILY_LENGTH_SIZE); PrivateCellUtil.writeQualifierSkippingBytes(out, cell, cell.getQualifierLength(), commonQualPrefix); } if ((flag & FLAG_TIMESTAMP_IS_DIFF) == 0) { ByteBufferUtils.putLong(out, timestamp, timestampFitsInBytes); } else { ByteBufferUtils.putLong(out, diffTimestamp, diffTimestampFitsInBytes); } if ((flag & FLAG_SAME_TYPE) == 0) { out.write(cell.getTypeByte()); } PrivateCellUtil.writeValue(out, cell, vLength); return kLength + vLength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE; } @Override public Cell getFirstKeyCellInBlock(ByteBuff block) { block.mark(); block.position(Bytes.SIZEOF_INT); byte familyLength = block.get(); block.skip(familyLength); byte flag = block.get(); int keyLength = ByteBuff.readCompressedInt(block); // TODO : See if we can avoid these reads as the read values are not getting used ByteBuff.readCompressedInt(block); // valueLength ByteBuff.readCompressedInt(block); // commonLength ByteBuffer result = ByteBuffer.allocate(keyLength); // copy row assert !(result.isDirect()); int pos = result.arrayOffset(); block.get(result.array(), pos, Bytes.SIZEOF_SHORT); pos += Bytes.SIZEOF_SHORT; short rowLength = result.getShort(); block.get(result.array(), pos, rowLength); pos += rowLength; // copy family int savePosition = block.position(); block.position(Bytes.SIZEOF_INT); block.get(result.array(), pos, familyLength + Bytes.SIZEOF_BYTE); pos += familyLength + Bytes.SIZEOF_BYTE; // copy qualifier block.position(savePosition); int qualifierLength = keyLength - pos + result.arrayOffset() - KeyValue.TIMESTAMP_TYPE_SIZE; block.get(result.array(), pos, qualifierLength); pos += qualifierLength; // copy the timestamp and type int timestampFitInBytes = ((flag & MASK_TIMESTAMP_LENGTH) >>> SHIFT_TIMESTAMP_LENGTH) + 1; long timestamp = ByteBuff.readLong(block, timestampFitInBytes); if 
((flag & FLAG_TIMESTAMP_SIGN) != 0) { timestamp = -timestamp; } result.putLong(pos, timestamp); pos += Bytes.SIZEOF_LONG; block.get(result.array(), pos, Bytes.SIZEOF_BYTE); block.reset(); // The result is already a BB. So always we will create a KeyOnlyKv. return new KeyValue.KeyOnlyKeyValue(result.array(), 0, keyLength); } @Override public String toString() { return DiffKeyDeltaEncoder.class.getSimpleName(); } protected static class DiffSeekerState extends SeekerState { private int rowLengthWithSize; private long timestamp; public DiffSeekerState(ObjectIntPair<ByteBuffer> tmpPair, boolean includeTags) { super(tmpPair, includeTags); } @Override protected void copyFromNext(SeekerState that) { super.copyFromNext(that); DiffSeekerState other = (DiffSeekerState) that; rowLengthWithSize = other.rowLengthWithSize; timestamp = other.timestamp; } } @Override public EncodedSeeker createSeeker(HFileBlockDecodingContext decodingCtx) { return new DiffSeekerStateBufferedEncodedSeeker(decodingCtx); } @Override protected ByteBuffer internalDecodeKeyValues(DataInputStream source, int allocateHeaderLength, int skipLastBytes, HFileBlockDefaultDecodingContext decodingCtx) throws IOException { int decompressedSize = source.readInt(); ByteBuffer buffer = ByteBuffer.allocate(decompressedSize + allocateHeaderLength); buffer.position(allocateHeaderLength); DiffCompressionState state = new DiffCompressionState(); while (source.available() > skipLastBytes) { uncompressSingleKeyValue(source, buffer, state); afterDecodingKeyValue(source, buffer, decodingCtx); } if (source.available() != skipLastBytes) { throw new IllegalStateException("Read too much bytes."); } return buffer; } private static class DiffSeekerStateBufferedEncodedSeeker extends BufferedEncodedSeeker<DiffSeekerState> { private byte[] familyNameWithSize; private static final int TIMESTAMP_WITH_TYPE_LENGTH = Bytes.SIZEOF_LONG + Bytes.SIZEOF_BYTE; private DiffSeekerStateBufferedEncodedSeeker(HFileBlockDecodingContext decodingCtx) { 
super(decodingCtx); } private void decode(boolean isFirst) { byte flag = currentBuffer.get(); byte type = 0; if ((flag & FLAG_SAME_KEY_LENGTH) == 0) { if (!isFirst) { type = current.keyBuffer[current.keyLength - Bytes.SIZEOF_BYTE]; } current.keyLength = ByteBuff.readCompressedInt(currentBuffer); } if ((flag & FLAG_SAME_VALUE_LENGTH) == 0) { current.valueLength = ByteBuff.readCompressedInt(currentBuffer); } current.lastCommonPrefix = ByteBuff.readCompressedInt(currentBuffer); current.ensureSpaceForKey(); if (current.lastCommonPrefix < Bytes.SIZEOF_SHORT) { // length of row is different, copy everything except family // copy the row size currentBuffer.get(current.keyBuffer, current.lastCommonPrefix, Bytes.SIZEOF_SHORT - current.lastCommonPrefix); current.rowLengthWithSize = Bytes.toShort(current.keyBuffer, 0) + Bytes.SIZEOF_SHORT; // copy the rest of row currentBuffer.get(current.keyBuffer, Bytes.SIZEOF_SHORT, current.rowLengthWithSize - Bytes.SIZEOF_SHORT); // copy the column family System.arraycopy(familyNameWithSize, 0, current.keyBuffer, current.rowLengthWithSize, familyNameWithSize.length); // copy the qualifier currentBuffer.get(current.keyBuffer, current.rowLengthWithSize + familyNameWithSize.length, current.keyLength - current.rowLengthWithSize - familyNameWithSize.length - TIMESTAMP_WITH_TYPE_LENGTH); } else if (current.lastCommonPrefix < current.rowLengthWithSize) { // we have to copy part of row and qualifier, // but column family is in right place // before column family (rest of row) currentBuffer.get(current.keyBuffer, current.lastCommonPrefix, current.rowLengthWithSize - current.lastCommonPrefix); // after column family (qualifier) currentBuffer.get(current.keyBuffer, current.rowLengthWithSize + familyNameWithSize.length, current.keyLength - current.rowLengthWithSize - familyNameWithSize.length - TIMESTAMP_WITH_TYPE_LENGTH); } else { // copy just the ending currentBuffer.get(current.keyBuffer, current.lastCommonPrefix, current.keyLength - 
TIMESTAMP_WITH_TYPE_LENGTH - current.lastCommonPrefix); } // timestamp int pos = current.keyLength - TIMESTAMP_WITH_TYPE_LENGTH; int timestampFitInBytes = 1 + ((flag & MASK_TIMESTAMP_LENGTH) >>> SHIFT_TIMESTAMP_LENGTH); long timestampOrDiff = ByteBuff.readLong(currentBuffer, timestampFitInBytes); if ((flag & FLAG_TIMESTAMP_SIGN) != 0) { timestampOrDiff = -timestampOrDiff; } if ((flag & FLAG_TIMESTAMP_IS_DIFF) == 0) { // it is timestamp current.timestamp = timestampOrDiff; } else { // it is diff current.timestamp = current.timestamp - timestampOrDiff; } Bytes.putLong(current.keyBuffer, pos, current.timestamp); pos += Bytes.SIZEOF_LONG; // type if ((flag & FLAG_SAME_TYPE) == 0) { currentBuffer.get(current.keyBuffer, pos, Bytes.SIZEOF_BYTE); } else if ((flag & FLAG_SAME_KEY_LENGTH) == 0) { current.keyBuffer[pos] = type; } current.valueOffset = currentBuffer.position(); currentBuffer.skip(current.valueLength); if (includesTags()) { decodeTags(); } if (includesMvcc()) { current.memstoreTS = ByteBufferUtils.readVLong(currentBuffer); } else { current.memstoreTS = 0; } current.nextKvOffset = currentBuffer.position(); } @Override protected void decodeFirst() { currentBuffer.skip(Bytes.SIZEOF_INT); // read column family byte familyNameLength = currentBuffer.get(); familyNameWithSize = new byte[familyNameLength + Bytes.SIZEOF_BYTE]; familyNameWithSize[0] = familyNameLength; currentBuffer.get(familyNameWithSize, Bytes.SIZEOF_BYTE, familyNameLength); decode(true); } @Override protected void decodeNext() { decode(false); } @Override protected DiffSeekerState createSeekerState() { return new DiffSeekerState(this.tmpPair, this.includesTags()); } } }
mahak/hbase
hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java
Java
apache-2.0
19,734
package store import ( "strconv" "strings" "github.com/docker/swarmkit/api" "github.com/docker/swarmkit/api/naming" memdb "github.com/hashicorp/go-memdb" ) const tableTask = "task" func init() { register(ObjectStoreConfig{ Table: &memdb.TableSchema{ Name: tableTask, Indexes: map[string]*memdb.IndexSchema{ indexID: { Name: indexID, Unique: true, Indexer: api.TaskIndexerByID{}, }, indexName: { Name: indexName, AllowMissing: true, Indexer: taskIndexerByName{}, }, indexRuntime: { Name: indexRuntime, AllowMissing: true, Indexer: taskIndexerByRuntime{}, }, indexServiceID: { Name: indexServiceID, AllowMissing: true, Indexer: taskIndexerByServiceID{}, }, indexNodeID: { Name: indexNodeID, AllowMissing: true, Indexer: taskIndexerByNodeID{}, }, indexSlot: { Name: indexSlot, AllowMissing: true, Indexer: taskIndexerBySlot{}, }, indexDesiredState: { Name: indexDesiredState, Indexer: taskIndexerByDesiredState{}, }, indexTaskState: { Name: indexTaskState, Indexer: taskIndexerByTaskState{}, }, indexNetwork: { Name: indexNetwork, AllowMissing: true, Indexer: taskIndexerByNetwork{}, }, indexSecret: { Name: indexSecret, AllowMissing: true, Indexer: taskIndexerBySecret{}, }, indexCustom: { Name: indexCustom, Indexer: api.TaskCustomIndexer{}, AllowMissing: true, }, }, }, Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error { var err error snapshot.Tasks, err = FindTasks(tx, All) return err }, Restore: func(tx Tx, snapshot *api.StoreSnapshot) error { tasks, err := FindTasks(tx, All) if err != nil { return err } for _, t := range tasks { if err := DeleteTask(tx, t.ID); err != nil { return err } } for _, t := range snapshot.Tasks { if err := CreateTask(tx, t); err != nil { return err } } return nil }, ApplyStoreAction: func(tx Tx, sa api.StoreAction) error { switch v := sa.Target.(type) { case *api.StoreAction_Task: obj := v.Task switch sa.Action { case api.StoreActionKindCreate: return CreateTask(tx, obj) case api.StoreActionKindUpdate: return UpdateTask(tx, obj) case 
api.StoreActionKindRemove: return DeleteTask(tx, obj.ID) } } return errUnknownStoreAction }, }) } // CreateTask adds a new task to the store. // Returns ErrExist if the ID is already taken. func CreateTask(tx Tx, t *api.Task) error { return tx.create(tableTask, t) } // UpdateTask updates an existing task in the store. // Returns ErrNotExist if the node doesn't exist. func UpdateTask(tx Tx, t *api.Task) error { return tx.update(tableTask, t) } // DeleteTask removes a task from the store. // Returns ErrNotExist if the task doesn't exist. func DeleteTask(tx Tx, id string) error { return tx.delete(tableTask, id) } // GetTask looks up a task by ID. // Returns nil if the task doesn't exist. func GetTask(tx ReadTx, id string) *api.Task { t := tx.get(tableTask, id) if t == nil { return nil } return t.(*api.Task) } // FindTasks selects a set of tasks and returns them. func FindTasks(tx ReadTx, by By) ([]*api.Task, error) { checkType := func(by By) error { switch by.(type) { case byName, byNamePrefix, byIDPrefix, byRuntime, byDesiredState, byTaskState, byNode, byService, bySlot, byReferencedNetworkID, byReferencedSecretID, byCustom, byCustomPrefix: return nil default: return ErrInvalidFindBy } } taskList := []*api.Task{} appendResult := func(o api.StoreObject) { taskList = append(taskList, o.(*api.Task)) } err := tx.find(tableTask, by, checkType, appendResult) return taskList, err } type taskIndexerByName struct{} func (ti taskIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { return fromArgs(args...) } func (ti taskIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { t := obj.(*api.Task) name := naming.Task(t) // Add the null character as a terminator return true, []byte(strings.ToLower(name) + "\x00"), nil } func (ti taskIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { return prefixFromArgs(args...) 
} type taskIndexerByRuntime struct{} func (ti taskIndexerByRuntime) FromArgs(args ...interface{}) ([]byte, error) { return fromArgs(args...) } func (ti taskIndexerByRuntime) FromObject(obj interface{}) (bool, []byte, error) { t := obj.(*api.Task) r, err := naming.Runtime(t.Spec) if err != nil { return false, nil, nil } return true, []byte(r + "\x00"), nil } func (ti taskIndexerByRuntime) PrefixFromArgs(args ...interface{}) ([]byte, error) { return prefixFromArgs(args...) } type taskIndexerByServiceID struct{} func (ti taskIndexerByServiceID) FromArgs(args ...interface{}) ([]byte, error) { return fromArgs(args...) } func (ti taskIndexerByServiceID) FromObject(obj interface{}) (bool, []byte, error) { t := obj.(*api.Task) // Add the null character as a terminator val := t.ServiceID + "\x00" return true, []byte(val), nil } type taskIndexerByNodeID struct{} func (ti taskIndexerByNodeID) FromArgs(args ...interface{}) ([]byte, error) { return fromArgs(args...) } func (ti taskIndexerByNodeID) FromObject(obj interface{}) (bool, []byte, error) { t := obj.(*api.Task) // Add the null character as a terminator val := t.NodeID + "\x00" return true, []byte(val), nil } type taskIndexerBySlot struct{} func (ti taskIndexerBySlot) FromArgs(args ...interface{}) ([]byte, error) { return fromArgs(args...) } func (ti taskIndexerBySlot) FromObject(obj interface{}) (bool, []byte, error) { t := obj.(*api.Task) // Add the null character as a terminator val := t.ServiceID + "\x00" + strconv.FormatUint(t.Slot, 10) + "\x00" return true, []byte(val), nil } type taskIndexerByDesiredState struct{} func (ti taskIndexerByDesiredState) FromArgs(args ...interface{}) ([]byte, error) { return fromArgs(args...) 
} func (ti taskIndexerByDesiredState) FromObject(obj interface{}) (bool, []byte, error) { t := obj.(*api.Task) // Add the null character as a terminator return true, []byte(strconv.FormatInt(int64(t.DesiredState), 10) + "\x00"), nil } type taskIndexerByNetwork struct{} func (ti taskIndexerByNetwork) FromArgs(args ...interface{}) ([]byte, error) { return fromArgs(args...) } func (ti taskIndexerByNetwork) FromObject(obj interface{}) (bool, [][]byte, error) { t := obj.(*api.Task) var networkIDs [][]byte for _, na := range t.Spec.Networks { // Add the null character as a terminator networkIDs = append(networkIDs, []byte(na.Target+"\x00")) } return len(networkIDs) != 0, networkIDs, nil } type taskIndexerBySecret struct{} func (ti taskIndexerBySecret) FromArgs(args ...interface{}) ([]byte, error) { return fromArgs(args...) } func (ti taskIndexerBySecret) FromObject(obj interface{}) (bool, [][]byte, error) { t := obj.(*api.Task) container := t.Spec.GetContainer() if container == nil { return false, nil, nil } var secretIDs [][]byte for _, secretRef := range container.Secrets { // Add the null character as a terminator secretIDs = append(secretIDs, []byte(secretRef.SecretID+"\x00")) } return len(secretIDs) != 0, secretIDs, nil } type taskIndexerByTaskState struct{} func (ts taskIndexerByTaskState) FromArgs(args ...interface{}) ([]byte, error) { return fromArgs(args...) } func (ts taskIndexerByTaskState) FromObject(obj interface{}) (bool, []byte, error) { t := obj.(*api.Task) // Add the null character as a terminator return true, []byte(strconv.FormatInt(int64(t.Status.State), 10) + "\x00"), nil }
mstanleyjones/docker
vendor/github.com/docker/swarmkit/manager/state/store/tasks.go
GO
apache-2.0
7,821
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package proxy import ( "fmt" "net" "reflect" "strings" "sync" "k8s.io/klog/v2" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" apiservice "k8s.io/kubernetes/pkg/api/v1/service" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/proxy/metrics" utilproxy "k8s.io/kubernetes/pkg/proxy/util" ) // BaseServiceInfo contains base information that defines a service. // This could be used directly by proxier while processing services, // or can be used for constructing a more specific ServiceInfo struct // defined by the proxier if needed. type BaseServiceInfo struct { clusterIP net.IP port int protocol v1.Protocol nodePort int loadBalancerStatus v1.LoadBalancerStatus sessionAffinityType v1.ServiceAffinity stickyMaxAgeSeconds int externalIPs []string loadBalancerSourceRanges []string healthCheckNodePort int nodeLocalExternal bool nodeLocalInternal bool internalTrafficPolicy *v1.ServiceInternalTrafficPolicyType hintsAnnotation string } var _ ServicePort = &BaseServiceInfo{} // String is part of ServicePort interface. func (info *BaseServiceInfo) String() string { return fmt.Sprintf("%s:%d/%s", info.clusterIP, info.port, info.protocol) } // ClusterIP is part of ServicePort interface. func (info *BaseServiceInfo) ClusterIP() net.IP { return info.clusterIP } // Port is part of ServicePort interface. 
func (info *BaseServiceInfo) Port() int { return info.port } // SessionAffinityType is part of the ServicePort interface. func (info *BaseServiceInfo) SessionAffinityType() v1.ServiceAffinity { return info.sessionAffinityType } // StickyMaxAgeSeconds is part of the ServicePort interface func (info *BaseServiceInfo) StickyMaxAgeSeconds() int { return info.stickyMaxAgeSeconds } // Protocol is part of ServicePort interface. func (info *BaseServiceInfo) Protocol() v1.Protocol { return info.protocol } // LoadBalancerSourceRanges is part of ServicePort interface func (info *BaseServiceInfo) LoadBalancerSourceRanges() []string { return info.loadBalancerSourceRanges } // HealthCheckNodePort is part of ServicePort interface. func (info *BaseServiceInfo) HealthCheckNodePort() int { return info.healthCheckNodePort } // NodePort is part of the ServicePort interface. func (info *BaseServiceInfo) NodePort() int { return info.nodePort } // ExternalIPStrings is part of ServicePort interface. func (info *BaseServiceInfo) ExternalIPStrings() []string { return info.externalIPs } // LoadBalancerIPStrings is part of ServicePort interface. func (info *BaseServiceInfo) LoadBalancerIPStrings() []string { var ips []string for _, ing := range info.loadBalancerStatus.Ingress { ips = append(ips, ing.IP) } return ips } // NodeLocalExternal is part of ServicePort interface. func (info *BaseServiceInfo) NodeLocalExternal() bool { return info.nodeLocalExternal } // NodeLocalInternal is part of ServicePort interface func (info *BaseServiceInfo) NodeLocalInternal() bool { return info.nodeLocalInternal } // InternalTrafficPolicy is part of ServicePort interface func (info *BaseServiceInfo) InternalTrafficPolicy() *v1.ServiceInternalTrafficPolicyType { return info.internalTrafficPolicy } // HintsAnnotation is part of ServicePort interface. 
func (info *BaseServiceInfo) HintsAnnotation() string { return info.hintsAnnotation } func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, service *v1.Service) *BaseServiceInfo { nodeLocalExternal := false if apiservice.RequestsOnlyLocalTraffic(service) { nodeLocalExternal = true } nodeLocalInternal := false if utilfeature.DefaultFeatureGate.Enabled(features.ServiceInternalTrafficPolicy) { nodeLocalInternal = apiservice.RequestsOnlyLocalTrafficForInternal(service) } var stickyMaxAgeSeconds int if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP { // Kube-apiserver side guarantees SessionAffinityConfig won't be nil when session affinity type is ClientIP stickyMaxAgeSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds) } clusterIP := utilproxy.GetClusterIPByFamily(sct.ipFamily, service) info := &BaseServiceInfo{ clusterIP: net.ParseIP(clusterIP), port: int(port.Port), protocol: port.Protocol, nodePort: int(port.NodePort), sessionAffinityType: service.Spec.SessionAffinity, stickyMaxAgeSeconds: stickyMaxAgeSeconds, nodeLocalExternal: nodeLocalExternal, nodeLocalInternal: nodeLocalInternal, internalTrafficPolicy: service.Spec.InternalTrafficPolicy, hintsAnnotation: service.Annotations[v1.AnnotationTopologyAwareHints], } loadBalancerSourceRanges := make([]string, len(service.Spec.LoadBalancerSourceRanges)) for i, sourceRange := range service.Spec.LoadBalancerSourceRanges { loadBalancerSourceRanges[i] = strings.TrimSpace(sourceRange) } // filter external ips, source ranges and ingress ips // prior to dual stack services, this was considered an error, but with dual stack // services, this is actually expected. 
Hence we downgraded from reporting by events // to just log lines with high verbosity ipFamilyMap := utilproxy.MapIPsByIPFamily(service.Spec.ExternalIPs) info.externalIPs = ipFamilyMap[sct.ipFamily] // Log the IPs not matching the ipFamily if ips, ok := ipFamilyMap[utilproxy.OtherIPFamily(sct.ipFamily)]; ok && len(ips) > 0 { klog.V(4).Infof("service change tracker(%v) ignored the following external IPs(%s) for service %v/%v as they don't match IPFamily", sct.ipFamily, strings.Join(ips, ","), service.Namespace, service.Name) } ipFamilyMap = utilproxy.MapCIDRsByIPFamily(loadBalancerSourceRanges) info.loadBalancerSourceRanges = ipFamilyMap[sct.ipFamily] // Log the CIDRs not matching the ipFamily if cidrs, ok := ipFamilyMap[utilproxy.OtherIPFamily(sct.ipFamily)]; ok && len(cidrs) > 0 { klog.V(4).Infof("service change tracker(%v) ignored the following load balancer source ranges(%s) for service %v/%v as they don't match IPFamily", sct.ipFamily, strings.Join(cidrs, ","), service.Namespace, service.Name) } // Obtain Load Balancer Ingress IPs var ips []string for _, ing := range service.Status.LoadBalancer.Ingress { if ing.IP != "" { ips = append(ips, ing.IP) } } if len(ips) > 0 { ipFamilyMap = utilproxy.MapIPsByIPFamily(ips) if ipList, ok := ipFamilyMap[utilproxy.OtherIPFamily(sct.ipFamily)]; ok && len(ipList) > 0 { klog.V(4).Infof("service change tracker(%v) ignored the following load balancer(%s) ingress ips for service %v/%v as they don't match IPFamily", sct.ipFamily, strings.Join(ipList, ","), service.Namespace, service.Name) } // Create the LoadBalancerStatus with the filtered IPs for _, ip := range ipFamilyMap[sct.ipFamily] { info.loadBalancerStatus.Ingress = append(info.loadBalancerStatus.Ingress, v1.LoadBalancerIngress{IP: ip}) } } if apiservice.NeedsHealthCheck(service) { p := service.Spec.HealthCheckNodePort if p == 0 { klog.Errorf("Service %s/%s has no healthcheck nodeport", service.Namespace, service.Name) } else { info.healthCheckNodePort = int(p) } } return 
info } type makeServicePortFunc func(*v1.ServicePort, *v1.Service, *BaseServiceInfo) ServicePort // This handler is invoked by the apply function on every change. This function should not modify the // ServiceMap's but just use the changes for any Proxier specific cleanup. type processServiceMapChangeFunc func(previous, current ServiceMap) // serviceChange contains all changes to services that happened since proxy rules were synced. For a single object, // changes are accumulated, i.e. previous is state from before applying the changes, // current is state after applying all of the changes. type serviceChange struct { previous ServiceMap current ServiceMap } // ServiceChangeTracker carries state about uncommitted changes to an arbitrary number of // Services, keyed by their namespace and name. type ServiceChangeTracker struct { // lock protects items. lock sync.Mutex // items maps a service to its serviceChange. items map[types.NamespacedName]*serviceChange // makeServiceInfo allows proxier to inject customized information when processing service. makeServiceInfo makeServicePortFunc processServiceMapChange processServiceMapChangeFunc ipFamily v1.IPFamily recorder record.EventRecorder } // NewServiceChangeTracker initializes a ServiceChangeTracker func NewServiceChangeTracker(makeServiceInfo makeServicePortFunc, ipFamily v1.IPFamily, recorder record.EventRecorder, processServiceMapChange processServiceMapChangeFunc) *ServiceChangeTracker { return &ServiceChangeTracker{ items: make(map[types.NamespacedName]*serviceChange), makeServiceInfo: makeServiceInfo, recorder: recorder, ipFamily: ipFamily, processServiceMapChange: processServiceMapChange, } } // Update updates given service's change map based on the <previous, current> service pair. It returns true if items changed, // otherwise return false. Update can be used to add/update/delete items of ServiceChangeMap. For example, // Add item // - pass <nil, service> as the <previous, current> pair. 
// Update item // - pass <oldService, service> as the <previous, current> pair. // Delete item // - pass <service, nil> as the <previous, current> pair. func (sct *ServiceChangeTracker) Update(previous, current *v1.Service) bool { svc := current if svc == nil { svc = previous } // previous == nil && current == nil is unexpected, we should return false directly. if svc == nil { return false } metrics.ServiceChangesTotal.Inc() namespacedName := types.NamespacedName{Namespace: svc.Namespace, Name: svc.Name} sct.lock.Lock() defer sct.lock.Unlock() change, exists := sct.items[namespacedName] if !exists { change = &serviceChange{} change.previous = sct.serviceToServiceMap(previous) sct.items[namespacedName] = change } change.current = sct.serviceToServiceMap(current) // if change.previous equal to change.current, it means no change if reflect.DeepEqual(change.previous, change.current) { delete(sct.items, namespacedName) } else { klog.V(2).Infof("Service %s updated: %d ports", namespacedName, len(change.current)) } metrics.ServiceChangesPending.Set(float64(len(sct.items))) return len(sct.items) > 0 } // UpdateServiceMapResult is the updated results after applying service changes. type UpdateServiceMapResult struct { // HCServiceNodePorts is a map of Service names to node port numbers which indicate the health of that Service on this Node. // The value(uint16) of HCServices map is the service health check node port. HCServiceNodePorts map[types.NamespacedName]uint16 // UDPStaleClusterIP holds stale (no longer assigned to a Service) Service IPs that had UDP ports. // Callers can use this to abort timeout-waits or clear connection-tracking information. UDPStaleClusterIP sets.String } // Update updates ServiceMap base on the given changes. 
func (sm ServiceMap) Update(changes *ServiceChangeTracker) (result UpdateServiceMapResult) { result.UDPStaleClusterIP = sets.NewString() sm.apply(changes, result.UDPStaleClusterIP) // TODO: If this will appear to be computationally expensive, consider // computing this incrementally similarly to serviceMap. result.HCServiceNodePorts = make(map[types.NamespacedName]uint16) for svcPortName, info := range sm { if info.HealthCheckNodePort() != 0 { result.HCServiceNodePorts[svcPortName.NamespacedName] = uint16(info.HealthCheckNodePort()) } } return result } // ServiceMap maps a service to its ServicePort. type ServiceMap map[ServicePortName]ServicePort // serviceToServiceMap translates a single Service object to a ServiceMap. // // NOTE: service object should NOT be modified. func (sct *ServiceChangeTracker) serviceToServiceMap(service *v1.Service) ServiceMap { if service == nil { return nil } if utilproxy.ShouldSkipService(service) { return nil } clusterIP := utilproxy.GetClusterIPByFamily(sct.ipFamily, service) if clusterIP == "" { return nil } serviceMap := make(ServiceMap) svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} for i := range service.Spec.Ports { servicePort := &service.Spec.Ports[i] svcPortName := ServicePortName{NamespacedName: svcName, Port: servicePort.Name, Protocol: servicePort.Protocol} baseSvcInfo := sct.newBaseServiceInfo(servicePort, service) if sct.makeServiceInfo != nil { serviceMap[svcPortName] = sct.makeServiceInfo(servicePort, service, baseSvcInfo) } else { serviceMap[svcPortName] = baseSvcInfo } } return serviceMap } // apply the changes to ServiceMap and update the stale udp cluster IP set. The UDPStaleClusterIP argument is passed in to store the // udp protocol service cluster ip when service is deleted from the ServiceMap. // apply triggers processServiceMapChange on every change. 
func (sm *ServiceMap) apply(changes *ServiceChangeTracker, UDPStaleClusterIP sets.String) { changes.lock.Lock() defer changes.lock.Unlock() for _, change := range changes.items { if changes.processServiceMapChange != nil { changes.processServiceMapChange(change.previous, change.current) } sm.merge(change.current) // filter out the Update event of current changes from previous changes before calling unmerge() so that can // skip deleting the Update events. change.previous.filter(change.current) sm.unmerge(change.previous, UDPStaleClusterIP) } // clear changes after applying them to ServiceMap. changes.items = make(map[types.NamespacedName]*serviceChange) metrics.ServiceChangesPending.Set(0) } // merge adds other ServiceMap's elements to current ServiceMap. // If collision, other ALWAYS win. Otherwise add the other to current. // In other words, if some elements in current collisions with other, update the current by other. // It returns a string type set which stores all the newly merged services' identifier, ServicePortName.String(), to help users // tell if a service is deleted or updated. // The returned value is one of the arguments of ServiceMap.unmerge(). // ServiceMap A Merge ServiceMap B will do following 2 things: // * update ServiceMap A. // * produce a string set which stores all other ServiceMap's ServicePortName.String(). // For example, // - A{} // - B{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}} // - A updated to be {{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}} // - produce string set {"ns/cluster-ip:http"} // - A{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 345, "UDP"}} // - B{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}} // - A updated to be {{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}} // - produce string set {"ns/cluster-ip:http"} func (sm *ServiceMap) merge(other ServiceMap) sets.String { // existingPorts is going to store all identifiers of all services in `other` ServiceMap. 
existingPorts := sets.NewString() for svcPortName, info := range other { // Take ServicePortName.String() as the newly merged service's identifier and put it into existingPorts. existingPorts.Insert(svcPortName.String()) _, exists := (*sm)[svcPortName] if !exists { klog.V(1).Infof("Adding new service port %q at %s", svcPortName, info.String()) } else { klog.V(1).Infof("Updating existing service port %q at %s", svcPortName, info.String()) } (*sm)[svcPortName] = info } return existingPorts } // filter filters out elements from ServiceMap base on given ports string sets. func (sm *ServiceMap) filter(other ServiceMap) { for svcPortName := range *sm { // skip the delete for Update event. if _, ok := other[svcPortName]; ok { delete(*sm, svcPortName) } } } // unmerge deletes all other ServiceMap's elements from current ServiceMap. We pass in the UDPStaleClusterIP strings sets // for storing the stale udp service cluster IPs. We will clear stale udp connection base on UDPStaleClusterIP later func (sm *ServiceMap) unmerge(other ServiceMap, UDPStaleClusterIP sets.String) { for svcPortName := range other { info, exists := (*sm)[svcPortName] if exists { klog.V(1).Infof("Removing service port %q", svcPortName) if info.Protocol() == v1.ProtocolUDP { UDPStaleClusterIP.Insert(info.ClusterIP().String()) } delete(*sm, svcPortName) } else { klog.Errorf("Service port %q doesn't exists", svcPortName) } } }
ravilr/kubernetes
pkg/proxy/service.go
GO
apache-2.0
17,273
/* * To change this template, choose Tools | Templates * and open the template in the editor. */ package com.eas.opc.da.dcom; import org.jinterop.dcom.common.JIException; import org.jinterop.dcom.core.JIStruct; import org.jinterop.dcom.core.JIVariant; /** * * @author pk */ public class OPCITEMSTATE { private JIStruct struct; public static JIStruct getEmptyStruct() throws JIException { JIStruct emptyStruct = new JIStruct(); emptyStruct.addMember(Integer.class); //hClient emptyStruct.addMember(new FileTime().getStruct()); //ftTimeStamp emptyStruct.addMember(Short.class); //wQuality emptyStruct.addMember(Short.class); //wReserved emptyStruct.addMember(JIVariant.class); //vDataValue return emptyStruct; } public OPCITEMSTATE(JIStruct struct) { this.struct = struct; } public JIStruct getStruct() { return struct; } public Integer getClientHandle() { return (Integer) struct.getMember(0); } public FileTime getTimeStamp() { return new FileTime((JIStruct) struct.getMember(1)); } public Short getQuality() { return (Short) struct.getMember(2); } public JIVariant getDataValue() { return (JIVariant) struct.getMember(4); } }
altsoft/PlatypusJS
opc-da/src/main/java/com/eas/opc/da/dcom/OPCITEMSTATE.java
Java
apache-2.0
1,334
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/fill_zeros_like_op.h"

namespace paddle {
namespace operators {

// Operator definition: the output tensor takes the input's shape and LoD.
class FillZerosLikeOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  // Validate inputs/outputs and propagate X's dims and LoD to Out.
  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input(X) of FillZerosLikeOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of FillZerosLikeOp should not be null.");
    ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
    ctx->ShareLoD("X", /*->*/ "Out");
  }
};

// Proto maker: declares the op's inputs, outputs, and user-facing docs.
class FillZerosLikeOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "The input of fill-zeros-like op.");
    AddOutput("Out", "The variable will be filled up with zeros.");
    AddComment(R"DOC(
FillZerosLike Operator.

Fill up a variable with zeros.
The output will have the same size as the input.

)DOC");
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

// No gradient: zeros carry no gradient information.
REGISTER_OP_WITHOUT_GRADIENT(fill_zeros_like, ops::FillZerosLikeOp,
                             ops::FillZerosLikeOpMaker);
REGISTER_OP_CPU_KERNEL(
    fill_zeros_like,
    ops::FillZerosLikeKernel<paddle::platform::CPUDeviceContext, int>,
    ops::FillZerosLikeKernel<paddle::platform::CPUDeviceContext, int64_t>,
    ops::FillZerosLikeKernel<paddle::platform::CPUDeviceContext, float>,
    ops::FillZerosLikeKernel<paddle::platform::CPUDeviceContext, double>,
    ops::FillZerosLikeKernel<paddle::platform::CPUDeviceContext, bool>);
Canpio/Paddle
paddle/fluid/operators/fill_zeros_like_op.cc
C++
apache-2.0
2,212
#region Using

using System;

#endregion

namespace PPWCode.Util.OddsAndEnds.I.DateTimeProvider
{
    /// <summary>
    /// <see cref="IDateTimeProvider"/> backed by the real system clock
    /// (<see cref="DateTime.Today"/> / <see cref="DateTime.Now"/>).
    /// </summary>
    public class GenuineDateTimeProvider : IDateTimeProvider
    {
        public GenuineDateTimeProvider()
        {
        }

        /// <summary>
        /// Factory method returning a fresh provider instance.
        /// </summary>
        public static IDateTimeProvider CreateInstance()
        {
            return new GenuineDateTimeProvider();
        }

        /// <summary>
        /// The current date, with the time component set to midnight.
        /// </summary>
        public DateTime Today
        {
            get { return DateTime.Today; }
        }

        /// <summary>
        /// The current local date and time.
        /// </summary>
        public DateTime Now
        {
            get { return DateTime.Now; }
        }
    }
}
jandppw/ppwcode-recovered-from-google-code
dotnet/Util/OddsAndEnds/I/2.n/2.1.1/src/I/DateTimeProvider/GenuineDateTimeProvider.cs
C#
apache-2.0
577
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.rest.action.admin.cluster.node.restart; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.restart.NodesRestartRequest; import org.elasticsearch.action.admin.cluster.node.restart.NodesRestartResponse; import org.elasticsearch.client.Client; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.*; import java.io.IOException; import static org.elasticsearch.rest.action.support.RestXContentBuilder.restContentBuilder; /** * */ public class RestNodesRestartAction extends BaseRestHandler { @Inject public RestNodesRestartAction(Settings settings, Client client, RestController controller) { super(settings, client); controller.registerHandler(RestRequest.Method.POST, "/_cluster/nodes/_restart", this); controller.registerHandler(RestRequest.Method.POST, "/_cluster/nodes/{nodeId}/_restart", this); } @Override public void handleRequest(final RestRequest request, final RestChannel channel) { String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")); 
NodesRestartRequest nodesRestartRequest = new NodesRestartRequest(nodesIds); nodesRestartRequest.listenerThreaded(false); nodesRestartRequest.delay(request.paramAsTime("delay", nodesRestartRequest.delay())); client.admin().cluster().nodesRestart(nodesRestartRequest, new ActionListener<NodesRestartResponse>() { @Override public void onResponse(NodesRestartResponse result) { try { XContentBuilder builder = restContentBuilder(request); builder.startObject(); builder.field("cluster_name", result.getClusterName().value()); builder.startObject("nodes"); for (NodesRestartResponse.NodeRestartResponse nodeInfo : result) { builder.startObject(nodeInfo.getNode().id()); builder.field("name", nodeInfo.getNode().name()); builder.endObject(); } builder.endObject(); builder.endObject(); channel.sendResponse(new XContentRestResponse(request, RestStatus.OK, builder)); } catch (Throwable e) { onFailure(e); } } @Override public void onFailure(Throwable e) { try { channel.sendResponse(new XContentThrowableRestResponse(request, e)); } catch (IOException e1) { logger.error("Failed to send failure response", e1); } } }); } }
libosu/elasticsearch
src/main/java/org/elasticsearch/rest/action/admin/cluster/node/restart/RestNodesRestartAction.java
Java
apache-2.0
3,716
/*************************GO-LICENSE-START*********************************
 * Copyright 2015 ThoughtWorks, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *************************GO-LICENSE-END***********************************/

package com.thoughtworks.cruise.api.response;

import java.util.List;

/**
 * API-response value object describing why a pipeline was built.
 * Field names use snake_case to match the serialized JSON payload.
 */
public class BuildCause {
    private boolean trigger_forced;
    private String trigger_message;
    private String approver;
    private List<MaterialRevision> material_revisions;

    public BuildCause(boolean triggerForced, String triggerMessage, String approver,
                      List<MaterialRevision> materialRevisions) {
        super();
        this.trigger_forced = triggerForced;
        this.trigger_message = triggerMessage;
        this.approver = approver;
        this.material_revisions = materialRevisions;
    }

    /** @return whether the build was manually forced. */
    public boolean isTriggerForced() {
        return trigger_forced;
    }

    public void setTriggerForced(boolean triggerForced) {
        this.trigger_forced = triggerForced;
    }

    /** @return the human-readable trigger description. */
    public String getTriggerMessage() {
        return trigger_message;
    }

    public void setTriggerMessage(String triggerMessage) {
        this.trigger_message = triggerMessage;
    }

    /** @return who approved/triggered the build. */
    public String getApprover() {
        return approver;
    }

    public void setApprover(String approver) {
        this.approver = approver;
    }

    /** @return the material revisions that caused the build. */
    public List<MaterialRevision> getMaterialRevisions() {
        return material_revisions;
    }

    public void setMaterialRevisions(List<MaterialRevision> materialRevisions) {
        this.material_revisions = materialRevisions;
    }
}
ketan/functional-tests
src/test/java/com/thoughtworks/cruise/api/response/BuildCause.java
Java
apache-2.0
1,969
var assert = require('assert');
var util = require('util');
var events = require('events');
var utils = require('../../lib/utils');
var helper = require('../test-helper');
var InetAddress = require('../../lib/types').InetAddress;

// NOTE: `new Buffer(...)` is deprecated (security hazard: uninitialized
// memory for numeric args). Replaced throughout with Buffer.alloc (sized,
// zero-filled) and Buffer.from (content-initialized) — behavior-identical
// for these tests.
describe('InetAddress', function () {
  describe('constructor', function () {
    it('should validate the Buffer length', function () {
      assert.throws(function () {
        new InetAddress(Buffer.alloc(10));
      });
      assert.throws(function () {
        new InetAddress(null);
      });
      assert.throws(function () {
        new InetAddress();
      });
      assert.doesNotThrow(function () {
        new InetAddress(Buffer.alloc(16));
      });
      assert.doesNotThrow(function () {
        new InetAddress(Buffer.alloc(4));
      });
    });
  });
  describe('#toString()', function () {
    it('should convert IPv6 to string representation', function () {
      var val = new InetAddress(Buffer.from('aabb0000eeff00112233445566778899', 'hex'));
      assert.strictEqual(val.version, 6);
      assert.strictEqual(val.toString(), 'aabb::eeff:11:2233:4455:6677:8899');
      val = new InetAddress(Buffer.from('aabbccddeeff00112233445566778899', 'hex'));
      assert.strictEqual(val.toString(), 'aabb:ccdd:eeff:11:2233:4455:6677:8899');
      val = new InetAddress(Buffer.from('aabb0000000000112233445566778899', 'hex'));
      assert.strictEqual(val.toString(), 'aabb::11:2233:4455:6677:8899');
      val = new InetAddress(Buffer.from('aabb0001000100112233445500000000', 'hex'));
      assert.strictEqual(val.toString(), 'aabb:1:1:11:2233:4455::');
      val = new InetAddress(Buffer.from('00000000000100112233445500aa00bb', 'hex'));
      assert.strictEqual(val.toString(), '::1:11:2233:4455:aa:bb');
      val = new InetAddress(Buffer.from('000000000000000022330000000000bb', 'hex'));
      assert.strictEqual(val.toString(), '::2233:0:0:bb');
      val = new InetAddress(Buffer.from('00000000000000000000000000000001', 'hex'));
      assert.strictEqual(val.toString(), '::1');
    });
    it('should convert IPv4 to string representation', function () {
      var val = new InetAddress(Buffer.from([127, 0, 0, 1]));
      assert.strictEqual(val.version, 4);
      assert.strictEqual(val.toString(), '127.0.0.1');
      val = new InetAddress(Buffer.from([198, 168, 1, 1]));
      assert.strictEqual(val.toString(), '198.168.1.1');
      val = new InetAddress(Buffer.from([10, 12, 254, 32]));
      assert.strictEqual(val.toString(), '10.12.254.32');
    });
  });
  describe('#equals()', function () {
    it('should return true when the bytes are the same', function () {
      var hex1 = 'aabb0000eeff00112233445566778899';
      var hex2 = 'ffff0000eeff00112233445566778899';
      var buf1 = Buffer.from(hex1, 'hex');
      var val1 = new InetAddress(buf1);
      var val2 = new InetAddress(Buffer.from(hex2, 'hex'));
      assert.ok(val1.equals(new InetAddress(buf1)));
      assert.ok(val1.equals(new InetAddress(Buffer.from(hex1, 'hex'))));
      assert.ok(!val1.equals(val2));
    });
  });
  describe('fromString()', function () {
    it('should parse IPv6 string representation', function () {
      [
        'aabb::eeff:11:2233:4455:6677:8899',
        'aabb:1:eeff:11:2233:4455:6677:8899',
        'aabb:1:eeff:11:2233:4455:6677:8899',
        '::1:11:2233:4455:aa:bb',
        '::2233:0:0:bb',
        '::1234',
        '10fa::1'
      ].forEach(function (item) {
        var val = InetAddress.fromString(item, 'hex');
        helper.assertInstanceOf(val, InetAddress);
        assert.strictEqual(val.toString(), item);
      });
    });
    it('should parse IPv4 string representation', function () {
      var val = InetAddress.fromString('127.0.0.1');
      helper.assertInstanceOf(val, InetAddress);
      assert.strictEqual(val.toString(), '127.0.0.1');
      val = InetAddress.fromString('198.168.1.1');
      helper.assertInstanceOf(val, InetAddress);
      assert.strictEqual(val.toString(), '198.168.1.1');
      val = InetAddress.fromString('10.11.12.13');
      helper.assertInstanceOf(val, InetAddress);
      assert.strictEqual(val.toString(), '10.11.12.13');
    });
    it('should throw when can not parse to 4 or 16 bytes', function () {
      assert.throws(function () {
        InetAddress.fromString('127.0.0.1.10');
      }, Error);
    });
  });
});
thiagoveras/nodejs-driver
test/unit/inet-address-tests.js
JavaScript
apache-2.0
4,324
# Copyright 2015 CloudByte Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import uuid from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import units import six from six.moves import http_client from six.moves import urllib from cinder import exception from cinder.i18n import _, _LE, _LI from cinder.volume.drivers.cloudbyte import options from cinder.volume.drivers.san import san LOG = logging.getLogger(__name__) class CloudByteISCSIDriver(san.SanISCSIDriver): """CloudByte ISCSI Driver. 
Version history: 1.0.0 - Initial driver 1.1.0 - Add chap support and minor bug fixes 1.1.1 - Add wait logic for delete volumes """ VERSION = '1.1.1' volume_stats = {} def __init__(self, *args, **kwargs): super(CloudByteISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values( options.cloudbyte_add_qosgroup_opts) self.configuration.append_config_values( options.cloudbyte_create_volume_opts) self.configuration.append_config_values( options.cloudbyte_connection_opts) self.cb_use_chap = self.configuration.use_chap_auth self.get_volume_stats() def _get_url(self, cmd, params, apikey): """Will prepare URL that connects to CloudByte.""" if params is None: params = {} params['command'] = cmd params['response'] = 'json' sanitized_params = {} for key in params: value = params[key] if value is not None: sanitized_params[key] = six.text_type(value) sanitized_params = urllib.parse.urlencode(sanitized_params) url = ('/client/api?%s' % sanitized_params) LOG.debug("CloudByte URL to be executed: [%s].", url) # Add the apikey api = {} api['apiKey'] = apikey url = url + '&' + urllib.parse.urlencode(api) return url def _extract_http_error(self, error_data): # Extract the error message from error_data error_msg = "" # error_data is a single key value dict for key, value in error_data.items(): error_msg = value.get('errortext') return error_msg def _execute_and_get_response_details(self, host, url): """Will prepare response after executing an http request.""" res_details = {} try: # Prepare the connection connection = http_client.HTTPSConnection(host) # Make the connection connection.request('GET', url) # Extract the response as the connection was successful response = connection.getresponse() # Read the response data = response.read() # Transform the json string into a py object data = json.loads(data) # Extract http error msg if any error_details = None if response.status != 200: error_details = self._extract_http_error(data) # Prepare the return object 
res_details['data'] = data res_details['error'] = error_details res_details['http_status'] = response.status finally: connection.close() LOG.debug("CloudByte connection was closed successfully.") return res_details def _api_request_for_cloudbyte(self, cmd, params, version=None): """Make http calls to CloudByte.""" LOG.debug("Executing CloudByte API for command [%s].", cmd) if version is None: version = CloudByteISCSIDriver.VERSION # Below is retrieved from /etc/cinder/cinder.conf apikey = self.configuration.cb_apikey if apikey is None: msg = (_("API key is missing for CloudByte driver.")) raise exception.VolumeBackendAPIException(data=msg) host = self.configuration.san_ip # Construct the CloudByte URL with query params url = self._get_url(cmd, params, apikey) data = {} error_details = None http_status = None try: # Execute CloudByte API & frame the response res_obj = self._execute_and_get_response_details(host, url) data = res_obj['data'] error_details = res_obj['error'] http_status = res_obj['http_status'] except http_client.HTTPException as ex: msg = (_("Error executing CloudByte API [%(cmd)s], " "Error: %(err)s.") % {'cmd': cmd, 'err': ex}) raise exception.VolumeBackendAPIException(data=msg) # Check if it was an error response from CloudByte if http_status != 200: msg = (_("Failed to execute CloudByte API [%(cmd)s]." 
" Http status: %(status)s," " Error: %(error)s.") % {'cmd': cmd, 'status': http_status, 'error': error_details}) raise exception.VolumeBackendAPIException(data=msg) LOG.info(_LI("CloudByte API executed successfully for command [%s]."), cmd) return data def _request_tsm_details(self, account_id): params = {"accountid": account_id} # List all CloudByte tsm data = self._api_request_for_cloudbyte("listTsm", params) return data def _override_params(self, default_dict, filtered_user_dict): """Override the default config values with user provided values.""" if filtered_user_dict is None: # Nothing to override return default_dict for key, value in default_dict.items(): # Fill the user dict with default options based on condition if filtered_user_dict.get(key) is None and value is not None: filtered_user_dict[key] = value return filtered_user_dict def _add_qos_group_request(self, volume, tsmid, volume_name): # Get qos related params from configuration params = self.configuration.cb_add_qosgroup if params is None: params = {} params['name'] = "QoS_" + volume_name params['tsmid'] = tsmid data = self._api_request_for_cloudbyte("addQosGroup", params) return data def _create_volume_request(self, volume, datasetid, qosgroupid, tsmid, volume_name): size = volume.get('size') quotasize = six.text_type(size) + "G" # Prepare the user input params params = { "datasetid": datasetid, "name": volume_name, "qosgroupid": qosgroupid, "tsmid": tsmid, "quotasize": quotasize } # Get the additional params from configuration params = self._override_params(self.configuration.cb_create_volume, params) data = self._api_request_for_cloudbyte("createVolume", params) return data def _queryAsyncJobResult_request(self, jobid): async_cmd = "queryAsyncJobResult" params = { "jobId": jobid, } data = self._api_request_for_cloudbyte(async_cmd, params) return data def _get_tsm_details(self, data, tsm_name, account_name): # Filter required tsm's details tsms = data['listTsmResponse'].get('listTsm') if tsms is 
None: msg = (_("TSM [%(tsm)s] was not found in CloudByte storage " "for account [%(account)s].") % {'tsm': tsm_name, 'account': account_name}) raise exception.VolumeBackendAPIException(data=msg) tsmdetails = {} for tsm in tsms: if tsm['name'] == tsm_name: tsmdetails['datasetid'] = tsm['datasetid'] tsmdetails['tsmid'] = tsm['id'] break return tsmdetails def _retry_volume_operation(self, operation, retries, max_retries, jobid, cb_volume): """CloudByte async calls via the FixedIntervalLoopingCall.""" # Query the CloudByte storage with this jobid volume_response = self._queryAsyncJobResult_request(jobid) count = retries['count'] result_res = None if volume_response is not None: result_res = volume_response.get('queryasyncjobresultresponse') if result_res is None: msg = (_( "Null response received while querying " "for [%(operation)s] based job [%(job)s] " "at CloudByte storage.") % {'operation': operation, 'job': jobid}) raise exception.VolumeBackendAPIException(data=msg) status = result_res.get('jobstatus') if status == 1: LOG.info(_LI("CloudByte operation [%(operation)s] succeeded for " "volume [%(cb_volume)s]."), {'operation': operation, 'cb_volume': cb_volume}) raise loopingcall.LoopingCallDone() elif count == max_retries: # All attempts exhausted LOG.error(_LE("CloudByte operation [%(operation)s] failed" " for volume [%(vol)s]. 
Exhausted all" " [%(max)s] attempts."), {'operation': operation, 'vol': cb_volume, 'max': max_retries}) raise loopingcall.LoopingCallDone(retvalue=False) else: count += 1 retries['count'] = count LOG.debug("CloudByte operation [%(operation)s] for" " volume [%(vol)s]: retry [%(retry)s] of [%(max)s].", {'operation': operation, 'vol': cb_volume, 'retry': count, 'max': max_retries}) def _wait_for_volume_creation(self, volume_response, cb_volume_name): """Given the job wait for it to complete.""" vol_res = volume_response.get('createvolumeresponse') if vol_res is None: msg = _("Null response received while creating volume [%s] " "at CloudByte storage.") % cb_volume_name raise exception.VolumeBackendAPIException(data=msg) jobid = vol_res.get('jobid') if jobid is None: msg = _("Job id not found in CloudByte's " "create volume [%s] response.") % cb_volume_name raise exception.VolumeBackendAPIException(data=msg) retry_interval = ( self.configuration.cb_confirm_volume_create_retry_interval) max_retries = ( self.configuration.cb_confirm_volume_create_retries) retries = {'count': 0} timer = loopingcall.FixedIntervalLoopingCall( self._retry_volume_operation, 'Create Volume', retries, max_retries, jobid, cb_volume_name) timer.start(interval=retry_interval).wait() def _wait_for_volume_deletion(self, volume_response, cb_volume_id): """Given the job wait for it to complete.""" vol_res = volume_response.get('deleteFileSystemResponse') if vol_res is None: msg = _("Null response received while deleting volume [%s] " "at CloudByte storage.") % cb_volume_id raise exception.VolumeBackendAPIException(data=msg) jobid = vol_res.get('jobid') if jobid is None: msg = _("Job id not found in CloudByte's " "delete volume [%s] response.") % cb_volume_id raise exception.VolumeBackendAPIException(data=msg) retry_interval = ( self.configuration.cb_confirm_volume_delete_retry_interval) max_retries = ( self.configuration.cb_confirm_volume_delete_retries) retries = {'count': 0} timer = 
loopingcall.FixedIntervalLoopingCall( self._retry_volume_operation, 'Delete Volume', retries, max_retries, jobid, cb_volume_id) timer.start(interval=retry_interval).wait() def _get_volume_id_from_response(self, cb_volumes, volume_name): """Search the volume in CloudByte storage.""" vol_res = cb_volumes.get('listFilesystemResponse') if vol_res is None: msg = _("Null response received from CloudByte's " "list filesystem.") raise exception.VolumeBackendAPIException(data=msg) volumes = vol_res.get('filesystem') if volumes is None: msg = _('No volumes found in CloudByte storage.') raise exception.VolumeBackendAPIException(data=msg) volume_id = None for vol in volumes: if vol['name'] == volume_name: volume_id = vol['id'] break if volume_id is None: msg = _("Volume [%s] not found in CloudByte " "storage.") % volume_name raise exception.VolumeBackendAPIException(data=msg) return volume_id def _get_qosgroupid_id_from_response(self, cb_volumes, volume_id): volumes = cb_volumes['listFilesystemResponse']['filesystem'] qosgroup_id = None for vol in volumes: if vol['id'] == volume_id: qosgroup_id = vol['groupid'] break return qosgroup_id def _build_provider_details_from_volume(self, volume, chap): model_update = {} model_update['provider_location'] = ( '%s %s %s' % (volume['ipaddress'] + ':3260', volume['iqnname'], 0) ) # Will provide CHAP Authentication on forthcoming patches/release model_update['provider_auth'] = None if chap: model_update['provider_auth'] = ('CHAP %(username)s %(password)s' % chap) model_update['provider_id'] = volume['id'] LOG.debug("CloudByte volume iqn: [%(iqn)s] provider id: [%(proid)s].", {'iqn': volume['iqnname'], 'proid': volume['id']}) return model_update def _build_provider_details_from_response(self, cb_volumes, volume_name, chap): """Get provider information.""" model_update = {} volumes = cb_volumes['listFilesystemResponse']['filesystem'] for vol in volumes: if vol['name'] == volume_name: model_update = 
self._build_provider_details_from_volume(vol, chap) break return model_update def _get_initiator_group_id_from_response(self, data): """Find iSCSI initiator group id.""" ig_list_res = data.get('listInitiatorsResponse') if ig_list_res is None: msg = _("Null response received from CloudByte's " "list iscsi initiators.") raise exception.VolumeBackendAPIException(data=msg) ig_list = ig_list_res.get('initiator') if ig_list is None: msg = _('No iscsi initiators were found in CloudByte.') raise exception.VolumeBackendAPIException(data=msg) ig_id = None for ig in ig_list: if ig.get('initiatorgroup') == 'ALL': ig_id = ig['id'] break return ig_id def _get_iscsi_service_id_from_response(self, volume_id, data): iscsi_service_res = data.get('listVolumeiSCSIServiceResponse') if iscsi_service_res is None: msg = _("Null response received from CloudByte's " "list volume iscsi service.") raise exception.VolumeBackendAPIException(data=msg) iscsi_service_list = iscsi_service_res.get('iSCSIService') if iscsi_service_list is None: msg = _('No iscsi services found in CloudByte storage.') raise exception.VolumeBackendAPIException(data=msg) iscsi_id = None for iscsi_service in iscsi_service_list: if iscsi_service['volume_id'] == volume_id: iscsi_id = iscsi_service['id'] break if iscsi_id is None: msg = _("No iscsi service found for CloudByte " "volume [%s].") % volume_id raise exception.VolumeBackendAPIException(data=msg) else: return iscsi_id def _request_update_iscsi_service(self, iscsi_id, ig_id, ag_id): params = { "id": iscsi_id, "igid": ig_id } if ag_id: params['authgroupid'] = ag_id params['authmethod'] = "CHAP" self._api_request_for_cloudbyte( 'updateVolumeiSCSIService', params) def _get_cb_snapshot_path(self, snapshot_name, volume_id): """Find CloudByte snapshot path.""" params = {"id": volume_id} # List all snapshot from CloudByte cb_snapshots_list = self._api_request_for_cloudbyte( 'listStorageSnapshots', params) # Filter required snapshot from list cb_snap_res = 
cb_snapshots_list.get('listDatasetSnapshotsResponse') cb_snapshot = {} if cb_snap_res is not None: cb_snapshot = cb_snap_res.get('snapshot') path = None # Filter snapshot path for snap in cb_snapshot: if snap['name'] == snapshot_name: path = snap['path'] break return path def _get_account_id_from_name(self, account_name): params = {} data = self._api_request_for_cloudbyte("listAccount", params) accounts = data["listAccountResponse"]["account"] account_id = None for account in accounts: if account.get("name") == account_name: account_id = account.get("id") break if account_id is None: msg = _("Failed to get CloudByte account details " "for account [%s].") % account_name raise exception.VolumeBackendAPIException(data=msg) return account_id def _search_volume_id(self, cb_volumes, cb_volume_id): """Search the volume in CloudByte.""" volumes_res = cb_volumes.get('listFilesystemResponse') if volumes_res is None: msg = _("No response was received from CloudByte's " "list filesystem api call.") raise exception.VolumeBackendAPIException(data=msg) volumes = volumes_res.get('filesystem') if volumes is None: msg = _("No volume was found at CloudByte storage.") raise exception.VolumeBackendAPIException(data=msg) volume_id = None for vol in volumes: if vol['id'] == cb_volume_id: volume_id = vol['id'] break return volume_id def _get_storage_info(self, tsmname): """Get CloudByte TSM that is associated with OpenStack backend.""" # List all TSMs from CloudByte storage tsm_list = self._api_request_for_cloudbyte('listTsm', params={}) tsm_details_res = tsm_list.get('listTsmResponse') if tsm_details_res is None: msg = _("No response was received from CloudByte storage " "list tsm API call.") raise exception.VolumeBackendAPIException(data=msg) tsm_details = tsm_details_res.get('listTsm') data = {} flag = 0 # Filter required TSM and get storage info for tsms in tsm_details: if tsms['name'] == tsmname: flag = 1 data['total_capacity_gb'] = ( float(tsms['numericquota']) / units.Ki) 
data['free_capacity_gb'] = ( float(tsms['availablequota']) / units.Ki) break # TSM not found in CloudByte storage if flag == 0: LOG.error(_LE("TSM [%s] not found in CloudByte storage."), tsmname) data['total_capacity_gb'] = 0.0 data['free_capacity_gb'] = 0.0 return data def _get_auth_group_id_from_response(self, data): """Find iSCSI auth group id.""" chap_group = self.configuration.cb_auth_group ag_list_res = data.get('listiSCSIAuthGroupResponse') if ag_list_res is None: msg = _("Null response received from CloudByte's " "list iscsi auth groups.") raise exception.VolumeBackendAPIException(data=msg) ag_list = ag_list_res.get('authgroup') if ag_list is None: msg = _('No iscsi auth groups were found in CloudByte.') raise exception.VolumeBackendAPIException(data=msg) ag_id = None for ag in ag_list: if ag.get('name') == chap_group: ag_id = ag['id'] break else: msg = _("Auth group [%s] details not found in " "CloudByte storage.") % chap_group raise exception.VolumeBackendAPIException(data=msg) return ag_id def _get_auth_group_info(self, account_id, ag_id): """Fetch the auth group details.""" params = {"accountid": account_id, "authgroupid": ag_id} auth_users = self._api_request_for_cloudbyte( 'listiSCSIAuthUser', params) auth_user_details_res = auth_users.get('listiSCSIAuthUsersResponse') if auth_user_details_res is None: msg = _("No response was received from CloudByte storage " "list iSCSI auth user API call.") raise exception.VolumeBackendAPIException(data=msg) auth_user_details = auth_user_details_res.get('authuser') if auth_user_details is None: msg = _("Auth user details not found in CloudByte storage.") raise exception.VolumeBackendAPIException(data=msg) chapuser = auth_user_details[0].get('chapusername') chappassword = auth_user_details[0].get('chappassword') if chapuser is None or chappassword is None: msg = _("Invalid chap user details found in CloudByte storage.") raise exception.VolumeBackendAPIException(data=msg) data = {'username': chapuser, 'password': 
chappassword, 'ag_id': ag_id} return data def _get_chap_info(self, account_id): """Fetch the chap details.""" params = {"accountid": account_id} iscsi_auth_data = self._api_request_for_cloudbyte( 'listiSCSIAuthGroup', params) ag_id = self._get_auth_group_id_from_response( iscsi_auth_data) return self._get_auth_group_info(account_id, ag_id) def _export(self): model_update = {'provider_auth': None} if self.cb_use_chap is True: account_name = self.configuration.cb_account_name account_id = self._get_account_id_from_name(account_name) chap = self._get_chap_info(account_id) model_update['provider_auth'] = ('CHAP %(username)s %(password)s' % chap) return model_update def create_volume(self, volume): tsm_name = self.configuration.cb_tsm_name account_name = self.configuration.cb_account_name # Get account id of this account account_id = self._get_account_id_from_name(account_name) # Set backend storage volume name using OpenStack volume id cb_volume_name = volume['id'].replace("-", "") LOG.debug("Will create a volume [%(cb_vol)s] in TSM [%(tsm)s] " "at CloudByte storage w.r.t " "OpenStack volume [%(stack_vol)s].", {'cb_vol': cb_volume_name, 'stack_vol': volume.get('id'), 'tsm': tsm_name}) tsm_data = self._request_tsm_details(account_id) tsm_details = self._get_tsm_details(tsm_data, tsm_name, account_name) # Send request to create a qos group before creating a volume LOG.debug("Creating qos group for CloudByte volume [%s].", cb_volume_name) qos_data = self._add_qos_group_request( volume, tsm_details.get('tsmid'), cb_volume_name) # Extract the qos group id from response qosgroupid = qos_data['addqosgroupresponse']['qosgroup']['id'] LOG.debug("Successfully created qos group for CloudByte volume [%s].", cb_volume_name) # Send a create volume request to CloudByte API vol_data = self._create_volume_request( volume, tsm_details.get('datasetid'), qosgroupid, tsm_details.get('tsmid'), cb_volume_name) # Since create volume is an async call; # need to confirm the creation before 
proceeding further self._wait_for_volume_creation(vol_data, cb_volume_name) # Fetch iscsi id cb_volumes = self._api_request_for_cloudbyte( 'listFileSystem', params={}) volume_id = self._get_volume_id_from_response(cb_volumes, cb_volume_name) params = {"storageid": volume_id} iscsi_service_data = self._api_request_for_cloudbyte( 'listVolumeiSCSIService', params) iscsi_id = self._get_iscsi_service_id_from_response( volume_id, iscsi_service_data) # Fetch the initiator group ID params = {"accountid": account_id} iscsi_initiator_data = self._api_request_for_cloudbyte( 'listiSCSIInitiator', params) ig_id = self._get_initiator_group_id_from_response( iscsi_initiator_data) LOG.debug("Updating iscsi service for CloudByte volume [%s].", cb_volume_name) ag_id = None chap_info = {} if self.cb_use_chap is True: chap_info = self._get_chap_info(account_id) ag_id = chap_info['ag_id'] # Update the iscsi service with above fetched iscsi_id & ig_id self._request_update_iscsi_service(iscsi_id, ig_id, ag_id) LOG.debug("CloudByte volume [%(vol)s] updated with " "iscsi id [%(iscsi)s] and initiator group [%(ig)s] and " "authentication group [%(ag)s].", {'vol': cb_volume_name, 'iscsi': iscsi_id, 'ig': ig_id, 'ag': ag_id}) # Provide the model after successful completion of above steps provider = self._build_provider_details_from_response( cb_volumes, cb_volume_name, chap_info) LOG.info(_LI("Successfully created a CloudByte volume [%(cb_vol)s] " "w.r.t OpenStack volume [%(stack_vol)s]."), {'cb_vol': cb_volume_name, 'stack_vol': volume.get('id')}) return provider def delete_volume(self, volume): params = {} # OpenStack source volume id source_volume_id = volume['id'] # CloudByte volume id equals OpenStack volume's provider_id cb_volume_id = volume.get('provider_id') LOG.debug("Will delete CloudByte volume [%(cb_vol)s] " "w.r.t OpenStack volume [%(stack_vol)s].", {'cb_vol': cb_volume_id, 'stack_vol': source_volume_id}) # Delete volume at CloudByte if cb_volume_id is not None: cb_volumes = 
self._api_request_for_cloudbyte( 'listFileSystem', params) # Search cb_volume_id in CloudByte volumes # incase it has already been deleted from CloudByte cb_volume_id = self._search_volume_id(cb_volumes, cb_volume_id) # Delete volume at CloudByte if cb_volume_id is not None: params = {"id": cb_volume_id} del_res = self._api_request_for_cloudbyte('deleteFileSystem', params) self._wait_for_volume_deletion(del_res, cb_volume_id) LOG.info( _LI("Successfully deleted volume [%(cb_vol)s] " "at CloudByte corresponding to " "OpenStack volume [%(stack_vol)s]."), {'cb_vol': cb_volume_id, 'stack_vol': source_volume_id}) else: LOG.error(_LE("CloudByte does not have a volume corresponding " "to OpenStack volume [%s]."), source_volume_id) else: LOG.error(_LE("CloudByte volume information not available for" " OpenStack volume [%s]."), source_volume_id) def create_snapshot(self, snapshot): """Creates a snapshot at CloudByte.""" # OpenStack volume source_volume_id = snapshot['volume_id'] # CloudByte volume id equals OpenStack volume's provider_id cb_volume_id = snapshot.get('volume').get('provider_id') if cb_volume_id is not None: # Set backend storage snapshot name using OpenStack snapshot id snapshot_name = "snap_" + snapshot['id'].replace("-", "") params = { "name": snapshot_name, "id": cb_volume_id } LOG.debug( "Will create CloudByte snapshot [%(cb_snap)s] " "w.r.t CloudByte volume [%(cb_vol)s] " "and OpenStack volume [%(stack_vol)s].", {'cb_snap': snapshot_name, 'cb_vol': cb_volume_id, 'stack_vol': source_volume_id}) self._api_request_for_cloudbyte('createStorageSnapshot', params) # Get the snapshot path from CloudByte path = self._get_cb_snapshot_path(snapshot_name, cb_volume_id) LOG.info( _LI("Created CloudByte snapshot [%(cb_snap)s] " "w.r.t CloudByte volume [%(cb_vol)s] " "and OpenStack volume [%(stack_vol)s]."), {'cb_snap': path, 'cb_vol': cb_volume_id, 'stack_vol': source_volume_id}) model_update = {} # Store snapshot path as snapshot provider_id 
model_update['provider_id'] = path else: msg = _("Failed to create snapshot. CloudByte volume information " "not found for OpenStack volume [%s].") % source_volume_id raise exception.VolumeBackendAPIException(data=msg) return model_update def create_cloned_volume(self, cloned_volume, src_volume): """Create a clone of an existing volume. First it will create a snapshot of the source/parent volume, then it creates a clone of this newly created snapshot. """ # Extract necessary information from input params parent_volume_id = src_volume.get('id') # Generating id for snapshot # as this is not user entered in this particular usecase snapshot_id = six.text_type(uuid.uuid1()) # Prepare the params for create_snapshot # as well as create_volume_from_snapshot method snapshot_params = { 'id': snapshot_id, 'volume_id': parent_volume_id, 'volume': src_volume, } # Create a snapshot snapshot = self.create_snapshot(snapshot_params) snapshot_params['provider_id'] = snapshot.get('provider_id') # Create a clone of above snapshot return self.create_volume_from_snapshot(cloned_volume, snapshot_params) def create_volume_from_snapshot(self, cloned_volume, snapshot): """Create a clone from an existing snapshot.""" # Getting necessary data from input params parent_volume_id = snapshot['volume_id'] cloned_volume_name = cloned_volume['id'].replace("-", "") # CloudByte volume id equals OpenStack volume's provider_id cb_volume_id = snapshot.get('volume').get('provider_id') # CloudByte snapshot path equals OpenStack snapshot's provider_id cb_snapshot_path = snapshot['provider_id'] params = { "id": cb_volume_id, "clonename": cloned_volume_name, "path": cb_snapshot_path } LOG.debug( "Will create CloudByte clone [%(cb_clone)s] " "at CloudByte snapshot path [%(cb_snap)s] " "w.r.t parent OpenStack volume [%(stack_vol)s].", {'cb_clone': cloned_volume_name, 'cb_snap': cb_snapshot_path, 'stack_vol': parent_volume_id}) # Create clone of the snapshot clone_dataset_snapshot_res = ( 
self._api_request_for_cloudbyte('cloneDatasetSnapshot', params)) cb_snap = clone_dataset_snapshot_res.get('cloneDatasetSnapshot') cb_vol = {} if cb_snap is not None: cb_vol = cb_snap.get('filesystem') else: msg = ("Error: Clone creation failed for " "OpenStack volume [%(vol)s] with CloudByte " "snapshot path [%(path)s]" % {'vol': parent_volume_id, 'path': cb_snapshot_path}) raise exception.VolumeBackendAPIException(data=msg) LOG.info( _LI("Created a clone [%(cb_clone)s] " "at CloudByte snapshot path [%(cb_snap)s] " "w.r.t parent OpenStack volume [%(stack_vol)s]."), {'cb_clone': cloned_volume_name, 'cb_snap': cb_snapshot_path, 'stack_vol': parent_volume_id}) chap_info = {} if self.cb_use_chap is True: account_name = self.configuration.cb_account_name # Get account id of this account account_id = self._get_account_id_from_name(account_name) chap_info = self._get_chap_info(account_id) model_update = self._build_provider_details_from_volume(cb_vol, chap_info) return model_update def delete_snapshot(self, snapshot): """Delete a snapshot at CloudByte.""" # Find volume id source_volume_id = snapshot['volume_id'] # CloudByte volume id equals OpenStack volume's provider_id cb_volume_id = snapshot.get('volume').get('provider_id') # CloudByte snapshot path equals OpenStack snapshot's provider_id cb_snapshot_path = snapshot['provider_id'] # If cb_snapshot_path is 'None' # then no need to execute CloudByte API if cb_snapshot_path is not None: params = { "id": cb_volume_id, "path": cb_snapshot_path } LOG.debug("Will delete CloudByte snapshot [%(snap)s] w.r.t " "parent CloudByte volume [%(cb_vol)s] " "and parent OpenStack volume [%(stack_vol)s].", {'snap': cb_snapshot_path, 'cb_vol': cb_volume_id, 'stack_vol': source_volume_id}) # Execute CloudByte API self._api_request_for_cloudbyte('deleteSnapshot', params) LOG.info( _LI("Deleted CloudByte snapshot [%(snap)s] w.r.t " "parent CloudByte volume [%(cb_vol)s] " "and parent OpenStack volume [%(stack_vol)s]."), {'snap': 
cb_snapshot_path, 'cb_vol': cb_volume_id, 'stack_vol': source_volume_id}) else: LOG.error(_LE("CloudByte snapshot information is not available" " for OpenStack volume [%s]."), source_volume_id) def extend_volume(self, volume, new_size): # CloudByte volume id equals OpenStack volume's provider_id cb_volume_id = volume.get('provider_id') params = { "id": cb_volume_id, "quotasize": six.text_type(new_size) + 'G' } # Request the CloudByte api to update the volume self._api_request_for_cloudbyte('updateFileSystem', params) def create_export(self, context, volume, connector): """Setup the iscsi export info.""" return self._export() def ensure_export(self, context, volume): """Verify the iscsi export info.""" return self._export() def get_volume_stats(self, refresh=False): """Get volume statistics. If 'refresh' is True, update/refresh the statistics first. """ if refresh: # Get the TSM name from configuration tsm_name = self.configuration.cb_tsm_name # Get the storage details of this TSM data = self._get_storage_info(tsm_name) data["volume_backend_name"] = ( self.configuration.safe_get('volume_backend_name') or 'CloudByte') data["vendor_name"] = 'CloudByte' data['reserved_percentage'] = 0 data["driver_version"] = CloudByteISCSIDriver.VERSION data["storage_protocol"] = 'iSCSI' LOG.debug("CloudByte driver stats: [%s].", data) # Set this to the instance variable self.volume_stats = data return self.volume_stats
nikesh-mahalka/cinder
cinder/volume/drivers/cloudbyte/cloudbyte.py
Python
apache-2.0
38,259
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.

using System;
using System.Collections.Generic;
using Microsoft.CodeAnalysis;

namespace a2md
{
    /// <summary>
    /// Compares <see cref="DiagnosticDescriptor"/> instances by <see cref="DiagnosticDescriptor.Id"/>
    /// only, using ordinal (culture-insensitive, case-sensitive) string comparison. Two descriptors
    /// with the same Id are considered equal even if other properties (title, category, severity) differ.
    /// </summary>
    public sealed class DescriptorEqualityComparer : IEqualityComparer<DiagnosticDescriptor>
    {
        /// <summary>Returns true when both descriptors share the same diagnostic Id.</summary>
        public bool Equals(DiagnosticDescriptor x, DiagnosticDescriptor y)
        {
            // IEqualityComparer<T>.Equals must tolerate null arguments: reference-equal
            // operands (including both null) are equal; a single null operand is not.
            if (ReferenceEquals(x, y))
            {
                return true;
            }

            if (ReferenceEquals(x, null) || ReferenceEquals(y, null))
            {
                return false;
            }

            return x.Id.Equals(y.Id, StringComparison.Ordinal);
        }

        // CA1720: Identifier 'obj' contains type name
        // TODO: Remove the below suppression once https://github.com/dotnet/roslyn-analyzers/issues/938 is fixed.
#pragma warning disable CA1720
        /// <summary>Hash code consistent with <see cref="Equals"/>: derived from the Id alone.</summary>
        public int GetHashCode(DiagnosticDescriptor obj) => obj.Id.GetHashCode();
#pragma warning restore CA1720
    }
}
heejaechang/roslyn-analyzers
tools/a2md/DescriptorEqualityComparer.cs
C#
apache-2.0
807
/*
 * Copyright 2018-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License. You may obtain
 * a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */

package com.facebook.buck.core.build.engine;

import com.facebook.buck.core.rules.BuildRule;

import java.util.SortedSet;

/**
 * Lookup of a build rule's dependency sets. The name suggests implementations
 * memoize results across calls — confirm against concrete implementations.
 */
public interface RuleDepsCache {

  /** Returns the (sorted) set of build rules that {@code rule} depends on at build time. */
  SortedSet<BuildRule> get(BuildRule rule);

  /** Returns the (sorted) set of build rules that {@code rule} depends on at runtime. */
  SortedSet<BuildRule> getRuntimeDeps(BuildRule rule);
}
LegNeato/buck
src/com/facebook/buck/core/build/engine/RuleDepsCache.java
Java
apache-2.0
865
package brooklyn.util.text;

import com.google.common.base.Preconditions;
import com.google.common.base.Supplier;

/**
 * A lazily-formatted string: holds a pattern and its arguments, deferring the
 * {@link String#format(String, Object...)} call (i.e. {@code %s}-style interpolation)
 * until {@link #toString()} is invoked.
 *
 * <p>Useful wherever evaluation should be deferred — for example as a message argument
 * to {@link Preconditions}, so the cost of concatenation is only paid on failure.
 */
public class FormattedString {

    private final String pattern;
    private final Object[] args;

    public FormattedString(String pattern, Object[] args) {
        this.pattern = pattern;
        this.args = args;
    }

    /** Returns the raw, unformatted pattern. */
    public String getPattern() {
        return pattern;
    }

    /** Returns the arguments to be interpolated into the pattern. */
    public Object[] getArgs() {
        return args;
    }

    /** Returns a supplier view of this instance; each {@code get()} re-evaluates the format. */
    public Supplier<String> supplier() {
        return Strings.toStringSupplier(this);
    }

    /** Performs the deferred formatting. */
    @Override
    public String toString() {
        return String.format(pattern, args);
    }
}
rhodgin/brooklyn
utils/common/src/main/java/brooklyn/util/text/FormattedString.java
Java
apache-2.0
892
package recreate

import (
	"fmt"
	"time"

	"github.com/golang/glog"

	kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	kclient "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"

	deployapi "github.com/projectatomic/atomic-enterprise/pkg/deploy/api"
	stratsupport "github.com/projectatomic/atomic-enterprise/pkg/deploy/strategy/support"
	deployutil "github.com/projectatomic/atomic-enterprise/pkg/deploy/util"
)

// RecreateDeploymentStrategy is a simple strategy appropriate as a default.
// Its behavior is to scale down the last deployment to 0, and to scale up the
// new deployment to 1.
//
// A failure to disable any existing deployments will be considered a
// deployment failure.
type RecreateDeploymentStrategy struct {
	// getReplicationController knows how to get a replication controller.
	getReplicationController func(namespace, name string) (*kapi.ReplicationController, error)
	// scaler is used to scale replication controllers.
	scaler kubectl.Scaler
	// codec is used to decode DeploymentConfigs contained in deployments.
	codec runtime.Codec
	// hookExecutor can execute a lifecycle hook.
	hookExecutor hookExecutor
	// retryTimeout is how long to wait for the replica count update to succeed
	// before giving up.
	retryTimeout time.Duration
	// retryPeriod is how often to try updating the replica count.
	retryPeriod time.Duration
}

// NewRecreateDeploymentStrategy makes a RecreateDeploymentStrategy backed by
// a real HookExecutor and client.
func NewRecreateDeploymentStrategy(client kclient.Interface, codec runtime.Codec) *RecreateDeploymentStrategy {
	// NOTE(review): the error from ScalerFor is discarded; presumably it cannot
	// fail for the built-in "ReplicationController" kind — confirm upstream.
	scaler, _ := kubectl.ScalerFor("ReplicationController", kubectl.NewScalerClient(client))
	return &RecreateDeploymentStrategy{
		// Fetch replication controllers through the provided client.
		getReplicationController: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		scaler: scaler,
		codec:  codec,
		// Lifecycle hooks run in pods created/watched via the same client.
		hookExecutor: &stratsupport.HookExecutor{
			PodClient: &stratsupport.HookExecutorPodClientImpl{
				CreatePodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
					return client.Pods(namespace).Create(pod)
				},
				PodWatchFunc: func(namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
					return stratsupport.NewPodWatch(client, namespace, name, resourceVersion, stopChannel)
				},
			},
		},
		retryTimeout: 120 * time.Second,
		retryPeriod:  1 * time.Second,
	}
}

// Deploy makes deployment active and disables oldDeployments.
func (s *RecreateDeploymentStrategy) Deploy(from *kapi.ReplicationController, to *kapi.ReplicationController, desiredReplicas int) error {
	// No update acceptor: scale straight to the desired replica count.
	return s.DeployWithAcceptor(from, to, desiredReplicas, nil)
}

// DeployWithAcceptor scales down from and then scales up to. If
// updateAcceptor is provided and the desired replica count is >1, the first
// replica of to is rolled out and validated before performing the full scale
// up.
//
// This is currently only used in conjunction with the rolling update strategy
// for initial deployments.
func (s *RecreateDeploymentStrategy) DeployWithAcceptor(from *kapi.ReplicationController, to *kapi.ReplicationController, desiredReplicas int, updateAcceptor kubectl.UpdateAcceptor) error {
	// The deployment carries an encoded DeploymentConfig; decode it to read the
	// strategy parameters (pre/post lifecycle hooks).
	config, err := deployutil.DecodeDeploymentConfig(to, s.codec)
	if err != nil {
		return fmt.Errorf("couldn't decode config from deployment %s: %v", to.Name, err)
	}

	params := config.Template.Strategy.RecreateParams
	retryParams := kubectl.NewRetryParams(s.retryPeriod, s.retryTimeout)
	waitParams := kubectl.NewRetryParams(s.retryPeriod, s.retryTimeout)

	// Execute any pre-hook. A pre-hook failure aborts the deployment.
	if params != nil && params.Pre != nil {
		if err := s.hookExecutor.Execute(params.Pre, to, "prehook"); err != nil {
			return fmt.Errorf("Pre hook failed: %s", err)
		} else {
			glog.Infof("Pre hook finished")
		}
	}

	// Scale down the from deployment. `from` may be nil on an initial deployment.
	if from != nil {
		glog.Infof("Scaling %s down to zero", deployutil.LabelForDeployment(from))
		_, err := s.scaleAndWait(from, 0, retryParams, waitParams)
		if err != nil {
			return fmt.Errorf("couldn't scale %s to 0: %v", deployutil.LabelForDeployment(from), err)
		}
	}

	// If an UpdateAcceptor is provided and we're trying to scale up to more
	// than one replica, scale up to 1 and validate the replica, aborting if the
	// replica isn't acceptable.
	if updateAcceptor != nil && desiredReplicas > 1 {
		glog.Infof("Scaling %s to 1 before validating first replica", deployutil.LabelForDeployment(to))
		updatedTo, err := s.scaleAndWait(to, 1, retryParams, waitParams)
		if err != nil {
			return fmt.Errorf("couldn't scale %s to 1: %v", deployutil.LabelForDeployment(to), err)
		}
		glog.Infof("Validating first replica of %s", deployutil.LabelForDeployment(to))
		if err := updateAcceptor.Accept(updatedTo); err != nil {
			return fmt.Errorf("first replica rejected for %s: %v", to.Name, err)
		}
		// Carry forward the controller state observed after the canary scale-up.
		to = updatedTo
	}

	// Complete the scale up.
	glog.Infof("Scaling %s to %d", deployutil.LabelForDeployment(to), desiredReplicas)
	updatedTo, err := s.scaleAndWait(to, desiredReplicas, retryParams, waitParams)
	if err != nil {
		return fmt.Errorf("couldn't scale %s to %d: %v", deployutil.LabelForDeployment(to), desiredReplicas, err)
	}
	to = updatedTo

	// Execute any post-hook. Errors are logged and ignored.
	if params != nil && params.Post != nil {
		if err := s.hookExecutor.Execute(params.Post, to, "posthook"); err != nil {
			util.HandleError(fmt.Errorf("post hook failed: %s", err))
		} else {
			glog.Infof("Post hook finished")
		}
	}

	glog.Infof("Deployment %s successfully made active", to.Name)
	return nil
}

// scaleAndWait scales the given replication controller to the requested replica
// count (retrying/waiting per the supplied RetryParams) and re-fetches it so
// callers see the updated state.
func (s *RecreateDeploymentStrategy) scaleAndWait(deployment *kapi.ReplicationController, replicas int, retry *kubectl.RetryParams, wait *kubectl.RetryParams) (*kapi.ReplicationController, error) {
	// NOTE(review): unkeyed ScalePrecondition{-1, ""} — the -1 size presumably
	// means "no size precondition"; confirm against the kubectl package.
	if err := s.scaler.Scale(deployment.Namespace, deployment.Name, uint(replicas), &kubectl.ScalePrecondition{-1, ""}, retry, wait); err != nil {
		return nil, err
	}
	updatedDeployment, err := s.getReplicationController(deployment.Namespace, deployment.Name)
	if err != nil {
		return nil, err
	}
	return updatedDeployment, nil
}

// hookExecutor knows how to execute a deployment lifecycle hook.
type hookExecutor interface {
	Execute(hook *deployapi.LifecycleHook, deployment *kapi.ReplicationController, label string) error
}

// hookExecutorImpl is a pluggable hookExecutor.
type hookExecutorImpl struct {
	executeFunc func(hook *deployapi.LifecycleHook, deployment *kapi.ReplicationController, label string) error
}

// Execute executes the provided lifecycle hook
func (i *hookExecutorImpl) Execute(hook *deployapi.LifecycleHook, deployment *kapi.ReplicationController, label string) error {
	return i.executeFunc(hook, deployment, label)
}
pombredanne/atomic-enterprise
pkg/deploy/strategy/recreate/recreate.go
GO
apache-2.0
6,924
/*
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.jbpm.persistence;

import org.drools.persistence.TransactionManager;
import org.drools.persistence.map.EnvironmentBuilder;

/**
 * {@link EnvironmentBuilder} backed by a {@link ProcessStorage}. A single
 * map-based persistence context is created over the supplied storage and is
 * shared by both the persistence-context manager and the transaction manager
 * produced by this builder.
 */
public class ProcessStorageEnvironmentBuilder implements EnvironmentBuilder {

    private final ProcessStorage storage;
    private final MapBasedProcessPersistenceContext context;

    public ProcessStorageEnvironmentBuilder(ProcessStorage storage) {
        this.storage = storage;
        this.context = new MapBasedProcessPersistenceContext(storage);
    }

    /** Returns a context manager wrapping the shared map-based context. */
    public ProcessPersistenceContextManager getPersistenceContextManager() {
        return new MapProcessPersistenceContextManager(context);
    }

    /** Returns a manual transaction manager over the shared context and storage. */
    public TransactionManager getTransactionManager() {
        return new ManualProcessTransactionManager(context, storage);
    }
}
Multi-Support/jbpm
jbpm-persistence-jpa/src/main/java/org/jbpm/persistence/ProcessStorageEnvironmentBuilder.java
Java
apache-2.0
1,369
<?php

/**
 * Standard page-header element. Renders an <h1> row containing an optional
 * image column, the header text (with optional icon, href link, Spaces
 * context, subheader, and status/policy/tag properties), and a column of
 * action links / button bars / action items on the right.
 *
 * All setters return $this for chaining, per AphrontTagView convention.
 */
final class PHUIHeaderView extends AphrontTagView {

  const PROPERTY_STATUS = 1;

  private $header;
  private $tags = array();
  private $image;
  private $imageURL = null;
  private $imageEditURL = null;
  private $subheader;
  private $headerIcon;
  private $noBackground;
  private $bleedHeader;
  private $profileHeader;
  private $tall;
  private $properties = array();
  private $actionLinks = array();
  private $buttonBar = null;
  // NOTE(review): $epoch is declared but never assigned; setEpoch() only
  // derives a status tag. Possibly dead — confirm before removing.
  private $epoch;
  private $actionItems = array();
  private $href;
  private $actionList;
  private $actionListID;

  public function setHeader($header) {
    $this->header = $header;
    return $this;
  }

  public function setNoBackground($nada) {
    $this->noBackground = $nada;
    return $this;
  }

  public function setTall($tall) {
    $this->tall = $tall;
    return $this;
  }

  public function addTag(PHUITagView $tag) {
    $this->tags[] = $tag;
    return $this;
  }

  public function setImage($uri) {
    $this->image = $uri;
    return $this;
  }

  public function setImageURL($url) {
    $this->imageURL = $url;
    return $this;
  }

  public function setImageEditURL($url) {
    $this->imageEditURL = $url;
    return $this;
  }

  public function setSubheader($subheader) {
    $this->subheader = $subheader;
    return $this;
  }

  public function setBleedHeader($bleed) {
    $this->bleedHeader = $bleed;
    return $this;
  }

  public function setProfileHeader($bighead) {
    $this->profileHeader = $bighead;
    return $this;
  }

  public function setHeaderIcon($icon) {
    $this->headerIcon = $icon;
    return $this;
  }

  public function setActionList(PhabricatorActionListView $list) {
    $this->actionList = $list;
    return $this;
  }

  public function setActionListID($action_list_id) {
    $this->actionListID = $action_list_id;
    return $this;
  }

  public function setPolicyObject(PhabricatorPolicyInterface $object) {
    $this->policyObject = $object;
    return $this;
  }

  public function addProperty($property, $value) {
    $this->properties[$property] = $value;
    return $this;
  }

  public function addActionLink(PHUIButtonView $button) {
    $this->actionLinks[] = $button;
    return $this;
  }

  public function addActionItem($action) {
    $this->actionItems[] = $action;
    return $this;
  }

  public function setButtonBar(PHUIButtonBarView $bb) {
    $this->buttonBar = $bb;
    return $this;
  }

  // Convenience: render a status as a shaded tag property.
  public function setStatus($icon, $color, $name) {
    // TODO: Normalize "closed/archived" to constants.
    if ($color == 'dark') {
      $color = PHUITagView::COLOR_INDIGO;
    }
    $tag = id(new PHUITagView())
      ->setName($name)
      ->setIcon($icon)
      ->setColor($color)
      ->setType(PHUITagView::TYPE_SHADE);
    return $this->addProperty(self::PROPERTY_STATUS, $tag);
  }

  // Derive an "Updated ..." status tag from a unix timestamp (day granularity).
  public function setEpoch($epoch) {
    $age = time() - $epoch;
    $age = floor($age / (60 * 60 * 24));
    if ($age < 1) {
      $when = pht('Today');
    } else if ($age == 1) {
      $when = pht('Yesterday');
    } else {
      $when = pht('%s Day(s) Ago', new PhutilNumber($age));
    }
    $this->setStatus('fa-clock-o bluegrey', null, pht('Updated %s', $when));
    return $this;
  }

  public function setHref($href) {
    $this->href = $href;
    return $this;
  }

  public function getHref() {
    return $this->href;
  }

  protected function getTagName() {
    return 'div';
  }

  protected function getTagAttributes() {
    require_celerity_resource('phui-header-view-css');

    $classes = array();
    $classes[] = 'phui-header-shell';

    if ($this->noBackground) {
      $classes[] = 'phui-header-no-background';
    }

    if ($this->bleedHeader) {
      $classes[] = 'phui-bleed-header';
    }

    if ($this->profileHeader) {
      $classes[] = 'phui-profile-header';
    }

    if ($this->properties || $this->policyObject ||
        $this->subheader || $this->tall) {
      $classes[] = 'phui-header-tall';
    }

    return array(
      'class' => $classes,
    );
  }

  protected function getTagContent() {
    // A mobile "Actions" dropdown is injected as an action link when an
    // action list (or its element ID) was provided.
    if ($this->actionList || $this->actionListID) {
      $action_button = id(new PHUIButtonView())
        ->setTag('a')
        ->setText(pht('Actions'))
        ->setHref('#')
        ->setIcon('fa-bars')
        ->addClass('phui-mobile-menu');

      if ($this->actionList) {
        $action_button->setDropdownMenu($this->actionList);
      } else if ($this->actionListID) {
        $action_button->setDropdownMenuID($this->actionListID);
      }

      $this->addActionLink($action_button);
    }

    $image = null;
    if ($this->image) {
      // Explicit image URL wins over the edit URL as the link target.
      $image_href = null;
      if ($this->imageURL) {
        $image_href = $this->imageURL;
      } else if ($this->imageEditURL) {
        $image_href = $this->imageEditURL;
      }

      $image = phutil_tag(
        'span',
        array(
          'class' => 'phui-header-image',
          'style' => 'background-image: url('.$this->image.')',
        ));

      if ($image_href) {
        $edit_view = null;
        if ($this->imageEditURL) {
          $edit_view = phutil_tag(
            'span',
            array(
              'class' => 'phui-header-image-edit',
            ),
            pht('Edit'));
        }

        $image = phutil_tag(
          'a',
          array(
            'href' => $image_href,
            'class' => 'phui-header-image-href',
          ),
          array(
            $image,
            $edit_view,
          ));
      }
    }

    $viewer = $this->getUser();

    // $left holds the header/subheader column; $right the actions column.
    $left = array();
    $right = array();

    $space_header = null;
    if ($viewer) {
      $space_header = id(new PHUISpacesNamespaceContextView())
        ->setUser($viewer)
        ->setObject($this->policyObject);
    }

    if ($this->actionLinks) {
      $actions = array();
      foreach ($this->actionLinks as $button) {
        if (!$button->getColor()) {
          $button->setColor(PHUIButtonView::GREY);
        }
        $button->addClass(PHUI::MARGIN_SMALL_LEFT);
        $button->addClass('phui-header-action-link');
        $actions[] = $button;
      }

      $right[] = phutil_tag(
        'div',
        array(
          'class' => 'phui-header-action-links',
        ),
        $actions);
    }

    if ($this->buttonBar) {
      $right[] = phutil_tag(
        'div',
        array(
          'class' => 'phui-header-action-links',
        ),
        $this->buttonBar);
    }

    if ($this->actionItems) {
      $action_list = array();
      if ($this->actionItems) {
        foreach ($this->actionItems as $item) {
          $action_list[] = phutil_tag(
            'li',
            array(
              'class' => 'phui-header-action-item',
            ),
            $item);
        }
      }
      $right[] = phutil_tag(
        'ul',
        array(
          'class' => 'phui-header-action-list',
        ),
        $action_list);
    }

    // The header icon may be a prebuilt PHUIIconView or a bare icon name.
    $icon = null;
    if ($this->headerIcon) {
      if ($this->headerIcon instanceof PHUIIconView) {
        $icon = id(clone $this->headerIcon)
          ->addClass('phui-header-icon');
      } else {
        $icon = id(new PHUIIconView())
          ->setIcon($this->headerIcon)
          ->addClass('phui-header-icon');
      }
    }

    $header_content = $this->header;

    $href = $this->getHref();
    if ($href !== null) {
      $header_content = phutil_tag(
        'a',
        array(
          'href' => $href,
        ),
        $header_content);
    }

    $left[] = phutil_tag(
      'span',
      array(
        'class' => 'phui-header-header',
      ),
      array(
        $space_header,
        $icon,
        $header_content,
      ));

    if ($this->subheader) {
      $left[] = phutil_tag(
        'div',
        array(
          'class' => 'phui-header-subheader',
        ),
        array(
          $this->subheader,
        ));
    }

    if ($this->properties || $this->policyObject || $this->tags) {
      $property_list = array();
      foreach ($this->properties as $type => $property) {
        switch ($type) {
          case self::PROPERTY_STATUS:
            $property_list[] = $property;
            break;
          default:
            throw new Exception(pht('Incorrect Property Passed'));
            break;
        }
      }

      if ($this->policyObject) {
        $property_list[] = $this->renderPolicyProperty($this->policyObject);
      }

      if ($this->tags) {
        $property_list[] = $this->tags;
      }

      $left[] = phutil_tag(
        'div',
        array(
          'class' => 'phui-header-subheader',
        ),
        $property_list);
    }

    // We here at @phabricator
    $header_image = null;
    if ($image) {
      $header_image = phutil_tag(
        'div',
        array(
          'class' => 'phui-header-col1',
        ),
        $image);
    }

    // All really love
    $header_left = phutil_tag(
      'div',
      array(
        'class' => 'phui-header-col2',
      ),
      $left);

    // Tables and Pokemon.
    $header_right = phutil_tag(
      'div',
      array(
        'class' => 'phui-header-col3',
      ),
      $right);

    $header_row = phutil_tag(
      'div',
      array(
        'class' => 'phui-header-row',
      ),
      array(
        $header_image,
        $header_left,
        $header_right,
      ));

    return phutil_tag(
      'h1',
      array(
        'class' => 'phui-header-view',
      ),
      $header_row);
  }

  // Render the view-policy tag (icon + "explain" link) for the header.
  private function renderPolicyProperty(PhabricatorPolicyInterface $object) {
    $viewer = $this->getUser();

    $policies = PhabricatorPolicyQuery::loadPolicies($viewer, $object);

    $view_capability = PhabricatorPolicyCapability::CAN_VIEW;
    $policy = idx($policies, $view_capability);
    if (!$policy) {
      return null;
    }

    // If an object is in a Space with a strictly stronger (more restrictive)
    // policy, we show the more restrictive policy. This better aligns the
    // UI hint with the actual behavior.
    // NOTE: We'll do this even if the viewer has access to only one space, and
    // show them information about the existence of spaces if they click
    // through.
    // NOTE(review): $use_space_policy is set but never read below — dead flag?
    $use_space_policy = false;
    if ($object instanceof PhabricatorSpacesInterface) {
      $space_phid = PhabricatorSpacesNamespaceQuery::getObjectSpacePHID(
        $object);

      $spaces = PhabricatorSpacesNamespaceQuery::getViewerSpaces($viewer);
      $space = idx($spaces, $space_phid);
      if ($space) {
        $space_policies = PhabricatorPolicyQuery::loadPolicies(
          $viewer,
          $space);
        $space_policy = idx($space_policies, $view_capability);
        if ($space_policy) {
          if ($space_policy->isStrongerThan($policy)) {
            $policy = $space_policy;
            $use_space_policy = true;
          }
        }
      }
    }

    $container_classes = array();
    $container_classes[] = 'policy-header-callout';
    $phid = $object->getPHID();

    $policy_name = array($policy->getShortName());
    $policy_icon = $policy->getIcon().' bluegrey';

    // A PolicyCodex may override the displayed name, icon, and tag classes.
    if ($object instanceof PhabricatorPolicyCodexInterface) {
      $codex = PhabricatorPolicyCodex::newFromObject($object, $viewer);

      $codex_name = $codex->getPolicyShortName($policy, $view_capability);
      if ($codex_name !== null) {
        $policy_name = $codex_name;
      }

      $codex_icon = $codex->getPolicyIcon($policy, $view_capability);
      if ($codex_icon !== null) {
        $policy_icon = $codex_icon;
      }

      $codex_classes = $codex->getPolicyTagClasses($policy, $view_capability);
      foreach ($codex_classes as $codex_class) {
        $container_classes[] = $codex_class;
      }
    }

    if (!is_array($policy_name)) {
      $policy_name = (array)$policy_name;
    }

    $arrow = id(new PHUIIconView())
      ->setIcon('fa-angle-right')
      ->addClass('policy-tier-separator');

    $policy_name = phutil_implode_html($arrow, $policy_name);

    $icon = id(new PHUIIconView())
      ->setIcon($policy_icon);

    $link = javelin_tag(
      'a',
      array(
        'class' => 'policy-link',
        'href' => '/policy/explain/'.$phid.'/'.$view_capability.'/',
        'sigil' => 'workflow',
      ),
      $policy_name);

    return phutil_tag(
      'span',
      array(
        'class' => implode(' ', $container_classes),
      ),
      array($icon, $link));
  }

}
freebsd/phabricator
src/view/phui/PHUIHeaderView.php
PHP
apache-2.0
12,343
var V; function F() { } ; var C = (function () { function C() { } C.prototype.pF = function () { }; C.prototype.rF = function () { }; C.prototype.pgF = function () { }; C.prototype.psF = function (param) { }; C.prototype.rgF = function () { }; C.prototype.rsF = function (param) { }; C.tF = function tF() { }; C.tsF = function tsF(param) { }; C.tgF = function tgF() { }; return C; })(); ; ; var M; (function (M) { var V; function F() { } ; var C = (function () { function C() { } C.prototype.pF = function () { }; C.prototype.rF = function () { }; C.prototype.pgF = function () { }; C.prototype.psF = function (param) { }; C.prototype.rgF = function () { }; C.prototype.rsF = function (param) { }; C.tF = function tF() { }; C.tsF = function tsF(param) { }; C.tgF = function tgF() { }; return C; })(); ; ; var M; (function (M) { var V; function F() { } ; var C = (function () { function C() { } return C; })(); ; ; ; M.eV; function eF() { } M.eF = eF; ; var eC = (function () { function eC() { } return eC; })(); M.eC = eC; ; ; ; ; ; })(M || (M = {})); M.eV; function eF() { } M.eF = eF; ; var eC = (function () { function eC() { } eC.prototype.pF = function () { }; eC.prototype.rF = function () { }; eC.prototype.pgF = function () { }; eC.prototype.psF = function (param) { }; eC.prototype.rgF = function () { }; eC.prototype.rsF = function (param) { }; eC.tF = function tF() { }; eC.tsF = function tsF(param) { }; eC.tgF = function tgF() { }; return eC; })(); M.eC = eC; ; ; (function (eM) { var V; function F() { } ; var C = (function () { function C() { } return C; })(); ; ; ; eM.eV; function eF() { } eM.eF = eF; ; var eC = (function () { function eC() { } return eC; })(); eM.eC = eC; ; ; ; ; ; })(M.eM || (M.eM = {})); var eM = M.eM; ; })(M || (M = {})); exports.eV; function eF() { } exports.eF = eF; ; var eC = (function () { function eC() { } eC.prototype.pF = function () { }; eC.prototype.rF = function () { }; eC.prototype.pgF = function () { }; eC.prototype.psF = function (param) { 
}; eC.prototype.rgF = function () { }; eC.prototype.rsF = function (param) { }; eC.tF = function tF() { }; eC.tsF = function tsF(param) { }; eC.tgF = function tgF() { }; return eC; })(); exports.eC = eC; ; ; (function (eM) { var V; function F() { } ; var C = (function () { function C() { } C.prototype.pF = function () { }; C.prototype.rF = function () { }; C.prototype.pgF = function () { }; C.prototype.psF = function (param) { }; C.prototype.rgF = function () { }; C.prototype.rsF = function (param) { }; C.tF = function tF() { }; C.tsF = function tsF(param) { }; C.tgF = function tgF() { }; return C; })(); ; ; var M; (function (M) { var V; function F() { } ; var C = (function () { function C() { } return C; })(); ; ; ; M.eV; function eF() { } M.eF = eF; ; var eC = (function () { function eC() { } return eC; })(); M.eC = eC; ; ; ; ; ; })(M || (M = {})); eM.eV; function eF() { } eM.eF = eF; ; var eC = (function () { function eC() { } eC.prototype.pF = function () { }; eC.prototype.rF = function () { }; eC.prototype.pgF = function () { }; eC.prototype.psF = function (param) { }; eC.prototype.rgF = function () { }; eC.prototype.rsF = function (param) { }; eC.tF = function tF() { }; eC.tsF = function tsF(param) { }; eC.tgF = function tgF() { }; return eC; })(); eM.eC = eC; ; ; (function (eM) { var V; function F() { } ; var C = (function () { function C() { } return C; })(); ; ; ; eM.eV; function eF() { } eM.eF = eF; ; var eC = (function () { function eC() { } return eC; })(); eM.eC = eC; ; ; ; ; ; })(eM.eM || (eM.eM = {})); var eM = eM.eM; ; })(exports.eM || (exports.eM = {})); var eM = exports.eM; ;
vcsjones/typescript
tests/baselines/reference/giant.commonjs.js
JavaScript
apache-2.0
5,825
/** * Copyright 2010-2016 Boxfuse GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.flywaydb.core.api; import java.math.BigInteger; import java.util.ArrayList; import java.util.List; import java.util.regex.Pattern; /** * A version of a migration. * * @author Axel Fontaine */ public final class MigrationVersion implements Comparable<MigrationVersion> { /** * Version for an empty schema. */ public static final MigrationVersion EMPTY = new MigrationVersion(null, "<< Empty Schema >>"); /** * Latest version. */ public static final MigrationVersion LATEST = new MigrationVersion(BigInteger.valueOf(-1), "<< Latest Version >>"); /** * Current version. Only a marker. For the real version use Flyway.info().current() instead. */ public static final MigrationVersion CURRENT = new MigrationVersion(BigInteger.valueOf(-2), "<< Current Version >>"); /** * Compiled pattern for matching proper version format */ private static Pattern splitPattern = Pattern.compile("\\.(?=\\d)"); /** * The individual parts this version string is composed of. Ex. 1.2.3.4.0 -> [1, 2, 3, 4, 0] */ private final List<BigInteger> versionParts; /** * The printable text to represent the version. */ private final String displayText; /** * Factory for creating a MigrationVersion from a version String * * @param version The version String. The value {@code current} will be interpreted as MigrationVersion.CURRENT, * a marker for the latest version that has been applied to the database. 
* @return The MigrationVersion */ @SuppressWarnings("ConstantConditions") public static MigrationVersion fromVersion(String version) { if ("current".equalsIgnoreCase(version)) return CURRENT; if (LATEST.getVersion().equals(version)) return LATEST; if (version == null) return EMPTY; return new MigrationVersion(version); } /** * Creates a Version using this version string. * * @param version The version in one of the following formats: 6, 6.0, 005, 1.2.3.4, 201004200021. <br/>{@code null} * means that this version refers to an empty schema. */ private MigrationVersion(String version) { String normalizedVersion = version.replace('_', '.'); this.versionParts = tokenize(normalizedVersion); this.displayText = normalizedVersion; } /** * Creates a Version using this version string. * * @param version The version in one of the following formats: 6, 6.0, 005, 1.2.3.4, 201004200021. <br/>{@code null} * means that this version refers to an empty schema. * @param displayText The alternative text to display instead of the version number. */ private MigrationVersion(BigInteger version, String displayText) { this.versionParts = new ArrayList<BigInteger>(); this.versionParts.add(version); this.displayText = displayText; } /** * @return The textual representation of the version. */ @Override public String toString() { return displayText; } /** * @return Numeric version as String */ public String getVersion() { if (this.equals(EMPTY)) return null; if (this.equals(LATEST)) return Long.toString(Long.MAX_VALUE); return displayText; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; MigrationVersion version1 = (MigrationVersion) o; return compareTo(version1) == 0; } @Override public int hashCode() { return versionParts == null ? 0 : versionParts.hashCode(); } @SuppressWarnings("NullableProblems") public int compareTo(MigrationVersion o) { if (o == null) { return 1; } if (this == EMPTY) { return o == EMPTY ? 
0 : Integer.MIN_VALUE; } if (this == CURRENT) { return o == CURRENT ? 0 : Integer.MIN_VALUE; } if (this == LATEST) { return o == LATEST ? 0 : Integer.MAX_VALUE; } if (o == EMPTY) { return Integer.MAX_VALUE; } if (o == CURRENT) { return Integer.MAX_VALUE; } if (o == LATEST) { return Integer.MIN_VALUE; } final List<BigInteger> elements1 = versionParts; final List<BigInteger> elements2 = o.versionParts; int largestNumberOfElements = Math.max(elements1.size(), elements2.size()); for (int i = 0; i < largestNumberOfElements; i++) { final int compared = getOrZero(elements1, i).compareTo(getOrZero(elements2, i)); if (compared != 0) { return compared; } } return 0; } private BigInteger getOrZero(List<BigInteger> elements, int i) { return i < elements.size() ? elements.get(i) : BigInteger.ZERO; } /** * Splits this string into list of Long * * @param str The string to split. * @return The resulting array. */ private List<BigInteger> tokenize(String str) { List<BigInteger> numbers = new ArrayList<BigInteger>(); for (String number : splitPattern.split(str)) { try { numbers.add(new BigInteger(number)); } catch (NumberFormatException e) { throw new FlywayException( "Invalid version containing non-numeric characters. Only 0..9 and . are allowed. Invalid version: " + str); } } for (int i = numbers.size() - 1; i > 0; i--) { if (!numbers.get(i).equals(BigInteger.ZERO)) break; numbers.remove(i); } return numbers; } }
nathanvick/flyway
flyway-core/src/main/java/org/flywaydb/core/api/MigrationVersion.java
Java
apache-2.0
6,513
<?php // Exit if accessed directly if( !defined( 'ABSPATH' ) ) { exit; } /** * Main Widget Template * * * @file sidebar.php * @package Responsive * @author Emil Uzelac * @copyright 2003 - 2014 CyberChimps * @license license.txt * @version Release: 1.0 * @filesource wp-content/themes/responsive/sidebar.php * @link http://codex.wordpress.org/Theme_Development#Widgets_.28sidebar.php.29 * @since available since Release 1.0 */ /* * If this is a full-width page, exit */ if( 'full-width-page' == responsive_get_layout() ) { return; } ?> <?php responsive_widgets_before(); // above widgets container hook ?> <div id="widgets" class="<?php echo implode( ' ', responsive_get_sidebar_classes() ); ?>"> <?php responsive_widgets(); // above widgets hook ?> <?php if( !dynamic_sidebar( 'ciencia' ) ) : ?> <div class="widget-wrapper"> <div class="widget-title"><h3><?php _e( 'In Archive', 'responsive' ); ?></h3></div> <ul> <?php wp_get_archives( array( 'type' => 'monthly' ) ); ?> </ul> </div><!-- end of .widget-wrapper --> <?php endif; //end of main-sidebar ?> <?php responsive_widgets_end(); // after widgets hook ?> </div><!-- end of #widgets --> <?php responsive_widgets_after(); // after widgets container hook ?>
Doap/sinkjuice.com
wp-content/themes/prensiguia/sidebar-ciencia.php
PHP
apache-2.0
1,336
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.internal.processors.cache; import java.nio.ByteBuffer; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.GridDirectTransient; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; /** * */ public class CacheEntrySerializablePredicate implements CacheEntryPredicate { /** */ private static final long serialVersionUID = 0L; /** */ @GridToStringInclude @GridDirectTransient private CacheEntryPredicate p; /** */ private byte[] bytes; /** * Required by {@link org.apache.ignite.plugin.extensions.communication.Message}. */ public CacheEntrySerializablePredicate() { // No-op. } /** * @param p Serializable predicate. */ public CacheEntrySerializablePredicate(CacheEntryPredicate p) { assert p != null; this.p = p; } /** * @return Predicate. */ public CacheEntryPredicate predicate() { return p; } /** {@inheritDoc} */ @Override public void onAckReceived() { // No-op. 
} /** {@inheritDoc} */ @Override public void entryLocked(boolean locked) { assert p != null; p.entryLocked(locked); } /** {@inheritDoc} */ @Override public void finishUnmarshal(GridCacheContext ctx, ClassLoader ldr) throws IgniteCheckedException { assert p != null || bytes != null; if (p == null) { p = U.unmarshal(ctx.marshaller(), bytes, U.resolveClassLoader(ldr, ctx.gridConfig())); p.finishUnmarshal(ctx, ldr); } } /** {@inheritDoc} */ @Override public void prepareMarshal(GridCacheContext ctx) throws IgniteCheckedException { assert p != null; p.prepareMarshal(ctx); if (bytes == null) bytes = U.marshal(ctx.marshaller(), p); } /** {@inheritDoc} */ @Override public boolean apply(GridCacheEntryEx e) { assert p != null; return p.apply(e); } /** {@inheritDoc} */ @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { writer.setBuffer(buf); if (!writer.isHeaderWritten()) { if (!writer.writeHeader(directType(), fieldsCount())) return false; writer.onHeaderWritten(); } switch (writer.state()) { case 0: if (!writer.writeByteArray("bytes", bytes)) return false; writer.incrementState(); } return true; } /** {@inheritDoc} */ @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) { reader.setBuffer(buf); if (!reader.beforeMessageRead()) return false; switch (reader.state()) { case 0: bytes = reader.readByteArray("bytes"); if (!reader.isLastRead()) return false; reader.incrementState(); } return reader.afterMessageRead(CacheEntrySerializablePredicate.class); } /** {@inheritDoc} */ @Override public byte directType() { return 99; } /** {@inheritDoc} */ @Override public byte fieldsCount() { return 1; } }
afinka77/ignite
modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntrySerializablePredicate.java
Java
apache-2.0
4,293
/* * Copyright 2009 The Closure Compiler Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.javascript.jscomp.testing; import com.google.debugging.sourcemap.proto.Mapping.OriginalMapping; import com.google.javascript.jscomp.Region; import com.google.javascript.jscomp.SourceExcerptProvider; import com.google.javascript.jscomp.SourceFile; /** * A simple source excerpt provider for testing. */ public final class SimpleSourceExcerptProvider implements SourceExcerptProvider { private final SourceFile sourceFile; public SimpleSourceExcerptProvider(String source) { sourceFile = SourceFile.fromCode("input", source); } @Override public String getSourceLine(String sourceName, int lineNumber) { return sourceFile.getLine(lineNumber); } @Override public Region getSourceLines(String sourceName, int lineNumber, int length) { return sourceFile.getLines(lineNumber, length); } @Override public Region getSourceRegion(String sourceName, int lineNumber) { return sourceFile.getRegion(lineNumber); } @Override public OriginalMapping getSourceMapping(String sourceName, int lineNumber, int columnNumber) { return null; } }
GoogleChromeLabs/chromeos_smart_card_connector
third_party/closure-compiler/src/src/com/google/javascript/jscomp/testing/SimpleSourceExcerptProvider.java
Java
apache-2.0
1,757
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # vim: tabstop=4 shiftwidth=4 softtabstop=4 import logging from neutronclient.neutron import v2_0 as neutronV20 class ListCredential(neutronV20.ListCommand): """List credentials that belong to a given tenant.""" resource = 'credential' log = logging.getLogger(__name__ + '.ListCredential') _formatters = {} list_columns = ['credential_id', 'credential_name', 'user_name', 'password', 'type'] class ShowCredential(neutronV20.ShowCommand): """Show information of a given credential.""" resource = 'credential' log = logging.getLogger(__name__ + '.ShowCredential') allow_names = False class CreateCredential(neutronV20.CreateCommand): """Creates a credential.""" resource = 'credential' log = logging.getLogger(__name__ + '.CreateCredential') def add_known_arguments(self, parser): parser.add_argument( 'credential_name', help='Name/Ip address for Credential') parser.add_argument( 'credential_type', help='Type of the Credential') parser.add_argument( '--username', help='Username for the credential') parser.add_argument( '--password', help='Password for the credential') def args2body(self, parsed_args): body = {'credential': { 'credential_name': parsed_args.credential_name}} if parsed_args.credential_type: body['credential'].update({'type': parsed_args.credential_type}) if parsed_args.username: body['credential'].update({'user_name': parsed_args.username}) if parsed_args.password: 
body['credential'].update({'password': parsed_args.password}) return body class DeleteCredential(neutronV20.DeleteCommand): """Delete a given credential.""" log = logging.getLogger(__name__ + '.DeleteCredential') resource = 'credential' allow_names = False
vichoward/python-neutronclient
neutronclient/neutron/v2_0/credential.py
Python
apache-2.0
2,705
/** * @license * Copyright 2016 Google Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ foam.CLASS({ package: 'foam.graphics', name: 'StereoCView', extends: 'foam.graphics.CView', methods: [ function paintChildren(x) { this.children.sort(function(o1, o2) { return o2.z - o1.z; }); for ( var i = 0 ; i < this.children.length ; i++ ) { var c = this.children[i]; c.x += 20; c.paint(x); } x.translate(500, 0); for ( var i = 0 ; i < this.children.length ; i++ ) { var c = this.children[i]; c.x -= 20; c.paint(x); } } ] });
jacksonic/vjlofvhjfgm
src/foam/graphics/StereoCView.js
JavaScript
apache-2.0
1,166
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; public class PercentilesBucketPipelineAggregator extends BucketMetricsPipelineAggregator { private final double[] percents; private boolean keyed = true; private List<Double> data; PercentilesBucketPipelineAggregator(String name, double[] percents, boolean keyed, String[] bucketsPaths, GapPolicy gapPolicy, DocValueFormat formatter, Map<String, Object> metadata) { super(name, bucketsPaths, gapPolicy, formatter, metadata); this.percents = percents; this.keyed = keyed; } @Override protected void preCollection() { data = new ArrayList<>(1024); } @Override protected void collectBucketValue(String bucketKey, Double bucketValue) { data.add(bucketValue); } @Override protected InternalAggregation buildAggregation(Map<String, Object> metadata) { // Perform the sorting and percentile collection now that all the data // has been collected. 
Collections.sort(data); double[] percentiles = new double[percents.length]; if (data.size() == 0) { for (int i = 0; i < percents.length; i++) { percentiles[i] = Double.NaN; } } else { for (int i = 0; i < percents.length; i++) { int index = (int) Math.round((percents[i] / 100.0) * (data.size() - 1)); percentiles[i] = data.get(index); } } // todo need postCollection() to clean up temp sorted data? return new InternalPercentilesBucket(name(), percents, percentiles, keyed, format, metadata); } }
gingerwizard/elasticsearch
server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java
Java
apache-2.0
2,793
// Generated by xsd compiler for android/java // DO NOT CHANGE! package com.ebay.trading.api; /** * * The status of gallery image generation. That status will return either a value of 'Success' or * a value that indicates why the gallery image has not been generated. * */ public enum GalleryStatusCodeType { /** * * Gallery Image successfully generated. * */ SUCCESS("Success"), /** * * Gallery image has not yet been generated. * */ PENDING("Pending"), /** * * The URL for the image is not valid. * */ INVALID_URL("InvalidUrl"), /** * * URL does not start with http:// - That is the only protocol currently supported for pictures. * */ INVALID_PROTOCOL("InvalidProtocol"), /** * * There is a problem with the file containing the image. * */ INVALID_FILE("InvalidFile"), /** * * The server containing your image was unavailable when we tried to retrieve it. * */ SERVER_DOWN("ServerDown"), /** * * We could not find your Gallery image when we went to retrieve it. * */ IMAGE_NON_EXISTENT("ImageNonExistent"), /** * * The image failed to come across the Internet when we tried to retrieve it. * */ IMAGE_READ_TIME_OUT("ImageReadTimeOut"), /** * * The file containing your image is not in standard jpeg, bmp, or tif format. * */ INVALID_FILE_FORMAT("InvalidFileFormat"), /** * * We were not able to process the image. * */ IMAGE_PROCESSING_ERROR("ImageProcessingError"), /** * * Reserved for internal or future use. * */ CUSTOM_CODE("CustomCode"); private final String value; GalleryStatusCodeType(String v) { value = v; } public String value() { return value; } public static GalleryStatusCodeType fromValue(String v) { if (v != null) { for (GalleryStatusCodeType c: GalleryStatusCodeType.values()) { if (c.value.equals(v)) { return c; } } } throw new IllegalArgumentException(v); } }
uaraven/nano
sample/webservice/eBayDemoApp/src/com/ebay/trading/api/GalleryStatusCodeType.java
Java
apache-2.0
2,546
var path = require('path'); var assign = require('object-assign'); var forEachBail = require('enhanced-resolve/lib/forEachBail'); var basename = require('enhanced-resolve/lib/getPaths').basename; module.exports = function (modulesToResolveToEs5) { return { apply: doApply.bind(this, modulesToResolveToEs5) }; }; function doApply(modulesToResolveToEs5, resolver) { // file type taken from: https://github.com/webpack/enhanced-resolve/blob/v4.0.0/test/plugins.js var target = resolver.ensureHook("undescribed-raw-file"); //console.log(resolver.hooks); resolver.getHook("resolve") .tapAsync("ViewerResolvePlugin", (request, resolveContext, callback) => { for(var package of modulesToResolveToEs5){ if(request.request.indexOf(package+"/") == 0){ const newRequest = Object.assign({}, request, { request: package, }); return resolver.doResolve(target, newRequest, "viewer resolved", resolveContext, callback); } } return callback(); }); }
BabylonJS/Babylon.js
Tools/WebpackPlugins/viewerResolve.js
JavaScript
apache-2.0
1,075
// Copyright 2000-2021 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file. package com.intellij.tasks.config; import com.intellij.openapi.Disposable; import com.intellij.openapi.application.ApplicationManager; import com.intellij.openapi.components.*; import com.intellij.openapi.util.text.StringUtil; import com.intellij.tasks.TaskRepository; import com.intellij.tasks.TaskRepositoryType; import com.intellij.tasks.impl.TaskManagerImpl; import com.intellij.util.containers.CollectionFactory; import com.intellij.util.containers.ContainerUtil; import com.intellij.util.containers.HashingStrategy; import com.intellij.util.xmlb.XmlSerializer; import org.jdom.Element; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import java.util.*; /** * @author Dmitry Avdeev */ @State(name = "RecentTaskRepositories", storages = @Storage(StoragePathMacros.NON_ROAMABLE_FILE)) @Service(Service.Level.APP) public final class RecentTaskRepositories implements PersistentStateComponent<Element>, Disposable { private final Set<TaskRepository> myRepositories = CollectionFactory.createCustomHashingStrategySet(HASHING_STRATEGY); private static final HashingStrategy<TaskRepository> HASHING_STRATEGY = new HashingStrategy<>() { @Override public int hashCode(@Nullable TaskRepository object) { return object == null || object.getUrl() == null ? 
0 : object.getUrl().hashCode(); } @Override public boolean equals(TaskRepository o1, TaskRepository o2) { return o1 == o2 || (o1 != null && o2 != null && Objects.equals(o1.getUrl(), o2.getUrl())); } }; public RecentTaskRepositories() { // remove repositories pertaining to non-existent types TaskRepositoryType.addEPListChangeListener(this, () -> { List<Class<?>> possibleRepositoryClasses = TaskRepositoryType.getRepositoryClasses(); myRepositories.removeIf(repository -> { return !ContainerUtil.exists(possibleRepositoryClasses, clazz -> clazz.isAssignableFrom(repository.getClass())); }); }); } public static RecentTaskRepositories getInstance() { return ApplicationManager.getApplication().getService(RecentTaskRepositories.class); } public Set<TaskRepository> getRepositories() { Set<TaskRepository> set = CollectionFactory.createCustomHashingStrategySet(HASHING_STRATEGY); set.addAll(ContainerUtil.findAll(myRepositories, repository -> !StringUtil.isEmptyOrSpaces(repository.getUrl()))); return set; } public void addRepositories(Collection<TaskRepository> repositories) { Collection<TaskRepository> old = new ArrayList<>(myRepositories); myRepositories.clear(); if (doAddReps(repositories)) return; doAddReps(old); } private boolean doAddReps(Collection<TaskRepository> repositories) { for (TaskRepository repository : repositories) { if (!StringUtil.isEmptyOrSpaces(repository.getUrl())) { if (myRepositories.size() == 10) { return true; } myRepositories.add(repository); } } return false; } @Override public Element getState() { return XmlSerializer.serialize(myRepositories.toArray(new TaskRepository[0])); } @Override public void loadState(@NotNull Element state) { myRepositories.clear(); myRepositories.addAll(TaskManagerImpl.loadRepositories(state)); } @Override public void dispose() {} }
siosio/intellij-community
plugins/tasks/tasks-core/src/com/intellij/tasks/config/RecentTaskRepositories.java
Java
apache-2.0
3,466
/** * Copyright (C) 2013 - present by OpenGamma Inc. and the OpenGamma group of companies * * Please see distribution for license. */ package com.opengamma.integration.copier.snapshot.reader; import java.util.Map; import com.opengamma.core.marketdatasnapshot.CurveKey; import com.opengamma.core.marketdatasnapshot.CurveSnapshot; import com.opengamma.core.marketdatasnapshot.UnstructuredMarketDataSnapshot; import com.opengamma.core.marketdatasnapshot.VolatilitySurfaceKey; import com.opengamma.core.marketdatasnapshot.VolatilitySurfaceSnapshot; import com.opengamma.core.marketdatasnapshot.YieldCurveKey; import com.opengamma.core.marketdatasnapshot.YieldCurveSnapshot; /** * Abstract snapshot reader with methods that provide getters for the specific elements a snapshot * note that VolatilityCubes are not present. */ public abstract interface SnapshotReader { Map<CurveKey, CurveSnapshot> readCurves(); UnstructuredMarketDataSnapshot readGlobalValues(); Map<VolatilitySurfaceKey, VolatilitySurfaceSnapshot> readVolatilitySurfaces(); Map<YieldCurveKey, YieldCurveSnapshot> readYieldCurves(); void close(); String getName(); String getBasisViewName(); }
DevStreet/FinanceAnalytics
projects/OG-Integration/src/main/java/com/opengamma/integration/copier/snapshot/reader/SnapshotReader.java
Java
apache-2.0
1,187
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "ValidatingCodec.hh" #include <string> #include <map> #include <algorithm> #include <boost/shared_ptr.hpp> #include <boost/make_shared.hpp> #include <boost/weak_ptr.hpp> #include <boost/any.hpp> #include "ValidSchema.hh" #include "Decoder.hh" #include "Encoder.hh" #include "NodeImpl.hh" namespace avro { namespace parsing { using boost::shared_ptr; using boost::weak_ptr; using boost::static_pointer_cast; using std::map; using std::vector; using std::pair; using std::string; using std::reverse; using std::ostringstream; /** Follows the design of Avro Parser in Java. 
*/ Production ValidatingGrammarGenerator::generate(const NodePtr& n) { map<NodePtr, shared_ptr<Production> > m; Production result = doGenerate(n, m); fixup(result, m); return result; } Symbol ValidatingGrammarGenerator::generate(const ValidSchema& schema) { return Symbol::rootSymbol(generate(schema.root())); } Production ValidatingGrammarGenerator::doGenerate(const NodePtr& n, map<NodePtr, shared_ptr<Production> > &m) { switch (n->type()) { case AVRO_NULL: return Production(1, Symbol::nullSymbol()); case AVRO_BOOL: return Production(1, Symbol::boolSymbol()); case AVRO_INT: return Production(1, Symbol::intSymbol()); case AVRO_LONG: return Production(1, Symbol::longSymbol()); case AVRO_FLOAT: return Production(1, Symbol::floatSymbol()); case AVRO_DOUBLE: return Production(1, Symbol::doubleSymbol()); case AVRO_STRING: return Production(1, Symbol::stringSymbol()); case AVRO_BYTES: return Production(1, Symbol::bytesSymbol()); case AVRO_FIXED: { Symbol r[] = { Symbol::sizeCheckSymbol(n->fixedSize()), Symbol::fixedSymbol() }; Production result(r, r + 2); m[n] = boost::make_shared<Production>(result); return result; } case AVRO_RECORD: { Production result; m.erase(n); size_t c = n->leaves(); for (size_t i = 0; i < c; ++i) { const NodePtr& leaf = n->leafAt(i); Production v = doGenerate(leaf, m); copy(v.rbegin(), v.rend(), back_inserter(result)); } reverse(result.begin(), result.end()); bool found = m.find(n) != m.end(); shared_ptr<Production> p = boost::make_shared<Production>(result); m[n] = p; return found ? 
Production(1, Symbol::indirect(p)) : result; } case AVRO_ENUM: { Symbol r[] = { Symbol::sizeCheckSymbol(n->names()), Symbol::enumSymbol() }; Production result(r, r + 2); m[n] = boost::make_shared<Production>(result); return result; } case AVRO_ARRAY: { Symbol r[] = { Symbol::arrayEndSymbol(), Symbol::repeater(doGenerate(n->leafAt(0), m), true), Symbol::arrayStartSymbol() }; return Production(r, r + 3); } case AVRO_MAP: { Production v = doGenerate(n->leafAt(1), m); v.push_back(Symbol::stringSymbol()); Symbol r[] = { Symbol::mapEndSymbol(), Symbol::repeater(v, false), Symbol::mapStartSymbol() }; return Production(r, r + 3); } case AVRO_UNION: { vector<Production> vv; size_t c = n->leaves(); vv.reserve(c); for (size_t i = 0; i < c; ++i) { vv.push_back(doGenerate(n->leafAt(i), m)); } Symbol r[] = { Symbol::alternative(vv), Symbol::unionSymbol() }; return Production(r, r + 2); } case AVRO_SYMBOLIC: { shared_ptr<NodeSymbolic> ns = static_pointer_cast<NodeSymbolic>(n); NodePtr nn = ns->getNode(); map<NodePtr, shared_ptr<Production> >::iterator it = m.find(nn); if (it != m.end() && it->second) { return *it->second; } else { m[nn] = shared_ptr<Production>(); return Production(1, Symbol::placeholder(nn)); } } default: throw Exception("Unknown node type"); } } struct DummyHandler { size_t handle(const Symbol& s) { return 0; } }; template <typename P> class ValidatingDecoder : public Decoder { const shared_ptr<Decoder> base; DummyHandler handler_; P parser; void init(InputStream& is); void decodeNull(); bool decodeBool(); int32_t decodeInt(); int64_t decodeLong(); float decodeFloat(); double decodeDouble(); void decodeString(string& value); void skipString(); void decodeBytes(vector<uint8_t>& value); void skipBytes(); void decodeFixed(size_t n, vector<uint8_t>& value); void skipFixed(size_t n); size_t decodeEnum(); size_t arrayStart(); size_t arrayNext(); size_t skipArray(); size_t mapStart(); size_t mapNext(); size_t skipMap(); size_t decodeUnionIndex(); public: 
ValidatingDecoder(const ValidSchema& s, const shared_ptr<Decoder> b) : base(b), parser(ValidatingGrammarGenerator().generate(s), NULL, handler_) { } }; template <typename P> void ValidatingDecoder<P>::init(InputStream& is) { base->init(is); } template <typename P> void ValidatingDecoder<P>::decodeNull() { parser.advance(Symbol::sNull); } template <typename P> bool ValidatingDecoder<P>::decodeBool() { parser.advance(Symbol::sBool); return base->decodeBool(); } template <typename P> int32_t ValidatingDecoder<P>::decodeInt() { parser.advance(Symbol::sInt); return base->decodeInt(); } template <typename P> int64_t ValidatingDecoder<P>::decodeLong() { parser.advance(Symbol::sLong); return base->decodeLong(); } template <typename P> float ValidatingDecoder<P>::decodeFloat() { parser.advance(Symbol::sFloat); return base->decodeFloat(); } template <typename P> double ValidatingDecoder<P>::decodeDouble() { parser.advance(Symbol::sDouble); return base->decodeDouble(); } template <typename P> void ValidatingDecoder<P>::decodeString(string& value) { parser.advance(Symbol::sString); base->decodeString(value); } template <typename P> void ValidatingDecoder<P>::skipString() { parser.advance(Symbol::sString); base->skipString(); } template <typename P> void ValidatingDecoder<P>::decodeBytes(vector<uint8_t>& value) { parser.advance(Symbol::sBytes); base->decodeBytes(value); } template <typename P> void ValidatingDecoder<P>::skipBytes() { parser.advance(Symbol::sBytes); base->skipBytes(); } template <typename P> void ValidatingDecoder<P>::decodeFixed(size_t n, vector<uint8_t>& value) { parser.advance(Symbol::sFixed); parser.assertSize(n); base->decodeFixed(n, value); } template <typename P> void ValidatingDecoder<P>::skipFixed(size_t n) { parser.advance(Symbol::sFixed); parser.assertSize(n); base->skipFixed(n); } template <typename P> size_t ValidatingDecoder<P>::decodeEnum() { parser.advance(Symbol::sEnum); size_t result = base->decodeEnum(); parser.assertLessThanSize(result); 
return result; } template <typename P> size_t ValidatingDecoder<P>::arrayStart() { parser.advance(Symbol::sArrayStart); size_t result = base->arrayStart(); if (result == 0) { parser.popRepeater(); parser.advance(Symbol::sArrayEnd); } else { parser.setRepeatCount(result); } return result; } template <typename P> size_t ValidatingDecoder<P>::arrayNext() { size_t result = base->arrayNext(); if (result == 0) { parser.popRepeater(); parser.advance(Symbol::sArrayEnd); } else { parser.setRepeatCount(result); } return result; } template <typename P> size_t ValidatingDecoder<P>::skipArray() { parser.advance(Symbol::sArrayStart); size_t n = base->skipArray(); if (n == 0) { parser.pop(); } else { parser.setRepeatCount(n); parser.skip(*base); } parser.advance(Symbol::sArrayEnd); return 0; } template <typename P> size_t ValidatingDecoder<P>::mapStart() { parser.advance(Symbol::sMapStart); size_t result = base->mapStart(); if (result == 0) { parser.popRepeater(); parser.advance(Symbol::sMapEnd); } else { parser.setRepeatCount(result); } return result; } template <typename P> size_t ValidatingDecoder<P>::mapNext() { size_t result = base->mapNext(); if (result == 0) { parser.popRepeater(); parser.advance(Symbol::sMapEnd); } else { parser.setRepeatCount(result); } return result; } template <typename P> size_t ValidatingDecoder<P>::skipMap() { parser.advance(Symbol::sMapStart); size_t n = base->skipMap(); if (n == 0) { parser.pop(); } else { parser.setRepeatCount(n); parser.skip(*base); } parser.advance(Symbol::sMapEnd); return 0; } template <typename P> size_t ValidatingDecoder<P>::decodeUnionIndex() { parser.advance(Symbol::sUnion); size_t result = base->decodeUnionIndex(); parser.selectBranch(result); return result; } template <typename P> class ValidatingEncoder : public Encoder { DummyHandler handler_; P parser_; EncoderPtr base_; void init(OutputStream& os); void flush(); void encodeNull(); void encodeBool(bool b); void encodeInt(int32_t i); void encodeLong(int64_t l); void 
encodeFloat(float f); void encodeDouble(double d); void encodeString(const std::string& s); void encodeBytes(const uint8_t *bytes, size_t len); void encodeFixed(const uint8_t *bytes, size_t len); void encodeEnum(size_t e); void arrayStart(); void arrayEnd(); void mapStart(); void mapEnd(); void setItemCount(size_t count); void startItem(); void encodeUnionIndex(size_t e); public: ValidatingEncoder(const ValidSchema& schema, const EncoderPtr& base) : parser_(ValidatingGrammarGenerator().generate(schema), NULL, handler_), base_(base) { } }; template<typename P> void ValidatingEncoder<P>::init(OutputStream& os) { base_->init(os); } template<typename P> void ValidatingEncoder<P>::flush() { base_->flush(); } template<typename P> void ValidatingEncoder<P>::encodeNull() { parser_.advance(Symbol::sNull); base_->encodeNull(); } template<typename P> void ValidatingEncoder<P>::encodeBool(bool b) { parser_.advance(Symbol::sBool); base_->encodeBool(b); } template<typename P> void ValidatingEncoder<P>::encodeInt(int32_t i) { parser_.advance(Symbol::sInt); base_->encodeInt(i); } template<typename P> void ValidatingEncoder<P>::encodeLong(int64_t l) { parser_.advance(Symbol::sLong); base_->encodeLong(l); } template<typename P> void ValidatingEncoder<P>::encodeFloat(float f) { parser_.advance(Symbol::sFloat); base_->encodeFloat(f); } template<typename P> void ValidatingEncoder<P>::encodeDouble(double d) { parser_.advance(Symbol::sDouble); base_->encodeDouble(d); } template<typename P> void ValidatingEncoder<P>::encodeString(const std::string& s) { parser_.advance(Symbol::sString); base_->encodeString(s); } template<typename P> void ValidatingEncoder<P>::encodeBytes(const uint8_t *bytes, size_t len) { parser_.advance(Symbol::sBytes); base_->encodeBytes(bytes, len); } template<typename P> void ValidatingEncoder<P>::encodeFixed(const uint8_t *bytes, size_t len) { parser_.advance(Symbol::sFixed); parser_.assertSize(len); base_->encodeFixed(bytes, len); } template<typename P> void 
ValidatingEncoder<P>::encodeEnum(size_t e) { parser_.advance(Symbol::sEnum); parser_.assertLessThanSize(e); base_->encodeEnum(e); } template<typename P> void ValidatingEncoder<P>::arrayStart() { parser_.advance(Symbol::sArrayStart); base_->arrayStart(); } template<typename P> void ValidatingEncoder<P>::arrayEnd() { parser_.popRepeater(); parser_.advance(Symbol::sArrayEnd); base_->arrayEnd(); } template<typename P> void ValidatingEncoder<P>::mapStart() { parser_.advance(Symbol::sMapStart); base_->mapStart(); } template<typename P> void ValidatingEncoder<P>::mapEnd() { parser_.popRepeater(); parser_.advance(Symbol::sMapEnd); base_->mapEnd(); } template<typename P> void ValidatingEncoder<P>::setItemCount(size_t count) { parser_.setRepeatCount(count); base_->setItemCount(count); } template<typename P> void ValidatingEncoder<P>::startItem() { if (parser_.top() != Symbol::sRepeater) { throw Exception("startItem at not an item boundary"); } base_->startItem(); } template<typename P> void ValidatingEncoder<P>::encodeUnionIndex(size_t e) { parser_.advance(Symbol::sUnion); parser_.selectBranch(e); base_->encodeUnionIndex(e); } } // namespace parsing DecoderPtr validatingDecoder(const ValidSchema& s, const DecoderPtr& base) { return boost::make_shared<parsing::ValidatingDecoder< parsing::SimpleParser<parsing::DummyHandler> > >(s, base); } EncoderPtr validatingEncoder(const ValidSchema& schema, const EncoderPtr& base) { return boost::make_shared<parsing::ValidatingEncoder< parsing::SimpleParser<parsing::DummyHandler> > >(schema, base); } } // namespace avro
RallySoftware/avro
lang/c++/impl/parsing/ValidatingCodec.cc
C++
apache-2.0
14,444
/** * Copyright (C) 2013 - present by OpenGamma Inc. and the OpenGamma group of companies * * Please see distribution for license. */ package com.opengamma.analytics.financial.model.option.pricing.tree; /** * A call has payoff max[S1 * S2 - K, 0], while a put pays off max[K - S1 * S2, 0] at expiry */ public class ProductOptionFunctionProvider extends OptionFunctionProvider2D { /** * @param strike Strike price * @param timeToExpiry Time to expiry * @param steps Number of steps * @param isCall True if call, false if put */ public ProductOptionFunctionProvider(final double strike, final double timeToExpiry, final int steps, final boolean isCall) { super(strike, timeToExpiry, steps, isCall); } @Override public double[][] getPayoffAtExpiry(final double assetPrice1, final double assetPrice2, final double upOverDown1, final double upOverDown2) { final double strike = getStrike(); final int nStepsP = getNumberOfSteps() + 1; final double sign = getSign(); final double[][] values = new double[nStepsP][nStepsP]; double priceTmp1 = assetPrice1; for (int i = 0; i < nStepsP; ++i) { double priceTmp2 = assetPrice2; for (int j = 0; j < nStepsP; ++j) { values[i][j] = Math.max(sign * (priceTmp1 * priceTmp2 - strike), 0.); priceTmp2 *= upOverDown2; } priceTmp1 *= upOverDown1; } return values; } @Override public double[][] getPayoffAtExpiryTrinomial(final double assetPrice1, final double assetPrice2, final double middleOverDown1, final double middleOverDown2) { final double strike = getStrike(); final int nNodes = 2 * getNumberOfSteps() + 1; final double sign = getSign(); final double[][] values = new double[nNodes][nNodes]; double priceTmp1 = assetPrice1; for (int i = 0; i < nNodes; ++i) { double priceTmp2 = assetPrice2; for (int j = 0; j < nNodes; ++j) { values[i][j] = Math.max(sign * (priceTmp1 * priceTmp2 - strike), 0.); priceTmp2 *= middleOverDown2; } priceTmp1 *= middleOverDown1; } return values; } @Override public int hashCode() { return super.hashCode(); } @Override public 
boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null) { return false; } if (!(obj instanceof ProductOptionFunctionProvider)) { return false; } return super.equals(obj); } }
jeorme/OG-Platform
projects/OG-Analytics/src/main/java/com/opengamma/analytics/financial/model/option/pricing/tree/ProductOptionFunctionProvider.java
Java
apache-2.0
2,452
/** * Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies * * Please see distribution for license. */ package com.opengamma.component.factory.engine; import java.util.LinkedHashMap; import java.util.Map; import org.joda.beans.Bean; import org.joda.beans.BeanBuilder; import org.joda.beans.BeanDefinition; import org.joda.beans.JodaBeanUtils; import org.joda.beans.MetaProperty; import org.joda.beans.Property; import org.joda.beans.PropertyDefinition; import org.joda.beans.impl.direct.DirectBeanBuilder; import org.joda.beans.impl.direct.DirectMetaProperty; import org.joda.beans.impl.direct.DirectMetaPropertyMap; import com.opengamma.component.ComponentInfo; import com.opengamma.component.ComponentRepository; import com.opengamma.component.factory.AbstractComponentFactory; import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSource; import com.opengamma.core.marketdatasnapshot.MarketDataSnapshotSource; import com.opengamma.engine.marketdata.CombinedMarketDataProviderFactory; import com.opengamma.engine.marketdata.MarketDataProviderFactory; import com.opengamma.engine.marketdata.historical.HistoricalMarketDataProviderFactory; import com.opengamma.engine.marketdata.historical.HistoricalShockMarketDataProviderFactory; import com.opengamma.engine.marketdata.historical.LatestHistoricalMarketDataProviderFactory; import com.opengamma.engine.marketdata.random.RandomizingMarketDataProviderFactory; import com.opengamma.engine.marketdata.resolver.CachingMarketDataProviderResolver; import com.opengamma.engine.marketdata.resolver.MarketDataProviderResolver; import com.opengamma.engine.marketdata.resolver.TypeBasedMarketDataProviderResolver; import com.opengamma.engine.marketdata.snapshot.UserMarketDataProviderFactory; import com.opengamma.engine.marketdata.spec.CombinedMarketDataSpecification; import com.opengamma.engine.marketdata.spec.FixedHistoricalMarketDataSpecification; import 
com.opengamma.engine.marketdata.spec.HistoricalShockMarketDataSpecification; import com.opengamma.engine.marketdata.spec.LatestHistoricalMarketDataSpecification; import com.opengamma.engine.marketdata.spec.LiveMarketDataSpecification; import com.opengamma.engine.marketdata.spec.RandomizingMarketDataSpecification; import com.opengamma.engine.marketdata.spec.UserMarketDataSpecification; import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesResolver; /** * Component factory for the market data provider resolver. */ @BeanDefinition public class MarketDataProviderResolverComponentFactory extends AbstractComponentFactory { /** * The classifier under which to publish. */ @PropertyDefinition(validate = "notNull") private String _classifier; /** * The live market data provider factory. May be null if no live data required. */ @PropertyDefinition() private MarketDataProviderFactory _liveMarketDataProviderFactory; /** * The historical time-series source. */ @PropertyDefinition(validate = "notNull") private HistoricalTimeSeriesSource _historicalTimeSeriesSource; /** * The historical time-series resolver. */ @PropertyDefinition(validate = "notNull") private HistoricalTimeSeriesResolver _historicalTimeSeriesResolver; /** * The market data snapshot source. 
*/ @PropertyDefinition(validate = "notNull") private MarketDataSnapshotSource _marketDataSnapshotSource; @Override public void init(final ComponentRepository repo, final LinkedHashMap<String, String> configuration) throws Exception { initMarketDataProviderResolver(repo); } protected MarketDataProviderResolver createMarketDataProviderResolver() { final TypeBasedMarketDataProviderResolver providerResolver = new TypeBasedMarketDataProviderResolver(); if (getLiveMarketDataProviderFactory() != null) { providerResolver.addProvider(LiveMarketDataSpecification.class, getLiveMarketDataProviderFactory()); } final MarketDataProviderFactory fixedHistoricalMarketDataProviderFactory = initFixedHistoricalMarketDataProviderFactory(); providerResolver.addProvider(FixedHistoricalMarketDataSpecification.class, fixedHistoricalMarketDataProviderFactory); final MarketDataProviderFactory latestHistoricalMarketDataProviderFactory = initLatestHistoricalMarketDataProviderFactory(); providerResolver.addProvider(LatestHistoricalMarketDataSpecification.class, latestHistoricalMarketDataProviderFactory); final MarketDataProviderFactory userMarketDataProviderFactory = initUserMarketDataProviderFactory(); providerResolver.addProvider(UserMarketDataSpecification.class, userMarketDataProviderFactory); final MarketDataProviderFactory combinedMarketDataProviderFactory = initCombinedMarketDataProviderFactory(providerResolver); providerResolver.addProvider(CombinedMarketDataSpecification.class, combinedMarketDataProviderFactory); final MarketDataProviderFactory historicalShockMarketDataProviderFactory = initHistoricalShockMarketDataProviderFactory(providerResolver); providerResolver.addProvider(HistoricalShockMarketDataSpecification.class, historicalShockMarketDataProviderFactory); final MarketDataProviderFactory randomizingMarketDataProviderFactory = initRandomizingMarketDataProviderFactory(providerResolver); providerResolver.addProvider(RandomizingMarketDataSpecification.class, 
randomizingMarketDataProviderFactory); return providerResolver; } private void initMarketDataProviderResolver(final ComponentRepository repo) { final MarketDataProviderResolver resolver = new CachingMarketDataProviderResolver(createMarketDataProviderResolver()); final ComponentInfo info = new ComponentInfo(MarketDataProviderResolver.class, getClassifier()); repo.registerComponent(info, resolver); } private MarketDataProviderFactory initRandomizingMarketDataProviderFactory(MarketDataProviderResolver resolver) { return new RandomizingMarketDataProviderFactory(resolver); } protected MarketDataProviderFactory initFixedHistoricalMarketDataProviderFactory() { return new HistoricalMarketDataProviderFactory(getHistoricalTimeSeriesSource(), getHistoricalTimeSeriesResolver()); } protected MarketDataProviderFactory initLatestHistoricalMarketDataProviderFactory() { return new LatestHistoricalMarketDataProviderFactory(getHistoricalTimeSeriesSource(), getHistoricalTimeSeriesResolver()); } protected MarketDataProviderFactory initUserMarketDataProviderFactory() { return new UserMarketDataProviderFactory(getMarketDataSnapshotSource()); } protected MarketDataProviderFactory initCombinedMarketDataProviderFactory(final MarketDataProviderResolver underlyingResolver) { return new CombinedMarketDataProviderFactory(underlyingResolver); } protected MarketDataProviderFactory initHistoricalShockMarketDataProviderFactory(MarketDataProviderResolver underlyingResolver) { return new HistoricalShockMarketDataProviderFactory(underlyingResolver); } //------------------------- AUTOGENERATED START ------------------------- ///CLOVER:OFF /** * The meta-bean for {@code MarketDataProviderResolverComponentFactory}. 
* @return the meta-bean, not null */ public static MarketDataProviderResolverComponentFactory.Meta meta() { return MarketDataProviderResolverComponentFactory.Meta.INSTANCE; } static { JodaBeanUtils.registerMetaBean(MarketDataProviderResolverComponentFactory.Meta.INSTANCE); } @Override public MarketDataProviderResolverComponentFactory.Meta metaBean() { return MarketDataProviderResolverComponentFactory.Meta.INSTANCE; } //----------------------------------------------------------------------- /** * Gets the classifier under which to publish. * @return the value of the property, not null */ public String getClassifier() { return _classifier; } /** * Sets the classifier under which to publish. * @param classifier the new value of the property, not null */ public void setClassifier(String classifier) { JodaBeanUtils.notNull(classifier, "classifier"); this._classifier = classifier; } /** * Gets the the {@code classifier} property. * @return the property, not null */ public final Property<String> classifier() { return metaBean().classifier().createProperty(this); } //----------------------------------------------------------------------- /** * Gets the live market data provider factory. May be null if no live data required. * @return the value of the property */ public MarketDataProviderFactory getLiveMarketDataProviderFactory() { return _liveMarketDataProviderFactory; } /** * Sets the live market data provider factory. May be null if no live data required. * @param liveMarketDataProviderFactory the new value of the property */ public void setLiveMarketDataProviderFactory(MarketDataProviderFactory liveMarketDataProviderFactory) { this._liveMarketDataProviderFactory = liveMarketDataProviderFactory; } /** * Gets the the {@code liveMarketDataProviderFactory} property. 
* @return the property, not null */ public final Property<MarketDataProviderFactory> liveMarketDataProviderFactory() { return metaBean().liveMarketDataProviderFactory().createProperty(this); } //----------------------------------------------------------------------- /** * Gets the historical time-series source. * @return the value of the property, not null */ public HistoricalTimeSeriesSource getHistoricalTimeSeriesSource() { return _historicalTimeSeriesSource; } /** * Sets the historical time-series source. * @param historicalTimeSeriesSource the new value of the property, not null */ public void setHistoricalTimeSeriesSource(HistoricalTimeSeriesSource historicalTimeSeriesSource) { JodaBeanUtils.notNull(historicalTimeSeriesSource, "historicalTimeSeriesSource"); this._historicalTimeSeriesSource = historicalTimeSeriesSource; } /** * Gets the the {@code historicalTimeSeriesSource} property. * @return the property, not null */ public final Property<HistoricalTimeSeriesSource> historicalTimeSeriesSource() { return metaBean().historicalTimeSeriesSource().createProperty(this); } //----------------------------------------------------------------------- /** * Gets the historical time-series resolver. * @return the value of the property, not null */ public HistoricalTimeSeriesResolver getHistoricalTimeSeriesResolver() { return _historicalTimeSeriesResolver; } /** * Sets the historical time-series resolver. * @param historicalTimeSeriesResolver the new value of the property, not null */ public void setHistoricalTimeSeriesResolver(HistoricalTimeSeriesResolver historicalTimeSeriesResolver) { JodaBeanUtils.notNull(historicalTimeSeriesResolver, "historicalTimeSeriesResolver"); this._historicalTimeSeriesResolver = historicalTimeSeriesResolver; } /** * Gets the the {@code historicalTimeSeriesResolver} property. 
* @return the property, not null */ public final Property<HistoricalTimeSeriesResolver> historicalTimeSeriesResolver() { return metaBean().historicalTimeSeriesResolver().createProperty(this); } //----------------------------------------------------------------------- /** * Gets the market data snapshot source. * @return the value of the property, not null */ public MarketDataSnapshotSource getMarketDataSnapshotSource() { return _marketDataSnapshotSource; } /** * Sets the market data snapshot source. * @param marketDataSnapshotSource the new value of the property, not null */ public void setMarketDataSnapshotSource(MarketDataSnapshotSource marketDataSnapshotSource) { JodaBeanUtils.notNull(marketDataSnapshotSource, "marketDataSnapshotSource"); this._marketDataSnapshotSource = marketDataSnapshotSource; } /** * Gets the the {@code marketDataSnapshotSource} property. * @return the property, not null */ public final Property<MarketDataSnapshotSource> marketDataSnapshotSource() { return metaBean().marketDataSnapshotSource().createProperty(this); } //----------------------------------------------------------------------- @Override public MarketDataProviderResolverComponentFactory clone() { return JodaBeanUtils.cloneAlways(this); } @Override public boolean equals(Object obj) { if (obj == this) { return true; } if (obj != null && obj.getClass() == this.getClass()) { MarketDataProviderResolverComponentFactory other = (MarketDataProviderResolverComponentFactory) obj; return JodaBeanUtils.equal(getClassifier(), other.getClassifier()) && JodaBeanUtils.equal(getLiveMarketDataProviderFactory(), other.getLiveMarketDataProviderFactory()) && JodaBeanUtils.equal(getHistoricalTimeSeriesSource(), other.getHistoricalTimeSeriesSource()) && JodaBeanUtils.equal(getHistoricalTimeSeriesResolver(), other.getHistoricalTimeSeriesResolver()) && JodaBeanUtils.equal(getMarketDataSnapshotSource(), other.getMarketDataSnapshotSource()) && super.equals(obj); } return false; } @Override public int 
hashCode() { int hash = 7; hash = hash * 31 + JodaBeanUtils.hashCode(getClassifier()); hash = hash * 31 + JodaBeanUtils.hashCode(getLiveMarketDataProviderFactory()); hash = hash * 31 + JodaBeanUtils.hashCode(getHistoricalTimeSeriesSource()); hash = hash * 31 + JodaBeanUtils.hashCode(getHistoricalTimeSeriesResolver()); hash = hash * 31 + JodaBeanUtils.hashCode(getMarketDataSnapshotSource()); return hash ^ super.hashCode(); } @Override public String toString() { StringBuilder buf = new StringBuilder(192); buf.append("MarketDataProviderResolverComponentFactory{"); int len = buf.length(); toString(buf); if (buf.length() > len) { buf.setLength(buf.length() - 2); } buf.append('}'); return buf.toString(); } @Override protected void toString(StringBuilder buf) { super.toString(buf); buf.append("classifier").append('=').append(JodaBeanUtils.toString(getClassifier())).append(',').append(' '); buf.append("liveMarketDataProviderFactory").append('=').append(JodaBeanUtils.toString(getLiveMarketDataProviderFactory())).append(',').append(' '); buf.append("historicalTimeSeriesSource").append('=').append(JodaBeanUtils.toString(getHistoricalTimeSeriesSource())).append(',').append(' '); buf.append("historicalTimeSeriesResolver").append('=').append(JodaBeanUtils.toString(getHistoricalTimeSeriesResolver())).append(',').append(' '); buf.append("marketDataSnapshotSource").append('=').append(JodaBeanUtils.toString(getMarketDataSnapshotSource())).append(',').append(' '); } //----------------------------------------------------------------------- /** * The meta-bean for {@code MarketDataProviderResolverComponentFactory}. */ public static class Meta extends AbstractComponentFactory.Meta { /** * The singleton instance of the meta-bean. */ static final Meta INSTANCE = new Meta(); /** * The meta-property for the {@code classifier} property. 
*/ private final MetaProperty<String> _classifier = DirectMetaProperty.ofReadWrite( this, "classifier", MarketDataProviderResolverComponentFactory.class, String.class); /** * The meta-property for the {@code liveMarketDataProviderFactory} property. */ private final MetaProperty<MarketDataProviderFactory> _liveMarketDataProviderFactory = DirectMetaProperty.ofReadWrite( this, "liveMarketDataProviderFactory", MarketDataProviderResolverComponentFactory.class, MarketDataProviderFactory.class); /** * The meta-property for the {@code historicalTimeSeriesSource} property. */ private final MetaProperty<HistoricalTimeSeriesSource> _historicalTimeSeriesSource = DirectMetaProperty.ofReadWrite( this, "historicalTimeSeriesSource", MarketDataProviderResolverComponentFactory.class, HistoricalTimeSeriesSource.class); /** * The meta-property for the {@code historicalTimeSeriesResolver} property. */ private final MetaProperty<HistoricalTimeSeriesResolver> _historicalTimeSeriesResolver = DirectMetaProperty.ofReadWrite( this, "historicalTimeSeriesResolver", MarketDataProviderResolverComponentFactory.class, HistoricalTimeSeriesResolver.class); /** * The meta-property for the {@code marketDataSnapshotSource} property. */ private final MetaProperty<MarketDataSnapshotSource> _marketDataSnapshotSource = DirectMetaProperty.ofReadWrite( this, "marketDataSnapshotSource", MarketDataProviderResolverComponentFactory.class, MarketDataSnapshotSource.class); /** * The meta-properties. */ private final Map<String, MetaProperty<?>> _metaPropertyMap$ = new DirectMetaPropertyMap( this, (DirectMetaPropertyMap) super.metaPropertyMap(), "classifier", "liveMarketDataProviderFactory", "historicalTimeSeriesSource", "historicalTimeSeriesResolver", "marketDataSnapshotSource"); /** * Restricted constructor. 
*/ protected Meta() { } @Override protected MetaProperty<?> metaPropertyGet(String propertyName) { switch (propertyName.hashCode()) { case -281470431: // classifier return _classifier; case -301472921: // liveMarketDataProviderFactory return _liveMarketDataProviderFactory; case 358729161: // historicalTimeSeriesSource return _historicalTimeSeriesSource; case -946313676: // historicalTimeSeriesResolver return _historicalTimeSeriesResolver; case -2019554651: // marketDataSnapshotSource return _marketDataSnapshotSource; } return super.metaPropertyGet(propertyName); } @Override public BeanBuilder<? extends MarketDataProviderResolverComponentFactory> builder() { return new DirectBeanBuilder<MarketDataProviderResolverComponentFactory>(new MarketDataProviderResolverComponentFactory()); } @Override public Class<? extends MarketDataProviderResolverComponentFactory> beanType() { return MarketDataProviderResolverComponentFactory.class; } @Override public Map<String, MetaProperty<?>> metaPropertyMap() { return _metaPropertyMap$; } //----------------------------------------------------------------------- /** * The meta-property for the {@code classifier} property. * @return the meta-property, not null */ public final MetaProperty<String> classifier() { return _classifier; } /** * The meta-property for the {@code liveMarketDataProviderFactory} property. * @return the meta-property, not null */ public final MetaProperty<MarketDataProviderFactory> liveMarketDataProviderFactory() { return _liveMarketDataProviderFactory; } /** * The meta-property for the {@code historicalTimeSeriesSource} property. * @return the meta-property, not null */ public final MetaProperty<HistoricalTimeSeriesSource> historicalTimeSeriesSource() { return _historicalTimeSeriesSource; } /** * The meta-property for the {@code historicalTimeSeriesResolver} property. 
* @return the meta-property, not null */ public final MetaProperty<HistoricalTimeSeriesResolver> historicalTimeSeriesResolver() { return _historicalTimeSeriesResolver; } /** * The meta-property for the {@code marketDataSnapshotSource} property. * @return the meta-property, not null */ public final MetaProperty<MarketDataSnapshotSource> marketDataSnapshotSource() { return _marketDataSnapshotSource; } //----------------------------------------------------------------------- @Override protected Object propertyGet(Bean bean, String propertyName, boolean quiet) { switch (propertyName.hashCode()) { case -281470431: // classifier return ((MarketDataProviderResolverComponentFactory) bean).getClassifier(); case -301472921: // liveMarketDataProviderFactory return ((MarketDataProviderResolverComponentFactory) bean).getLiveMarketDataProviderFactory(); case 358729161: // historicalTimeSeriesSource return ((MarketDataProviderResolverComponentFactory) bean).getHistoricalTimeSeriesSource(); case -946313676: // historicalTimeSeriesResolver return ((MarketDataProviderResolverComponentFactory) bean).getHistoricalTimeSeriesResolver(); case -2019554651: // marketDataSnapshotSource return ((MarketDataProviderResolverComponentFactory) bean).getMarketDataSnapshotSource(); } return super.propertyGet(bean, propertyName, quiet); } @Override protected void propertySet(Bean bean, String propertyName, Object newValue, boolean quiet) { switch (propertyName.hashCode()) { case -281470431: // classifier ((MarketDataProviderResolverComponentFactory) bean).setClassifier((String) newValue); return; case -301472921: // liveMarketDataProviderFactory ((MarketDataProviderResolverComponentFactory) bean).setLiveMarketDataProviderFactory((MarketDataProviderFactory) newValue); return; case 358729161: // historicalTimeSeriesSource ((MarketDataProviderResolverComponentFactory) bean).setHistoricalTimeSeriesSource((HistoricalTimeSeriesSource) newValue); return; case -946313676: // historicalTimeSeriesResolver 
((MarketDataProviderResolverComponentFactory) bean).setHistoricalTimeSeriesResolver((HistoricalTimeSeriesResolver) newValue); return; case -2019554651: // marketDataSnapshotSource ((MarketDataProviderResolverComponentFactory) bean).setMarketDataSnapshotSource((MarketDataSnapshotSource) newValue); return; } super.propertySet(bean, propertyName, newValue, quiet); } @Override protected void validate(Bean bean) { JodaBeanUtils.notNull(((MarketDataProviderResolverComponentFactory) bean)._classifier, "classifier"); JodaBeanUtils.notNull(((MarketDataProviderResolverComponentFactory) bean)._historicalTimeSeriesSource, "historicalTimeSeriesSource"); JodaBeanUtils.notNull(((MarketDataProviderResolverComponentFactory) bean)._historicalTimeSeriesResolver, "historicalTimeSeriesResolver"); JodaBeanUtils.notNull(((MarketDataProviderResolverComponentFactory) bean)._marketDataSnapshotSource, "marketDataSnapshotSource"); super.validate(bean); } } ///CLOVER:ON //-------------------------- AUTOGENERATED END -------------------------- }
jeorme/OG-Platform
projects/OG-Component/src/main/java/com/opengamma/component/factory/engine/MarketDataProviderResolverComponentFactory.java
Java
apache-2.0
22,810
/* * Copyright 2013-present Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package com.facebook.buck.dalvik; import com.facebook.buck.jvm.java.classes.FileLike; import com.google.common.collect.MapMaker; import java.io.IOException; import java.io.InputStream; import java.util.concurrent.ConcurrentMap; /** Cache to memoize results from DalvikStatsTool. */ class DalvikStatsCache { private final ConcurrentMap<FileLike, DalvikStatsTool.Stats> cache; DalvikStatsCache() { cache = new MapMaker().weakKeys().makeMap(); } DalvikStatsTool.Stats getStats(FileLike entry) { String name = entry.getRelativePath(); if (!name.endsWith(".class")) { // Probably something like a pom.properties file in a JAR: this does not contribute // to the linear alloc size, so return zero. return DalvikStatsTool.Stats.ZERO; } DalvikStatsTool.Stats stats = cache.get(entry); if (stats != null) { return stats; } try (InputStream is = entry.getInput()) { stats = DalvikStatsTool.getEstimate(is); cache.put(entry, stats); return stats; } catch (IOException e) { throw new RuntimeException(String.format("Error calculating size for %s.", name), e); } catch (RuntimeException e) { throw new RuntimeException(String.format("Error calculating size for %s.", name), e); } } }
marcinkwiatkowski/buck
src/com/facebook/buck/dalvik/DalvikStatsCache.java
Java
apache-2.0
1,893
package policybased import ( "errors" "fmt" "sort" kapi "k8s.io/kubernetes/pkg/api" kapierrors "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/rest" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/client/retry" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/runtime" oapi "github.com/openshift/origin/pkg/api" authorizationapi "github.com/openshift/origin/pkg/authorization/api" authorizationinterfaces "github.com/openshift/origin/pkg/authorization/interfaces" policybindingregistry "github.com/openshift/origin/pkg/authorization/registry/policybinding" rolebindingregistry "github.com/openshift/origin/pkg/authorization/registry/rolebinding" "github.com/openshift/origin/pkg/authorization/rulevalidation" ) type VirtualStorage struct { BindingRegistry policybindingregistry.Registry RuleResolver rulevalidation.AuthorizationRuleResolver CachedRuleResolver rulevalidation.AuthorizationRuleResolver CreateStrategy rest.RESTCreateStrategy UpdateStrategy rest.RESTUpdateStrategy Resource unversioned.GroupResource } // NewVirtualStorage creates a new REST for policies. 
func NewVirtualStorage(bindingRegistry policybindingregistry.Registry, ruleResolver, cachedRuleResolver rulevalidation.AuthorizationRuleResolver, resource unversioned.GroupResource) rolebindingregistry.Storage { return &VirtualStorage{ BindingRegistry: bindingRegistry, RuleResolver: ruleResolver, CachedRuleResolver: cachedRuleResolver, CreateStrategy: rolebindingregistry.LocalStrategy, UpdateStrategy: rolebindingregistry.LocalStrategy, Resource: resource, } } func (m *VirtualStorage) New() runtime.Object { return &authorizationapi.RoleBinding{} } func (m *VirtualStorage) NewList() runtime.Object { return &authorizationapi.RoleBindingList{} } func (m *VirtualStorage) List(ctx kapi.Context, options *kapi.ListOptions) (runtime.Object, error) { policyBindingList, err := m.BindingRegistry.ListPolicyBindings(ctx, &kapi.ListOptions{}) if err != nil { return nil, err } labelSelector, fieldSelector := oapi.ListOptionsToSelectors(options) roleBindingList := &authorizationapi.RoleBindingList{} for _, policyBinding := range policyBindingList.Items { for _, roleBinding := range policyBinding.RoleBindings { if labelSelector.Matches(labels.Set(roleBinding.Labels)) && fieldSelector.Matches(authorizationapi.RoleBindingToSelectableFields(roleBinding)) { roleBindingList.Items = append(roleBindingList.Items, *roleBinding) } } } sort.Sort(byName(roleBindingList.Items)) return roleBindingList, nil } func (m *VirtualStorage) Get(ctx kapi.Context, name string) (runtime.Object, error) { policyBinding, err := m.getPolicyBindingOwningRoleBinding(ctx, name) if kapierrors.IsNotFound(err) { return nil, kapierrors.NewNotFound(m.Resource, name) } if err != nil { return nil, err } binding, exists := policyBinding.RoleBindings[name] if !exists { return nil, kapierrors.NewNotFound(m.Resource, name) } return binding, nil } func (m *VirtualStorage) Delete(ctx kapi.Context, name string, options *kapi.DeleteOptions) (runtime.Object, error) { if err := retry.RetryOnConflict(retry.DefaultRetry, func() 
error { owningPolicyBinding, err := m.getPolicyBindingOwningRoleBinding(ctx, name) if kapierrors.IsNotFound(err) { return kapierrors.NewNotFound(m.Resource, name) } if err != nil { return err } if _, exists := owningPolicyBinding.RoleBindings[name]; !exists { return kapierrors.NewNotFound(m.Resource, name) } delete(owningPolicyBinding.RoleBindings, name) owningPolicyBinding.LastModified = unversioned.Now() return m.BindingRegistry.UpdatePolicyBinding(ctx, owningPolicyBinding) }); err != nil { return nil, err } return &unversioned.Status{Status: unversioned.StatusSuccess}, nil } func (m *VirtualStorage) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) { return m.createRoleBinding(ctx, obj, false) } func (m *VirtualStorage) CreateRoleBindingWithEscalation(ctx kapi.Context, obj *authorizationapi.RoleBinding) (*authorizationapi.RoleBinding, error) { return m.createRoleBinding(ctx, obj, true) } func (m *VirtualStorage) createRoleBinding(ctx kapi.Context, obj runtime.Object, allowEscalation bool) (*authorizationapi.RoleBinding, error) { // Copy object before passing to BeforeCreate, since it mutates objCopy, err := kapi.Scheme.DeepCopy(obj) if err != nil { return nil, err } obj = objCopy.(runtime.Object) if err := rest.BeforeCreate(m.CreateStrategy, ctx, obj); err != nil { return nil, err } roleBinding := obj.(*authorizationapi.RoleBinding) if !allowEscalation { if err := m.confirmNoEscalation(ctx, roleBinding); err != nil { return nil, err } } // Retry if we hit a conflict on the underlying PolicyBinding object if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { policyBinding, err := m.getPolicyBindingForPolicy(ctx, roleBinding.RoleRef.Namespace, allowEscalation) if err != nil { return err } _, exists := policyBinding.RoleBindings[roleBinding.Name] if exists { return kapierrors.NewAlreadyExists(m.Resource, roleBinding.Name) } roleBinding.ResourceVersion = policyBinding.ResourceVersion policyBinding.RoleBindings[roleBinding.Name] = 
roleBinding policyBinding.LastModified = unversioned.Now() return m.BindingRegistry.UpdatePolicyBinding(ctx, policyBinding) }); err != nil { return nil, err } return roleBinding, nil } func (m *VirtualStorage) Update(ctx kapi.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { return m.updateRoleBinding(ctx, name, objInfo, false) } func (m *VirtualStorage) UpdateRoleBindingWithEscalation(ctx kapi.Context, obj *authorizationapi.RoleBinding) (*authorizationapi.RoleBinding, bool, error) { return m.updateRoleBinding(ctx, obj.Name, rest.DefaultUpdatedObjectInfo(obj, kapi.Scheme), true) } func (m *VirtualStorage) updateRoleBinding(ctx kapi.Context, name string, objInfo rest.UpdatedObjectInfo, allowEscalation bool) (*authorizationapi.RoleBinding, bool, error) { var updatedRoleBinding *authorizationapi.RoleBinding var roleBindingConflicted = false if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { // Do an initial fetch old, err := m.Get(ctx, name) if err != nil { return err } oldRoleBinding, exists := old.(*authorizationapi.RoleBinding) if !exists { return kapierrors.NewBadRequest(fmt.Sprintf("old obj is not a role binding: %#v", old)) } // get the updated object, so we know what namespace we're binding against obj, err := objInfo.UpdatedObject(ctx, old) if err != nil { return err } roleBinding, ok := obj.(*authorizationapi.RoleBinding) if !ok { return kapierrors.NewBadRequest(fmt.Sprintf("obj is not a role binding: %#v", obj)) } // now that we know which roleRef we want to go to, fetch the policyBinding we'll actually be updating, and re-get the oldRoleBinding policyBinding, err := m.getPolicyBindingForPolicy(ctx, roleBinding.RoleRef.Namespace, allowEscalation) if err != nil { return err } oldRoleBinding, exists = policyBinding.RoleBindings[roleBinding.Name] if !exists { return kapierrors.NewNotFound(m.Resource, roleBinding.Name) } if len(roleBinding.ResourceVersion) == 0 && m.UpdateStrategy.AllowUnconditionalUpdate() { 
roleBinding.ResourceVersion = oldRoleBinding.ResourceVersion } if err := rest.BeforeUpdate(m.UpdateStrategy, ctx, obj, oldRoleBinding); err != nil { return err } if !allowEscalation { if err := m.confirmNoEscalation(ctx, roleBinding); err != nil { return err } } // conflict detection if roleBinding.ResourceVersion != oldRoleBinding.ResourceVersion { // mark as a conflict err, but return an untyped error to escape the retry roleBindingConflicted = true return errors.New(registry.OptimisticLockErrorMsg) } // non-mutating change if kapi.Semantic.DeepEqual(oldRoleBinding, roleBinding) { updatedRoleBinding = roleBinding return nil } roleBinding.ResourceVersion = policyBinding.ResourceVersion policyBinding.RoleBindings[roleBinding.Name] = roleBinding policyBinding.LastModified = unversioned.Now() if err := m.BindingRegistry.UpdatePolicyBinding(ctx, policyBinding); err != nil { return err } updatedRoleBinding = roleBinding return nil }); err != nil { if roleBindingConflicted { // construct the typed conflict error return nil, false, kapierrors.NewConflict(m.Resource, name, err) } return nil, false, err } return updatedRoleBinding, false, nil } // roleForEscalationCheck tries to use the CachedRuleResolver if available to avoid expensive checks func (m *VirtualStorage) roleForEscalationCheck(binding authorizationinterfaces.RoleBinding) (authorizationinterfaces.Role, error) { if m.CachedRuleResolver != nil { if role, err := m.CachedRuleResolver.GetRole(binding); err == nil { return role, nil } } return m.RuleResolver.GetRole(binding) } func (m *VirtualStorage) confirmNoEscalation(ctx kapi.Context, roleBinding *authorizationapi.RoleBinding) error { modifyingRole, err := m.roleForEscalationCheck(authorizationinterfaces.NewLocalRoleBindingAdapter(roleBinding)) if err != nil { return err } return rulevalidation.ConfirmNoEscalation(ctx, m.Resource, roleBinding.Name, m.RuleResolver, m.CachedRuleResolver, modifyingRole) } // ensurePolicyBindingToMaster returns a PolicyBinding 
object that has a PolicyRef pointing to the Policy in the passed namespace. func (m *VirtualStorage) ensurePolicyBindingToMaster(ctx kapi.Context, policyNamespace, policyBindingName string) (*authorizationapi.PolicyBinding, error) { policyBinding, err := m.BindingRegistry.GetPolicyBinding(ctx, policyBindingName) if err != nil { if !kapierrors.IsNotFound(err) { return nil, err } // if we have no policyBinding, go ahead and make one. creating one here collapses code paths below. We only take this hit once policyBinding = policybindingregistry.NewEmptyPolicyBinding(kapi.NamespaceValue(ctx), policyNamespace, policyBindingName) if err := m.BindingRegistry.CreatePolicyBinding(ctx, policyBinding); err != nil { // Tolerate the policybinding having been created in the meantime if !kapierrors.IsAlreadyExists(err) { return nil, err } } policyBinding, err = m.BindingRegistry.GetPolicyBinding(ctx, policyBindingName) if err != nil { return nil, err } } if policyBinding.RoleBindings == nil { policyBinding.RoleBindings = make(map[string]*authorizationapi.RoleBinding) } return policyBinding, nil } // getPolicyBindingForPolicy returns a PolicyBinding that points to the specified policyNamespace. It will autocreate ONLY if policyNamespace equals the master namespace func (m *VirtualStorage) getPolicyBindingForPolicy(ctx kapi.Context, policyNamespace string, allowAutoProvision bool) (*authorizationapi.PolicyBinding, error) { // we can autocreate a PolicyBinding object if the RoleBinding is for the master namespace OR if we've been explicitly told to create the policying binding. 
// the latter happens during priming if (policyNamespace == "") || allowAutoProvision { return m.ensurePolicyBindingToMaster(ctx, policyNamespace, authorizationapi.GetPolicyBindingName(policyNamespace)) } policyBinding, err := m.BindingRegistry.GetPolicyBinding(ctx, authorizationapi.GetPolicyBindingName(policyNamespace)) if err != nil { return nil, err } if policyBinding.RoleBindings == nil { policyBinding.RoleBindings = make(map[string]*authorizationapi.RoleBinding) } return policyBinding, nil } func (m *VirtualStorage) getPolicyBindingOwningRoleBinding(ctx kapi.Context, bindingName string) (*authorizationapi.PolicyBinding, error) { policyBindingList, err := m.BindingRegistry.ListPolicyBindings(ctx, &kapi.ListOptions{}) if err != nil { return nil, err } for _, policyBinding := range policyBindingList.Items { _, exists := policyBinding.RoleBindings[bindingName] if exists { return &policyBinding, nil } } return nil, kapierrors.NewNotFound(m.Resource, bindingName) } type byName []authorizationapi.RoleBinding func (r byName) Len() int { return len(r) } func (r byName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } func (r byName) Less(i, j int) bool { return r[i].Name < r[j].Name }
fabric8io/gofabric8
vendor/github.com/openshift/origin/pkg/authorization/registry/rolebinding/policybased/virtual_storage.go
GO
apache-2.0
12,439
/* * Copyright 2016 The Cartographer Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "cartographer_ros/ros_log_sink.h" #include <chrono> #include <cstring> #include <string> #include <thread> #include "glog/log_severity.h" #include "ros/console.h" namespace cartographer_ros { namespace { const char* GetBasename(const char* filepath) { const char* base = std::strrchr(filepath, '/'); return base ? (base + 1) : filepath; } } // namespace ScopedRosLogSink::ScopedRosLogSink() : will_die_(false) { AddLogSink(this); } ScopedRosLogSink::~ScopedRosLogSink() { RemoveLogSink(this); } void ScopedRosLogSink::send(const ::google::LogSeverity severity, const char* const filename, const char* const base_filename, const int line, const struct std::tm* const tm_time, const char* const message, const size_t message_len) { const std::string message_string = ::google::LogSink::ToString( severity, GetBasename(filename), line, tm_time, message, message_len); switch (severity) { case ::google::GLOG_INFO: ROS_INFO_STREAM(message_string); break; case ::google::GLOG_WARNING: ROS_WARN_STREAM(message_string); break; case ::google::GLOG_ERROR: ROS_ERROR_STREAM(message_string); break; case ::google::GLOG_FATAL: ROS_FATAL_STREAM(message_string); will_die_ = true; break; } } void ScopedRosLogSink::WaitTillSent() { if (will_die_) { // Give ROS some time to actually publish our message. std::this_thread::sleep_for(std::chrono::milliseconds(1000)); } } } // namespace cartographer_ros
af-silva/cartographer_ros
cartographer_ros/src/ros_log_sink.cc
C++
apache-2.0
2,260
/** * Copyright (C) 2013 - present by OpenGamma Inc. and the OpenGamma group of companies * * Please see distribution for license. */ package com.opengamma.engine.marketdata.live; import java.util.Set; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.opengamma.engine.view.ViewProcess; import com.opengamma.engine.view.impl.ViewProcessImpl; import com.opengamma.engine.view.impl.ViewProcessorInternal; import com.opengamma.id.ExternalScheme; import com.opengamma.util.ArgumentChecker; import com.opengamma.util.jms.JmsConnector; /** * Listens to JMS messages announcing that market data providers have become available and * forces all view processes to rebuild their graph. This retries any failed market data subscriptions. * @deprecated This is a temporary fix, once PLAT-3908 is resolved this class won't be necessary */ @Deprecated public class ViewProcessAvailabilityNotificationListener extends AvailabilityNotificationListener { /** Logger. */ private static final Logger s_logger = LoggerFactory.getLogger(ViewProcessAvailabilityNotificationListener.class); /** The view processor, used to obtain the running view processes. */ private final ViewProcessorInternal _viewProcessor; /** * @param topic The topic for {@link MarketDataAvailabilityNotification} messages * @param jmsConnector For receiving JMS messages * @param viewProcessor The view processor, used to obtain the running view processes. */ public ViewProcessAvailabilityNotificationListener(String topic, JmsConnector jmsConnector, ViewProcessorInternal viewProcessor) { super(topic, jmsConnector); ArgumentChecker.notNull(viewProcessor, "viewProcessor"); _viewProcessor = viewProcessor; } @Override protected void notificationReceived(Set<ExternalScheme> schemes) { for (ViewProcess viewProcess : _viewProcessor.getViewProcesses()) { if (viewProcess instanceof ViewProcessImpl) { s_logger.info("Forcing graph rebuild for {}", viewProcess); ((ViewProcessImpl) viewProcess).forceGraphRebuild(); } } } }
jeorme/OG-Platform
projects/OG-Engine/src/main/java/com/opengamma/engine/marketdata/live/ViewProcessAvailabilityNotificationListener.java
Java
apache-2.0
2,195
package com.google.api.ads.dfp.jaxws.v201411; import javax.xml.bind.annotation.XmlEnum; import javax.xml.bind.annotation.XmlType; /** * <p>Java class for ReportJobStatus. * * <p>The following schema fragment specifies the expected content contained within this class. * <p> * <pre> * &lt;simpleType name="ReportJobStatus"> * &lt;restriction base="{http://www.w3.org/2001/XMLSchema}string"> * &lt;enumeration value="COMPLETED"/> * &lt;enumeration value="IN_PROGRESS"/> * &lt;enumeration value="FAILED"/> * &lt;/restriction> * &lt;/simpleType> * </pre> * */ @XmlType(name = "ReportJobStatus") @XmlEnum public enum ReportJobStatus { /** * * The {@link ReportJob} has completed successfully and is ready to download. * * */ COMPLETED, /** * * The {@link ReportJob} is still being executed. * * */ IN_PROGRESS, /** * * The {@link ReportJob} has failed to run to completion. * * */ FAILED; public String value() { return name(); } public static ReportJobStatus fromValue(String v) { return valueOf(v); } }
nafae/developer
modules/dfp_appengine/src/main/java/com/google/api/ads/dfp/jaxws/v201411/ReportJobStatus.java
Java
apache-2.0
1,274
// ---------------------------------------------------------------------------------- // // Copyright Microsoft Corporation // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // ---------------------------------------------------------------------------------- using Microsoft.Azure.Management.SiteRecovery.Models; using System; using System.Collections.Generic; using System.Linq; using System.Management.Automation; namespace Microsoft.Azure.Commands.SiteRecovery { /// <summary> /// Updates Azure Site Recovery Recovery Plan object in memory. /// </summary> [Cmdlet(VerbsData.Edit, "AzureRmSiteRecoveryRecoveryPlan", DefaultParameterSetName = ASRParameterSets.AppendGroup)] public class EditAzureSiteRecoveryRecoveryPlan : SiteRecoveryCmdletBase { #region Parameters /// <summary> /// Gets or sets Name of the Recovery Plan. 
/// </summary> [Parameter(Mandatory = true, ValueFromPipeline = true)] [ValidateNotNullOrEmpty] public ASRRecoveryPlan RecoveryPlan { get; set; } /// <summary> /// Gets or sets switch parameter /// </summary> [Parameter(ParameterSetName = ASRParameterSets.AppendGroup, Mandatory = true)] public SwitchParameter AppendGroup { get; set; } /// <summary> /// Gets or sets switch parameter /// </summary> [Parameter(ParameterSetName = ASRParameterSets.RemoveGroup, Mandatory = true)] public ASRRecoveryPlanGroup RemoveGroup { get; set; } /// <summary> /// Gets or sets group /// </summary> [Parameter(ParameterSetName = ASRParameterSets.AddProtectedEntities, Mandatory = true)] [Parameter(ParameterSetName = ASRParameterSets.RemoveProtectedEntities, Mandatory = true)] public ASRRecoveryPlanGroup Group { get; set; } /// <summary> /// Gets or sets switch parameter /// </summary> [Parameter(ParameterSetName = ASRParameterSets.AddProtectedEntities, Mandatory = true)] public ASRProtectionEntity[] AddProtectedEntities { get; set; } /// <summary> /// Gets or sets switch parameter /// </summary> [Parameter(ParameterSetName = ASRParameterSets.RemoveProtectedEntities, Mandatory = true)] public ASRProtectionEntity[] RemoveProtectedEntities { get; set; } #endregion Parameters /// <summary> /// ProcessRecord of the command. 
/// </summary> public override void ExecuteSiteRecoveryCmdlet() { base.ExecuteSiteRecoveryCmdlet(); ASRRecoveryPlanGroup tempGroup; switch (this.ParameterSetName) { case ASRParameterSets.AppendGroup: RecoveryPlanGroup recoveryPlanGroup = new RecoveryPlanGroup() { GroupType = Constants.Boot, ReplicationProtectedItems = new List<RecoveryPlanProtectedItem>(), StartGroupActions = new List<RecoveryPlanAction>(), EndGroupActions = new List<RecoveryPlanAction>() }; this.RecoveryPlan.Groups.Add(new ASRRecoveryPlanGroup("Group " + (RecoveryPlan.Groups.Count - 1).ToString(), recoveryPlanGroup)); break; case ASRParameterSets.RemoveGroup: tempGroup = this.RecoveryPlan.Groups.FirstOrDefault(g => String.Compare(g.Name, RemoveGroup.Name, StringComparison.OrdinalIgnoreCase) == 0); if (tempGroup != null) { this.RecoveryPlan.Groups.Remove(tempGroup); this.RecoveryPlan = this.RecoveryPlan.RefreshASRRecoveryPlanGroupNames(); } else { throw new PSArgumentException(string.Format(Properties.Resources.GroupNotFoundInRecoveryPlan, this.RemoveGroup.Name, this.RecoveryPlan.FriendlyName)); } break; case ASRParameterSets.AddProtectedEntities: foreach (ASRProtectionEntity pe in AddProtectedEntities) { string fabricName = Utilities.GetValueFromArmId(pe.ID, ARMResourceTypeConstants.ReplicationFabrics); // fetch the latest PE object ProtectableItemResponse protectableItemResponse = RecoveryServicesClient.GetAzureSiteRecoveryProtectableItem(fabricName, pe.ProtectionContainerId, pe.Name); ReplicationProtectedItemResponse replicationProtectedItemResponse = RecoveryServicesClient.GetAzureSiteRecoveryReplicationProtectedItem(fabricName, pe.ProtectionContainerId, Utilities.GetValueFromArmId(protectableItemResponse.ProtectableItem.Properties.ReplicationProtectedItemId, ARMResourceTypeConstants.ReplicationProtectedItems)); tempGroup = this.RecoveryPlan.Groups.FirstOrDefault(g => String.Compare(g.Name, Group.Name, StringComparison.OrdinalIgnoreCase) == 0); if (tempGroup != null) { foreach 
(ASRRecoveryPlanGroup gp in this.RecoveryPlan.Groups) { if (gp.ReplicationProtectedItems == null) continue; if (gp.ReplicationProtectedItems.Any(pi => String.Compare(pi.Id, replicationProtectedItemResponse.ReplicationProtectedItem.Id, StringComparison.OrdinalIgnoreCase) == 0)) { throw new PSArgumentException(string.Format(Properties.Resources.VMAlreadyPartOfGroup, pe.FriendlyName, gp.Name, this.RecoveryPlan.FriendlyName)); } } this.RecoveryPlan.Groups[RecoveryPlan.Groups.IndexOf(tempGroup)].ReplicationProtectedItems.Add(replicationProtectedItemResponse.ReplicationProtectedItem); } else { throw new PSArgumentException(string.Format(Properties.Resources.GroupNotFoundInRecoveryPlan, this.Group.Name, this.RecoveryPlan.FriendlyName)); } } break; case ASRParameterSets.RemoveProtectedEntities: foreach (ASRProtectionEntity pe in RemoveProtectedEntities) { string fabricName = Utilities.GetValueFromArmId(pe.ID, ARMResourceTypeConstants.ReplicationFabrics); // fetch the latest PE object ProtectableItemResponse protectableItemResponse = RecoveryServicesClient.GetAzureSiteRecoveryProtectableItem(fabricName, pe.ProtectionContainerId, pe.Name); tempGroup = this.RecoveryPlan.Groups.FirstOrDefault(g => String.Compare(g.Name, Group.Name, StringComparison.OrdinalIgnoreCase) == 0); if (tempGroup != null) { var ReplicationProtectedItem = this.RecoveryPlan.Groups[RecoveryPlan.Groups.IndexOf(tempGroup)]. ReplicationProtectedItems. 
FirstOrDefault(pi => String.Compare(pi.Id, protectableItemResponse.ProtectableItem.Properties.ReplicationProtectedItemId, StringComparison.OrdinalIgnoreCase) == 0); if (ReplicationProtectedItem != null) { this.RecoveryPlan.Groups[RecoveryPlan.Groups.IndexOf(tempGroup)].ReplicationProtectedItems.Remove(ReplicationProtectedItem); } else { throw new PSArgumentException(string.Format(Properties.Resources.VMNotFoundInGroup, pe.FriendlyName, this.Group.Name, this.RecoveryPlan.FriendlyName)); } } else { throw new PSArgumentException(string.Format(Properties.Resources.GroupNotFoundInRecoveryPlan, this.Group.Name, this.RecoveryPlan.FriendlyName)); } } break; }; this.WriteObject(this.RecoveryPlan); } } }
hovsepm/azure-powershell
src/ResourceManager/SiteRecovery/Commands.SiteRecovery/RecoveryPlan/EditAzureSiteRecoveryRecoveryPlan.cs
C#
apache-2.0
9,317
// This file was procedurally generated from the following sources: // - src/dstr-binding-for-await/ary-ptrn-rest-ary-rest.case // - src/dstr-binding-for-await/default/for-await-of-async-gen-const.template /*--- description: Rest element containing a rest element (for-await-of statement) esid: sec-for-in-and-for-of-statements-runtime-semantics-labelledevaluation features: [destructuring-binding, async-iteration] flags: [generated, async] info: | IterationStatement : for await ( ForDeclaration of AssignmentExpression ) Statement [...] 2. Return ? ForIn/OfBodyEvaluation(ForDeclaration, Statement, keyResult, lexicalBinding, labelSet, async). 13.7.5.13 Runtime Semantics: ForIn/OfBodyEvaluation [...] 4. Let destructuring be IsDestructuring of lhs. [...] 6. Repeat [...] j. If destructuring is false, then [...] k. Else i. If lhsKind is assignment, then [...] ii. Else if lhsKind is varBinding, then [...] iii. Else, 1. Assert: lhsKind is lexicalBinding. 2. Assert: lhs is a ForDeclaration. 3. Let status be the result of performing BindingInitialization for lhs passing nextValue and iterationEnv as arguments. [...] 13.3.3.6 Runtime Semantics: IteratorBindingInitialization BindingRestElement : ... BindingPattern 1. Let A be ArrayCreate(0). [...] 3. Repeat [...] b. If iteratorRecord.[[done]] is true, then i. Return the result of performing BindingInitialization of BindingPattern with A and environment as the arguments. [...] ---*/ var values = [1, 2, 3]; var iterCount = 0; async function *fn() { for await (const [...[...x]] of [values]) { assert(Array.isArray(x)); assert.sameValue(x.length, 3); assert.sameValue(x[0], 1); assert.sameValue(x[1], 2); assert.sameValue(x[2], 3); assert.notSameValue(x, values); iterCount += 1; } } fn().next() .then(() => assert.sameValue(iterCount, 1, 'iteration occurred as expected'), $DONE) .then($DONE, $DONE);
sebastienros/jint
Jint.Tests.Test262/test/language/statements/for-await-of/async-gen-dstr-const-ary-ptrn-rest-ary-rest.js
JavaScript
bsd-2-clause
2,180
cask 'progressive-downloader' do version '2.11.3' sha256 'ca8c5ef946b8c745837d39980b5d8c95d3f15ad28d8f531289163cf6079760b6' url "https://www.macpsd.net/update/#{version}/PSD.dmg" name 'Progressive Downloader' homepage 'https://www.macpsd.net/' depends_on macos: '>= :mavericks' app 'Progressive Downloader.app' zap delete: [ '~/Library/Preferences/com.PS.PSD.plist', '~/Library/Application Support/Progressive Downloader Data', '~/Library/Caches/com.PS.PSD', ] end
muan/homebrew-cask
Casks/progressive-downloader.rb
Ruby
bsd-2-clause
547
/* * Copyright (C) 2008 Apple Inc. All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/**
 * A sortable, optionally editable table widget. The grid is built from two
 * <table> elements — a fixed header table and a scrollable data table — kept
 * in sync via shared column weights.
 * @constructor
 * @extends {WebInspector.Widget}
 * @param {!Array.<!WebInspector.DataGrid.ColumnDescriptor>} columnsArray
 * @param {function(!WebInspector.DataGridNode, string, string, string)=} editCallback invoked as (node, columnIdentifier, oldText, newText) after an in-place edit commits
 * @param {function(!WebInspector.DataGridNode)=} deleteCallback invoked when the user deletes the selected node (Backspace/Delete or context menu)
 * @param {function()=} refreshCallback invoked from the "Refresh" context-menu item
 * @param {function(!WebInspector.ContextMenu, !WebInspector.DataGridNode)=} contextMenuCallback lets the embedder append extra context-menu items
 */
WebInspector.DataGrid = function(columnsArray, editCallback, deleteCallback, refreshCallback, contextMenuCallback)
{
    WebInspector.Widget.call(this);
    this.registerRequiredCSS("ui_lazy/dataGrid.css");
    this.element.className = "data-grid"; // Override
    this.element.tabIndex = 0; // Make the grid focusable so it can receive key events.
    this.element.addEventListener("keydown", this._keyDown.bind(this), false);

    var headerContainer = createElementWithClass("div", "header-container");
    /** @type {!Element} */
    this._headerTable = headerContainer.createChild("table", "header");
    /** @type {!Object.<string, !Element>} */
    this._headerTableHeaders = {};

    /** @type {!Element} */
    this._scrollContainer = createElementWithClass("div", "data-container");
    /** @type {!Element} */
    this._dataTable = this._scrollContainer.createChild("table", "data");

    this._dataTable.addEventListener("mousedown", this._mouseDownInDataTable.bind(this));
    this._dataTable.addEventListener("click", this._clickInDataTable.bind(this), true);
    this._dataTable.addEventListener("contextmenu", this._contextMenuInDataTable.bind(this), true);

    // FIXME: Add a createCallback which is different from editCallback and has different
    // behavior when creating a new node.
    if (editCallback)
        this._dataTable.addEventListener("dblclick", this._ondblclick.bind(this), false);
    /** @type {function(!WebInspector.DataGridNode, string, string, string)|undefined} */
    this._editCallback = editCallback;
    /** @type {function(!WebInspector.DataGridNode)|undefined} */
    this._deleteCallback = deleteCallback;
    /** @type {function()|undefined} */
    this._refreshCallback = refreshCallback;
    /** @type {function(!WebInspector.ContextMenu, !WebInspector.DataGridNode)|undefined} */
    this._contextMenuCallback = contextMenuCallback;

    this.element.appendChild(headerContainer);
    this.element.appendChild(this._scrollContainer);

    /** @type {!Element} */
    this._headerRow = createElement("tr");
    /** @type {!Element} */
    this._headerTableColumnGroup = createElement("colgroup");
    /** @type {!Element} */
    this._dataTableColumnGroup = createElement("colgroup");

    /** @type {!Element} */
    this._topFillerRow = createElementWithClass("tr", "revealed");
    /** @type {!Element} */
    this._bottomFillerRow = createElementWithClass("tr", "revealed");
    this.setVerticalPadding(0, 0);

    /** @type {!Array.<!WebInspector.DataGrid.ColumnDescriptor>} */
    this._columnsArray = columnsArray;
    /** @type {!Array.<!WebInspector.DataGrid.ColumnDescriptor>} */
    this._visibleColumnsArray = columnsArray;
    /** @type {!Object.<string, !WebInspector.DataGrid.ColumnDescriptor>} */
    this._columns = {};
    /** @type {?string} */
    this._cellClass = null;

    // Build one header <th> per column; these cells are reused by
    // _refreshHeader() when column visibility changes.
    for (var i = 0; i < columnsArray.length; ++i) {
        var column = columnsArray[i];
        // A column without an explicit id falls back to its index as identifier.
        var columnIdentifier = column.identifier = column.id || i;
        this._columns[columnIdentifier] = column;
        if (column.disclosure)
            this.disclosureColumnIdentifier = columnIdentifier;

        var cell = createElement("th");
        cell.className = columnIdentifier + "-column";
        cell.columnIdentifier = columnIdentifier;
        this._headerTableHeaders[columnIdentifier] = cell;

        var div = createElement("div");
        if (column.titleDOMFragment)
            div.appendChild(column.titleDOMFragment);
        else
            div.textContent = column.title;
        cell.appendChild(div);

        if (column.sort) {
            // Descriptor pre-selects the initial sort column/direction.
            cell.classList.add(column.sort);
            this._sortColumnCell = cell;
        }

        if (column.sortable) {
            cell.addEventListener("click", this._clickInHeaderCell.bind(this), false);
            cell.classList.add("sortable");
            cell.createChild("div", "sort-order-icon-container").createChild("div", "sort-order-icon");
        }
    }

    this._headerTable.appendChild(this._headerTableColumnGroup);
    this.headerTableBody.appendChild(this._headerRow);

    this._dataTable.appendChild(this._dataTableColumnGroup);
    this.dataTableBody.appendChild(this._topFillerRow);
    this.dataTableBody.appendChild(this._bottomFillerRow);

    this._refreshHeader();

    /** @type {boolean} */
    this._editing = false;
    /** @type {?WebInspector.DataGridNode} */
    this.selectedNode = null;
    /** @type {boolean} */
    this.expandNodesWhenArrowing = false;
    this.setRootNode(new WebInspector.DataGridNode());
    /** @type {number} */
    this.indentWidth = 15;
    /** @type {!Array.<!Element|{__index: number, __position: number}>} */
    this._resizers = [];
    /** @type {boolean} */
    this._columnWidthsInitialized = false;
    /** @type {number} */
    this._cornerWidth = WebInspector.DataGrid.CornerWidth;
    /** @type {!WebInspector.DataGrid.ResizeMethod} */
    this._resizeMethod = WebInspector.DataGrid.ResizeMethod.Nearest;
}

// Keep in sync with .data-grid col.corner style rule.
WebInspector.DataGrid.CornerWidth = 14;

/** @typedef {!{id: ?string, editable: boolean, longText: ?boolean, sort: !WebInspector.DataGrid.Order, sortable: boolean, align: !WebInspector.DataGrid.Align}} */
WebInspector.DataGrid.ColumnDescriptor;

// Event types dispatched by the grid (via dispatchEventToListeners).
WebInspector.DataGrid.Events = {
    SelectedNode: "SelectedNode",
    DeselectedNode: "DeselectedNode",
    SortingChanged: "SortingChanged",
    ColumnsResized: "ColumnsResized"
}

/** @enum {string} */
WebInspector.DataGrid.Order = {
    // The enum values double as CSS class names on the sorted header cell.
    Ascending: "sort-ascending",
    Descending: "sort-descending"
}

/** @enum {string} */
WebInspector.DataGrid.Align = {
    Center: "center",
    Right: "right"
}

WebInspector.DataGrid.prototype = {
    /**
     * Sets an extra CSS class applied to data cells.
     * @param {string} cellClass
     */
    setCellClass: function(cellClass)
    {
        this._cellClass = cellClass;
    },

    /**
     * Rebuilds the header row, colgroups and filler rows from the currently
     * visible columns. Header cells themselves are reused from
     * this._headerTableHeaders.
     */
    _refreshHeader: function()
    {
        this._headerTableColumnGroup.removeChildren();
        this._dataTableColumnGroup.removeChildren();
        this._headerRow.removeChildren();
        this._topFillerRow.removeChildren();
        this._bottomFillerRow.removeChildren();

        for (var i = 0; i < this._visibleColumnsArray.length; ++i) {
            var column = this._visibleColumnsArray[i];
            var columnIdentifier = column.identifier;
            var headerColumn = this._headerTableColumnGroup.createChild("col");
            var dataColumn = this._dataTableColumnGroup.createChild("col");
            if (column.width) {
                headerColumn.style.width = column.width;
                dataColumn.style.width = column.width;
            }
            this._headerRow.appendChild(this._headerTableHeaders[columnIdentifier]);
            this._topFillerRow.createChild("td", "top-filler-td");
            this._bottomFillerRow.createChild("td", "bottom-filler-td").columnIdentifier_ = columnIdentifier;
        }

        // Trailing "corner" column absorbs the scrollbar width.
        this._headerRow.createChild("th", "corner");
        this._topFillerRow.createChild("td", "corner").classList.add("top-filler-td");
        this._bottomFillerRow.createChild("td", "corner").classList.add("bottom-filler-td");
        this._headerTableColumnGroup.createChild("col", "corner");
        this._dataTableColumnGroup.createChild("col", "corner");
    },

    /**
     * Sets the heights of the filler rows used for virtual-scrolling padding.
     * @param {number} top
     * @param {number} bottom
     * @protected
     */
    setVerticalPadding: function(top, bottom)
    {
        this._topFillerRow.style.height = top + "px";
        // With no padding at all, let the bottom filler stretch to fill the table.
        if (top || bottom)
            this._bottomFillerRow.style.height = bottom + "px";
        else
            this._bottomFillerRow.style.height = "auto";
    },

    /**
     * Installs a new root node, detaching and clearing the previous one.
     * @param {!WebInspector.DataGridNode} rootNode
     * @protected
     */
    setRootNode: function(rootNode)
    {
        if (this._rootNode) {
            this._rootNode.removeChildren();
            this._rootNode.dataGrid = null;
            this._rootNode._isRoot = false;
        }
        /** @type {!WebInspector.DataGridNode} */
        this._rootNode = rootNode;
        rootNode._isRoot = true;
        rootNode.hasChildren = false;
        rootNode._expanded = true;
        rootNode._revealed = true;
        rootNode.dataGrid = this;
    },

    /**
     * @return {!WebInspector.DataGridNode}
     */
    rootNode: function()
    {
        return this._rootNode;
    },

    // Double-click handler: starts in-place editing of an editable cell.
    _ondblclick: function(event)
    {
        if (this._editing || this._editingNode)
            return;

        var columnIdentifier = this.columnIdentifierFromNode(event.target);
        if (!columnIdentifier || !this._columns[columnIdentifier].editable)
            return;
        this._startEditing(event.target);
    },

    /**
     * Begins editing the given cell of a node and selects its text.
     * @param {!WebInspector.DataGridNode} node
     * @param {number} cellIndex index into the node's row cells (visible columns)
     */
    _startEditingColumnOfDataGridNode: function(node, cellIndex)
    {
        this._editing = true;
        /** @type {?WebInspector.DataGridNode} */
        this._editingNode = node;
        this._editingNode.select();

        var element = this._editingNode._element.children[cellIndex];
        WebInspector.InplaceEditor.startEditing(element, this._startEditingConfig(element));
        // Select the cell's entire contents so typing replaces them.
        element.getComponentSelection().setBaseAndExtent(element, 0, element, 1);
    },

    // Starts editing the cell that contains |target|; falls back to the
    // creation (placeholder) node when the target is not inside a data row.
    _startEditing: function(target)
    {
        var element = target.enclosingNodeOrSelfWithNodeName("td");
        if (!element)
            return;

        this._editingNode = this.dataGridNodeFromNode(target);
        if (!this._editingNode) {
            if (!this.creationNode)
                return;
            this._editingNode = this.creationNode;
        }

        // Force editing the 1st column when editing the creation node
        if (this._editingNode.isCreationNode)
            return this._startEditingColumnOfDataGridNode(this._editingNode, this._nextEditableColumn(-1));

        this._editing = true;
        WebInspector.InplaceEditor.startEditing(element, this._startEditingConfig(element));
        element.getComponentSelection().setBaseAndExtent(element, 0, element, 1);
    },

    // Switches the grid to compact inline rendering (no corner column).
    renderInline: function()
    {
        this.element.classList.add("inline");
        this._cornerWidth = 0;
        this.updateWidths();
    },

    _startEditingConfig: function(element)
    {
        return new WebInspector.InplaceEditor.Config(this._editingCommitted.bind(this), this._editingCancelled.bind(this), element.textContent);
    },

    /**
     * InplaceEditor commit handler: stores the new cell text, notifies the
     * edit callback, and optionally advances editing to the next/previous
     * editable cell depending on |moveDirection| ("forward"/"backward").
     */
    _editingCommitted: function(element, newText, oldText, context, moveDirection)
    {
        var columnIdentifier = this.columnIdentifierFromNode(element);
        if (!columnIdentifier) {
            this._editingCancelled(element);
            return;
        }
        var column = this._columns[columnIdentifier];
        var cellIndex = this._visibleColumnsArray.indexOf(column);
        var textBeforeEditing = this._editingNode.data[columnIdentifier];
        var currentEditingNode = this._editingNode;

        /**
         * @param {boolean} wasChange
         * @this {WebInspector.DataGrid}
         */
        function moveToNextIfNeeded(wasChange)
        {
            if (!moveDirection)
                return;

            if (moveDirection === "forward") {
                var firstEditableColumn = this._nextEditableColumn(-1);
                // Don't tab away from an untouched creation node's first cell.
                if (currentEditingNode.isCreationNode && cellIndex === firstEditableColumn && !wasChange)
                    return;

                var nextEditableColumn = this._nextEditableColumn(cellIndex);
                if (nextEditableColumn !== -1)
                    return this._startEditingColumnOfDataGridNode(currentEditingNode, nextEditableColumn);

                var nextDataGridNode = currentEditingNode.traverseNextNode(true, null, true);
                if (nextDataGridNode)
                    return this._startEditingColumnOfDataGridNode(nextDataGridNode, firstEditableColumn);
                if (currentEditingNode.isCreationNode && wasChange) {
                    // Committed a value into the creation node: append a fresh one.
                    this.addCreationNode(false);
                    return this._startEditingColumnOfDataGridNode(this.creationNode, firstEditableColumn);
                }
                return;
            }

            if (moveDirection === "backward") {
                var prevEditableColumn = this._nextEditableColumn(cellIndex, true);
                if (prevEditableColumn !== -1)
                    return this._startEditingColumnOfDataGridNode(currentEditingNode, prevEditableColumn);

                var lastEditableColumn = this._nextEditableColumn(this._visibleColumnsArray.length, true);
                var nextDataGridNode = currentEditingNode.traversePreviousNode(true, true);
                if (nextDataGridNode)
                    return this._startEditingColumnOfDataGridNode(nextDataGridNode, lastEditableColumn);
                return;
            }
        }

        if (textBeforeEditing == newText) {
            this._editingCancelled(element);
            moveToNextIfNeeded.call(this, false);
            return;
        }

        // Update the text in the datagrid that we typed
        this._editingNode.data[columnIdentifier] = newText;

        // Make the callback - expects an editing node (table row), the column number that is being edited,
        // the text that used to be there, and the new text.
        this._editCallback(this._editingNode, columnIdentifier, textBeforeEditing, newText);

        if (this._editingNode.isCreationNode)
            this.addCreationNode(false);

        this._editingCancelled(element);
        moveToNextIfNeeded.call(this, true);
    },

    // Resets editing state. Note: does not restore the cell's original text;
    // presumably InplaceEditor handles that — confirm against its contract.
    _editingCancelled: function(element)
    {
        this._editing = false;
        this._editingNode = null;
    },

    /**
     * Finds the next editable visible column starting after |cellIndex|
     * (or before it when |moveBackward| is set).
     * @param {number} cellIndex
     * @param {boolean=} moveBackward
     * @return {number} column index, or -1 when none exists
     */
    _nextEditableColumn: function(cellIndex, moveBackward)
    {
        var increment = moveBackward ? -1 : 1;
        var columns = this._visibleColumnsArray;
        for (var i = cellIndex + increment; (i >= 0) && (i < columns.length); i += increment) {
            if (columns[i].editable)
                return i;
        }
        return -1;
    },

    /**
     * @return {?string}
     */
    sortColumnIdentifier: function()
    {
        if (!this._sortColumnCell)
            return null;
        return this._sortColumnCell.columnIdentifier;
    },

    /**
     * Current sort order; defaults to Ascending when no column is sorted.
     * @return {?string}
     */
    sortOrder: function()
    {
        if (!this._sortColumnCell || this._sortColumnCell.classList.contains(WebInspector.DataGrid.Order.Ascending))
            return WebInspector.DataGrid.Order.Ascending;
        if (this._sortColumnCell.classList.contains(WebInspector.DataGrid.Order.Descending))
            return WebInspector.DataGrid.Order.Descending;
        return null;
    },

    /**
     * @return {boolean}
     */
    isSortOrderAscending: function()
    {
        return !this._sortColumnCell || this._sortColumnCell.classList.contains(WebInspector.DataGrid.Order.Ascending);
    },

    // Lazily-created, cached <tbody> of the header table.
    get headerTableBody()
    {
        if ("_headerTableBody" in this)
            return this._headerTableBody;

        this._headerTableBody = this._headerTable.getElementsByTagName("tbody")[0];
        if (!this._headerTableBody) {
            this._headerTableBody = this.element.ownerDocument.createElement("tbody");
            this._headerTable.insertBefore(this._headerTableBody, this._headerTable.tFoot);
        }

        return this._headerTableBody;
    },

    // Lazily-created, cached <tbody> of the data table.
    get dataTableBody()
    {
        if ("_dataTableBody" in this)
            return this._dataTableBody;

        this._dataTableBody = this._dataTable.getElementsByTagName("tbody")[0];
        if (!this._dataTableBody) {
            this._dataTableBody = this.element.ownerDocument.createElement("tbody");
            this._dataTable.insertBefore(this._dataTableBody, this._dataTable.tFoot);
        }

        return this._dataTableBody;
    },

    /**
     * Converts raw character widths into percentages that sum to 100,
     * clamped between |minPercent| and |maxPercent|. Mutates and returns
     * |widths|.
     * @param {!Array.<number>} widths
     * @param {number} minPercent
     * @param {number=} maxPercent
     * @return {!Array.<number>}
     */
    _autoSizeWidths: function(widths, minPercent, maxPercent)
    {
        if (minPercent)
            minPercent = Math.min(minPercent, Math.floor(100 / widths.length));
        var totalWidth = 0;
        for (var i = 0; i < widths.length; ++i)
            totalWidth += widths[i];
        var totalPercentWidth = 0;
        for (var i = 0; i < widths.length; ++i) {
            var width = Math.round(100 * widths[i] / totalWidth);
            if (minPercent && width < minPercent)
                width = minPercent;
            else if (maxPercent && width > maxPercent)
                width = maxPercent;
            totalPercentWidth += width;
            widths[i] = width;
        }
        // Rounding/clamping may leave the total off 100; redistribute the
        // surplus (or deficit) one percent at a time.
        var recoupPercent = totalPercentWidth - 100;

        while (minPercent && recoupPercent > 0) {
            for (var i = 0; i < widths.length; ++i) {
                if (widths[i] > minPercent) {
                    --widths[i];
                    --recoupPercent;
                    if (!recoupPercent)
                        break;
                }
            }
        }

        while (maxPercent && recoupPercent < 0) {
            for (var i = 0; i < widths.length; ++i) {
                if (widths[i] < maxPercent) {
                    ++widths[i];
                    ++recoupPercent;
                    if (!recoupPercent)
                        break;
                }
            }
        }

        return widths;
    },

    /**
     * Sizes columns from the text lengths of titles and cell data, scanning
     * nodes down to |maxDescentLevel| levels below the root.
     * @param {number} minPercent
     * @param {number=} maxPercent
     * @param {number=} maxDescentLevel
     */
    autoSizeColumns: function(minPercent, maxPercent, maxDescentLevel)
    {
        var widths = [];
        for (var i = 0; i < this._columnsArray.length; ++i)
            widths.push((this._columnsArray[i].title || "").length);

        maxDescentLevel = maxDescentLevel || 0;
        var children = this._enumerateChildren(this._rootNode, [], maxDescentLevel + 1);
        for (var i = 0; i < children.length; ++i) {
            var node = children[i];
            for (var j = 0; j < this._columnsArray.length; ++j) {
                var text = node.data[this._columnsArray[j].identifier] || "";
                if (text.length > widths[j])
                    widths[j] = text.length;
            }
        }

        widths = this._autoSizeWidths(widths, minPercent, maxPercent);

        for (var i = 0; i < this._columnsArray.length; ++i)
            this._columnsArray[i].weight = widths[i];
        this._columnWidthsInitialized = false;
        this.updateWidths();
    },

    // Depth-first collection of descendants into |result|, up to |maxLevel|
    // levels deep (negative maxLevel effectively means unbounded).
    _enumerateChildren: function(rootNode, result, maxLevel)
    {
        if (!rootNode._isRoot)
            result.push(rootNode);
        if (!maxLevel)
            return;
        for (var i = 0; i < rootNode.children.length; ++i)
            this._enumerateChildren(rootNode.children[i], result, maxLevel - 1);
        return result;
    },

    onResize: function()
    {
        this.updateWidths();
    },

    // Updates the widths of the table, including the positions of the column
    // resizers.
    //
    // IMPORTANT: This function MUST be called once after the element of the
    // DataGrid is attached to its parent element and every subsequent time the
    // width of the parent element is changed in order to make it possible to
    // resize the columns.
    //
    // If this function is not called after the DataGrid is attached to its
    // parent element, then the DataGrid's columns will not be resizable.
    updateWidths: function()
    {
        var headerTableColumns = this._headerTableColumnGroup.children;

        // Use container size to avoid changes of table width caused by change of column widths.
        var tableWidth = this.element.offsetWidth - this._cornerWidth;
        var numColumns = headerTableColumns.length - 1; // Do not process corner column.

        // Do not attempt to use offsets if we're not attached to the document tree yet.
        if (!this._columnWidthsInitialized && this.element.offsetWidth) {
            // Give all the columns initial widths now so that during a resize,
            // when the two columns that get resized get a percent value for
            // their widths, all the other columns already have percent values
            // for their widths.
            for (var i = 0; i < numColumns; i++) {
                var columnWidth = this.headerTableBody.rows[0].cells[i].offsetWidth;
                var column = this._visibleColumnsArray[i];
                if (!column.weight)
                    column.weight = 100 * columnWidth / tableWidth;
            }
            this._columnWidthsInitialized = true;
        }
        this._applyColumnWeights();
    },

    /**
     * Names the grid so its column weights persist across sessions.
     * @param {string} name
     */
    setName: function(name)
    {
        this._columnWeightsSetting = WebInspector.settings.createSetting("dataGrid-" + name + "-columnWeights", {});
        this._loadColumnWeights();
    },

    // Restores saved per-column weights (no-op until setName() was called).
    _loadColumnWeights: function()
    {
        if (!this._columnWeightsSetting)
            return;
        var weights = this._columnWeightsSetting.get();
        for (var i = 0; i < this._columnsArray.length; ++i) {
            var column = this._columnsArray[i];
            var weight = weights[column.identifier];
            if (weight)
                column.weight = weight;
        }
        this._applyColumnWeights();
    },

    // Persists current per-column weights (no-op until setName() was called).
    _saveColumnWeights: function()
    {
        if (!this._columnWeightsSetting)
            return;
        var weights = {};
        for (var i = 0; i < this._columnsArray.length; ++i) {
            var column = this._columnsArray[i];
            weights[column.identifier] = column.weight;
        }
        this._columnWeightsSetting.set(weights);
    },

    wasShown: function()
    {
        this._loadColumnWeights();
    },

    // Distributes the available table width across visible columns in
    // proportion to their weights, then repositions the drag resizers.
    _applyColumnWeights: function()
    {
        var tableWidth = this.element.offsetWidth - this._cornerWidth;
        if (tableWidth <= 0)
            return;

        var sumOfWeights = 0.0;
        for (var i = 0; i < this._visibleColumnsArray.length; ++i)
            sumOfWeights += this._visibleColumnsArray[i].weight;

        var sum = 0;
        var lastOffset = 0;

        for (var i = 0; i < this._visibleColumnsArray.length; ++i) {
            sum += this._visibleColumnsArray[i].weight;
            // Accumulate-then-truncate so per-column rounding errors don't add up.
            var offset = (sum * tableWidth / sumOfWeights) | 0;
            var width = (offset - lastOffset) + "px";
            this._headerTableColumnGroup.children[i].style.width = width;
            this._dataTableColumnGroup.children[i].style.width = width;
            lastOffset = offset;
        }

        this._positionResizers();
        this.dispatchEventToListeners(WebInspector.DataGrid.Events.ColumnsResized);
    },

    /**
     * Shows/hides columns and refreshes every node's row.
     * (Method name keeps the historical "Visiblity" spelling; callers depend on it.)
     * @param {!Object.<string, boolean>} columnsVisibility
     */
    setColumnsVisiblity: function(columnsVisibility)
    {
        this._visibleColumnsArray = [];
        for (var i = 0; i < this._columnsArray.length; ++i) {
            var column = this._columnsArray[i];
            if (columnsVisibility[column.identifier])
                this._visibleColumnsArray.push(column);
        }
        this._refreshHeader();
        this._applyColumnWeights();
        var nodes = this._enumerateChildren(this.rootNode(), [], -1);
        for (var i = 0; i < nodes.length; ++i)
            nodes[i].refresh();
    },

    get scrollContainer()
    {
        return this._scrollContainer;
    },

    // Creates/positions the n-1 draggable column-boundary handles.
    _positionResizers: function()
    {
        var headerTableColumns = this._headerTableColumnGroup.children;
        var numColumns = headerTableColumns.length - 1; // Do not process corner column.
        var left = [];
        var resizers = this._resizers;

        // Drop surplus resizers when columns were hidden.
        while (resizers.length > numColumns - 1)
            resizers.pop().remove();

        for (var i = 0; i < numColumns - 1; i++) {
            // Get the width of the cell in the first (and only) row of the
            // header table in order to determine the width of the column, since
            // it is not possible to query a column for its width.
            left[i] = (left[i-1] || 0) + this.headerTableBody.rows[0].cells[i].offsetWidth;
        }

        // Make n - 1 resizers for n columns.
        for (var i = 0; i < numColumns - 1; i++) {
            var resizer = resizers[i];
            if (!resizer) {
                // This is the first call to updateWidth, so the resizers need
                // to be created.
                resizer = createElement("div");
                resizer.__index = i;
                resizer.classList.add("data-grid-resizer");
                // This resizer is associated with the column to its right.
                WebInspector.installDragHandle(resizer, this._startResizerDragging.bind(this), this._resizerDragging.bind(this), this._endResizerDragging.bind(this), "col-resize");
                this.element.appendChild(resizer);
                resizers.push(resizer);
            }
            if (resizer.__position !== left[i]) {
                resizer.__position = left[i];
                resizer.style.left = left[i] + "px";
            }
        }
    },

    // Appends the editable placeholder ("creation") row used to add new entries.
    addCreationNode: function(hasChildren)
    {
        if (this.creationNode)
            this.creationNode.makeNormal();

        var emptyData = {};
        for (var column in this._columns)
            emptyData[column] = null;
        this.creationNode = new WebInspector.CreationDataGridNode(emptyData, hasChildren);
        this.rootNode().appendChild(this.creationNode);
    },

    // Keyboard navigation: Up/Down move selection, Left/Right collapse/expand,
    // Backspace/Delete remove, Enter starts editing.
    _keyDown: function(event)
    {
        if (!this.selectedNode || event.shiftKey || event.metaKey || event.ctrlKey || this._editing)
            return;

        var handled = false;
        var nextSelectedNode;
        if (event.keyIdentifier === "Up" && !event.altKey) {
            nextSelectedNode = this.selectedNode.traversePreviousNode(true);
            while (nextSelectedNode && !nextSelectedNode.selectable)
                nextSelectedNode = nextSelectedNode.traversePreviousNode(true);
            handled = nextSelectedNode ? true : false;
        } else if (event.keyIdentifier === "Down" && !event.altKey) {
            nextSelectedNode = this.selectedNode.traverseNextNode(true);
            while (nextSelectedNode && !nextSelectedNode.selectable)
                nextSelectedNode = nextSelectedNode.traverseNextNode(true);
            handled = nextSelectedNode ? true : false;
        } else if (event.keyIdentifier === "Left") {
            if (this.selectedNode.expanded) {
                if (event.altKey)
                    this.selectedNode.collapseRecursively();
                else
                    this.selectedNode.collapse();
                handled = true;
            } else if (this.selectedNode.parent && !this.selectedNode.parent._isRoot) {
                handled = true;
                if (this.selectedNode.parent.selectable) {
                    nextSelectedNode = this.selectedNode.parent;
                    handled = nextSelectedNode ? true : false;
                } else if (this.selectedNode.parent)
                    this.selectedNode.parent.collapse();
            }
        } else if (event.keyIdentifier === "Right") {
            if (!this.selectedNode.revealed) {
                this.selectedNode.reveal();
                handled = true;
            } else if (this.selectedNode.hasChildren) {
                handled = true;
                if (this.selectedNode.expanded) {
                    nextSelectedNode = this.selectedNode.children[0];
                    handled = nextSelectedNode ? true : false;
                } else {
                    if (event.altKey)
                        this.selectedNode.expandRecursively();
                    else
                        this.selectedNode.expand();
                }
            }
        } else if (event.keyCode === 8 || event.keyCode === 46) {
            // Backspace (8) or Delete (46).
            if (this._deleteCallback) {
                handled = true;
                this._deleteCallback(this.selectedNode);
                this.changeNodeAfterDeletion();
            }
        } else if (isEnterKey(event)) {
            if (this._editCallback) {
                handled = true;
                this._startEditing(this.selectedNode._element.children[this._nextEditableColumn(-1)]);
            }
        }

        if (nextSelectedNode) {
            nextSelectedNode.reveal();
            nextSelectedNode.select();
        }

        if (handled)
            event.consume(true);
    },

    // After deleting the selected node, moves selection to the next selectable
    // node, falling back to the previous one (skipping the creation node).
    changeNodeAfterDeletion: function()
    {
        var nextSelectedNode = this.selectedNode.traverseNextNode(true);
        while (nextSelectedNode && !nextSelectedNode.selectable)
            nextSelectedNode = nextSelectedNode.traverseNextNode(true);

        if (!nextSelectedNode || nextSelectedNode.isCreationNode) {
            nextSelectedNode = this.selectedNode.traversePreviousNode(true);
            while (nextSelectedNode && !nextSelectedNode.selectable)
                nextSelectedNode = nextSelectedNode.traversePreviousNode(true);
        }

        if (nextSelectedNode) {
            nextSelectedNode.reveal();
            nextSelectedNode.select();
        }
    },

    /**
     * @param {!Node} target
     * @return {?WebInspector.DataGridNode}
     */
    dataGridNodeFromNode: function(target)
    {
        var rowElement = target.enclosingNodeOrSelfWithNodeName("tr");
        return rowElement && rowElement._dataGridNode;
    },

    /**
     * @param {!Node} target
     * @return {?string}
     */
    columnIdentifierFromNode: function(target)
    {
        var cellElement = target.enclosingNodeOrSelfWithNodeName("td");
        return cellElement && cellElement.columnIdentifier_;
    },

    // Header click handler: toggles/applies sort order and fires SortingChanged.
    _clickInHeaderCell: function(event)
    {
        var cell = event.target.enclosingNodeOrSelfWithNodeName("th");
        if (!cell || (cell.columnIdentifier === undefined) || !cell.classList.contains("sortable"))
            return;

        var sortOrder = WebInspector.DataGrid.Order.Ascending;
        if ((cell === this._sortColumnCell) && this.isSortOrderAscending())
            sortOrder = WebInspector.DataGrid.Order.Descending;

        if (this._sortColumnCell)
            this._sortColumnCell.classList.remove(WebInspector.DataGrid.Order.Ascending, WebInspector.DataGrid.Order.Descending);
        this._sortColumnCell = cell;

        cell.classList.add(sortOrder);

        this.dispatchEventToListeners(WebInspector.DataGrid.Events.SortingChanged);
    },

    /**
     * Updates the sort indicator without dispatching SortingChanged.
     * @param {string} columnIdentifier
     * @param {!WebInspector.DataGrid.Order} sortOrder
     */
    markColumnAsSortedBy: function(columnIdentifier, sortOrder)
    {
        if (this._sortColumnCell)
            this._sortColumnCell.classList.remove(WebInspector.DataGrid.Order.Ascending, WebInspector.DataGrid.Order.Descending);
        this._sortColumnCell = this._headerTableHeaders[columnIdentifier];
        this._sortColumnCell.classList.add(sortOrder);
    },

    /**
     * @param {string} columnIdentifier
     * @return {!Element}
     */
    headerTableHeader: function(columnIdentifier)
    {
        return this._headerTableHeaders[columnIdentifier];
    },

    // Mouse-down handler: selects the clicked row (meta-click toggles selection).
    _mouseDownInDataTable: function(event)
    {
        var gridNode = this.dataGridNodeFromNode(event.target);
        if (!gridNode || !gridNode.selectable)
            return;

        if (gridNode.isEventWithinDisclosureTriangle(event))
            return;

        if (event.metaKey) {
            if (gridNode.selected)
                gridNode.deselect();
            else
                gridNode.select();
        } else
            gridNode.select();
    },

    // Builds the row context menu: Refresh, Add/Edit, Delete, plus any items
    // supplied by the embedder's contextMenuCallback.
    _contextMenuInDataTable: function(event)
    {
        var contextMenu = new WebInspector.ContextMenu(event);

        var gridNode = this.dataGridNodeFromNode(event.target);
        if (this._refreshCallback && (!gridNode || gridNode !== this.creationNode))
            contextMenu.appendItem(WebInspector.UIString("Refresh"), this._refreshCallback.bind(this));

        if (gridNode && gridNode.selectable && !gridNode.isEventWithinDisclosureTriangle(event)) {
            if (this._editCallback) {
                if (gridNode === this.creationNode)
                    contextMenu.appendItem(WebInspector.UIString.capitalize("Add ^new"), this._startEditing.bind(this, event.target));
                else {
                    var columnIdentifier = this.columnIdentifierFromNode(event.target);
                    if (columnIdentifier && this._columns[columnIdentifier].editable)
                        contextMenu.appendItem(WebInspector.UIString("Edit \"%s\"", this._columns[columnIdentifier].title), this._startEditing.bind(this, event.target));
                }
            }
            if (this._deleteCallback && gridNode !== this.creationNode)
                contextMenu.appendItem(WebInspector.UIString.capitalize("Delete"), this._deleteCallback.bind(this, gridNode));
            if (this._contextMenuCallback)
                this._contextMenuCallback(contextMenu, gridNode);
        }

        contextMenu.show();
    },

    // Click handler: toggles expansion when the disclosure triangle is hit.
    _clickInDataTable: function(event)
    {
        var gridNode = this.dataGridNodeFromNode(event.target);
        if (!gridNode || !gridNode.hasChildren)
            return;

        if (!gridNode.isEventWithinDisclosureTriangle(event))
            return;

        if (gridNode.expanded) {
            if (event.altKey)
                gridNode.collapseRecursively();
            else
                gridNode.collapse();
        } else {
            if (event.altKey)
                gridNode.expandRecursively();
            else
                gridNode.expand();
        }
    },

    /**
     * @param {!WebInspector.DataGrid.ResizeMethod} method
     */
    setResizeMethod: function(method)
    {
        this._resizeMethod = method;
    },

    /**
     * Drag-handle start callback; remembers which resizer is being dragged.
     * @return {boolean}
     */
    _startResizerDragging: function(event)
    {
        this._currentResizer = event.target;
        return true;
    },

    // Drag-handle move callback: redistributes width between the columns on
    // either side of the active resizer (exact behavior depends on _resizeMethod).
    _resizerDragging: function(event)
    {
        var resizer = this._currentResizer;
        if (!resizer)
            return;

        // Constrain the dragpoint to be within the containing div of the
        // datagrid.
        var dragPoint = event.clientX - this.element.totalOffsetLeft();
        var firstRowCells = this.headerTableBody.rows[0].cells;
        var leftEdgeOfPreviousColumn = 0;
        // Constrain the dragpoint to be within the space made up by the
        // column directly to the left and the column directly to the right.
var leftCellIndex = resizer.__index; var rightCellIndex = leftCellIndex + 1; for (var i = 0; i < leftCellIndex; i++) leftEdgeOfPreviousColumn += firstRowCells[i].offsetWidth; // Differences for other resize methods if (this._resizeMethod === WebInspector.DataGrid.ResizeMethod.Last) { rightCellIndex = this._resizers.length; } else if (this._resizeMethod === WebInspector.DataGrid.ResizeMethod.First) { leftEdgeOfPreviousColumn += firstRowCells[leftCellIndex].offsetWidth - firstRowCells[0].offsetWidth; leftCellIndex = 0; } var rightEdgeOfNextColumn = leftEdgeOfPreviousColumn + firstRowCells[leftCellIndex].offsetWidth + firstRowCells[rightCellIndex].offsetWidth; // Give each column some padding so that they don't disappear. var leftMinimum = leftEdgeOfPreviousColumn + this.ColumnResizePadding; var rightMaximum = rightEdgeOfNextColumn - this.ColumnResizePadding; if (leftMinimum > rightMaximum) return; dragPoint = Number.constrain(dragPoint, leftMinimum, rightMaximum); var position = (dragPoint - this.CenterResizerOverBorderAdjustment); resizer.__position = position; resizer.style.left = position + "px"; var pxLeftColumn = (dragPoint - leftEdgeOfPreviousColumn) + "px"; this._headerTableColumnGroup.children[leftCellIndex].style.width = pxLeftColumn; this._dataTableColumnGroup.children[leftCellIndex].style.width = pxLeftColumn; var pxRightColumn = (rightEdgeOfNextColumn - dragPoint) + "px"; this._headerTableColumnGroup.children[rightCellIndex].style.width = pxRightColumn; this._dataTableColumnGroup.children[rightCellIndex].style.width = pxRightColumn; var leftColumn = this._visibleColumnsArray[leftCellIndex]; var rightColumn = this._visibleColumnsArray[rightCellIndex]; if (leftColumn.weight || rightColumn.weight) { var sumOfWeights = leftColumn.weight + rightColumn.weight; var delta = rightEdgeOfNextColumn - leftEdgeOfPreviousColumn; leftColumn.weight = (dragPoint - leftEdgeOfPreviousColumn) * sumOfWeights / delta; rightColumn.weight = (rightEdgeOfNextColumn - dragPoint) * 
sumOfWeights / delta; } this._positionResizers(); event.preventDefault(); this.dispatchEventToListeners(WebInspector.DataGrid.Events.ColumnsResized); }, /** * @param {string} columnId * @return {number} */ columnOffset: function(columnId) { if (!this.element.offsetWidth) return 0; for (var i = 1; i < this._visibleColumnsArray.length; ++i) { if (columnId === this._visibleColumnsArray[i].identifier) { if (this._resizers[i - 1]) return this._resizers[i - 1].__position; } } return 0; }, _endResizerDragging: function(event) { this._currentResizer = null; this._saveColumnWeights(); this.dispatchEventToListeners(WebInspector.DataGrid.Events.ColumnsResized); }, ColumnResizePadding: 24, CenterResizerOverBorderAdjustment: 3, __proto__: WebInspector.Widget.prototype } /** @enum {string} */ WebInspector.DataGrid.ResizeMethod = { Nearest: "nearest", First: "first", Last: "last" } /** * @constructor * @extends {WebInspector.Object} * @param {?Object.<string, *>=} data * @param {boolean=} hasChildren */ WebInspector.DataGridNode = function(data, hasChildren) { /** @type {?Element} */ this._element = null; /** @type {boolean} */ this._expanded = false; /** @type {boolean} */ this._selected = false; /** @type {number|undefined} */ this._depth; /** @type {boolean|undefined} */ this._revealed; /** @type {boolean} */ this._attached = false; /** @type {?{parent: !WebInspector.DataGridNode, index: number}} */ this._savedPosition = null; /** @type {boolean} */ this._shouldRefreshChildren = true; /** @type {!Object.<string, *>} */ this._data = data || {}; /** @type {boolean} */ this.hasChildren = hasChildren || false; /** @type {!Array.<!WebInspector.DataGridNode>} */ this.children = []; /** @type {?WebInspector.DataGrid} */ this.dataGrid = null; /** @type {?WebInspector.DataGridNode} */ this.parent = null; /** @type {?WebInspector.DataGridNode} */ this.previousSibling = null; /** @type {?WebInspector.DataGridNode} */ this.nextSibling = null; /** @type {number} */ 
this.disclosureToggleWidth = 10; } WebInspector.DataGridNode.prototype = { /** @type {boolean} */ selectable: true, /** @type {boolean} */ _isRoot: false, /** * @return {!Element} */ element: function() { if (!this._element) { this.createElement(); this.createCells(); } return /** @type {!Element} */ (this._element); }, /** * @protected */ createElement: function() { this._element = createElement("tr"); this._element._dataGridNode = this; if (this.hasChildren) this._element.classList.add("parent"); if (this.expanded) this._element.classList.add("expanded"); if (this.selected) this._element.classList.add("selected"); if (this.revealed) this._element.classList.add("revealed"); }, /** * @protected */ createCells: function() { this._element.removeChildren(); var columnsArray = this.dataGrid._visibleColumnsArray; for (var i = 0; i < columnsArray.length; ++i) this._element.appendChild(this.createCell(columnsArray[i].identifier)); this._element.appendChild(this._createTDWithClass("corner")); }, get data() { return this._data; }, set data(x) { this._data = x || {}; this.refresh(); }, get revealed() { if (this._revealed !== undefined) return this._revealed; var currentAncestor = this.parent; while (currentAncestor && !currentAncestor._isRoot) { if (!currentAncestor.expanded) { this._revealed = false; return false; } currentAncestor = currentAncestor.parent; } this._revealed = true; return true; }, set hasChildren(x) { if (this._hasChildren === x) return; this._hasChildren = x; if (!this._element) return; this._element.classList.toggle("parent", this._hasChildren); this._element.classList.toggle("expanded", this._hasChildren && this.expanded); }, get hasChildren() { return this._hasChildren; }, set revealed(x) { if (this._revealed === x) return; this._revealed = x; if (this._element) this._element.classList.toggle("revealed", this._revealed); for (var i = 0; i < this.children.length; ++i) this.children[i].revealed = x && this.expanded; }, /** * @return {number} */ get 
depth() { if (this._depth !== undefined) return this._depth; if (this.parent && !this.parent._isRoot) this._depth = this.parent.depth + 1; else this._depth = 0; return this._depth; }, get leftPadding() { return this.depth * this.dataGrid.indentWidth; }, get shouldRefreshChildren() { return this._shouldRefreshChildren; }, set shouldRefreshChildren(x) { this._shouldRefreshChildren = x; if (x && this.expanded) this.expand(); }, get selected() { return this._selected; }, set selected(x) { if (x) this.select(); else this.deselect(); }, get expanded() { return this._expanded; }, /** * @param {boolean} x */ set expanded(x) { if (x) this.expand(); else this.collapse(); }, refresh: function() { if (!this.dataGrid) this._element = null; if (!this._element) return; this.createCells(); }, /** * @param {string} className * @return {!Element} */ _createTDWithClass: function(className) { var cell = createElementWithClass("td", className); var cellClass = this.dataGrid._cellClass; if (cellClass) cell.classList.add(cellClass); return cell; }, /** * @param {string} columnIdentifier * @return {!Element} */ createTD: function(columnIdentifier) { var cell = this._createTDWithClass(columnIdentifier + "-column"); cell.columnIdentifier_ = columnIdentifier; var alignment = this.dataGrid._columns[columnIdentifier].align; if (alignment) cell.classList.add(alignment); if (columnIdentifier === this.dataGrid.disclosureColumnIdentifier) { cell.classList.add("disclosure"); if (this.leftPadding) cell.style.setProperty("padding-left", this.leftPadding + "px"); } return cell; }, /** * @param {string} columnIdentifier * @return {!Element} */ createCell: function(columnIdentifier) { var cell = this.createTD(columnIdentifier); var data = this.data[columnIdentifier]; if (data instanceof Node) { cell.appendChild(data); } else { cell.textContent = data; if (this.dataGrid._columns[columnIdentifier].longText) cell.title = data; } return cell; }, /** * @return {number} */ nodeSelfHeight: function() { return 
16; }, /** * @param {!WebInspector.DataGridNode} child */ appendChild: function(child) { this.insertChild(child, this.children.length); }, /** * @param {!WebInspector.DataGridNode} child * @param {number} index */ insertChild: function(child, index) { if (!child) throw("insertChild: Node can't be undefined or null."); if (child.parent === this) throw("insertChild: Node is already a child of this node."); child.remove(); this.children.splice(index, 0, child); this.hasChildren = true; child.parent = this; child.dataGrid = this.dataGrid; child.recalculateSiblings(index); child._depth = undefined; child._revealed = undefined; child._attached = false; child._shouldRefreshChildren = true; var current = child.children[0]; while (current) { current.dataGrid = this.dataGrid; current._depth = undefined; current._revealed = undefined; current._attached = false; current._shouldRefreshChildren = true; current = current.traverseNextNode(false, child, true); } if (this.expanded) child._attach(); if (!this.revealed) child.revealed = false; }, remove: function() { if (this.parent) this.parent.removeChild(this); }, /** * @param {!WebInspector.DataGridNode} child */ removeChild: function(child) { if (!child) throw("removeChild: Node can't be undefined or null."); if (child.parent !== this) throw("removeChild: Node is not a child of this node."); child.deselect(); child._detach(); this.children.remove(child, true); if (child.previousSibling) child.previousSibling.nextSibling = child.nextSibling; if (child.nextSibling) child.nextSibling.previousSibling = child.previousSibling; child.dataGrid = null; child.parent = null; child.nextSibling = null; child.previousSibling = null; if (this.children.length <= 0) this.hasChildren = false; }, removeChildren: function() { for (var i = 0; i < this.children.length; ++i) { var child = this.children[i]; child.deselect(); child._detach(); child.dataGrid = null; child.parent = null; child.nextSibling = null; child.previousSibling = null; } 
this.children = []; this.hasChildren = false; }, /** * @param {number} myIndex */ recalculateSiblings: function(myIndex) { if (!this.parent) return; var previousChild = this.parent.children[myIndex - 1] || null; if (previousChild) previousChild.nextSibling = this; this.previousSibling = previousChild; var nextChild = this.parent.children[myIndex + 1] || null; if (nextChild) nextChild.previousSibling = this; this.nextSibling = nextChild; }, collapse: function() { if (this._isRoot) return; if (this._element) this._element.classList.remove("expanded"); this._expanded = false; for (var i = 0; i < this.children.length; ++i) this.children[i].revealed = false; }, collapseRecursively: function() { var item = this; while (item) { if (item.expanded) item.collapse(); item = item.traverseNextNode(false, this, true); } }, populate: function() { }, expand: function() { if (!this.hasChildren || this.expanded) return; if (this._isRoot) return; if (this.revealed && !this._shouldRefreshChildren) for (var i = 0; i < this.children.length; ++i) this.children[i].revealed = true; if (this._shouldRefreshChildren) { for (var i = 0; i < this.children.length; ++i) this.children[i]._detach(); this.populate(); if (this._attached) { for (var i = 0; i < this.children.length; ++i) { var child = this.children[i]; if (this.revealed) child.revealed = true; child._attach(); } } this._shouldRefreshChildren = false; } if (this._element) this._element.classList.add("expanded"); this._expanded = true; }, expandRecursively: function() { var item = this; while (item) { item.expand(); item = item.traverseNextNode(false, this); } }, reveal: function() { if (this._isRoot) return; var currentAncestor = this.parent; while (currentAncestor && !currentAncestor._isRoot) { if (!currentAncestor.expanded) currentAncestor.expand(); currentAncestor = currentAncestor.parent; } this.element().scrollIntoViewIfNeeded(false); }, /** * @param {boolean=} supressSelectedEvent */ select: function(supressSelectedEvent) { if 
(!this.dataGrid || !this.selectable || this.selected) return; if (this.dataGrid.selectedNode) this.dataGrid.selectedNode.deselect(); this._selected = true; this.dataGrid.selectedNode = this; if (this._element) this._element.classList.add("selected"); if (!supressSelectedEvent) this.dataGrid.dispatchEventToListeners(WebInspector.DataGrid.Events.SelectedNode); }, revealAndSelect: function() { if (this._isRoot) return; this.reveal(); this.select(); }, /** * @param {boolean=} supressDeselectedEvent */ deselect: function(supressDeselectedEvent) { if (!this.dataGrid || this.dataGrid.selectedNode !== this || !this.selected) return; this._selected = false; this.dataGrid.selectedNode = null; if (this._element) this._element.classList.remove("selected"); if (!supressDeselectedEvent) this.dataGrid.dispatchEventToListeners(WebInspector.DataGrid.Events.DeselectedNode); }, /** * @param {boolean} skipHidden * @param {?WebInspector.DataGridNode=} stayWithin * @param {boolean=} dontPopulate * @param {!Object=} info * @return {?WebInspector.DataGridNode} */ traverseNextNode: function(skipHidden, stayWithin, dontPopulate, info) { if (!dontPopulate && this.hasChildren) this.populate(); if (info) info.depthChange = 0; var node = (!skipHidden || this.revealed) ? this.children[0] : null; if (node && (!skipHidden || this.expanded)) { if (info) info.depthChange = 1; return node; } if (this === stayWithin) return null; node = (!skipHidden || this.revealed) ? this.nextSibling : null; if (node) return node; node = this; while (node && !node._isRoot && !((!skipHidden || node.revealed) ? node.nextSibling : null) && node.parent !== stayWithin) { if (info) info.depthChange -= 1; node = node.parent; } if (!node) return null; return (!skipHidden || node.revealed) ? 
node.nextSibling : null; }, /** * @param {boolean} skipHidden * @param {boolean=} dontPopulate * @return {?WebInspector.DataGridNode} */ traversePreviousNode: function(skipHidden, dontPopulate) { var node = (!skipHidden || this.revealed) ? this.previousSibling : null; if (!dontPopulate && node && node.hasChildren) node.populate(); while (node && ((!skipHidden || (node.revealed && node.expanded)) ? node.children[node.children.length - 1] : null)) { if (!dontPopulate && node.hasChildren) node.populate(); node = ((!skipHidden || (node.revealed && node.expanded)) ? node.children[node.children.length - 1] : null); } if (node) return node; if (!this.parent || this.parent._isRoot) return null; return this.parent; }, /** * @return {boolean} */ isEventWithinDisclosureTriangle: function(event) { if (!this.hasChildren) return false; var cell = event.target.enclosingNodeOrSelfWithNodeName("td"); if (!cell || !cell.classList.contains("disclosure")) return false; var left = cell.totalOffsetLeft() + this.leftPadding; return event.pageX >= left && event.pageX <= left + this.disclosureToggleWidth; }, _attach: function() { if (!this.dataGrid || this._attached) return; this._attached = true; var previousNode = this.traversePreviousNode(true, true); var previousElement = previousNode ? 
previousNode.element() : this.dataGrid._topFillerRow; this.dataGrid.dataTableBody.insertBefore(this.element(), previousElement.nextSibling); if (this.expanded) for (var i = 0; i < this.children.length; ++i) this.children[i]._attach(); }, _detach: function() { if (!this._attached) return; this._attached = false; if (this._element) this._element.remove(); for (var i = 0; i < this.children.length; ++i) this.children[i]._detach(); this.wasDetached(); }, wasDetached: function() { }, savePosition: function() { if (this._savedPosition) return; if (!this.parent) throw("savePosition: Node must have a parent."); this._savedPosition = { parent: this.parent, index: this.parent.children.indexOf(this) }; }, restorePosition: function() { if (!this._savedPosition) return; if (this.parent !== this._savedPosition.parent) this._savedPosition.parent.insertChild(this, this._savedPosition.index); this._savedPosition = null; }, __proto__: WebInspector.Object.prototype } /** * @constructor * @extends {WebInspector.DataGridNode} */ WebInspector.CreationDataGridNode = function(data, hasChildren) { WebInspector.DataGridNode.call(this, data, hasChildren); /** @type {boolean} */ this.isCreationNode = true; } WebInspector.CreationDataGridNode.prototype = { makeNormal: function() { this.isCreationNode = false; }, __proto__: WebInspector.DataGridNode.prototype }
guorendong/iridium-browser-ubuntu
third_party/WebKit/Source/devtools/front_end/ui_lazy/DataGrid.js
JavaScript
bsd-3-clause
57,639
#! /usr/bin/env python
#
# fits2pdf.py -- Image a FITS file as a PDF.
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke.  All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
Render a FITS image onto a single US-letter PDF page.

Usage:
    $ ./fits2pdf.py <fitsfile> <output.pdf>
"""
import sys, os
import logging

from ginga.cairow.ImageViewCairo import ImageViewCairo
import cairo

from ginga import AstroImage

STD_FORMAT = '%(asctime)s | %(levelname)1.1s | %(filename)s:%(lineno)d (%(funcName)s) | %(message)s'

# Unit conversions: inches and centimeters per PostScript point.
point_in = 1/72.0
point_cm = 0.0352777778


def main(options, args):
    """Render args[0] (a FITS file path) into args[1] (an output PDF path).

    `options` is unused; it is kept for interface compatibility with the
    other example scripts.
    """
    # Log to stderr at INFO level.
    logger = logging.getLogger("example1")
    logger.setLevel(logging.INFO)
    fmt = logging.Formatter(STD_FORMAT)
    stderrHdlr = logging.StreamHandler()
    stderrHdlr.setFormatter(fmt)
    logger.addHandler(stderrHdlr)

    fi = ImageViewCairo(logger)
    fi.configure(500, 1000)

    # Load fits file
    filepath = args[0]
    image = AstroImage.AstroImage(logger=logger)
    image.load_file(filepath)

    # Make any adjustments to the image that we want
    fi.set_bg(1.0, 1.0, 1.0)
    fi.set_image(image)
    fi.auto_levels()
    fi.zoom_fit()
    fi.center_image()

    # US-letter page size (11in x 8.5in) expressed in points.
    ht_pts = 11.0 / point_in
    wd_pts = 8.5 / point_in
    off_x, off_y = 0, 0

    outfilepath = args[1]
    # PDF data is binary: the file must be opened in binary mode, or
    # cairo's writes fail under Python 3 ('w' was a latent text-mode bug).
    out_f = open(outfilepath, 'wb')
    try:
        # Surface creation happens inside the try so the output file is
        # closed even if PDFSurface construction raises.
        surface = cairo.PDFSurface(out_f, wd_pts, ht_pts)
        # set pixels per inch
        surface.set_fallback_resolution(300, 300)
        surface.set_device_offset(off_x, off_y)

        fi.save_image_as_surface(surface)
        surface.show_page()
        surface.flush()
        surface.finish()
    finally:
        out_f.close()


if __name__ == '__main__':
    main(None, sys.argv[1:])

# END
rajul/ginga
scripts/fits2pdf.py
Python
bsd-3-clause
1,754
#!/usr/bin/env vpython3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import unittest

from blinkpy.web_tests.stale_expectation_removal import builders
from unexpected_passes_common import constants
from unexpected_passes_common import data_types


class BuilderRunsTestOfInterestUnittest(unittest.TestCase):
    def setUp(self):
        self.instance = builders.WebTestBuilders(False)

    def testMatch(self):
        """Tests that a match can be successfully found."""
        step_map = {
            'isolated_scripts': [{
                'isolate_name': 'blink_web_tests',
            }],
        }
        self.assertTrue(
            self.instance._BuilderRunsTestOfInterest(step_map, None))
        # Re-add once WebGPU tests are supported.
        # step_map = {
        #     'isolated_scripts': [{
        #         'isolate_name': 'webgpu_blink_web_tests',
        #     }],
        # }
        # self.assertTrue(
        #     self.instance._BuilderRunsTestOfInterest(step_map, None))

    def testNoMatch(self):
        """Tests that an unrelated suite is not considered of interest."""
        step_map = {
            'isolated_scripts': [{
                'isolate_name': 'foo_web_tests',
            }],
        }
        self.assertFalse(
            self.instance._BuilderRunsTestOfInterest(step_map, None))


class GetFakeCiBuildersUnittest(unittest.TestCase):
    def testStringsConvertedToBuilderEntries(self):
        """Tests that the easier-to-read strings get converted to
        BuilderEntry."""
        instance = builders.WebTestBuilders(False)
        fake_to_mirrors = instance.GetFakeCiBuilders()
        fake_ci = data_types.BuilderEntry('linux-blink-rel-dummy',
                                          constants.BuilderTypes.CI, False)
        expected_mirrors = {
            data_types.BuilderEntry('linux-blink-rel',
                                    constants.BuilderTypes.TRY, False),
            data_types.BuilderEntry('v8_linux_blink_rel',
                                    constants.BuilderTypes.TRY, False),
        }
        self.assertEqual(fake_to_mirrors[fake_ci], expected_mirrors)


class GetNonChromiumBuildersUnittest(unittest.TestCase):
    def testStringsConvertedToBuilderEntries(self):
        """Tests that the easier-to-read strings get converted to
        BuilderEntry."""
        instance = builders.WebTestBuilders(False)
        expected = data_types.BuilderEntry('ToTMacOfficial',
                                           constants.BuilderTypes.CI, False)
        self.assertIn(expected, instance.GetNonChromiumBuilders())


if __name__ == '__main__':
    unittest.main(verbosity=2)
chromium/chromium
third_party/blink/tools/blinkpy/web_tests/stale_expectation_removal/builders_unittest.py
Python
bsd-3-clause
2,804
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package lockedfile creates and manipulates files whose contents should only
// change atomically.
package lockedfile

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"runtime"
)

// A File is a locked *os.File.
//
// Closing the file releases the lock.
//
// If the program exits while a file is locked, the operating system releases
// the lock but may not do so promptly: callers must ensure that all locked
// files are closed before exiting.
type File struct {
	osFile
	closed bool
}

// osFile embeds a *os.File while keeping the pointer itself unexported.
// (When we close a File, it must be the same file descriptor that we opened!)
type osFile struct {
	*os.File
}

// OpenFile is like os.OpenFile, but returns a locked file.
// If flag includes os.O_WRONLY or os.O_RDWR, the file is write-locked;
// otherwise, it is read-locked.
func OpenFile(name string, flag int, perm os.FileMode) (*File, error) {
	var (
		f   = new(File)
		err error
	)
	// openFile is not defined in this file — presumably it is the
	// platform-specific helper that opens the file and acquires the
	// advisory lock before returning; TODO confirm against the
	// lockedfile_* platform files.
	f.osFile.File, err = openFile(name, flag, perm)
	if err != nil {
		return nil, err
	}

	// Although the operating system will drop locks for open files when the go
	// command exits, we want to hold locks for as little time as possible, and we
	// especially don't want to leave a file locked after we're done with it. Our
	// Close method is what releases the locks, so use a finalizer to report
	// missing Close calls on a best-effort basis.
	runtime.SetFinalizer(f, func(f *File) {
		panic(fmt.Sprintf("lockedfile.File %s became unreachable without a call to Close", f.Name()))
	})

	return f, nil
}

// Open is like os.Open, but returns a read-locked file.
func Open(name string) (*File, error) {
	return OpenFile(name, os.O_RDONLY, 0)
}

// Create is like os.Create, but returns a write-locked file.
func Create(name string) (*File, error) {
	return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
}

// Edit creates the named file with mode 0666 (before umask),
// but does not truncate existing contents.
//
// If Edit succeeds, methods on the returned File can be used for I/O.
// The associated file descriptor has mode O_RDWR and the file is write-locked.
func Edit(name string) (*File, error) {
	return OpenFile(name, os.O_RDWR|os.O_CREATE, 0666)
}

// Close unlocks and closes the underlying file.
//
// Close may be called multiple times; all calls after the first will return a
// non-nil error.
func (f *File) Close() error {
	if f.closed {
		// Second and later calls report ErrClosed rather than
		// double-closing the descriptor.
		return &os.PathError{
			Op:   "close",
			Path: f.Name(),
			Err:  os.ErrClosed,
		}
	}
	f.closed = true

	err := closeFile(f.osFile.File)
	// Close has now been called explicitly, so drop the leak-detection
	// finalizer installed by OpenFile.
	runtime.SetFinalizer(f, nil)
	return err
}

// Read opens the named file with a read-lock and returns its contents.
func Read(name string) ([]byte, error) {
	f, err := Open(name)
	if err != nil {
		return nil, err
	}
	// The deferred Close releases the read-lock; its error is ignored
	// because the contents have already been read successfully.
	defer f.Close()

	return ioutil.ReadAll(f)
}

// Write opens the named file (creating it with the given permissions if needed),
// then write-locks it and overwrites it with the given content.
func Write(name string, content io.Reader, perm os.FileMode) (err error) {
	f, err := OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
	if err != nil {
		return err
	}

	_, err = io.Copy(f, content)
	// Close always runs (it releases the lock), but its error is only
	// surfaced when the copy itself succeeded.
	if closeErr := f.Close(); err == nil {
		err = closeErr
	}
	return err
}

// Transform invokes t with the result of reading the named file, with its lock
// still held.
//
// If t returns a nil error, Transform then writes the returned contents back to
// the file, making a best effort to preserve existing contents on error.
//
// t must not modify the slice passed to it.
func Transform(name string, t func([]byte) ([]byte, error)) (err error) {
	// Edit opens O_RDWR|O_CREATE and write-locks the file; the deferred
	// Close releases the lock after the rollback defer (if any) has run.
	f, err := Edit(name)
	if err != nil {
		return err
	}
	defer f.Close()

	old, err := ioutil.ReadAll(f)
	if err != nil {
		return err
	}

	new, err := t(old)
	if err != nil {
		return err
	}

	if len(new) > len(old) {
		// The overall file size is increasing, so write the tail first: if we're
		// about to run out of space on the disk, we would rather detect that
		// failure before we have overwritten the original contents.
		if _, err := f.WriteAt(new[len(old):], int64(len(old))); err != nil {
			// Make a best effort to remove the incomplete tail.
			f.Truncate(int64(len(old)))
			return err
		}
	}

	// We're about to overwrite the old contents. In case of failure, make a best
	// effort to roll back before we close the file.
	// (err is the named return value, so this deferred closure observes any
	// failure that happens after this point.)
	defer func() {
		if err != nil {
			if _, err := f.WriteAt(old, 0); err == nil {
				f.Truncate(int64(len(old)))
			}
		}
	}()

	if len(new) >= len(old) {
		// Same size or grew: the tail (if any) was already written above,
		// so only the first len(old) bytes remain to be overwritten.
		if _, err := f.WriteAt(new[:len(old)], 0); err != nil {
			return err
		}
	} else {
		if _, err := f.WriteAt(new, 0); err != nil {
			return err
		}

		// The overall file size is decreasing, so shrink the file to its final size
		// after writing. We do this after writing (instead of before) so that if
		// the write fails, enough filesystem space will likely still be reserved
		// to contain the previous contents.
		if err := f.Truncate(int64(len(new))); err != nil {
			return err
		}
	}

	return nil
}
akutz/go
src/cmd/go/internal/lockedfile/lockedfile.go
GO
bsd-3-clause
5,191
# Generated by Django 2.2.6 on 2019-11-05 16:01 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('scanners', '0013_auto_20191105_1522'), ] operations = [ migrations.AlterField( model_name='scannerrule', name='action', field=models.PositiveSmallIntegerField(choices=[(1, 'No action'), (20, 'Flag for human review'), (100, 'Delay auto-approval'), (200, 'Delay auto-approval indefinitely')], default=1), ), ]
eviljeff/olympia
src/olympia/scanners/migrations/0014_alter_action_choices_on_scannerrule.py
Python
bsd-3-clause
536
// © 2017 and later: Unicode, Inc. and others. // License & terms of use: http://www.unicode.org/copyright.html #include "unicode/utypes.h" #if !UCONFIG_NO_FORMATTING #include "cstring.h" #include "unicode/ures.h" #include "uresimp.h" #include "charstr.h" #include "number_formatimpl.h" #include "unicode/numfmt.h" #include "number_patternstring.h" #include "number_utils.h" #include "unicode/numberformatter.h" #include "unicode/dcfmtsym.h" #include "number_scientific.h" #include "number_compact.h" #include "uresimp.h" #include "ureslocs.h" using namespace icu; using namespace icu::number; using namespace icu::number::impl; MicroPropsGenerator::~MicroPropsGenerator() = default; NumberFormatterImpl::NumberFormatterImpl(const MacroProps& macros, UErrorCode& status) : NumberFormatterImpl(macros, true, status) { } int32_t NumberFormatterImpl::formatStatic(const MacroProps& macros, DecimalQuantity& inValue, FormattedStringBuilder& outString, UErrorCode& status) { NumberFormatterImpl impl(macros, false, status); MicroProps& micros = impl.preProcessUnsafe(inValue, status); if (U_FAILURE(status)) { return 0; } int32_t length = writeNumber(micros, inValue, outString, 0, status); length += writeAffixes(micros, outString, 0, length, status); return length; } int32_t NumberFormatterImpl::getPrefixSuffixStatic(const MacroProps& macros, Signum signum, StandardPlural::Form plural, FormattedStringBuilder& outString, UErrorCode& status) { NumberFormatterImpl impl(macros, false, status); return impl.getPrefixSuffixUnsafe(signum, plural, outString, status); } // NOTE: C++ SPECIFIC DIFFERENCE FROM JAVA: // The "safe" apply method uses a new MicroProps. In the MicroPropsGenerator, fMicros is copied into the new instance. // The "unsafe" method simply re-uses fMicros, eliminating the extra copy operation. // See MicroProps::processQuantity() for details. 
int32_t NumberFormatterImpl::format(DecimalQuantity& inValue, FormattedStringBuilder& outString, UErrorCode& status) const { MicroProps micros; preProcess(inValue, micros, status); if (U_FAILURE(status)) { return 0; } int32_t length = writeNumber(micros, inValue, outString, 0, status); length += writeAffixes(micros, outString, 0, length, status); return length; } void NumberFormatterImpl::preProcess(DecimalQuantity& inValue, MicroProps& microsOut, UErrorCode& status) const { if (U_FAILURE(status)) { return; } if (fMicroPropsGenerator == nullptr) { status = U_INTERNAL_PROGRAM_ERROR; return; } fMicroPropsGenerator->processQuantity(inValue, microsOut, status); microsOut.integerWidth.apply(inValue, status); } MicroProps& NumberFormatterImpl::preProcessUnsafe(DecimalQuantity& inValue, UErrorCode& status) { if (U_FAILURE(status)) { return fMicros; // must always return a value } if (fMicroPropsGenerator == nullptr) { status = U_INTERNAL_PROGRAM_ERROR; return fMicros; // must always return a value } fMicroPropsGenerator->processQuantity(inValue, fMicros, status); fMicros.integerWidth.apply(inValue, status); return fMicros; } int32_t NumberFormatterImpl::getPrefixSuffix(Signum signum, StandardPlural::Form plural, FormattedStringBuilder& outString, UErrorCode& status) const { if (U_FAILURE(status)) { return 0; } // #13453: DecimalFormat wants the affixes from the pattern only (modMiddle, aka pattern modifier). // Safe path: use fImmutablePatternModifier. const Modifier* modifier = fImmutablePatternModifier->getModifier(signum, plural); modifier->apply(outString, 0, 0, status); if (U_FAILURE(status)) { return 0; } return modifier->getPrefixLength(); } int32_t NumberFormatterImpl::getPrefixSuffixUnsafe(Signum signum, StandardPlural::Form plural, FormattedStringBuilder& outString, UErrorCode& status) { if (U_FAILURE(status)) { return 0; } // #13453: DecimalFormat wants the affixes from the pattern only (modMiddle, aka pattern modifier). // Unsafe path: use fPatternModifier. 
fPatternModifier->setNumberProperties(signum, plural); fPatternModifier->apply(outString, 0, 0, status); if (U_FAILURE(status)) { return 0; } return fPatternModifier->getPrefixLength(); } NumberFormatterImpl::NumberFormatterImpl(const MacroProps& macros, bool safe, UErrorCode& status) { fMicroPropsGenerator = macrosToMicroGenerator(macros, safe, status); } ////////// const MicroPropsGenerator* NumberFormatterImpl::macrosToMicroGenerator(const MacroProps& macros, bool safe, UErrorCode& status) { if (U_FAILURE(status)) { return nullptr; } const MicroPropsGenerator* chain = &fMicros; // Check that macros is error-free before continuing. if (macros.copyErrorTo(status)) { return nullptr; } // TODO: Accept currency symbols from DecimalFormatSymbols? // Pre-compute a few values for efficiency. bool isCurrency = utils::unitIsCurrency(macros.unit); bool isNoUnit = utils::unitIsNoUnit(macros.unit); bool isPercent = utils::unitIsPercent(macros.unit); bool isPermille = utils::unitIsPermille(macros.unit); bool isAccounting = macros.sign == UNUM_SIGN_ACCOUNTING || macros.sign == UNUM_SIGN_ACCOUNTING_ALWAYS || macros.sign == UNUM_SIGN_ACCOUNTING_EXCEPT_ZERO; CurrencyUnit currency(u"", status); if (isCurrency) { currency = CurrencyUnit(macros.unit, status); // Restore CurrencyUnit from MeasureUnit } UNumberUnitWidth unitWidth = UNUM_UNIT_WIDTH_SHORT; if (macros.unitWidth != UNUM_UNIT_WIDTH_COUNT) { unitWidth = macros.unitWidth; } bool isCldrUnit = !isCurrency && !isNoUnit && (unitWidth == UNUM_UNIT_WIDTH_FULL_NAME || !(isPercent || isPermille)); // Select the numbering system. LocalPointer<const NumberingSystem> nsLocal; const NumberingSystem* ns; if (macros.symbols.isNumberingSystem()) { ns = macros.symbols.getNumberingSystem(); } else { // TODO: Is there a way to avoid creating the NumberingSystem object? ns = NumberingSystem::createInstance(macros.locale, status); // Give ownership to the function scope. nsLocal.adoptInstead(ns); } const char* nsName = U_SUCCESS(status) ? 
ns->getName() : "latn"; uprv_strncpy(fMicros.nsName, nsName, 8); fMicros.nsName[8] = 0; // guarantee NUL-terminated // Resolve the symbols. Do this here because currency may need to customize them. if (macros.symbols.isDecimalFormatSymbols()) { fMicros.symbols = macros.symbols.getDecimalFormatSymbols(); } else { LocalPointer<DecimalFormatSymbols> newSymbols( new DecimalFormatSymbols(macros.locale, *ns, status), status); if (U_FAILURE(status)) { return nullptr; } if (isCurrency) { newSymbols->setCurrency(currency.getISOCurrency(), status); if (U_FAILURE(status)) { return nullptr; } } fMicros.symbols = newSymbols.getAlias(); fSymbols.adoptInstead(newSymbols.orphan()); } // Load and parse the pattern string. It is used for grouping sizes and affixes only. // If we are formatting currency, check for a currency-specific pattern. const char16_t* pattern = nullptr; if (isCurrency && fMicros.symbols->getCurrencyPattern() != nullptr) { pattern = fMicros.symbols->getCurrencyPattern(); } if (pattern == nullptr) { CldrPatternStyle patternStyle; if (isCldrUnit) { patternStyle = CLDR_PATTERN_STYLE_DECIMAL; } else if (isPercent || isPermille) { patternStyle = CLDR_PATTERN_STYLE_PERCENT; } else if (!isCurrency || unitWidth == UNUM_UNIT_WIDTH_FULL_NAME) { patternStyle = CLDR_PATTERN_STYLE_DECIMAL; } else if (isAccounting) { // NOTE: Although ACCOUNTING and ACCOUNTING_ALWAYS are only supported in currencies right now, // the API contract allows us to add support to other units in the future. 
patternStyle = CLDR_PATTERN_STYLE_ACCOUNTING; } else { patternStyle = CLDR_PATTERN_STYLE_CURRENCY; } pattern = utils::getPatternForStyle(macros.locale, nsName, patternStyle, status); if (U_FAILURE(status)) { return nullptr; } } auto patternInfo = new ParsedPatternInfo(); if (patternInfo == nullptr) { status = U_MEMORY_ALLOCATION_ERROR; return nullptr; } fPatternInfo.adoptInstead(patternInfo); PatternParser::parseToPatternInfo(UnicodeString(pattern), *patternInfo, status); if (U_FAILURE(status)) { return nullptr; } ///////////////////////////////////////////////////////////////////////////////////// /// START POPULATING THE DEFAULT MICROPROPS AND BUILDING THE MICROPROPS GENERATOR /// ///////////////////////////////////////////////////////////////////////////////////// // Multiplier if (macros.scale.isValid()) { fMicros.helpers.multiplier.setAndChain(macros.scale, chain); chain = &fMicros.helpers.multiplier; } // Rounding strategy Precision precision; if (!macros.precision.isBogus()) { precision = macros.precision; } else if (macros.notation.fType == Notation::NTN_COMPACT) { precision = Precision::integer().withMinDigits(2); } else if (isCurrency) { precision = Precision::currency(UCURR_USAGE_STANDARD); } else { precision = Precision::maxFraction(6); } UNumberFormatRoundingMode roundingMode; if (macros.roundingMode != kDefaultMode) { roundingMode = macros.roundingMode; } else { // Temporary until ICU 64 roundingMode = precision.fRoundingMode; } fMicros.rounder = {precision, roundingMode, currency, status}; if (U_FAILURE(status)) { return nullptr; } // Grouping strategy if (!macros.grouper.isBogus()) { fMicros.grouping = macros.grouper; } else if (macros.notation.fType == Notation::NTN_COMPACT) { // Compact notation uses minGrouping by default since ICU 59 fMicros.grouping = Grouper::forStrategy(UNUM_GROUPING_MIN2); } else { fMicros.grouping = Grouper::forStrategy(UNUM_GROUPING_AUTO); } fMicros.grouping.setLocaleData(*fPatternInfo, macros.locale); // Padding strategy 
if (!macros.padder.isBogus()) { fMicros.padding = macros.padder; } else { fMicros.padding = Padder::none(); } // Integer width if (!macros.integerWidth.isBogus()) { fMicros.integerWidth = macros.integerWidth; } else { fMicros.integerWidth = IntegerWidth::standard(); } // Sign display if (macros.sign != UNUM_SIGN_COUNT) { fMicros.sign = macros.sign; } else { fMicros.sign = UNUM_SIGN_AUTO; } // Decimal mark display if (macros.decimal != UNUM_DECIMAL_SEPARATOR_COUNT) { fMicros.decimal = macros.decimal; } else { fMicros.decimal = UNUM_DECIMAL_SEPARATOR_AUTO; } // Use monetary separator symbols fMicros.useCurrency = isCurrency; // Inner modifier (scientific notation) if (macros.notation.fType == Notation::NTN_SCIENTIFIC) { auto newScientificHandler = new ScientificHandler(&macros.notation, fMicros.symbols, chain); if (newScientificHandler == nullptr) { status = U_MEMORY_ALLOCATION_ERROR; return nullptr; } fScientificHandler.adoptInstead(newScientificHandler); chain = fScientificHandler.getAlias(); } else { // No inner modifier required fMicros.modInner = &fMicros.helpers.emptyStrongModifier; } // Middle modifier (patterns, positive/negative, currency symbols, percent) auto patternModifier = new MutablePatternModifier(false); if (patternModifier == nullptr) { status = U_MEMORY_ALLOCATION_ERROR; return nullptr; } fPatternModifier.adoptInstead(patternModifier); patternModifier->setPatternInfo( macros.affixProvider != nullptr ? 
macros.affixProvider : static_cast<const AffixPatternProvider*>(fPatternInfo.getAlias()), kUndefinedField); patternModifier->setPatternAttributes(fMicros.sign, isPermille); if (patternModifier->needsPlurals()) { patternModifier->setSymbols( fMicros.symbols, currency, unitWidth, resolvePluralRules(macros.rules, macros.locale, status), status); } else { patternModifier->setSymbols(fMicros.symbols, currency, unitWidth, nullptr, status); } if (safe) { fImmutablePatternModifier.adoptInstead(patternModifier->createImmutable(status)); } if (U_FAILURE(status)) { return nullptr; } // Outer modifier (CLDR units and currency long names) if (isCldrUnit) { fLongNameHandler.adoptInstead( LongNameHandler::forMeasureUnit( macros.locale, macros.unit, macros.perUnit, unitWidth, resolvePluralRules(macros.rules, macros.locale, status), chain, status)); chain = fLongNameHandler.getAlias(); } else if (isCurrency && unitWidth == UNUM_UNIT_WIDTH_FULL_NAME) { fLongNameHandler.adoptInstead( LongNameHandler::forCurrencyLongNames( macros.locale, currency, resolvePluralRules(macros.rules, macros.locale, status), chain, status)); chain = fLongNameHandler.getAlias(); } else { // No outer modifier required fMicros.modOuter = &fMicros.helpers.emptyWeakModifier; } if (U_FAILURE(status)) { return nullptr; } // Compact notation if (macros.notation.fType == Notation::NTN_COMPACT) { CompactType compactType = (isCurrency && unitWidth != UNUM_UNIT_WIDTH_FULL_NAME) ? 
CompactType::TYPE_CURRENCY : CompactType::TYPE_DECIMAL; auto newCompactHandler = new CompactHandler( macros.notation.fUnion.compactStyle, macros.locale, nsName, compactType, resolvePluralRules(macros.rules, macros.locale, status), patternModifier, safe, chain, status); if (newCompactHandler == nullptr) { status = U_MEMORY_ALLOCATION_ERROR; return nullptr; } fCompactHandler.adoptInstead(newCompactHandler); chain = fCompactHandler.getAlias(); } if (U_FAILURE(status)) { return nullptr; } // Always add the pattern modifier as the last element of the chain. if (safe) { fImmutablePatternModifier->addToChain(chain); chain = fImmutablePatternModifier.getAlias(); } else { patternModifier->addToChain(chain); chain = patternModifier; } return chain; } const PluralRules* NumberFormatterImpl::resolvePluralRules(const PluralRules* rulesPtr, const Locale& locale, UErrorCode& status) { if (rulesPtr != nullptr) { return rulesPtr; } // Lazily create PluralRules if (fRules.isNull()) { fRules.adoptInstead(PluralRules::forLocale(locale, status)); } return fRules.getAlias(); } int32_t NumberFormatterImpl::writeAffixes(const MicroProps& micros, FormattedStringBuilder& string, int32_t start, int32_t end, UErrorCode& status) { // Always apply the inner modifier (which is "strong"). 
int32_t length = micros.modInner->apply(string, start, end, status); if (micros.padding.isValid()) { length += micros.padding .padAndApply(*micros.modMiddle, *micros.modOuter, string, start, length + end, status); } else { length += micros.modMiddle->apply(string, start, length + end, status); length += micros.modOuter->apply(string, start, length + end, status); } return length; } int32_t NumberFormatterImpl::writeNumber(const MicroProps& micros, DecimalQuantity& quantity, FormattedStringBuilder& string, int32_t index, UErrorCode& status) { int32_t length = 0; if (quantity.isInfinite()) { length += string.insert( length + index, micros.symbols->getSymbol(DecimalFormatSymbols::ENumberFormatSymbol::kInfinitySymbol), {UFIELD_CATEGORY_NUMBER, UNUM_INTEGER_FIELD}, status); } else if (quantity.isNaN()) { length += string.insert( length + index, micros.symbols->getSymbol(DecimalFormatSymbols::ENumberFormatSymbol::kNaNSymbol), {UFIELD_CATEGORY_NUMBER, UNUM_INTEGER_FIELD}, status); } else { // Add the integer digits length += writeIntegerDigits(micros, quantity, string, length + index, status); // Add the decimal point if (quantity.getLowerDisplayMagnitude() < 0 || micros.decimal == UNUM_DECIMAL_SEPARATOR_ALWAYS) { length += string.insert( length + index, micros.useCurrency ? 
micros.symbols->getSymbol( DecimalFormatSymbols::ENumberFormatSymbol::kMonetarySeparatorSymbol) : micros .symbols ->getSymbol( DecimalFormatSymbols::ENumberFormatSymbol::kDecimalSeparatorSymbol), {UFIELD_CATEGORY_NUMBER, UNUM_DECIMAL_SEPARATOR_FIELD}, status); } // Add the fraction digits length += writeFractionDigits(micros, quantity, string, length + index, status); if (length == 0) { // Force output of the digit for value 0 length += utils::insertDigitFromSymbols( string, index, 0, *micros.symbols, {UFIELD_CATEGORY_NUMBER, UNUM_INTEGER_FIELD}, status); } } return length; } int32_t NumberFormatterImpl::writeIntegerDigits(const MicroProps& micros, DecimalQuantity& quantity, FormattedStringBuilder& string, int32_t index, UErrorCode& status) { int length = 0; int integerCount = quantity.getUpperDisplayMagnitude() + 1; for (int i = 0; i < integerCount; i++) { // Add grouping separator if (micros.grouping.groupAtPosition(i, quantity)) { length += string.insert( index, micros.useCurrency ? micros.symbols->getSymbol( DecimalFormatSymbols::ENumberFormatSymbol::kMonetaryGroupingSeparatorSymbol) : micros.symbols->getSymbol( DecimalFormatSymbols::ENumberFormatSymbol::kGroupingSeparatorSymbol), {UFIELD_CATEGORY_NUMBER, UNUM_GROUPING_SEPARATOR_FIELD}, status); } // Get and append the next digit value int8_t nextDigit = quantity.getDigit(i); length += utils::insertDigitFromSymbols( string, index, nextDigit, *micros.symbols, {UFIELD_CATEGORY_NUMBER, UNUM_INTEGER_FIELD}, status); } return length; } int32_t NumberFormatterImpl::writeFractionDigits(const MicroProps& micros, DecimalQuantity& quantity, FormattedStringBuilder& string, int32_t index, UErrorCode& status) { int length = 0; int fractionCount = -quantity.getLowerDisplayMagnitude(); for (int i = 0; i < fractionCount; i++) { // Get and append the next digit value int8_t nextDigit = quantity.getDigit(-i - 1); length += utils::insertDigitFromSymbols( string, length + index, nextDigit, *micros.symbols, {UFIELD_CATEGORY_NUMBER, 
UNUM_FRACTION_FIELD}, status); } return length; } #endif /* #if !UCONFIG_NO_FORMATTING */
endlessm/chromium-browser
third_party/icu/source/i18n/number_formatimpl.cpp
C++
bsd-3-clause
21,061
class CreateSpreePromotionCategories < ActiveRecord::Migration[4.2] def change create_table :spree_promotion_categories do |t| t.string :name t.timestamps null: false, precision: 6 end add_column :spree_promotions, :promotion_category_id, :integer add_index :spree_promotions, :promotion_category_id end end
ayb/spree
core/db/migrate/20140715182625_create_spree_promotion_categories.rb
Ruby
bsd-3-clause
341
<?php namespace Vmwarephp; use Vmwarephp\Exception as Ex; class Vhost { private $service; function __construct($host, $username, $password) { $this->host = $host; $this->username = $username; $this->password = $password; } function getPort() { $port = parse_url($this->host, PHP_URL_PORT); return $port ? : '443'; } function __get($propertyName) { if (!isset($this->$propertyName)) throw new \InvalidArgumentException('Property ' . $propertyName . ' not set on this object!'); return $this->$propertyName; } function __set($propertyName, $value) { $this->validateProperty($propertyName, $value); $this->$propertyName = $value; } function __call($method, $arguments) { if (!$this->service) $this->initializeService(); return call_user_func_array(array($this->service, $method), $arguments); } function getApiType() { return $this->getServiceContent()->about->apiType; } function changeService(\Vmwarephp\Service $service) { $this->service = $service; } private function initializeService() { if (!$this->service) $this->service = \Vmwarephp\Factory\Service::makeConnected($this); } private function validateProperty($propertyName, $value) { if (in_array($propertyName, array('host', 'username', 'password')) && empty($value)) throw new Ex\InvalidVhost('Vhost ' . ucfirst($propertyName) . ' cannot be empty!'); } }
mattiasgeniar/vmwarephp
library/Vmwarephp/Vhost.php
PHP
bsd-3-clause
1,379
/** * Copyright (C) 2016 Turi * All rights reserved. * * This software may be modified and distributed under the terms * of the BSD license. See the LICENSE file for details. */ #ifndef GRAPHLAB_LOGGER_LOG_ROTATE_HPP #define GRAPHLAB_LOGGER_LOG_ROTATE_HPP #include <cstddef> #include <string> namespace graphlab { /** * Sets up log rotation. * The basic procedure is that it will generate files of the form * * \verbatim * [log_file_name].0 * [log_file_name].1 * [log_file_name].2 * etc. * \endverbatim * * When truncate_limit is set, a maximum number of files is maintained. * Beyond which, older files are deleted. * * A symlink [log_file_name].current is also created which always points to the * most recent log file. * * If log rotation has already been set up, this will stop * the the log rotation and begin a new one. * * Not safe for concurrent use. * * \param log_file_name The prefix to output to. Logs will emit to * [log_file_name].0, [log_file_name].1, etc. * \param log_interval The number of seconds between rotations * \param truncate_limit The maximum number of files to maintain. Must be >= 1 */ void begin_log_rotation(std::string log_file_name, size_t log_interval, size_t truncate_limit); /** * Stops log rotation. * * No-op if log rotation was not started. * * Not safe for concurrent use. */ void stop_log_rotation(); } // graphlab #endif // GRAPHLAB_LOGGER_LOG_ROTATE_HPP
dato-code/SFrame
oss_src/logger/log_rotate.hpp
C++
bsd-3-clause
1,524
//===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This provides a class for OpenMP runtime code generation. // //===----------------------------------------------------------------------===// #include "CGOpenMPRuntime.h" #include "CodeGenFunction.h" #include "clang/AST/Decl.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/GlobalValue.h" #include "llvm/IR/Value.h" #include "llvm/Support/raw_ostream.h" #include <cassert> using namespace clang; using namespace CodeGen; CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM) : CGM(CGM), DefaultOpenMPPSource(nullptr) { IdentTy = llvm::StructType::create( "ident_t", CGM.Int32Ty /* reserved_1 */, CGM.Int32Ty /* flags */, CGM.Int32Ty /* reserved_2 */, CGM.Int32Ty /* reserved_3 */, CGM.Int8PtrTy /* psource */, NULL); // Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...) llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty), llvm::PointerType::getUnqual(CGM.Int32Ty)}; Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true); } llvm::Value * CGOpenMPRuntime::GetOrCreateDefaultOpenMPLocation(OpenMPLocationFlags Flags) { llvm::Value *Entry = OpenMPDefaultLocMap.lookup(Flags); if (!Entry) { if (!DefaultOpenMPPSource) { // Initialize default location for psource field of ident_t structure of // all ident_t objects. Format is ";file;function;line;column;;". 
// Taken from // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp_str.c DefaultOpenMPPSource = CGM.GetAddrOfConstantCString(";unknown;unknown;0;0;;"); DefaultOpenMPPSource = llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy); } llvm::GlobalVariable *DefaultOpenMPLocation = cast<llvm::GlobalVariable>( CGM.CreateRuntimeVariable(IdentTy, ".kmpc_default_loc.addr")); DefaultOpenMPLocation->setUnnamedAddr(true); DefaultOpenMPLocation->setConstant(true); DefaultOpenMPLocation->setLinkage(llvm::GlobalValue::PrivateLinkage); llvm::Constant *Zero = llvm::ConstantInt::get(CGM.Int32Ty, 0, true); llvm::Constant *Values[] = {Zero, llvm::ConstantInt::get(CGM.Int32Ty, Flags), Zero, Zero, DefaultOpenMPPSource}; llvm::Constant *Init = llvm::ConstantStruct::get(IdentTy, Values); DefaultOpenMPLocation->setInitializer(Init); return DefaultOpenMPLocation; } return Entry; } llvm::Value *CGOpenMPRuntime::EmitOpenMPUpdateLocation( CodeGenFunction &CGF, SourceLocation Loc, OpenMPLocationFlags Flags) { // If no debug info is generated - return global default location. 
if (CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::NoDebugInfo || Loc.isInvalid()) return GetOrCreateDefaultOpenMPLocation(Flags); assert(CGF.CurFn && "No function in current CodeGenFunction."); llvm::Value *LocValue = nullptr; OpenMPLocMapTy::iterator I = OpenMPLocMap.find(CGF.CurFn); if (I != OpenMPLocMap.end()) { LocValue = I->second; } else { // Generate "ident_t .kmpc_loc.addr;" llvm::AllocaInst *AI = CGF.CreateTempAlloca(IdentTy, ".kmpc_loc.addr"); AI->setAlignment(CGM.getDataLayout().getPrefTypeAlignment(IdentTy)); OpenMPLocMap[CGF.CurFn] = AI; LocValue = AI; CGBuilderTy::InsertPointGuard IPG(CGF.Builder); CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt); CGF.Builder.CreateMemCpy(LocValue, GetOrCreateDefaultOpenMPLocation(Flags), llvm::ConstantExpr::getSizeOf(IdentTy), CGM.PointerAlignInBytes); } // char **psource = &.kmpc_loc_<flags>.addr.psource; llvm::Value *PSource = CGF.Builder.CreateConstInBoundsGEP2_32(LocValue, 0, IdentField_PSource); auto OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding()); if (OMPDebugLoc == nullptr) { SmallString<128> Buffer2; llvm::raw_svector_ostream OS2(Buffer2); // Build debug location PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc); OS2 << ";" << PLoc.getFilename() << ";"; if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl)) { OS2 << FD->getQualifiedNameAsString(); } OS2 << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;"; OMPDebugLoc = CGF.Builder.CreateGlobalStringPtr(OS2.str()); OpenMPDebugLocMap[Loc.getRawEncoding()] = OMPDebugLoc; } // *psource = ";<File>;<Function>;<Line>;<Column>;;"; CGF.Builder.CreateStore(OMPDebugLoc, PSource); return LocValue; } llvm::Value *CGOpenMPRuntime::GetOpenMPGlobalThreadNum(CodeGenFunction &CGF, SourceLocation Loc) { assert(CGF.CurFn && "No function in current CodeGenFunction."); llvm::Value *GTid = nullptr; OpenMPGtidMapTy::iterator I = OpenMPGtidMap.find(CGF.CurFn); if (I != OpenMPGtidMap.end()) { GTid = I->second; 
} else { // Generate "int32 .kmpc_global_thread_num.addr;" CGBuilderTy::InsertPointGuard IPG(CGF.Builder); CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt); llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc)}; GTid = CGF.EmitRuntimeCall( CreateRuntimeFunction(OMPRTL__kmpc_global_thread_num), Args); OpenMPGtidMap[CGF.CurFn] = GTid; } return GTid; } void CGOpenMPRuntime::FunctionFinished(CodeGenFunction &CGF) { assert(CGF.CurFn && "No function in current CodeGenFunction."); if (OpenMPGtidMap.count(CGF.CurFn)) OpenMPGtidMap.erase(CGF.CurFn); if (OpenMPLocMap.count(CGF.CurFn)) OpenMPLocMap.erase(CGF.CurFn); } llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() { return llvm::PointerType::getUnqual(IdentTy); } llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() { return llvm::PointerType::getUnqual(Kmpc_MicroTy); } llvm::Constant * CGOpenMPRuntime::CreateRuntimeFunction(OpenMPRTLFunction Function) { llvm::Constant *RTLFn = nullptr; switch (Function) { case OMPRTL__kmpc_fork_call: { // Build void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro // microtask, ...); llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, getKmpc_MicroPointerTy()}; llvm::FunctionType *FnTy = llvm::FunctionType::get(CGM.VoidTy, TypeParams, true); RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_call"); break; } case OMPRTL__kmpc_global_thread_num: { // Build kmp_int32 __kmpc_global_thread_num(ident_t *loc); llvm::Type *TypeParams[] = {getIdentTyPointerTy()}; llvm::FunctionType *FnTy = llvm::FunctionType::get(CGM.Int32Ty, TypeParams, false); RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_global_thread_num"); break; } } return RTLFn; }
dededong/goblin-core
riscv/llvm/3.5/cfe-3.5.0.src/lib/CodeGen/CGOpenMPRuntime.cpp
C++
bsd-3-clause
7,142
package org.hisp.dhis.datastatistics; /* * Copyright (c) 2004-2017, University of Oslo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * Neither the name of the HISP project nor the names of its contributors may * be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ import com.fasterxml.jackson.annotation.JsonProperty; import org.hisp.dhis.common.BaseIdentifiableObject; /** * DataStatistics object to be saved as snapshot. * * @author Julie Hill Roa * @author Yrjan A. F. 
Fraschetti */ public class DataStatistics extends BaseIdentifiableObject { private Double mapViews; private Double chartViews; private Double reportTableViews; private Double eventReportViews; private Double eventChartViews; private Double dashboardViews; private Double dataSetReportViews; private Double totalViews; private Double savedMaps; private Double savedCharts; private Double savedReportTables; private Double savedEventReports; private Double savedEventCharts; private Double savedDashboards; private Double savedIndicators; private Double savedDataValues; private Integer activeUsers; private Integer users; public DataStatistics() { } public DataStatistics( Double mapViews, Double chartViews, Double reportTableViews, Double eventReportViews, Double eventChartViews, Double dashboardViews, Double dataSetReportViews, Double totalViews, Double savedMaps, Double savedCharts, Double savedReportTables, Double savedEventReports, Double savedEventCharts, Double savedDashboards, Double savedIndicators, Double savedDataValues, Integer activeUsers, Integer users ) { this.mapViews = mapViews; this.chartViews = chartViews; this.reportTableViews = reportTableViews; this.eventReportViews = eventReportViews; this.eventChartViews = eventChartViews; this.dashboardViews = dashboardViews; this.dataSetReportViews = dataSetReportViews; this.totalViews = totalViews; this.savedMaps = savedMaps; this.savedCharts = savedCharts; this.savedReportTables = savedReportTables; this.savedEventReports = savedEventReports; this.savedEventCharts = savedEventCharts; this.savedDashboards = savedDashboards; this.savedIndicators = savedIndicators; this.savedDataValues = savedDataValues; this.activeUsers = activeUsers; this.users = users; } @JsonProperty public Integer getActiveUsers() { return activeUsers; } public void setActiveUsers( Integer activeUsers ) { this.activeUsers = activeUsers; } @JsonProperty public Double getMapViews() { return mapViews; } public void setMapViews( Double mapViews ) { 
this.mapViews = mapViews; } @JsonProperty public Double getChartViews() { return chartViews; } public void setChartViews( Double chartViews ) { this.chartViews = chartViews; } @JsonProperty public Double getReportTableViews() { return reportTableViews; } public void setReportTableViews( Double reportTableViews ) { this.reportTableViews = reportTableViews; } @JsonProperty public Double getEventReportViews() { return eventReportViews; } public void setEventReportViews( Double eventReportViews ) { this.eventReportViews = eventReportViews; } @JsonProperty public Double getEventChartViews() { return eventChartViews; } public void setEventChartViews( Double eventChartViews ) { this.eventChartViews = eventChartViews; } @JsonProperty public Double getDashboardViews() { return dashboardViews; } public void setDashboardViews( Double dashboardViews ) { this.dashboardViews = dashboardViews; } @JsonProperty public Double getDataSetReportViews() { return dataSetReportViews; } public void setDataSetReportViews( Double dataSetReportViews ) { this.dataSetReportViews = dataSetReportViews; } @JsonProperty public Double getTotalViews() { return totalViews; } public void setTotalViews( Double totalViews ) { this.totalViews = totalViews; } @JsonProperty public Double getSavedMaps() { return savedMaps; } public void setSavedMaps( Double savedMaps ) { this.savedMaps = savedMaps; } @JsonProperty public Double getSavedCharts() { return savedCharts; } public void setSavedCharts( Double savedCharts ) { this.savedCharts = savedCharts; } @JsonProperty public Double getSavedReportTables() { return savedReportTables; } public void setSavedReportTables( Double savedReportTables ) { this.savedReportTables = savedReportTables; } @JsonProperty public Double getSavedEventReports() { return savedEventReports; } public void setSavedEventReports( Double savedEventReports ) { this.savedEventReports = savedEventReports; } @JsonProperty public Double getSavedEventCharts() { return savedEventCharts; } public 
void setSavedEventCharts( Double savedEventCharts ) { this.savedEventCharts = savedEventCharts; } @JsonProperty public Double getSavedDashboards() { return savedDashboards; } public void setSavedDashboards( Double savedDashboards ) { this.savedDashboards = savedDashboards; } @JsonProperty public Double getSavedIndicators() { return savedIndicators; } public void setSavedIndicators( Double savedIndicators ) { this.savedIndicators = savedIndicators; } @JsonProperty public Double getSavedDataValues() { return savedDataValues; } public void setSavedDataValues( Double savedDataValues ) { this.savedDataValues = savedDataValues; } @JsonProperty public Integer getUsers() { return users; } @JsonProperty public void setUsers( Integer users ) { this.users = users; } @Override public String toString() { return super.toString() + "DataStatistics{" + "mapViews=" + mapViews + ", chartViews=" + chartViews + ", reportTableViews=" + reportTableViews + ", eventReportViews=" + eventReportViews + ", eventChartViews=" + eventChartViews + ", dashboardViews=" + dashboardViews + ", totalViews=" + totalViews + ", savedMaps=" + savedMaps + ", savedCharts=" + savedCharts + ", savedReportTables=" + savedReportTables + ", savedEventReports=" + savedEventReports + ", savedEventCharts=" + savedEventCharts + ", savedDashboards=" + savedDashboards + ", savedIndicators=" + savedIndicators + ", savedDataValues=" + savedDataValues + ", activeUsers=" + activeUsers + ", users=" + users + '}'; } }
vmluan/dhis2-core
dhis-2/dhis-api/src/main/java/org/hisp/dhis/datastatistics/DataStatistics.java
Java
bsd-3-clause
8,631
package org.hisp.dhis.report; /* * Copyright (c) 2004-2017, University of Oslo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * Neither the name of the HISP project nor the names of its contributors may * be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ import java.io.OutputStream; import java.io.Writer; import java.util.Date; import java.util.List; import net.sf.jasperreports.engine.JasperPrint; import org.hisp.dhis.period.Period; /** * @author Lars Helge Overland */ public interface ReportService { String ID = ReportService.class.getName(); String REPORTTYPE_PDF = "pdf"; String REPORTTYPE_XLS = "xls"; String PARAM_RELATIVE_PERIODS = "periods"; String PARAM_RELATIVE_ISO_PERIODS = "periods_iso"; String PARAM_ORG_UNITS = "organisationunits"; String PARAM_ORG_UNITS_UID = "organisationunits_uid"; String PARAM_ORGANISATIONUNIT_LEVEL = "organisationunit_level"; String PARAM_ORGANISATIONUNIT_LEVEL_COLUMN = "organisationunit_level_column"; String PARAM_ORGANISATIONUNIT_UID_LEVEL_COLUMN = "organisationunit_uid_level_column"; String PARAM_ORGANISATIONUNIT_COLUMN_NAME = "organisationunit_name"; String PARAM_PERIOD_NAME = "period_name"; /** * Renders a Jasper Report. * <p/> * Will make the following params available: * <p/> * "periods" String of relative period ids (String) * "organisationunits" String of selected organisation unit ids (String) * "period_name" Name of the selected period (String) * "organisationunit_name" Name of the selected organisation unit (String) * "organisationunit_level" Level of the selected organisation unit (int) * "organisationunit_level_column" Name of the relevant level column in * table _orgunitstructure (String) * * @param out the OutputStream to write the report to. * @param reportUid the uid of the report to render. * @param period the period to use as parameter. * @param organisationUnitUid the uid of the org unit to use as parameter. * @param type the type of the report, can be "xls" and "pdf". */ JasperPrint renderReport( OutputStream out, String reportUid, Period period, String organisationUnitUid, String type ); /** * Renders and writes a HTML-based standard report to the given Writer. * * @param writer the Writer. * @param uid the report uid. * @param date the date. 
* @param ou the organisation unit uid. */ void renderHtmlReport( Writer writer, String uid, Date date, String ou ); /** * Saves a Report. * * @param report the Report to save. * @return the generated identifier. */ int saveReport( Report report ); /** * Retrieves the Report with the given identifier. * * @param id the identifier of the Report to retrieve. * @return the Report. */ Report getReport( int id ); /** * Retrieves the Report with the given uid. * * @param uid the uid of the Report to retrieve. * @return the Report. */ Report getReport( String uid ); /** * Returns the total number of reports. * * @return the total number of reports. */ int getReportCount(); /** * Returns the number of reports which names are like the given name. * Returns the number of reports which names are like the given name. */ int getReportCountByName( String name ); /** * Retrieves the given number of maximum reports starting at the given start * index. Reports are sorted on the name property. * * @param first the start index. * @param max the maximum number of reports. * @return a list of reports. */ List<Report> getReportsBetween( int first, int max ); /** * Retrieves the given number of maximum reports starting at the given start * index. Reports are sorted on the name property. * * @param first the start index. * @param max the maximum number of reports. * @return a List of reports. */ List<Report> getReportsBetweenByName( String name, int first, int max ); /** * Deletes a Report. * * @param report the Report to delete. */ void deleteReport( Report report ); /** * Retrieves all Reports. * * @return a List of Reports. */ List<Report> getAllReports(); /** * Retrieves the Report with the given name. * * @param name the name. * @return the Report. */ List<Report> getReportByName( String name ); /** * Retrieves Reports with the given uids. * * @param uids the list of uids. * @return a list of ReportTables. */ List<Report> getReportsByUid( List<String> uids ); }
troyel/dhis2-core
dhis-2/dhis-api/src/main/java/org/hisp/dhis/report/ReportService.java
Java
bsd-3-clause
6,363
/* * Copyright (c) 2011-2012 ARM Limited * All rights reserved * * The license below extends only to copyright in the software and shall * not be construed as granting a license to any other intellectual * property including but not limited to intellectual property relating * to a hardware implementation of the functionality of the software * licensed hereunder. You may use the software subject to the license * terms below provided that you ensure that this notice is replicated * unmodified and in its entirety in all distributions of the software, * modified or unmodified, in source code or in binary form. * * Copyright (c) 2006 The Regents of The University of Michigan * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer; * redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution; * neither the name of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Authors: Ali Saidi * Andreas Hansson * William Wang */ /** * @file * Definition of a bus object. */ #include "base/misc.hh" #include "base/trace.hh" #include "debug/Bus.hh" #include "debug/BusAddrRanges.hh" #include "debug/Drain.hh" #include "mem/bus.hh" BaseBus::BaseBus(const BaseBusParams *p) : MemObject(p), headerCycles(p->header_cycles), width(p->width), defaultPortID(InvalidPortID), useDefaultRange(p->use_default_range), defaultBlockSize(p->block_size), cachedBlockSize(0), cachedBlockSizeValid(false) { //width, clock period, and header cycles must be positive if (width <= 0) fatal("Bus width must be positive\n"); if (clock <= 0) fatal("Bus clock period must be positive\n"); if (headerCycles <= 0) fatal("Number of header cycles must be positive\n"); } BaseBus::~BaseBus() { for (MasterPortIter m = masterPorts.begin(); m != masterPorts.end(); ++m) { delete *m; } for (SlavePortIter s = slavePorts.begin(); s != slavePorts.end(); ++s) { delete *s; } } MasterPort & BaseBus::getMasterPort(const std::string &if_name, int idx) { if (if_name == "master" && idx < masterPorts.size()) { // the master port index translates directly to the vector position return *masterPorts[idx]; } else if (if_name == "default") { return *masterPorts[defaultPortID]; } else { return MemObject::getMasterPort(if_name, idx); } } SlavePort & BaseBus::getSlavePort(const std::string &if_name, int idx) { if (if_name == "slave" && idx < slavePorts.size()) { // the slave port 
index translates directly to the vector position return *slavePorts[idx]; } else { return MemObject::getSlavePort(if_name, idx); } } Tick BaseBus::calcPacketTiming(PacketPtr pkt) { // determine the current time rounded to the closest following // clock edge Tick now = nextCycle(); Tick headerTime = now + headerCycles * clock; // The packet will be sent. Figure out how long it occupies the bus, and // how much of that time is for the first "word", aka bus width. int numCycles = 0; if (pkt->hasData()) { // If a packet has data, it needs ceil(size/width) cycles to send it int dataSize = pkt->getSize(); numCycles += dataSize/width; if (dataSize % width) numCycles++; } // The first word will be delivered after the current tick, the delivery // of the address if any, and one bus cycle to deliver the data pkt->firstWordTime = headerTime + clock; pkt->finishTime = headerTime + numCycles * clock; return headerTime; } template <typename PortClass> BaseBus::Layer<PortClass>::Layer(BaseBus& _bus, const std::string& _name, Tick _clock) : bus(_bus), _name(_name), state(IDLE), clock(_clock), drainEvent(NULL), releaseEvent(this) { } template <typename PortClass> void BaseBus::Layer<PortClass>::occupyLayer(Tick until) { // ensure the state is busy or in retry and never idle at this // point, as the bus should transition from idle as soon as it has // decided to forward the packet to prevent any follow-on calls to // sendTiming seeing an unoccupied bus assert(state != IDLE); // note that we do not change the bus state here, if we are going // from idle to busy it is handled by tryTiming, and if we // are in retry we should remain in retry such that // succeededTiming still sees the accurate state // until should never be 0 as express snoops never occupy the bus assert(until != 0); bus.schedule(releaseEvent, until); DPRINTF(BaseBus, "The bus is now busy from tick %d to %d\n", curTick(), until); } template <typename PortClass> bool BaseBus::Layer<PortClass>::tryTiming(PortClass* port) 
{ // first we see if the bus is busy, next we check if we are in a // retry with a port other than the current one if (state == BUSY || (state == RETRY && port != retryList.front())) { // put the port at the end of the retry list retryList.push_back(port); return false; } // update the state which is shared for request, response and // snoop responses, if we were idle we are now busy, if we are in // a retry, then do not change if (state == IDLE) state = BUSY; return true; } template <typename PortClass> void BaseBus::Layer<PortClass>::succeededTiming(Tick busy_time) { // if a retrying port succeeded, also take it off the retry list if (state == RETRY) { DPRINTF(BaseBus, "Remove retry from list %s\n", retryList.front()->name()); retryList.pop_front(); state = BUSY; } // we should either have gone from idle to busy in the // tryTiming test, or just gone from a retry to busy assert(state == BUSY); // occupy the bus accordingly occupyLayer(busy_time); } template <typename PortClass> void BaseBus::Layer<PortClass>::failedTiming(PortClass* port, Tick busy_time) { // if we are not in a retry, i.e. 
busy (but never idle), or we are // in a retry but not for the current port, then add the port at // the end of the retry list if (state != RETRY || port != retryList.front()) { retryList.push_back(port); } // even if we retried the current one and did not succeed, // we are no longer retrying but instead busy state = BUSY; // occupy the bus accordingly occupyLayer(busy_time); } template <typename PortClass> void BaseBus::Layer<PortClass>::releaseLayer() { // releasing the bus means we should now be idle assert(state == BUSY); assert(!releaseEvent.scheduled()); // update the state state = IDLE; // bus is now idle, so if someone is waiting we can retry if (!retryList.empty()) { // note that we block (return false on recvTiming) both // because the bus is busy and because the destination is // busy, and in the latter case the bus may be released before // we see a retry from the destination retryWaiting(); } else if (drainEvent) { DPRINTF(Drain, "Bus done draining, processing drain event\n"); //If we weren't able to drain before, do it now. drainEvent->process(); // Clear the drain event once we're done with it. drainEvent = NULL; } } template <typename PortClass> void BaseBus::Layer<PortClass>::retryWaiting() { // this should never be called with an empty retry list assert(!retryList.empty()); // we always go to retrying from idle assert(state == IDLE); // update the state which is shared for request, response and // snoop responses state = RETRY; // note that we might have blocked on the receiving port being // busy (rather than the bus itself) and now call retry before the // destination called retry on the bus retryList.front()->sendRetry(); // If the bus is still in the retry state, sendTiming wasn't // called in zero time (e.g. the cache does this) if (state == RETRY) { retryList.pop_front(); //Burn a cycle for the missed grant. 
// update the state which is shared for request, response and // snoop responses state = BUSY; // determine the current time rounded to the closest following // clock edge Tick now = bus.nextCycle(); occupyLayer(now + clock); } } template <typename PortClass> void BaseBus::Layer<PortClass>::recvRetry() { // we got a retry from a peer that we tried to send something to // and failed, but we sent it on the account of someone else, and // that source port should be on our retry list, however if the // bus layer is released before this happens and the retry (from // the bus point of view) is successful then this no longer holds // and we could in fact have an empty retry list if (retryList.empty()) return; // if the bus layer is idle if (state == IDLE) { // note that we do not care who told us to retry at the moment, we // merely let the first one on the retry list go retryWaiting(); } } PortID BaseBus::findPort(Addr addr) { /* An interval tree would be a better way to do this. --ali. */ PortID dest_id = checkPortCache(addr); if (dest_id != InvalidPortID) return dest_id; // Check normal port ranges PortMapConstIter i = portMap.find(RangeSize(addr,1)); if (i != portMap.end()) { dest_id = i->second; updatePortCache(dest_id, i->first.start, i->first.end); return dest_id; } // Check if this matches the default range if (useDefaultRange) { AddrRangeConstIter a_end = defaultRange.end(); for (AddrRangeConstIter i = defaultRange.begin(); i != a_end; i++) { if (*i == addr) { DPRINTF(BusAddrRanges, " found addr %#llx on default\n", addr); return defaultPortID; } } } else if (defaultPortID != InvalidPortID) { DPRINTF(BusAddrRanges, "Unable to find destination for addr %#llx, " "will use default port\n", addr); return defaultPortID; } // we should use the range for the default port and it did not // match, or the default port is not set fatal("Unable to find destination for addr %#llx on bus %s\n", addr, name()); } /** Function called by the port when the bus is receiving a range 
change.*/ void BaseBus::recvRangeChange(PortID master_port_id) { AddrRangeList ranges; AddrRangeIter iter; if (inRecvRangeChange.count(master_port_id)) return; inRecvRangeChange.insert(master_port_id); DPRINTF(BusAddrRanges, "received RangeChange from device id %d\n", master_port_id); clearPortCache(); if (master_port_id == defaultPortID) { defaultRange.clear(); // Only try to update these ranges if the user set a default responder. if (useDefaultRange) { // get the address ranges of the connected slave port AddrRangeList ranges = masterPorts[master_port_id]->getAddrRanges(); for(iter = ranges.begin(); iter != ranges.end(); iter++) { defaultRange.push_back(*iter); DPRINTF(BusAddrRanges, "Adding range %#llx - %#llx for default range\n", iter->start, iter->end); } } } else { assert(master_port_id < masterPorts.size() && master_port_id >= 0); MasterPort *port = masterPorts[master_port_id]; // Clean out any previously existent ids for (PortMapIter portIter = portMap.begin(); portIter != portMap.end(); ) { if (portIter->second == master_port_id) portMap.erase(portIter++); else portIter++; } // get the address ranges of the connected slave port ranges = port->getAddrRanges(); for (iter = ranges.begin(); iter != ranges.end(); iter++) { DPRINTF(BusAddrRanges, "Adding range %#llx - %#llx for id %d\n", iter->start, iter->end, master_port_id); if (portMap.insert(*iter, master_port_id) == portMap.end()) { PortID conflict_id = portMap.find(*iter)->second; fatal("%s has two ports with same range:\n\t%s\n\t%s\n", name(), masterPorts[master_port_id]->getSlavePort().name(), masterPorts[conflict_id]->getSlavePort().name()); } } } DPRINTF(BusAddrRanges, "port list has %d entries\n", portMap.size()); // tell all our neighbouring master ports that our address range // has changed for (SlavePortConstIter p = slavePorts.begin(); p != slavePorts.end(); ++p) (*p)->sendRangeChange(); inRecvRangeChange.erase(master_port_id); } AddrRangeList BaseBus::getAddrRanges() const { AddrRangeList 
ranges; DPRINTF(BusAddrRanges, "received address range request, returning:\n"); for (AddrRangeConstIter dflt_iter = defaultRange.begin(); dflt_iter != defaultRange.end(); dflt_iter++) { ranges.push_back(*dflt_iter); DPRINTF(BusAddrRanges, " -- Dflt: %#llx : %#llx\n",dflt_iter->start, dflt_iter->end); } for (PortMapConstIter portIter = portMap.begin(); portIter != portMap.end(); portIter++) { bool subset = false; for (AddrRangeConstIter dflt_iter = defaultRange.begin(); dflt_iter != defaultRange.end(); dflt_iter++) { if ((portIter->first.start < dflt_iter->start && portIter->first.end >= dflt_iter->start) || (portIter->first.start < dflt_iter->end && portIter->first.end >= dflt_iter->end)) fatal("Devices can not set ranges that itersect the default set\ but are not a subset of the default set.\n"); if (portIter->first.start >= dflt_iter->start && portIter->first.end <= dflt_iter->end) { subset = true; DPRINTF(BusAddrRanges, " -- %#llx : %#llx is a SUBSET\n", portIter->first.start, portIter->first.end); } } if (!subset) { ranges.push_back(portIter->first); DPRINTF(BusAddrRanges, " -- %#llx : %#llx\n", portIter->first.start, portIter->first.end); } } return ranges; } unsigned BaseBus::findBlockSize() { if (cachedBlockSizeValid) return cachedBlockSize; unsigned max_bs = 0; for (MasterPortConstIter m = masterPorts.begin(); m != masterPorts.end(); ++m) { unsigned tmp_bs = (*m)->peerBlockSize(); if (tmp_bs > max_bs) max_bs = tmp_bs; } for (SlavePortConstIter s = slavePorts.begin(); s != slavePorts.end(); ++s) { unsigned tmp_bs = (*s)->peerBlockSize(); if (tmp_bs > max_bs) max_bs = tmp_bs; } if (max_bs == 0) max_bs = defaultBlockSize; if (max_bs != 64) warn_once("Blocksize found to not be 64... hmm... probably not.\n"); cachedBlockSize = max_bs; cachedBlockSizeValid = true; return max_bs; } template <typename PortClass> unsigned int BaseBus::Layer<PortClass>::drain(Event * de) { //We should check that we're not "doing" anything, and that noone is //waiting. 
We might be idle but have someone waiting if the device we //contacted for a retry didn't actually retry. if (!retryList.empty() || state != IDLE) { DPRINTF(Drain, "Bus not drained\n"); drainEvent = de; return 1; } return 0; } /** * Bus layer template instantiations. Could be removed with _impl.hh * file, but since there are only two given options (MasterPort and * SlavePort) it seems a bit excessive at this point. */ template class BaseBus::Layer<SlavePort>; template class BaseBus::Layer<MasterPort>;
aferr/LatticeMemCtl
src/mem/bus.cc
C++
bsd-3-clause
17,480
/* * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree. */ #include "modules/audio_processing/aec3/signal_dependent_erle_estimator.h" #include <algorithm> #include <functional> #include <numeric> #include "modules/audio_processing/aec3/spectrum_buffer.h" #include "rtc_base/numerics/safe_minmax.h" namespace webrtc { namespace { constexpr std::array<size_t, SignalDependentErleEstimator::kSubbands + 1> kBandBoundaries = {1, 8, 16, 24, 32, 48, kFftLengthBy2Plus1}; std::array<size_t, kFftLengthBy2Plus1> FormSubbandMap() { std::array<size_t, kFftLengthBy2Plus1> map_band_to_subband; size_t subband = 1; for (size_t k = 0; k < map_band_to_subband.size(); ++k) { RTC_DCHECK_LT(subband, kBandBoundaries.size()); if (k >= kBandBoundaries[subband]) { subband++; RTC_DCHECK_LT(k, kBandBoundaries[subband]); } map_band_to_subband[k] = subband - 1; } return map_band_to_subband; } // Defines the size in blocks of the sections that are used for dividing the // linear filter. The sections are split in a non-linear manner so that lower // sections that typically represent the direct path have a larger resolution // than the higher sections which typically represent more reverberant acoustic // paths. 
std::vector<size_t> DefineFilterSectionSizes(size_t delay_headroom_blocks, size_t num_blocks, size_t num_sections) { size_t filter_length_blocks = num_blocks - delay_headroom_blocks; std::vector<size_t> section_sizes(num_sections); size_t remaining_blocks = filter_length_blocks; size_t remaining_sections = num_sections; size_t estimator_size = 2; size_t idx = 0; while (remaining_sections > 1 && remaining_blocks > estimator_size * remaining_sections) { RTC_DCHECK_LT(idx, section_sizes.size()); section_sizes[idx] = estimator_size; remaining_blocks -= estimator_size; remaining_sections--; estimator_size *= 2; idx++; } size_t last_groups_size = remaining_blocks / remaining_sections; for (; idx < num_sections; idx++) { section_sizes[idx] = last_groups_size; } section_sizes[num_sections - 1] += remaining_blocks - last_groups_size * remaining_sections; return section_sizes; } // Forms the limits in blocks for each filter section. Those sections // are used for analyzing the echo estimates and investigating which // linear filter sections contribute most to the echo estimate energy. 
std::vector<size_t> SetSectionsBoundaries(size_t delay_headroom_blocks, size_t num_blocks, size_t num_sections) { std::vector<size_t> estimator_boundaries_blocks(num_sections + 1); if (estimator_boundaries_blocks.size() == 2) { estimator_boundaries_blocks[0] = 0; estimator_boundaries_blocks[1] = num_blocks; return estimator_boundaries_blocks; } RTC_DCHECK_GT(estimator_boundaries_blocks.size(), 2); const std::vector<size_t> section_sizes = DefineFilterSectionSizes(delay_headroom_blocks, num_blocks, estimator_boundaries_blocks.size() - 1); size_t idx = 0; size_t current_size_block = 0; RTC_DCHECK_EQ(section_sizes.size() + 1, estimator_boundaries_blocks.size()); estimator_boundaries_blocks[0] = delay_headroom_blocks; for (size_t k = delay_headroom_blocks; k < num_blocks; ++k) { current_size_block++; if (current_size_block >= section_sizes[idx]) { idx = idx + 1; if (idx == section_sizes.size()) { break; } estimator_boundaries_blocks[idx] = k + 1; current_size_block = 0; } } estimator_boundaries_blocks[section_sizes.size()] = num_blocks; return estimator_boundaries_blocks; } std::array<float, SignalDependentErleEstimator::kSubbands> SetMaxErleSubbands(float max_erle_l, float max_erle_h, size_t limit_subband_l) { std::array<float, SignalDependentErleEstimator::kSubbands> max_erle; std::fill(max_erle.begin(), max_erle.begin() + limit_subband_l, max_erle_l); std::fill(max_erle.begin() + limit_subband_l, max_erle.end(), max_erle_h); return max_erle; } } // namespace SignalDependentErleEstimator::SignalDependentErleEstimator( const EchoCanceller3Config& config, size_t num_capture_channels) : min_erle_(config.erle.min), num_sections_(config.erle.num_sections), num_blocks_(config.filter.refined.length_blocks), delay_headroom_blocks_(config.delay.delay_headroom_samples / kBlockSize), band_to_subband_(FormSubbandMap()), max_erle_(SetMaxErleSubbands(config.erle.max_l, config.erle.max_h, band_to_subband_[kFftLengthBy2 / 2])), 
section_boundaries_blocks_(SetSectionsBoundaries(delay_headroom_blocks_, num_blocks_, num_sections_)), erle_(num_capture_channels), S2_section_accum_( num_capture_channels, std::vector<std::array<float, kFftLengthBy2Plus1>>(num_sections_)), erle_estimators_( num_capture_channels, std::vector<std::array<float, kSubbands>>(num_sections_)), erle_ref_(num_capture_channels), correction_factors_( num_capture_channels, std::vector<std::array<float, kSubbands>>(num_sections_)), num_updates_(num_capture_channels), n_active_sections_(num_capture_channels) { RTC_DCHECK_LE(num_sections_, num_blocks_); RTC_DCHECK_GE(num_sections_, 1); Reset(); } SignalDependentErleEstimator::~SignalDependentErleEstimator() = default; void SignalDependentErleEstimator::Reset() { for (size_t ch = 0; ch < erle_.size(); ++ch) { erle_[ch].fill(min_erle_); for (auto& erle_estimator : erle_estimators_[ch]) { erle_estimator.fill(min_erle_); } erle_ref_[ch].fill(min_erle_); for (auto& factor : correction_factors_[ch]) { factor.fill(1.0f); } num_updates_[ch].fill(0); n_active_sections_[ch].fill(0); } } // Updates the Erle estimate by analyzing the current input signals. It takes // the render buffer and the filter frequency response in order to do an // estimation of the number of sections of the linear filter that are needed // for getting the majority of the energy in the echo estimate. Based on that // number of sections, it updates the erle estimation by introducing a // correction factor to the erle that is given as an input to this method. 
void SignalDependentErleEstimator::Update( const RenderBuffer& render_buffer, rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>> filter_frequency_responses, rtc::ArrayView<const float, kFftLengthBy2Plus1> X2, rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2, rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> E2, rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> average_erle, const std::vector<bool>& converged_filters) { RTC_DCHECK_GT(num_sections_, 1); // Gets the number of filter sections that are needed for achieving 90 % // of the power spectrum energy of the echo estimate. ComputeNumberOfActiveFilterSections(render_buffer, filter_frequency_responses); // Updates the correction factors that is used for correcting the erle and // adapt it to the particular characteristics of the input signal. UpdateCorrectionFactors(X2, Y2, E2, converged_filters); // Applies the correction factor to the input erle for getting a more refined // erle estimation for the current input signal. for (size_t ch = 0; ch < erle_.size(); ++ch) { for (size_t k = 0; k < kFftLengthBy2; ++k) { RTC_DCHECK_GT(correction_factors_[ch].size(), n_active_sections_[ch][k]); float correction_factor = correction_factors_[ch][n_active_sections_[ch][k]] [band_to_subband_[k]]; erle_[ch][k] = rtc::SafeClamp(average_erle[ch][k] * correction_factor, min_erle_, max_erle_[band_to_subband_[k]]); } } } void SignalDependentErleEstimator::Dump( const std::unique_ptr<ApmDataDumper>& data_dumper) const { for (auto& erle : erle_estimators_[0]) { data_dumper->DumpRaw("aec3_all_erle", erle); } data_dumper->DumpRaw("aec3_ref_erle", erle_ref_[0]); for (auto& factor : correction_factors_[0]) { data_dumper->DumpRaw("aec3_erle_correction_factor", factor); } } // Estimates for each band the smallest number of sections in the filter that // together constitute 90% of the estimated echo energy. 
void SignalDependentErleEstimator::ComputeNumberOfActiveFilterSections(
    const RenderBuffer& render_buffer,
    rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>>
        filter_frequency_responses) {
  RTC_DCHECK_GT(num_sections_, 1);
  // Computes an approximation of the power spectrum if the filter would have
  // been limited to a certain number of filter sections.
  ComputeEchoEstimatePerFilterSection(render_buffer,
                                      filter_frequency_responses);
  // For each band, computes the number of filter sections that are needed for
  // achieving the 90 % energy in the echo estimate.
  ComputeActiveFilterSections();
}

// For each channel whose linear filter has converged, updates the per-subband
// ERLE statistics that are bucketed by the number of active filter sections
// (erle_estimators_), a bucket-independent reference ERLE (erle_ref_), and
// from their ratio the correction factors (in the loop that continues past
// this chunk).
void SignalDependentErleEstimator::UpdateCorrectionFactors(
    rtc::ArrayView<const float, kFftLengthBy2Plus1> X2,
    rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> Y2,
    rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> E2,
    const std::vector<bool>& converged_filters) {
  for (size_t ch = 0; ch < converged_filters.size(); ++ch) {
    if (converged_filters[ch]) {
      // Minimum render subband energy required before an ERLE sample is used.
      constexpr float kX2BandEnergyThreshold = 44015068.0f;
      constexpr float kSmthConstantDecreases = 0.1f;
      // Increases are smoothed with half the step size of decreases, i.e.
      // the estimate rises more cautiously than it falls.
      constexpr float kSmthConstantIncreases = kSmthConstantDecreases / 2.f;
      // Sums the per-bin power spectrum into the kSubbands subband bins
      // delimited by kBandBoundaries.
      auto subband_powers = [](rtc::ArrayView<const float> power_spectrum,
                               rtc::ArrayView<float> power_spectrum_subbands) {
        for (size_t subband = 0; subband < kSubbands; ++subband) {
          RTC_DCHECK_LE(kBandBoundaries[subband + 1], power_spectrum.size());
          power_spectrum_subbands[subband] = std::accumulate(
              power_spectrum.begin() + kBandBoundaries[subband],
              power_spectrum.begin() + kBandBoundaries[subband + 1], 0.f);
        }
      };
      std::array<float, kSubbands> X2_subbands, E2_subbands, Y2_subbands;
      subband_powers(X2, X2_subbands);
      subband_powers(E2[ch], E2_subbands);
      subband_powers(Y2[ch], Y2_subbands);
      std::array<size_t, kSubbands> idx_subbands;
      for (size_t subband = 0; subband < kSubbands; ++subband) {
        // When aggregating the number of active sections in the filter for
        // different bands we choose to take the minimum of all of them. As an
        // example, if for one of the bands it is the direct path its refined
        // contributor to the final echo estimate, we consider the direct path
        // is as well the refined contributor for the subband that contains that
        // particular band. That aggregate number of sections will be later used
        // as the identifier of the erle estimator that needs to be updated.
        RTC_DCHECK_LE(kBandBoundaries[subband + 1],
                      n_active_sections_[ch].size());
        idx_subbands[subband] = *std::min_element(
            n_active_sections_[ch].begin() + kBandBoundaries[subband],
            n_active_sections_[ch].begin() + kBandBoundaries[subband + 1]);
      }
      std::array<float, kSubbands> new_erle;
      std::array<bool, kSubbands> is_erle_updated;
      is_erle_updated.fill(false);
      new_erle.fill(0.f);
      // An instantaneous ERLE sample (capture power over residual power) is
      // only formed when the render energy is high enough and the residual
      // energy is non-zero.
      for (size_t subband = 0; subband < kSubbands; ++subband) {
        if (X2_subbands[subband] > kX2BandEnergyThreshold &&
            E2_subbands[subband] > 0) {
          new_erle[subband] = Y2_subbands[subband] / E2_subbands[subband];
          RTC_DCHECK_GT(new_erle[subband], 0);
          is_erle_updated[subband] = true;
          ++num_updates_[ch][subband];
        }
      }
      // Smooth the section-bucketed ERLE estimator towards the new sample;
      // alpha becomes 0 (no update) when no sample was produced above.
      for (size_t subband = 0; subband < kSubbands; ++subband) {
        const size_t idx = idx_subbands[subband];
        RTC_DCHECK_LT(idx, erle_estimators_[ch].size());
        float alpha = new_erle[subband] > erle_estimators_[ch][idx][subband]
                          ? kSmthConstantIncreases
                          : kSmthConstantDecreases;
        alpha = static_cast<float>(is_erle_updated[subband]) * alpha;
        erle_estimators_[ch][idx][subband] +=
            alpha * (new_erle[subband] - erle_estimators_[ch][idx][subband]);
        erle_estimators_[ch][idx][subband] = rtc::SafeClamp(
            erle_estimators_[ch][idx][subband], min_erle_, max_erle_[subband]);
      }
      // Smooth the reference ERLE (not bucketed by section count) the same
      // way. NOTE: this statement continues in the next chunk.
      for (size_t subband = 0; subband < kSubbands; ++subband) {
        float alpha = new_erle[subband] > erle_ref_[ch][subband] ?
                          kSmthConstantIncreases : kSmthConstantDecreases;
        alpha = static_cast<float>(is_erle_updated[subband]) * alpha;
        erle_ref_[ch][subband] +=
            alpha * (new_erle[subband] - erle_ref_[ch][subband]);
        erle_ref_[ch][subband] = rtc::SafeClamp(erle_ref_[ch][subband],
                                                min_erle_, max_erle_[subband]);
      }
      // Update the correction factors once enough samples have been seen
      // (more than kNumUpdateThr updates for the subband).
      for (size_t subband = 0; subband < kSubbands; ++subband) {
        constexpr int kNumUpdateThr = 50;
        if (is_erle_updated[subband] &&
            num_updates_[ch][subband] > kNumUpdateThr) {
          const size_t idx = idx_subbands[subband];
          RTC_DCHECK_GT(erle_ref_[ch][subband], 0.f);
          // Computes the ratio between the erle that is updated using all the
          // points and the erle that is updated only on signals that share the
          // same number of active filter sections.
          float new_correction_factor =
              erle_estimators_[ch][idx][subband] / erle_ref_[ch][subband];
          // Exponential smoothing of the factor with a fixed 0.1 step.
          correction_factors_[ch][idx][subband] +=
              0.1f * (new_correction_factor -
                      correction_factors_[ch][idx][subband]);
        }
      }
    }
  }
}

// Fills S2_section_accum_ with, for each filter section, the echo power
// spectrum estimate obtained when using only the filter blocks up to and
// including that section: the render power per block (averaged over render
// channels) is accumulated per section, multiplied by the accumulated filter
// frequency response, and finally summed cumulatively across sections (the
// cumulative sum happens in the next chunk).
void SignalDependentErleEstimator::ComputeEchoEstimatePerFilterSection(
    const RenderBuffer& render_buffer,
    rtc::ArrayView<const std::vector<std::array<float, kFftLengthBy2Plus1>>>
        filter_frequency_responses) {
  const SpectrumBuffer& spectrum_render_buffer =
      render_buffer.GetSpectrumBuffer();
  const size_t num_render_channels = spectrum_render_buffer.buffer[0].size();
  const size_t num_capture_channels = S2_section_accum_.size();
  // Precomputed reciprocal used to average the render power over channels.
  const float one_by_num_render_channels = 1.f / num_render_channels;

  RTC_DCHECK_EQ(S2_section_accum_.size(), filter_frequency_responses.size());

  for (size_t capture_ch = 0; capture_ch < num_capture_channels;
       ++capture_ch) {
    RTC_DCHECK_EQ(S2_section_accum_[capture_ch].size() + 1,
                  section_boundaries_blocks_.size());
    // Start reading render spectra at the block of the first section
    // boundary, relative to the current render buffer position.
    size_t idx_render = render_buffer.Position();
    idx_render = spectrum_render_buffer.OffsetIndex(
        idx_render, section_boundaries_blocks_[0]);

    for (size_t section = 0; section < num_sections_; ++section) {
      std::array<float, kFftLengthBy2Plus1> X2_section;
      std::array<float, kFftLengthBy2Plus1> H2_section;
      X2_section.fill(0.f);
      H2_section.fill(0.f);
      // Accumulate render power and filter response over the blocks that
      // belong to this section; the upper bound is clipped to the number of
      // available filter response blocks.
      const size_t block_limit =
          std::min(section_boundaries_blocks_[section + 1],
                   filter_frequency_responses[capture_ch].size());
      for (size_t block = section_boundaries_blocks_[section];
           block < block_limit; ++block) {
        // Average the render power spectrum over all render channels.
        for (size_t render_ch = 0;
             render_ch < spectrum_render_buffer.buffer[idx_render].size();
             ++render_ch) {
          for (size_t k = 0; k < X2_section.size(); ++k) {
            X2_section[k] +=
                spectrum_render_buffer.buffer[idx_render][render_ch][k] *
                one_by_num_render_channels;
          }
        }
        std::transform(H2_section.begin(), H2_section.end(),
                       filter_frequency_responses[capture_ch][block].begin(),
                       H2_section.begin(), std::plus<float>());
        idx_render = spectrum_render_buffer.IncIndex(idx_render);
      }

      // Per-section echo estimate: averaged render power times accumulated
      // filter response.
      std::transform(X2_section.begin(), X2_section.end(), H2_section.begin(),
                     S2_section_accum_[capture_ch][section].begin(),
                     std::multiplies<float>());
    }

    // Turn the per-section estimates into a cumulative sum so that entry
    // `section` holds the estimate for the filter truncated after that
    // section.
    for (size_t section = 1; section < num_sections_; ++section) {
      std::transform(S2_section_accum_[capture_ch][section - 1].begin(),
                     S2_section_accum_[capture_ch][section - 1].end(),
                     S2_section_accum_[capture_ch][section].begin(),
                     S2_section_accum_[capture_ch][section].begin(),
                     std::plus<float>());
    }
  }
}

// For every bin, finds the smallest section index whose cumulative echo
// estimate still reaches 90 % of the full-filter estimate and stores it in
// n_active_sections_.
void SignalDependentErleEstimator::ComputeActiveFilterSections() {
  for (size_t ch = 0; ch < n_active_sections_.size(); ++ch) {
    std::fill(n_active_sections_[ch].begin(), n_active_sections_[ch].end(), 0);
    for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
      size_t section = num_sections_;
      float target = 0.9f * S2_section_accum_[ch][num_sections_ - 1][k];
      // Walk backwards from the last section while the cumulative estimate
      // still meets the 90 % target; the loop leaves the smallest qualifying
      // section index in n_active_sections_.
      while (section > 0 && S2_section_accum_[ch][section - 1][k] >= target) {
        n_active_sections_[ch][k] = --section;
      }
    }
  }
}
}  // namespace webrtc
endlessm/chromium-browser
third_party/webrtc/modules/audio_processing/aec3/signal_dependent_erle_estimator.cc
C++
bsd-3-clause
17,930
/*
 * (C) 1999-2003 Lars Knoll (knoll@kde.org)
 * Copyright (C) 2004, 2006, 2007, 2012 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public License
 * along with this library; see the file COPYING.LIB. If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

#include "config.h"
#include "core/css/CSSStyleSheet.h"

#include "bindings/core/v8/ExceptionState.h"
#include "bindings/core/v8/V8Binding.h"
#include "bindings/core/v8/V8PerIsolateData.h"
#include "core/HTMLNames.h"
#include "core/SVGNames.h"
#include "core/css/CSSImportRule.h"
#include "core/css/CSSRuleList.h"
#include "core/css/MediaList.h"
#include "core/css/StyleRule.h"
#include "core/css/StyleSheetContents.h"
#include "core/css/parser/CSSParser.h"
#include "core/dom/Document.h"
#include "core/dom/ExceptionCode.h"
#include "core/dom/Node.h"
#include "core/frame/UseCounter.h"
#include "core/html/HTMLStyleElement.h"
#include "core/inspector/InspectorInstrumentation.h"
#include "core/svg/SVGStyleElement.h"
#include "platform/weborigin/SecurityOrigin.h"
#include "wtf/text/StringBuilder.h"

namespace blink {

// CSSRuleList implementation that delegates every operation to its owning
// CSSStyleSheet; returned by CSSStyleSheet::cssRules().
class StyleSheetCSSRuleList final : public CSSRuleList {
public:
    static PassOwnPtrWillBeRawPtr<StyleSheetCSSRuleList> create(CSSStyleSheet* sheet)
    {
        return adoptPtrWillBeNoop(new StyleSheetCSSRuleList(sheet));
    }

    DEFINE_INLINE_VIRTUAL_TRACE()
    {
        visitor->trace(m_styleSheet);
        CSSRuleList::trace(visitor);
    }

private:
    StyleSheetCSSRuleList(CSSStyleSheet* sheet) : m_styleSheet(sheet) { }

#if !ENABLE(OILPAN)
    // Without oilpan the list's lifetime is tied to the sheet's refcount.
    virtual void ref() override { m_styleSheet->ref(); }
    virtual void deref() override { m_styleSheet->deref(); }
#endif

    virtual unsigned length() const override { return m_styleSheet->length(); }
    virtual CSSRule* item(unsigned index) const override { return m_styleSheet->item(index); }

    virtual CSSStyleSheet* styleSheet() const override { return m_styleSheet; }

    RawPtrWillBeMember<CSSStyleSheet> m_styleSheet;
};

#if ENABLE(ASSERT)
static bool isAcceptableCSSStyleSheetParent(Node* parentNode)
{
    // Only these nodes can be parents of StyleSheets, and they need to call
    // clearOwnerNode() when moved out of document.
    // Destruction of the style sheet counts as being "moved out of the
    // document", but only in the non-oilpan version of blink. I.e. don't call
    // clearOwnerNode() in the owner's destructor in oilpan.
    return !parentNode
        || parentNode->isDocumentNode()
        || isHTMLLinkElement(*parentNode)
        || isHTMLStyleElement(*parentNode)
        || isSVGStyleElement(*parentNode)
        || parentNode->nodeType() == Node::PROCESSING_INSTRUCTION_NODE;
}
#endif

// Factory for a sheet owned by an @import rule (no owner node).
PassRefPtrWillBeRawPtr<CSSStyleSheet> CSSStyleSheet::create(PassRefPtrWillBeRawPtr<StyleSheetContents> sheet, CSSImportRule* ownerRule)
{
    return adoptRefWillBeNoop(new CSSStyleSheet(sheet, ownerRule));
}

// Factory for a non-inline sheet owned by a node (e.g. a <link> element).
PassRefPtrWillBeRawPtr<CSSStyleSheet> CSSStyleSheet::create(PassRefPtrWillBeRawPtr<StyleSheetContents> sheet, Node* ownerNode)
{
    return adoptRefWillBeNoop(new CSSStyleSheet(sheet, ownerNode, false, TextPosition::minimumPosition()));
}

// Factory for an inline sheet (e.g. a <style> element) with known contents.
PassRefPtrWillBeRawPtr<CSSStyleSheet> CSSStyleSheet::createInline(PassRefPtrWillBeRawPtr<StyleSheetContents> sheet, Node* ownerNode, const TextPosition& startPosition)
{
    ASSERT(sheet);
    return adoptRefWillBeNoop(new CSSStyleSheet(sheet, ownerNode, true, startPosition));
}

// Factory for an inline sheet whose contents are created here from the
// owner's document context. NOTE: the signature continues in the next chunk.
PassRefPtrWillBeRawPtr<CSSStyleSheet> CSSStyleSheet::createInline(Node* ownerNode, const KURL& baseURL, const TextPosition& startPosition, const
String& encoding)
{
    CSSParserContext parserContext(ownerNode->document(), 0, baseURL, encoding);
    RefPtrWillBeRawPtr<StyleSheetContents> sheet = StyleSheetContents::create(baseURL.string(), parserContext);
    return adoptRefWillBeNoop(new CSSStyleSheet(sheet.release(), ownerNode, true, startPosition));
}

// Constructor for @import-owned sheets: no owner node, position is the
// minimum sentinel.
CSSStyleSheet::CSSStyleSheet(PassRefPtrWillBeRawPtr<StyleSheetContents> contents, CSSImportRule* ownerRule)
    : m_contents(contents)
    , m_isInlineStylesheet(false)
    , m_isDisabled(false)
    , m_ownerNode(nullptr)
    , m_ownerRule(ownerRule)
    , m_startPosition(TextPosition::minimumPosition())
    , m_loadCompleted(false)
{
    // Register as a client so m_contents can notify/track this wrapper.
    m_contents->registerClient(this);
}

// Constructor for node-owned sheets (both inline and linked).
CSSStyleSheet::CSSStyleSheet(PassRefPtrWillBeRawPtr<StyleSheetContents> contents, Node* ownerNode, bool isInlineStylesheet, const TextPosition& startPosition)
    : m_contents(contents)
    , m_isInlineStylesheet(isInlineStylesheet)
    , m_isDisabled(false)
    , m_ownerNode(ownerNode)
    , m_ownerRule(nullptr)
    , m_startPosition(startPosition)
    , m_loadCompleted(false)
{
    ASSERT(isAcceptableCSSStyleSheetParent(ownerNode));
    m_contents->registerClient(this);
}

CSSStyleSheet::~CSSStyleSheet()
{
    // With oilpan the parent style sheet pointer is strong and the sheet and
    // its RuleCSSOMWrappers die together and we don't need to clear them here.
    // Also with oilpan the StyleSheetContents client pointers are weak and
    // therefore do not need to be cleared here.
#if !ENABLE(OILPAN)
    // For style rules outside the document, .parentStyleSheet can become null even if the style rule
    // is still observable from JavaScript. This matches the behavior of .parentNode for nodes, but
    // it's not ideal because it makes the CSSOM's behavior depend on the timing of garbage collection.
    // Detach child-rule CSSOM wrappers from this dying sheet (non-oilpan
    // only; this code continues the destructor begun in the previous chunk).
    for (unsigned i = 0; i < m_childRuleCSSOMWrappers.size(); ++i) {
        if (m_childRuleCSSOMWrappers[i])
            m_childRuleCSSOMWrappers[i]->setParentStyleSheet(0);
    }

    if (m_mediaCSSOMWrapper)
        m_mediaCSSOMWrapper->clearParentStyleSheet();
    m_contents->unregisterClient(this);
#endif
}

// Called before any CSSOM mutation of the rules. Ensures this sheet is the
// sole, mutable owner of its StyleSheetContents, copying them (copy-on-write)
// when they are shared or cached.
void CSSStyleSheet::willMutateRules()
{
    InspectorInstrumentation::willMutateRules(this);

    // If we are the only client it is safe to mutate.
    if (m_contents->clientSize() <= 1 && !m_contents->isInMemoryCache()) {
        m_contents->clearRuleSet();
        if (Document* document = ownerDocument())
            m_contents->removeSheetFromCache(document);
        m_contents->setMutable();
        return;
    }
    // Only cacheable stylesheets should have multiple clients.
    ASSERT(m_contents->isCacheable());

    // Copy-on-write.
    m_contents->unregisterClient(this);
    m_contents = m_contents->copy();
    m_contents->registerClient(this);

    m_contents->setMutable();

    // Any existing CSSOM wrappers need to be connected to the copied child rules.
    reattachChildRuleCSSOMWrappers();
}

// Called after a CSSOM rule mutation; forwards to didMutate() with the
// partial-update type.
void CSSStyleSheet::didMutateRules()
{
    ASSERT(m_contents->isMutable());
    ASSERT(m_contents->clientSize() <= 1);

    InspectorInstrumentation::didMutateRules(this);
    didMutate(PartialRuleUpdate);
}

// Notifies the owner document that this sheet changed so styles can be
// recalculated with the appropriate update mode.
void CSSStyleSheet::didMutate(StyleSheetUpdateType updateType)
{
    Document* owner = ownerDocument();
    if (!owner)
        return;

    // Need FullStyleUpdate when insertRule or deleteRule,
    // because StyleSheetCollection::analyzeStyleSheetChange cannot detect partial rule update.
    StyleResolverUpdateMode updateMode = updateType != PartialRuleUpdate ?
AnalyzedStyleUpdate : FullStyleUpdate;
    owner->modifiedStyleSheet(this, updateMode);
}

// Re-points existing child-rule CSSOM wrappers at the (possibly copied)
// contents' rules; used after the copy-on-write in willMutateRules().
void CSSStyleSheet::reattachChildRuleCSSOMWrappers()
{
    for (unsigned i = 0; i < m_childRuleCSSOMWrappers.size(); ++i) {
        if (!m_childRuleCSSOMWrappers[i])
            continue;
        m_childRuleCSSOMWrappers[i]->reattach(m_contents->ruleAt(i));
    }
}

// Enables/disables the sheet and triggers a style update on a real change.
void CSSStyleSheet::setDisabled(bool disabled)
{
    if (disabled == m_isDisabled)
        return;
    m_isDisabled = disabled;

    didMutate();
}

void CSSStyleSheet::setMediaQueries(PassRefPtrWillBeRawPtr<MediaQuerySet> mediaQueries)
{
    m_mediaQueries = mediaQueries;
    if (m_mediaCSSOMWrapper && m_mediaQueries)
        m_mediaCSSOMWrapper->reattach(m_mediaQueries.get());
}

unsigned CSSStyleSheet::length() const
{
    return m_contents->ruleCount();
}

// Returns the CSSOM wrapper for the rule at `index`, creating it lazily; 0
// when the index is out of range.
CSSRule* CSSStyleSheet::item(unsigned index)
{
    unsigned ruleCount = length();
    if (index >= ruleCount)
        return 0;

    if (m_childRuleCSSOMWrappers.isEmpty())
        m_childRuleCSSOMWrappers.grow(ruleCount);
    ASSERT(m_childRuleCSSOMWrappers.size() == ruleCount);

    RefPtrWillBeMember<CSSRule>& cssRule = m_childRuleCSSOMWrappers[index];
    if (!cssRule)
        cssRule = m_contents->ruleAt(index)->createCSSOMWrapper(this);
    return cssRule.get();
}

// Detaches the sheet from its owner node (e.g. when the node leaves the
// document); triggers a full style-sheet update first.
void CSSStyleSheet::clearOwnerNode()
{
    didMutate(EntireStyleSheetUpdate);
    if (m_ownerNode)
        m_contents->unregisterClient(this);
    m_ownerNode = nullptr;
}

// Same-origin policy check for CSSOM rule access: inline sheets, sheets
// without a base URL or document, same-origin sheets, and sheets with an
// explicitly allowed origin are accessible.
bool CSSStyleSheet::canAccessRules() const
{
    if (m_isInlineStylesheet)
        return true;
    KURL baseURL = m_contents->baseURL();
    if (baseURL.isEmpty())
        return true;
    Document* document = ownerDocument();
    if (!document)
        return true;
    if (document->securityOrigin()->canRequest(baseURL))
        return true;
    if (m_allowRuleAccessFromOrigin && document->securityOrigin()->canAccess(m_allowRuleAccessFromOrigin.get()))
        return true;
    return false;
}

// Legacy IE-style alias for cssRules().
PassRefPtrWillBeRawPtr<CSSRuleList> CSSStyleSheet::rules()
{
    return cssRules();
}

// CSSOM insertRule(): parses `ruleString` and inserts it at `index`,
// throwing IndexSizeError / SyntaxError / HierarchyRequestError on failure.
// NOTE: the ASSERT below continues in the next chunk.
unsigned CSSStyleSheet::insertRule(const String& ruleString, unsigned index, ExceptionState& exceptionState)
{
    ASSERT(m_childRuleCSSOMWrappers.isEmpty() ||
m_childRuleCSSOMWrappers.size() == m_contents->ruleCount());

    if (index > length()) {
        exceptionState.throwDOMException(IndexSizeError, "The index provided (" + String::number(index) + ") is larger than the maximum index (" + String::number(length()) + ").");
        return 0;
    }
    CSSParserContext context(m_contents->parserContext(), UseCounter::getFrom(this));
    RefPtrWillBeRawPtr<StyleRuleBase> rule = CSSParser::parseRule(context, m_contents.get(), ruleString);

    // FIXME: @namespace rules have special handling in the CSSOM spec, but it
    // mostly doesn't make sense since we don't support CSSNamespaceRule
    if (!rule || rule->isNamespaceRule()) {
        exceptionState.throwDOMException(SyntaxError, "Failed to parse the rule '" + ruleString + "'.");
        return 0;
    }
    // Scope object that triggers willMutateRules()/didMutateRules().
    RuleMutationScope mutationScope(this);

    bool success = m_contents->wrapperInsertRule(rule, index);
    if (!success) {
        exceptionState.throwDOMException(HierarchyRequestError, "Failed to insert the rule.");
        return 0;
    }
    // Keep the wrapper vector aligned with the rule list; the wrapper for the
    // new rule is created lazily in item().
    if (!m_childRuleCSSOMWrappers.isEmpty())
        m_childRuleCSSOMWrappers.insert(index, RefPtrWillBeMember<CSSRule>(nullptr));

    return index;
}

// Deprecated single-argument insertRule() overload; inserts at index 0 and
// records a deprecation use counter.
unsigned CSSStyleSheet::insertRule(const String& rule, ExceptionState& exceptionState)
{
    UseCounter::countDeprecation(callingExecutionContext(V8PerIsolateData::mainThreadIsolate()), UseCounter::CSSStyleSheetInsertRuleOptionalArg);
    return insertRule(rule, 0, exceptionState);
}

// CSSOM deleteRule(): removes the rule at `index`, throwing IndexSizeError
// when out of range.
void CSSStyleSheet::deleteRule(unsigned index, ExceptionState& exceptionState)
{
    ASSERT(m_childRuleCSSOMWrappers.isEmpty() || m_childRuleCSSOMWrappers.size() == m_contents->ruleCount());

    if (index >= length()) {
        exceptionState.throwDOMException(IndexSizeError, "The index provided (" + String::number(index) + ") is larger than the maximum index (" + String::number(length() - 1) + ").");
        return;
    }
    RuleMutationScope mutationScope(this);

    m_contents->wrapperDeleteRule(index);

    // Detach and drop the corresponding CSSOM wrapper, if any.
    if (!m_childRuleCSSOMWrappers.isEmpty()) {
        if (m_childRuleCSSOMWrappers[index])
            m_childRuleCSSOMWrappers[index]->setParentStyleSheet(0);
        m_childRuleCSSOMWrappers.remove(index);
    }
}

// Legacy IE-style addRule(): builds "<selector> { <style> }" and forwards to
// insertRule().
int CSSStyleSheet::addRule(const String& selector, const String& style, int index, ExceptionState& exceptionState)
{
    StringBuilder text;
    text.append(selector);
    text.appendLiteral(" { ");
    text.append(style);
    if (!style.isEmpty())
        text.append(' ');
    text.append('}');
    insertRule(text.toString(), index, exceptionState);

    // As per Microsoft documentation, always return -1.
    return -1;
}

// addRule() without an index appends at the end.
int CSSStyleSheet::addRule(const String& selector, const String& style, ExceptionState& exceptionState)
{
    return addRule(selector, style, length(), exceptionState);
}

// CSSOM cssRules accessor; returns null when cross-origin access is blocked.
PassRefPtrWillBeRawPtr<CSSRuleList> CSSStyleSheet::cssRules()
{
    if (!canAccessRules())
        return nullptr;
    if (!m_ruleListCSSOMWrapper)
        m_ruleListCSSOMWrapper = StyleSheetCSSRuleList::create(this);
    return m_ruleListCSSOMWrapper.get();
}

String CSSStyleSheet::href() const
{
    return m_contents->originalURL();
}

KURL CSSStyleSheet::baseURL() const
{
    return m_contents->baseURL();
}

bool CSSStyleSheet::isLoading() const
{
    return m_contents->isLoading();
}

// Lazily creates the MediaList CSSOM wrapper; 0 when there are no media
// queries.
MediaList* CSSStyleSheet::media() const
{
    if (!m_mediaQueries)
        return 0;

    if (!m_mediaCSSOMWrapper)
        m_mediaCSSOMWrapper = MediaList::create(m_mediaQueries.get(), const_cast<CSSStyleSheet*>(this));
    return m_mediaCSSOMWrapper.get();
}

// Only @import-owned sheets have a parent style sheet.
CSSStyleSheet* CSSStyleSheet::parentStyleSheet() const
{
    return m_ownerRule ? m_ownerRule->parentStyleSheet() : 0;
}

// Walks up the @import chain to the root sheet and returns its owner node's
// document (0 when there is no owner node). NOTE: the return expression
// continues in the next chunk.
Document* CSSStyleSheet::ownerDocument() const
{
    const CSSStyleSheet* root = this;
    while (root->parentStyleSheet())
        root = root->parentStyleSheet();
    return root->ownerNode() ?
&root->ownerNode()->document() : 0;
}

void CSSStyleSheet::setAllowRuleAccessFromOrigin(PassRefPtr<SecurityOrigin> allowedOrigin)
{
    m_allowRuleAccessFromOrigin = allowedOrigin;
}

void CSSStyleSheet::clearChildRuleCSSOMWrappers()
{
    m_childRuleCSSOMWrappers.clear();
}

// Asks the owner node whether loading finished and records the result.
bool CSSStyleSheet::sheetLoaded()
{
    ASSERT(m_ownerNode);
    setLoadCompleted(m_ownerNode->sheetLoaded());
    return m_loadCompleted;
}

void CSSStyleSheet::startLoadingDynamicSheet()
{
    setLoadCompleted(false);
    m_ownerNode->startLoadingDynamicSheet();
}

// Tracks the load state and notifies the contents when it transitions, so
// clients are informed of load start/completion exactly once per change.
void CSSStyleSheet::setLoadCompleted(bool completed)
{
    if (completed == m_loadCompleted)
        return;

    m_loadCompleted = completed;

    if (completed)
        m_contents->clientLoadCompleted(this);
    else
        m_contents->clientLoadStarted(this);
}

// Oilpan tracing of all garbage-collected members.
DEFINE_TRACE(CSSStyleSheet)
{
    visitor->trace(m_contents);
    visitor->trace(m_mediaQueries);
    visitor->trace(m_ownerNode);
    visitor->trace(m_ownerRule);
    visitor->trace(m_mediaCSSOMWrapper);
    visitor->trace(m_childRuleCSSOMWrappers);
    visitor->trace(m_ruleListCSSOMWrapper);
    StyleSheet::trace(visitor);
}

} // namespace blink
guorendong/iridium-browser-ubuntu
third_party/WebKit/Source/core/css/CSSStyleSheet.cpp
C++
bsd-3-clause
15,007