repo_name
stringlengths
4
116
path
stringlengths
4
379
size
stringlengths
1
7
content
stringlengths
3
1.05M
license
stringclasses
15 values
floresconlimon/qutebrowser
qutebrowser/mainwindow/statusbar/textbase.py
3433
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2015 Florian Bruhin (The Compiler) <mail@qutebrowser.org> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Base text widgets for statusbar.""" from PyQt5.QtCore import Qt from PyQt5.QtWidgets import QLabel, QSizePolicy from PyQt5.QtGui import QPainter from qutebrowser.utils import qtutils, utils class TextBase(QLabel): """A text in the statusbar. Unlike QLabel, the text will get elided. Eliding is loosely based on http://gedgedev.blogspot.ch/2010/12/elided-labels-in-qt.html Attributes: _elidemode: Where to elide the text. _elided_text: The current elided text. """ def __init__(self, parent=None, elidemode=Qt.ElideRight): super().__init__(parent) self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Minimum) self._elidemode = elidemode self._elided_text = '' def __repr__(self): return utils.get_repr(self, text=self.text()) def _update_elided_text(self, width): """Update the elided text when necessary. Args: width: The maximal width the text should take. """ if self.text(): self._elided_text = self.fontMetrics().elidedText( self.text(), self._elidemode, width, Qt.TextShowMnemonic) else: self._elided_text = '' def setText(self, txt): """Extend QLabel::setText. 
This update the elided text after setting the text, and also works around a weird QLabel redrawing bug where it doesn't redraw correctly when the text is empty -- we explicitly need to call repaint() to resolve this. More info: http://stackoverflow.com/q/21890462/2085149 https://bugreports.qt.io/browse/QTBUG-36945 https://codereview.qt-project.org/#/c/79181/ Args: txt: The text to set (string). """ super().setText(txt) self._update_elided_text(self.geometry().width()) if not txt: # WORKAROUND self.repaint() def resizeEvent(self, e): """Extend QLabel::resizeEvent to update the elided text afterwards.""" super().resizeEvent(e) size = e.size() qtutils.ensure_valid(size) self._update_elided_text(size.width()) def paintEvent(self, e): """Override QLabel::paintEvent to draw elided text.""" if self._elidemode == Qt.ElideNone: super().paintEvent(e) else: e.accept() painter = QPainter(self) geom = self.geometry() qtutils.ensure_valid(geom) painter.drawText(0, 0, geom.width(), geom.height(), self.alignment(), self._elided_text)
gpl-3.0
jenarroyo/moodle-repo
local/qeupgradehelper/listpreupgrade.php
2341
<?php // This file is part of Moodle - http://moodle.org/ // // Moodle is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // Moodle is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with Moodle. If not, see <http://www.gnu.org/licenses/>. /** * Script to show all the quizzes in the site with how many attempts they have * that will need to be upgraded. * * @package local * @subpackage qeupgradehelper * @copyright 2010 The Open University * @license http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later */ require_once(dirname(__FILE__) . '/../../config.php'); require_once(dirname(__FILE__) . '/locallib.php'); require_once($CFG->libdir . '/adminlib.php'); require_login(); require_capability('moodle/site:config', get_context_instance(CONTEXT_SYSTEM)); local_qeupgradehelper_require_not_upgraded(); admin_externalpage_setup('qeupgradehelper', '', array(), local_qeupgradehelper_url('')); $PAGE->navbar->add(get_string('listpreupgrade', 'local_qeupgradehelper')); $renderer = $PAGE->get_renderer('local_qeupgradehelper'); $quizzes = new local_qeupgradehelper_pre_upgrade_quiz_list(); // Look to see if the admin has set things up to only upgrade certain attempts. $partialupgradefile = $CFG->dirroot . 
'/local/qeupgradehelper/partialupgrade.php'; $partialupgradefunction = 'local_qeupgradehelper_get_quizzes_to_upgrade'; if (is_readable($partialupgradefile)) { include_once($partialupgradefile); if (function_exists($partialupgradefunction)) { $quizzes = new local_qeupgradehelper_pre_upgrade_quiz_list_restricted( $partialupgradefunction()); } } $numveryoldattemtps = local_qeupgradehelper_get_num_very_old_attempts(); if ($quizzes->is_empty()) { echo $renderer->simple_message_page(get_string('noquizattempts', 'local_qeupgradehelper')); } else { echo $renderer->quiz_list_page($quizzes, $numveryoldattemtps); }
gpl-3.0
gitmeri/studio3
plugins/com.aptana.editor.css.formatter/src/com/aptana/editor/css/formatter/preferences/CSSFormatterBlankLinesPage.java
2004
/** * Aptana Studio * Copyright (c) 2005-2011 by Appcelerator, Inc. All Rights Reserved. * Licensed under the terms of the GNU Public License (GPL) v3 (with exceptions). * Please see the license.html included with this distribution for details. * Any modifications to this file must keep this entire header intact. */ package com.aptana.editor.css.formatter.preferences; import java.net.URL; import org.eclipse.swt.layout.GridData; import org.eclipse.swt.widgets.Composite; import org.eclipse.swt.widgets.Group; import com.aptana.editor.css.formatter.CSSFormatterConstants; import com.aptana.formatter.ui.IFormatterControlManager; import com.aptana.formatter.ui.IFormatterModifyDialog; import com.aptana.formatter.ui.preferences.FormatterModifyTabPage; import com.aptana.formatter.ui.util.SWTFactory; public class CSSFormatterBlankLinesPage extends FormatterModifyTabPage { private static final String BLANK_LINES_PREVIEW_NAME = "preview.css"; //$NON-NLS-1$ public CSSFormatterBlankLinesPage(IFormatterModifyDialog dialog) { super(dialog); } protected void createOptions(IFormatterControlManager manager, Composite parent) { Group blankLinesGroup = SWTFactory.createGroup(parent, Messages.CSSFormatterBlankLinesPage_blankLinesGroupLabel, 2, 1, GridData.FILL_HORIZONTAL); manager.createNumber(blankLinesGroup, CSSFormatterConstants.LINES_AFTER_ELEMENTS, Messages.CSSFormatterBlankLinesPage_afterCSSRule); manager.createNumber(blankLinesGroup, CSSFormatterConstants.LINES_AFTER_DECLARATION, Messages.CSSFormatterBlankLinesPage_afterCSSDeclaration); Group preserveLinesGroup = SWTFactory.createGroup(parent, Messages.CSSFormatterBlankLinesPage_existingBlankLinesLabel, 2, 1, GridData.FILL_HORIZONTAL); manager.createNumber(preserveLinesGroup, CSSFormatterConstants.PRESERVED_LINES, Messages.CSSFormatterBlankLinesPage_existingBlankLinesToPreserve); } protected URL getPreviewContent() { return getClass().getResource(BLANK_LINES_PREVIEW_NAME); } }
gpl-3.0
MitosEHR/MitosEHR-Official
lib/sencha-touch-2.0.1/examples/oreilly/app/view/about/VideoList.js
567
Ext.define('Oreilly.view.about.VideoList', { extend: 'Ext.List', xtype: 'videoList', config: { disableSelection: true, itemCls: 'video', store: 'Videos' }, initialize: function() { this.setItemTpl([ '<div class="thumb" style="background-image: url({thumbnail.sqDefault})"></div>', '<span class="name">{[values.title.replace("' + this.config.hideText + '","")]}</span>' ]); this.callParent(); this.getStore().load({ url: 'http://gdata.youtube.com/feeds/api/playlists/' + this.config.playlistId + '?v=2&alt=jsonc' }); } });
gpl-3.0
kylethayer/bioladder
wiki/includes/config/EtcdConfigParseError.php
56
<?php class EtcdConfigParseError extends Exception { }
gpl-3.0
syslover33/ctank
java/android-sdk-linux_r24.4.1_src/sources/android-23/com/android/printspooler/model/OpenDocumentCallback.java
1094
/* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.android.printspooler.model; /** * Callbacks interface for opening a file. */ public interface OpenDocumentCallback { public static final int ERROR_MALFORMED_PDF_FILE = -1; public static final int ERROR_SECURE_PDF_FILE = -2; /** * Called after the file is opened. */ public void onSuccess(); /** * Called after opening the file failed. * * @param error The error. */ public void onFailure(int error); }
gpl-3.0
kenjichi/nonpermanent
server/api/core/setDomain.js
1159
import { Shops } from "/lib/collections"; import { Logger } from "/server/api"; /** * getDomain * local helper for creating admin users * @param {String} requestUrl - url * @return {String} domain name stripped from requestUrl */ export function getRegistryDomain(requestUrl) { const url = requestUrl || process.env.ROOT_URL; const domain = url.match(/^https?\:\/\/([^\/:?#]+)(?:[\/:?#]|$)/i)[1]; return domain; } /** * @private setDomain * @summary update the default shop url if ROOT_URL supplied is different from current * @return {String} returns insert result */ export function setDomain() { let currentDomain; // we automatically update the shop domain when ROOT_URL changes try { currentDomain = Shops.findOne().domains[0]; } catch (_error) { Logger.error(_error, "Failed to determine default shop."); } // if the server domain changes, update shop const domain = getRegistryDomain(); if (currentDomain && currentDomain !== domain) { Logger.debug("Updating domain to " + domain); Shops.update({ domains: currentDomain }, { $set: { "domains.$": domain } }); } }
gpl-3.0
zarelit/nodeshot
nodeshot/__init__.py
388
VERSION = (1, 0, 0, 'alpha', 0) __version__ = VERSION def get_version(): version = '%s.%s' % (VERSION[0], VERSION[1]) if VERSION[2]: version = '%s.%s' % (version, VERSION[2]) if VERSION[3:] == ('alpha', 0): version = '%s pre-alpha' % version else: if VERSION[3] != 'final': version = '%s %s' % (version, VERSION[3]) return version
gpl-3.0
VividCortex/mysql
nulltime_test.go
1622
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package // // Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. // // This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this file, // You can obtain one at http://mozilla.org/MPL/2.0/. package mysql import ( "database/sql" "database/sql/driver" "testing" "time" ) var ( // Check implementation of interfaces _ driver.Valuer = NullTime{} _ sql.Scanner = (*NullTime)(nil) ) func TestScanNullTime(t *testing.T) { var scanTests = []struct { in interface{} error bool valid bool time time.Time }{ {tDate, false, true, tDate}, {sDate, false, true, tDate}, {[]byte(sDate), false, true, tDate}, {tDateTime, false, true, tDateTime}, {sDateTime, false, true, tDateTime}, {[]byte(sDateTime), false, true, tDateTime}, {tDate0, false, true, tDate0}, {sDate0, false, true, tDate0}, {[]byte(sDate0), false, true, tDate0}, {sDateTime0, false, true, tDate0}, {[]byte(sDateTime0), false, true, tDate0}, {"", true, false, tDate0}, {"1234", true, false, tDate0}, {0, true, false, tDate0}, } var nt = NullTime{} var err error for _, tst := range scanTests { err = nt.Scan(tst.in) if (err != nil) != tst.error { t.Errorf("%v: expected error status %t, got %t", tst.in, tst.error, (err != nil)) } if nt.Valid != tst.valid { t.Errorf("%v: expected valid status %t, got %t", tst.in, tst.valid, nt.Valid) } if nt.Time != tst.time { t.Errorf("%v: expected time %v, got %v", tst.in, tst.time, nt.Time) } } }
mpl-2.0
dcleao/cdf
pentaho-js/src/test/javascript/cdf/components/NavigatorComponent-spec.js
1743
/*! * Copyright 2002 - 2017 Webdetails, a Hitachi Vantara company. All rights reserved. * * This software was developed by Webdetails and is provided under the terms * of the Mozilla Public License, Version 2.0, or any later version. You may not use * this file except in compliance with the license. If you need a copy of the license, * please go to http://mozilla.org/MPL/2.0/. The Initial Developer is Webdetails. * * Software distributed under the Mozilla Public License is distributed on an "AS IS" * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. Please refer to * the license for the specific language governing your rights and limitations. */ define([ "cdf/Dashboard.Clean", "cdf/components/NavigatorComponent" ], function(Dashboard, NavigatorComponent) { /** * ## The Navigator Component */ describe("The Navigator Component #", function() { var dashboard = new Dashboard(); dashboard.init(); var navigatorComponent = new NavigatorComponent({ name: "navigatorMenu", type: "navigator", listeners: [], htmlObject: "sampleObjectNavigator", executeAtStart: true, mode: "horizontal", includeSolutions: true }); dashboard.addComponent(navigatorComponent); /** * ## The Navigator Component # allows a dashboard to execute update */ it("allows a dashboard to execute update", function(done) { spyOn(navigatorComponent, 'update').and.callThrough(); // listen to cdf:postExecution event navigatorComponent.once("cdf:postExecution", function() { expect(navigatorComponent.update).toHaveBeenCalled(); done(); }); dashboard.update(navigatorComponent); }); }); });
mpl-2.0
michath/ConMonkey
js/src/tests/js1_5/extensions/getset-003.js
4790
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ /* * Date: 14 April 2001 * * SUMMARY: Testing obj.prop getter/setter * Note: this is a non-ECMA extension to the language. */ //----------------------------------------------------------------------------- var UBound = 0; var BUGNUMBER = '(none)'; var summary = 'Testing obj.prop getter/setter'; var statprefix = 'Status: '; var status = ''; var statusitems = [ ]; var actual = ''; var actualvalues = [ ]; var expect= ''; var expectedvalues = [ ]; var cnDEFAULT = 'default name'; var cnFRED = 'Fred'; var obj = {}; var obj2 = {}; var s = ''; // SECTION1: define getter/setter directly on an object (not its prototype) obj = new Object(); obj.nameSETS = 0; obj.nameGETS = 0; Object.defineProperty(obj, "name", { enumerable: true, configurable: true, set: function(newValue) {this._name=newValue; this.nameSETS++;}, get: function() {this.nameGETS++; return this._name;} }); status = 'In SECTION1 of test after 0 sets, 0 gets'; actual = [obj.nameSETS,obj.nameGETS]; expect = [0,0]; addThis(); s = obj.name; status = 'In SECTION1 of test after 0 sets, 1 get'; actual = [obj.nameSETS,obj.nameGETS]; expect = [0,1]; addThis(); obj.name = cnFRED; status = 'In SECTION1 of test after 1 set, 1 get'; actual = [obj.nameSETS,obj.nameGETS]; expect = [1,1]; addThis(); obj.name = obj.name; status = 'In SECTION1 of test after 2 sets, 2 gets'; actual = [obj.nameSETS,obj.nameGETS]; expect = [2,2]; addThis(); // SECTION2: define getter/setter in Object.prototype Object.prototype.nameSETS = 0; Object.prototype.nameGETS = 0; Object.defineProperty(Object.prototype, "name", { enumerable: true, configurable: true, set: function(newValue) {this._name=newValue; this.nameSETS++;}, get: function() {this.nameGETS++; return 
this._name;} }); obj = new Object(); status = 'In SECTION2 of test after 0 sets, 0 gets'; actual = [obj.nameSETS,obj.nameGETS]; expect = [0,0]; addThis(); s = obj.name; status = 'In SECTION2 of test after 0 sets, 1 get'; actual = [obj.nameSETS,obj.nameGETS]; expect = [0,1]; addThis(); obj.name = cnFRED; status = 'In SECTION2 of test after 1 set, 1 get'; actual = [obj.nameSETS,obj.nameGETS]; expect = [1,1]; addThis(); obj.name = obj.name; status = 'In SECTION2 of test after 2 sets, 2 gets'; actual = [obj.nameSETS,obj.nameGETS]; expect = [2,2]; addThis(); // SECTION 3: define getter/setter in prototype of user-defined constructor function TestObject() { } TestObject.prototype.nameSETS = 0; TestObject.prototype.nameGETS = 0; Object.defineProperty(TestObject.prototype, "name", { enumerable: true, configurable: true, set: function(newValue) {this._name=newValue; this.nameSETS++;}, get: function() {this.nameGETS++; return this._name;} }); TestObject.prototype.name = cnDEFAULT; obj = new TestObject(); status = 'In SECTION3 of test after 1 set, 0 gets'; // (we set a default value in the prototype) actual = [obj.nameSETS,obj.nameGETS]; expect = [1,0]; addThis(); s = obj.name; status = 'In SECTION3 of test after 1 set, 1 get'; actual = [obj.nameSETS,obj.nameGETS]; expect = [1,1]; addThis(); obj.name = cnFRED; status = 'In SECTION3 of test after 2 sets, 1 get'; actual = [obj.nameSETS,obj.nameGETS]; expect = [2,1]; addThis(); obj.name = obj.name; status = 'In SECTION3 of test after 3 sets, 2 gets'; actual = [obj.nameSETS,obj.nameGETS]; expect = [3,2]; addThis(); obj2 = new TestObject(); status = 'obj2 = new TestObject() after 1 set, 0 gets'; actual = [obj2.nameSETS,obj2.nameGETS]; expect = [1,0]; // we set a default value in the prototype - addThis(); // Use both obj and obj2 - obj2.name = obj.name + obj2.name; status = 'obj2 = new TestObject() after 2 sets, 1 get'; actual = [obj2.nameSETS,obj2.nameGETS]; expect = [2,1]; addThis(); status = 'In SECTION3 of test after 3 sets, 3 
gets'; actual = [obj.nameSETS,obj.nameGETS]; expect = [3,3]; // we left off at [3,2] above - addThis(); //--------------------------------------------------------------------------------- test(); //--------------------------------------------------------------------------------- function addThis() { statusitems[UBound] = status; actualvalues[UBound] = actual.toString(); expectedvalues[UBound] = expect.toString(); UBound++; } function test() { enterFunc ('test'); printBugNumber(BUGNUMBER); printStatus (summary); for (var i = 0; i < UBound; i++) { reportCompare(expectedvalues[i], actualvalues[i], getStatus(i)); } exitFunc ('test'); } function getStatus(i) { return statprefix + statusitems[i]; }
mpl-2.0
grubernaut/packer
vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/ecs/attach_disk.go
4002
package ecs //Licensed under the Apache License, Version 2.0 (the "License"); //you may not use this file except in compliance with the License. //You may obtain a copy of the License at // //http://www.apache.org/licenses/LICENSE-2.0 // //Unless required by applicable law or agreed to in writing, software //distributed under the License is distributed on an "AS IS" BASIS, //WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //See the License for the specific language governing permissions and //limitations under the License. // // Code generated by Alibaba Cloud SDK Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" ) // AttachDisk invokes the ecs.AttachDisk API synchronously // api document: https://help.aliyun.com/api/ecs/attachdisk.html func (client *Client) AttachDisk(request *AttachDiskRequest) (response *AttachDiskResponse, err error) { response = CreateAttachDiskResponse() err = client.DoAction(request, response) return } // AttachDiskWithChan invokes the ecs.AttachDisk API asynchronously // api document: https://help.aliyun.com/api/ecs/attachdisk.html // asynchronous document: https://help.aliyun.com/document_detail/66220.html func (client *Client) AttachDiskWithChan(request *AttachDiskRequest) (<-chan *AttachDiskResponse, <-chan error) { responseChan := make(chan *AttachDiskResponse, 1) errChan := make(chan error, 1) err := client.AddAsyncTask(func() { defer close(responseChan) defer close(errChan) response, err := client.AttachDisk(request) if err != nil { errChan <- err } else { responseChan <- response } }) if err != nil { errChan <- err close(responseChan) close(errChan) } return responseChan, errChan } // AttachDiskWithCallback invokes the ecs.AttachDisk API asynchronously // api document: https://help.aliyun.com/api/ecs/attachdisk.html // asynchronous document: 
https://help.aliyun.com/document_detail/66220.html func (client *Client) AttachDiskWithCallback(request *AttachDiskRequest, callback func(response *AttachDiskResponse, err error)) <-chan int { result := make(chan int, 1) err := client.AddAsyncTask(func() { var response *AttachDiskResponse var err error defer close(result) response, err = client.AttachDisk(request) callback(response, err) result <- 1 }) if err != nil { defer close(result) callback(nil, err) result <- 0 } return result } // AttachDiskRequest is the request struct for api AttachDisk type AttachDiskRequest struct { *requests.RpcRequest ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"` InstanceId string `position:"Query" name:"InstanceId"` ResourceOwnerAccount string `position:"Query" name:"ResourceOwnerAccount"` OwnerAccount string `position:"Query" name:"OwnerAccount"` DiskId string `position:"Query" name:"DiskId"` OwnerId requests.Integer `position:"Query" name:"OwnerId"` Device string `position:"Query" name:"Device"` DeleteWithInstance requests.Boolean `position:"Query" name:"DeleteWithInstance"` } // AttachDiskResponse is the response struct for api AttachDisk type AttachDiskResponse struct { *responses.BaseResponse RequestId string `json:"RequestId" xml:"RequestId"` } // CreateAttachDiskRequest creates a request to invoke AttachDisk API func CreateAttachDiskRequest() (request *AttachDiskRequest) { request = &AttachDiskRequest{ RpcRequest: &requests.RpcRequest{}, } request.InitWithApiInfo("Ecs", "2014-05-26", "AttachDisk", "ecs", "openAPI") return } // CreateAttachDiskResponse creates a response to parse from AttachDisk response func CreateAttachDiskResponse() (response *AttachDiskResponse) { response = &AttachDiskResponse{ BaseResponse: &responses.BaseResponse{}, } return }
mpl-2.0
davidgiven/povray
libraries/boost/libs/thread/src/win32/thread.cpp
21264
// Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // (C) Copyright 2007 Anthony Williams // (C) Copyright 2007 David Deakins #define _WIN32_WINNT 0x400 #define WINVER 0x400 #include <boost/thread/thread.hpp> #include <algorithm> #include <windows.h> #ifndef UNDER_CE #include <process.h> #endif #include <stdio.h> #include <boost/thread/once.hpp> #include <boost/thread/tss.hpp> #include <boost/assert.hpp> #include <boost/throw_exception.hpp> #include <boost/thread/detail/tss_hooks.hpp> #include <boost/date_time/posix_time/conversion.hpp> namespace boost { namespace { boost::once_flag current_thread_tls_init_flag=BOOST_ONCE_INIT; DWORD current_thread_tls_key=0; void create_current_thread_tls_key() { tss_cleanup_implemented(); // if anyone uses TSS, we need the cleanup linked in current_thread_tls_key=TlsAlloc(); BOOST_ASSERT(current_thread_tls_key!=TLS_OUT_OF_INDEXES); } void cleanup_tls_key() { if(current_thread_tls_key) { TlsFree(current_thread_tls_key); current_thread_tls_key=0; } } detail::thread_data_base* get_current_thread_data() { if(!current_thread_tls_key) { return 0; } return (detail::thread_data_base*)TlsGetValue(current_thread_tls_key); } void set_current_thread_data(detail::thread_data_base* new_data) { boost::call_once(current_thread_tls_init_flag,create_current_thread_tls_key); if(current_thread_tls_key) BOOST_VERIFY(TlsSetValue(current_thread_tls_key,new_data)); else boost::throw_exception(thread_resource_error()); } #ifdef BOOST_NO_THREADEX // Windows CE doesn't define _beginthreadex struct ThreadProxyData { typedef unsigned (__stdcall* func)(void*); func start_address_; void* arglist_; ThreadProxyData(func start_address,void* arglist) : start_address_(start_address), arglist_(arglist) {} }; DWORD WINAPI ThreadProxy(LPVOID args) { ThreadProxyData* data=reinterpret_cast<ThreadProxyData*>(args); DWORD ret=data->start_address_(data->arglist_); 
delete data; return ret; } typedef void* uintptr_t; inline uintptr_t const _beginthreadex(void* security, unsigned stack_size, unsigned (__stdcall* start_address)(void*), void* arglist, unsigned initflag, unsigned* thrdaddr) { DWORD threadID; HANDLE hthread=CreateThread(static_cast<LPSECURITY_ATTRIBUTES>(security),stack_size,ThreadProxy, new ThreadProxyData(start_address,arglist),initflag,&threadID); if (hthread!=0) *thrdaddr=threadID; return reinterpret_cast<uintptr_t const>(hthread); } #endif } namespace detail { struct thread_exit_callback_node { boost::detail::thread_exit_function_base* func; thread_exit_callback_node* next; thread_exit_callback_node(boost::detail::thread_exit_function_base* func_, thread_exit_callback_node* next_): func(func_),next(next_) {} }; struct tss_data_node { void const* key; boost::shared_ptr<boost::detail::tss_cleanup_function> func; void* value; tss_data_node* next; tss_data_node(void const* key_,boost::shared_ptr<boost::detail::tss_cleanup_function> func_,void* value_, tss_data_node* next_): key(key_),func(func_),value(value_),next(next_) {} }; } namespace { void run_thread_exit_callbacks() { detail::thread_data_ptr current_thread_data(get_current_thread_data(),false); if(current_thread_data) { while(current_thread_data->tss_data || current_thread_data->thread_exit_callbacks) { while(current_thread_data->thread_exit_callbacks) { detail::thread_exit_callback_node* const current_node=current_thread_data->thread_exit_callbacks; current_thread_data->thread_exit_callbacks=current_node->next; if(current_node->func) { (*current_node->func)(); boost::detail::heap_delete(current_node->func); } boost::detail::heap_delete(current_node); } while(current_thread_data->tss_data) { detail::tss_data_node* const current_node=current_thread_data->tss_data; current_thread_data->tss_data=current_node->next; if(current_node->func) { (*current_node->func)(current_node->value); } boost::detail::heap_delete(current_node); } } set_current_thread_data(0); } 
} unsigned __stdcall thread_start_function(void* param) { detail::thread_data_base* const thread_info(reinterpret_cast<detail::thread_data_base*>(param)); set_current_thread_data(thread_info); try { thread_info->run(); } catch(thread_interrupted const&) { } // Removed as it stops the debugger identifying the cause of the exception // Unhandled exceptions still cause the application to terminate // catch(...) // { // std::terminate(); // } run_thread_exit_callbacks(); return 0; } } thread::thread() {} void thread::start_thread() { uintptr_t const new_thread=_beginthreadex(0,0,&thread_start_function,thread_info.get(),CREATE_SUSPENDED,&thread_info->id); if(!new_thread) { boost::throw_exception(thread_resource_error()); } intrusive_ptr_add_ref(thread_info.get()); thread_info->thread_handle=(detail::win32::handle)(new_thread); ResumeThread(thread_info->thread_handle); } thread::thread(detail::thread_data_ptr data): thread_info(data) {} namespace { struct externally_launched_thread: detail::thread_data_base { externally_launched_thread() { ++count; interruption_enabled=false; } void run() {} private: externally_launched_thread(externally_launched_thread&); void operator=(externally_launched_thread&); }; void make_external_thread_data() { externally_launched_thread* me=detail::heap_new<externally_launched_thread>(); try { set_current_thread_data(me); } catch(...) 
{ detail::heap_delete(me); throw; } } detail::thread_data_base* get_or_make_current_thread_data() { detail::thread_data_base* current_thread_data(get_current_thread_data()); if(!current_thread_data) { make_external_thread_data(); current_thread_data=get_current_thread_data(); } return current_thread_data; } } thread::~thread() { detach(); } thread::id thread::get_id() const { return thread::id((get_thread_info)()); } bool thread::joinable() const { return (get_thread_info)(); } void thread::join() { detail::thread_data_ptr local_thread_info=(get_thread_info)(); if(local_thread_info) { this_thread::interruptible_wait(local_thread_info->thread_handle,detail::timeout::sentinel()); release_handle(); } } bool thread::timed_join(boost::system_time const& wait_until) { detail::thread_data_ptr local_thread_info=(get_thread_info)(); if(local_thread_info) { if(!this_thread::interruptible_wait(local_thread_info->thread_handle,get_milliseconds_until(wait_until))) { return false; } release_handle(); } return true; } void thread::detach() { release_handle(); } void thread::release_handle() { thread_info=0; } void thread::interrupt() { detail::thread_data_ptr local_thread_info=(get_thread_info)(); if(local_thread_info) { local_thread_info->interrupt(); } } bool thread::interruption_requested() const { detail::thread_data_ptr local_thread_info=(get_thread_info)(); return local_thread_info.get() && (detail::win32::WaitForSingleObject(local_thread_info->interruption_handle,0)==0); } unsigned thread::hardware_concurrency() { SYSTEM_INFO info={{0}}; GetSystemInfo(&info); return info.dwNumberOfProcessors; } thread::native_handle_type thread::native_handle() { detail::thread_data_ptr local_thread_info=(get_thread_info)(); return local_thread_info?(detail::win32::handle)local_thread_info->thread_handle:detail::win32::invalid_handle_value; } detail::thread_data_ptr thread::get_thread_info BOOST_PREVENT_MACRO_SUBSTITUTION () const { return thread_info; } namespace this_thread { namespace { 
LARGE_INTEGER get_due_time(detail::timeout const& target_time) { LARGE_INTEGER due_time={{0}}; if(target_time.relative) { unsigned long const elapsed_milliseconds=GetTickCount()-target_time.start; LONGLONG const remaining_milliseconds=(target_time.milliseconds-elapsed_milliseconds); LONGLONG const hundred_nanoseconds_in_one_millisecond=10000; if(remaining_milliseconds>0) { due_time.QuadPart=-(remaining_milliseconds*hundred_nanoseconds_in_one_millisecond); } } else { SYSTEMTIME target_system_time={0}; target_system_time.wYear=target_time.abs_time.date().year(); target_system_time.wMonth=target_time.abs_time.date().month(); target_system_time.wDay=target_time.abs_time.date().day(); target_system_time.wHour=(WORD)target_time.abs_time.time_of_day().hours(); target_system_time.wMinute=(WORD)target_time.abs_time.time_of_day().minutes(); target_system_time.wSecond=(WORD)target_time.abs_time.time_of_day().seconds(); if(!SystemTimeToFileTime(&target_system_time,((FILETIME*)&due_time))) { due_time.QuadPart=0; } else { long const hundred_nanoseconds_in_one_second=10000000; posix_time::time_duration::tick_type const ticks_per_second= target_time.abs_time.time_of_day().ticks_per_second(); if(ticks_per_second>hundred_nanoseconds_in_one_second) { posix_time::time_duration::tick_type const ticks_per_hundred_nanoseconds= ticks_per_second/hundred_nanoseconds_in_one_second; due_time.QuadPart+= target_time.abs_time.time_of_day().fractional_seconds()/ ticks_per_hundred_nanoseconds; } else { due_time.QuadPart+= target_time.abs_time.time_of_day().fractional_seconds()* (hundred_nanoseconds_in_one_second/ticks_per_second); } } } return due_time; } } bool interruptible_wait(detail::win32::handle handle_to_wait_for,detail::timeout target_time) { detail::win32::handle handles[3]={0}; unsigned handle_count=0; unsigned wait_handle_index=~0U; unsigned interruption_index=~0U; unsigned timeout_index=~0U; if(handle_to_wait_for!=detail::win32::invalid_handle_value) { wait_handle_index=handle_count; 
handles[handle_count++]=handle_to_wait_for; } if(get_current_thread_data() && get_current_thread_data()->interruption_enabled) { interruption_index=handle_count; handles[handle_count++]=get_current_thread_data()->interruption_handle; } detail::win32::handle_manager timer_handle; #ifndef UNDER_CE unsigned const min_timer_wait_period=20; if(!target_time.is_sentinel()) { detail::timeout::remaining_time const time_left=target_time.remaining_milliseconds(); if(time_left.milliseconds > min_timer_wait_period) { // for a long-enough timeout, use a waitable timer (which tracks clock changes) timer_handle=CreateWaitableTimer(NULL,false,NULL); if(timer_handle!=0) { LARGE_INTEGER due_time=get_due_time(target_time); bool const set_time_succeeded=SetWaitableTimer(timer_handle,&due_time,0,0,0,false)!=0; if(set_time_succeeded) { timeout_index=handle_count; handles[handle_count++]=timer_handle; } } } else if(!target_time.relative) { // convert short absolute-time timeouts into relative ones, so we don't race against clock changes target_time=detail::timeout(time_left.milliseconds); } } #endif bool const using_timer=timeout_index!=~0u; detail::timeout::remaining_time time_left(0); do { if(!using_timer) { time_left=target_time.remaining_milliseconds(); } if(handle_count) { unsigned long const notified_index=detail::win32::WaitForMultipleObjects(handle_count,handles,false,using_timer?INFINITE:time_left.milliseconds); if(notified_index<handle_count) { if(notified_index==wait_handle_index) { return true; } else if(notified_index==interruption_index) { detail::win32::ResetEvent(get_current_thread_data()->interruption_handle); throw thread_interrupted(); } else if(notified_index==timeout_index) { return false; } } } else { detail::win32::Sleep(time_left.milliseconds); } if(target_time.relative) { target_time.milliseconds-=detail::timeout::max_non_infinite_wait; } } while(time_left.more); return false; } thread::id get_id() { return thread::id(get_or_make_current_thread_data()); } void 
interruption_point() { if(interruption_enabled() && interruption_requested()) { detail::win32::ResetEvent(get_current_thread_data()->interruption_handle); throw thread_interrupted(); } } bool interruption_enabled() { return get_current_thread_data() && get_current_thread_data()->interruption_enabled; } bool interruption_requested() { return get_current_thread_data() && (detail::win32::WaitForSingleObject(get_current_thread_data()->interruption_handle,0)==0); } void yield() { detail::win32::Sleep(0); } disable_interruption::disable_interruption(): interruption_was_enabled(interruption_enabled()) { if(interruption_was_enabled) { get_current_thread_data()->interruption_enabled=false; } } disable_interruption::~disable_interruption() { if(get_current_thread_data()) { get_current_thread_data()->interruption_enabled=interruption_was_enabled; } } restore_interruption::restore_interruption(disable_interruption& d) { if(d.interruption_was_enabled) { get_current_thread_data()->interruption_enabled=true; } } restore_interruption::~restore_interruption() { if(get_current_thread_data()) { get_current_thread_data()->interruption_enabled=false; } } } namespace detail { void add_thread_exit_function(thread_exit_function_base* func) { detail::thread_data_base* const current_thread_data(get_or_make_current_thread_data()); thread_exit_callback_node* const new_node= heap_new<thread_exit_callback_node>( func,current_thread_data->thread_exit_callbacks); current_thread_data->thread_exit_callbacks=new_node; } tss_data_node* find_tss_data(void const* key) { detail::thread_data_base* const current_thread_data(get_current_thread_data()); if(current_thread_data) { detail::tss_data_node* current_node=current_thread_data->tss_data; while(current_node) { if(current_node->key==key) { return current_node; } current_node=current_node->next; } } return NULL; } void* get_tss_data(void const* key) { if(tss_data_node* const current_node=find_tss_data(key)) { return current_node->value; } return NULL; } 
void set_tss_data(void const* key,boost::shared_ptr<tss_cleanup_function> func,void* tss_data,bool cleanup_existing) { if(tss_data_node* const current_node=find_tss_data(key)) { if(cleanup_existing && current_node->func.get() && current_node->value) { (*current_node->func)(current_node->value); } current_node->func=func; current_node->value=tss_data; } else if(func && tss_data) { detail::thread_data_base* const current_thread_data(get_or_make_current_thread_data()); tss_data_node* const new_node= heap_new<tss_data_node>(key,func,tss_data,current_thread_data->tss_data); current_thread_data->tss_data=new_node; } } } BOOST_THREAD_DECL void __cdecl on_process_enter() {} BOOST_THREAD_DECL void __cdecl on_thread_enter() {} BOOST_THREAD_DECL void __cdecl on_process_exit() { boost::cleanup_tls_key(); } BOOST_THREAD_DECL void __cdecl on_thread_exit() { boost::run_thread_exit_callbacks(); } }
agpl-3.0
exercitussolus/yolo
src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java
3894
/* * Licensed to ElasticSearch and Shay Banon under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. ElasticSearch licenses this * file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.index.similarity; import com.google.common.collect.ImmutableMap; import org.apache.lucene.search.similarities.*; import org.elasticsearch.ElasticSearchIllegalArgumentException; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; /** * {@link SimilarityProvider} for {@link IBSimilarity}. 
* <p/> * Configuration options available: * <ul> * <li>distribution</li> * <li>lambda</li> * <li>normalization</li> * </ul> * @see IBSimilarity For more information about configuration */ public class IBSimilarityProvider extends AbstractSimilarityProvider { private static final ImmutableMap<String, Distribution> DISTRIBUTION_CACHE; private static final ImmutableMap<String, Lambda> LAMBDA_CACHE; static { MapBuilder<String, Distribution> distributions = MapBuilder.newMapBuilder(); distributions.put("ll", new DistributionLL()); distributions.put("spl", new DistributionSPL()); DISTRIBUTION_CACHE = distributions.immutableMap(); MapBuilder<String, Lambda> lamdas = MapBuilder.newMapBuilder(); lamdas.put("df", new LambdaDF()); lamdas.put("ttf", new LambdaTTF()); LAMBDA_CACHE = lamdas.immutableMap(); } private final IBSimilarity similarity; @Inject public IBSimilarityProvider(@Assisted String name, @Assisted Settings settings) { super(name); Distribution distribution = parseDistribution(settings); Lambda lambda = parseLambda(settings); Normalization normalization = parseNormalization(settings); this.similarity = new IBSimilarity(distribution, lambda, normalization); } /** * Parses the given Settings and creates the appropriate {@link Distribution} * * @param settings Settings to parse * @return {@link Normalization} referred to in the Settings */ protected Distribution parseDistribution(Settings settings) { String rawDistribution = settings.get("distribution"); Distribution distribution = DISTRIBUTION_CACHE.get(rawDistribution); if (distribution == null) { throw new ElasticSearchIllegalArgumentException("Unsupported Distribution [" + rawDistribution + "]"); } return distribution; } /** * Parses the given Settings and creates the appropriate {@link Lambda} * * @param settings Settings to parse * @return {@link Normalization} referred to in the Settings */ protected Lambda parseLambda(Settings settings) { String rawLambda = settings.get("lambda"); Lambda lambda = 
LAMBDA_CACHE.get(rawLambda); if (lambda == null) { throw new ElasticSearchIllegalArgumentException("Unsupported Lambda [" + rawLambda + "]"); } return lambda; } /** * {@inheritDoc} */ @Override public Similarity get() { return similarity; } }
agpl-3.0
UniversityOfHawaii/kfs
kfs-ar/src/main/java/org/kuali/kfs/module/ar/document/service/impl/MilestoneScheduleMaintenanceServiceImpl.java
3645
/* * The Kuali Financial System, a comprehensive financial management system for higher education. * * Copyright 2005-2014 The Kuali Foundation * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as * published by the Free Software Foundation, either version 3 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package org.kuali.kfs.module.ar.document.service.impl; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import org.apache.commons.collections.CollectionUtils; import org.kuali.kfs.module.ar.ArPropertyConstants; import org.kuali.kfs.module.ar.businessobject.InvoiceMilestone; import org.kuali.kfs.module.ar.document.service.ContractsGrantsInvoiceDocumentService; import org.kuali.kfs.module.ar.document.service.MilestoneScheduleMaintenanceService; import org.kuali.rice.krad.service.BusinessObjectService; /** * Default implementation of the Milestone Schedule maintenance service */ public class MilestoneScheduleMaintenanceServiceImpl implements MilestoneScheduleMaintenanceService { protected BusinessObjectService businessObjectService; protected ContractsGrantsInvoiceDocumentService contractsGrantsInvoiceDocumentService; @Override public boolean hasMilestoneBeenCopiedToInvoice(Long proposalNumber, String milestoneId) { Collection<InvoiceMilestone> invoiceMilestones = new ArrayList<InvoiceMilestone>(); Map<String, Object> map = new HashMap<String, Object>(); 
map.put(ArPropertyConstants.MilestoneFields.MILESTONE_IDENTIFIER, milestoneId); invoiceMilestones = getBusinessObjectService().findMatching(InvoiceMilestone.class, map); // skip ineffective milestones, based on invoice Set<String> effectiveDocumentNumbers = new HashSet<String>(); List<InvoiceMilestone> effectiveInvoiceMilestones = new ArrayList<InvoiceMilestone>(); for (InvoiceMilestone invoiceMilestone : invoiceMilestones) { if (effectiveDocumentNumbers.contains(invoiceMilestone.getDocumentNumber()) || getContractsGrantsInvoiceDocumentService().isInvoiceDocumentEffective(invoiceMilestone.getDocumentNumber())) { effectiveInvoiceMilestones.add(invoiceMilestone); effectiveDocumentNumbers.add(invoiceMilestone.getDocumentNumber()); } } return CollectionUtils.isNotEmpty(effectiveInvoiceMilestones); } public BusinessObjectService getBusinessObjectService() { return businessObjectService; } public void setBusinessObjectService(BusinessObjectService businessObjectService) { this.businessObjectService = businessObjectService; } public ContractsGrantsInvoiceDocumentService getContractsGrantsInvoiceDocumentService() { return contractsGrantsInvoiceDocumentService; } public void setContractsGrantsInvoiceDocumentService(ContractsGrantsInvoiceDocumentService contractsGrantsInvoiceDocumentService) { this.contractsGrantsInvoiceDocumentService = contractsGrantsInvoiceDocumentService; } }
agpl-3.0
jrochas/scale-proactive
src/Tests/unitTests/gcmdeployment/virtualnode/TestGetANodeMultithread.java
4888
/* * ################################################################ * * ProActive Parallel Suite(TM): The Java(TM) library for * Parallel, Distributed, Multi-Core Computing for * Enterprise Grids & Clouds * * Copyright (C) 1997-2012 INRIA/University of * Nice-Sophia Antipolis/ActiveEon * Contact: proactive@ow2.org or contact@activeeon.com * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Affero General Public License * as published by the Free Software Foundation; version 3 of * the License. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA * * If needed, contact us to obtain a release under GPL Version 2 or 3 * or a different license than the AGPL. 
* * Initial developer(s): The ProActive Team * http://proactive.inria.fr/team_members.htm * Contributor(s): * * ################################################################ * $$PROACTIVE_INITIAL_DEV$$ */ package unitTests.gcmdeployment.virtualnode; import junit.framework.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import org.objectweb.proactive.core.Constants; import org.objectweb.proactive.core.node.Node; import org.objectweb.proactive.core.runtime.ProActiveRuntime; import org.objectweb.proactive.core.runtime.ProActiveRuntimeImpl; import org.objectweb.proactive.extensions.gcmdeployment.GCMApplication.FakeNode; import org.objectweb.proactive.extensions.gcmdeployment.core.GCMVirtualNodeImpl; import functionalTests.FunctionalTest; public class TestGetANodeMultithread extends FunctionalTest { static final int TIMEOUT = 1000; static final int CLIENTS = 10; GCMVirtualNodeImpl vn; GCMApplicationDescriptorMockup gcma; ProActiveRuntime part; @BeforeClass static public void setCapacity() { ProActiveRuntimeImpl.getProActiveRuntime().setCapacity(12000); } @Before public void before() { vn = new GCMVirtualNodeImpl(); gcma = new GCMApplicationDescriptorMockup(); part = ProActiveRuntimeImpl.getProActiveRuntime(); } @Test public void multithreadSimple() throws InterruptedException { final int nodes = 10000; for (int i = 0; i < nodes; i++) { vn.addNode(new FakeNode(gcma, part)); } Client[] clients = new Client[CLIENTS]; for (int client = 0; client < clients.length; client++) { clients[client] = new Client(vn); } for (int client = 0; client < clients.length; client++) { clients[client].start(); } for (int client = 0; client < clients.length; client++) { clients[client].join(); } int sum = 0; for (int client = 0; client < clients.length; client++) { sum += clients[client].counter; } Assert.assertEquals(nodes, sum); } class Client extends Thread { GCMVirtualNodeImpl vn; public int counter; public Client(GCMVirtualNodeImpl vn) { this.vn = 
vn; counter = -1; } public void run() { Node node; do { node = vn.getANode(TIMEOUT); counter++; } while (node != null); } } static void checkGetANodeIsNull(GCMVirtualNodeImpl vn) { long before = System.currentTimeMillis(); Node rNode = vn.getANode(TIMEOUT); long after = System.currentTimeMillis(); long timeElapsed = after - before; Assert.assertFalse("Timeout too short", timeoutTooShort(TIMEOUT, timeElapsed)); Assert.assertFalse("Timeout too long", timeoutTooLong(TIMEOUT, timeElapsed)); Assert.assertNull(rNode); } void checkGetANodeIsNotNull(GCMVirtualNodeImpl vn, int i) { Node rNode = vn.getANode(TIMEOUT); String nodeName = part.getVMInformation().getName() + "_" + Constants.GCM_NODE_NAME + i; Assert.assertNotNull(rNode); Assert.assertEquals(nodeName, rNode.getNodeInformation().getName()); } static boolean timeoutTooShort(long timeout, long timeElapsed) { if (timeElapsed < timeout) return true; return false; } static boolean timeoutTooLong(long timeout, long timeElapsed) { if (timeElapsed > 2 * timeout) return true; return false; } }
agpl-3.0
bashrc/gnusocial-debian
src/lib/networkpublicnoticestream.php
1742
<?php if (!defined('GNUSOCIAL')) { exit(1); } class NetworkPublicNoticeStream extends ScopingNoticeStream { function __construct(Profile $scoped=null) { parent::__construct(new CachingNoticeStream(new RawNetworkPublicNoticeStream(), 'networkpublic'), $scoped); } } /** * Raw public stream * * @category Stream * @package StatusNet * @author Evan Prodromou <evan@status.net> * @copyright 2011 StatusNet, Inc. * @license http://www.fsf.org/licensing/licenses/agpl-3.0.html AGPL 3.0 * @link http://status.net/ */ class RawNetworkPublicNoticeStream extends NoticeStream { function getNoticeIds($offset, $limit, $since_id, $max_id) { $notice = new Notice(); $notice->selectAdd(); // clears it $notice->selectAdd('id'); $notice->orderBy('created DESC, id DESC'); if (!is_null($offset)) { $notice->limit($offset, $limit); } $notice->whereAdd('is_local ='. Notice::REMOTE); // -1 == blacklisted, -2 == gateway (i.e. Twitter) $notice->whereAdd('is_local !='. Notice::LOCAL_NONPUBLIC); $notice->whereAdd('is_local !='. Notice::GATEWAY); Notice::addWhereSinceId($notice, $since_id); Notice::addWhereMaxId($notice, $max_id); if (!empty($this->selectVerbs)) { $notice->whereAddIn('verb', $this->selectVerbs, $notice->columnType('verb')); } $ids = array(); if ($notice->find()) { while ($notice->fetch()) { $ids[] = $notice->id; } } $notice->free(); $notice = NULL; return $ids; } }
agpl-3.0
UniversityOfHawaii/kfs
kfs-core/src/main/java/org/kuali/kfs/pdp/batch/ExtractChecksStep.java
1633
/* * The Kuali Financial System, a comprehensive financial management system for higher education. * * Copyright 2005-2014 The Kuali Foundation * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as * published by the Free Software Foundation, either version 3 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package org.kuali.kfs.pdp.batch; import java.util.Date; import org.kuali.kfs.pdp.batch.service.ExtractPaymentService; import org.kuali.kfs.sys.batch.AbstractStep; public class ExtractChecksStep extends AbstractStep { private static org.apache.log4j.Logger LOG = org.apache.log4j.Logger.getLogger(ExtractChecksStep.class); public ExtractPaymentService extractPaymentService; /** * @see org.kuali.kfs.sys.batch.Step#execute(java.lang.String, java.util.Date) */ public boolean execute(String jobName, Date jobRunDate) throws InterruptedException { LOG.debug("execute() started"); extractPaymentService.extractChecks(); return true; } public void setExtractPaymentService(ExtractPaymentService eps) { extractPaymentService = eps; } }
agpl-3.0
dbrashear/bochs
iodev/speaker.cc
3613
///////////////////////////////////////////////////////////////////////// // $Id: speaker.cc,v 1.15 2009/04/10 08:15:25 vruppert Exp $ ///////////////////////////////////////////////////////////////////////// // // Copyright 2003 by David N. Welton <davidw@dedasys.com>. // // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; either // version 2 of the License, or (at your option) any later version. // // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #define BX_PLUGGABLE #define NO_DEVICE_INCLUDES #include "iodev.h" #include "speaker.h" #ifdef __linux__ #include <unistd.h> #include <stdio.h> #include <fcntl.h> #include <errno.h> #include <sys/ioctl.h> #include <sys/types.h> #include <sys/stat.h> #include <linux/kd.h> #endif #define LOG_THIS theSpeaker-> bx_speaker_c *theSpeaker= NULL; int libspeaker_LTX_plugin_init(plugin_t *plugin, plugintype_t type, int argc, char *argv[]) { theSpeaker = new bx_speaker_c(); bx_devices.pluginSpeaker = theSpeaker; BX_REGISTER_DEVICE_DEVMODEL(plugin, type, theSpeaker, BX_PLUGIN_SPEAKER); return(0); // Success } void libspeaker_LTX_plugin_fini(void) { delete theSpeaker; } bx_speaker_c::bx_speaker_c() { put("SPEAK"); beep_frequency = 0.0; // Off #ifdef __linux__ consolefd = open("/dev/console", O_WRONLY); #endif } bx_speaker_c::~bx_speaker_c() { #ifdef __linux__ if (consolefd >= 0) { ioctl(consolefd, KIOCSOUND, 0); close(consolefd); } #endif BX_DEBUG(("Exit")); } void bx_speaker_c::init(void) { 
#ifdef __linux__ if (consolefd != -1) { BX_INFO(("Open /dev/console successfully")); } else { BX_INFO(("Failed to open /dev/console: %s", strerror(errno))); BX_INFO(("Deactivating beep on console")); } #endif this->beep_off(); } void bx_speaker_c::reset(unsigned type) { beep_off(); } void bx_speaker_c::beep_on(float frequency) { beep_frequency = frequency; #ifdef __linux__ if (consolefd != -1) { this->info("pc speaker on with frequency %f", frequency); ioctl(consolefd, KIOCSOUND, (int)(clock_tick_rate/frequency)); } #elif defined(WIN32) usec_start = bx_pc_system.time_usec(); #endif // give the gui a chance to signal beep off bx_gui->beep_on(frequency); } #if defined(WIN32) struct { DWORD frequency; DWORD msec; } beep_info; DWORD WINAPI BeepThread(LPVOID) { static BOOL threadActive = FALSE; while (threadActive) Sleep(10); threadActive = TRUE; Beep(beep_info.frequency, beep_info.msec); threadActive = FALSE; return 0; } #endif void bx_speaker_c::beep_off() { if (beep_frequency != 0.0) { #ifdef __linux__ if (consolefd != -1) { ioctl(consolefd, KIOCSOUND, 0); } #elif defined(WIN32) // FIXME: sound should start at beep_on() and end here DWORD threadID; beep_info.msec = (DWORD)((bx_pc_system.time_usec() - usec_start) / 1000); beep_info.frequency = (DWORD)beep_frequency; CreateThread(NULL, 0, BeepThread, NULL, 0, &threadID); #endif // give the gui a chance to signal beep off bx_gui->beep_off(); beep_frequency = 0.0; } }
lgpl-2.1
gajgeospatial/OpenSceneGraph-3.4.0
src/osgPlugins/shp/ESRIShapeReaderWriter.cpp
4097
#include <osgDB/FileNameUtils> #include <osgDB/FileUtils> #include <osgDB/fstream> #include <osgDB/Registry> #include <osgTerrain/Locator> #include "ESRIType.h" #include "ESRIShape.h" #include "ESRIShapeParser.h" #include "XBaseParser.h" class ESRIShapeReaderWriter : public osgDB::ReaderWriter { public: ESRIShapeReaderWriter() { supportsExtension("shp","Geospatial Shape file format"); supportsOption("double","Read x,y,z data as double an stored as geometry in osg::Vec3dArray's."); } virtual const char* className() { return "ESRI Shape ReaderWriter"; } virtual bool acceptsExtension(const std::string& extension) const { return osgDB::equalCaseInsensitive(extension,"shp"); } virtual ReadResult readObject(const std::string& fileName, const Options* opt) const { return readNode(fileName,opt); } virtual ReadResult readNode(const std::string& file, const Options* options) const { std::string ext = osgDB::getFileExtension(file); if (!acceptsExtension(ext)) return ReadResult::FILE_NOT_HANDLED; std::string fileName = osgDB::findDataFile(file, options); if (fileName.empty()) return ReadResult::FILE_NOT_FOUND; bool useDouble = false; if (options && options->getOptionString().find("double")!=std::string::npos) { useDouble = true; } ESRIShape::ESRIShapeParser sp(fileName, useDouble); std::string xbaseFileName(osgDB::getNameLessExtension(fileName) + ".dbf"); ESRIShape::XBaseParser xbp(xbaseFileName); if (sp.getGeode() && (xbp.getAttributeList().empty() == false)) { if (sp.getGeode()->getNumDrawables() != xbp.getAttributeList().size()) { OSG_WARN << "ESRIShape loader : .dbf file containe different record number that .shp file." << std::endl << " .dbf record skipped." 
<< std::endl; } else { osg::Geode * geode = sp.getGeode(); unsigned int i = 0; ESRIShape::XBaseParser::ShapeAttributeListList::const_iterator it, end = xbp.getAttributeList().end(); for (it = xbp.getAttributeList().begin(); it != end; ++it, ++i) { geode->getDrawable(i)->setUserData(it->get()); } } } if (sp.getGeode()) { std::string projFileName(osgDB::getNameLessExtension(fileName) + ".prj"); if (osgDB::fileExists(projFileName)) { osgDB::ifstream fin(projFileName.c_str()); if (fin) { std::string projstring; while(!fin.eof()) { char readline[4096]; *readline = 0; fin.getline(readline, sizeof(readline)); if (!projstring.empty() && !fin.eof()) { projstring += '\n'; } projstring += readline; } if (!projstring.empty()) { osgTerrain::Locator* locator = new osgTerrain::Locator; sp.getGeode()->setUserData(locator); locator->setFormat("WKT"); locator->setCoordinateSystem(projstring); locator->setDefinedInFile(false); } } } } return sp.getGeode(); } }; REGISTER_OSGPLUGIN(shp, ESRIShapeReaderWriter)
lgpl-2.1
sanguinariojoe/FreeCAD
src/Gui/DlgSettingsMacroImp.cpp
4375
/*************************************************************************** * Copyright (c) 2002 Jürgen Riegel <juergen.riegel@web.de> * * * * This file is part of the FreeCAD CAx development system. * * * * This library is free software; you can redistribute it and/or * * modify it under the terms of the GNU Library General Public * * License as published by the Free Software Foundation; either * * version 2 of the License, or (at your option) any later version. * * * * This library is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU Library General Public License for more details. * * * * You should have received a copy of the GNU Library General Public * * License along with this library; see the file COPYING.LIB. If not, * * write to the Free Software Foundation, Inc., 59 Temple Place, * * Suite 330, Boston, MA 02111-1307, USA * * * ***************************************************************************/ #include "PreCompiled.h" #include "DlgSettingsMacroImp.h" #include "ui_DlgSettingsMacro.h" #include "Action.h" #include "Application.h" #include "MainWindow.h" using namespace Gui::Dialog; /* TRANSLATOR Gui::Dialog::DlgSettingsMacroImp */ /** * Constructs a DlgSettingsMacroImp which is a child of 'parent', with the * name 'name' and widget flags set to 'f' */ DlgSettingsMacroImp::DlgSettingsMacroImp( QWidget* parent ) : PreferencePage( parent ) , ui(new Ui_DlgSettingsMacro) { ui->setupUi(this); // Was never implemented, so hide it ui->FileLogCheckBox->hide(); ui->MacroPath_2->hide(); if (ui->MacroPath->fileName().isEmpty()) { QDir d(QString::fromUtf8(App::GetApplication().getUserMacroDir().c_str())); ui->MacroPath->setFileName(d.path()); } } /** * Destroys the object and frees any allocated resources */ DlgSettingsMacroImp::~DlgSettingsMacroImp() { // no need to delete child widgets, Qt does it all for us } /** Sets the 
size of the recent macros list from the user parameters. * @see RecentMacrosAction * @see StdCmdRecentMacros */ void DlgSettingsMacroImp::setRecentMacroSize() { RecentMacrosAction *recent = getMainWindow()->findChild<RecentMacrosAction *>(QLatin1String("recentMacros")); if (recent) { ParameterGrp::handle hGrp = WindowParameter::getDefaultParameter()->GetGroup("RecentMacros"); recent->resizeList(hGrp->GetInt("RecentMacros", 4)); } } void DlgSettingsMacroImp::saveSettings() { ui->PrefCheckBox_LocalEnv->onSave(); ui->MacroPath->onSave(); ui->PrefCheckBox_RecordGui->onSave(); ui->PrefCheckBox_GuiAsComment->onSave(); ui->PConsoleCheckBox->onSave(); ui->FileLogCheckBox->onSave(); ui->MacroPath_2->onSave(); ui->RecentMacros->onSave(); ui->ShortcutModifiers->onSave(); ui->ShortcutCount->onSave(); setRecentMacroSize(); } void DlgSettingsMacroImp::loadSettings() { ui->PrefCheckBox_LocalEnv->onRestore(); ui->MacroPath->onRestore(); ui->PrefCheckBox_RecordGui->onRestore(); ui->PrefCheckBox_GuiAsComment->onRestore(); ui->PConsoleCheckBox->onRestore(); ui->FileLogCheckBox->onRestore(); ui->MacroPath_2->onRestore(); ui->RecentMacros->onRestore(); ui->ShortcutModifiers->onRestore(); ui->ShortcutCount->onRestore(); } /** * Sets the strings of the subwidgets using the current language. */ void DlgSettingsMacroImp::changeEvent(QEvent *e) { if (e->type() == QEvent::LanguageChange) { ui->retranslateUi(this); } else { QWidget::changeEvent(e); } } #include "moc_DlgSettingsMacroImp.cpp"
lgpl-2.1
youfoh/webkit-efl
LayoutTests/ietestcenter/Javascript/TestCases/10.4.2-1-4.js
2137
/// Copyright (c) 2009 Microsoft Corporation /// /// Redistribution and use in source and binary forms, with or without modification, are permitted provided /// that the following conditions are met: /// * Redistributions of source code must retain the above copyright notice, this list of conditions and /// the following disclaimer. /// * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and /// the following disclaimer in the documentation and/or other materials provided with the distribution. /// * Neither the name of Microsoft nor the names of its contributors may be used to /// endorse or promote products derived from this software without specific prior written permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR /// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE /// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT /// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS /// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, /// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF /// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
var __10_4_2_1_4 = "str"; ES5Harness.registerTest( { id: "10.4.2-1-4", path: "TestCases/chapter10/10.4/10.4.2/10.4.2-1-4.js", description: "Indirect call to eval has context set to global context (with block)", test: function testcase() { var o = new Object(); o.__10_4_2_1_4 = "str2"; var _eval = eval; var __10_4_2_1_4 = "str1"; with(o) { if(_eval("\'str\' === __10_4_2_1_4") === true && // indirect eval eval("\'str2\' === __10_4_2_1_4") === true) // direct eval return true; } } });
lgpl-2.1
tobyt42/brjs
brjs-core/src/main/java/org/bladerunnerjs/plugin/proxy/VirtualProxyState.java
118
package org.bladerunnerjs.plugin.proxy; public enum VirtualProxyState { Uninitialized, Initlializing, Initialized }
lgpl-3.0
smba/oak
edu.cmu.cs.oak/src/test/resources/mediawiki/includes/api/ApiEditPage.php
18717
<?php /** * * * Created on August 16, 2007 * * Copyright © 2007 Iker Labarga "<Firstname><Lastname>@gmail.com" * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * http://www.gnu.org/copyleft/gpl.html * * @file */ /** * A module that allows for editing and creating pages. * * Currently, this wraps around the EditPage class in an ugly way, * EditPage.php should be rewritten to provide a cleaner interface, * see T20654 if you're inspired to fix this. * * @ingroup API */ class ApiEditPage extends ApiBase { public function execute() { $this->useTransactionalTimeLimit(); $user = $this->getUser(); $params = $this->extractRequestParams(); if ( is_null( $params['text'] ) && is_null( $params['appendtext'] ) && is_null( $params['prependtext'] ) && $params['undo'] == 0 ) { $this->dieUsageMsg( 'missingtext' ); } $pageObj = $this->getTitleOrPageId( $params ); $titleObj = $pageObj->getTitle(); $apiResult = $this->getResult(); if ( $params['redirect'] ) { if ( $params['prependtext'] === null && $params['appendtext'] === null && $params['section'] !== 'new' ) { $this->dieUsage( 'You have attempted to edit using the "redirect"-following' . ' mode, which must be used in conjuction with section=new, prependtext' . 
', or appendtext.', 'redirect-appendonly' ); } if ( $titleObj->isRedirect() ) { $oldTitle = $titleObj; $titles = Revision::newFromTitle( $oldTitle, false, Revision::READ_LATEST ) ->getContent( Revision::FOR_THIS_USER, $user ) ->getRedirectChain(); // array_shift( $titles ); $redirValues = []; /** @var $newTitle Title */ foreach ( $titles as $id => $newTitle ) { if ( !isset( $titles[$id - 1] ) ) { $titles[$id - 1] = $oldTitle; } $redirValues[] = [ 'from' => $titles[$id - 1]->getPrefixedText(), 'to' => $newTitle->getPrefixedText() ]; $titleObj = $newTitle; } ApiResult::setIndexedTagName( $redirValues, 'r' ); $apiResult->addValue( null, 'redirects', $redirValues ); // Since the page changed, update $pageObj $pageObj = WikiPage::factory( $titleObj ); } } if ( !isset( $params['contentmodel'] ) || $params['contentmodel'] == '' ) { $contentHandler = $pageObj->getContentHandler(); } else { $contentHandler = ContentHandler::getForModelID( $params['contentmodel'] ); } $name = $titleObj->getPrefixedDBkey(); $model = $contentHandler->getModelID(); if ( $params['undo'] > 0 ) { // allow undo via api } elseif ( $contentHandler->supportsDirectApiEditing() === false ) { $this->dieUsage( "Direct editing via API is not supported for content model $model used by $name", 'no-direct-editing' ); } if ( !isset( $params['contentformat'] ) || $params['contentformat'] == '' ) { $params['contentformat'] = $contentHandler->getDefaultFormat(); } $contentFormat = $params['contentformat']; if ( !$contentHandler->isSupportedFormat( $contentFormat ) ) { $this->dieUsage( "The requested format $contentFormat is not supported for content model " . 
" $model used by $name", 'badformat' ); } if ( $params['createonly'] && $titleObj->exists() ) { $this->dieUsageMsg( 'createonly-exists' ); } if ( $params['nocreate'] && !$titleObj->exists() ) { $this->dieUsageMsg( 'nocreate-missing' ); } // Now let's check whether we're even allowed to do this $errors = $titleObj->getUserPermissionsErrors( 'edit', $user ); if ( !$titleObj->exists() ) { $errors = array_merge( $errors, $titleObj->getUserPermissionsErrors( 'create', $user ) ); } if ( count( $errors ) ) { if ( is_array( $errors[0] ) ) { switch ( $errors[0][0] ) { case 'blockedtext': $this->dieUsage( 'You have been blocked from editing', 'blocked', 0, [ 'blockinfo' => ApiQueryUserInfo::getBlockInfo( $user->getBlock() ) ] ); break; case 'autoblockedtext': $this->dieUsage( 'Your IP address has been blocked automatically, because it was used by a blocked user', 'autoblocked', 0, [ 'blockinfo' => ApiQueryUserInfo::getBlockInfo( $user->getBlock() ) ] ); break; default: $this->dieUsageMsg( $errors[0] ); } } else { $this->dieUsageMsg( $errors[0] ); } } $toMD5 = $params['text']; if ( !is_null( $params['appendtext'] ) || !is_null( $params['prependtext'] ) ) { $content = $pageObj->getContent(); if ( !$content ) { if ( $titleObj->getNamespace() == NS_MEDIAWIKI ) { # If this is a MediaWiki:x message, then load the messages # and return the message value for x. $text = $titleObj->getDefaultMessageText(); if ( $text === false ) { $text = ''; } try { $content = ContentHandler::makeContent( $text, $this->getTitle() ); } catch ( MWContentSerializationException $ex ) { $this->dieUsage( $ex->getMessage(), 'parseerror' ); return; } } else { # Otherwise, make a new empty content. 
$content = $contentHandler->makeEmptyContent(); } } // @todo Add support for appending/prepending to the Content interface if ( !( $content instanceof TextContent ) ) { $mode = $contentHandler->getModelID(); $this->dieUsage( "Can't append to pages using content model $mode", 'appendnotsupported' ); } if ( !is_null( $params['section'] ) ) { if ( !$contentHandler->supportsSections() ) { $modelName = $contentHandler->getModelID(); $this->dieUsage( "Sections are not supported for this content model: $modelName.", 'sectionsnotsupported' ); } if ( $params['section'] == 'new' ) { // DWIM if they're trying to prepend/append to a new section. $content = null; } else { // Process the content for section edits $section = $params['section']; $content = $content->getSection( $section ); if ( !$content ) { $this->dieUsage( "There is no section {$section}.", 'nosuchsection' ); } } } if ( !$content ) { $text = ''; } else { $text = $content->serialize( $contentFormat ); } $params['text'] = $params['prependtext'] . $text . $params['appendtext']; $toMD5 = $params['prependtext'] . 
$params['appendtext']; } if ( $params['undo'] > 0 ) { if ( $params['undoafter'] > 0 ) { if ( $params['undo'] < $params['undoafter'] ) { list( $params['undo'], $params['undoafter'] ) = [ $params['undoafter'], $params['undo'] ]; } $undoafterRev = Revision::newFromId( $params['undoafter'] ); } $undoRev = Revision::newFromId( $params['undo'] ); if ( is_null( $undoRev ) || $undoRev->isDeleted( Revision::DELETED_TEXT ) ) { $this->dieUsageMsg( [ 'nosuchrevid', $params['undo'] ] ); } if ( $params['undoafter'] == 0 ) { $undoafterRev = $undoRev->getPrevious(); } if ( is_null( $undoafterRev ) || $undoafterRev->isDeleted( Revision::DELETED_TEXT ) ) { $this->dieUsageMsg( [ 'nosuchrevid', $params['undoafter'] ] ); } if ( $undoRev->getPage() != $pageObj->getId() ) { $this->dieUsageMsg( [ 'revwrongpage', $undoRev->getId(), $titleObj->getPrefixedText() ] ); } if ( $undoafterRev->getPage() != $pageObj->getId() ) { $this->dieUsageMsg( [ 'revwrongpage', $undoafterRev->getId(), $titleObj->getPrefixedText() ] ); } $newContent = $contentHandler->getUndoContent( $pageObj->getRevision(), $undoRev, $undoafterRev ); if ( !$newContent ) { $this->dieUsageMsg( 'undo-failure' ); } $params['text'] = $newContent->serialize( $params['contentformat'] ); // If no summary was given and we only undid one rev, // use an autosummary if ( is_null( $params['summary'] ) && $titleObj->getNextRevisionID( $undoafterRev->getId() ) == $params['undo'] ) { $params['summary'] = wfMessage( 'undo-summary' ) ->params( $params['undo'], $undoRev->getUserText() )->inContentLanguage()->text(); } } // See if the MD5 hash checks out if ( !is_null( $params['md5'] ) && md5( $toMD5 ) !== $params['md5'] ) { $this->dieUsageMsg( 'hashcheckfailed' ); } // EditPage wants to parse its stuff from a WebRequest // That interface kind of sucks, but it's workable $requestArray = [ 'wpTextbox1' => $params['text'], 'format' => $contentFormat, 'model' => $contentHandler->getModelID(), 'wpEditToken' => $params['token'], 
'wpIgnoreBlankSummary' => true, 'wpIgnoreBlankArticle' => true, 'wpIgnoreSelfRedirect' => true, 'bot' => $params['bot'], ]; if ( !is_null( $params['summary'] ) ) { $requestArray['wpSummary'] = $params['summary']; } if ( !is_null( $params['sectiontitle'] ) ) { $requestArray['wpSectionTitle'] = $params['sectiontitle']; } // TODO: Pass along information from 'undoafter' as well if ( $params['undo'] > 0 ) { $requestArray['wpUndidRevision'] = $params['undo']; } // Watch out for basetimestamp == '' or '0' // It gets treated as NOW, almost certainly causing an edit conflict if ( $params['basetimestamp'] !== null && (bool)$this->getMain()->getVal( 'basetimestamp' ) ) { $requestArray['wpEdittime'] = $params['basetimestamp']; } else { $requestArray['wpEdittime'] = $pageObj->getTimestamp(); } if ( $params['starttimestamp'] !== null ) { $requestArray['wpStarttime'] = $params['starttimestamp']; } else { $requestArray['wpStarttime'] = wfTimestampNow(); // Fake wpStartime } if ( $params['minor'] || ( !$params['notminor'] && $user->getOption( 'minordefault' ) ) ) { $requestArray['wpMinoredit'] = ''; } if ( $params['recreate'] ) { $requestArray['wpRecreate'] = ''; } if ( !is_null( $params['section'] ) ) { $section = $params['section']; if ( !preg_match( '/^((T-)?\d+|new)$/', $section ) ) { $this->dieUsage( "The section parameter must be a valid section id or 'new'", 'invalidsection' ); } $content = $pageObj->getContent(); if ( $section !== '0' && $section != 'new' && ( !$content || !$content->getSection( $section ) ) ) { $this->dieUsage( "There is no section {$section}.", 'nosuchsection' ); } $requestArray['wpSection'] = $params['section']; } else { $requestArray['wpSection'] = ''; } $watch = $this->getWatchlistValue( $params['watchlist'], $titleObj ); // Deprecated parameters if ( $params['watch'] ) { $watch = true; } elseif ( $params['unwatch'] ) { $watch = false; } if ( $watch ) { $requestArray['wpWatchthis'] = ''; } // Apply change tags if ( count( $params['tags'] ) ) { 
$tagStatus = ChangeTags::canAddTagsAccompanyingChange( $params['tags'], $user ); if ( $tagStatus->isOK() ) { $requestArray['wpChangeTags'] = implode( ',', $params['tags'] ); } else { $this->dieStatus( $tagStatus ); } } // Pass through anything else we might have been given, to support extensions // This is kind of a hack but it's the best we can do to make extensions work $requestArray += $this->getRequest()->getValues(); global $wgTitle, $wgRequest; $req = new DerivativeRequest( $this->getRequest(), $requestArray, true ); // Some functions depend on $wgTitle == $ep->mTitle // TODO: Make them not or check if they still do $wgTitle = $titleObj; $articleContext = new RequestContext; $articleContext->setRequest( $req ); $articleContext->setWikiPage( $pageObj ); $articleContext->setUser( $this->getUser() ); /** @var $articleObject Article */ $articleObject = Article::newFromWikiPage( $pageObj, $articleContext ); $ep = new EditPage( $articleObject ); $ep->setApiEditOverride( true ); $ep->setContextTitle( $titleObj ); $ep->importFormData( $req ); $content = $ep->textbox1; // Run hooks // Handle APIEditBeforeSave parameters $r = []; if ( !Hooks::run( 'APIEditBeforeSave', [ $ep, $content, &$r ] ) ) { if ( count( $r ) ) { $r['result'] = 'Failure'; $apiResult->addValue( null, $this->getModuleName(), $r ); return; } $this->dieUsageMsg( 'hookaborted' ); } // Do the actual save $oldRevId = $articleObject->getRevIdFetched(); $result = null; // Fake $wgRequest for some hooks inside EditPage // @todo FIXME: This interface SUCKS $oldRequest = $wgRequest; $wgRequest = $req; $status = $ep->attemptSave( $result ); $wgRequest = $oldRequest; switch ( $status->value ) { case EditPage::AS_HOOK_ERROR: case EditPage::AS_HOOK_ERROR_EXPECTED: if ( isset( $status->apiHookResult ) ) { $r = $status->apiHookResult; $r['result'] = 'Failure'; $apiResult->addValue( null, $this->getModuleName(), $r ); return; } else { $this->dieUsageMsg( 'hookaborted' ); } case EditPage::AS_PARSE_ERROR: 
$this->dieUsage( $status->getMessage(), 'parseerror' ); case EditPage::AS_IMAGE_REDIRECT_ANON: $this->dieUsageMsg( 'noimageredirect-anon' ); case EditPage::AS_IMAGE_REDIRECT_LOGGED: $this->dieUsageMsg( 'noimageredirect-logged' ); case EditPage::AS_SPAM_ERROR: $this->dieUsageMsg( [ 'spamdetected', $result['spam'] ] ); case EditPage::AS_BLOCKED_PAGE_FOR_USER: $this->dieUsage( 'You have been blocked from editing', 'blocked', 0, [ 'blockinfo' => ApiQueryUserInfo::getBlockInfo( $user->getBlock() ) ] ); case EditPage::AS_MAX_ARTICLE_SIZE_EXCEEDED: case EditPage::AS_CONTENT_TOO_BIG: $this->dieUsageMsg( [ 'contenttoobig', $this->getConfig()->get( 'MaxArticleSize' ) ] ); case EditPage::AS_READ_ONLY_PAGE_ANON: $this->dieUsageMsg( 'noedit-anon' ); case EditPage::AS_READ_ONLY_PAGE_LOGGED: $this->dieUsageMsg( 'noedit' ); case EditPage::AS_READ_ONLY_PAGE: $this->dieReadOnly(); case EditPage::AS_RATE_LIMITED: $this->dieUsageMsg( 'actionthrottledtext' ); case EditPage::AS_ARTICLE_WAS_DELETED: $this->dieUsageMsg( 'wasdeleted' ); case EditPage::AS_NO_CREATE_PERMISSION: $this->dieUsageMsg( 'nocreate-loggedin' ); case EditPage::AS_NO_CHANGE_CONTENT_MODEL: $this->dieUsageMsg( 'cantchangecontentmodel' ); case EditPage::AS_BLANK_ARTICLE: $this->dieUsageMsg( 'blankpage' ); case EditPage::AS_CONFLICT_DETECTED: $this->dieUsageMsg( 'editconflict' ); case EditPage::AS_TEXTBOX_EMPTY: $this->dieUsageMsg( 'emptynewsection' ); case EditPage::AS_CHANGE_TAG_ERROR: $this->dieStatus( $status ); case EditPage::AS_SUCCESS_NEW_ARTICLE: $r['new'] = true; // fall-through case EditPage::AS_SUCCESS_UPDATE: $r['result'] = 'Success'; $r['pageid'] = intval( $titleObj->getArticleID() ); $r['title'] = $titleObj->getPrefixedText(); $r['contentmodel'] = $articleObject->getContentModel(); $newRevId = $articleObject->getLatest(); if ( $newRevId == $oldRevId ) { $r['nochange'] = true; } else { $r['oldrevid'] = intval( $oldRevId ); $r['newrevid'] = intval( $newRevId ); $r['newtimestamp'] = wfTimestamp( TS_ISO_8601, 
$pageObj->getTimestamp() ); } break; case EditPage::AS_SUMMARY_NEEDED: // Shouldn't happen since we set wpIgnoreBlankSummary, but just in case $this->dieUsageMsg( 'summaryrequired' ); case EditPage::AS_END: default: // $status came from WikiPage::doEdit() $errors = $status->getErrorsArray(); $this->dieUsageMsg( $errors[0] ); // TODO: Add new errors to message map break; } $apiResult->addValue( null, $this->getModuleName(), $r ); } public function mustBePosted() { return true; } public function isWriteMode() { return true; } public function getAllowedParams() { return [ 'title' => [ ApiBase::PARAM_TYPE => 'string', ], 'pageid' => [ ApiBase::PARAM_TYPE => 'integer', ], 'section' => null, 'sectiontitle' => [ ApiBase::PARAM_TYPE => 'string', ], 'text' => [ ApiBase::PARAM_TYPE => 'text', ], 'summary' => null, 'tags' => [ ApiBase::PARAM_TYPE => 'tags', ApiBase::PARAM_ISMULTI => true, ], 'minor' => false, 'notminor' => false, 'bot' => false, 'basetimestamp' => [ ApiBase::PARAM_TYPE => 'timestamp', ], 'starttimestamp' => [ ApiBase::PARAM_TYPE => 'timestamp', ], 'recreate' => false, 'createonly' => false, 'nocreate' => false, 'watch' => [ ApiBase::PARAM_DFLT => false, ApiBase::PARAM_DEPRECATED => true, ], 'unwatch' => [ ApiBase::PARAM_DFLT => false, ApiBase::PARAM_DEPRECATED => true, ], 'watchlist' => [ ApiBase::PARAM_DFLT => 'preferences', ApiBase::PARAM_TYPE => [ 'watch', 'unwatch', 'preferences', 'nochange' ], ], 'md5' => null, 'prependtext' => [ ApiBase::PARAM_TYPE => 'text', ], 'appendtext' => [ ApiBase::PARAM_TYPE => 'text', ], 'undo' => [ ApiBase::PARAM_TYPE => 'integer' ], 'undoafter' => [ ApiBase::PARAM_TYPE => 'integer' ], 'redirect' => [ ApiBase::PARAM_TYPE => 'boolean', ApiBase::PARAM_DFLT => false, ], 'contentformat' => [ ApiBase::PARAM_TYPE => ContentHandler::getAllContentFormats(), ], 'contentmodel' => [ ApiBase::PARAM_TYPE => ContentHandler::getContentModels(), ], 'token' => [ // Standard definition automatically inserted ApiBase::PARAM_HELP_MSG_APPEND => [ 
'apihelp-edit-param-token' ], ], ]; } public function needsToken() { return 'csrf'; } protected function getExamplesMessages() { return [ 'action=edit&title=Test&summary=test%20summary&' . 'text=article%20content&basetimestamp=2007-08-24T12:34:54Z&token=123ABC' => 'apihelp-edit-example-edit', 'action=edit&title=Test&summary=NOTOC&minor=&' . 'prependtext=__NOTOC__%0A&basetimestamp=2007-08-24T12:34:54Z&token=123ABC' => 'apihelp-edit-example-prepend', 'action=edit&title=Test&undo=13585&undoafter=13579&' . 'basetimestamp=2007-08-24T12:34:54Z&token=123ABC' => 'apihelp-edit-example-undo', ]; } public function getHelpUrls() { return 'https://www.mediawiki.org/wiki/API:Edit'; } }
lgpl-3.0
Biobanques/qualityforms
vendor/zetacomponents/console-tools/tests/question_dialog_test.php
8870
<?php /** * ezcConsoleQuestionDialogTest class. * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * * @package ConsoleTools * @subpackage Tests * @version //autogentag// * @license http://www.apache.org/licenses/LICENSE-2.0 Apache License, Version 2.0 */ /** * Require generic test case for ezcConsoleDialog implementations. */ require_once dirname( __FILE__ ) . "/dialog_test.php"; /** * Test suite for ezcConsoleQuestionDialog class. * * @package ConsoleTools * @subpackage Tests */ class ezcConsoleQuestionDialogTest extends ezcConsoleDialogTest { public static function suite() { return new PHPUnit_Framework_TestSuite( "ezcConsoleQuestionDialogTest" ); } public function testGetAccessSuccess() { $output = new ezcConsoleOutput(); $dialog = new ezcConsoleQuestionDialog( $output ); $this->assertSame( $output, $dialog->output ); $this->assertEquals( new ezcConsoleQuestionDialogOptions(), $dialog->options ); } public function testGetAccessFailure() { $output = new ezcConsoleOutput(); $dialog = new ezcConsoleQuestionDialog( $output ); $exceptionCaught = false; try { echo $dialog->foo; } catch ( ezcBasePropertyNotFoundException $e ) { $exceptionCaught = true; } $this->assertTrue( $exceptionCaught, "Exception not thrown on access of nonexistent property foo." 
); } public function testSetAccessSuccess() { $output = new ezcConsoleOutput(); $dialog = new ezcConsoleQuestionDialog( $output ); $outputNew = new ezcConsoleOutput(); $optionsNew = new ezcConsoleQuestionDialogOptions(); $dialog->output = $outputNew; $dialog->options = $optionsNew; $this->assertSame( $outputNew, $dialog->output ); $this->assertSame( $optionsNew, $dialog->options ); } public function testSetAccessFailure() { $output = new ezcConsoleOutput(); $dialog = new ezcConsoleQuestionDialog( $output ); $exceptionCaught = false; try { $dialog->output = "Foo"; } catch ( ezcBaseValueException $e ) { $exceptionCaught = true; } $this->assertTrue( $exceptionCaught, "Exception not thrown on invalid value for output." ); $exceptionCaught = false; try { $dialog->options = "Foo"; } catch ( ezcBaseValueException $e ) { $exceptionCaught = true; } $this->assertTrue( $exceptionCaught, "Exception not thrown on invalid value for options." ); $exceptionCaught = false; try { $dialog->foo = "bar"; } catch ( ezcBasePropertyNotFoundException $e ) { $exceptionCaught = true; } $this->assertTrue( $exceptionCaught, "Exception not thrown on access of nonexistent property foo." ); $this->assertSame( $output, $dialog->output ); $this->assertEquals( new ezcConsoleQuestionDialogOptions(), $dialog->options ); } public function testIssetAccess() { $output = new ezcConsoleOutput(); $dialog = new ezcConsoleQuestionDialog( $output ); $this->assertTrue( isset( $dialog->options ), "Property options is not set." ); $this->assertTrue( isset( $dialog->output ), "Property options is not set." ); $this->assertFalse( isset( $dialog->foo ), "Property foo is set." ); } public function testBasicMethods() { $output = new ezcConsoleOutput(); $dialog = new ezcConsoleQuestionDialog( $output ); $this->assertFalse( $dialog->hasValidResult(), "Fresh dialog has valid result." 
); $exceptionCaught = false; try { $dialog->getResult(); } catch ( ezcConsoleNoValidDialogResultException $e ) { $exceptionCaught = true; } $this->assertTrue( $exceptionCaught, "Excption not thrown on getResult() without result." ); $dialog->reset(); $exceptionCaught = false; try { $dialog->getResult(); } catch ( ezcConsoleNoValidDialogResultException $e ) { $exceptionCaught = true; } $this->assertTrue( $exceptionCaught, "Excption not thrown on getResult() without result." ); } public function testYesNoQuestionFactory() { $output = new ezcConsoleOutput(); $dialog = ezcConsoleQuestionDialog::YesNoQuestion( $output, "Is Jean-Luc a borg?", "y" ); $this->assertInstanceOf( "ezcConsoleQuestionDialogOptions", $dialog->options ); $this->assertEquals( "Is Jean-Luc a borg?", $dialog->options->text ); $this->assertTrue( $dialog->options->showResults ); $this->assertInstanceOf( "ezcConsoleQuestionDialogCollectionValidator", $dialog->options->validator ); $this->assertEquals( array( "y", "n" ), $dialog->options->validator->collection ); $this->assertEquals( "y", $dialog->options->validator->default ); $this->assertEquals( ezcConsoleQuestionDialogCollectionValidator::CONVERT_LOWER, $dialog->options->validator->conversion ); } public function testDialog1() { $this->runDialog( __METHOD__ ); $res[] = $this->readPipe( $this->pipes[1] ); fputs( $this->pipes[0], "A\n" ); $res[] = $this->readPipe( $this->pipes[1] ); fputs( $this->pipes[0], "Y\n" ); $res[] = $this->readPipe( $this->pipes[1] ); // $this->saveDialogResult( __METHOD__, $res ); $this->assertEquals( $this->res, $res ); } public function testDialog2() { $this->runDialog( __METHOD__ ); $res[] = $this->readPipe( $this->pipes[1] ); fputs( $this->pipes[0], "A\n" ); $res[] = $this->readPipe( $this->pipes[1] ); fputs( $this->pipes[0], "3.14\n" ); $res[] = $this->readPipe( $this->pipes[1] ); fputs( $this->pipes[0], "true\n" ); $res[] = $this->readPipe( $this->pipes[1] ); fputs( $this->pipes[0], "23\n" ); $res[] = $this->readPipe( 
$this->pipes[1] ); // $this->saveDialogResult( __METHOD__, $res ); $this->assertEquals( $this->res, $res ); } public function testDialog3() { $this->runDialog( __METHOD__ ); $res[] = $this->readPipe( $this->pipes[1] ); fputs( $this->pipes[0], "A\n" ); $res[] = $this->readPipe( $this->pipes[1] ); fputs( $this->pipes[0], "y\n" ); $res[] = $this->readPipe( $this->pipes[1] ); // $this->saveDialogResult( __METHOD__, $res ); $this->assertEquals( $this->res, $res ); } public function testDialog4() { $this->runDialog( __METHOD__ ); $res[] = $this->readPipe( $this->pipes[1] ); fputs( $this->pipes[0], "foo\n" ); $res[] = $this->readPipe( $this->pipes[1] ); fputs( $this->pipes[0], "foo.bar@\n" ); $res[] = $this->readPipe( $this->pipes[1] ); fputs( $this->pipes[0], "foo.bar@example\n" ); $res[] = $this->readPipe( $this->pipes[1] ); fputs( $this->pipes[0], "foo.bar@example.com\n" ); $res[] = $this->readPipe( $this->pipes[1] ); // $this->saveDialogResult( __METHOD__, $res ); $this->assertEquals( $this->res, $res ); } public function testDialog5() { $this->runDialog( __METHOD__ ); $res[] = $this->readPipe( $this->pipes[1] ); fputs( $this->pipes[0], "foo\n" ); $res[] = $this->readPipe( $this->pipes[1] ); fclose( $this->pipes[0] ); $res[] = $this->readPipe( $this->pipes[1] ); // $this->saveDialogResult( __METHOD__, $res ); $this->assertEquals( $this->res, $res ); } } ?>
lgpl-3.0
LiuCAs/Gekosale
lib/imageGD/Image/Text/Exception.php
881
<?php /** * Image * * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3.0 of the License, or (at your option) any later version. * * @link http://code.google.com/p/nweb-image * * @category Image * @package Image_Text * @author Krzysztof Kardasz <krzysztof.kardasz@gmail.com> * @copyright Copyright (c) 2008 Krzysztof Kardasz * @license http://www.gnu.org/licenses/lgpl-3.0.txt GNU Lesser General Public * @version 2.1 */ /** * Wyjątki * * @category Image * @package Image_Text * @author Krzysztof Kardasz <krzysztof.kardasz@gmail.com> * @copyright Copyright (c) 2008 Krzysztof Kardasz */ class Image_Text_Exception extends Image_Exception {}
lgpl-3.0
GunoH/intellij-community
platform/execution-impl/src/com/intellij/execution/filters/PatternHyperlinkFormat.java
2600
/* * Copyright 2000-2016 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.execution.filters; import org.jetbrains.annotations.NotNull; import java.util.List; import java.util.regex.Pattern; public class PatternHyperlinkFormat { private final Pattern myPattern; private final boolean myZeroBasedLineNumbering; private final boolean myZeroBasedColumnNumbering; private final PatternHyperlinkPart[] myLinkParts; private final List<String> myRequiredOrderedSubstrings; public PatternHyperlinkFormat(@NotNull Pattern pattern, boolean zeroBasedLineNumbering, boolean zeroBasedColumnNumbering, PatternHyperlinkPart @NotNull ... linkParts) { this(pattern, zeroBasedLineNumbering, zeroBasedColumnNumbering, List.of(), linkParts); } public PatternHyperlinkFormat(@NotNull Pattern pattern, boolean zeroBasedLineNumbering, boolean zeroBasedColumnNumbering, @NotNull List<String> requiredOrderedSubstrings, PatternHyperlinkPart @NotNull ... 
linkParts) { myPattern = pattern; myZeroBasedLineNumbering = zeroBasedLineNumbering; myZeroBasedColumnNumbering = zeroBasedColumnNumbering; myRequiredOrderedSubstrings = List.copyOf(requiredOrderedSubstrings); myLinkParts = linkParts; } @NotNull Pattern getPattern() { return myPattern; } boolean isZeroBasedLineNumbering() { return myZeroBasedLineNumbering; } boolean isZeroBasedColumnNumbering() { return myZeroBasedColumnNumbering; } PatternHyperlinkPart @NotNull [] getLinkParts() { return myLinkParts; } boolean matchRequiredSubstrings(@NotNull String line) { int ind = 0; for (String required : myRequiredOrderedSubstrings) { int nextInd = line.indexOf(required, ind); if (nextInd < 0) { return false; } ind = nextInd + required.length(); } return true; } }
apache-2.0
GunoH/intellij-community
platform/util/src/com/intellij/util/CompressionUtil.java
7621
// Copyright 2000-2021 JetBrains s.r.o. and contributors. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.util;

import com.intellij.openapi.util.ThreadLocalCachedByteArray;
import com.intellij.openapi.util.io.BufferExposingByteArrayOutputStream;
import com.intellij.util.io.DataInputOutputUtil;
import com.intellij.util.io.DataOutputStream;
import net.jpountz.lz4.LZ4Compressor;
import net.jpountz.lz4.LZ4Factory;
import net.jpountz.lz4.LZ4FastDecompressor;
import org.jetbrains.annotations.NotNull;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

/**
 * LZ4-based compression helpers for byte arrays and char sequences, built on the
 * pure-Java LZ4 implementation ({@link LZ4Factory#fastestJavaInstance()}).
 * Compressed sizes are written with variable-length INTs via {@link DataInputOutputUtil},
 * and scratch buffers are reused per-thread through {@link ThreadLocalCachedByteArray}
 * to avoid per-call allocation.
 *
 * @author Maxim.Mossienko
 */
public final class CompressionUtil {
  // Inputs at or below this size are written uncompressed: LZ4 overhead is not worth it.
  private static final int COMPRESSION_THRESHOLD = 64;
  // Per-thread reusable scratch buffer; NOTE: the returned array may be longer than requested
  // and its contents are only valid until the next getBuffer() call on the same thread.
  private static final ThreadLocalCachedByteArray spareBufferLocal = new ThreadLocalCachedByteArray();

  /**
   * Writes {@code bytes[start..start+length)} to {@code out}, LZ4-compressing when that
   * actually shrinks the data. Wire format (all INTs variable-length):
   * either {@code [length][raw bytes]} for the uncompressed case, or
   * {@code [-compressedSize][length - compressedSize][compressed bytes]} — the negative
   * first INT is the marker {@link #readCompressed} keys on.
   * Returns the number of payload bytes written (excluding the INT headers).
   */
  public static int writeCompressed(@NotNull DataOutput out, byte @NotNull [] bytes, int start, int length) throws IOException {
    if (length > COMPRESSION_THRESHOLD) {
      LZ4Compressor compressor = compressor();
      byte[] compressedOutputBuffer = spareBufferLocal.getBuffer(compressor.maxCompressedLength(length));
      int compressedSize = compressor.compress(bytes, start, length, compressedOutputBuffer, 0);
      // Only use the compressed form if it is strictly smaller than the input.
      if (compressedSize < length) {
        DataInputOutputUtil.writeINT(out, -compressedSize);
        // Stored as a delta so the (usually small) value encodes compactly as a var-INT.
        DataInputOutputUtil.writeINT(out, length - compressedSize);
        out.write(compressedOutputBuffer, 0, compressedSize);
        return compressedSize;
      }
    }
    DataInputOutputUtil.writeINT(out, length);
    out.write(bytes, start, length);
    return length;
  }

  // Global (de)compression statistics, only reported when DUMP_COMPRESSION_STATS is set.
  private static final AtomicInteger myCompressionRequests = new AtomicInteger();
  private static final AtomicLong myCompressionTime = new AtomicLong();
  private static final AtomicInteger myDecompressionRequests = new AtomicInteger();
  private static final AtomicLong myDecompressionTime = new AtomicLong();
  private static final AtomicLong myDecompressedSize = new AtomicLong();
  private static final AtomicLong mySizeBeforeCompression = new AtomicLong();
  private static final AtomicLong mySizeAfterCompression = new AtomicLong();
  // Enables periodic System.out statistics dumps; off by default.
  public static final boolean DUMP_COMPRESSION_STATS = SystemProperties.getBooleanProperty("idea.dump.compression.stats", false);

  /**
   * Always-compress variant: writes {@code [compressedSize][compressed bytes]} without
   * recording the original length — the caller must know it and pass it back to
   * {@link #readCompressedWithoutOriginalBufferLength}. Returns the compressed size.
   */
  public static int writeCompressedWithoutOriginalBufferLength(@NotNull DataOutput out, byte @NotNull [] bytes, int length) throws IOException {
    long started = DUMP_COMPRESSION_STATS ? System.nanoTime() : 0;

    LZ4Compressor compressor = compressor();
    final byte[] compressedOutputBuffer = spareBufferLocal.getBuffer(compressor.maxCompressedLength(length));
    int compressedSize = compressor.compress(bytes, 0, length, compressedOutputBuffer, 0);

    final long time = (DUMP_COMPRESSION_STATS ? System.nanoTime() : 0) - started;
    mySizeAfterCompression.addAndGet(compressedSize);
    mySizeBeforeCompression.addAndGet(length);
    int requests = myCompressionRequests.incrementAndGet();
    long l = myCompressionTime.addAndGet(time);

    // Dump stats every 8192 requests (0x1fff mask) to keep logging cheap.
    if (DUMP_COMPRESSION_STATS && (requests & 0x1fff) == 0) {
      System.out.println("Compressed " + requests + " times, size:" + mySizeBeforeCompression + "->" + mySizeAfterCompression + " for " + (l / 1000000) + "ms");
    }

    DataInputOutputUtil.writeINT(out, compressedSize);
    out.write(compressedOutputBuffer, 0, compressedSize);

    return compressedSize;
  }

  private static LZ4Compressor compressor() {
    return LZ4Factory.fastestJavaInstance().fastCompressor();
  }

  /**
   * Counterpart of {@link #writeCompressedWithoutOriginalBufferLength}; the caller supplies
   * the original (uncompressed) length since it is not on the wire.
   * Returns a freshly allocated array of exactly {@code originalBufferLength} bytes.
   */
  public static byte @NotNull [] readCompressedWithoutOriginalBufferLength(@NotNull DataInput in, int originalBufferLength) throws IOException {
    int size = DataInputOutputUtil.readINT(in);

    byte[] bytes = spareBufferLocal.getBuffer(size);
    in.readFully(bytes, 0, size);

    int decompressedRequests = myDecompressionRequests.incrementAndGet();
    long started = DUMP_COMPRESSION_STATS ? System.nanoTime() : 0;

    final byte[] decompressedResult = decompressor().decompress(bytes, 0, originalBufferLength);

    long doneTime = (DUMP_COMPRESSION_STATS ? System.nanoTime() : 0) - started;
    long decompressedSize = myDecompressedSize.addAndGet(size);
    long decompressedTime = myDecompressionTime.addAndGet(doneTime);
    if (DUMP_COMPRESSION_STATS && (decompressedRequests & 0x1fff) == 0) {
      System.out.println("Decompressed " + decompressedRequests + " times, size: " + decompressedSize + " for " + (decompressedTime / 1000000) + "ms");
    }

    return decompressedResult;
  }

  private static LZ4FastDecompressor decompressor() {
    return LZ4Factory.fastestJavaInstance().fastDecompressor();
  }

  /**
   * Counterpart of {@link #writeCompressed}: a negative leading INT signals compressed data
   * (its absolute value is the compressed size, and the next INT is the delta back to the
   * original length); a non-negative INT is a raw length followed by raw bytes.
   */
  public static byte @NotNull [] readCompressed(@NotNull DataInput in) throws IOException {
    int size = DataInputOutputUtil.readINT(in);
    if (size < 0) {
      size = -size;
      byte[] bytes = spareBufferLocal.getBuffer(size);
      // Reconstruct the original length from the stored delta.
      int sizeUncompressed = DataInputOutputUtil.readINT(in) + size;
      in.readFully(bytes, 0, size);
      byte[] result = new byte[sizeUncompressed];
      // LZ4FastDecompressor.decompress returns the number of *source* bytes consumed,
      // which must equal the compressed size we just read.
      int decompressed = decompressor().decompress(bytes, 0, result, 0, sizeUncompressed);
      assert decompressed == size;
      return result;
    }
    else {
      byte[] bytes = new byte[size];
      in.readFully(bytes);
      return bytes;
    }
  }

  // Strings shorter than this are kept as-is; compression only pays off for large texts.
  private static final int STRING_COMPRESSION_THRESHOLD = 1024;

  /**
   * Compresses a CharSequence into a byte[] when profitable, otherwise returns a
   * CharSequence. Chars are first serialized as var-INTs (so ASCII stays one byte each),
   * then LZ4-compressed; the result is prefixed with [charCount][byteCount - charCount]
   * var-INTs. If the compressed form is not at least 2x smaller than the UTF-16 original,
   * the original CharSequence is returned instead. Inverse: {@link #uncompressStringRawBytes}.
   */
  @NotNull
  public static Object compressStringRawBytes(@NotNull CharSequence string) {
    int length = string.length();
    if (length < STRING_COMPRESSION_THRESHOLD) {
      if (string instanceof CharBuffer && ((CharBuffer)string).capacity() > STRING_COMPRESSION_THRESHOLD) {
        string = string.toString();   // shrink to size
      }
      return string;
    }
    try {
      BufferExposingByteArrayOutputStream bytes = new BufferExposingByteArrayOutputStream(length);
      @NotNull DataOutput out = new DataOutputStream(bytes);

      // Serialize each char as a variable-length INT before compressing.
      for (int i=0; i< length;i++) {
        char c = string.charAt(i);
        DataInputOutputUtil.writeINT(out, c);
      }

      LZ4Compressor compressor = compressor();
      int bytesWritten = bytes.size();
      // +10 headroom for the two var-INT header fields written below.
      ByteBuffer dest = ByteBuffer.wrap(spareBufferLocal.getBuffer(compressor.maxCompressedLength(bytesWritten) + 10));
      DataInputOutputUtil.writeINT(dest, length);
      DataInputOutputUtil.writeINT(dest, bytesWritten - length);
      compressor.compress(ByteBuffer.wrap(bytes.getInternalBuffer(), 0, bytesWritten), dest);

      // Keep compressed form only if < 2 bytes/char, i.e. smaller than the UTF-16 original.
      return dest.position() < length * 2 ? Arrays.copyOf(dest.array(), dest.position()) : string;
    }
    catch (IOException e) {
      e.printStackTrace();
      return string;
    }
  }

  /**
   * Inverse of {@link #compressStringRawBytes}: a CharSequence argument is returned
   * unchanged (it was stored uncompressed); a byte[] is LZ4-decompressed and its
   * var-INT-encoded chars are decoded back into a String.
   */
  @NotNull
  public static CharSequence uncompressStringRawBytes(@NotNull Object compressed) {
    if (compressed instanceof CharSequence) return (CharSequence)compressed;

    ByteBuffer buffer = ByteBuffer.wrap((byte[])compressed);
    int len = DataInputOutputUtil.readINT(buffer);
    int uncompressedLength = DataInputOutputUtil.readINT(buffer) + len;
    ByteBuffer dest = ByteBuffer.wrap(spareBufferLocal.getBuffer(uncompressedLength), 0, uncompressedLength);
    decompressor().decompress(buffer, dest);
    dest.rewind();

    char[] chars = new char[len];

    for (int i=0; i<len; i++) {
      int c = DataInputOutputUtil.readINT(dest);
      chars[i] = (char)c;
    }
    return new String(chars);
  }
}
apache-2.0
gweidner/incubator-systemml
src/main/java/org/apache/sysml/runtime/instructions/spark/MapmmSPInstruction.java
18080
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.sysml.runtime.instructions.spark;

import java.util.Iterator;
import java.util.stream.IntStream;

import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.PairFlatMapFunction;
import org.apache.spark.api.java.function.PairFunction;

import scala.Tuple2;

import org.apache.sysml.hops.AggBinaryOp.SparkAggType;
import org.apache.sysml.hops.OptimizerUtils;
import org.apache.sysml.lops.MapMult;
import org.apache.sysml.lops.MapMult.CacheType;
import org.apache.sysml.runtime.DMLRuntimeException;
import org.apache.sysml.runtime.controlprogram.context.ExecutionContext;
import org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;
import org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer;
import org.apache.sysml.runtime.functionobjects.Multiply;
import org.apache.sysml.runtime.functionobjects.Plus;
import org.apache.sysml.runtime.instructions.InstructionUtils;
import org.apache.sysml.runtime.instructions.cp.CPOperand;
import org.apache.sysml.runtime.instructions.spark.data.LazyIterableIterator;
import org.apache.sysml.runtime.instructions.spark.data.PartitionedBroadcast;
import org.apache.sysml.runtime.instructions.spark.functions.FilterNonEmptyBlocksFunction;
import org.apache.sysml.runtime.instructions.spark.utils.RDDAggregateUtils;
import org.apache.sysml.runtime.matrix.MatrixCharacteristics;
import org.apache.sysml.runtime.matrix.data.MatrixBlock;
import org.apache.sysml.runtime.matrix.data.MatrixIndexes;
import org.apache.sysml.runtime.matrix.data.OperationsOnMatrixValues;
import org.apache.sysml.runtime.matrix.operators.AggregateBinaryOperator;
import org.apache.sysml.runtime.matrix.operators.AggregateOperator;
import org.apache.sysml.runtime.matrix.operators.Operator;

/**
 * Spark broadcast-based matrix multiplication ("map-side matmult"): one input is a
 * block-partitioned RDD, the other is broadcast to all executors, so the multiply runs
 * inside a map/flatMap without a shuffle. CacheType encodes which operand is broadcast
 * (LEFT/RIGHT), and SparkAggType selects the final aggregation (single block, per-key
 * sum, or none).
 */
public class MapmmSPInstruction extends BinarySPInstruction {
	// Which side of the multiplication is the broadcast (cached) input.
	private CacheType _type = null;
	// If false, empty blocks are filtered from input and output RDDs.
	private boolean _outputEmpty = true;
	// How map outputs are aggregated: SINGLE_BLOCK, MULTI_BLOCK, or NONE.
	private SparkAggType _aggtype;

	private MapmmSPInstruction(Operator op, CPOperand in1, CPOperand in2, CPOperand out, CacheType type,
			boolean outputEmpty, SparkAggType aggtype, String opcode, String istr) {
		super(SPType.MAPMM, op, in1, in2, out, opcode, istr);
		_type = type;
		_outputEmpty = outputEmpty;
		_aggtype = aggtype;
	}

	/**
	 * Parses a mapmm instruction string of the form
	 * {@code opcode, in1, in2, out, cacheType, outputEmpty, aggType} and constructs the
	 * instruction with a fixed sum-product (Plus/Multiply) aggregate binary operator.
	 *
	 * @param str instruction string
	 * @return parsed instruction
	 */
	public static MapmmSPInstruction parseInstruction( String str ) {
		String parts[] = InstructionUtils.getInstructionPartsWithValueType(str);
		String opcode = parts[0];
		if(!opcode.equalsIgnoreCase(MapMult.OPCODE))
			throw new DMLRuntimeException("MapmmSPInstruction.parseInstruction():: Unknown opcode " + opcode);
		CPOperand in1 = new CPOperand(parts[1]);
		CPOperand in2 = new CPOperand(parts[2]);
		CPOperand out = new CPOperand(parts[3]);
		CacheType type = CacheType.valueOf(parts[4]);
		boolean outputEmpty = Boolean.parseBoolean(parts[5]);
		SparkAggType aggtype = SparkAggType.valueOf(parts[6]);
		AggregateOperator agg = new AggregateOperator(0, Plus.getPlusFnObject());
		AggregateBinaryOperator aggbin = new AggregateBinaryOperator(Multiply.getMultiplyFnObject(), agg);
		return new MapmmSPInstruction(aggbin, in1, in2, out, type, outputEmpty, aggtype, opcode, str);
	}

	@Override
	public void processInstruction(ExecutionContext ec) {
		SparkExecutionContext sec = (SparkExecutionContext)ec;

		CacheType type = _type;
		// For RIGHT cache type, input1 is the RDD and input2 the broadcast; vice versa for LEFT.
		String rddVar = type.isRight() ? input1.getName() : input2.getName();
		String bcastVar = type.isRight() ? input2.getName() : input1.getName();
		MatrixCharacteristics mcRdd = sec.getMatrixCharacteristics(rddVar);
		MatrixCharacteristics mcBc = sec.getMatrixCharacteristics(bcastVar);

		//get input rdd with preferred number of partitions to avoid unnecessary repartition
		JavaPairRDD<MatrixIndexes,MatrixBlock> in1 = sec.getBinaryBlockRDDHandleForVariable(rddVar,
			(requiresFlatMapFunction(type, mcBc) && requiresRepartitioning(
			type, mcRdd, mcBc, sec.getSparkContext().defaultParallelism())) ?
			getNumRepartitioning(type, mcRdd, mcBc) : -1, _outputEmpty);

		//investigate if a repartitioning - including a potential flip of broadcast and rdd
		//inputs - is required to ensure moderately sized output partitions (2GB limitation)
		if( requiresFlatMapFunction(type, mcBc) &&
			requiresRepartitioning(type, mcRdd, mcBc, in1.getNumPartitions()) )
		{
			int numParts = getNumRepartitioning(type, mcRdd, mcBc);
			int numParts2 = getNumRepartitioning(type.getFlipped(), mcBc, mcRdd);
			if( numParts2 > numParts ) { //flip required
				type = type.getFlipped();
				rddVar = type.isRight() ? input1.getName() : input2.getName();
				bcastVar = type.isRight() ? input2.getName() : input1.getName();
				mcRdd = sec.getMatrixCharacteristics(rddVar);
				mcBc = sec.getMatrixCharacteristics(bcastVar);
				in1 = sec.getBinaryBlockRDDHandleForVariable(rddVar);
				LOG.warn("Mapmm: Switching rdd ('"+bcastVar+"') and broadcast ('"+rddVar+"') inputs "
					+ "for repartitioning because this allows better control of output partition "
					+ "sizes ("+numParts+" < "+numParts2+").");
			}
		}

		//get inputs
		PartitionedBroadcast<MatrixBlock> in2 = sec.getBroadcastForVariable(bcastVar);

		//empty input block filter
		if( !_outputEmpty )
			in1 = in1.filter(new FilterNonEmptyBlocksFunction());

		//execute mapmm and aggregation if necessary and put output into symbol table
		if( _aggtype == SparkAggType.SINGLE_BLOCK )
		{
			// Result fits into one block: map to plain blocks and sum them on the driver.
			JavaRDD<MatrixBlock> out = in1.map(new RDDMapMMFunction2(type, in2));
			MatrixBlock out2 = RDDAggregateUtils.sumStable(out);

			//put output block into symbol table (no lineage because single block)
			//this also includes implicit maintenance of matrix characteristics
			sec.setMatrixOutput(output.getName(), out2, getExtendedOpcode());
		}
		else //MULTI_BLOCK or NONE
		{
			JavaPairRDD<MatrixIndexes,MatrixBlock> out = null;
			if( requiresFlatMapFunction(type, mcBc) ) {
				// One input block contributes to multiple output blocks -> flatMap.
				if( requiresRepartitioning(type, mcRdd, mcBc, in1.getNumPartitions()) ) {
					int numParts = getNumRepartitioning(type, mcRdd, mcBc);
					LOG.warn("Mapmm: Repartition input rdd '"+rddVar+"' from "+in1.getNumPartitions()+" to "
						+numParts+" partitions to satisfy size restrictions of output partitions.");
					in1 = in1.repartition(numParts);
				}
				out = in1.flatMapToPair( new RDDFlatMapMMFunction(type, in2) );
			}
			else if( preservesPartitioning(mcRdd, type) )
				// Index-preserving case: mapPartitions keeps the partitioner for downstream ops.
				out = in1.mapPartitionsToPair(new RDDMapMMPartitionFunction(type, in2), true);
			else
				out = in1.mapToPair( new RDDMapMMFunction(type, in2) );

			//empty output block filter
			if( !_outputEmpty )
				out = out.filter(new FilterNonEmptyBlocksFunction());

			if( _aggtype == SparkAggType.MULTI_BLOCK )
				out = RDDAggregateUtils.sumByKeyStable(out, false);

			//put output RDD handle into symbol table
			sec.setRDDHandleForVariable(output.getName(), out);
			sec.addLineageRDD(output.getName(), rddVar);
			sec.addLineageBroadcast(output.getName(), bcastVar);

			//update output statistics if not inferred
			updateBinaryMMOutputMatrixCharacteristics(sec, true);
		}
	}

	/**
	 * True if the RDD input is a single block row/column strip (depending on cache type),
	 * in which case output indexes equal input indexes and partitioning can be preserved.
	 */
	private static boolean preservesPartitioning( MatrixCharacteristics mcIn, CacheType type )
	{
		if( type == CacheType.LEFT )
			return mcIn.dimsKnown() && mcIn.getRows() <= mcIn.getRowsPerBlock();
		else // RIGHT
			return mcIn.dimsKnown() && mcIn.getCols() <= mcIn.getColsPerBlock();
	}

	/**
	 * Indicates if there is a need to apply a flatmap rdd operation because a single
	 * input block creates multiple output blocks.
	 *
	 * @param type cache type
	 * @param mcBc matrix characteristics
	 * @return true if single input block creates multiple output blocks
	 */
	private static boolean requiresFlatMapFunction( CacheType type, MatrixCharacteristics mcBc)
	{
		return (type == CacheType.LEFT && mcBc.getRows() > mcBc.getRowsPerBlock())
			|| (type == CacheType.RIGHT && mcBc.getCols() > mcBc.getColsPerBlock());
	}

	/**
	 * Indicates if there is a need to repartition the input RDD in order to increase the
	 * degree of parallelism or reduce the output partition size (e.g., Spark still has a
	 * 2GB limitation of partitions)
	 *
	 * @param type cache type
	 * @param mcRdd rdd matrix characteristics
	 * @param mcBc broadcast matrix characteristics
	 * @param numPartitions number of partitions
	 * @return true need to repartition input RDD
	 */
	private static boolean requiresRepartitioning( CacheType type, MatrixCharacteristics mcRdd, MatrixCharacteristics mcBc, int numPartitions ) {
		//note: as repartitioning requires data shuffling, we try to be very conservative here
		//approach: we repartition, if there is a "outer-product-like" mm (single block common dimension),
		//the size of output partitions (assuming dense) exceeds a size of 1GB
		boolean isLeft = (type == CacheType.LEFT);
		boolean isOuter = isLeft ?
			(mcRdd.getRows() <= mcRdd.getRowsPerBlock()) :
			(mcRdd.getCols() <= mcRdd.getColsPerBlock());
		boolean isLargeOutput = (OptimizerUtils.estimatePartitionedSizeExactSparsity(isLeft?mcBc.getRows():mcRdd.getRows(),
			isLeft?mcRdd.getCols():mcBc.getCols(), isLeft?mcBc.getRowsPerBlock():mcRdd.getRowsPerBlock(),
			isLeft?mcRdd.getColsPerBlock():mcBc.getColsPerBlock(), 1.0) / numPartitions) > 1024*1024*1024;
		return isOuter && isLargeOutput && mcRdd.dimsKnown() && mcBc.dimsKnown()
			&& numPartitions < getNumRepartitioning(type, mcRdd, mcBc);
	}

	/**
	 * Computes the number of target partitions for repartitioning input rdds in case of
	 * outer-product-like mm.
	 *
	 * @param type cache type
	 * @param mcRdd rdd matrix characteristics
	 * @param mcBc broadcast matrix characteristics
	 * @return number of target partitions for repartitioning
	 */
	private static int getNumRepartitioning( CacheType type, MatrixCharacteristics mcRdd, MatrixCharacteristics mcBc ) {
		boolean isLeft = (type == CacheType.LEFT);
		long sizeOutput = (OptimizerUtils.estimatePartitionedSizeExactSparsity(isLeft?mcBc.getRows():mcRdd.getRows(),
			isLeft?mcRdd.getCols():mcBc.getCols(), isLeft?mcBc.getRowsPerBlock():mcRdd.getRowsPerBlock(),
			isLeft?mcRdd.getColsPerBlock():mcBc.getColsPerBlock(), 1.0));
		long numParts = sizeOutput / InfrastructureAnalyzer.getHDFSBlockSize();
		// Cap at the number of blocks along the preserved dimension.
		return (int)Math.min(numParts, (isLeft?mcRdd.getNumColBlocks():mcRdd.getNumRowBlocks()));
	}

	/**
	 * One-output-block-per-input-block mapmm: multiplies each RDD block with the matching
	 * broadcast block and emits a (possibly re-indexed) output block.
	 */
	private static class RDDMapMMFunction implements PairFunction<Tuple2<MatrixIndexes, MatrixBlock>, MatrixIndexes, MatrixBlock>
	{
		private static final long serialVersionUID = 8197406787010296291L;

		private final CacheType _type;
		private final AggregateBinaryOperator _op;
		private final PartitionedBroadcast<MatrixBlock> _pbc;

		public RDDMapMMFunction( CacheType type, PartitionedBroadcast<MatrixBlock> binput )
		{
			_type = type;
			_pbc = binput;

			//created operator for reuse
			AggregateOperator agg = new AggregateOperator(0, Plus.getPlusFnObject());
			_op = new AggregateBinaryOperator(Multiply.getMultiplyFnObject(), agg);
		}

		@Override
		public Tuple2<MatrixIndexes, MatrixBlock> call( Tuple2<MatrixIndexes, MatrixBlock> arg0 )
			throws Exception
		{
			MatrixIndexes ixIn = arg0._1();
			MatrixBlock blkIn = arg0._2();

			MatrixIndexes ixOut = new MatrixIndexes();
			MatrixBlock blkOut = new MatrixBlock();

			if( _type == CacheType.LEFT )
			{
				//get the right hand side matrix
				MatrixBlock left = _pbc.getBlock(1, (int)ixIn.getRowIndex());

				//execute matrix-vector mult
				OperationsOnMatrixValues.matMult(new MatrixIndexes(1,ixIn.getRowIndex()),
					left, ixIn, blkIn, ixOut, blkOut, _op);
			}
			else //if( _type == CacheType.RIGHT )
			{
				//get the right hand side matrix
				MatrixBlock right = _pbc.getBlock((int)ixIn.getColumnIndex(), 1);

				//execute matrix-vector mult
				OperationsOnMatrixValues.matMult(ixIn, blkIn,
					new MatrixIndexes(ixIn.getColumnIndex(),1), right, ixOut, blkOut, _op);
			}

			//output new tuple
			return new Tuple2<>(ixOut, blkOut);
		}
	}

	/**
	 * Similar to RDDMapMMFunction but with single output block
	 */
	private static class RDDMapMMFunction2 implements Function<Tuple2<MatrixIndexes, MatrixBlock>, MatrixBlock>
	{
		private static final long serialVersionUID = -2753453898072910182L;

		private final CacheType _type;
		private final AggregateBinaryOperator _op;
		private final PartitionedBroadcast<MatrixBlock> _pbc;

		public RDDMapMMFunction2( CacheType type, PartitionedBroadcast<MatrixBlock> binput )
		{
			_type = type;
			_pbc = binput;

			//created operator for reuse
			AggregateOperator agg = new AggregateOperator(0, Plus.getPlusFnObject());
			_op = new AggregateBinaryOperator(Multiply.getMultiplyFnObject(), agg);
		}

		@Override
		public MatrixBlock call( Tuple2<MatrixIndexes, MatrixBlock> arg0 )
			throws Exception
		{
			MatrixIndexes ixIn = arg0._1();
			MatrixBlock blkIn = arg0._2();

			if( _type == CacheType.LEFT )
			{
				//get the right hand side matrix
				MatrixBlock left = _pbc.getBlock(1, (int)ixIn.getRowIndex());

				//execute matrix-vector mult
				return OperationsOnMatrixValues.matMult(
					left, blkIn, new MatrixBlock(), _op);
			}
			else //if( _type == CacheType.RIGHT )
			{
				//get the right hand side matrix
				MatrixBlock right = _pbc.getBlock((int)ixIn.getColumnIndex(), 1);

				//execute matrix-vector mult
				return OperationsOnMatrixValues.matMult(
					blkIn, right, new MatrixBlock(), _op);
			}
		}
	}

	/**
	 * Partition-wise mapmm for the index-preserving case (single block row/column strip);
	 * wraps the partition iterator lazily to avoid materializing all output blocks.
	 */
	private static class RDDMapMMPartitionFunction implements PairFlatMapFunction<Iterator<Tuple2<MatrixIndexes, MatrixBlock>>, MatrixIndexes, MatrixBlock>
	{
		private static final long serialVersionUID = 1886318890063064287L;

		private final CacheType _type;
		private final AggregateBinaryOperator _op;
		private final PartitionedBroadcast<MatrixBlock> _pbc;

		public RDDMapMMPartitionFunction( CacheType type, PartitionedBroadcast<MatrixBlock> binput )
		{
			_type = type;
			_pbc = binput;

			//created operator for reuse
			AggregateOperator agg = new AggregateOperator(0, Plus.getPlusFnObject());
			_op = new AggregateBinaryOperator(Multiply.getMultiplyFnObject(), agg);
		}

		@Override
		public LazyIterableIterator<Tuple2<MatrixIndexes, MatrixBlock>> call(Iterator<Tuple2<MatrixIndexes, MatrixBlock>> arg0)
			throws Exception
		{
			return new MapMMPartitionIterator(arg0);
		}

		/**
		 * Lazy mapmm iterator to prevent materialization of entire partition output in-memory.
		 * The implementation via mapPartitions is required to preserve partitioning information,
		 * which is important for performance.
		 */
		private class MapMMPartitionIterator extends LazyIterableIterator<Tuple2<MatrixIndexes, MatrixBlock>>
		{
			public MapMMPartitionIterator(Iterator<Tuple2<MatrixIndexes, MatrixBlock>> in) {
				super(in);
			}

			@Override
			protected Tuple2<MatrixIndexes, MatrixBlock> computeNext(Tuple2<MatrixIndexes, MatrixBlock> arg)
				throws Exception
			{
				MatrixIndexes ixIn = arg._1();
				MatrixBlock blkIn = arg._2();
				MatrixBlock blkOut = new MatrixBlock();

				if( _type == CacheType.LEFT )
				{
					//get the right hand side matrix
					MatrixBlock left = _pbc.getBlock(1, (int)ixIn.getRowIndex());

					//execute index preserving matrix multiplication
					OperationsOnMatrixValues.matMult(left, blkIn, blkOut, _op);
				}
				else //if( _type == CacheType.RIGHT )
				{
					//get the right hand side matrix
					MatrixBlock right = _pbc.getBlock((int)ixIn.getColumnIndex(), 1);

					//execute index preserving matrix multiplication
					OperationsOnMatrixValues.matMult(blkIn, right, blkOut, _op);
				}

				return new Tuple2<>(ixIn, blkOut);
			}
		}
	}

	/**
	 * FlatMap mapmm for the case where one input block produces multiple output blocks:
	 * multiplies the input block with every matching broadcast block row/column.
	 */
	private static class RDDFlatMapMMFunction implements PairFlatMapFunction<Tuple2<MatrixIndexes, MatrixBlock>, MatrixIndexes, MatrixBlock>
	{
		private static final long serialVersionUID = -6076256569118957281L;

		private final CacheType _type;
		private final AggregateBinaryOperator _op;
		private final PartitionedBroadcast<MatrixBlock> _pbc;

		public RDDFlatMapMMFunction( CacheType type, PartitionedBroadcast<MatrixBlock> binput )
		{
			_type = type;
			_pbc = binput;

			//created operator for reuse
			AggregateOperator agg = new AggregateOperator(0, Plus.getPlusFnObject());
			_op = new AggregateBinaryOperator(Multiply.getMultiplyFnObject(), agg);
		}

		@Override
		public Iterator<Tuple2<MatrixIndexes, MatrixBlock>> call( Tuple2<MatrixIndexes, MatrixBlock> arg0 )
			throws Exception
		{
			MatrixIndexes ixIn = arg0._1();
			MatrixBlock blkIn = arg0._2();

			if( _type == CacheType.LEFT ) {
				//for all matching left-hand-side blocks, returned as lazy iterator
				return IntStream.range(1, _pbc.getNumRowBlocks()+1).mapToObj(i ->
					new Tuple2<>(new MatrixIndexes(i, ixIn.getColumnIndex()),
						OperationsOnMatrixValues.matMult(_pbc.getBlock(i, (int)ixIn.getRowIndex()), blkIn,
						new MatrixBlock(), _op))).iterator();
			}
			else { //RIGHT
				//for all matching right-hand-side blocks, returned as lazy iterator
				return IntStream.range(1, _pbc.getNumColumnBlocks()+1).mapToObj(j ->
					new Tuple2<>(new MatrixIndexes(ixIn.getRowIndex(), j),
						OperationsOnMatrixValues.matMult(blkIn, _pbc.getBlock((int)ixIn.getColumnIndex(), j),
						new MatrixBlock(), _op))).iterator();
			}
		}
	}
}
apache-2.0
sanderginn/isis
core/applib/src/main/java/org/apache/isis/applib/events/CollectionUsabilityEvent.java
1789
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.isis.applib.events; import org.apache.isis.applib.Identifier; /** * <i>Supported only by {@link org.apache.isis.applib.services.wrapper.WrapperFactory} service, </i> represents a check as to whether a collection is usable or has been disabled. * * <p> * If {@link #getReason()} is not <tt>null</tt> then provides the reason why the * collection is disabled; otherwise collection is enabled. * * @deprecated - superceded by <code>domainEvent</code> support ({@link org.apache.isis.applib.services.eventbus.PropertyDomainEvent}, {@link org.apache.isis.applib.IsisApplibModule.CollectionDomainEvent}, {@link org.apache.isis.applib.services.eventbus.ActionDomainEvent}). */ @Deprecated public class CollectionUsabilityEvent extends UsabilityEvent { private static final long serialVersionUID = 1L; public CollectionUsabilityEvent(final Object source, final Identifier identifier) { super(source, identifier); } }
apache-2.0
jwren/intellij-community
plugins/textmate/lib/bundles/search-result/src/extension.ts
11004
/*---------------------------------------------------------------------------------------------
 *  Copyright (c) Microsoft Corporation. All rights reserved.
 *  Licensed under the MIT License. See License.txt in the project root for license information.
 *--------------------------------------------------------------------------------------------*/

// Language-feature providers for VS Code's editable "search-result" documents:
// decorations, document symbols, directive completions, go-to-definition and links,
// all driven by a single cached line-by-line parse of the buffer.

import * as vscode from 'vscode';
import * as pathUtils from 'path';

// A file header line: any non-indented text ending with ':'.
const FILE_LINE_REGEX = /^(\S.*):$/;
// A result/context line: indentation, line number, ':' (match) or ' ' (context), indentation, text.
const RESULT_LINE_REGEX = /^(\s+)(\d+)(:| )(\s+)(.*)$/;
// Marker inserted by the search view where long lines were truncated.
const ELISION_REGEX = /⟪ ([0-9]+) characters skipped ⟫/g;

const SEARCH_RESULT_SELECTOR = { language: 'search-result', exclusive: true };
const DIRECTIVES = ['# Query:', '# Flags:', '# Including:', '# Excluding:', '# ContextLines:'];
const FLAGS = ['RegExp', 'CaseSensitive', 'IgnoreExcludeSettings', 'WordMatch'];

// Single-entry parse cache, keyed by document uri + version (see parseSearchResults).
let cachedLastParse: { version: number, parse: ParsedSearchResults, uri: vscode.Uri } | undefined;
let documentChangeListener: vscode.Disposable | undefined;


export function activate(context: vscode.ExtensionContext) {

	// Context lines are dimmed; match lines are bold.
	const contextLineDecorations = vscode.window.createTextEditorDecorationType({ opacity: '0.7' });
	const matchLineDecorations = vscode.window.createTextEditorDecorationType({ fontWeight: 'bold' });

	// Re-applies both decoration sets from a fresh (or cached) parse of the document.
	const decorate = (editor: vscode.TextEditor) => {
		const parsed = parseSearchResults(editor.document).filter(isResultLine);
		const contextRanges = parsed.filter(line => line.isContext).map(line => line.prefixRange);
		const matchRanges = parsed.filter(line => !line.isContext).map(line => line.prefixRange);
		editor.setDecorations(contextLineDecorations, contextRanges);
		editor.setDecorations(matchLineDecorations, matchRanges);
	};

	if (vscode.window.activeTextEditor && vscode.window.activeTextEditor.document.languageId === 'search-result') {
		decorate(vscode.window.activeTextEditor);
	}

	context.subscriptions.push(

		// One DocumentSymbol per file header line, spanning all of its result locations.
		vscode.languages.registerDocumentSymbolProvider(SEARCH_RESULT_SELECTOR, {
			provideDocumentSymbols(document: vscode.TextDocument, token: vscode.CancellationToken): vscode.DocumentSymbol[] {
				const results = parseSearchResults(document, token)
					.filter(isFileLine)
					.map(line => new vscode.DocumentSymbol(
						line.path,
						'',
						vscode.SymbolKind.File,
						line.allLocations.map(({ originSelectionRange }) => originSelectionRange!).reduce((p, c) => p.union(c), line.location.originSelectionRange!),
						line.location.originSelectionRange!,
					));

				return results;
			}
		}),

		// Completions for the '# Query:'-style header directives and for '# Flags:' values.
		vscode.languages.registerCompletionItemProvider(SEARCH_RESULT_SELECTOR, {
			provideCompletionItems(document: vscode.TextDocument, position: vscode.Position): vscode.CompletionItem[] {

				const line = document.lineAt(position.line);
				// Directives only appear in the first few lines of the header.
				if (position.line > 3) { return []; }
				if (position.character === 0 || (position.character === 1 && line.text === '#')) {
					const header = Array.from({ length: DIRECTIVES.length }).map((_, i) => document.lineAt(i).text);

					// Only offer directives not already present in the header.
					return DIRECTIVES
						.filter(suggestion => header.every(line => line.indexOf(suggestion) === -1))
						.map(flag => ({ label: flag, insertText: (flag.slice(position.character)) + ' ' }));
				}

				if (line.text.indexOf('# Flags:') === -1) { return []; }

				return FLAGS
					.filter(flag => line.text.indexOf(flag) === -1)
					.map(flag => ({ label: flag, insertText: flag + ' ' }));
			}
		}, '#'),

		// Go-to-definition: jump from a result line into the matched position in the target file.
		vscode.languages.registerDefinitionProvider(SEARCH_RESULT_SELECTOR, {
			provideDefinition(document: vscode.TextDocument, position: vscode.Position, token: vscode.CancellationToken): vscode.DefinitionLink[] {
				const lineResult = parseSearchResults(document, token)[position.line];
				if (!lineResult) { return []; }
				if (lineResult.type === 'file') {
					return lineResult.allLocations;
				}

				const location = lineResult.locations.find(l => l.originSelectionRange.contains(position));
				if (!location) {
					return [];
				}

				// Translate the cursor's column offset within the origin range onto the target line.
				const targetPos = new vscode.Position(
					location.targetSelectionRange.start.line,
					location.targetSelectionRange.start.character + (position.character - location.originSelectionRange.start.character)
				);
				return [{
					...location,
					targetSelectionRange: new vscode.Range(targetPos, targetPos),
				}];
			}
		}),

		// Clickable links on file header lines.
		vscode.languages.registerDocumentLinkProvider(SEARCH_RESULT_SELECTOR, {
			async provideDocumentLinks(document: vscode.TextDocument, token: vscode.CancellationToken): Promise<vscode.DocumentLink[]> {
				return parseSearchResults(document, token)
					.filter(isFileLine)
					.map(({ location }) => ({ range: location.originSelectionRange!, target: location.targetUri }));
			}
		}),

		vscode.window.onDidChangeActiveTextEditor(editor => {
			if (editor?.document.languageId === 'search-result') {
				// Clear the parse whenever we open a new editor.
				// Conservative because things like the URI might remain constant even if the contents change, and re-parsing even large files is relatively fast.
				cachedLastParse = undefined;

				// Only one change listener at a time: re-bind it to the newly active editor.
				documentChangeListener?.dispose();
				documentChangeListener = vscode.workspace.onDidChangeTextDocument(doc => {
					if (doc.document.uri === editor.document.uri) {
						decorate(editor);
					}
				});

				decorate(editor);
			}
		}),

		{ dispose() { cachedLastParse = undefined; documentChangeListener?.dispose(); } }
	);
}

/**
 * Resolves a path as rendered in search results back to a workspace Uri.
 * Handles, in order: the '(Settings) ' userdata prefix, absolute paths (including
 * untitled buffers), '~/'-relative paths, multi-root 'workspaceName • path' format,
 * single-root relative paths, and a prefix-match fallback for saved multi-root results.
 * Returns undefined (and logs) when no resolution applies.
 */
function relativePathToUri(path: string, resultsUri: vscode.Uri): vscode.Uri | undefined {

	const userDataPrefix = '(Settings) ';
	if (path.startsWith(userDataPrefix)) {
		return vscode.Uri.file(path.slice(userDataPrefix.length)).with({ scheme: 'vscode-userdata' });
	}

	if (pathUtils.isAbsolute(path)) {
		if (/^[\\\/]Untitled-\d*$/.test(path)) {
			return vscode.Uri.file(path.slice(1)).with({ scheme: 'untitled', path: path.slice(1) });
		}
		return vscode.Uri.file(path);
	}

	if (path.indexOf('~/') === 0) {
		const homePath = process.env.HOME || process.env.HOMEPATH || '';
		return vscode.Uri.file(pathUtils.join(homePath, path.slice(2)));
	}

	const uriFromFolderWithPath = (folder: vscode.WorkspaceFolder, path: string): vscode.Uri =>
		vscode.Uri.joinPath(folder.uri, path);

	if (vscode.workspace.workspaceFolders) {
		const multiRootFormattedPath = /^(.*) • (.*)$/.exec(path);
		if (multiRootFormattedPath) {
			const [, workspaceName, workspacePath] = multiRootFormattedPath;
			const folder = vscode.workspace.workspaceFolders.filter(wf => wf.name === workspaceName)[0];
			if (folder) {
				return uriFromFolderWithPath(folder, workspacePath);
			}
		}
		else if (vscode.workspace.workspaceFolders.length === 1) {
			return uriFromFolderWithPath(vscode.workspace.workspaceFolders[0], path);
		} else if (resultsUri.scheme !== 'untitled') {
			// We're in a multi-root workspace, but the path is not multi-root formatted
			// Possibly a saved search from a single root session. Try checking if the search result document's URI is in a current workspace folder.
			const prefixMatch = vscode.workspace.workspaceFolders.filter(wf => resultsUri.toString().startsWith(wf.uri.toString()))[0];
			if (prefixMatch) {
				return uriFromFolderWithPath(prefixMatch, path);
			}
		}
	}

	console.error(`Unable to resolve path ${path}`);
	return undefined;
}

// A parsed file header line: its target location plus all result locations under it.
type ParsedSearchFileLine = { type: 'file', location: vscode.LocationLink, allLocations: vscode.LocationLink[], path: string };
// A parsed result/context line: one location per elision-separated segment.
type ParsedSearchResultLine = { type: 'result', locations: Required<vscode.LocationLink>[], isContext: boolean, prefixRange: vscode.Range };
type ParsedSearchResults = Array<ParsedSearchFileLine | ParsedSearchResultLine>;

const isFileLine = (line: ParsedSearchResultLine | ParsedSearchFileLine): line is ParsedSearchFileLine => line.type === 'file';
const isResultLine = (line: ParsedSearchResultLine | ParsedSearchFileLine): line is ParsedSearchResultLine => line.type === 'result';


/**
 * Parses the whole search-result buffer into a sparse array indexed by line number.
 * File header lines set the "current target" file; subsequent result lines are resolved
 * against it, with one LocationLink per segment between elision markers so that column
 * offsets in the target file stay correct. The result is cached per (uri, version).
 */
function parseSearchResults(document: vscode.TextDocument, token?: vscode.CancellationToken): ParsedSearchResults {

	if (cachedLastParse && cachedLastParse.uri === document.uri && cachedLastParse.version === document.version) {
		return cachedLastParse.parse;
	}

	const lines = document.getText().split(/\r?\n/);
	const links: ParsedSearchResults = [];

	let currentTarget: vscode.Uri | undefined = undefined;
	let currentTargetLocations: vscode.LocationLink[] | undefined = undefined;

	for (let i = 0; i < lines.length; i++) {
		// TODO: This is probably always false, given we're pegging the thread...
		if (token?.isCancellationRequested) { return []; }
		const line = lines[i];

		const fileLine = FILE_LINE_REGEX.exec(line);
		if (fileLine) {
			const [, path] = fileLine;

			currentTarget = relativePathToUri(path, document.uri);
			if (!currentTarget) { continue; }
			currentTargetLocations = [];

			const location: vscode.LocationLink = {
				targetRange: new vscode.Range(0, 0, 0, 1),
				targetUri: currentTarget,
				originSelectionRange: new vscode.Range(i, 0, i, line.length),
			};

			links[i] = { type: 'file', location, allLocations: currentTargetLocations, path };
		}

		if (!currentTarget) { continue; }

		const resultLine = RESULT_LINE_REGEX.exec(line);
		if (resultLine) {
			const [, indentation, _lineNumber, seperator, resultIndentation] = resultLine;
			const lineNumber = +_lineNumber - 1;
			const resultStart = (indentation + _lineNumber + seperator + resultIndentation).length;
			const metadataOffset = (indentation + _lineNumber + seperator).length;
			// Peek window: +/-3 lines around the match in the target file.
			const targetRange = new vscode.Range(Math.max(lineNumber - 3, 0), 0, lineNumber + 3, line.length);

			let locations: Required<vscode.LocationLink>[] = [];

			// Allow line number, indentation, etc to take you to definition as well.
			locations.push({
				targetRange,
				targetSelectionRange: new vscode.Range(lineNumber, 0, lineNumber, 1),
				targetUri: currentTarget,
				originSelectionRange: new vscode.Range(i, 0, i, resultStart),
			});

			// Walk the elision markers; each gap between them maps to a target-column offset.
			let lastEnd = resultStart;
			let offset = 0;
			ELISION_REGEX.lastIndex = resultStart;
			for (let match: RegExpExecArray | null; (match = ELISION_REGEX.exec(line));) {
				locations.push({
					targetRange,
					targetSelectionRange: new vscode.Range(lineNumber, offset, lineNumber, offset),
					targetUri: currentTarget,
					originSelectionRange: new vscode.Range(i, lastEnd, i, ELISION_REGEX.lastIndex - match[0].length),
				});

				// Advance the target column by the visible segment plus the skipped character count.
				offset += (ELISION_REGEX.lastIndex - lastEnd - match[0].length) + Number(match[1]);
				lastEnd = ELISION_REGEX.lastIndex;
			}

			if (lastEnd < line.length) {
				locations.push({
					targetRange,
					targetSelectionRange: new vscode.Range(lineNumber, offset, lineNumber, offset),
					targetUri: currentTarget,
					originSelectionRange: new vscode.Range(i, lastEnd, i, line.length),
				});
			}

			currentTargetLocations?.push(...locations);

			links[i] = { type: 'result', locations, isContext: seperator === ' ', prefixRange: new vscode.Range(i, 0, i, metadataOffset) };
		}
	}

	cachedLastParse = {
		version: document.version,
		parse: links,
		uri: document.uri
	};

	return links;
}
apache-2.0
anindoasaha/php_nginx
php-5.5.16/ext/mbstring/tests/bug43841.phpt
2093
--TEST-- Test mb_strrpos() function : mb_strrpos offset is byte count for negative values --SKIPIF-- <?php extension_loaded('mbstring') or die('skip'); function_exists('mb_strrpos') or die("skip mb_strrpos() is not available in this build"); ?> --FILE-- <?php /* Prototype : int mb_strrpos(string $haystack, string $needle [, int $offset [, string $encoding]]) * Description: Find position of last occurrence of a string within another * Source code: ext/mbstring/mbstring.c */ /* * Test that mb_strrpos offset is byte count for negative values (should be character count) */ $offsets = array(-25, -24, -13, -12); $string_mb = base64_decode('5pel5pys6Kqe44OG44Kt44K544OI44Gn44GZ44CCMDEyMzTvvJXvvJbvv JfvvJjvvJnjgII='); $needle = base64_decode('44CC'); foreach ($offsets as $i) { echo "\n-- Offset is $i --\n"; echo "Multibyte String:\n"; var_dump( mb_strrpos($string_mb, $needle, $i, 'UTF-8') ); echo "ASCII String:\n"; echo "mb_strrpos:\n"; var_dump(mb_strrpos(b'This is na English ta', b'a', $i)); echo "strrpos:\n"; var_dump(strrpos(b'This is na English ta', b'a', $i)); } ?> --EXPECTF-- -- Offset is -25 -- Multibyte String: Warning: mb_strrpos(): Offset is greater than the length of haystack string in %s on line %d bool(false) ASCII String: mb_strrpos: Warning: mb_strrpos(): Offset is greater than the length of haystack string in %s on line %d bool(false) strrpos: Warning: strrpos(): Offset is greater than the length of haystack string in %s on line %d bool(false) -- Offset is -24 -- Multibyte String: Warning: mb_strrpos(): Offset is greater than the length of haystack string in %s on line %d bool(false) ASCII String: mb_strrpos: Warning: mb_strrpos(): Offset is greater than the length of haystack string in %s on line %d bool(false) strrpos: Warning: strrpos(): Offset is greater than the length of haystack string in %s on line %d bool(false) -- Offset is -13 -- Multibyte String: bool(false) ASCII String: mb_strrpos: bool(false) strrpos: bool(false) -- Offset is -12 -- Multibyte 
String: int(9) ASCII String: mb_strrpos: int(9) strrpos: int(9)
apache-2.0
meteorcloudy/bazel
src/test/java/net/starlark/java/annot/processor/testsources/ExtraPositionalsMissing.java
1324
// Copyright 2018 The Bazel Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package net.starlark.java.annot.processor.testsources; import net.starlark.java.annot.Param; import net.starlark.java.annot.StarlarkMethod; import net.starlark.java.eval.StarlarkThread; import net.starlark.java.eval.StarlarkValue; /** * Test case for a StarlarkMethod method which specifies extraPositionals, but omits that argument. */ public class ExtraPositionalsMissing implements StarlarkValue { @StarlarkMethod( name = "extra_positionals_missing", documented = false, parameters = {@Param(name = "one")}, extraPositionals = @Param(name = "args"), useStarlarkThread = true) public String threeArgMethod(String one, StarlarkThread thread) { return "bar"; } }
apache-2.0
amezgin/amezgin
chapter_014/sort_and_search/merge_sort/src/test/java/ru.job4j/package-info.java
90
/** * @author Alexander Mezgin * @version 1.0 * @since 07.07.2017 */ package ru.job4j;
apache-2.0
aayushkapoor206/whatshot
node_modules/@reactivex/rxjs/dist/amd/util/Immediate.js
8080
/** All credit for this helper goes to http://github.com/YuzuJS/setImmediate */ define(["require", "exports", './root'], function (require, exports, root_1) { exports.Immediate = { setImmediate: function (x) { return 0; }, clearImmediate: function (id) { } }; if (root_1.root && root_1.root.setImmediate) { exports.Immediate.setImmediate = root_1.root.setImmediate; exports.Immediate.clearImmediate = root_1.root.clearImmediate; } else { exports.Immediate = (function (global, Immediate) { var nextHandle = 1, // Spec says greater than zero tasksByHandle = {}, currentlyRunningATask = false, doc = global.document, setImmediate; // Don't get fooled by e.g. browserify environments. if ({}.toString.call(global.process) === '[object process]') { // For Node.js before 0.9 setImmediate = installNextTickImplementation(); } else if (canUsePostMessage()) { // For non-IE10 modern browsers setImmediate = installPostMessageImplementation(); } else if (global.MessageChannel) { // For web workers, where supported setImmediate = installMessageChannelImplementation(); } else if (doc && 'onreadystatechange' in doc.createElement('script')) { // For IE 6–8 setImmediate = installReadyStateChangeImplementation(); } else { // For older browsers setImmediate = installSetTimeoutImplementation(); } Immediate.setImmediate = setImmediate; Immediate.clearImmediate = clearImmediate; return Immediate; function clearImmediate(handle) { delete tasksByHandle[handle]; } function addFromSetImmediateArguments(args) { tasksByHandle[nextHandle] = partiallyApplied.apply(undefined, args); return nextHandle++; } // This function accepts the same arguments as setImmediate, but // returns a function that requires no arguments. 
function partiallyApplied(handler) { var args = []; for (var _i = 1; _i < arguments.length; _i++) { args[_i - 1] = arguments[_i]; } return function () { if (typeof handler === 'function') { handler.apply(undefined, args); } else { (new Function('' + handler))(); } }; } function runIfPresent(handle) { // From the spec: 'Wait until any invocations of this algorithm started before this one have completed.' // So if we're currently running a task, we'll need to delay this invocation. if (currentlyRunningATask) { // Delay by doing a setTimeout. setImmediate was tried instead, but in Firefox 7 it generated a // 'too much recursion' error. setTimeout(partiallyApplied(runIfPresent, handle), 0); } else { var task = tasksByHandle[handle]; if (task) { currentlyRunningATask = true; try { task(); } finally { clearImmediate(handle); currentlyRunningATask = false; } } } } function installNextTickImplementation() { return function setImmediate() { var handle = addFromSetImmediateArguments(arguments); global.process.nextTick(partiallyApplied(runIfPresent, handle)); return handle; }; } function canUsePostMessage() { // The test against `importScripts` prevents this implementation from being installed inside a web worker, // where `global.postMessage` means something completely different and can't be used for this purpose. 
if (global.postMessage && !global.importScripts) { var postMessageIsAsynchronous = true; var oldOnMessage = global.onmessage; global.onmessage = function () { postMessageIsAsynchronous = false; }; global.postMessage('', '*'); global.onmessage = oldOnMessage; return postMessageIsAsynchronous; } } function installPostMessageImplementation() { // Installs an event handler on `global` for the `message` event: see // * https://developer.mozilla.org/en/DOM/window.postMessage // * http://www.whatwg.org/specs/web-apps/current-work/multipage/comms.html#crossDocumentMessages var messagePrefix = 'setImmediate$' + Math.random() + '$'; var onGlobalMessage = function (event) { if (event.source === global && typeof event.data === 'string' && event.data.indexOf(messagePrefix) === 0) { runIfPresent(+event.data.slice(messagePrefix.length)); } }; if (global.addEventListener) { global.addEventListener('message', onGlobalMessage, false); } else { global.attachEvent('onmessage', onGlobalMessage); } return function setImmediate() { var handle = addFromSetImmediateArguments(arguments); global.postMessage(messagePrefix + handle, '*'); return handle; }; } function installMessageChannelImplementation() { var channel = new MessageChannel(); channel.port1.onmessage = function (event) { var handle = event.data; runIfPresent(handle); }; return function setImmediate() { var handle = addFromSetImmediateArguments(arguments); channel.port2.postMessage(handle); return handle; }; } function installReadyStateChangeImplementation() { var html = doc.documentElement; return function setImmediate() { var handle = addFromSetImmediateArguments(arguments); // Create a <script> element; its readystatechange event will be fired asynchronously once it is inserted // into the document. Do so, thus queuing up the task. Remember to clean up once it's been called. 
var script = doc.createElement('script'); script.onreadystatechange = function () { runIfPresent(handle); script.onreadystatechange = null; html.removeChild(script); script = null; }; html.appendChild(script); return handle; }; } function installSetTimeoutImplementation() { return function setImmediate() { var handle = addFromSetImmediateArguments(arguments); setTimeout(partiallyApplied(runIfPresent, handle), 0); return handle; }; } }(root_1.root, exports.Immediate)); } }); //# sourceMappingURL=Immediate.js.map
apache-2.0
stevearm/camlistore
vendor/github.com/lib/pq/buf.go
1328
package pq import ( "bytes" "encoding/binary" "github.com/lib/pq/oid" ) type readBuf []byte func (b *readBuf) int32() (n int) { n = int(int32(binary.BigEndian.Uint32(*b))) *b = (*b)[4:] return } func (b *readBuf) oid() (n oid.Oid) { n = oid.Oid(binary.BigEndian.Uint32(*b)) *b = (*b)[4:] return } func (b *readBuf) int16() (n int) { n = int(binary.BigEndian.Uint16(*b)) *b = (*b)[2:] return } var stringTerm = []byte{0} func (b *readBuf) string() string { i := bytes.Index(*b, stringTerm) if i < 0 { errorf("invalid message format; expected string terminator") } s := (*b)[:i] *b = (*b)[i+1:] return string(s) } func (b *readBuf) next(n int) (v []byte) { v = (*b)[:n] *b = (*b)[n:] return } func (b *readBuf) byte() byte { return b.next(1)[0] } type writeBuf []byte func newWriteBuf(c byte) *writeBuf { b := make(writeBuf, 5) b[0] = c return &b } func (b *writeBuf) int32(n int) { x := make([]byte, 4) binary.BigEndian.PutUint32(x, uint32(n)) *b = append(*b, x...) } func (b *writeBuf) int16(n int) { x := make([]byte, 2) binary.BigEndian.PutUint16(x, uint16(n)) *b = append(*b, x...) } func (b *writeBuf) string(s string) { *b = append(*b, (s + "\000")...) } func (b *writeBuf) byte(c byte) { *b = append(*b, c) } func (b *writeBuf) bytes(v []byte) { *b = append(*b, v...) }
apache-2.0
michaelgallacher/intellij-community
java/java-indexing-impl/src/com/intellij/psi/impl/PsiShortNamesCacheImpl.java
12032
/* * Copyright 2000-2016 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.psi.impl; import com.intellij.openapi.progress.ProgressIndicatorProvider; import com.intellij.openapi.vfs.VirtualFile; import com.intellij.psi.*; import com.intellij.psi.impl.java.stubs.index.JavaFieldNameIndex; import com.intellij.psi.impl.java.stubs.index.JavaMethodNameIndex; import com.intellij.psi.impl.java.stubs.index.JavaShortClassNameIndex; import com.intellij.psi.impl.java.stubs.index.JavaStubIndexKeys; import com.intellij.psi.impl.search.JavaSourceFilterScope; import com.intellij.psi.search.FilenameIndex; import com.intellij.psi.search.GlobalSearchScope; import com.intellij.psi.search.PsiShortNamesCache; import com.intellij.psi.stubs.StubIndex; import com.intellij.util.*; import com.intellij.util.containers.HashSet; import com.intellij.util.indexing.IdFilter; import gnu.trove.THashMap; import gnu.trove.THashSet; import gnu.trove.TObjectHashingStrategy; import org.jetbrains.annotations.NonNls; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import java.util.*; public class PsiShortNamesCacheImpl extends PsiShortNamesCache { private final PsiManagerEx myManager; public PsiShortNamesCacheImpl(PsiManagerEx manager) { myManager = manager; } @Override @NotNull public PsiFile[] getFilesByName(@NotNull String name) { return FilenameIndex.getFilesByName(myManager.getProject(), name, 
GlobalSearchScope.projectScope(myManager.getProject())); } @Override @NotNull public String[] getAllFileNames() { return FilenameIndex.getAllFilenames(myManager.getProject()); } @Override @NotNull public PsiClass[] getClassesByName(@NotNull String name, @NotNull final GlobalSearchScope scope) { final Collection<PsiClass> classes = JavaShortClassNameIndex.getInstance().get(name, myManager.getProject(), scope); if (classes.isEmpty()) return PsiClass.EMPTY_ARRAY; ArrayList<PsiClass> list = new ArrayList<>(classes.size()); Map<String, List<PsiClass>> uniqueQName2Classes = new THashMap<>(classes.size()); Set<PsiClass> hiddenClassesToRemove = null; OuterLoop: for (PsiClass aClass : classes) { VirtualFile vFile = aClass.getContainingFile().getVirtualFile(); if (!scope.contains(vFile)) continue; String qName = aClass.getQualifiedName(); if (qName != null) { List<PsiClass> previousQNamedClasses = uniqueQName2Classes.get(qName); List<PsiClass> qNamedClasses; if (previousQNamedClasses != null) { qNamedClasses = new SmartList<>(); for(PsiClass previousClass:previousQNamedClasses) { VirtualFile previousClassVFile = previousClass.getContainingFile().getVirtualFile(); int res = scope.compare(previousClassVFile, vFile); if (res > 0) { continue OuterLoop; // previousClass hides aClass in classpath, so skip adding aClass } else if (res < 0) { // aClass hides previousClass in classpath, so remove it from list later if (hiddenClassesToRemove == null) hiddenClassesToRemove = new THashSet<>(); hiddenClassesToRemove.add(previousClass); qNamedClasses.add(aClass); } else { qNamedClasses.add(aClass); } } } else { qNamedClasses = new SmartList<>(aClass); } uniqueQName2Classes.put(qName, qNamedClasses); } list.add(aClass); } if (hiddenClassesToRemove != null) list.removeAll(hiddenClassesToRemove); return list.toArray(new PsiClass[list.size()]); } @Override @NotNull public String[] getAllClassNames() { return 
ArrayUtil.toStringArray(JavaShortClassNameIndex.getInstance().getAllKeys(myManager.getProject())); } @Override public void getAllClassNames(@NotNull HashSet<String> set) { Processor<String> processor = Processors.cancelableCollectProcessor(set); processAllClassNames(processor); } @Override public boolean processAllClassNames(Processor<String> processor) { return JavaShortClassNameIndex.getInstance().processAllKeys(myManager.getProject(), processor); } @Override public boolean processAllClassNames(Processor<String> processor, GlobalSearchScope scope, IdFilter filter) { return StubIndex.getInstance().processAllKeys(JavaStubIndexKeys.CLASS_SHORT_NAMES, processor, scope, filter); } @Override public boolean processAllMethodNames(Processor<String> processor, GlobalSearchScope scope, IdFilter filter) { return StubIndex.getInstance().processAllKeys(JavaStubIndexKeys.METHODS, processor, scope, filter); } @Override public boolean processAllFieldNames(Processor<String> processor, GlobalSearchScope scope, IdFilter filter) { return StubIndex.getInstance().processAllKeys(JavaStubIndexKeys.FIELDS, processor, scope, filter); } @Override @NotNull public PsiMethod[] getMethodsByName(@NotNull String name, @NotNull final GlobalSearchScope scope) { Collection<PsiMethod> methods = StubIndex.getElements(JavaStubIndexKeys.METHODS, name, myManager.getProject(), new JavaSourceFilterScope(scope), PsiMethod.class); if (methods.isEmpty()) return PsiMethod.EMPTY_ARRAY; List<PsiMethod> list = filterMembers(methods, scope); return list.toArray(new PsiMethod[list.size()]); } @Override @NotNull public PsiMethod[] getMethodsByNameIfNotMoreThan(@NonNls @NotNull final String name, @NotNull final GlobalSearchScope scope, final int maxCount) { final List<PsiMethod> methods = new SmartList<>(); StubIndex.getInstance().processElements(JavaStubIndexKeys.METHODS, name, myManager.getProject(), scope, PsiMethod.class, new CommonProcessors.CollectProcessor < PsiMethod > (methods){ @Override public boolean 
process(PsiMethod method) { return methods.size() != maxCount && super.process(method); } }); if (methods.isEmpty()) return PsiMethod.EMPTY_ARRAY; List<PsiMethod> list = filterMembers(methods, scope); return list.toArray(new PsiMethod[list.size()]); } @Override public boolean processMethodsWithName(@NonNls @NotNull String name, @NotNull GlobalSearchScope scope, @NotNull Processor<PsiMethod> processor) { return StubIndex.getInstance().processElements(JavaStubIndexKeys.METHODS, name, myManager.getProject(), scope, PsiMethod.class, processor); } @Override @NotNull public String[] getAllMethodNames() { return ArrayUtil.toStringArray(JavaMethodNameIndex.getInstance().getAllKeys(myManager.getProject())); } @Override public void getAllMethodNames(@NotNull HashSet<String> set) { JavaMethodNameIndex.getInstance().processAllKeys(myManager.getProject(), Processors.cancelableCollectProcessor(set)); } @Override @NotNull public PsiField[] getFieldsByNameIfNotMoreThan(@NotNull String name, @NotNull final GlobalSearchScope scope, final int maxCount) { final List<PsiField> methods = new SmartList<>(); StubIndex.getInstance().processElements(JavaStubIndexKeys.FIELDS, name, myManager.getProject(), scope, PsiField.class, new CommonProcessors.CollectProcessor < PsiField > (methods){ @Override public boolean process(PsiField method) { return methods.size() != maxCount && super.process(method); } }); if (methods.isEmpty()) return PsiField.EMPTY_ARRAY; List<PsiField> list = filterMembers(methods, scope); return list.toArray(new PsiField[list.size()]); } @NotNull @Override public PsiField[] getFieldsByName(@NotNull String name, @NotNull final GlobalSearchScope scope) { final Collection<PsiField> fields = JavaFieldNameIndex.getInstance().get(name, myManager.getProject(), scope); if (fields.isEmpty()) return PsiField.EMPTY_ARRAY; List<PsiField> list = filterMembers(fields, scope); return list.toArray(new PsiField[list.size()]); } @Override @NotNull public String[] getAllFieldNames() { return 
ArrayUtil.toStringArray(JavaFieldNameIndex.getInstance().getAllKeys(myManager.getProject())); } @Override public void getAllFieldNames(@NotNull HashSet<String> set) { Processor<String> processor = Processors.cancelableCollectProcessor(set); JavaFieldNameIndex.getInstance().processAllKeys(myManager.getProject(), processor); } @Override public boolean processFieldsWithName(@NotNull String name, @NotNull Processor<? super PsiField> processor, @NotNull GlobalSearchScope scope, @Nullable IdFilter filter) { return StubIndex.getInstance().processElements(JavaStubIndexKeys.FIELDS, name, myManager.getProject(), new JavaSourceFilterScope(scope), filter, PsiField.class, processor); } @Override public boolean processMethodsWithName(@NonNls @NotNull String name, @NotNull Processor<? super PsiMethod> processor, @NotNull GlobalSearchScope scope, @Nullable IdFilter filter) { return StubIndex.getInstance().processElements(JavaStubIndexKeys.METHODS, name, myManager.getProject(), new JavaSourceFilterScope(scope), filter, PsiMethod.class, processor); } @Override public boolean processClassesWithName(@NotNull String name, @NotNull Processor<? 
super PsiClass> processor, @NotNull GlobalSearchScope scope, @Nullable IdFilter filter) { return StubIndex.getInstance().processElements(JavaStubIndexKeys.CLASS_SHORT_NAMES, name, myManager.getProject(), new JavaSourceFilterScope(scope), filter, PsiClass.class, processor); } private <T extends PsiMember> List<T> filterMembers(Collection<T> members, final GlobalSearchScope scope) { List<T> result = new ArrayList<>(members.size()); Set<PsiMember> set = new THashSet<>(members.size(), new TObjectHashingStrategy<PsiMember>() { @Override public int computeHashCode(PsiMember member) { int code = 0; final PsiClass clazz = member.getContainingClass(); if (clazz != null) { String name = clazz.getName(); if (name != null) { code += name.hashCode(); } else { //anonymous classes are not equivalent code += clazz.hashCode(); } } if (member instanceof PsiMethod) { code += 37 * ((PsiMethod)member).getParameterList().getParametersCount(); } return code; } @Override public boolean equals(PsiMember object, PsiMember object1) { return myManager.areElementsEquivalent(object, object1); } }); for (T member : members) { ProgressIndicatorProvider.checkCanceled(); if (!scope.contains(member.getContainingFile().getVirtualFile())) continue; if (!set.add(member)) continue; result.add(member); } return result; } }
apache-2.0
chasetb/sal
watson/migrations/0001_initial.py
1569
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.core.management import call_command def install_watson(apps, schema_editor): call_command("installwatson", verbosity=0) def uninstall_watson(apps, schema_editor): call_command("uninstallwatson", verbosity=0) class Migration(migrations.Migration): dependencies = [ ('contenttypes', '0001_initial'), ] operations = [ migrations.CreateModel( name='SearchEntry', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('engine_slug', models.CharField(default='default', max_length=200, db_index=True)), ('object_id', models.TextField()), ('object_id_int', models.IntegerField(db_index=True, null=True, blank=True)), ('title', models.CharField(max_length=1000)), ('description', models.TextField(blank=True)), ('content', models.TextField(blank=True)), ('url', models.CharField(max_length=1000, blank=True)), ('meta_encoded', models.TextField()), ('content_type', models.ForeignKey(to='contenttypes.ContentType')), ], options={ 'verbose_name_plural': 'search entries', }, bases=(models.Model,), ), migrations.RunPython( install_watson, uninstall_watson, ), ]
apache-2.0
tophj-ibm/moby
vendor/github.com/containerd/containerd/container_opts_unix.go
7400
// +build !windows /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package containerd import ( "context" "encoding/json" "fmt" "os" "path/filepath" "syscall" "github.com/containerd/containerd/api/types" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/linux/runctypes" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/platforms" "github.com/gogo/protobuf/proto" protobuf "github.com/gogo/protobuf/types" digest "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) // WithCheckpoint allows a container to be created from the checkpointed information // provided by the descriptor. 
The image, snapshot, and runtime specifications are // restored on the container func WithCheckpoint(im Image, snapshotKey string) NewContainerOpts { // set image and rw, and spec return func(ctx context.Context, client *Client, c *containers.Container) error { var ( desc = im.Target() id = desc.Digest store = client.ContentStore() ) index, err := decodeIndex(ctx, store, id) if err != nil { return err } var rw *v1.Descriptor for _, m := range index.Manifests { switch m.MediaType { case v1.MediaTypeImageLayer: fk := m rw = &fk case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList: config, err := images.Config(ctx, store, m, platforms.Default()) if err != nil { return errors.Wrap(err, "unable to resolve image config") } diffIDs, err := images.RootFS(ctx, store, config) if err != nil { return errors.Wrap(err, "unable to get rootfs") } setSnapshotterIfEmpty(c) if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, snapshotKey, identity.ChainID(diffIDs).String()); err != nil { if !errdefs.IsAlreadyExists(err) { return err } } c.Image = index.Annotations["image.name"] case images.MediaTypeContainerd1CheckpointConfig: data, err := content.ReadBlob(ctx, store, m.Digest) if err != nil { return errors.Wrap(err, "unable to read checkpoint config") } var any protobuf.Any if err := proto.Unmarshal(data, &any); err != nil { return err } c.Spec = &any } } if rw != nil { // apply the rw snapshot to the new rw layer mounts, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, snapshotKey) if err != nil { return errors.Wrapf(err, "unable to get mounts for %s", snapshotKey) } if _, err := client.DiffService().Apply(ctx, *rw, mounts); err != nil { return errors.Wrap(err, "unable to apply rw diff") } } c.SnapshotKey = snapshotKey return nil } } // WithTaskCheckpoint allows a task to be created with live runtime and memory data from a // previous checkpoint. 
Additional software such as CRIU may be required to // restore a task from a checkpoint func WithTaskCheckpoint(im Image) NewTaskOpts { return func(ctx context.Context, c *Client, info *TaskInfo) error { desc := im.Target() id := desc.Digest index, err := decodeIndex(ctx, c.ContentStore(), id) if err != nil { return err } for _, m := range index.Manifests { if m.MediaType == images.MediaTypeContainerd1Checkpoint { info.Checkpoint = &types.Descriptor{ MediaType: m.MediaType, Size_: m.Size, Digest: m.Digest, } return nil } } return fmt.Errorf("checkpoint not found in index %s", id) } } func decodeIndex(ctx context.Context, store content.Provider, id digest.Digest) (*v1.Index, error) { var index v1.Index p, err := content.ReadBlob(ctx, store, id) if err != nil { return nil, err } if err := json.Unmarshal(p, &index); err != nil { return nil, err } return &index, nil } // WithRemappedSnapshot creates a new snapshot and remaps the uid/gid for the // filesystem to be used by a container with user namespaces func WithRemappedSnapshot(id string, i Image, uid, gid uint32) NewContainerOpts { return withRemappedSnapshotBase(id, i, uid, gid, false) } // WithRemappedSnapshotView is similar to WithRemappedSnapshot but rootfs is mounted as read-only. 
func WithRemappedSnapshotView(id string, i Image, uid, gid uint32) NewContainerOpts { return withRemappedSnapshotBase(id, i, uid, gid, true) } func withRemappedSnapshotBase(id string, i Image, uid, gid uint32, readonly bool) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default()) if err != nil { return err } setSnapshotterIfEmpty(c) var ( snapshotter = client.SnapshotService(c.Snapshotter) parent = identity.ChainID(diffIDs).String() usernsID = fmt.Sprintf("%s-%d-%d", parent, uid, gid) ) if _, err := snapshotter.Stat(ctx, usernsID); err == nil { if _, err := snapshotter.Prepare(ctx, id, usernsID); err == nil { c.SnapshotKey = id c.Image = i.Name() return nil } else if !errdefs.IsNotFound(err) { return err } } mounts, err := snapshotter.Prepare(ctx, usernsID+"-remap", parent) if err != nil { return err } if err := remapRootFS(ctx, mounts, uid, gid); err != nil { snapshotter.Remove(ctx, usernsID) return err } if err := snapshotter.Commit(ctx, usernsID, usernsID+"-remap"); err != nil { return err } if readonly { _, err = snapshotter.View(ctx, id, usernsID) } else { _, err = snapshotter.Prepare(ctx, id, usernsID) } if err != nil { return err } c.SnapshotKey = id c.Image = i.Name() return nil } } func remapRootFS(ctx context.Context, mounts []mount.Mount, uid, gid uint32) error { return mount.WithTempMount(ctx, mounts, func(root string) error { return filepath.Walk(root, incrementFS(root, uid, gid)) }) } func incrementFS(root string, uidInc, gidInc uint32) filepath.WalkFunc { return func(path string, info os.FileInfo, err error) error { if err != nil { return err } var ( stat = info.Sys().(*syscall.Stat_t) u, g = int(stat.Uid + uidInc), int(stat.Gid + gidInc) ) // be sure the lchown the path as to not de-reference the symlink to a host file return os.Lchown(path, u, g) } } // WithNoPivotRoot instructs the runtime not to you pivot_root func 
WithNoPivotRoot(_ context.Context, _ *Client, info *TaskInfo) error { if info.Options == nil { info.Options = &runctypes.CreateOptions{ NoPivotRoot: true, } return nil } copts, ok := info.Options.(*runctypes.CreateOptions) if !ok { return errors.New("invalid options type, expected runctypes.CreateOptions") } copts.NoPivotRoot = true return nil }
apache-2.0
simonleung8/kubernetes
test/e2e/storage/volume_provisioning.go
30947
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package storage import ( "fmt" "strings" "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" apierrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/api/core/v1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" storage "k8s.io/api/storage/v1" storagebeta "k8s.io/api/storage/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/authentication/serviceaccount" clientset "k8s.io/client-go/kubernetes" v1helper "k8s.io/kubernetes/pkg/api/v1/helper" storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/test/e2e/framework" ) type storageClassTest struct { name string cloudProviders []string provisioner string parameters map[string]string claimSize string expectedSize string pvCheck func(volume *v1.PersistentVolume) error } const ( // Plugin name of the external provisioner externalPluginName = "example.com/nfs" ) func testDynamicProvisioning(t storageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass) { var err error if class != nil { By("creating a StorageClass " + class.Name) class, err = 
client.StorageV1().StorageClasses().Create(class) Expect(err).NotTo(HaveOccurred()) defer func() { framework.Logf("deleting storage class %s", class.Name) framework.ExpectNoError(client.StorageV1().StorageClasses().Delete(class.Name, nil)) }() } By("creating a claim") claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim) Expect(err).NotTo(HaveOccurred()) defer func() { framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name) // typically this claim has already been deleted err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil) if err != nil && !apierrs.IsNotFound(err) { framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err) } }() err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout) Expect(err).NotTo(HaveOccurred()) By("checking the claim") // Get new copy of the claim claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) // Get the bound PV pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) // Check sizes expectedCapacity := resource.MustParse(t.expectedSize) pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)] Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()), "pvCapacity is not equal to expectedCapacity") requestedCapacity := resource.MustParse(t.claimSize) claimCapacity := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()), "claimCapacity is not equal to requestedCapacity") // Check PV properties By("checking the PV") Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(v1.PersistentVolumeReclaimDelete)) expectedAccessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce} 
Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes)) Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name)) Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace)) // Run the checker if t.pvCheck != nil { err = t.pvCheck(pv) Expect(err).NotTo(HaveOccurred()) } // We start two pods: // - The first writes 'hello word' to the /mnt/test (= the volume). // - The second one runs grep 'hello world' on /mnt/test. // If both succeed, Kubernetes actually allocated something that is // persistent across pods. By("checking the created volume is writable") runInPodWithVolume(client, claim.Namespace, claim.Name, "echo 'hello world' > /mnt/test/data") By("checking the created volume is readable and retains data") runInPodWithVolume(client, claim.Namespace, claim.Name, "grep 'hello world' /mnt/test/data") By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name)) framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)) // Wait for the PV to get deleted. Technically, the first few delete // attempts may fail, as the volume is still attached to a node because // kubelet is slowly cleaning up the previous pod, however it should succeed // in a couple of minutes. Wait 20 minutes to recover from random cloud // hiccups. By(fmt.Sprintf("deleting the claim's PV %q", pv.Name)) framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute)) } // checkAWSEBS checks properties of an AWS EBS. Test framework does not // instantiate full AWS provider, therefore we need use ec2 API directly. 
func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool) error { diskName := volume.Spec.AWSElasticBlockStore.VolumeID var client *ec2.EC2 tokens := strings.Split(diskName, "/") volumeID := tokens[len(tokens)-1] zone := framework.TestContext.CloudConfig.Zone if len(zone) > 0 { region := zone[:len(zone)-1] cfg := aws.Config{Region: &region} framework.Logf("using region %s", region) client = ec2.New(session.New(), &cfg) } else { framework.Logf("no region configured") client = ec2.New(session.New()) } request := &ec2.DescribeVolumesInput{ VolumeIds: []*string{&volumeID}, } info, err := client.DescribeVolumes(request) if err != nil { return fmt.Errorf("error querying ec2 for volume %q: %v", volumeID, err) } if len(info.Volumes) == 0 { return fmt.Errorf("no volumes found for volume %q", volumeID) } if len(info.Volumes) > 1 { return fmt.Errorf("multiple volumes found for volume %q", volumeID) } awsVolume := info.Volumes[0] if awsVolume.VolumeType == nil { return fmt.Errorf("expected volume type %q, got nil", volumeType) } if *awsVolume.VolumeType != volumeType { return fmt.Errorf("expected volume type %q, got %q", volumeType, *awsVolume.VolumeType) } if encrypted && awsVolume.Encrypted == nil { return fmt.Errorf("expected encrypted volume, got no encryption") } if encrypted && !*awsVolume.Encrypted { return fmt.Errorf("expected encrypted volume, got %v", *awsVolume.Encrypted) } return nil } func checkGCEPD(volume *v1.PersistentVolume, volumeType string) error { cloud, err := framework.GetGCECloud() if err != nil { return err } diskName := volume.Spec.GCEPersistentDisk.PDName disk, err := cloud.GetDiskByNameUnknownZone(diskName) if err != nil { return err } if !strings.HasSuffix(disk.Type, volumeType) { return fmt.Errorf("unexpected disk type %q, expected suffix %q", disk.Type, volumeType) } return nil } var _ = SIGDescribe("Dynamic Provisioning", func() { f := framework.NewDefaultFramework("volume-provisioning") // filled in BeforeEach var c 
clientset.Interface var ns string BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name }) SIGDescribe("DynamicProvisioner", func() { It("should provision storage with different parameters [Slow]", func() { cloudZone := getRandomCloudZone(c) // This test checks that dynamic provisioning can provision a volume // that can be used to persist data among pods. tests := []storageClassTest{ { "SSD PD on GCE/GKE", []string{"gce", "gke"}, "kubernetes.io/gce-pd", map[string]string{ "type": "pd-ssd", "zone": cloudZone, }, "1.5Gi", "2Gi", func(volume *v1.PersistentVolume) error { return checkGCEPD(volume, "pd-ssd") }, }, { "HDD PD on GCE/GKE", []string{"gce", "gke"}, "kubernetes.io/gce-pd", map[string]string{ "type": "pd-standard", }, "1.5Gi", "2Gi", func(volume *v1.PersistentVolume) error { return checkGCEPD(volume, "pd-standard") }, }, // AWS { "gp2 EBS on AWS", []string{"aws"}, "kubernetes.io/aws-ebs", map[string]string{ "type": "gp2", "zone": cloudZone, }, "1.5Gi", "2Gi", func(volume *v1.PersistentVolume) error { return checkAWSEBS(volume, "gp2", false) }, }, { "io1 EBS on AWS", []string{"aws"}, "kubernetes.io/aws-ebs", map[string]string{ "type": "io1", "iopsPerGB": "50", }, "3.5Gi", "4Gi", // 4 GiB is minimum for io1 func(volume *v1.PersistentVolume) error { return checkAWSEBS(volume, "io1", false) }, }, { "sc1 EBS on AWS", []string{"aws"}, "kubernetes.io/aws-ebs", map[string]string{ "type": "sc1", }, "500Gi", // minimum for sc1 "500Gi", func(volume *v1.PersistentVolume) error { return checkAWSEBS(volume, "sc1", false) }, }, { "st1 EBS on AWS", []string{"aws"}, "kubernetes.io/aws-ebs", map[string]string{ "type": "st1", }, "500Gi", // minimum for st1 "500Gi", func(volume *v1.PersistentVolume) error { return checkAWSEBS(volume, "st1", false) }, }, { "encrypted EBS on AWS", []string{"aws"}, "kubernetes.io/aws-ebs", map[string]string{ "encrypted": "true", }, "1Gi", "1Gi", func(volume *v1.PersistentVolume) error { return checkAWSEBS(volume, "gp2", true) }, }, // OpenStack 
generic tests (works on all OpenStack deployments) { "generic Cinder volume on OpenStack", []string{"openstack"}, "kubernetes.io/cinder", map[string]string{}, "1.5Gi", "2Gi", nil, // there is currently nothing to check on OpenStack }, { "Cinder volume with empty volume type and zone on OpenStack", []string{"openstack"}, "kubernetes.io/cinder", map[string]string{ "type": "", "availability": "", }, "1.5Gi", "2Gi", nil, // there is currently nothing to check on OpenStack }, // vSphere generic test { "generic vSphere volume", []string{"vsphere"}, "kubernetes.io/vsphere-volume", map[string]string{}, "1.5Gi", "1.5Gi", nil, }, { "Azure disk volume with empty sku and location", []string{"azure"}, "kubernetes.io/azure-disk", map[string]string{}, "1Gi", "1Gi", nil, }, } var betaTest *storageClassTest for i, t := range tests { // Beware of clojure, use local variables instead of those from // outer scope test := t if !framework.ProviderIs(test.cloudProviders...) { framework.Logf("Skipping %q: cloud providers is not %v", test.name, test.cloudProviders) continue } // Remember the last supported test for subsequent test of beta API betaTest = &test By("Testing " + test.name) suffix := fmt.Sprintf("%d", i) class := newStorageClass(test, ns, suffix) claim := newClaim(test, ns, suffix) claim.Spec.StorageClassName = &class.Name testDynamicProvisioning(test, c, claim, class) } // Run the last test with storage.k8s.io/v1beta1 and beta annotation on pvc if betaTest != nil { By("Testing " + betaTest.name + " with beta volume provisioning") class := newBetaStorageClass(*betaTest, "beta") // we need to create the class manually, testDynamicProvisioning does not accept beta class class, err := c.StorageV1beta1().StorageClasses().Create(class) Expect(err).NotTo(HaveOccurred()) defer deleteStorageClass(c, class.Name) claim := newClaim(*betaTest, ns, "beta") claim.Annotations = map[string]string{ v1.BetaStorageClassAnnotation: class.Name, } testDynamicProvisioning(*betaTest, c, claim, nil) } 
}) // NOTE: Slow! The test will wait up to 5 minutes (framework.ClaimProvisionTimeout) // when there is no regression. It("should not provision a volume in an unmanaged GCE zone. [Slow]", func() { framework.SkipUnlessProviderIs("gce", "gke") var suffix string = "unmananged" By("Discovering an unmanaged zone") allZones := sets.NewString() // all zones in the project managedZones := sets.NewString() // subset of allZones gceCloud, err := framework.GetGCECloud() Expect(err).NotTo(HaveOccurred()) // Get all k8s managed zones managedZones, err = gceCloud.GetAllZones() Expect(err).NotTo(HaveOccurred()) // Get a list of all zones in the project zones, err := gceCloud.GetComputeService().Zones.List(framework.TestContext.CloudConfig.ProjectID).Do() Expect(err).NotTo(HaveOccurred()) for _, z := range zones.Items { allZones.Insert(z.Name) } // Get the subset of zones not managed by k8s var unmanagedZone string var popped bool unmanagedZones := allZones.Difference(managedZones) // And select one of them at random. 
if unmanagedZone, popped = unmanagedZones.PopAny(); !popped { framework.Skipf("No unmanaged zones found.") } By("Creating a StorageClass for the unmanaged zone") test := storageClassTest{ name: "unmanaged_zone", provisioner: "kubernetes.io/gce-pd", parameters: map[string]string{"zone": unmanagedZone}, claimSize: "1Gi", } sc := newStorageClass(test, ns, suffix) sc, err = c.StorageV1().StorageClasses().Create(sc) Expect(err).NotTo(HaveOccurred()) defer deleteStorageClass(c, sc.Name) By("Creating a claim and expecting it to timeout") pvc := newClaim(test, ns, suffix) pvc.Spec.StorageClassName = &sc.Name pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc) Expect(err).NotTo(HaveOccurred()) defer func() { framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) }() // The claim should timeout phase:Pending err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionTimeout) Expect(err).To(HaveOccurred()) framework.Logf(err.Error()) }) It("should test that deleting a claim before the volume is provisioned deletes the volume.", func() { // This case tests for the regressions of a bug fixed by PR #21268 // REGRESSION: Deleting the PVC before the PV is provisioned can result in the PV // not being deleted. // NOTE: Polls until no PVs are detected, times out at 5 minutes. 
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") const raceAttempts int = 100 var residualPVs []*v1.PersistentVolume By(fmt.Sprintf("Creating and deleting PersistentVolumeClaims %d times", raceAttempts)) test := storageClassTest{ name: "deletion race", provisioner: "", // Use a native one based on current cloud provider claimSize: "1Gi", } class := newStorageClass(test, ns, "race") class, err := c.StorageV1().StorageClasses().Create(class) Expect(err).NotTo(HaveOccurred()) defer deleteStorageClass(c, class.Name) // To increase chance of detection, attempt multiple iterations for i := 0; i < raceAttempts; i++ { suffix := fmt.Sprintf("race-%d", i) claim := newClaim(test, ns, suffix) claim.Spec.StorageClassName = &class.Name tmpClaim, err := framework.CreatePVC(c, ns, claim) Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, tmpClaim.Name, ns)) } By(fmt.Sprintf("Checking for residual PersistentVolumes associated with StorageClass %s", class.Name)) residualPVs, err = waitForProvisionedVolumesDeleted(c, class.Name) Expect(err).NotTo(HaveOccurred()) // Cleanup the test resources before breaking defer deleteProvisionedVolumesAndDisks(c, residualPVs) // Report indicators of regression if len(residualPVs) > 0 { framework.Logf("Remaining PersistentVolumes:") for i, pv := range residualPVs { framework.Logf("\t%d) %s", i+1, pv.Name) } framework.Failf("Expected 0 PersistentVolumes remaining. 
Found %d", len(residualPVs)) } framework.Logf("0 PersistentVolumes remain.") }) }) SIGDescribe("DynamicProvisioner External", func() { It("should let an external dynamic provisioner create and delete persistent volumes [Slow]", func() { // external dynamic provisioner pods need additional permissions provided by the // persistent-volume-provisioner role framework.BindClusterRole(c.Rbac(), "system:persistent-volume-provisioner", ns, rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: ns, Name: "default"}) err := framework.WaitForAuthorizationUpdate(c.AuthorizationV1beta1(), serviceaccount.MakeUsername(ns, "default"), "", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true) framework.ExpectNoError(err, "Failed to update authorization: %v", err) By("creating an external dynamic provisioner pod") pod := startExternalProvisioner(c, ns) defer framework.DeletePodOrFail(c, ns, pod.Name) By("creating a StorageClass") test := storageClassTest{ name: "external provisioner test", provisioner: externalPluginName, claimSize: "1500Mi", expectedSize: "1500Mi", } class := newStorageClass(test, ns, "external") className := class.Name claim := newClaim(test, ns, "external") // the external provisioner understands Beta only right now, see // https://github.com/kubernetes-incubator/external-storage/issues/37 // claim.Spec.StorageClassName = &className claim.Annotations = map[string]string{ v1.BetaStorageClassAnnotation: className, } By("creating a claim with a external provisioning annotation") testDynamicProvisioning(test, c, claim, class) }) }) SIGDescribe("DynamicProvisioner Default", func() { It("should create and delete default persistent volumes [Slow]", func() { framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") By("creating a claim with no annotation") test := storageClassTest{ name: "default", claimSize: "2Gi", expectedSize: "2Gi", } claim := newClaim(test, ns, "default") 
testDynamicProvisioning(test, c, claim, nil) }) // Modifying the default storage class can be disruptive to other tests that depend on it It("should be disabled by changing the default annotation[Slow] [Serial] [Disruptive]", func() { framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") scName := getDefaultStorageClassName(c) test := storageClassTest{ name: "default", claimSize: "2Gi", } By("setting the is-default StorageClass annotation to false") verifyDefaultStorageClass(c, scName, true) defer updateDefaultStorageClass(c, scName, "true") updateDefaultStorageClass(c, scName, "false") By("creating a claim with default storageclass and expecting it to timeout") claim := newClaim(test, ns, "default") claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim) Expect(err).NotTo(HaveOccurred()) defer func() { framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, claim.Name, ns)) }() // The claim should timeout phase:Pending err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionTimeout) Expect(err).To(HaveOccurred()) framework.Logf(err.Error()) claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(claim.Status.Phase).To(Equal(v1.ClaimPending)) }) // Modifying the default storage class can be disruptive to other tests that depend on it It("should be disabled by removing the default annotation[Slow] [Serial] [Disruptive]", func() { framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") scName := getDefaultStorageClassName(c) test := storageClassTest{ name: "default", claimSize: "2Gi", } By("removing the is-default StorageClass annotation") verifyDefaultStorageClass(c, scName, true) defer updateDefaultStorageClass(c, scName, "true") updateDefaultStorageClass(c, scName, "") By("creating a claim with default storageclass and expecting it to timeout") 
claim := newClaim(test, ns, "default") claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim) Expect(err).NotTo(HaveOccurred()) defer func() { framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, claim.Name, ns)) }() // The claim should timeout phase:Pending err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionTimeout) Expect(err).To(HaveOccurred()) framework.Logf(err.Error()) claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(claim.Status.Phase).To(Equal(v1.ClaimPending)) }) }) }) func getDefaultStorageClassName(c clientset.Interface) string { list, err := c.StorageV1().StorageClasses().List(metav1.ListOptions{}) if err != nil { framework.Failf("Error listing storage classes: %v", err) } var scName string for _, sc := range list.Items { if storageutil.IsDefaultAnnotation(sc.ObjectMeta) { if len(scName) != 0 { framework.Failf("Multiple default storage classes found: %q and %q", scName, sc.Name) } scName = sc.Name } } if len(scName) == 0 { framework.Failf("No default storage class found") } framework.Logf("Default storage class: %q", scName) return scName } func verifyDefaultStorageClass(c clientset.Interface, scName string, expectedDefault bool) { sc, err := c.StorageV1().StorageClasses().Get(scName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(storageutil.IsDefaultAnnotation(sc.ObjectMeta)).To(Equal(expectedDefault)) } func updateDefaultStorageClass(c clientset.Interface, scName string, defaultStr string) { sc, err := c.StorageV1().StorageClasses().Get(scName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) if defaultStr == "" { delete(sc.Annotations, storageutil.BetaIsDefaultStorageClassAnnotation) delete(sc.Annotations, storageutil.IsDefaultStorageClassAnnotation) } else { if sc.Annotations == nil { sc.Annotations = make(map[string]string) } 
sc.Annotations[storageutil.BetaIsDefaultStorageClassAnnotation] = defaultStr sc.Annotations[storageutil.IsDefaultStorageClassAnnotation] = defaultStr } sc, err = c.StorageV1().StorageClasses().Update(sc) Expect(err).NotTo(HaveOccurred()) expectedDefault := false if defaultStr == "true" { expectedDefault = true } verifyDefaultStorageClass(c, scName, expectedDefault) } func newClaim(t storageClassTest, ns, suffix string) *v1.PersistentVolumeClaim { claim := v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "pvc-", Namespace: ns, }, Spec: v1.PersistentVolumeClaimSpec{ AccessModes: []v1.PersistentVolumeAccessMode{ v1.ReadWriteOnce, }, Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceName(v1.ResourceStorage): resource.MustParse(t.claimSize), }, }, }, } return &claim } // runInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory. func runInPodWithVolume(c clientset.Interface, ns, claimName, command string) { pod := &v1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ GenerateName: "pvc-volume-tester-", }, Spec: v1.PodSpec{ Containers: []v1.Container{ { Name: "volume-tester", Image: "gcr.io/google_containers/busybox:1.24", Command: []string{"/bin/sh"}, Args: []string{"-c", command}, VolumeMounts: []v1.VolumeMount{ { Name: "my-volume", MountPath: "/mnt/test", }, }, }, }, RestartPolicy: v1.RestartPolicyNever, Volumes: []v1.Volume{ { Name: "my-volume", VolumeSource: v1.VolumeSource{ PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ ClaimName: claimName, ReadOnly: false, }, }, }, }, }, } pod, err := c.CoreV1().Pods(ns).Create(pod) framework.ExpectNoError(err, "Failed to create pod: %v", err) defer func() { framework.DeletePodOrFail(c, ns, pod.Name) }() framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace)) } func getDefaultPluginName() string { switch { case framework.ProviderIs("gke"), 
framework.ProviderIs("gce"): return "kubernetes.io/gce-pd" case framework.ProviderIs("aws"): return "kubernetes.io/aws-ebs" case framework.ProviderIs("openstack"): return "kubernetes.io/cinder" case framework.ProviderIs("vsphere"): return "kubernetes.io/vsphere-volume" case framework.ProviderIs("azure"): return "kubernetes.io/azure-disk" } return "" } func newStorageClass(t storageClassTest, ns string, suffix string) *storage.StorageClass { pluginName := t.provisioner if pluginName == "" { pluginName = getDefaultPluginName() } if suffix == "" { suffix = "sc" } return &storage.StorageClass{ TypeMeta: metav1.TypeMeta{ Kind: "StorageClass", }, ObjectMeta: metav1.ObjectMeta{ // Name must be unique, so let's base it on namespace name Name: ns + "-" + suffix, }, Provisioner: pluginName, Parameters: t.parameters, } } // TODO: remove when storage.k8s.io/v1beta1 and beta storage class annotations // are removed. func newBetaStorageClass(t storageClassTest, suffix string) *storagebeta.StorageClass { pluginName := t.provisioner if pluginName == "" { pluginName = getDefaultPluginName() } if suffix == "" { suffix = "default" } return &storagebeta.StorageClass{ TypeMeta: metav1.TypeMeta{ Kind: "StorageClass", }, ObjectMeta: metav1.ObjectMeta{ GenerateName: suffix + "-", }, Provisioner: pluginName, Parameters: t.parameters, } } func startExternalProvisioner(c clientset.Interface, ns string) *v1.Pod { podClient := c.CoreV1().Pods(ns) provisionerPod := &v1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ GenerateName: "external-provisioner-", }, Spec: v1.PodSpec{ Containers: []v1.Container{ { Name: "nfs-provisioner", Image: "quay.io/kubernetes_incubator/nfs-provisioner:v1.0.6", SecurityContext: &v1.SecurityContext{ Capabilities: &v1.Capabilities{ Add: []v1.Capability{"DAC_READ_SEARCH"}, }, }, Args: []string{ "-provisioner=" + externalPluginName, "-grace-period=0", }, Ports: []v1.ContainerPort{ {Name: "nfs", ContainerPort: 2049}, 
{Name: "mountd", ContainerPort: 20048}, {Name: "rpcbind", ContainerPort: 111}, {Name: "rpcbind-udp", ContainerPort: 111, Protocol: v1.ProtocolUDP}, }, Env: []v1.EnvVar{ { Name: "POD_IP", ValueFrom: &v1.EnvVarSource{ FieldRef: &v1.ObjectFieldSelector{ FieldPath: "status.podIP", }, }, }, }, ImagePullPolicy: v1.PullIfNotPresent, VolumeMounts: []v1.VolumeMount{ { Name: "export-volume", MountPath: "/export", }, }, }, }, Volumes: []v1.Volume{ { Name: "export-volume", VolumeSource: v1.VolumeSource{ EmptyDir: &v1.EmptyDirVolumeSource{}, }, }, }, }, } provisionerPod, err := podClient.Create(provisionerPod) framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod)) By("locating the provisioner pod") pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err) return pod } // waitForProvisionedVolumesDelete is a polling wrapper to scan all PersistentVolumes for any associated to the test's // StorageClass. Returns either an error and nil values or the remaining PVs and their count. 
func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]*v1.PersistentVolume, error) { var remainingPVs []*v1.PersistentVolume err := wait.Poll(10*time.Second, 300*time.Second, func() (bool, error) { remainingPVs = []*v1.PersistentVolume{} allPVs, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{}) if err != nil { return true, err } for _, pv := range allPVs.Items { if v1helper.GetPersistentVolumeClass(&pv) == scName { remainingPVs = append(remainingPVs, &pv) } } if len(remainingPVs) > 0 { return false, nil // Poll until no PVs remain } else { return true, nil // No PVs remain } }) return remainingPVs, err } // deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found" func deleteStorageClass(c clientset.Interface, className string) { err := c.StorageV1().StorageClasses().Delete(className, nil) if err != nil && !apierrs.IsNotFound(err) { Expect(err).NotTo(HaveOccurred()) } } // deleteProvisionedVolumes [gce||gke only] iteratively deletes persistent volumes and attached GCE PDs. func deleteProvisionedVolumesAndDisks(c clientset.Interface, pvs []*v1.PersistentVolume) { for _, pv := range pvs { framework.ExpectNoError(framework.DeletePDWithRetry(pv.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName)) framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name)) } } func getRandomCloudZone(c clientset.Interface) string { nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) // collect values of zone label from all nodes zones := sets.NewString() for _, node := range nodes.Items { if zone, found := node.Labels[kubeletapis.LabelZoneFailureDomain]; found { zones.Insert(zone) } } // return "" in case that no node has zone label zone, _ := zones.PopAny() return zone }
apache-2.0
MagicWiz/log4j2
log4j-core/src/main/java/org/apache/logging/log4j/core/util/NetUtils.java
3861
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache license, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the license for the specific language governing permissions and * limitations under the license. */ package org.apache.logging.log4j.core.util; import java.io.File; import java.net.InetAddress; import java.net.MalformedURLException; import java.net.NetworkInterface; import java.net.SocketException; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; import java.net.UnknownHostException; import java.util.Enumeration; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.status.StatusLogger; /** * Networking-related convenience methods. */ public final class NetUtils { private static final Logger LOGGER = StatusLogger.getLogger(); private static final String UNKNOWN_LOCALHOST = "UNKNOWN_LOCALHOST"; private NetUtils() { // empty } /** * This method gets the network name of the machine we are running on. Returns "UNKNOWN_LOCALHOST" in the unlikely * case where the host name cannot be found. 
* * @return String the name of the local host */ public static String getLocalHostname() { try { final InetAddress addr = InetAddress.getLocalHost(); return addr.getHostName(); } catch (final UnknownHostException uhe) { try { final Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces(); while (interfaces.hasMoreElements()) { final NetworkInterface nic = interfaces.nextElement(); final Enumeration<InetAddress> addresses = nic.getInetAddresses(); while (addresses.hasMoreElements()) { final InetAddress address = addresses.nextElement(); if (!address.isLoopbackAddress()) { final String hostname = address.getHostName(); if (hostname != null) { return hostname; } } } } } catch (final SocketException se) { LOGGER.error("Could not determine local host name", uhe); return UNKNOWN_LOCALHOST; } LOGGER.error("Could not determine local host name", uhe); return UNKNOWN_LOCALHOST; } } /** * Converts a URI string or file path to a URI object. * * @param path the URI string or path * @return the URI object */ public static URI toURI(final String path) { try { // Resolves absolute URI return new URI(path); } catch (final URISyntaxException e) { // A file path or a Apache Commons VFS URL might contain blanks. // A file path may start with a driver letter try { final URL url = new URL(path); return new URI(url.getProtocol(), url.getHost(), url.getPath(), null); } catch (MalformedURLException | URISyntaxException nestedEx) { return new File(path).toURI(); } } } }
apache-2.0
tony810430/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/job/metrics/JobManagerMetricsHandler.java
2409
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.runtime.rest.handler.job.metrics; import org.apache.flink.api.common.time.Time; import org.apache.flink.runtime.rest.handler.HandlerRequest; import org.apache.flink.runtime.rest.handler.legacy.metrics.MetricFetcher; import org.apache.flink.runtime.rest.handler.legacy.metrics.MetricStore; import org.apache.flink.runtime.rest.messages.EmptyRequestBody; import org.apache.flink.runtime.rest.messages.job.metrics.JobManagerMetricsHeaders; import org.apache.flink.runtime.rest.messages.job.metrics.JobManagerMetricsMessageParameters; import org.apache.flink.runtime.webmonitor.RestfulGateway; import org.apache.flink.runtime.webmonitor.retriever.GatewayRetriever; import javax.annotation.Nullable; import java.util.Map; /** Handler that returns JobManager metrics. */ public class JobManagerMetricsHandler extends AbstractMetricsHandler<JobManagerMetricsMessageParameters> { public JobManagerMetricsHandler( final GatewayRetriever<? 
extends RestfulGateway> leaderRetriever, final Time timeout, final Map<String, String> headers, final MetricFetcher metricFetcher) { super( leaderRetriever, timeout, headers, JobManagerMetricsHeaders.getInstance(), metricFetcher); } @Nullable @Override protected MetricStore.ComponentMetricStore getComponentMetricStore( final HandlerRequest<EmptyRequestBody> request, final MetricStore metricStore) { return metricStore.getJobManagerMetricStore(); } }
apache-2.0
jiekechoo/presto
presto-main/src/main/java/com/facebook/presto/byteCode/expression/CastByteCodeExpression.java
10248
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.facebook.presto.byteCode.expression;

import com.facebook.presto.byteCode.ByteCodeBlock;
import com.facebook.presto.byteCode.ByteCodeNode;
import com.facebook.presto.byteCode.MethodGenerationContext;
import com.facebook.presto.byteCode.OpCode;
import com.facebook.presto.byteCode.ParameterizedType;
import com.google.common.collect.ImmutableList;

import java.util.List;

import static com.facebook.presto.byteCode.ParameterizedType.type;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.primitives.Primitives.wrap;
import static java.lang.String.format;

/**
 * Byte code expression that casts {@code instance} to a target type.
 *
 * <p>Supports primitive widening/narrowing via JVM conversion opcodes, boxing
 * ({@code Integer.valueOf(...)}-style static calls), unboxing ({@code intValue()}-style
 * virtual calls), and reference casts via {@code checkcast}.
 */
class CastByteCodeExpression
        extends ByteCodeExpression
{
    private final ByteCodeExpression instance;

    public CastByteCodeExpression(ByteCodeExpression instance, ParameterizedType type)
    {
        super(type);

        this.instance = checkNotNull(instance, "instance is null");
        checkArgument(type.getPrimitiveType() != void.class, "Type %s can not be cast to %s", instance.getType(), type);

        // if we have a primitive to object or object to primitive conversion, it must be an exact boxing or unboxing conversion
        if (instance.getType().isPrimitive() != type.isPrimitive()) {
            checkArgument(
                    unwrapPrimitiveType(instance.getType()) == unwrapPrimitiveType(type),
                    "Type %s can not be cast to %s",
                    instance.getType(),
                    type);
        }
    }

    @Override
    public ByteCodeNode getByteCode(MethodGenerationContext generationContext)
    {
        // Evaluate the operand first; the conversion operates on the value it leaves on the stack.
        ByteCodeBlock block = new ByteCodeBlock().append(instance.getByteCode(generationContext));

        if (instance.getType().isPrimitive()) {
            Class<?> sourceType = instance.getType().getPrimitiveType();
            castPrimitiveToPrimitive(block, sourceType, unwrapPrimitiveType(getType()));

            // insert boxing conversion
            if (!getType().isPrimitive()) {
                Class<?> primitiveTargetType = unwrapPrimitiveType(getType());
                return block.invokeStatic(getType(), "valueOf", getType(), type(primitiveTargetType));
            }

            return block;
        }
        else if (getType().isPrimitive()) {
            // unbox
            Class<?> targetType = getType().getPrimitiveType();
            return block.invokeVirtual(wrap(targetType), targetType.getSimpleName() + "Value", targetType);
        }
        else {
            // plain reference cast
            block.checkCast(getType());
        }

        return block;
    }

    /**
     * Appends the JVM conversion opcode(s) that turn a {@code sourceType} value on the
     * stack into a {@code targetType} value.  Conversions that the JVM performs
     * implicitly (e.g. byte -> int, since small ints share the int stack slot) append
     * nothing.  Unsupported pairs (e.g. boolean -> long) throw.
     */
    private static ByteCodeBlock castPrimitiveToPrimitive(ByteCodeBlock block, Class<?> sourceType, Class<?> targetType)
    {
        if (sourceType == boolean.class) {
            // boolean is only castable to itself (or to int, handled below via the int branch)
            if (targetType == boolean.class) {
                return block;
            }
        }
        if (sourceType == byte.class) {
            if (targetType == byte.class) {
                return block;
            }
            // NOTE(review): byte -> char appends no I2C here, unlike short -> char below;
            // a Java-style (char)(byte) cast would truncate to 16 bits — confirm intended.
            if (targetType == char.class) {
                return block;
            }
            if (targetType == short.class) {
                return block;
            }
            if (targetType == int.class) {
                return block;
            }
            if (targetType == long.class) {
                return block.append(OpCode.I2L);
            }
            if (targetType == float.class) {
                return block.append(OpCode.I2F);
            }
            if (targetType == double.class) {
                return block.append(OpCode.I2D);
            }
        }
        if (sourceType == char.class) {
            if (targetType == byte.class) {
                return block.append(OpCode.I2B);
            }
            if (targetType == char.class) {
                return block;
            }
            if (targetType == short.class) {
                return block;
            }
            if (targetType == int.class) {
                return block;
            }
            if (targetType == long.class) {
                return block.append(OpCode.I2L);
            }
            if (targetType == float.class) {
                return block.append(OpCode.I2F);
            }
            if (targetType == double.class) {
                return block.append(OpCode.I2D);
            }
        }
        if (sourceType == short.class) {
            if (targetType == byte.class) {
                return block.append(OpCode.I2B);
            }
            if (targetType == char.class) {
                return block.append(OpCode.I2C);
            }
            if (targetType == short.class) {
                return block;
            }
            if (targetType == int.class) {
                return block;
            }
            if (targetType == long.class) {
                return block.append(OpCode.I2L);
            }
            if (targetType == float.class) {
                return block.append(OpCode.I2F);
            }
            if (targetType == double.class) {
                return block.append(OpCode.I2D);
            }
        }
        if (sourceType == int.class) {
            // int -> boolean is a no-op reinterpretation (JVM booleans are ints on the stack)
            if (targetType == boolean.class) {
                return block;
            }
            if (targetType == byte.class) {
                return block.append(OpCode.I2B);
            }
            if (targetType == char.class) {
                return block.append(OpCode.I2C);
            }
            if (targetType == short.class) {
                return block.append(OpCode.I2S);
            }
            if (targetType == int.class) {
                return block;
            }
            if (targetType == long.class) {
                return block.append(OpCode.I2L);
            }
            if (targetType == float.class) {
                return block.append(OpCode.I2F);
            }
            if (targetType == double.class) {
                return block.append(OpCode.I2D);
            }
        }
        if (sourceType == long.class) {
            // narrowing to sub-int types goes through int first (L2I, then I2x)
            if (targetType == byte.class) {
                return block.append(OpCode.L2I).append(OpCode.I2B);
            }
            if (targetType == char.class) {
                return block.append(OpCode.L2I).append(OpCode.I2C);
            }
            if (targetType == short.class) {
                return block.append(OpCode.L2I).append(OpCode.I2S);
            }
            if (targetType == int.class) {
                return block.append(OpCode.L2I);
            }
            if (targetType == long.class) {
                return block;
            }
            if (targetType == float.class) {
                return block.append(OpCode.L2F);
            }
            if (targetType == double.class) {
                return block.append(OpCode.L2D);
            }
        }
        if (sourceType == float.class) {
            if (targetType == byte.class) {
                return block.append(OpCode.F2I).append(OpCode.I2B);
            }
            if (targetType == char.class) {
                return block.append(OpCode.F2I).append(OpCode.I2C);
            }
            if (targetType == short.class) {
                return block.append(OpCode.F2I).append(OpCode.I2S);
            }
            if (targetType == int.class) {
                return block.append(OpCode.F2I);
            }
            if (targetType == long.class) {
                return block.append(OpCode.F2L);
            }
            if (targetType == float.class) {
                return block;
            }
            if (targetType == double.class) {
                return block.append(OpCode.F2D);
            }
        }
        if (sourceType == double.class) {
            if (targetType == byte.class) {
                return block.append(OpCode.D2I).append(OpCode.I2B);
            }
            if (targetType == char.class) {
                return block.append(OpCode.D2I).append(OpCode.I2C);
            }
            if (targetType == short.class) {
                return block.append(OpCode.D2I).append(OpCode.I2S);
            }
            if (targetType == int.class) {
                return block.append(OpCode.D2I);
            }
            if (targetType == long.class) {
                return block.append(OpCode.D2L);
            }
            if (targetType == float.class) {
                return block.append(OpCode.D2F);
            }
            if (targetType == double.class) {
                return block;
            }
        }
        throw new IllegalArgumentException(format("Type %s can not be cast to %s", sourceType, targetType));
    }

    /**
     * Returns the primitive class for a primitive or wrapper type, or {@code null}
     * for any other reference type.
     */
    private static Class<?> unwrapPrimitiveType(ParameterizedType type)
    {
        if (type.isPrimitive()) {
            return type.getPrimitiveType();
        }
        switch (type.getJavaClassName()) {
            case "java.lang.Boolean":
                return boolean.class;
            case "java.lang.Byte":
                return byte.class;
            case "java.lang.Character":
                return char.class;
            case "java.lang.Short":
                return short.class;
            case "java.lang.Integer":
                return int.class;
            case "java.lang.Long":
                return long.class;
            case "java.lang.Float":
                return float.class;
            case "java.lang.Double":
                return double.class;
            default:
                return null;
        }
    }

    @Override
    protected String formatOneLine()
    {
        return "((" + getType().getSimpleName() + ") " + instance + ")";
    }

    @Override
    public List<ByteCodeNode> getChildNodes()
    {
        return ImmutableList.<ByteCodeNode>of(instance);
    }
}
apache-2.0
yamahata/neutron
neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_cfg.py
10497
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Oleg Bondarev (obondarev@mirantis.com)

import contextlib

import mock

from neutron.services.loadbalancer.drivers.haproxy import cfg
from neutron.tests import base


class TestHaproxyCfg(base.BaseTestCase):
    """Unit tests for the haproxy config rendering helpers in ``cfg``."""

    def test_save_config(self):
        """save_config() joins the four sections and writes them atomically."""
        with contextlib.nested(
            mock.patch('neutron.services.loadbalancer.'
                       'drivers.haproxy.cfg._build_global'),
            mock.patch('neutron.services.loadbalancer.'
                       'drivers.haproxy.cfg._build_defaults'),
            mock.patch('neutron.services.loadbalancer.'
                       'drivers.haproxy.cfg._build_frontend'),
            mock.patch('neutron.services.loadbalancer.'
                       'drivers.haproxy.cfg._build_backend'),
            mock.patch('neutron.agent.linux.utils.replace_file')
        ) as (b_g, b_d, b_f, b_b, replace):
            test_config = ['globals', 'defaults', 'frontend', 'backend']
            b_g.return_value = [test_config[0]]
            b_d.return_value = [test_config[1]]
            b_f.return_value = [test_config[2]]
            b_b.return_value = [test_config[3]]

            cfg.save_config('test_path', mock.Mock())
            replace.assert_called_once_with('test_path',
                                            '\n'.join(test_config))

    def test_build_global(self):
        """The global section embeds the stats socket path and group."""
        expected_opts = ['global',
                         '\tdaemon',
                         '\tuser nobody',
                         '\tgroup test_group',
                         '\tlog /dev/log local0',
                         '\tlog /dev/log local1 notice',
                         '\tstats socket test_path mode 0666 level user']
        opts = cfg._build_global(mock.Mock(), 'test_path', 'test_group')
        self.assertEqual(expected_opts, list(opts))

    def test_build_defaults(self):
        """The defaults section is static."""
        expected_opts = ['defaults',
                         '\tlog global',
                         '\tretries 3',
                         '\toption redispatch',
                         '\ttimeout connect 5000',
                         '\ttimeout client 50000',
                         '\ttimeout server 50000']
        opts = cfg._build_defaults(mock.Mock())
        self.assertEqual(expected_opts, list(opts))

    def test_build_frontend(self):
        """maxconn is emitted only for a non-negative connection limit."""
        test_config = {'vip': {'id': 'vip_id',
                               'protocol': 'HTTP',
                               'port': {'fixed_ips': [
                                   {'ip_address': '10.0.0.2'}]
                               },
                               'protocol_port': 80,
                               'connection_limit': 2000,
                               },
                       'pool': {'id': 'pool_id'}}
        expected_opts = ['frontend vip_id',
                         '\toption tcplog',
                         '\tbind 10.0.0.2:80',
                         '\tmode http',
                         '\tdefault_backend pool_id',
                         '\tmaxconn 2000',
                         '\toption forwardfor']
        opts = cfg._build_frontend(test_config)
        self.assertEqual(expected_opts, list(opts))

        test_config['vip']['connection_limit'] = -1
        expected_opts.remove('\tmaxconn 2000')
        opts = cfg._build_frontend(test_config)
        self.assertEqual(expected_opts, list(opts))

    def test_build_backend(self):
        """Members in any state are listed; cookie ids follow member order."""
        test_config = {'pool': {'id': 'pool_id',
                                'protocol': 'HTTP',
                                'lb_method': 'ROUND_ROBIN'},
                       'members': [{'status': 'ACTIVE',
                                    'admin_state_up': True,
                                    'id': 'member1_id',
                                    'address': '10.0.0.3',
                                    'protocol_port': 80,
                                    'weight': 1},
                                   {'status': 'INACTIVE',
                                    'admin_state_up': True,
                                    'id': 'member2_id',
                                    'address': '10.0.0.4',
                                    'protocol_port': 80,
                                    'weight': 1},
                                   {'status': 'PENDING_CREATE',
                                    'admin_state_up': True,
                                    'id': 'member3_id',
                                    'address': '10.0.0.5',
                                    'protocol_port': 80,
                                    'weight': 1}],
                       'healthmonitors': [{'admin_state_up': True,
                                           'delay': 3,
                                           'max_retries': 4,
                                           'timeout': 2,
                                           'type': 'TCP'}],
                       'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}}
        expected_opts = ['backend pool_id',
                         '\tmode http',
                         '\tbalance roundrobin',
                         '\toption forwardfor',
                         '\ttimeout check 2s',
                         '\tcookie SRV insert indirect nocache',
                         '\tserver member1_id 10.0.0.3:80 weight 1 '
                         'check inter 3s fall 4 cookie 0',
                         '\tserver member2_id 10.0.0.4:80 weight 1 '
                         'check inter 3s fall 4 cookie 1',
                         '\tserver member3_id 10.0.0.5:80 weight 1 '
                         'check inter 3s fall 4 cookie 2']
        opts = cfg._build_backend(test_config)
        self.assertEqual(expected_opts, list(opts))

    def test_get_server_health_option(self):
        """Disabled monitors yield nothing; HTTPS adds http/ssl checks."""
        test_config = {'healthmonitors': [{'admin_state_up': False,
                                           'delay': 3,
                                           'max_retries': 4,
                                           'timeout': 2,
                                           'type': 'TCP',
                                           'http_method': 'GET',
                                           'url_path': '/',
                                           'expected_codes': '200'}]}
        # Fixed: this assertion was accidentally duplicated verbatim.
        self.assertEqual(('', []), cfg._get_server_health_option(test_config))

        test_config['healthmonitors'][0]['admin_state_up'] = True
        expected = (' check inter 3s fall 4', ['timeout check 2s'])
        self.assertEqual(expected, cfg._get_server_health_option(test_config))

        test_config['healthmonitors'][0]['type'] = 'HTTPS'
        expected = (' check inter 3s fall 4',
                    ['timeout check 2s',
                     'option httpchk GET /',
                     'http-check expect rstatus 200',
                     'option ssl-hello-chk'])
        self.assertEqual(expected, cfg._get_server_health_option(test_config))

    def test_has_http_cookie_persistence(self):
        """Only the HTTP_COOKIE persistence type counts."""
        config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}}
        self.assertTrue(cfg._has_http_cookie_persistence(config))

        config = {'vip': {'session_persistence': {'type': 'SOURCE_IP'}}}
        self.assertFalse(cfg._has_http_cookie_persistence(config))

        config = {'vip': {'session_persistence': {}}}
        self.assertFalse(cfg._has_http_cookie_persistence(config))

    def test_get_session_persistence(self):
        """Each persistence type maps to its haproxy directive(s)."""
        config = {'vip': {'session_persistence': {'type': 'SOURCE_IP'}}}
        self.assertEqual(cfg._get_session_persistence(config),
                         ['stick-table type ip size 10k', 'stick on src'])

        config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}}
        self.assertEqual(cfg._get_session_persistence(config),
                         ['cookie SRV insert indirect nocache'])

        config = {'vip': {'session_persistence': {'type': 'APP_COOKIE',
                                                  'cookie_name': 'test'}}}
        self.assertEqual(cfg._get_session_persistence(config),
                         ['appsession test len 56 timeout 3h'])

        # APP_COOKIE without a cookie name, or an unknown type, yields nothing.
        config = {'vip': {'session_persistence': {'type': 'APP_COOKIE'}}}
        self.assertEqual(cfg._get_session_persistence(config), [])

        config = {'vip': {'session_persistence': {'type': 'UNSUPPORTED'}}}
        self.assertEqual(cfg._get_session_persistence(config), [])

    def test_expand_expected_codes(self):
        """Comma lists and dash ranges expand to a set; empty ranges vanish."""
        exp_codes = ''
        self.assertEqual(cfg._expand_expected_codes(exp_codes), set([]))
        exp_codes = '200'
        self.assertEqual(cfg._expand_expected_codes(exp_codes), set(['200']))
        exp_codes = '200, 201'
        self.assertEqual(cfg._expand_expected_codes(exp_codes),
                         set(['200', '201']))
        exp_codes = '200, 201,202'
        self.assertEqual(cfg._expand_expected_codes(exp_codes),
                         set(['200', '201', '202']))
        exp_codes = '200-202'
        self.assertEqual(cfg._expand_expected_codes(exp_codes),
                         set(['200', '201', '202']))
        exp_codes = '200-202, 205'
        self.assertEqual(cfg._expand_expected_codes(exp_codes),
                         set(['200', '201', '202', '205']))
        exp_codes = '200, 201-203'
        self.assertEqual(cfg._expand_expected_codes(exp_codes),
                         set(['200', '201', '202', '203']))
        exp_codes = '200, 201-203, 205'
        self.assertEqual(cfg._expand_expected_codes(exp_codes),
                         set(['200', '201', '202', '203', '205']))
        # Reversed range 201-200 is empty, leaving only the single code.
        exp_codes = '201-200, 205'
        self.assertEqual(cfg._expand_expected_codes(exp_codes), set(['205']))
apache-2.0
JimSadler/theburgessprocess2
RealTalkBlog/wp-content/themes/next-saturday/content-gallery.php
2603
<?php
/**
 * Gallery post-format template part.
 *
 * Single view renders the full content; archive views render a thumbnail of
 * the first gallery image, a photo-count summary, and the excerpt.
 *
 * @package Next_Saturday
 */
?>

<article id="post-<?php the_ID(); ?>" <?php post_class( 'post' ); ?>>
	<?php next_saturday_entry_date(); ?>
	<div class="entry-container">
		<header class="entry-header">
			<h1 class="post-title">
			<?php if ( ! is_single() ) : ?>
				<a href="<?php the_permalink(); ?>" rel="bookmark"><?php the_title(); ?></a>
			<?php else : ?>
				<?php the_title(); ?>
			<?php endif; ?>
			</h1>
		</header><!-- .entry-header -->

		<div class="entry-content">
			<?php if ( is_single() ) : ?>
				<?php the_content( __( 'Continue reading <span class="meta-nav">&rarr;</span>', 'next-saturday' ) ); ?>
			<?php else : ?>
				<?php
				// Pull the image ids from the first [gallery] shortcode in the post.
				$pattern = get_shortcode_regex();
				preg_match( "/$pattern/s", get_the_content(), $match );
				$atts = isset( $match[3] ) ? shortcode_parse_atts( $match[3] ) : array();
				$images = isset( $atts['ids'] ) ? explode( ',', $atts['ids'] ) : false;
				if ( ! $images ) :
					// No ids= attribute: fall back to images attached to this post.
					$images = get_posts( array( 'post_parent' => get_the_ID(), 'fields' => 'ids', 'post_type' => 'attachment', 'post_mime_type' => 'image', 'orderby' => 'menu_order', 'order' => 'ASC', 'numberposts' => 999, 'suppress_filters' => false ) );
				endif;
				if ( $images ) :
					$total_images = count( $images );
					// First image becomes the thumbnail; count includes it.
					$image = array_shift( $images );
				?>
				<div class="gallery-thumb">
					<a href="<?php the_permalink(); ?>"><?php echo wp_get_attachment_image( $image, 'large' ); ?></a>
				</div><!-- .gallery-thumb -->
				<p><span class="gallery-summary"><?php printf( _n( 'This gallery contains <a href="%1$s" rel="bookmark">%2$s photo</a>.', 'This gallery contains <a href="%1$s" rel="bookmark">%2$s photos</a>.', $total_images, 'next-saturday' ), esc_url( get_permalink() ), number_format_i18n( $total_images ) ); ?></span></p>
				<?php endif; ?>
				<?php the_excerpt(); ?>
			<?php endif; ?>
			<?php wp_link_pages( array( 'before' => '<div class="page-link"><span>' . __( 'Pages:', 'next-saturday' ) . '</span>', 'after' => '</div>' ) ); ?>
		</div><!-- .entry-content -->

		<div class="entry-meta-wrap">
			<div class="entry-meta">
				<span class="comments-num"><?php comments_popup_link( __( 'Leave a comment', 'next-saturday' ), __( '1 Comment', 'next-saturday' ), __( '% Comments', 'next-saturday' ) ); ?></span>
				<?php edit_post_link( __( 'Edit', 'next-saturday' ), '<span class="edit-link">', '</span>' ); ?>
			</div>
		</div>
	</div><!-- .entry-container -->
</article><!-- #post-## -->
apache-2.0
zdary/intellij-community
platform/lang-api/src/com/intellij/task/ProjectTaskResult.java
3880
/*
 * Copyright 2000-2016 JetBrains s.r.o.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intellij.task;

import com.intellij.openapi.module.Module;
import com.intellij.openapi.roots.ProjectModelBuildableElement;
import com.intellij.util.containers.ContainerUtil;
import org.jetbrains.annotations.ApiStatus;
import org.jetbrains.annotations.NotNull;

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.BiPredicate;
import java.util.function.Predicate;
import java.util.stream.Collectors;

/**
 * Immutable summary of a project-task run: abort flag, error/warning counts,
 * and the final {@link ProjectTaskState} per executed {@link ProjectTask}.
 *
 * @author Vladislav.Soroka
 * @deprecated in favour of {@link ProjectTaskManager.Result}
 */
@ApiStatus.ScheduledForRemoval(inVersion = "2020.1")
@Deprecated
public class ProjectTaskResult {
  private final boolean myAborted;
  private final int myErrors;
  private final int myWarnings;
  // Per-task final state; empty when constructed without task details.
  private final Map<ProjectTask, ProjectTaskState> myTasksState;

  public ProjectTaskResult(boolean aborted, int errors, int warnings) {
    myAborted = aborted;
    myErrors = errors;
    myWarnings = warnings;
    myTasksState = Collections.emptyMap();
  }

  public ProjectTaskResult(boolean aborted,
                           int errors,
                           int warnings,
                           @NotNull Map<ProjectTask, ProjectTaskState> tasksState) {
    myAborted = aborted;
    myErrors = errors;
    myWarnings = warnings;
    // Defensive: callers cannot mutate the stored map afterwards.
    myTasksState = ContainerUtil.unmodifiableOrEmptyMap(tasksState);
  }

  public boolean isAborted() {
    return myAborted;
  }

  public int getErrors() {
    return myErrors;
  }

  public int getWarnings() {
    return myWarnings;
  }

  @NotNull
  public Map<ProjectTask, ProjectTaskState> getTasksState() {
    return myTasksState;
  }

  /** @return true if any (task, state) pair satisfies the predicate. */
  public boolean anyMatch(@NotNull BiPredicate<? super ProjectTask, ? super ProjectTaskState> predicate) {
    return myTasksState.entrySet().stream().anyMatch(entry -> predicate.test(entry.getKey(), entry.getValue()));
  }

  /** @return all tasks whose (task, state) pair satisfies the predicate. */
  @NotNull
  public List<ProjectTask> getTasks(@NotNull BiPredicate<? super ProjectTask, ? super ProjectTaskState> predicate) {
    return myTasksState.entrySet().stream()
      .filter(entry -> predicate.test(entry.getKey(), entry.getValue()))
      .map(Map.Entry::getKey)
      .collect(Collectors.toList());
  }

  /** @return modules of all {@link ModuleBuildTask}s whose state satisfies the predicate. */
  @NotNull
  public List<Module> getAffectedModules(@NotNull Predicate<? super ProjectTaskState> predicate) {
    return myTasksState.entrySet().stream()
      .filter(entry -> entry.getKey() instanceof ModuleBuildTask)
      .filter(entry -> predicate.test(entry.getValue()))
      .map(entry -> ((ModuleBuildTask)entry.getKey()).getModule())
      .collect(Collectors.toList());
  }

  /**
   * @return buildable elements of the requested class from all
   * {@link ProjectModelBuildTask}s whose state satisfies the predicate.
   */
  @NotNull
  public <T extends ProjectModelBuildableElement> List<T> getBuildableElements(@NotNull Class<? extends T> buildableClass,
                                                                               @NotNull Predicate<? super ProjectTaskState> predicate) {
    return myTasksState.entrySet().stream()
      .filter(entry -> entry.getKey() instanceof ProjectModelBuildTask<?>)
      .filter(entry -> buildableClass.isInstance(((ProjectModelBuildTask)entry.getKey()).getBuildableElement()))
      .filter(entry -> predicate.test(entry.getValue()))
      .map(entry -> buildableClass.cast(((ProjectModelBuildTask)entry.getKey()).getBuildableElement()))
      .collect(Collectors.toList());
  }
}
apache-2.0
IdentityServer/IdentityServer4
samples/Clients/old/MvcImplicit/Program.cs
410
using Microsoft.AspNetCore;
using Microsoft.AspNetCore.Hosting;

namespace MvcImplicit
{
    /// <summary>Console entry point that builds and runs the ASP.NET Core host.</summary>
    public class Program
    {
        // Build the web host and block until it shuts down.
        public static void Main(string[] args) => BuildWebHost(args).Run();

        /// <summary>Creates the host: default builder plus this app's Startup.</summary>
        public static IWebHost BuildWebHost(string[] args)
        {
            // CreateDefaultBuilder wires up Kestrel, configuration and logging;
            // Startup contributes services and the middleware pipeline.
            return WebHost.CreateDefaultBuilder(args)
                .UseStartup<Startup>()
                .Build();
        }
    }
}
apache-2.0
usirin/koding
go/src/koding/klient/app/update_test.go
4403
// Tests for the klient auto-updater: mount events must pause update polling
// while a mount is in progress and resume it once the mount settles.
package app_test

import (
	"errors"
	"fmt"
	"net"
	"net/http"
	"net/url"
	"sync"
	"testing"
	"time"

	"github.com/koding/logging"

	"koding/kites/tunnelproxy/discover/discovertest"
	"koding/klient/app"
	"koding/klient/remote/mount"
)

func TestUpdater(t *testing.T) {
	const timeout = 250 * time.Millisecond

	events := make(chan *mount.Event)
	defer close(events)

	s := StartUpdateServer()

	u := &app.Updater{
		Endpoint:    s.URL().String(),
		Interval:    50 * time.Millisecond,
		Log:         logging.NewCustom("updater", true),
		MountEvents: events,
	}

	go u.Run()

	if err := s.WaitForLatestReq(timeout); err != nil {
		t.Fatal(err)
	}

	// Send a mouting event and ensure no
	// update attempt was made afterwards.
	events <- &mount.Event{
		Path: "/path1",
		Type: mount.EventMounting,
	}

	// From this point update server will mark every latest request as illegal.
	s.Enable(false)

	if err := s.WaitForLatestReq(timeout); err == nil {
		t.Fatal("expected to timeout waiting for latest with disabled autoupdates")
	}

	// Send event that mouting succeeded, still no update requests expected.
	events <- &mount.Event{
		Path: "/path1",
		Type: mount.EventMounted,
	}

	if err := s.WaitForLatestReq(timeout); err == nil {
		t.Fatal("expected to timeout waiting for latest with disabled autoupdates")
	}

	// Send unmount event, but for different path that was previously reported
	// as mounted - this event should be ignored, autoupdates still disabled.
	events <- &mount.Event{
		Path: "/pathX",
		Type: mount.EventUnmounted,
	}

	// Only confirmed umount enables autoupdate, since unmounting
	// can traisition to failed - this event also does not enable autoupdates.
	events <- &mount.Event{
		Path: "/path1",
		Type: mount.EventUnmounting,
	}

	if err := s.WaitForLatestReq(timeout); err == nil {
		t.Fatal("expected to timeout waiting for latest with disabled autoupdates")
	}

	// Send unmount event for previous mount and expect autoupdates to turn on.
	events <- &mount.Event{
		Path: "/path1",
		Type: mount.EventUnmounted,
	}

	s.Enable(true)

	if err := s.WaitForLatestReq(timeout); err != nil {
		t.Fatal(err)
	}

	// Send mounting event, expect autoupdates to turn off, send the mount
	// was failed and expect the autoupdates to turn on again.
	events <- &mount.Event{
		Path: "/path1",
		Type: mount.EventMounting,
	}

	s.Enable(false)

	if err := s.WaitForLatestReq(timeout); err == nil {
		t.Fatal("expected to timeout waiting for latest with disabled autoupdates")
	}

	events <- &mount.Event{
		Path: "/path1",
		Type: mount.EventMounting,
		Err:  errors.New("mount failed"),
	}

	s.Enable(true)

	if err := s.WaitForLatestReq(timeout); err != nil {
		t.Fatal(err)
	}

	// Ensure no update request was made while the autoupdates
	// were expected to be disabled.
	if err := s.Err(); err != nil {
		t.Fatal(err)
	}
}

// UpdateServer is a fake update endpoint. While disabled, any incoming
// "latest" request is recorded as illegal and later reported by Err.
type UpdateServer struct {
	mu          sync.Mutex // guards the fields below
	lis         net.Listener
	enabled     bool
	reqEnabled  []*http.Request // requests served while updates were allowed
	reqDisabled []*http.Request // requests served while updates were disallowed
}

// StartUpdateServer listens on an ephemeral port and serves the fake endpoint.
func StartUpdateServer() *UpdateServer {
	l, err := net.Listen("tcp", ":0")
	if err != nil {
		panic(err)
	}

	wl := discovertest.NewListener(l)

	u := &UpdateServer{
		lis:     wl,
		enabled: true,
	}

	go http.Serve(wl, u)
	wl.Wait()

	return u
}

// ServeHTTP records the request in the bucket matching the current enabled
// state; it always replies 400 so the updater never actually updates.
func (u *UpdateServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	u.mu.Lock()
	if u.enabled {
		u.reqEnabled = append(u.reqEnabled, req)
	} else {
		u.reqDisabled = append(u.reqDisabled, req)
	}
	u.mu.Unlock()

	w.WriteHeader(http.StatusBadRequest)
}

// WaitForLatestReq polls until at least one new legal request arrives,
// or fails after timeout.
func (u *UpdateServer) WaitForLatestReq(timeout time.Duration) error {
	t := time.After(timeout)
	n := u.LatestReqNum()

	for {
		select {
		case <-t:
			return fmt.Errorf("timed out waiting for latest req after %s", timeout)
		default:
			if u.LatestReqNum() > n {
				return nil
			}

			time.Sleep(50 * time.Millisecond)
		}
	}
}

// LatestReqNum returns how many legal (enabled-state) requests were served.
func (u *UpdateServer) LatestReqNum() int {
	u.mu.Lock()
	defer u.mu.Unlock()

	return len(u.reqEnabled)
}

// Err reports a failure if any request arrived while updates were disabled.
func (u *UpdateServer) Err() error {
	u.mu.Lock()
	defer u.mu.Unlock()

	if len(u.reqDisabled) != 0 {
		return fmt.Errorf("%d latest requests was served when the updater was disabled", len(u.reqDisabled))
	}

	return nil
}

// Enable toggles whether incoming requests are considered legal.
func (u *UpdateServer) Enable(b bool) {
	u.mu.Lock()
	u.enabled = b
	u.mu.Unlock()
}

// URL returns the server's base endpoint.
func (u *UpdateServer) URL() *url.URL {
	return &url.URL{
		Scheme: "http",
		Host:   u.lis.Addr().String(),
		Path:   "/",
	}
}
apache-2.0
minestarks/TypeScript
tests/cases/conformance/salsa/typeFromPrototypeAssignment4.ts
581
// @allowJs: true
// @checkJs: true
// @emitDeclarationOnly: true
// @declaration: true
// @outDir: out
// @Filename: a.js
function Multimap4() {
    this._map = {};
};

Multimap4["prototype"] = {
    /**
     * @param {string} key
     * @returns {number} the value ok
     */
    get(key) {
        return this._map[key + ''];
    }
};

Multimap4["prototype"]["add-on"] = function() {};
Multimap4["prototype"]["addon"] = function() {};
Multimap4["prototype"]["__underscores__"] = function() {};

const map4 = new Multimap4();
map4.get("");
map4["add-on"]();
map4.addon();
map4.__underscores__();

// NOTE(review): compiler conformance fixture — the leading "// @..." lines are
// test-harness directives, not ordinary comments; edits to this file change
// the test's accepted baselines, so its content should be treated as data.
apache-2.0
itchanges/tddl
tddl-optimizer/src/main/java/com/taobao/tddl/optimizer/rule/RuleStatManager.java
3782
package com.taobao.tddl.optimizer.rule;

import java.util.concurrent.ExecutionException;

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.taobao.tddl.common.exception.TddlException;
import com.taobao.tddl.common.model.Group;
import com.taobao.tddl.common.model.Matrix;
import com.taobao.tddl.common.model.lifecycle.AbstractLifecycle;
import com.taobao.tddl.optimizer.costbased.esitimater.stat.KVIndexStat;
import com.taobao.tddl.optimizer.costbased.esitimater.stat.LocalStatManager;
import com.taobao.tddl.optimizer.costbased.esitimater.stat.RepoStatManager;
import com.taobao.tddl.optimizer.costbased.esitimater.stat.StatManager;
import com.taobao.tddl.optimizer.costbased.esitimater.stat.TableStat;
import com.taobao.tddl.optimizer.exceptions.OptimizerException;
import com.taobao.tddl.rule.model.TargetDB;

/**
 * Stat manager that resolves the physical group for a table via the sharding
 * rule, then delegates the lookup to a per-group {@link RepoStatManager}
 * (lazily created and cached).
 *
 * @since 5.0.0
 */
public class RuleStatManager extends AbstractLifecycle implements StatManager {

    private OptimizerRule                       rule;
    private Matrix                              matrix;
    private LocalStatManager                    local;     // fallback for tables with no rule/group
    private boolean                             useCache;
    // One RepoStatManager per physical group, created on first access.
    private LoadingCache<Group, RepoStatManager> repos = null;

    public RuleStatManager(OptimizerRule rule, Matrix matrix){
        this.rule = rule;
        this.matrix = matrix;
    }

    protected void doInit() throws TddlException {
        super.doInit();
        repos = CacheBuilder.newBuilder().build(new CacheLoader<Group, RepoStatManager>() {

            public RepoStatManager load(Group group) throws Exception {
                RepoStatManager repo = new RepoStatManager();
                repo.setGroup(group);
                repo.setLocal(local);
                repo.setUseCache(useCache);
                repo.init();
                return repo;
            }
        });
    }

    protected void doDestory() throws TddlException {
        super.doDestory();
        // Tear down every per-group manager that was lazily created.
        for (RepoStatManager repo : repos.asMap().values()) {
            repo.destory();
        }
    }

    public KVIndexStat getKVIndex(String indexName) {
        TargetDB targetDB = rule.shardAny(indexName);
        if (targetDB.getDbIndex() == null) {
            // No matching rule and no default group: the table may not exist.
            // Fall back to the local stat manager.
            return local.getKVIndex(indexName);
        } else {
            // Resolve the physical group first, then query its repo.
            Group group = matrix.getGroup(targetDB.getDbIndex());
            try {
                return repos.get(group).getKVIndex(targetDB.getTableNames().iterator().next());
            } catch (ExecutionException e) {
                throw new OptimizerException(e);
            }
        }
    }

    public TableStat getTable(String tableName) {
        TargetDB targetDB = rule.shardAny(tableName);
        if (targetDB.getDbIndex() == null) {
            // No matching rule and no default group: the table may not exist.
            // Fall back to the local stat manager.
            return local.getTable(tableName);
        } else {
            // Resolve the physical group first, then query its repo.
            Group group = matrix.getGroup(targetDB.getDbIndex());
            try {
                return repos.get(group).getTable(targetDB.getTableNames().iterator().next());
            } catch (ExecutionException e) {
                throw new OptimizerException(e);
            }
        }
    }

    public void setRule(OptimizerRule rule) {
        this.rule = rule;
    }

    public void setLocal(LocalStatManager local) {
        this.local = local;
    }

    public void setUseCache(boolean useCache) {
        this.useCache = useCache;
    }
}
apache-2.0
jmhodges/closure-templates
java/src/com/google/template/soy/CompilationResult.java
1723
/*
 * Copyright 2015 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.template.soy;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableCollection;
import com.google.template.soy.base.SoySyntaxException;
import com.google.template.soy.error.ErrorPrettyPrinter;

import java.io.PrintStream;

/**
 * Container for results associated with a Soy compilation.
 *
 * TODO(brndn): consider adding state for the compiled files.
 * Currently, {@link SoyFileSet#compileToJsSrcFiles} compiles but doesn't return them.
 *
 * @author brndn@google.com (Brendan Linn)
 */
final class CompilationResult {

  // Empty collection means a successful compilation.
  private final ImmutableCollection<? extends SoySyntaxException> errors;
  private final ErrorPrettyPrinter prettyPrinter;

  CompilationResult(
      ImmutableCollection<? extends SoySyntaxException> errors,
      ErrorPrettyPrinter prettyPrinter) {
    this.errors = errors;
    this.prettyPrinter = prettyPrinter;
  }

  /** Returns true iff the compilation produced no errors. */
  boolean isSuccess() {
    return errors.isEmpty();
  }

  /**
   * Pretty-prints every error to the given stream.
   * Must only be called on a failed compilation.
   */
  void printErrors(PrintStream out) {
    Preconditions.checkState(!isSuccess());
    for (SoySyntaxException e : errors) {
      prettyPrinter.print(e, out);
    }
  }
}
apache-2.0
chef/chef
lib/chef/chef_fs/file_system/chef_server/environments_dir.rb
1763
# # Author:: John Keiser (<jkeiser@chef.io>) # Copyright:: Copyright (c) Chef Software Inc. # License:: Apache License, Version 2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # require_relative "../base_fs_dir" require_relative "rest_list_entry" require_relative "../exceptions" class Chef module ChefFS module FileSystem module ChefServer class EnvironmentsDir < RestListDir def make_child_entry(name, exists = nil) if File.basename(name, ".*") == "_default" DefaultEnvironmentEntry.new(name, self, exists) else super end end class DefaultEnvironmentEntry < RestListEntry def initialize(name, parent, exists = nil) super(name, parent) @exists = exists end def delete(recurse) raise NotFoundError.new(self) unless exists? raise DefaultEnvironmentCannotBeModifiedError.new(:delete, self) end def write(file_contents) raise NotFoundError.new(self) unless exists? raise DefaultEnvironmentCannotBeModifiedError.new(:write, self) end end end end end end end
apache-2.0
smartpcr/samza
samza-core/src/main/java/org/apache/samza/checkpoint/CheckpointManager.java
4211
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.samza.checkpoint; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import org.apache.samza.container.TaskName; import org.apache.samza.coordinator.stream.messages.CoordinatorStreamMessage; import org.apache.samza.coordinator.stream.messages.SetCheckpoint; import org.apache.samza.coordinator.stream.CoordinatorStreamSystemConsumer; import org.apache.samza.coordinator.stream.CoordinatorStreamSystemProducer; import org.apache.samza.coordinator.stream.AbstractCoordinatorStreamManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * The CheckpointManager is used to persist and restore checkpoint information. The CheckpointManager uses * CoordinatorStream underneath to do this. 
*/ public class CheckpointManager extends AbstractCoordinatorStreamManager { private static final Logger log = LoggerFactory.getLogger(CheckpointManager.class); private final Map<TaskName, Checkpoint> taskNamesToOffsets; private final HashSet<TaskName> taskNames; public CheckpointManager(CoordinatorStreamSystemProducer coordinatorStreamProducer, CoordinatorStreamSystemConsumer coordinatorStreamConsumer, String source) { super(coordinatorStreamProducer, coordinatorStreamConsumer, source); taskNamesToOffsets = new HashMap<TaskName, Checkpoint>(); taskNames = new HashSet<TaskName>(); } /** * Registers this manager to write checkpoints of a specific Samza stream partition. * @param taskName Specific Samza taskName of which to write checkpoints for. */ public void register(TaskName taskName) { log.debug("Adding taskName {} to {}", taskName, this); taskNames.add(taskName); registerCoordinatorStreamConsumer(); registerCoordinatorStreamProducer(taskName.getTaskName()); } /** * Writes a checkpoint based on the current state of a Samza stream partition. * @param taskName Specific Samza taskName of which to write a checkpoint of. * @param checkpoint Reference to a Checkpoint object to store offset data in. */ public void writeCheckpoint(TaskName taskName, Checkpoint checkpoint) { log.debug("Writing checkpoint for Task: {} with offsets: {}", taskName.getTaskName(), checkpoint.getOffsets()); send(new SetCheckpoint(getSource(), taskName.getTaskName(), checkpoint)); } /** * Returns the last recorded checkpoint for a specified taskName. * @param taskName Specific Samza taskName for which to get the last checkpoint of. * @return A Checkpoint object with the recorded offset data of the specified partition. 
*/ public Checkpoint readLastCheckpoint(TaskName taskName) { // Bootstrap each time to make sure that we are caught up with the stream, the bootstrap will just catch up on consecutive calls log.debug("Reading checkpoint for Task: {}", taskName.getTaskName()); for (CoordinatorStreamMessage coordinatorStreamMessage : getBootstrappedStream(SetCheckpoint.TYPE)) { SetCheckpoint setCheckpoint = new SetCheckpoint(coordinatorStreamMessage); TaskName taskNameInCheckpoint = new TaskName(setCheckpoint.getKey()); if (taskNames.contains(taskNameInCheckpoint)) { taskNamesToOffsets.put(taskNameInCheckpoint, setCheckpoint.getCheckpoint()); log.debug("Adding checkpoint {} for taskName {}", taskNameInCheckpoint, taskName); } } return taskNamesToOffsets.get(taskName); } }
apache-2.0
aljoscha/flink
flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/iteration/IterateExample.java
8786
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.streaming.examples.iteration; import org.apache.flink.api.common.functions.MapFunction; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.api.java.tuple.Tuple5; import org.apache.flink.api.java.utils.ParameterTool; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.datastream.IterativeStream; import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.api.functions.ProcessFunction; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.util.Collector; import org.apache.flink.util.OutputTag; import java.util.Random; /** * Example illustrating iterations in Flink streaming. * * <p>The program sums up random numbers and counts additions it performs to reach a specific * threshold in an iterative streaming fashion. * * <p>This example shows how to use: * * <ul> * <li>streaming iterations, * <li>buffer timeout to enhance latency, * <li>directed outputs. 
* </ul> */ public class IterateExample { private static final int BOUND = 100; private static final OutputTag<Tuple5<Integer, Integer, Integer, Integer, Integer>> ITERATE_TAG = new OutputTag<Tuple5<Integer, Integer, Integer, Integer, Integer>>( "iterate") {}; // ************************************************************************* // PROGRAM // ************************************************************************* public static void main(String[] args) throws Exception { // Checking input parameters final ParameterTool params = ParameterTool.fromArgs(args); // set up input for the stream of integer pairs // obtain execution environment and set setBufferTimeout to 1 to enable // continuous flushing of the output buffers (lowest latency) StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment().setBufferTimeout(1); // make parameters available in the web interface env.getConfig().setGlobalJobParameters(params); // create input stream of integer pairs DataStream<Tuple2<Integer, Integer>> inputStream; if (params.has("input")) { inputStream = env.readTextFile(params.get("input")).map(new FibonacciInputMap()); } else { System.out.println("Executing Iterate example with default input data set."); System.out.println("Use --input to specify file input."); inputStream = env.addSource(new RandomFibonacciSource()); } // create an iterative data stream from the input with 5 second timeout IterativeStream<Tuple5<Integer, Integer, Integer, Integer, Integer>> it = inputStream.map(new InputMap()).iterate(5000L); // apply the step function to get the next Fibonacci number // increment the counter and split the output SingleOutputStreamOperator<Tuple5<Integer, Integer, Integer, Integer, Integer>> step = it.process(new Step()); // close the iteration by selecting the tuples that were directed to the // 'iterate' channel in the output selector it.closeWith(step.getSideOutput(ITERATE_TAG)); // to produce the final get the input pairs that have the 
greatest iteration counter // on a 1 second sliding window DataStream<Tuple2<Tuple2<Integer, Integer>, Integer>> numbers = step.map(new OutputMap()); // emit results if (params.has("output")) { numbers.writeAsText(params.get("output")); } else { System.out.println("Printing result to stdout. Use --output to specify output path."); numbers.print(); } // execute the program env.execute("Streaming Iteration Example"); } // ************************************************************************* // USER FUNCTIONS // ************************************************************************* /** Generate BOUND number of random integer pairs from the range from 1 to BOUND/2. */ private static class RandomFibonacciSource implements SourceFunction<Tuple2<Integer, Integer>> { private static final long serialVersionUID = 1L; private Random rnd = new Random(); private volatile boolean isRunning = true; private int counter = 0; @Override public void run(SourceContext<Tuple2<Integer, Integer>> ctx) throws Exception { while (isRunning && counter < BOUND) { int first = rnd.nextInt(BOUND / 2 - 1) + 1; int second = rnd.nextInt(BOUND / 2 - 1) + 1; ctx.collect(new Tuple2<>(first, second)); counter++; Thread.sleep(50L); } } @Override public void cancel() { isRunning = false; } } /** Generate random integer pairs from the range from 0 to BOUND/2. */ private static class FibonacciInputMap implements MapFunction<String, Tuple2<Integer, Integer>> { private static final long serialVersionUID = 1L; @Override public Tuple2<Integer, Integer> map(String value) throws Exception { String record = value.substring(1, value.length() - 1); String[] splitted = record.split(","); return new Tuple2<>(Integer.parseInt(splitted[0]), Integer.parseInt(splitted[1])); } } /** * Map the inputs so that the next Fibonacci numbers can be calculated while preserving the * original input tuple. A counter is attached to the tuple and incremented in every iteration * step. 
*/ public static class InputMap implements MapFunction< Tuple2<Integer, Integer>, Tuple5<Integer, Integer, Integer, Integer, Integer>> { private static final long serialVersionUID = 1L; @Override public Tuple5<Integer, Integer, Integer, Integer, Integer> map( Tuple2<Integer, Integer> value) throws Exception { return new Tuple5<>(value.f0, value.f1, value.f0, value.f1, 0); } } /** Iteration step function that calculates the next Fibonacci number. */ public static class Step extends ProcessFunction< Tuple5<Integer, Integer, Integer, Integer, Integer>, Tuple5<Integer, Integer, Integer, Integer, Integer>> { private static final long serialVersionUID = 1L; @Override public void processElement( Tuple5<Integer, Integer, Integer, Integer, Integer> value, Context ctx, Collector<Tuple5<Integer, Integer, Integer, Integer, Integer>> out) throws Exception { Tuple5<Integer, Integer, Integer, Integer, Integer> element = new Tuple5<>(value.f0, value.f1, value.f3, value.f2 + value.f3, ++value.f4); if (value.f2 < BOUND && value.f3 < BOUND) { ctx.output(ITERATE_TAG, element); } else { out.collect(element); } } } /** Giving back the input pair and the counter. */ public static class OutputMap implements MapFunction< Tuple5<Integer, Integer, Integer, Integer, Integer>, Tuple2<Tuple2<Integer, Integer>, Integer>> { private static final long serialVersionUID = 1L; @Override public Tuple2<Tuple2<Integer, Integer>, Integer> map( Tuple5<Integer, Integer, Integer, Integer, Integer> value) throws Exception { return new Tuple2<>(new Tuple2<>(value.f0, value.f1), value.f4); } } }
apache-2.0
pkcool/querydsl
querydsl-sql/src/main/java/com/querydsl/sql/teradata/TeradataQueryFactory.java
1639
/* * Copyright 2015, The Querydsl Team (http://www.querydsl.com/team) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.querydsl.sql.teradata; import java.sql.Connection; import javax.inject.Provider; import com.querydsl.sql.AbstractSQLQueryFactory; import com.querydsl.sql.Configuration; import com.querydsl.sql.SQLTemplates; import com.querydsl.sql.TeradataTemplates; /** * Teradata specific implementation of SQLQueryFactory * * @author tiwe * */ public class TeradataQueryFactory extends AbstractSQLQueryFactory<TeradataQuery<?>> { public TeradataQueryFactory(Configuration configuration, Provider<Connection> connection) { super(configuration, connection); } public TeradataQueryFactory(Provider<Connection> connection) { this(new Configuration(new TeradataTemplates()), connection); } public TeradataQueryFactory(SQLTemplates templates, Provider<Connection> connection) { this(new Configuration(templates), connection); } @Override public TeradataQuery<?> query() { return new TeradataQuery<Void>(connection.get(), configuration); } }
apache-2.0
kisskys/incubator-asterixdb-hyracks
hyracks/hyracks-storage-am-lsm-invertedindex/src/main/java/org/apache/hyracks/storage/am/lsm/invertedindex/inmemory/PartitionedInMemoryInvertedIndexOpContext.java
1926
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.hyracks.storage.am.lsm.invertedindex.inmemory; import org.apache.hyracks.api.dataflow.value.IBinaryComparatorFactory; import org.apache.hyracks.storage.am.btree.impls.BTree; import org.apache.hyracks.storage.am.lsm.invertedindex.tokenizers.IBinaryTokenizer; import org.apache.hyracks.storage.am.lsm.invertedindex.tokenizers.IBinaryTokenizerFactory; import org.apache.hyracks.storage.am.lsm.invertedindex.util.PartitionedInvertedIndexTokenizingTupleIterator; public class PartitionedInMemoryInvertedIndexOpContext extends InMemoryInvertedIndexOpContext { public PartitionedInMemoryInvertedIndexOpContext(BTree btree, IBinaryComparatorFactory[] tokenCmpFactories, IBinaryTokenizerFactory tokenizerFactory) { super(btree, tokenCmpFactories, tokenizerFactory); } protected void setTokenizingTupleIterator() { IBinaryTokenizer tokenizer = tokenizerFactory.createTokenizer(); tupleIter = new PartitionedInvertedIndexTokenizingTupleIterator(tokenCmpFactories.length, btree.getFieldCount() - tokenCmpFactories.length, tokenizer); } }
apache-2.0
jerrinot/hazelcast
hazelcast/src/main/java/com/hazelcast/jet/impl/processor/TwoPhaseSnapshotCommitUtility.java
16166
/* * Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.hazelcast.jet.impl.processor; import com.hazelcast.function.ConsumerEx; import com.hazelcast.function.FunctionEx; import com.hazelcast.jet.config.ProcessingGuarantee; import com.hazelcast.jet.core.AbstractProcessor; import com.hazelcast.jet.core.BroadcastKey; import com.hazelcast.jet.core.Inbox; import com.hazelcast.jet.core.Outbox; import com.hazelcast.jet.core.Processor; import com.hazelcast.jet.core.Processor.Context; import com.hazelcast.jet.impl.processor.TwoPhaseSnapshotCommitUtility.TransactionId; import com.hazelcast.jet.impl.processor.TwoPhaseSnapshotCommitUtility.TransactionalResource; import com.hazelcast.jet.impl.util.LoggingUtil; import com.hazelcast.logging.ILogger; import javax.annotation.Nonnull; import javax.annotation.Nullable; import java.util.Map.Entry; import java.util.function.Consumer; import static com.hazelcast.jet.config.ProcessingGuarantee.AT_LEAST_ONCE; import static com.hazelcast.jet.config.ProcessingGuarantee.EXACTLY_ONCE; import static com.hazelcast.jet.impl.util.ExceptionUtil.sneakyThrow; /** * A base class for transaction utilities implementing different transaction * strategies. * <p> * The protected methods are intended for utility implementations. The * public methods are intended for utility users, the processors. 
* * @param <TXN_ID> type fo the transaction ID * @param <RES> type of the transactional resource */ public abstract class TwoPhaseSnapshotCommitUtility<TXN_ID extends TransactionId, RES extends TransactionalResource<TXN_ID>> { private final boolean isSource; private final Outbox outbox; private final Context procContext; private final ProcessingGuarantee externalGuarantee; private final FunctionEx<TXN_ID, LoggingNonThrowingResource<TXN_ID, RES>> createTxnFn; private final Consumer<TXN_ID> recoverAndCommitFn; private final ConsumerEx<Integer> recoverAndAbortFn; /** * @param outbox the outbox passed to the {@link Processor#init} method * @param procContext the context passed to the {@link Processor#init} method * @param isSource true, if the processor is a source (a reader), false for * a sink (a writer) * @param externalGuarantee guarantee required for the source/sink. Must * not be higher than the job's guarantee * @param createTxnFn creates a {@link TransactionalResource} based on a * transaction id. The implementation needs to ensure that in case * when the transaction ID was used before, the work from the previous * use is rolled back * @param recoverAndCommitFn a function to finish the commit of transaction * identified by the given ID * @param recoverAndAbortFn a function to rollback the work of the all the * transactions that were created by the given processor index */ protected TwoPhaseSnapshotCommitUtility( @Nonnull Outbox outbox, @Nonnull Context procContext, boolean isSource, @Nonnull ProcessingGuarantee externalGuarantee, @Nonnull FunctionEx<TXN_ID, RES> createTxnFn, @Nonnull ConsumerEx<TXN_ID> recoverAndCommitFn, @Nonnull ConsumerEx<Integer> recoverAndAbortFn ) { if (externalGuarantee.ordinal() > procContext.processingGuarantee().ordinal()) { throw new IllegalArgumentException("unsupported combination, job guarantee cannot by lower than external " + "guarantee. 
Job guarantee: " + procContext.processingGuarantee() + ", external guarantee: " + externalGuarantee); } this.isSource = isSource; this.outbox = outbox; this.procContext = procContext; this.externalGuarantee = externalGuarantee; this.createTxnFn = txnId -> new LoggingNonThrowingResource<>(procContext.logger(), createTxnFn.apply(txnId)); this.recoverAndCommitFn = recoverAndCommitFn; this.recoverAndAbortFn = recoverAndAbortFn; } public ProcessingGuarantee externalGuarantee() { return externalGuarantee; } protected Outbox getOutbox() { return outbox; } protected Context procContext() { return procContext; } protected FunctionEx<TXN_ID, LoggingNonThrowingResource<TXN_ID, RES>> createTxnFn() { return createTxnFn; } protected Consumer<TXN_ID> recoverAndCommitFn() { return recoverAndCommitFn; } protected ConsumerEx<Integer> recoverAndAbortFn() { return recoverAndAbortFn; } /** * Delegate handling of {@link Processor#tryProcess()} to this method. */ public boolean tryProcess() { return true; } /** * Returns the active transaction that can be used to store an item or * query the source. It's null in case when a transaction is not available * now. In that case the processor should back off and retry later. */ @Nullable public abstract RES activeTransaction(); /** * For sinks and inner vertices, call from {@link Processor#complete()}. * For batch sources call after the source emitted everything. Never call * it for streaming sources. */ public abstract void afterCompleted(); /** * Delegate handling of {@link Processor#snapshotCommitPrepare()} to this * method. * * @return a value to return from {@code snapshotCommitPrepare()} */ public abstract boolean snapshotCommitPrepare(); /** * Delegate handling of {@link Processor#snapshotCommitFinish(boolean)} to * this method. 
* * @param success value passed to {@code snapshotCommitFinish} * @return value to return from {@code snapshotCommitFinish} */ public abstract boolean snapshotCommitFinish(boolean success); /** * Delegate handling of {@link Processor#restoreFromSnapshot(Inbox)} to * this method. If you save custom items to snapshot besides those saved by * {@link #snapshotCommitPrepare()} of this utility, use {@link * #restoreFromSnapshot(Object, Object)} to pass only entries not handled * by your processor. * * @param inbox the inbox passed to {@code Processor.restoreFromSnapshot()} */ @SuppressWarnings("unchecked") public void restoreFromSnapshot(@Nonnull Inbox inbox) { for (Object item; (item = inbox.poll()) != null; ) { Entry<BroadcastKey<TXN_ID>, Boolean> castedItem = (Entry<BroadcastKey<TXN_ID>, Boolean>) item; restoreFromSnapshot(castedItem.getKey(), castedItem.getValue()); } } /** * Delegate handling of {@link * AbstractProcessor#restoreFromSnapshot(Object, Object)} to this method. * <p> * See also {@link #restoreFromSnapshot(Inbox)}. * * @param key a key from the snapshot * @param value a value from the snapshot */ public abstract void restoreFromSnapshot(@Nonnull Object key, @Nonnull Object value); /** * Call from {@link Processor#close()}. * <p> * The implementation must not commit or rollback any pending transactions * - the job might have failed between after snapshot phase 1 and 2. The * pending transactions might be recovered after the restart. */ public abstract void close() throws Exception; public boolean usesTransactionLifecycle() { return externalGuarantee == EXACTLY_ONCE || externalGuarantee == AT_LEAST_ONCE && isSource; } /** * A handle for a transactional resource. 
* <p> * The methods are called depending on the external guarantee:<ul> * * <li>EXACTLY_ONCE source & sink, AT_LEAST_ONCE source * * <ol> * <li>{@link #begin()} - called before the transaction is first * returned from {@link #activeTransaction()} * <li>{@link #flush()} * <li>{@link #endAndPrepare()} - after this the transaction will no * longer be returned from {@link #activeTransaction()}, i.e. no * more data will be written to it. Called in the 1st snapshot * phase. For AT_LEAST_ONCE source this call can be ignored, it's * not required to be able to finish the commit after restart * <li>{@link #commit()} - called in the 2nd snapshot phase * <li>if the utility recycles transactions, the process can go to (1) * <li>{@link #release()} * </ol> * * <li>AT_LEAST_ONCE sink * * The transaction should be in auto-commit mode, {@link #commit()} and * other transaction methods won't be called. * * <ol> * <li>{@link #flush()} - ensure all writes are stored in the external * system. Called in the 1st snapshot phase * <li>if the utility recycles transaction, the process can go to (1) * <li>{@link #release()} * </ol> * * <li>NONE * * <ol> * <li>{@link #release()} * </ol> * * </ul> * * @param <TXN_ID> type of transaction identifier. Must be serializable, will * be saved to state snapshot */ public interface TransactionalResource<TXN_ID> { /** * Returns the ID of this transaction, it should be the ID passed to * the {@link #createTxnFn()}. */ TXN_ID id(); /** * Begins the transaction. The method will be called before the * transaction is returned from {@link #activeTransaction()} for the * first time after creation or after {@link #commit()}. * <p> * This method is called in exactly-once mode; in at-least-once mode * it's called only if the processor is a source. It's never called if * there's no processing guarantee. * <p> * See also the {@linkplain TransactionalResource class javadoc}. 
* * @throws UnsupportedOperationException if the transaction was created * with {@code null} id passed to the {@link #createTxnFn()} */ default void begin() throws Exception { throw new UnsupportedOperationException("Resource without transaction support"); } /** * Flushes all previous writes to the external system and ensures all * pending items are emitted to the downstream. * <p> * See also the {@linkplain TransactionalResource class javadoc}. * * @return if all was flushed and emitted. If the method returns false, * it will be called again before any other method is called. */ default boolean flush() throws Exception { return true; } /** * Prepares for a commit. To achieve correctness, the transaction must * be able to eventually commit after this call, writes must be durably * stored in the external system. * <p> * After this call, the transaction will never again be returned * from {@link #activeTransaction()} until it's committed. * <p> * This method is called in exactly-once mode; in at-least-once mode * it's called only if the processor is a source. It's never called if * there's no processing guarantee. * <p> * See also the {@linkplain TransactionalResource class javadoc}. */ default void endAndPrepare() throws Exception { } /** * Makes the changes visible to others and acknowledges consumed items. * <p> * This method is called in exactly-once mode; in at-least-once mode * it's called only if the processor is a source. It's never called if * there's no processing guarantee. * <p> * See also the {@linkplain TransactionalResource class javadoc}. */ default void commit() throws Exception { throw new UnsupportedOperationException(); } /** * Roll back the transaction. Only called for non-prepared transactions * when the job execution ends. * <p> * Will only be called for a transaction that was {@linkplain * #begin() begun}. */ default void rollback() throws Exception { } /** * Finish the pending operations and release the associated resources. 
* If a transaction was begun, must not commit or roll it back, the * transaction can be later recovered from the durable storage and * continued. * <p> * See also the {@linkplain TransactionalResource class javadoc}. */ default void release() throws Exception { } } public interface TransactionId { /** * Returns the index of the processor that will handle this transaction * ID. Used when restoring transaction IDs to determine which processor * owns which transactions. * <p> * After restoring the ID from the snapshot the index might be out * of range (greater or equal to the current total parallelism). */ int index(); } /** * A wrapper for {@link TransactionalResource} adding logging and not * throwing checked exceptions. Aimed to simplify subclass implementation. */ protected static final class LoggingNonThrowingResource<TXN_ID, RES extends TransactionalResource<TXN_ID>> implements TransactionalResource<TXN_ID> { private final ILogger logger; private final RES wrapped; private LoggingNonThrowingResource(ILogger logger, RES wrapped) { this.logger = logger; this.wrapped = wrapped; } public RES wrapped() { return wrapped; } @Override public TXN_ID id() { return wrapped.id(); } @Override public void begin() { LoggingUtil.logFine(logger, "begin %s", id()); try { wrapped.begin(); } catch (Exception e) { throw sneakyThrow(e); } } @Override public boolean flush() { LoggingUtil.logFine(logger, "flush %s", id()); try { return wrapped.flush(); } catch (Exception e) { throw sneakyThrow(e); } } @Override public void endAndPrepare() { LoggingUtil.logFine(logger, "endAndPrepare %s", id()); try { wrapped.endAndPrepare(); } catch (Exception e) { throw sneakyThrow(e); } } @Override public void commit() { LoggingUtil.logFine(logger, "commit %s", id()); try { wrapped.commit(); } catch (Exception e) { throw sneakyThrow(e); } } @Override public void rollback() { LoggingUtil.logFine(logger, "rollback %s", id()); try { wrapped.rollback(); } catch (Exception e) { throw sneakyThrow(e); } } 
@Override public void release() { LoggingUtil.logFine(logger, "release %s", id()); try { wrapped.release(); } catch (Exception e) { throw sneakyThrow(e); } } } }
apache-2.0
RyanCavanaugh/TypeScript
tests/cases/fourslash/codeFixClassExtendAbstractMethodWithLongName.ts
2167
/// <reference path='fourslash.ts' /> ////namespace some.really.long.generated.type.goes.here.you.know.this_.should.be.pretty.simple { //// export interface Yah {} ////} ////namespace another.really.long.generated.type.goes.here.too.because.who.cares.about.space.do_.you.feel.me { //// export interface Yah {} ////} ////interface this_will_be_collapsed {} ////interface this_is_fine {} ////abstract class AbstractCstVisitor { //// abstract Node( //// arg1: [ //// some.really.long.generated.type.goes.here.you.know.this_.should.be.pretty.simple.Yah[], //// another.really.long.generated.type.goes.here.too.because.who.cares.about.space.do_.you.feel.me.Yah[] //// ], //// arg2: [this_will_be_collapsed], //// arg3: Set<this_will_be_collapsed>, //// arg4: this_is_fine //// ): Set<this_will_be_collapsed>; ////} ////class CstVisitorImplementation extends AbstractCstVisitor {} verify.codeFix({ description: "Implement inherited abstract class", newFileContent: `namespace some.really.long.generated.type.goes.here.you.know.this_.should.be.pretty.simple { export interface Yah {} } namespace another.really.long.generated.type.goes.here.too.because.who.cares.about.space.do_.you.feel.me { export interface Yah {} } interface this_will_be_collapsed {} interface this_is_fine {} abstract class AbstractCstVisitor { abstract Node( arg1: [ some.really.long.generated.type.goes.here.you.know.this_.should.be.pretty.simple.Yah[], another.really.long.generated.type.goes.here.too.because.who.cares.about.space.do_.you.feel.me.Yah[] ], arg2: [this_will_be_collapsed], arg3: Set<this_will_be_collapsed>, arg4: this_is_fine ): Set<this_will_be_collapsed>; } class CstVisitorImplementation extends AbstractCstVisitor { Node(arg1: [some.really.long.generated.type.goes.here.you.know.this_.should.be.pretty.simple.Yah[], another.really.long.generated.type.goes.here.too.because.who.cares.about.space.do_.you.feel.me.Yah[]], arg2: [this_will_be_collapsed], arg3: any, arg4: this_is_fine) { throw new Error("Method 
not implemented."); } }` });
apache-2.0
kinghy2302/cw-omnibus
ViewPager/Nested/src/com/commonsware/android/pagernested/SampleAdapter.java
1356
/*** Copyright (c) 2012 CommonsWare, LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. From _The Busy Coder's Guide to Android Development_ http://commonsware.com/Android */ package com.commonsware.android.pagernested; import android.content.Context; import android.support.v4.app.Fragment; import android.support.v4.app.FragmentManager; import android.support.v4.app.FragmentPagerAdapter; public class SampleAdapter extends FragmentPagerAdapter { Context ctxt=null; public SampleAdapter(Context ctxt, FragmentManager mgr) { super(mgr); this.ctxt=ctxt; } @Override public int getCount() { return(10); } @Override public Fragment getItem(int position) { return(EditorFragment.newInstance(position)); } @Override public String getPageTitle(int position) { return(EditorFragment.getTitle(ctxt, position)); } }
apache-2.0
dawangjiaowolaixunshan/runtime
deps/libcxx/test/input.output/string.streams/istringstream/istringstream.assign/move.pass.cpp
2035
//===----------------------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // <sstream> // template <class charT, class traits = char_traits<charT>, class Allocator = allocator<charT> > // class basic_istringstream // basic_istringstream& operator=(basic_istringstream&& rhs); #include <sstream> #include <cassert> int main() { #ifndef _LIBCPP_HAS_NO_RVALUE_REFERENCES { std::istringstream ss0(" 123 456"); std::istringstream ss; ss = std::move(ss0); assert(ss.rdbuf() != 0); assert(ss.good()); assert(ss.str() == " 123 456"); int i = 0; ss >> i; assert(i == 123); ss >> i; assert(i == 456); } { std::istringstream s1("Aaaaa Bbbbb Cccccccccc Dddddddddddddddddd"); std::string s; s1 >> s; std::istringstream s2 = std::move(s1); s2 >> s; assert(s == "Bbbbb"); std::istringstream s3; s3 = std::move(s2); s3 >> s; assert(s == "Cccccccccc"); s1 = std::move(s3); s1 >> s; assert(s == "Dddddddddddddddddd"); } { std::wistringstream ss0(L" 123 456"); std::wistringstream ss; ss = std::move(ss0); assert(ss.rdbuf() != 0); assert(ss.good()); assert(ss.str() == L" 123 456"); int i = 0; ss >> i; assert(i == 123); ss >> i; assert(i == 456); } { std::wistringstream s1(L"Aaaaa Bbbbb Cccccccccc Dddddddddddddddddd"); std::wstring s; s1 >> s; std::wistringstream s2 = std::move(s1); s2 >> s; assert(s == L"Bbbbb"); std::wistringstream s3; s3 = std::move(s2); s3 >> s; assert(s == L"Cccccccccc"); s1 = std::move(s3); s1 >> s; assert(s == L"Dddddddddddddddddd"); } #endif // _LIBCPP_HAS_NO_RVALUE_REFERENCES }
apache-2.0
mosoft521/lemon
src/main/java/com/mossle/api/delegate/MockDelegateConnector.java
538
package com.mossle.api.delegate; public class MockDelegateConnector implements DelegateConnector { public String findAttorney(String userId, String processDefinitionId, String taskDefinitionKey, String tenantId) { return null; } public void recordDelegate(String userId, String attorney, String taskId, String tenantId) { } public void cancel(String taskId, String userId, String tenantId) { } public void complete(String taskId, String complete, String tenantId) { } }
apache-2.0
mackjieson/weixin-popular
src/main/java/weixin/popular/bean/paymch/Transfers.java
2563
package weixin.popular.bean.paymch; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; @XmlRootElement(name="xml") @XmlAccessorType(XmlAccessType.FIELD) public class Transfers { private String mch_appid; private String mchid; private String sub_mch_id; private String device_info; private String nonce_str; private String sign; private String partner_trade_no; private String openid; private String check_name; private String re_user_name; private String amount; private String desc; private String spbill_create_ip; public String getMch_appid() { return mch_appid; } public void setMch_appid(String mch_appid) { this.mch_appid = mch_appid; } public String getMchid() { return mchid; } public void setMchid(String mchid) { this.mchid = mchid; } public String getSub_mch_id() { return sub_mch_id; } public void setSub_mch_id(String sub_mch_id) { this.sub_mch_id = sub_mch_id; } public String getDevice_info() { return device_info; } public void setDevice_info(String device_info) { this.device_info = device_info; } public String getNonce_str() { return nonce_str; } public void setNonce_str(String nonce_str) { this.nonce_str = nonce_str; } public String getSign() { return sign; } public void setSign(String sign) { this.sign = sign; } public String getPartner_trade_no() { return partner_trade_no; } public void setPartner_trade_no(String partner_trade_no) { this.partner_trade_no = partner_trade_no; } public String getOpenid() { return openid; } public void setOpenid(String openid) { this.openid = openid; } public String getCheck_name() { return check_name; } public void setCheck_name(String check_name) { this.check_name = check_name; } public String getRe_user_name() { return re_user_name; } public void setRe_user_name(String re_user_name) { this.re_user_name = re_user_name; } public String getAmount() { return amount; } public void setAmount(String amount) { this.amount = amount; } public 
String getDesc() { return desc; } public void setDesc(String desc) { this.desc = desc; } public String getSpbill_create_ip() { return spbill_create_ip; } public void setSpbill_create_ip(String spbill_create_ip) { this.spbill_create_ip = spbill_create_ip; } }
apache-2.0
Buzzardo/spring-boot
spring-boot-project/spring-boot-tools/spring-boot-buildpack-platform/src/main/java/org/springframework/boot/buildpack/platform/build/TarGzipBuildpack.java
4562
/* * Copyright 2012-2021 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.boot.buildpack.platform.build; import java.io.IOException; import java.io.OutputStream; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import org.apache.commons.compress.archivers.ArchiveEntry; import org.apache.commons.compress.archivers.tar.TarArchiveEntry; import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream; import org.springframework.boot.buildpack.platform.docker.type.Layer; import org.springframework.boot.buildpack.platform.io.IOConsumer; import org.springframework.util.StreamUtils; /** * A {@link Buildpack} that references a buildpack contained in a local gzipped tar * archive file. * * The archive must contain a buildpack descriptor named {@code buildpack.toml} at the * root of the archive. The contents of the archive will be provided as a single layer to * be included in the builder image. 
* * @author Scott Frederick */ final class TarGzipBuildpack implements Buildpack { private final Path path; private final BuildpackCoordinates coordinates; private TarGzipBuildpack(Path path) { this.path = path; this.coordinates = findBuildpackCoordinates(path); } private BuildpackCoordinates findBuildpackCoordinates(Path path) { try { try (TarArchiveInputStream tar = new TarArchiveInputStream( new GzipCompressorInputStream(Files.newInputStream(path)))) { ArchiveEntry entry = tar.getNextEntry(); while (entry != null) { if ("buildpack.toml".equals(entry.getName())) { return BuildpackCoordinates.fromToml(tar, path); } entry = tar.getNextEntry(); } throw new IllegalArgumentException( "Buildpack descriptor 'buildpack.toml' is required in buildpack '" + path + "'"); } } catch (IOException ex) { throw new RuntimeException("Error parsing descriptor for buildpack '" + path + "'", ex); } } @Override public BuildpackCoordinates getCoordinates() { return this.coordinates; } @Override public void apply(IOConsumer<Layer> layers) throws IOException { layers.accept(Layer.fromTarArchive(this::copyAndRebaseEntries)); } private void copyAndRebaseEntries(OutputStream outputStream) throws IOException { String id = this.coordinates.getSanitizedId(); Path basePath = Paths.get("/cnb/buildpacks/", id, this.coordinates.getVersion()); try (TarArchiveInputStream tar = new TarArchiveInputStream( new GzipCompressorInputStream(Files.newInputStream(this.path))); TarArchiveOutputStream output = new TarArchiveOutputStream(outputStream)) { writeBasePathEntries(output, basePath); TarArchiveEntry entry = tar.getNextTarEntry(); while (entry != null) { entry.setName(basePath + "/" + entry.getName()); output.putArchiveEntry(entry); StreamUtils.copy(tar, output); output.closeArchiveEntry(); entry = tar.getNextTarEntry(); } output.finish(); } } private void writeBasePathEntries(TarArchiveOutputStream output, Path basePath) throws IOException { int pathCount = basePath.getNameCount(); for (int pathIndex = 
1; pathIndex < pathCount + 1; pathIndex++) { String name = "/" + basePath.subpath(0, pathIndex) + "/"; TarArchiveEntry entry = new TarArchiveEntry(name); output.putArchiveEntry(entry); output.closeArchiveEntry(); } } /** * A {@link BuildpackResolver} compatible method to resolve tar-gzip buildpacks. * @param context the resolver context * @param reference the buildpack reference * @return the resolved {@link Buildpack} or {@code null} */ static Buildpack resolve(BuildpackResolverContext context, BuildpackReference reference) { Path path = reference.asPath(); if (path != null && Files.exists(path) && Files.isRegularFile(path)) { return new TarGzipBuildpack(path); } return null; } }
apache-2.0
molobrakos/home-assistant
homeassistant/components/verisure/sensor.py
4476
"""Support for Verisure sensors.""" import logging from homeassistant.const import TEMP_CELSIUS from homeassistant.helpers.entity import Entity from . import CONF_HYDROMETERS, CONF_MOUSE, CONF_THERMOMETERS, HUB as hub _LOGGER = logging.getLogger(__name__) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Verisure platform.""" sensors = [] hub.update_overview() if int(hub.config.get(CONF_THERMOMETERS, 1)): sensors.extend([ VerisureThermometer(device_label) for device_label in hub.get( '$.climateValues[?(@.temperature)].deviceLabel')]) if int(hub.config.get(CONF_HYDROMETERS, 1)): sensors.extend([ VerisureHygrometer(device_label) for device_label in hub.get( '$.climateValues[?(@.humidity)].deviceLabel')]) if int(hub.config.get(CONF_MOUSE, 1)): sensors.extend([ VerisureMouseDetection(device_label) for device_label in hub.get( "$.eventCounts[?(@.deviceType=='MOUSE1')].deviceLabel")]) add_entities(sensors) class VerisureThermometer(Entity): """Representation of a Verisure thermometer.""" def __init__(self, device_label): """Initialize the sensor.""" self._device_label = device_label @property def name(self): """Return the name of the device.""" return hub.get_first( "$.climateValues[?(@.deviceLabel=='%s')].deviceArea", self._device_label) + " temperature" @property def state(self): """Return the state of the device.""" return hub.get_first( "$.climateValues[?(@.deviceLabel=='%s')].temperature", self._device_label) @property def available(self): """Return True if entity is available.""" return hub.get_first( "$.climateValues[?(@.deviceLabel=='%s')].temperature", self._device_label) is not None @property def unit_of_measurement(self): """Return the unit of measurement of this entity.""" return TEMP_CELSIUS # pylint: disable=no-self-use def update(self): """Update the sensor.""" hub.update_overview() class VerisureHygrometer(Entity): """Representation of a Verisure hygrometer.""" def __init__(self, device_label): """Initialize the sensor.""" 
self._device_label = device_label @property def name(self): """Return the name of the device.""" return hub.get_first( "$.climateValues[?(@.deviceLabel=='%s')].deviceArea", self._device_label) + " humidity" @property def state(self): """Return the state of the device.""" return hub.get_first( "$.climateValues[?(@.deviceLabel=='%s')].humidity", self._device_label) @property def available(self): """Return True if entity is available.""" return hub.get_first( "$.climateValues[?(@.deviceLabel=='%s')].humidity", self._device_label) is not None @property def unit_of_measurement(self): """Return the unit of measurement of this entity.""" return '%' # pylint: disable=no-self-use def update(self): """Update the sensor.""" hub.update_overview() class VerisureMouseDetection(Entity): """Representation of a Verisure mouse detector.""" def __init__(self, device_label): """Initialize the sensor.""" self._device_label = device_label @property def name(self): """Return the name of the device.""" return hub.get_first( "$.eventCounts[?(@.deviceLabel=='%s')].area", self._device_label) + " mouse" @property def state(self): """Return the state of the device.""" return hub.get_first( "$.eventCounts[?(@.deviceLabel=='%s')].detections", self._device_label) @property def available(self): """Return True if entity is available.""" return hub.get_first( "$.eventCounts[?(@.deviceLabel=='%s')]", self._device_label) is not None @property def unit_of_measurement(self): """Return the unit of measurement of this entity.""" return 'Mice' # pylint: disable=no-self-use def update(self): """Update the sensor.""" hub.update_overview()
apache-2.0
hippich/typescript
tests/cases/fourslash/quickInfoOnMergedInterfaces.ts
538
/// <reference path='fourslash.ts'/> ////module M { //// interface A<T> { //// (): string; //// (x: T): T; //// } //// interface A<T> { //// (x: T, y: number): T; //// <U>(x: U, y: T): U; //// } //// var a: A<boolean>; //// var r = a(); //// var r2 = a(true); //// var r3 = a(true, 2); //// var /*1*/r4 = a(1, true); ////} diagnostics.setEditValidation(IncrementalEditValidation.None); goTo.marker('1'); verify.quickInfoIs("number", undefined, "r4", "var");
apache-2.0
medicayun/medicayundicom
dcm4che14/branches/DCM4JBOSS_2_2_1_BRANCH/test/java/org/dcm4che/data/PackageTest.java
2324
/*$Id: PackageTest.java 3493 2002-07-14 16:03:36Z gunterze $*/ /***************************************************************************** * * * Copyright (c) 2002 by TIANI MEDGRAPH AG * * * * This file is part of dcm4che. * * * * This library is free software; you can redistribute it and/or modify it * * under the terms of the GNU Lesser General Public License as published * * by the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This library is distributed in the hope that it will be useful, but * * WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * * Lesser General Public License for more details. * * * * You should have received a copy of the GNU Lesser General Public * * License along with this library; if not, write to the Free Software * * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * *****************************************************************************/ package org.dcm4che.data; import java.io.*; import java.util.*; import junit.framework.*; /** * * @author gunter.zeilinger@tiani.com * @version 1.0.0 */ public class PackageTest extends Object { private PackageTest() { } public static void main (String[] args) { junit.textui.TestRunner.run (suite()); } public static Test suite() { TestSuite suite= new TestSuite(); suite.addTest(DcmParserTest.suite()); suite.addTest(DatasetTest.suite()); suite.addTest(DatasetSerializerTest.suite()); return suite; } }
apache-2.0
mikebrow/docker
vendor/github.com/docker/swarmkit/api/ca.pb.go
61733
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/docker/swarmkit/api/ca.proto /* Package api is a generated protocol buffer package. It is generated from these files: github.com/docker/swarmkit/api/ca.proto github.com/docker/swarmkit/api/control.proto github.com/docker/swarmkit/api/dispatcher.proto github.com/docker/swarmkit/api/health.proto github.com/docker/swarmkit/api/logbroker.proto github.com/docker/swarmkit/api/objects.proto github.com/docker/swarmkit/api/raft.proto github.com/docker/swarmkit/api/resource.proto github.com/docker/swarmkit/api/snapshot.proto github.com/docker/swarmkit/api/specs.proto github.com/docker/swarmkit/api/types.proto github.com/docker/swarmkit/api/watch.proto It has these top-level messages: NodeCertificateStatusRequest NodeCertificateStatusResponse IssueNodeCertificateRequest IssueNodeCertificateResponse GetRootCACertificateRequest GetRootCACertificateResponse GetUnlockKeyRequest GetUnlockKeyResponse GetNodeRequest GetNodeResponse ListNodesRequest ListNodesResponse UpdateNodeRequest UpdateNodeResponse RemoveNodeRequest RemoveNodeResponse GetTaskRequest GetTaskResponse RemoveTaskRequest RemoveTaskResponse ListTasksRequest ListTasksResponse CreateServiceRequest CreateServiceResponse GetServiceRequest GetServiceResponse UpdateServiceRequest UpdateServiceResponse RemoveServiceRequest RemoveServiceResponse ListServicesRequest ListServicesResponse ListServiceStatusesRequest ListServiceStatusesResponse CreateNetworkRequest CreateNetworkResponse GetNetworkRequest GetNetworkResponse RemoveNetworkRequest RemoveNetworkResponse ListNetworksRequest ListNetworksResponse GetClusterRequest GetClusterResponse ListClustersRequest ListClustersResponse KeyRotation UpdateClusterRequest UpdateClusterResponse GetSecretRequest GetSecretResponse UpdateSecretRequest UpdateSecretResponse ListSecretsRequest ListSecretsResponse CreateSecretRequest CreateSecretResponse RemoveSecretRequest RemoveSecretResponse GetConfigRequest 
GetConfigResponse UpdateConfigRequest UpdateConfigResponse ListConfigsRequest ListConfigsResponse CreateConfigRequest CreateConfigResponse RemoveConfigRequest RemoveConfigResponse CreateExtensionRequest CreateExtensionResponse RemoveExtensionRequest RemoveExtensionResponse GetExtensionRequest GetExtensionResponse CreateResourceRequest CreateResourceResponse RemoveResourceRequest RemoveResourceResponse UpdateResourceRequest UpdateResourceResponse GetResourceRequest GetResourceResponse ListResourcesRequest ListResourcesResponse SessionRequest SessionMessage HeartbeatRequest HeartbeatResponse UpdateTaskStatusRequest UpdateTaskStatusResponse TasksRequest TasksMessage AssignmentsRequest Assignment AssignmentChange AssignmentsMessage HealthCheckRequest HealthCheckResponse LogSubscriptionOptions LogSelector LogContext LogAttr LogMessage SubscribeLogsRequest SubscribeLogsMessage ListenSubscriptionsRequest SubscriptionMessage PublishLogsMessage PublishLogsResponse Meta Node Service Endpoint Task NetworkAttachment Network Cluster Secret Config Resource Extension RaftMember JoinRequest JoinResponse LeaveRequest LeaveResponse ProcessRaftMessageRequest ProcessRaftMessageResponse StreamRaftMessageRequest StreamRaftMessageResponse ResolveAddressRequest ResolveAddressResponse InternalRaftRequest StoreAction AttachNetworkRequest AttachNetworkResponse DetachNetworkRequest DetachNetworkResponse StoreSnapshot ClusterSnapshot Snapshot NodeSpec ServiceSpec ReplicatedService GlobalService TaskSpec ResourceReference GenericRuntimeSpec NetworkAttachmentSpec ContainerSpec EndpointSpec NetworkSpec ClusterSpec SecretSpec ConfigSpec Version IndexEntry Annotations NamedGenericResource DiscreteGenericResource GenericResource Resources ResourceRequirements Platform PluginDescription EngineDescription NodeDescription NodeTLSInfo RaftMemberStatus NodeStatus Image Mount RestartPolicy UpdateConfig UpdateStatus ContainerStatus PortStatus TaskStatus NetworkAttachmentConfig IPAMConfig PortConfig Driver 
IPAMOptions Peer WeightedPeer IssuanceStatus AcceptancePolicy ExternalCA CAConfig OrchestrationConfig TaskDefaults DispatcherConfig RaftConfig EncryptionConfig SpreadOver PlacementPreference Placement JoinTokens RootCA Certificate EncryptionKey ManagerStatus FileTarget RuntimeTarget SecretReference ConfigReference BlacklistedCertificate HealthConfig MaybeEncryptedRecord RootRotation Privileges Object SelectBySlot SelectByCustom SelectBy WatchRequest WatchMessage */ package api import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" import _ "github.com/gogo/protobuf/gogoproto" import _ "github.com/docker/swarmkit/protobuf/plugin" import deepcopy "github.com/docker/swarmkit/api/deepcopy" import context "golang.org/x/net/context" import grpc "google.golang.org/grpc" import raftselector "github.com/docker/swarmkit/manager/raftselector" import codes "google.golang.org/grpc/codes" import status "google.golang.org/grpc/status" import metadata "google.golang.org/grpc/metadata" import peer "google.golang.org/grpc/peer" import rafttime "time" import strings "strings" import reflect "reflect" import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type NodeCertificateStatusRequest struct { NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` } func (m *NodeCertificateStatusRequest) Reset() { *m = NodeCertificateStatusRequest{} } func (*NodeCertificateStatusRequest) ProtoMessage() {} func (*NodeCertificateStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{0} } type NodeCertificateStatusResponse struct { Status *IssuanceStatus `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` Certificate *Certificate `protobuf:"bytes,2,opt,name=certificate" json:"certificate,omitempty"` } func (m *NodeCertificateStatusResponse) Reset() { *m = NodeCertificateStatusResponse{} } func (*NodeCertificateStatusResponse) ProtoMessage() {} func (*NodeCertificateStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{1} } type IssueNodeCertificateRequest struct { // DEPRECATED: Role is now selected based on which secret is matched. Role NodeRole `protobuf:"varint,1,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"` // CSR is the certificate signing request. 
CSR []byte `protobuf:"bytes,2,opt,name=csr,proto3" json:"csr,omitempty"` // Token represents a user-provided string that is necessary for new // nodes to join the cluster Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"` // Availability allows a user to control the current scheduling status of a node Availability NodeSpec_Availability `protobuf:"varint,4,opt,name=availability,proto3,enum=docker.swarmkit.v1.NodeSpec_Availability" json:"availability,omitempty"` } func (m *IssueNodeCertificateRequest) Reset() { *m = IssueNodeCertificateRequest{} } func (*IssueNodeCertificateRequest) ProtoMessage() {} func (*IssueNodeCertificateRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{2} } type IssueNodeCertificateResponse struct { NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` NodeMembership NodeSpec_Membership `protobuf:"varint,2,opt,name=node_membership,json=nodeMembership,proto3,enum=docker.swarmkit.v1.NodeSpec_Membership" json:"node_membership,omitempty"` } func (m *IssueNodeCertificateResponse) Reset() { *m = IssueNodeCertificateResponse{} } func (*IssueNodeCertificateResponse) ProtoMessage() {} func (*IssueNodeCertificateResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{3} } type GetRootCACertificateRequest struct { } func (m *GetRootCACertificateRequest) Reset() { *m = GetRootCACertificateRequest{} } func (*GetRootCACertificateRequest) ProtoMessage() {} func (*GetRootCACertificateRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{4} } type GetRootCACertificateResponse struct { Certificate []byte `protobuf:"bytes,1,opt,name=certificate,proto3" json:"certificate,omitempty"` } func (m *GetRootCACertificateResponse) Reset() { *m = GetRootCACertificateResponse{} } func (*GetRootCACertificateResponse) ProtoMessage() {} func (*GetRootCACertificateResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{5} } type 
GetUnlockKeyRequest struct { } func (m *GetUnlockKeyRequest) Reset() { *m = GetUnlockKeyRequest{} } func (*GetUnlockKeyRequest) ProtoMessage() {} func (*GetUnlockKeyRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{6} } type GetUnlockKeyResponse struct { UnlockKey []byte `protobuf:"bytes,1,opt,name=unlock_key,json=unlockKey,proto3" json:"unlock_key,omitempty"` Version Version `protobuf:"bytes,2,opt,name=version" json:"version"` } func (m *GetUnlockKeyResponse) Reset() { *m = GetUnlockKeyResponse{} } func (*GetUnlockKeyResponse) ProtoMessage() {} func (*GetUnlockKeyResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{7} } func init() { proto.RegisterType((*NodeCertificateStatusRequest)(nil), "docker.swarmkit.v1.NodeCertificateStatusRequest") proto.RegisterType((*NodeCertificateStatusResponse)(nil), "docker.swarmkit.v1.NodeCertificateStatusResponse") proto.RegisterType((*IssueNodeCertificateRequest)(nil), "docker.swarmkit.v1.IssueNodeCertificateRequest") proto.RegisterType((*IssueNodeCertificateResponse)(nil), "docker.swarmkit.v1.IssueNodeCertificateResponse") proto.RegisterType((*GetRootCACertificateRequest)(nil), "docker.swarmkit.v1.GetRootCACertificateRequest") proto.RegisterType((*GetRootCACertificateResponse)(nil), "docker.swarmkit.v1.GetRootCACertificateResponse") proto.RegisterType((*GetUnlockKeyRequest)(nil), "docker.swarmkit.v1.GetUnlockKeyRequest") proto.RegisterType((*GetUnlockKeyResponse)(nil), "docker.swarmkit.v1.GetUnlockKeyResponse") } type authenticatedWrapperCAServer struct { local CAServer authorize func(context.Context, []string) error } func NewAuthenticatedWrapperCAServer(local CAServer, authorize func(context.Context, []string) error) CAServer { return &authenticatedWrapperCAServer{ local: local, authorize: authorize, } } func (p *authenticatedWrapperCAServer) GetRootCACertificate(ctx context.Context, r *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error) { return 
p.local.GetRootCACertificate(ctx, r) } func (p *authenticatedWrapperCAServer) GetUnlockKey(ctx context.Context, r *GetUnlockKeyRequest) (*GetUnlockKeyResponse, error) { if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { return nil, err } return p.local.GetUnlockKey(ctx, r) } type authenticatedWrapperNodeCAServer struct { local NodeCAServer authorize func(context.Context, []string) error } func NewAuthenticatedWrapperNodeCAServer(local NodeCAServer, authorize func(context.Context, []string) error) NodeCAServer { return &authenticatedWrapperNodeCAServer{ local: local, authorize: authorize, } } func (p *authenticatedWrapperNodeCAServer) IssueNodeCertificate(ctx context.Context, r *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error) { return p.local.IssueNodeCertificate(ctx, r) } func (p *authenticatedWrapperNodeCAServer) NodeCertificateStatus(ctx context.Context, r *NodeCertificateStatusRequest) (*NodeCertificateStatusResponse, error) { return p.local.NodeCertificateStatus(ctx, r) } func (m *NodeCertificateStatusRequest) Copy() *NodeCertificateStatusRequest { if m == nil { return nil } o := &NodeCertificateStatusRequest{} o.CopyFrom(m) return o } func (m *NodeCertificateStatusRequest) CopyFrom(src interface{}) { o := src.(*NodeCertificateStatusRequest) *m = *o } func (m *NodeCertificateStatusResponse) Copy() *NodeCertificateStatusResponse { if m == nil { return nil } o := &NodeCertificateStatusResponse{} o.CopyFrom(m) return o } func (m *NodeCertificateStatusResponse) CopyFrom(src interface{}) { o := src.(*NodeCertificateStatusResponse) *m = *o if o.Status != nil { m.Status = &IssuanceStatus{} deepcopy.Copy(m.Status, o.Status) } if o.Certificate != nil { m.Certificate = &Certificate{} deepcopy.Copy(m.Certificate, o.Certificate) } } func (m *IssueNodeCertificateRequest) Copy() *IssueNodeCertificateRequest { if m == nil { return nil } o := &IssueNodeCertificateRequest{} o.CopyFrom(m) return o } func (m *IssueNodeCertificateRequest) 
CopyFrom(src interface{}) { o := src.(*IssueNodeCertificateRequest) *m = *o if o.CSR != nil { m.CSR = make([]byte, len(o.CSR)) copy(m.CSR, o.CSR) } } func (m *IssueNodeCertificateResponse) Copy() *IssueNodeCertificateResponse { if m == nil { return nil } o := &IssueNodeCertificateResponse{} o.CopyFrom(m) return o } func (m *IssueNodeCertificateResponse) CopyFrom(src interface{}) { o := src.(*IssueNodeCertificateResponse) *m = *o } func (m *GetRootCACertificateRequest) Copy() *GetRootCACertificateRequest { if m == nil { return nil } o := &GetRootCACertificateRequest{} o.CopyFrom(m) return o } func (m *GetRootCACertificateRequest) CopyFrom(src interface{}) {} func (m *GetRootCACertificateResponse) Copy() *GetRootCACertificateResponse { if m == nil { return nil } o := &GetRootCACertificateResponse{} o.CopyFrom(m) return o } func (m *GetRootCACertificateResponse) CopyFrom(src interface{}) { o := src.(*GetRootCACertificateResponse) *m = *o if o.Certificate != nil { m.Certificate = make([]byte, len(o.Certificate)) copy(m.Certificate, o.Certificate) } } func (m *GetUnlockKeyRequest) Copy() *GetUnlockKeyRequest { if m == nil { return nil } o := &GetUnlockKeyRequest{} o.CopyFrom(m) return o } func (m *GetUnlockKeyRequest) CopyFrom(src interface{}) {} func (m *GetUnlockKeyResponse) Copy() *GetUnlockKeyResponse { if m == nil { return nil } o := &GetUnlockKeyResponse{} o.CopyFrom(m) return o } func (m *GetUnlockKeyResponse) CopyFrom(src interface{}) { o := src.(*GetUnlockKeyResponse) *m = *o if o.UnlockKey != nil { m.UnlockKey = make([]byte, len(o.UnlockKey)) copy(m.UnlockKey, o.UnlockKey) } deepcopy.Copy(&m.Version, &o.Version) } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. 
// NOTE(review): generated gRPC client/server bindings for the CA and
// NodeCA services — regenerate from ca.proto rather than hand-editing.
const _ = grpc.SupportPackageIsVersion4

// Client API for CA service

type CAClient interface {
	// GetRootCACertificate invokes /docker.swarmkit.v1.CA/GetRootCACertificate.
	GetRootCACertificate(ctx context.Context, in *GetRootCACertificateRequest, opts ...grpc.CallOption) (*GetRootCACertificateResponse, error)
	// GetUnlockKey returns the current unlock key for the cluster for the role of the client
	// asking.
	GetUnlockKey(ctx context.Context, in *GetUnlockKeyRequest, opts ...grpc.CallOption) (*GetUnlockKeyResponse, error)
}

// cAClient is the concrete CAClient over a single gRPC connection.
type cAClient struct {
	cc *grpc.ClientConn
}

// NewCAClient returns a CAClient bound to the given connection.
func NewCAClient(cc *grpc.ClientConn) CAClient {
	return &cAClient{cc}
}

func (c *cAClient) GetRootCACertificate(ctx context.Context, in *GetRootCACertificateRequest, opts ...grpc.CallOption) (*GetRootCACertificateResponse, error) {
	out := new(GetRootCACertificateResponse)
	err := grpc.Invoke(ctx, "/docker.swarmkit.v1.CA/GetRootCACertificate", in, out, c.cc, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *cAClient) GetUnlockKey(ctx context.Context, in *GetUnlockKeyRequest, opts ...grpc.CallOption) (*GetUnlockKeyResponse, error) {
	out := new(GetUnlockKeyResponse)
	err := grpc.Invoke(ctx, "/docker.swarmkit.v1.CA/GetUnlockKey", in, out, c.cc, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// Server API for CA service

type CAServer interface {
	GetRootCACertificate(context.Context, *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error)
	// GetUnlockKey returns the current unlock key for the cluster for the role of the client
	// asking.
	GetUnlockKey(context.Context, *GetUnlockKeyRequest) (*GetUnlockKeyResponse, error)
}

// RegisterCAServer registers srv's handlers under the CA service
// descriptor on s.
func RegisterCAServer(s *grpc.Server, srv CAServer) {
	s.RegisterService(&_CA_serviceDesc, srv)
}

// _CA_GetRootCACertificate_Handler decodes the request, then calls the
// server either directly or through the configured unary interceptor.
func _CA_GetRootCACertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetRootCACertificateRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(CAServer).GetRootCACertificate(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/docker.swarmkit.v1.CA/GetRootCACertificate",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(CAServer).GetRootCACertificate(ctx, req.(*GetRootCACertificateRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _CA_GetUnlockKey_Handler mirrors the handler above for GetUnlockKey.
func _CA_GetUnlockKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetUnlockKeyRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(CAServer).GetUnlockKey(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/docker.swarmkit.v1.CA/GetUnlockKey",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(CAServer).GetUnlockKey(ctx, req.(*GetUnlockKeyRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _CA_serviceDesc maps method names to handlers for the CA service.
var _CA_serviceDesc = grpc.ServiceDesc{
	ServiceName: "docker.swarmkit.v1.CA",
	HandlerType: (*CAServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "GetRootCACertificate",
			Handler:    _CA_GetRootCACertificate_Handler,
		},
		{
			MethodName: "GetUnlockKey",
			Handler:    _CA_GetUnlockKey_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "github.com/docker/swarmkit/api/ca.proto",
}

// Client API for NodeCA service

type NodeCAClient interface {
	IssueNodeCertificate(ctx context.Context, in *IssueNodeCertificateRequest, opts ...grpc.CallOption) (*IssueNodeCertificateResponse, error)
	NodeCertificateStatus(ctx context.Context, in *NodeCertificateStatusRequest, opts ...grpc.CallOption) (*NodeCertificateStatusResponse, error)
}

// nodeCAClient is the concrete NodeCAClient over a single gRPC connection.
type nodeCAClient struct {
	cc *grpc.ClientConn
}

// NewNodeCAClient returns a NodeCAClient bound to the given connection.
func NewNodeCAClient(cc *grpc.ClientConn) NodeCAClient {
	return &nodeCAClient{cc}
}

func (c *nodeCAClient) IssueNodeCertificate(ctx context.Context, in *IssueNodeCertificateRequest, opts ...grpc.CallOption) (*IssueNodeCertificateResponse, error) {
	out := new(IssueNodeCertificateResponse)
	err := grpc.Invoke(ctx, "/docker.swarmkit.v1.NodeCA/IssueNodeCertificate", in, out, c.cc, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *nodeCAClient) NodeCertificateStatus(ctx context.Context, in *NodeCertificateStatusRequest, opts ...grpc.CallOption) (*NodeCertificateStatusResponse, error) {
	out := new(NodeCertificateStatusResponse)
	err := grpc.Invoke(ctx, "/docker.swarmkit.v1.NodeCA/NodeCertificateStatus", in, out, c.cc, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// Server API for NodeCA service

type NodeCAServer interface {
	IssueNodeCertificate(context.Context, *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error)
	NodeCertificateStatus(context.Context, *NodeCertificateStatusRequest) (*NodeCertificateStatusResponse, error)
}

// RegisterNodeCAServer registers srv's handlers under the NodeCA service
// descriptor on s.
func RegisterNodeCAServer(s *grpc.Server, srv NodeCAServer) {
	s.RegisterService(&_NodeCA_serviceDesc, srv)
}

// _NodeCA_IssueNodeCertificate_Handler decodes the request, then calls the
// server either directly or through the configured unary interceptor.
func _NodeCA_IssueNodeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(IssueNodeCertificateRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(NodeCAServer).IssueNodeCertificate(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/docker.swarmkit.v1.NodeCA/IssueNodeCertificate",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(NodeCAServer).IssueNodeCertificate(ctx, req.(*IssueNodeCertificateRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _NodeCA_NodeCertificateStatus_Handler mirrors the handler above for
// NodeCertificateStatus.
func _NodeCA_NodeCertificateStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(NodeCertificateStatusRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(NodeCAServer).NodeCertificateStatus(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/docker.swarmkit.v1.NodeCA/NodeCertificateStatus",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(NodeCAServer).NodeCertificateStatus(ctx, req.(*NodeCertificateStatusRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _NodeCA_serviceDesc maps method names to handlers for the NodeCA service.
var _NodeCA_serviceDesc = grpc.ServiceDesc{
	ServiceName: "docker.swarmkit.v1.NodeCA",
	HandlerType: (*NodeCAServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "IssueNodeCertificate",
			Handler:    _NodeCA_IssueNodeCertificate_Handler,
		},
		{
			MethodName: "NodeCertificateStatus",
			Handler:    _NodeCA_NodeCertificateStatus_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "github.com/docker/swarmkit/api/ca.proto",
}

// Marshal serializes m in the protobuf wire format, sizing the buffer via
// Size.
func (m *NodeCertificateStatusRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m into dAtA (which must be pre-sized via Size) and
// returns the number of bytes written.
func (m *NodeCertificateStatusRequest) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.NodeID) > 0 {
		dAtA[i] = 0xa // tag: field 1, wire type 2 (length-delimited)
		i++
		i = encodeVarintCa(dAtA, i, uint64(len(m.NodeID)))
		i += copy(dAtA[i:], m.NodeID)
	}
	return i, nil
}

// Marshal serializes m in the protobuf wire format, sizing the buffer via
// Size.
func (m *NodeCertificateStatusResponse) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo writes m into dAtA; the body continues in the next chunk of
// this file.
func (m *NodeCertificateStatusResponse) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Status !=
nil { dAtA[i] = 0xa i++ i = encodeVarintCa(dAtA, i, uint64(m.Status.Size())) n1, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n1 } if m.Certificate != nil { dAtA[i] = 0x12 i++ i = encodeVarintCa(dAtA, i, uint64(m.Certificate.Size())) n2, err := m.Certificate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n2 } return i, nil } func (m *IssueNodeCertificateRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *IssueNodeCertificateRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Role != 0 { dAtA[i] = 0x8 i++ i = encodeVarintCa(dAtA, i, uint64(m.Role)) } if len(m.CSR) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintCa(dAtA, i, uint64(len(m.CSR))) i += copy(dAtA[i:], m.CSR) } if len(m.Token) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintCa(dAtA, i, uint64(len(m.Token))) i += copy(dAtA[i:], m.Token) } if m.Availability != 0 { dAtA[i] = 0x20 i++ i = encodeVarintCa(dAtA, i, uint64(m.Availability)) } return i, nil } func (m *IssueNodeCertificateResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *IssueNodeCertificateResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.NodeID) > 0 { dAtA[i] = 0xa i++ i = encodeVarintCa(dAtA, i, uint64(len(m.NodeID))) i += copy(dAtA[i:], m.NodeID) } if m.NodeMembership != 0 { dAtA[i] = 0x10 i++ i = encodeVarintCa(dAtA, i, uint64(m.NodeMembership)) } return i, nil } func (m *GetRootCACertificateRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *GetRootCACertificateRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l return i, nil } func (m 
*GetRootCACertificateResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *GetRootCACertificateResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Certificate) > 0 { dAtA[i] = 0xa i++ i = encodeVarintCa(dAtA, i, uint64(len(m.Certificate))) i += copy(dAtA[i:], m.Certificate) } return i, nil } func (m *GetUnlockKeyRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *GetUnlockKeyRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l return i, nil } func (m *GetUnlockKeyResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *GetUnlockKeyResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.UnlockKey) > 0 { dAtA[i] = 0xa i++ i = encodeVarintCa(dAtA, i, uint64(len(m.UnlockKey))) i += copy(dAtA[i:], m.UnlockKey) } dAtA[i] = 0x12 i++ i = encodeVarintCa(dAtA, i, uint64(m.Version.Size())) n3, err := m.Version.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n3 return i, nil } func encodeVarintCa(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return offset + 1 } type raftProxyCAServer struct { local CAServer connSelector raftselector.ConnProvider localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) } func NewRaftProxyCAServer(local CAServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) CAServer { redirectChecker := func(ctx context.Context) (context.Context, error) { p, ok := peer.FromContext(ctx) if !ok { return 
ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") } addr := p.Addr.String() md, ok := metadata.FromIncomingContext(ctx) if ok && len(md["redirect"]) != 0 { return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) } if !ok { md = metadata.New(map[string]string{}) } md["redirect"] = append(md["redirect"], addr) return metadata.NewOutgoingContext(ctx, md), nil } remoteMods := []func(context.Context) (context.Context, error){redirectChecker} remoteMods = append(remoteMods, remoteCtxMod) var localMods []func(context.Context) (context.Context, error) if localCtxMod != nil { localMods = []func(context.Context) (context.Context, error){localCtxMod} } return &raftProxyCAServer{ local: local, connSelector: connSelector, localCtxMods: localMods, remoteCtxMods: remoteMods, } } func (p *raftProxyCAServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { var err error for _, mod := range ctxMods { ctx, err = mod(ctx) if err != nil { return ctx, err } } return ctx, nil } func (p *raftProxyCAServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { ticker := rafttime.NewTicker(500 * rafttime.Millisecond) defer ticker.Stop() for { select { case <-ticker.C: conn, err := p.connSelector.LeaderConn(ctx) if err != nil { return nil, err } client := NewHealthClient(conn) resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) if err != nil || resp.Status != HealthCheckResponse_SERVING { continue } return conn, nil case <-ctx.Done(): return nil, ctx.Err() } } } func (p *raftProxyCAServer) GetRootCACertificate(ctx context.Context, r *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error) { conn, err := p.connSelector.LeaderConn(ctx) if err != nil { if err == raftselector.ErrIsLeader { ctx, err = p.runCtxMods(ctx, p.localCtxMods) if err != nil { return nil, err } return 
p.local.GetRootCACertificate(ctx, r) } return nil, err } modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) if err != nil { return nil, err } resp, err := NewCAClient(conn).GetRootCACertificate(modCtx, r) if err != nil { if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { return resp, err } conn, err := p.pollNewLeaderConn(ctx) if err != nil { if err == raftselector.ErrIsLeader { return p.local.GetRootCACertificate(ctx, r) } return nil, err } return NewCAClient(conn).GetRootCACertificate(modCtx, r) } return resp, err } func (p *raftProxyCAServer) GetUnlockKey(ctx context.Context, r *GetUnlockKeyRequest) (*GetUnlockKeyResponse, error) { conn, err := p.connSelector.LeaderConn(ctx) if err != nil { if err == raftselector.ErrIsLeader { ctx, err = p.runCtxMods(ctx, p.localCtxMods) if err != nil { return nil, err } return p.local.GetUnlockKey(ctx, r) } return nil, err } modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) if err != nil { return nil, err } resp, err := NewCAClient(conn).GetUnlockKey(modCtx, r) if err != nil { if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { return resp, err } conn, err := p.pollNewLeaderConn(ctx) if err != nil { if err == raftselector.ErrIsLeader { return p.local.GetUnlockKey(ctx, r) } return nil, err } return NewCAClient(conn).GetUnlockKey(modCtx, r) } return resp, err } type raftProxyNodeCAServer struct { local NodeCAServer connSelector raftselector.ConnProvider localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) } func NewRaftProxyNodeCAServer(local NodeCAServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) NodeCAServer { redirectChecker := func(ctx context.Context) (context.Context, error) { p, ok := 
peer.FromContext(ctx) if !ok { return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") } addr := p.Addr.String() md, ok := metadata.FromIncomingContext(ctx) if ok && len(md["redirect"]) != 0 { return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) } if !ok { md = metadata.New(map[string]string{}) } md["redirect"] = append(md["redirect"], addr) return metadata.NewOutgoingContext(ctx, md), nil } remoteMods := []func(context.Context) (context.Context, error){redirectChecker} remoteMods = append(remoteMods, remoteCtxMod) var localMods []func(context.Context) (context.Context, error) if localCtxMod != nil { localMods = []func(context.Context) (context.Context, error){localCtxMod} } return &raftProxyNodeCAServer{ local: local, connSelector: connSelector, localCtxMods: localMods, remoteCtxMods: remoteMods, } } func (p *raftProxyNodeCAServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { var err error for _, mod := range ctxMods { ctx, err = mod(ctx) if err != nil { return ctx, err } } return ctx, nil } func (p *raftProxyNodeCAServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { ticker := rafttime.NewTicker(500 * rafttime.Millisecond) defer ticker.Stop() for { select { case <-ticker.C: conn, err := p.connSelector.LeaderConn(ctx) if err != nil { return nil, err } client := NewHealthClient(conn) resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) if err != nil || resp.Status != HealthCheckResponse_SERVING { continue } return conn, nil case <-ctx.Done(): return nil, ctx.Err() } } } func (p *raftProxyNodeCAServer) IssueNodeCertificate(ctx context.Context, r *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error) { conn, err := p.connSelector.LeaderConn(ctx) if err != nil { if err == raftselector.ErrIsLeader { ctx, err = p.runCtxMods(ctx, p.localCtxMods) if err != nil { 
return nil, err } return p.local.IssueNodeCertificate(ctx, r) } return nil, err } modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) if err != nil { return nil, err } resp, err := NewNodeCAClient(conn).IssueNodeCertificate(modCtx, r) if err != nil { if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { return resp, err } conn, err := p.pollNewLeaderConn(ctx) if err != nil { if err == raftselector.ErrIsLeader { return p.local.IssueNodeCertificate(ctx, r) } return nil, err } return NewNodeCAClient(conn).IssueNodeCertificate(modCtx, r) } return resp, err } func (p *raftProxyNodeCAServer) NodeCertificateStatus(ctx context.Context, r *NodeCertificateStatusRequest) (*NodeCertificateStatusResponse, error) { conn, err := p.connSelector.LeaderConn(ctx) if err != nil { if err == raftselector.ErrIsLeader { ctx, err = p.runCtxMods(ctx, p.localCtxMods) if err != nil { return nil, err } return p.local.NodeCertificateStatus(ctx, r) } return nil, err } modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) if err != nil { return nil, err } resp, err := NewNodeCAClient(conn).NodeCertificateStatus(modCtx, r) if err != nil { if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { return resp, err } conn, err := p.pollNewLeaderConn(ctx) if err != nil { if err == raftselector.ErrIsLeader { return p.local.NodeCertificateStatus(ctx, r) } return nil, err } return NewNodeCAClient(conn).NodeCertificateStatus(modCtx, r) } return resp, err } func (m *NodeCertificateStatusRequest) Size() (n int) { var l int _ = l l = len(m.NodeID) if l > 0 { n += 1 + l + sovCa(uint64(l)) } return n } func (m *NodeCertificateStatusResponse) Size() (n int) { var l int _ = l if m.Status != nil { l = m.Status.Size() n += 1 + l + sovCa(uint64(l)) } if m.Certificate != nil { l = 
m.Certificate.Size() n += 1 + l + sovCa(uint64(l)) } return n } func (m *IssueNodeCertificateRequest) Size() (n int) { var l int _ = l if m.Role != 0 { n += 1 + sovCa(uint64(m.Role)) } l = len(m.CSR) if l > 0 { n += 1 + l + sovCa(uint64(l)) } l = len(m.Token) if l > 0 { n += 1 + l + sovCa(uint64(l)) } if m.Availability != 0 { n += 1 + sovCa(uint64(m.Availability)) } return n } func (m *IssueNodeCertificateResponse) Size() (n int) { var l int _ = l l = len(m.NodeID) if l > 0 { n += 1 + l + sovCa(uint64(l)) } if m.NodeMembership != 0 { n += 1 + sovCa(uint64(m.NodeMembership)) } return n } func (m *GetRootCACertificateRequest) Size() (n int) { var l int _ = l return n } func (m *GetRootCACertificateResponse) Size() (n int) { var l int _ = l l = len(m.Certificate) if l > 0 { n += 1 + l + sovCa(uint64(l)) } return n } func (m *GetUnlockKeyRequest) Size() (n int) { var l int _ = l return n } func (m *GetUnlockKeyResponse) Size() (n int) { var l int _ = l l = len(m.UnlockKey) if l > 0 { n += 1 + l + sovCa(uint64(l)) } l = m.Version.Size() n += 1 + l + sovCa(uint64(l)) return n } func sovCa(x uint64) (n int) { for { n++ x >>= 7 if x == 0 { break } } return n } func sozCa(x uint64) (n int) { return sovCa(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (this *NodeCertificateStatusRequest) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&NodeCertificateStatusRequest{`, `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, `}`, }, "") return s } func (this *NodeCertificateStatusResponse) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&NodeCertificateStatusResponse{`, `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "IssuanceStatus", "IssuanceStatus", 1) + `,`, `Certificate:` + strings.Replace(fmt.Sprintf("%v", this.Certificate), "Certificate", "Certificate", 1) + `,`, `}`, }, "") return s } func (this *IssueNodeCertificateRequest) String() string { if this == nil { return "nil" } s := 
strings.Join([]string{`&IssueNodeCertificateRequest{`, `Role:` + fmt.Sprintf("%v", this.Role) + `,`, `CSR:` + fmt.Sprintf("%v", this.CSR) + `,`, `Token:` + fmt.Sprintf("%v", this.Token) + `,`, `Availability:` + fmt.Sprintf("%v", this.Availability) + `,`, `}`, }, "") return s } func (this *IssueNodeCertificateResponse) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&IssueNodeCertificateResponse{`, `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, `NodeMembership:` + fmt.Sprintf("%v", this.NodeMembership) + `,`, `}`, }, "") return s } func (this *GetRootCACertificateRequest) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&GetRootCACertificateRequest{`, `}`, }, "") return s } func (this *GetRootCACertificateResponse) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&GetRootCACertificateResponse{`, `Certificate:` + fmt.Sprintf("%v", this.Certificate) + `,`, `}`, }, "") return s } func (this *GetUnlockKeyRequest) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&GetUnlockKeyRequest{`, `}`, }, "") return s } func (this *GetUnlockKeyResponse) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&GetUnlockKeyResponse{`, `UnlockKey:` + fmt.Sprintf("%v", this.UnlockKey) + `,`, `Version:` + strings.Replace(strings.Replace(this.Version.String(), "Version", "Version", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } func valueToStringCa(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { return "nil" } pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } func (m *NodeCertificateStatusRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowCa } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } 
fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: NodeCertificateStatusRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: NodeCertificateStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowCa } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthCa } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.NodeID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipCa(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthCa } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *NodeCertificateStatusResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowCa } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: NodeCertificateStatusResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: NodeCertificateStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowCa } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthCa } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Status == nil { m.Status = &IssuanceStatus{} } if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowCa } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthCa } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Certificate == nil { m.Certificate = &Certificate{} } if err := m.Certificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipCa(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthCa } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *IssueNodeCertificateRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowCa } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: IssueNodeCertificateRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: IssueNodeCertificateRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return 
fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) } m.Role = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowCa } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Role |= (NodeRole(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field CSR", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowCa } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthCa } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.CSR = append(m.CSR[:0], dAtA[iNdEx:postIndex]...) if m.CSR == nil { m.CSR = []byte{} } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowCa } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthCa } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Token = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Availability", wireType) } m.Availability = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowCa } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Availability |= (NodeSpec_Availability(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipCa(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthCa } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { 
return io.ErrUnexpectedEOF } return nil } func (m *IssueNodeCertificateResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowCa } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: IssueNodeCertificateResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: IssueNodeCertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowCa } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthCa } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.NodeID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field NodeMembership", wireType) } m.NodeMembership = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowCa } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.NodeMembership |= (NodeSpec_Membership(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipCa(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthCa } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GetRootCACertificateRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 
0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowCa } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GetRootCACertificateRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GetRootCACertificateRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: iNdEx = preIndex skippy, err := skipCa(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthCa } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GetRootCACertificateResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowCa } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GetRootCACertificateResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GetRootCACertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowCa } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthCa } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Certificate = 
append(m.Certificate[:0], dAtA[iNdEx:postIndex]...) if m.Certificate == nil { m.Certificate = []byte{} } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipCa(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthCa } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GetUnlockKeyRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowCa } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GetUnlockKeyRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GetUnlockKeyRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: iNdEx = preIndex skippy, err := skipCa(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthCa } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GetUnlockKeyResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowCa } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GetUnlockKeyResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GetUnlockKeyResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: 
wrong wireType = %d for field UnlockKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowCa } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthCa } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.UnlockKey = append(m.UnlockKey[:0], dAtA[iNdEx:postIndex]...) if m.UnlockKey == nil { m.UnlockKey = []byte{} } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowCa } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthCa } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipCa(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthCa } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipCa(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowCa } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowCa } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } return iNdEx, nil case 1: iNdEx += 8 return iNdEx, nil case 2: var length int for shift := uint(0); ; 
shift += 7 { if shift >= 64 { return 0, ErrIntOverflowCa } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } iNdEx += length if length < 0 { return 0, ErrInvalidLengthCa } return iNdEx, nil case 3: for { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowCa } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } innerWireType := int(innerWire & 0x7) if innerWireType == 4 { break } next, err := skipCa(dAtA[start:]) if err != nil { return 0, err } iNdEx = start + next } return iNdEx, nil case 4: return iNdEx, nil case 5: iNdEx += 4 return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } } panic("unreachable") } var ( ErrInvalidLengthCa = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowCa = fmt.Errorf("proto: integer overflow") ) func init() { proto.RegisterFile("github.com/docker/swarmkit/api/ca.proto", fileDescriptorCa) } var fileDescriptorCa = []byte{ // 638 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xc1, 0x6e, 0xd3, 0x4c, 0x10, 0xee, 0xba, 0xfd, 0xd3, 0xbf, 0xd3, 0xd0, 0xa2, 0xa5, 0x95, 0x4c, 0x9a, 0x3a, 0x95, 0x39, 0xb4, 0x20, 0x61, 0xb7, 0x01, 0x09, 0x09, 0x2e, 0x24, 0x41, 0xaa, 0x2a, 0x54, 0x84, 0xb6, 0x82, 0x6b, 0xe5, 0x38, 0xdb, 0x74, 0x15, 0xc7, 0x6b, 0xbc, 0xeb, 0x42, 0x6e, 0x48, 0x20, 0xde, 0x00, 0xc1, 0x89, 0x47, 0xe0, 0x39, 0x2a, 0x4e, 0x48, 0x5c, 0x38, 0x55, 0xd4, 0x0f, 0xc0, 0x33, 0x20, 0xaf, 0x6d, 0x9a, 0xb4, 0x4e, 0x5a, 0x4e, 0xf1, 0xce, 0x7c, 0xdf, 0x37, 0x33, 0xdf, 0x4e, 0x16, 0xd6, 0xbb, 0x4c, 0x1e, 0x46, 0x6d, 0xcb, 0xe5, 0x7d, 0xbb, 0xc3, 0xdd, 0x1e, 0x0d, 0x6d, 0xf1, 0xda, 0x09, 0xfb, 0x3d, 0x26, 0x6d, 0x27, 0x60, 0xb6, 0xeb, 0x58, 0x41, 0xc8, 0x25, 0xc7, 0x38, 0xcd, 0x5a, 0x79, 
0xd6, 0x3a, 0xda, 0xaa, 0xdc, 0xb9, 0x84, 0x2c, 0x07, 0x01, 0x15, 0x29, 0xff, 0x52, 0xac, 0x08, 0xa8, 0x9b, 0x63, 0x97, 0xba, 0xbc, 0xcb, 0xd5, 0xa7, 0x9d, 0x7c, 0x65, 0xd1, 0x07, 0x13, 0x14, 0x14, 0xa2, 0x1d, 0x1d, 0xd8, 0x81, 0x17, 0x75, 0x99, 0x9f, 0xfd, 0xa4, 0x44, 0xb3, 0x05, 0xd5, 0x67, 0xbc, 0x43, 0x5b, 0x34, 0x94, 0xec, 0x80, 0xb9, 0x8e, 0xa4, 0x7b, 0xd2, 0x91, 0x91, 0x20, 0xf4, 0x55, 0x44, 0x85, 0xc4, 0xb7, 0x60, 0xd6, 0xe7, 0x1d, 0xba, 0xcf, 0x3a, 0x3a, 0x5a, 0x43, 0x1b, 0x73, 0x4d, 0x88, 0x4f, 0x6a, 0xa5, 0x84, 0xb2, 0xf3, 0x84, 0x94, 0x92, 0xd4, 0x4e, 0xc7, 0xfc, 0x82, 0x60, 0x75, 0x8c, 0x8a, 0x08, 0xb8, 0x2f, 0x28, 0x7e, 0x08, 0x25, 0xa1, 0x22, 0x4a, 0x65, 0xbe, 0x6e, 0x5a, 0x17, 0x2d, 0xb3, 0x76, 0x84, 0x88, 0x1c, 0xdf, 0xcd, 0xb9, 0x19, 0x03, 0x37, 0x60, 0xde, 0x3d, 0x13, 0xd6, 0x35, 0x25, 0x50, 0x2b, 0x12, 0x18, 0xaa, 0x4f, 0x86, 0x39, 0xe6, 0x0f, 0x04, 0x2b, 0x89, 0x3a, 0x3d, 0xd7, 0x65, 0x3e, 0xe5, 0x7d, 0x98, 0x09, 0xb9, 0x47, 0x55, 0x73, 0x0b, 0xf5, 0x6a, 0x91, 0x76, 0xc2, 0x24, 0xdc, 0xa3, 0x4d, 0x4d, 0x47, 0x44, 0xa1, 0xf1, 0x4d, 0x98, 0x76, 0x45, 0xa8, 0x1a, 0x2a, 0x37, 0x67, 0xe3, 0x93, 0xda, 0x74, 0x6b, 0x8f, 0x90, 0x24, 0x86, 0x97, 0xe0, 0x3f, 0xc9, 0x7b, 0xd4, 0xd7, 0xa7, 0x13, 0xd3, 0x48, 0x7a, 0xc0, 0xbb, 0x50, 0x76, 0x8e, 0x1c, 0xe6, 0x39, 0x6d, 0xe6, 0x31, 0x39, 0xd0, 0x67, 0x54, 0xb9, 0xdb, 0xe3, 0xca, 0xed, 0x05, 0xd4, 0xb5, 0x1a, 0x43, 0x04, 0x32, 0x42, 0x37, 0x3f, 0x22, 0xa8, 0x16, 0x4f, 0x95, 0xb9, 0x7e, 0x95, 0xcb, 0xc3, 0xcf, 0x61, 0x51, 0x81, 0xfa, 0xb4, 0xdf, 0xa6, 0xa1, 0x38, 0x64, 0x81, 0x9a, 0x68, 0xa1, 0xbe, 0x3e, 0xb1, 0xaf, 0xdd, 0xbf, 0x70, 0xb2, 0x90, 0xf0, 0xcf, 0xce, 0xe6, 0x2a, 0xac, 0x6c, 0x53, 0x49, 0x38, 0x97, 0xad, 0xc6, 0x45, 0xb3, 0xcd, 0xc7, 0x50, 0x2d, 0x4e, 0x67, 0x5d, 0xaf, 0x8d, 0xde, 0x77, 0xd2, 0x79, 0x79, 0xf4, 0x3a, 0x97, 0xe1, 0xc6, 0x36, 0x95, 0x2f, 0x7c, 0x8f, 0xbb, 0xbd, 0xa7, 0x74, 0x90, 0x0b, 0x87, 0xb0, 0x34, 0x1a, 0xce, 0x04, 0x57, 0x01, 0x22, 0x15, 0xdc, 0xef, 0xd1, 0x41, 0xa6, 0x37, 0x17, 
0xe5, 0x30, 0xfc, 0x08, 0x66, 0x8f, 0x68, 0x28, 0x18, 0xf7, 0xb3, 0xdd, 0x5a, 0x29, 0x1a, 0xfc, 0x65, 0x0a, 0x69, 0xce, 0x1c, 0x9f, 0xd4, 0xa6, 0x48, 0xce, 0xa8, 0xbf, 0xd7, 0x40, 0x6b, 0x35, 0xf0, 0x3b, 0xa4, 0x6a, 0x5f, 0x18, 0x0a, 0xdb, 0x45, 0x5a, 0x13, 0xdc, 0xa9, 0x6c, 0x5e, 0x9d, 0x90, 0x8e, 0x67, 0xfe, 0xff, 0xed, 0xeb, 0xef, 0xcf, 0x9a, 0x76, 0x1d, 0xe1, 0x37, 0x50, 0x1e, 0x36, 0x00, 0xaf, 0x8f, 0xd1, 0x3a, 0xef, 0x5c, 0x65, 0xe3, 0x72, 0x60, 0x56, 0x6c, 0x59, 0x15, 0x5b, 0x84, 0x6b, 0x0a, 0x79, 0xb7, 0xef, 0xf8, 0x4e, 0x97, 0x86, 0xf5, 0x4f, 0x1a, 0xa8, 0xbd, 0xca, 0xac, 0x28, 0xda, 0xca, 0x62, 0x2b, 0x26, 0xfc, 0x2b, 0x8b, 0xad, 0x98, 0xb4, 0xf0, 0x43, 0x56, 0x7c, 0x40, 0xb0, 0x5c, 0xf8, 0x24, 0xe1, 0xcd, 0x71, 0x6b, 0x3d, 0xee, 0x0d, 0xac, 0x6c, 0xfd, 0x03, 0xe3, 0x7c, 0x23, 0x4d, 0xfd, 0xf8, 0xd4, 0x98, 0xfa, 0x79, 0x6a, 0x4c, 0xbd, 0x8d, 0x0d, 0x74, 0x1c, 0x1b, 0xe8, 0x7b, 0x6c, 0xa0, 0x5f, 0xb1, 0x81, 0xda, 0x25, 0xf5, 0x02, 0xdf, 0xfb, 0x13, 0x00, 0x00, 0xff, 0xff, 0xe1, 0xda, 0xca, 0xba, 0x67, 0x06, 0x00, 0x00, }
apache-2.0
nvwls/chef
lib/chef/formatters/error_description.rb
2426
#
# Author:: Tyler Cloke (<tyler@chef.io>)
#
# Copyright:: Copyright (c) Chef Software Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

require_relative "../version"

class Chef
  module Formatters

    # == Formatters::ErrorDescription
    # Class for displaying errors on STDOUT.
    #
    # Collects titled sections of error text and renders them either as
    # console output (#display) or as a plain Hash (#for_json).
    class ErrorDescription

      # Ordered list of one-entry { heading => text } hashes added via #section.
      attr_reader :sections

      def initialize(title)
        @title = title
        @sections = []
      end

      # Append a section; nil text is stored as an empty string.
      def section(heading, text)
        @sections << { heading => (text || "") }
      end

      # Render the title banner, every collected section, and a trailing
      # "System Info:" section to +out+. The :red argument is forwarded to
      # out.puts — presumably a color hint for the outputter; confirm
      # against the outputter API.
      def display(out)
        out.puts "=" * 80
        out.puts @title, :red
        out.puts "=" * 80
        out.puts "\n"
        sections.each do |section|
          section.each do |heading, text|
            display_section(heading, text, out)
          end
        end
        display_section("System Info:", error_context_info, out)
      end

      # Hash form of the description, e.g. for JSON serialization.
      def for_json
        {
          "title" => @title,
          "sections" => @sections,
        }
      end

      private

      # Print one heading, an underline of matching length, and the text.
      def display_section(heading, text, out)
        out.puts heading
        out.puts "-" * heading.size
        out.puts text
        out.puts "\n"
      end

      # Build the body of the "System Info:" section: one "key=value" line
      # per entry (Chef version, node platform when available, Ruby build,
      # program name, and the entry-point script path).
      def error_context_info
        context_info = { chef_version: Chef::VERSION }
        if Chef.node
          context_info[:platform] = Chef.node["platform"]
          context_info[:platform_version] = Chef.node["platform_version"]
        end
        # A string like "ruby 2.3.1p112 (2016-04-26 revision 54768) [x86_64-darwin15]"
        context_info[:ruby] = RUBY_DESCRIPTION
        # The argv[0] value.
        context_info[:program_name] = $PROGRAM_NAME
        # This is kind of wonky but it's the only way to get the entry path script.
        context_info[:executable] = File.realpath(caller.last[/^(.*):\d+:in /, 1])
        context_info.map { |k, v| "#{k}=#{v}" }.join("\n")
      end
    end
  end
end
apache-2.0
bbossgroups/bboss
bboss-core/test/org/frameworkset/spi/properties/injectbean/InjectServiceInf.java
339
package org.frameworkset.spi.properties.injectbean;

/**
 * Read-only accessor contract for the inject-bean property test fixtures.
 * Exposes the primitive, bean, and service references wired into an
 * implementing component. Interface methods are implicitly public and
 * abstract, so the redundant modifiers have been dropped.
 */
public interface InjectServiceInf {

    /** @return the injected int property value */
    int getTest_int();

    /** @return the injected {@code Inject} bean */
    Inject getTest_inject();

    /** @return the injected reference attribute value */
    String getRefattr();

    /** @return the service obtained via reference */
    ServiceInf getRefservice();

    /** @return the service obtained via direct reference */
    ServiceInf getRefservice_direct();
}
apache-2.0
srvaroa/RxJava
rxjava-core/src/main/java/rx/observers/Observers.java
6879
/**
 * Copyright 2014 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package rx.observers;

import rx.Observer;
import rx.exceptions.OnErrorNotImplementedException;
import rx.functions.Action0;
import rx.functions.Action1;

/**
 * Helper methods and utilities for creating and working with {@link Observer} objects.
 */
public class Observers {

    /**
     * Single shared inert observer instance: {@code onNext} and {@code onCompleted} are
     * no-ops, while {@code onError} rethrows as {@link OnErrorNotImplementedException}.
     * Safe to share across all type parameters because it never touches its argument.
     */
    private static final Observer<Object> EMPTY = new Observer<Object>() {

        @Override
        public final void onCompleted() {
            // do nothing
        }

        @Override
        public final void onError(Throwable e) {
            throw new OnErrorNotImplementedException(e);
        }

        @Override
        public final void onNext(Object args) {
            // do nothing
        }

    };

    /**
     * Returns an inert {@link Observer} that does nothing in response to the emissions or notifications from
     * any {@code Observable} it subscribes to. This is different, however, from an {@link EmptyObserver}, in
     * that it will throw an exception if its {@link Observer#onError onError} method is called (whereas
     * {@code EmptyObserver} will swallow the error in such a case).
     *
     * @return an inert {@code Observer}
     */
    @SuppressWarnings("unchecked")
    public static <T> Observer<T> empty() {
        return (Observer<T>) EMPTY;
    }

    /**
     * Creates an {@link Observer} that receives the emissions of any {@code Observable} it subscribes to via
     * {@link Observer#onNext onNext} but ignores {@link Observer#onError onError} and
     * {@link Observer#onCompleted onCompleted} notifications.
     *
     * @param onNext
     *          a function that handles each item emitted by an {@code Observable}
     * @throws IllegalArgumentException
     *          if {@code onNext} is {@code null}
     * @return an {@code Observer} that calls {@code onNext} for each emitted item from the {@code Observable}
     *         the {@code Observer} subscribes to
     */
    public static final <T> Observer<T> create(final Action1<? super T> onNext) {
        if (onNext == null) {
            throw new IllegalArgumentException("onNext can not be null");
        }

        return new Observer<T>() {

            @Override
            public final void onCompleted() {
                // do nothing
            }

            @Override
            public final void onError(Throwable e) {
                throw new OnErrorNotImplementedException(e);
            }

            @Override
            public final void onNext(T args) {
                onNext.call(args);
            }

        };
    }

    /**
     * Creates an {@link Observer} that receives the emissions of any {@code Observable} it subscribes to via
     * {@link Observer#onNext onNext} and handles any {@link Observer#onError onError} notification but ignores
     * an {@link Observer#onCompleted onCompleted} notification.
     *
     * @param onNext
     *          a function that handles each item emitted by an {@code Observable}
     * @param onError
     *          a function that handles an error notification if one is sent by an {@code Observable}
     * @throws IllegalArgumentException
     *          if either {@code onNext} or {@code onError} are {@code null}
     * @return an {@code Observer} that calls {@code onNext} for each emitted item from the {@code Observable}
     *         the {@code Observer} subscribes to, and calls {@code onError} if the {@code Observable} notifies
     *         of an error
     */
    public static final <T> Observer<T> create(final Action1<? super T> onNext, final Action1<Throwable> onError) {
        if (onNext == null) {
            throw new IllegalArgumentException("onNext can not be null");
        }
        if (onError == null) {
            throw new IllegalArgumentException("onError can not be null");
        }

        return new Observer<T>() {

            @Override
            public final void onCompleted() {
                // do nothing
            }

            @Override
            public final void onError(Throwable e) {
                onError.call(e);
            }

            @Override
            public final void onNext(T args) {
                onNext.call(args);
            }

        };
    }

    /**
     * Creates an {@link Observer} that receives the emissions of any {@code Observable} it subscribes to via
     * {@link Observer#onNext onNext} and handles any {@link Observer#onError onError} or
     * {@link Observer#onCompleted onCompleted} notifications.
     *
     * @param onNext
     *          a function that handles each item emitted by an {@code Observable}
     * @param onError
     *          a function that handles an error notification if one is sent by an {@code Observable}
     * @param onComplete
     *          a function that handles a sequence complete notification if one is sent by an {@code Observable}
     * @throws IllegalArgumentException
     *          if either {@code onNext}, {@code onError}, or {@code onComplete} are {@code null}
     * @return an {@code Observer} that calls {@code onNext} for each emitted item from the {@code Observable}
     *         the {@code Observer} subscribes to, calls {@code onError} if the {@code Observable} notifies
     *         of an error, and calls {@code onComplete} if the {@code Observable} notifies that the observable
     *         sequence is complete
     */
    public static final <T> Observer<T> create(final Action1<? super T> onNext, final Action1<Throwable> onError, final Action0 onComplete) {
        if (onNext == null) {
            throw new IllegalArgumentException("onNext can not be null");
        }
        if (onError == null) {
            throw new IllegalArgumentException("onError can not be null");
        }
        if (onComplete == null) {
            throw new IllegalArgumentException("onComplete can not be null");
        }

        return new Observer<T>() {

            @Override
            public final void onCompleted() {
                onComplete.call();
            }

            @Override
            public final void onError(Throwable e) {
                onError.call(e);
            }

            @Override
            public final void onNext(T args) {
                onNext.call(args);
            }

        };
    }

}
apache-2.0
LoLab-VU/pysb
pysb/export/bng_net.py
1147
""" Module containing a class for getting the BNGL NET file for a given PySB model. Serves as a wrapper around :py:func:`pysb.bng.generate_network`, which generates the BNGL for the model and then invokes BNG to generate the NET file. For information on how to use the model exporters, see the documentation for :py:mod:`pysb.export`. """ from pysb.bng import generate_network from pysb.export import Exporter class BngNetExporter(Exporter): """A class for generating the BNG NET file for a given PySB model. Inherits from :py:class:`pysb.export.Export`, which implements basic functionality for all exporters. """ def export(self): """Generate the BNGL NET file for the PySB model associated with the exporter. A wrapper around :py:func:`pysb.bng.generate_network`. Returns ------- string The NET file output for the model, generated by BNG. """ net_str = '' if self.docstring: net_str += '# ' + self.docstring.replace('\n', '\n# ') + '\n' net_str += generate_network(self.model, append_stdout=True) return net_str
bsd-2-clause
MattDevo/edk2
BaseTools/Source/Python/Eot/Identification.py
1742
## @file
# This file is used to define the identification of INF/DEC/DSC files
#
# Copyright (c) 2007, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution.  The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

## Identification
#
# This class defines the basic identification information structure which is
# used by INF/DEC/DSC files
#
# @param object:           Inherited from object class
#
# @var FileName:           To store data for Filename
# @var FileFullPath:       To store data for full path of the file
# @var FileRelativePath:   To store data for relative path of the file
# @var PackagePath:        To store data for the package path of the file
#
class Identification(object):
    def __init__(self):
        # All fields start empty; callers fill them in after construction.
        self.FileName = ''
        self.FileFullPath = ''
        self.FileRelativePath = ''
        self.PackagePath = ''

    ## GetFileName
    #
    # Reserved stub; no implementation yet.
    #
    def GetFileName(self, FileFullPath, FileRelativePath):
        pass

    ## GetFileFullPath
    #
    # Reserved stub; no implementation yet.
    # (Header previously mislabeled as "GetFileName".)
    #
    def GetFileFullPath(self, FileName, FileRelativePath):
        pass

    ## GetFileRelativePath
    #
    # Reserved stub; no implementation yet.
    # (Header previously mislabeled as "GetFileName".)
    #
    def GetFileRelativePath(self, FileName, FileFullPath):
        pass

##
# This acts like the main() function for the script, unless it is 'import'ed
# into another script. It only constructs an empty Identification instance.
#
if __name__ == '__main__':
    # Renamed from 'id' to avoid shadowing the builtin id().
    identification = Identification()
bsd-2-clause
bcg62/homebrew-core
Formula/loudmouth.rb
1691
# Homebrew formula: builds loudmouth 1.5.3 from source with GnuTLS as the
# SSL backend and runs the upstream test suite before installing.
class Loudmouth < Formula
  desc "Lightweight C library for the Jabber protocol"
  homepage "https://mcabber.com"
  url "https://mcabber.com/files/loudmouth/loudmouth-1.5.3.tar.bz2"
  sha256 "54329415cb1bacb783c20f5f1f975de4fc460165d0d8a1e3b789367b5f69d32c"
  revision 1

  bottle do
    cellar :any
    sha256 "b361c56b41bf7248fa3b12893856ef54bb3b06f895c2667ffc51c83a5ce44bff" => :mojave
    sha256 "0ae2fce2fd5edcea19ecf80cbcc4f12ab203e92f85c8c28f9444f11fc34df37c" => :high_sierra
    sha256 "778b6156e5d99748a1e4a2e45683cdea3c08295ad6dbaccf64cd23eea0f952ed" => :sierra
    sha256 "92264a248d2b8b7c02e4ab60cd64430869fac7ce5a09a49154c6b2ed3659223a" => :el_capitan
  end

  # HEAD builds come from git and must run autogen.sh, so they additionally
  # need the autotools toolchain at build time.
  head do
    url "https://github.com/mcabber/loudmouth.git"
    depends_on "autoconf" => :build
    depends_on "automake" => :build
    depends_on "libtool" => :build
  end

  depends_on "pkg-config" => :build
  depends_on "gettext"
  depends_on "glib"
  depends_on "gnutls"
  depends_on "libidn"

  def install
    # Regenerate the build system only for HEAD (git) builds.
    system "./autogen.sh", "-n" if build.head?
    system "./configure", "--disable-debug",
                          "--disable-dependency-tracking",
                          "--prefix=#{prefix}",
                          "--with-ssl=gnutls"
    system "make"
    # Run the upstream test suite before installing.
    system "make", "check"
    system "make", "install"
    # Ship the example C sources; the test block below compiles one of them.
    (pkgshare/"examples").install Dir["examples/*.c"]
  end

  test do
    # Compile the bundled async-send example against the installed library
    # and check that the resulting binary runs.
    cp pkgshare/"examples/lm-send-async.c", testpath
    system ENV.cc, "lm-send-async.c", "-o", "test",
           "-L#{lib}", "-L#{Formula["glib"].opt_lib}",
           "-lloudmouth-1", "-lglib-2.0",
           "-I#{include}/loudmouth-1.0",
           "-I#{Formula["glib"].opt_include}/glib-2.0",
           "-I#{Formula["glib"].opt_lib}/glib-2.0/include"
    system "./test", "--help"
  end
end
bsd-2-clause
bahlo/hc
util/rand.go
428
package util

import (
	"crypto/rand"
	"encoding/hex"
)

// RandomHexString returns a random 32-character lowercase hex string,
// derived from 16 bytes of cryptographically secure randomness.
// It panics if the system's secure random source fails, since callers
// cannot safely proceed without entropy.
func RandomHexString() string {
	var b [16]byte
	if _, err := rand.Read(b[:]); err != nil {
		panic(err)
	}
	// encoding/hex produces the same lowercase digits as the previous
	// hand-rolled nibble loop, at standard-library quality.
	return hex.EncodeToString(b[:])
}

// btoh converts a nibble (0-15) to its lowercase ASCII hex digit.
// Kept for package-internal compatibility even though RandomHexString
// now uses encoding/hex.
func btoh(i byte) byte {
	if i > 9 {
		return 0x61 + (i - 10)
	}
	return 0x30 + i
}
bsd-2-clause
mohanaraosv/jodd
jodd-bean/src/test/java/jodd/typeconverter/ShortArrayConverterTest.java
2664
// Copyright (c) 2003-present, Jodd Team (http://jodd.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. 
package jodd.typeconverter;

import jodd.typeconverter.impl.ShortArrayConverter;
import org.junit.Test;

import static jodd.typeconverter.TypeConverterTestHelper.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;

/**
 * Unit tests for {@link ShortArrayConverter}: conversion of scalars,
 * primitive arrays, string arrays, mixed object arrays, and comma-separated
 * strings into {@code short[]}.
 */
public class ShortArrayConverterTest {

	@Test
	public void testConversion() {
		// Converter registered for short[] in the global TypeConverterManager.
		ShortArrayConverter shortArrayConverter = (ShortArrayConverter) TypeConverterManager.lookup(short[].class);

		// null input stays null
		assertNull(shortArrayConverter.convert(null));

		// a single scalar becomes a one-element array
		assertEq(arrs(1), shortArrayConverter.convert(Double.valueOf(1)));

		// primitive arrays of other widths are converted element-wise
		assertEq(arrs(1, 7, 3), shortArrayConverter.convert(arrs(1, 7, 3)));
		assertEq(arrs(1, 7, 3), shortArrayConverter.convert(arrb(1, 7, 3)));
		assertEq(arrs(1, 7, 3), shortArrayConverter.convert(arri(1, 7, 3)));

		// numeric strings are parsed; surrounding whitespace is expected to be ignored
		assertEq(arrs(173, 1022), shortArrayConverter.convert(arrs("173", "1022")));
		assertEq(arrs(173, 1022), shortArrayConverter.convert(arrs(" 173 ", " 1022 ")));

		// mixed object arrays and comma-separated strings are supported too
		assertEq(arrs(173, 10), shortArrayConverter.convert(arro("173", Integer.valueOf(10))));
		assertEq(arrs(173, 10), shortArrayConverter.convert("173,10"));
	}

	// Element-wise equality helper for short arrays (kept as-is rather than
	// switching to assertArrayEquals, to leave existing behavior untouched).
	private void assertEq(short[] arr1, short[] arr2) {
		assertEquals(arr1.length, arr2.length);
		for (int i = 0; i < arr1.length; i++) {
			assertEquals(arr1[i], arr2[i]);
		}
	}
}
bsd-2-clause
yacon/koala-framework
Kwc/Directories/Item/Directory/Admin.php
1231
<?php
/**
 * Admin component for a directory of items.
 *
 * Collects the admin objects of plugin child components and delegates
 * deletion to the 'detail' child component's admin.
 */
class Kwc_Directories_Item_Directory_Admin extends Kwc_Admin
{
    /** Removed; set the editComponent setting in detail instead. **/
    // (final so that an exception is raised if a subclass tries to override it)
    protected final function _getContentClass()
    {
        return null;
    }

    /**
     * Collect the admins of all plugin child component classes that
     * implement the plugin-admin interface.
     */
    protected function _getPluginAdmins()
    {
        $lookForPluginClasses = $this->_getPluginParentComponents();
        $classes = array();
        foreach ($lookForPluginClasses as $c) {
            $classes = array_merge($classes, Kwc_Abstract::getChildComponentClasses($c));
        }
        $ret = array();
        foreach ($classes as $class) {
            $admin = Kwc_Admin::getInstance($class);
            if ($admin instanceof Kwc_Directories_Item_Directory_PluginAdminInterface) {
                $ret[] = $admin;
            }
        }
        return $ret;
    }

    /**
     * Component classes whose children are searched for plugin admins;
     * subclasses may override to widen the search.
     */
    protected function _getPluginParentComponents()
    {
        return array($this->_class);
    }

    /** Public, non-overridable accessor for the collected plugin admins. */
    public final function getPluginAdmins()
    {
        return $this->_getPluginAdmins();
    }

    /** Delegate deletion to the 'detail' child component's admin. */
    public function delete($componentId)
    {
        $detail = Kwc_Abstract::getChildComponentClass($this->_class, 'detail');
        Kwc_Admin::getInstance($detail)->delete($componentId);
    }
}
bsd-2-clause
kunickiaj/homebrew-core
Formula/atk.rb
1507
# Homebrew formula: builds ATK 2.24.0 with GObject introspection enabled and
# verifies the install by compiling a minimal C program against its headers.
class Atk < Formula
  desc "GNOME accessibility toolkit"
  homepage "https://library.gnome.org/devel/atk/"
  url "https://download.gnome.org/sources/atk/2.24/atk-2.24.0.tar.xz"
  sha256 "bb2daa9a808c73a7a79d2983f333e0ba74be42fc51e3ba1faf2551a636487a49"

  bottle do
    sha256 "89d59856b8bbd0a51961c0569237da2f6119fddd40f67b50d96651483fe9be10" => :sierra
    sha256 "87028a5febfcdce413c64f7786468db61b1c62412799405e6b3ac179c0a8ae05" => :el_capitan
    sha256 "e628db1461560fbe5fa9da800987ed2eab6e6c969709797de07a6052521a022e" => :yosemite
  end

  depends_on "pkg-config" => :build
  depends_on "glib"
  depends_on "gobject-introspection"

  def install
    system "./configure", "--disable-dependency-tracking",
                          "--prefix=#{prefix}",
                          "--enable-introspection=yes"
    system "make"
    system "make", "install"
  end

  test do
    # Compile and run a tiny program that calls atk_get_version() to confirm
    # headers and libraries are usable.
    (testpath/"test.c").write <<-EOS.undent
      #include <atk/atk.h>

      int main(int argc, char *argv[]) {
        const gchar *version = atk_get_version();
        return 0;
      }
    EOS
    gettext = Formula["gettext"]
    glib = Formula["glib"]
    flags = %W[
      -I#{gettext.opt_include}
      -I#{glib.opt_include}/glib-2.0
      -I#{glib.opt_lib}/glib-2.0/include
      -I#{include}/atk-1.0
      -L#{gettext.opt_lib}
      -L#{glib.opt_lib}
      -L#{lib}
      -latk-1.0
      -lglib-2.0
      -lgobject-2.0
      -lintl
    ]
    system ENV.cc, "test.c", "-o", "test", *flags
    system "./test"
  end
end
bsd-2-clause
pham186/yii2
themes/admin/js/flot/custom/stacked-vertical.js
1825
var $border_color = "#efefef"; var $grid_color = "#ddd"; var $default_black = "#666"; var $green = "#8ecf67"; var $yellow = "#fac567"; var $orange = "#F08C56"; var $blue = "#1e91cf"; var $red = "#f74e4d"; var $teal = "#28D8CA"; var $grey = "#999999"; var $dark_blue = "#0D4F8B"; $(function () { var d1, d2, d3, data, chartOptions; d1 = [ [1325376000000, 1200], [1328054400000, 700], [1330560000000, 1000], [1333238400000, 600], [1335830400000, 350] ]; d2 = [ [1325376000000, 800], [1328054400000, 600], [1330560000000, 300], [1333238400000, 350], [1335830400000, 300] ]; d3 = [ [1325376000000, 650], [1328054400000, 450], [1330560000000, 150], [1333238400000, 200], [1335830400000, 150] ]; data = [{ label: 'Referral', data: d1 }, { label: 'Direct', data: d2 }, { label: 'Organic', data: d3 }]; chartOptions = { xaxis: { min: (new Date(2011, 11, 15)).getTime(), max: (new Date(2012, 04, 18)).getTime(), mode: "time", tickSize: [2, "month"], monthNames: ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"], tickLength: 0 }, grid:{ hoverable: true, clickable: false, borderWidth: 1, tickColor: $border_color, borderColor: $grid_color, }, series: { stack: true }, bars: { show: true, barWidth: 36*24*60*60*300, fill: true, align: 'center', lineWidth: 1, lineWidth: 0, fillColor: { colors: [ { opacity: 1 }, { opacity: 1 } ] } }, shadowSize: 0, tooltip: true, tooltipOpts: { content: '%s: %y' }, colors: [$green, $blue, $yellow, $teal, $yellow, $green], } var holder = $('#stacked-vertical-chart'); if (holder.length) { $.plot(holder, data, chartOptions ); } });
bsd-3-clause
scheib/chromium
chrome/chrome_cleaner/test/test_service_main.cc
5107
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Minimal Windows service executable used by chrome_cleaner tests: it
// registers with the Service Control Manager, reports RUNNING, then blocks
// until it receives SERVICE_CONTROL_STOP.

#include <windows.h>

#include <string>

#include "base/at_exit.h"
#include "base/command_line.h"
#include "base/files/file_path.h"
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/path_service.h"
#include "base/test/test_timeouts.h"
#include "base/threading/thread_checker.h"
#include "chrome/chrome_cleaner/test/test_strings.h"

namespace {

// Owns the SERVICE_STATUS bookkeeping and the event used to signal the
// service to stop. All SERVICE_STATUS mutations are expected on one thread
// (enforced by |service_status_thread_checker_|).
class TestService {
 public:
  TestService() {
    service_status_.dwCurrentState = SERVICE_START_PENDING;
    service_status_.dwServiceType = SERVICE_WIN32_OWN_PROCESS;
    // action_max_timeout can be overridden by command line to a value that
    // could overflow.
    service_status_.dwWaitHint = base::checked_cast<DWORD>(
        TestTimeouts::action_max_timeout().InMilliseconds());
    // Manual-reset event, initially unsignaled.
    service_stop_event_ = ::CreateEvent(nullptr, TRUE, FALSE, nullptr);
  }

  // Signals the stop event; called from the SCM control handler thread.
  void SignalStopEvent() { ::SetEvent(service_stop_event_); }

  // Blocks the calling thread until SignalStopEvent() is called.
  void WaitForStopEvent() {
    ::WaitForSingleObject(service_stop_event_, INFINITE);
  }

  // Sets which control codes the service accepts (e.g. SERVICE_ACCEPT_STOP).
  void SetControlsAccepted(DWORD control) {
    DCHECK_CALLED_ON_VALID_THREAD(service_status_thread_checker_);
    service_status_.dwControlsAccepted = control;
  }

  void SetStatusHandle(SERVICE_STATUS_HANDLE status_handle) {
    CHECK(status_handle);
    status_handle_ = status_handle;
  }

  DWORD GetServiceStatusState() {
    DCHECK_CALLED_ON_VALID_THREAD(service_status_thread_checker_);
    return service_status_.dwCurrentState;
  }

  // Updates the cached state and reports it to the SCM.
  void SetServiceStatusState(DWORD state) {
    DCHECK_CALLED_ON_VALID_THREAD(service_status_thread_checker_);
    DCHECK(status_handle_);
    service_status_.dwCurrentState = state;
    if (::SetServiceStatus(status_handle_, &service_status_) == FALSE)
      LOG(ERROR) << "Cannot set service status state.";
  }

 private:
  SERVICE_STATUS_HANDLE status_handle_{};
  SERVICE_STATUS service_status_{};
  HANDLE service_stop_event_{INVALID_HANDLE_VALUE};
  THREAD_CHECKER(service_status_thread_checker_);
};

// HandlerEx-style control callback. |context| is the TestService instance
// passed to RegisterServiceCtrlHandlerEx; only SERVICE_CONTROL_STOP is acted
// upon, every control code is reported as handled (returns 0 == NO_ERROR).
DWORD WINAPI ServiceCtrlHandler(DWORD control,
                                DWORD /* event_type */,
                                LPVOID /* event_data */,
                                LPVOID context) {
  TestService* service = reinterpret_cast<TestService*>(context);
  DCHECK(service);
  if (control == SERVICE_CONTROL_STOP)
    service->SignalStopEvent();
  return 0;
}

void WINAPI ServiceMain(DWORD argc, LPTSTR* argv) {
  TestService service;

  // Registers a function to handle extended service control requests.
  SERVICE_STATUS_HANDLE status_handle =
      RegisterServiceCtrlHandlerEx(L"", ServiceCtrlHandler, &service);
  service.SetStatusHandle(status_handle);

  // Report SERVICE_START_PENDING while startup completes.
  service.SetServiceStatusState(SERVICE_START_PENDING);
  LOG(INFO) << "Service status is 'start pending'.";

  // Tell the service controller the current service is running.
  service.SetControlsAccepted(SERVICE_ACCEPT_STOP);
  service.SetServiceStatusState(SERVICE_RUNNING);
  LOG(INFO) << "Service status is 'running'.";

  // The body of the service waits until a stop signal is received.
  LOG(INFO) << "Service is waiting to be stopped.";
  service.WaitForStopEvent();

  // TODO(pmbureau): Add more kind of worker that may protect against service
  // termination and validate service termination.

  // Tell the service controller the current service is stopped.
  service.SetServiceStatusState(SERVICE_STOP_PENDING);
  LOG(INFO) << "Service status is 'stop pending'.";
  service.SetServiceStatusState(SERVICE_STOPPED);
  // Do not attempt to perform any additional work after calling
  // SetServiceStatus with SERVICE_STOPPED, because the service process can be
  // terminated at any time.
}

constexpr wchar_t kLogFileExtension[] = L"log";

}  // namespace

int main(int argc, char** argv) {
  base::AtExitManager at_exit;

  bool success = base::CommandLine::Init(0, nullptr);
  DCHECK(success);

  TestTimeouts::Initialize();

  // Initialize the logging settings to set a specific log file name:
  // the service logs next to its own executable, with a .log extension.
  base::FilePath exe_file_path;
  success = base::PathService::Get(base::FILE_EXE, &exe_file_path);
  DCHECK(success);
  base::FilePath log_file_path(
      exe_file_path.ReplaceExtension(kLogFileExtension));
  logging::LoggingSettings logging_settings;
  logging_settings.logging_dest = logging::LOG_TO_FILE;
  logging_settings.log_file_path = log_file_path.value().c_str();
  success = logging::InitLogging(logging_settings);
  DCHECK(success);

  LOG(INFO) << "Service Started.";

  // Service main.
  // SERVICE_TABLE_ENTRY::lpServiceName takes a non-const string.
  wchar_t empty_string[] = L"";
  SERVICE_TABLE_ENTRY dispatch_table[] = {{empty_string, ServiceMain},
                                          {nullptr, nullptr}};
  // Blocks until all services in the process have stopped.
  if (::StartServiceCtrlDispatcher(dispatch_table) == FALSE) {
    LOG(ERROR) << "StartServiceCtrlDispatcher failed.";
    return 1;
  }

  LOG(INFO) << "Service ended.";
  return 0;
}
bsd-3-clause
masteringyii/chapter4
config/messages.php
3309
<?php

/**
 * Configuration for the `yii message` console command, which scans the
 * application source for `Yii::t()` calls and extracts translatable
 * messages into per-language PHP message files stored in this directory.
 *
 * Alternative output formats ('db' and 'po') are kept below, commented out,
 * for reference.
 */
return [
    // string, required, root directory of all source files
    'sourcePath' => __DIR__ . DIRECTORY_SEPARATOR . '..',
    // array, required, list of language codes that the extracted messages
    // should be translated to. For example, ['zh-CN', 'de'].
    'languages' => ['en'],
    // string, the name of the function for translating messages.
    // Defaults to 'Yii::t'. This is used as a mark to find the messages to be
    // translated. You may use a string for single function name or an array for
    // multiple function names.
    'translator' => 'Yii::t',
    // boolean, whether to sort messages by keys when merging new messages
    // with the existing ones. Defaults to false, which means the new (untranslated)
    // messages will be separated from the old (translated) ones.
    'sort' => false,
    // boolean, whether to remove messages that no longer appear in the source code.
    // Defaults to false, which means each of these messages will be enclosed with a pair of '@@' marks.
    'removeUnused' => false,
    // array, list of patterns that specify which files/directories should NOT be processed.
    // If empty or not set, all files/directories will be processed.
    // A path matches a pattern if it contains the pattern string at its end. For example,
    // '/a/b' will match all files and directories ending with '/a/b';
    // the '*.svn' will match all files and directories whose name ends with '.svn'.
    // and the '.svn' will match all files and directories named exactly '.svn'.
    // Note, the '/' characters in a pattern matches both '/' and '\'.
    // See helpers/FileHelper::findFiles() description for more details on pattern matching rules.
    'only' => ['*.php'],
    // array, list of patterns that specify which files (not directories) should be processed.
    // If empty or not set, all files will be processed.
    // Please refer to "except" for details about the patterns.
    // If a file/directory matches both a pattern in "only" and "except", it will NOT be processed.
    'except' => [
        '.svn',
        '.git',
        '.gitignore',
        '.gitkeep',
        '.hgignore',
        '.hgkeep',
        '/messages',
        '/vendor/'
    ],

    // 'php' output format is for saving messages to php files.
    'format' => 'php',
    // Root directory containing message translations.
    'messagePath' => __DIR__,
    // boolean, whether the message file should be overwritten with the merged messages
    'overwrite' => true,

    /*
    // 'db' output format is for saving messages to database.
    'format' => 'db',
    // Connection component to use. Optional.
    'db' => 'db',
    // Custom source message table. Optional.
    // 'sourceMessageTable' => '{{%source_message}}',
    // Custom name for translation message table. Optional.
    // 'messageTable' => '{{%message}}',
    */

    /*
    // 'po' output format is for saving messages to gettext po files.
    'format' => 'po',
    // Root directory containing message translations.
    'messagePath' => __DIR__ . DIRECTORY_SEPARATOR . 'messages',
    // Name of the file that will be used for translations.
    'catalog' => 'messages',
    // boolean, whether the message file should be overwritten with the merged messages
    'overwrite' => true,
    */
];
bsd-3-clause
cross1154/odbc
column.go
8946
// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package odbc import ( "code.google.com/p/odbc/api" "database/sql/driver" "errors" "fmt" "time" "unsafe" ) type BufferLen api.SQLLEN func (l *BufferLen) IsNull() bool { return *l == api.SQL_NULL_DATA } func (l *BufferLen) GetData(h api.SQLHSTMT, idx int, ctype api.SQLSMALLINT, buf []byte) api.SQLRETURN { return api.SQLGetData(h, api.SQLUSMALLINT(idx+1), ctype, api.SQLPOINTER(unsafe.Pointer(&buf[0])), api.SQLLEN(len(buf)), (*api.SQLLEN)(l)) } func (l *BufferLen) Bind(h api.SQLHSTMT, idx int, ctype api.SQLSMALLINT, buf []byte) api.SQLRETURN { return api.SQLBindCol(h, api.SQLUSMALLINT(idx+1), ctype, api.SQLPOINTER(unsafe.Pointer(&buf[0])), api.SQLLEN(len(buf)), (*api.SQLLEN)(l)) } // Column provides access to row columns. type Column interface { Name() string Bind(h api.SQLHSTMT, idx int) (bool, error) Value(h api.SQLHSTMT, idx int) (driver.Value, error) } func describeColumn(h api.SQLHSTMT, idx int, namebuf []uint16) (namelen int, sqltype api.SQLSMALLINT, size api.SQLULEN, ret api.SQLRETURN) { var l, decimal, nullable api.SQLSMALLINT ret = api.SQLDescribeCol(h, api.SQLUSMALLINT(idx+1), (*api.SQLWCHAR)(unsafe.Pointer(&namebuf[0])), api.SQLSMALLINT(len(namebuf)), &l, &sqltype, &size, &decimal, &nullable) return int(l), sqltype, size, ret } // TODO(brainman): did not check for MS SQL timestamp func NewColumn(h api.SQLHSTMT, idx int) (Column, error) { namebuf := make([]uint16, 150) namelen, sqltype, size, ret := describeColumn(h, idx, namebuf) if ret == api.SQL_SUCCESS_WITH_INFO && namelen > len(namebuf) { // try again with bigger buffer namebuf = make([]uint16, namelen) namelen, sqltype, size, ret = describeColumn(h, idx, namebuf) } if IsError(ret) { return nil, NewError("SQLDescribeCol", h) } if namelen > len(namebuf) { // still complaining about buffer size return nil, errors.New("Failed to allocate column name 
buffer") } b := &BaseColumn{ name: api.UTF16ToString(namebuf[:namelen]), } switch sqltype { case api.SQL_BIT: return NewBindableColumn(b, api.SQL_C_BIT, 1), nil case api.SQL_TINYINT, api.SQL_SMALLINT, api.SQL_INTEGER: return NewBindableColumn(b, api.SQL_C_LONG, 4), nil case api.SQL_BIGINT: return NewBindableColumn(b, api.SQL_C_SBIGINT, 8), nil case api.SQL_NUMERIC, api.SQL_DECIMAL, api.SQL_FLOAT, api.SQL_REAL, api.SQL_DOUBLE: return NewBindableColumn(b, api.SQL_C_DOUBLE, 8), nil case api.SQL_TYPE_TIMESTAMP: var v api.SQL_TIMESTAMP_STRUCT return NewBindableColumn(b, api.SQL_C_TYPE_TIMESTAMP, int(unsafe.Sizeof(v))), nil case api.SQL_TYPE_DATE: var v api.SQL_DATE_STRUCT return NewBindableColumn(b, api.SQL_C_DATE, int(unsafe.Sizeof(v))), nil case api.SQL_GUID: var v api.SQLGUID return NewBindableColumn(b, api.SQL_C_GUID, int(unsafe.Sizeof(v))), nil case api.SQL_CHAR, api.SQL_VARCHAR: return NewVariableWidthColumn(b, api.SQL_C_CHAR, size), nil case api.SQL_WCHAR, api.SQL_WVARCHAR: return NewVariableWidthColumn(b, api.SQL_C_WCHAR, size), nil case api.SQL_BINARY, api.SQL_VARBINARY: return NewVariableWidthColumn(b, api.SQL_C_BINARY, size), nil case api.SQL_LONGVARCHAR: return NewVariableWidthColumn(b, api.SQL_C_CHAR, 0), nil case api.SQL_WLONGVARCHAR, api.SQL_SS_XML: return NewVariableWidthColumn(b, api.SQL_C_WCHAR, 0), nil case api.SQL_LONGVARBINARY: return NewVariableWidthColumn(b, api.SQL_C_BINARY, 0), nil default: return nil, fmt.Errorf("unsupported column type %d", sqltype) } panic("unreachable") } // BaseColumn implements common column functionality. 
type BaseColumn struct {
	name  string            // column name as reported by SQLDescribeCol
	CType api.SQLSMALLINT   // ODBC C data type the buffer is fetched as
}

// Name returns the column name.
func (c *BaseColumn) Name() string {
	return c.name
}

// Value decodes the raw fetched bytes in buf into a driver.Value according
// to the column's C type. buf holds exactly the valid bytes for this row
// (NULL has already been handled by the caller).
func (c *BaseColumn) Value(buf []byte) (driver.Value, error) {
	var p unsafe.Pointer
	if len(buf) > 0 {
		p = unsafe.Pointer(&buf[0])
	}
	switch c.CType {
	case api.SQL_C_BIT:
		return buf[0] != 0, nil
	case api.SQL_C_LONG:
		return *((*int32)(p)), nil
	case api.SQL_C_SBIGINT:
		return *((*int64)(p)), nil
	case api.SQL_C_DOUBLE:
		return *((*float64)(p)), nil
	case api.SQL_C_CHAR:
		// NOTE(review): returns buf directly; for bound columns this aliases
		// the reusable fetch buffer — callers are expected to copy.
		return buf, nil
	case api.SQL_C_WCHAR:
		if p == nil {
			return buf, nil
		}
		// Reinterpret the byte buffer as UTF-16 code units, then convert.
		s := (*[1 << 28]uint16)(p)[:len(buf)/2]
		return utf16toutf8(s), nil
	case api.SQL_C_TYPE_TIMESTAMP:
		t := (*api.SQL_TIMESTAMP_STRUCT)(p)
		r := time.Date(int(t.Year),
			time.Month(t.Month),
			int(t.Day),
			int(t.Hour),
			int(t.Minute),
			int(t.Second),
			int(t.Fraction),
			time.Local)
		return r, nil
	case api.SQL_C_GUID:
		// Format as the canonical 8-4-4-4-12 GUID string.
		t := (*api.SQLGUID)(p)
		var p1, p2 string
		for _, d := range t.Data4[:2] {
			p1 += fmt.Sprintf("%02x", d)
		}
		for _, d := range t.Data4[2:] {
			p2 += fmt.Sprintf("%02x", d)
		}
		r := fmt.Sprintf("%08x-%04x-%04x-%s-%s",
			t.Data1, t.Data2, t.Data3, p1, p2)
		return r, nil
	case api.SQL_C_DATE:
		t := (*api.SQL_DATE_STRUCT)(p)
		r := time.Date(int(t.Year),
			time.Month(t.Month),
			int(t.Day),
			0, 0, 0, 0, time.Local)
		return r, nil
	case api.SQL_C_BINARY:
		return buf, nil
	}
	return nil, fmt.Errorf("unsupported column ctype %d", c.CType)
}

// BindableColumn allows access to columns that can have their buffers
// bound. Once bound at start, they are written to by odbc driver every
// time it fetches new row. This saves on syscall and, perhaps, some
// buffer copying. BindableColumn can be left unbound, then it behaves
// like NonBindableColumn when user reads data from it.
type BindableColumn struct {
	*BaseColumn
	IsBound         bool      // true once Bind succeeded for this statement
	IsVariableWidth bool      // true for char/binary columns of bounded width
	Size            int       // size of Buffer in bytes
	Len             BufferLen // length/indicator written by the driver on fetch
	Buffer          []byte    // destination buffer for fetched data
	smallBuf        [8]byte   // small inline memory buffer, so we do not need allocate external memory all the time
}

// NewBindableColumn creates a fixed-size bindable column that fetches data
// as ctype into a bufSize-byte buffer (inlined when it fits in 8 bytes).
func NewBindableColumn(b *BaseColumn, ctype api.SQLSMALLINT, bufSize int) *BindableColumn {
	b.CType = ctype
	c := &BindableColumn{BaseColumn: b, Size: bufSize}
	if c.Size <= len(c.smallBuf) {
		// use inline buffer
		c.Buffer = c.smallBuf[:c.Size]
	} else {
		c.Buffer = make([]byte, c.Size)
	}
	return c
}

// NewVariableWidthColumn creates a column for char/binary data. Columns of
// unknown (0) or large (>1024) width become non-bindable and are fetched in
// chunks; otherwise a bindable buffer sized for colWidth (plus room for the
// null terminator where the C type needs one) is allocated.
func NewVariableWidthColumn(b *BaseColumn, ctype api.SQLSMALLINT, colWidth api.SQLULEN) Column {
	if colWidth == 0 || colWidth > 1024 {
		b.CType = ctype
		return &NonBindableColumn{b}
	}
	l := int(colWidth)
	switch ctype {
	case api.SQL_C_WCHAR:
		l += 1 // room for null-termination character
		l *= 2 // wchars take 2 bytes each
	case api.SQL_C_CHAR:
		l += 1 // room for null-termination character
	case api.SQL_C_BINARY:
		// nothing to do
	default:
		panic(fmt.Errorf("do not know how wide column of ctype %d is", ctype))
	}
	c := NewBindableColumn(b, ctype, l)
	c.IsVariableWidth = true
	return c
}

// Bind attaches the column's buffer to the statement so the driver fills it
// on every fetch; reports true on success.
func (c *BindableColumn) Bind(h api.SQLHSTMT, idx int) (bool, error) {
	ret := c.Len.Bind(h, idx, c.CType, c.Buffer)
	if IsError(ret) {
		return false, NewError("SQLBindCol", h)
	}
	c.IsBound = true
	return true, nil
}

// Value returns the current row's value, fetching with SQLGetData when the
// column was never bound. NULL columns yield nil.
func (c *BindableColumn) Value(h api.SQLHSTMT, idx int) (driver.Value, error) {
	if !c.IsBound {
		ret := c.Len.GetData(h, idx, c.CType, c.Buffer)
		if IsError(ret) {
			return nil, NewError("SQLGetData", h)
		}
	}
	if c.Len.IsNull() {
		// is NULL
		return nil, nil
	}
	// Fixed-width types must fill the buffer exactly; anything else means
	// the driver and our expectations disagree.
	if !c.IsVariableWidth && int(c.Len) != c.Size {
		panic(fmt.Errorf("wrong column #%d length %d returned, %d expected", idx, c.Len, c.Size))
	}
	return c.BaseColumn.Value(c.Buffer[:c.Len])
}

// NonBindableColumn provide access to columns, that can't be bound.
// These are of character or binary type, and, usually, there is no
// limit for their width.
type NonBindableColumn struct { *BaseColumn } func (c *NonBindableColumn) Bind(h api.SQLHSTMT, idx int) (bool, error) { return false, nil } func (c *NonBindableColumn) Value(h api.SQLHSTMT, idx int) (driver.Value, error) { var l BufferLen var total []byte b := make([]byte, 1024) loop: for { ret := l.GetData(h, idx, c.CType, b) switch ret { case api.SQL_SUCCESS: if l.IsNull() { // is NULL return nil, nil } if int(l) > len(b) { return nil, fmt.Errorf("too much data returned: %d bytes returned, but buffer size is %d", l, cap(b)) } total = append(total, b[:l]...) break loop case api.SQL_SUCCESS_WITH_INFO: err := NewError("SQLGetData", h).(*Error) if len(err.Diag) > 0 && err.Diag[0].State != "01004" { return nil, err } i := len(b) switch c.CType { case api.SQL_C_WCHAR: i -= 2 // remove wchar (2 bytes) null-termination character case api.SQL_C_CHAR: i-- // remove null-termination character } total = append(total, b[:i]...) if l != api.SQL_NO_TOTAL { // odbc gives us a hint about remaining data, // lets get it in one go. n := int(l) // total bytes for our data n -= i // subtract already received n += 2 // room for biggest (wchar) null-terminator if len(b) < n { b = make([]byte, n) } } default: return nil, NewError("SQLGetData", h) } } return c.BaseColumn.Value(total) }
bsd-3-clause
boundarydevices/android_external_chromium_org
net/quic/quic_config.cc
23038
// Copyright (c) 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "net/quic/quic_config.h" #include <algorithm> #include "base/logging.h" #include "net/quic/crypto/crypto_handshake_message.h" #include "net/quic/crypto/crypto_protocol.h" #include "net/quic/quic_flags.h" #include "net/quic/quic_sent_packet_manager.h" #include "net/quic/quic_utils.h" using std::min; using std::string; namespace net { // Reads the value corresponding to |name_| from |msg| into |out|. If the // |name_| is absent in |msg| and |presence| is set to OPTIONAL |out| is set // to |default_value|. QuicErrorCode ReadUint32(const CryptoHandshakeMessage& msg, QuicTag tag, QuicConfigPresence presence, uint32 default_value, uint32* out, string* error_details) { DCHECK(error_details != NULL); QuicErrorCode error = msg.GetUint32(tag, out); switch (error) { case QUIC_CRYPTO_MESSAGE_PARAMETER_NOT_FOUND: if (presence == PRESENCE_REQUIRED) { *error_details = "Missing " + QuicUtils::TagToString(tag); break; } error = QUIC_NO_ERROR; *out = default_value; break; case QUIC_NO_ERROR: break; default: *error_details = "Bad " + QuicUtils::TagToString(tag); break; } return error; } QuicConfigValue::QuicConfigValue(QuicTag tag, QuicConfigPresence presence) : tag_(tag), presence_(presence) { } QuicConfigValue::~QuicConfigValue() {} QuicNegotiableValue::QuicNegotiableValue(QuicTag tag, QuicConfigPresence presence) : QuicConfigValue(tag, presence), negotiated_(false) { } QuicNegotiableValue::~QuicNegotiableValue() {} QuicNegotiableUint32::QuicNegotiableUint32(QuicTag tag, QuicConfigPresence presence) : QuicNegotiableValue(tag, presence), max_value_(0), default_value_(0) { } QuicNegotiableUint32::~QuicNegotiableUint32() {} void QuicNegotiableUint32::set(uint32 max, uint32 default_value) { DCHECK_LE(default_value, max); max_value_ = max; default_value_ = default_value; } uint32 
QuicNegotiableUint32::GetUint32() const { if (negotiated_) { return negotiated_value_; } return default_value_; } void QuicNegotiableUint32::ToHandshakeMessage( CryptoHandshakeMessage* out) const { if (negotiated_) { out->SetValue(tag_, negotiated_value_); } else { out->SetValue(tag_, max_value_); } } QuicErrorCode QuicNegotiableUint32::ProcessPeerHello( const CryptoHandshakeMessage& peer_hello, HelloType hello_type, string* error_details) { DCHECK(!negotiated_); DCHECK(error_details != NULL); uint32 value; QuicErrorCode error = ReadUint32(peer_hello, tag_, presence_, default_value_, &value, error_details); if (error != QUIC_NO_ERROR) { return error; } if (hello_type == SERVER && value > max_value_) { *error_details = "Invalid value received for " + QuicUtils::TagToString(tag_); return QUIC_INVALID_NEGOTIATED_VALUE; } negotiated_ = true; negotiated_value_ = min(value, max_value_); return QUIC_NO_ERROR; } QuicNegotiableTag::QuicNegotiableTag(QuicTag tag, QuicConfigPresence presence) : QuicNegotiableValue(tag, presence), negotiated_tag_(0), default_value_(0) { } QuicNegotiableTag::~QuicNegotiableTag() {} void QuicNegotiableTag::set(const QuicTagVector& possible, QuicTag default_value) { DCHECK(ContainsQuicTag(possible, default_value)); possible_values_ = possible; default_value_ = default_value; } QuicTag QuicNegotiableTag::GetTag() const { if (negotiated_) { return negotiated_tag_; } return default_value_; } void QuicNegotiableTag::ToHandshakeMessage(CryptoHandshakeMessage* out) const { if (negotiated_) { // Because of the way we serialize and parse handshake messages we can // serialize this as value and still parse it as a vector. 
out->SetValue(tag_, negotiated_tag_); } else { out->SetVector(tag_, possible_values_); } } QuicErrorCode QuicNegotiableTag::ReadVector( const CryptoHandshakeMessage& msg, const QuicTag** out, size_t* out_length, string* error_details) const { DCHECK(error_details != NULL); QuicErrorCode error = msg.GetTaglist(tag_, out, out_length); switch (error) { case QUIC_CRYPTO_MESSAGE_PARAMETER_NOT_FOUND: if (presence_ == PRESENCE_REQUIRED) { *error_details = "Missing " + QuicUtils::TagToString(tag_); break; } error = QUIC_NO_ERROR; *out_length = 1; *out = &default_value_; case QUIC_NO_ERROR: break; default: *error_details = "Bad " + QuicUtils::TagToString(tag_); break; } return error; } QuicErrorCode QuicNegotiableTag::ProcessPeerHello( const CryptoHandshakeMessage& peer_hello, HelloType hello_type, string* error_details) { DCHECK(!negotiated_); DCHECK(error_details != NULL); const QuicTag* received_tags; size_t received_tags_length; QuicErrorCode error = ReadVector(peer_hello, &received_tags, &received_tags_length, error_details); if (error != QUIC_NO_ERROR) { return error; } if (hello_type == SERVER) { if (received_tags_length != 1 || !ContainsQuicTag(possible_values_, *received_tags)) { *error_details = "Invalid " + QuicUtils::TagToString(tag_); return QUIC_INVALID_NEGOTIATED_VALUE; } negotiated_tag_ = *received_tags; } else { QuicTag negotiated_tag; if (!QuicUtils::FindMutualTag(possible_values_, received_tags, received_tags_length, QuicUtils::LOCAL_PRIORITY, &negotiated_tag, NULL)) { *error_details = "Unsupported " + QuicUtils::TagToString(tag_); return QUIC_CRYPTO_MESSAGE_PARAMETER_NO_OVERLAP; } negotiated_tag_ = negotiated_tag; } negotiated_ = true; return QUIC_NO_ERROR; } QuicFixedUint32::QuicFixedUint32(QuicTag tag, QuicConfigPresence presence) : QuicConfigValue(tag, presence), has_send_value_(false), has_receive_value_(false) { } QuicFixedUint32::~QuicFixedUint32() {} bool QuicFixedUint32::HasSendValue() const { return has_send_value_; } uint32 
QuicFixedUint32::GetSendValue() const { LOG_IF(DFATAL, !has_send_value_) << "No send value to get for tag:" << QuicUtils::TagToString(tag_); return send_value_; } void QuicFixedUint32::SetSendValue(uint32 value) { has_send_value_ = true; send_value_ = value; } bool QuicFixedUint32::HasReceivedValue() const { return has_receive_value_; } uint32 QuicFixedUint32::GetReceivedValue() const { LOG_IF(DFATAL, !has_receive_value_) << "No receive value to get for tag:" << QuicUtils::TagToString(tag_); return receive_value_; } void QuicFixedUint32::SetReceivedValue(uint32 value) { has_receive_value_ = true; receive_value_ = value; } void QuicFixedUint32::ToHandshakeMessage(CryptoHandshakeMessage* out) const { if (has_send_value_) { out->SetValue(tag_, send_value_); } } QuicErrorCode QuicFixedUint32::ProcessPeerHello( const CryptoHandshakeMessage& peer_hello, HelloType hello_type, string* error_details) { DCHECK(error_details != NULL); QuicErrorCode error = peer_hello.GetUint32(tag_, &receive_value_); switch (error) { case QUIC_CRYPTO_MESSAGE_PARAMETER_NOT_FOUND: if (presence_ == PRESENCE_OPTIONAL) { return QUIC_NO_ERROR; } *error_details = "Missing " + QuicUtils::TagToString(tag_); break; case QUIC_NO_ERROR: has_receive_value_ = true; break; default: *error_details = "Bad " + QuicUtils::TagToString(tag_); break; } return error; } QuicFixedTag::QuicFixedTag(QuicTag name, QuicConfigPresence presence) : QuicConfigValue(name, presence), has_send_value_(false), has_receive_value_(false) { } QuicFixedTag::~QuicFixedTag() {} bool QuicFixedTag::HasSendValue() const { return has_send_value_; } uint32 QuicFixedTag::GetSendValue() const { LOG_IF(DFATAL, !has_send_value_) << "No send value to get for tag:" << QuicUtils::TagToString(tag_); return send_value_; } void QuicFixedTag::SetSendValue(uint32 value) { has_send_value_ = true; send_value_ = value; } bool QuicFixedTag::HasReceivedValue() const { return has_receive_value_; } uint32 QuicFixedTag::GetReceivedValue() const { 
LOG_IF(DFATAL, !has_receive_value_) << "No receive value to get for tag:" << QuicUtils::TagToString(tag_); return receive_value_; } void QuicFixedTag::SetReceivedValue(uint32 value) { has_receive_value_ = true; receive_value_ = value; } void QuicFixedTag::ToHandshakeMessage(CryptoHandshakeMessage* out) const { if (has_send_value_) { out->SetValue(tag_, send_value_); } } QuicErrorCode QuicFixedTag::ProcessPeerHello( const CryptoHandshakeMessage& peer_hello, HelloType hello_type, string* error_details) { DCHECK(error_details != NULL); QuicErrorCode error = peer_hello.GetUint32(tag_, &receive_value_); switch (error) { case QUIC_CRYPTO_MESSAGE_PARAMETER_NOT_FOUND: if (presence_ == PRESENCE_OPTIONAL) { return QUIC_NO_ERROR; } *error_details = "Missing " + QuicUtils::TagToString(tag_); break; case QUIC_NO_ERROR: has_receive_value_ = true; break; default: *error_details = "Bad " + QuicUtils::TagToString(tag_); break; } return error; } QuicFixedTagVector::QuicFixedTagVector(QuicTag name, QuicConfigPresence presence) : QuicConfigValue(name, presence), has_send_values_(false), has_receive_values_(false) { } QuicFixedTagVector::~QuicFixedTagVector() {} bool QuicFixedTagVector::HasSendValues() const { return has_send_values_; } QuicTagVector QuicFixedTagVector::GetSendValues() const { LOG_IF(DFATAL, !has_send_values_) << "No send values to get for tag:" << QuicUtils::TagToString(tag_); return send_values_; } void QuicFixedTagVector::SetSendValues(const QuicTagVector& values) { has_send_values_ = true; send_values_ = values; } bool QuicFixedTagVector::HasReceivedValues() const { return has_receive_values_; } QuicTagVector QuicFixedTagVector::GetReceivedValues() const { LOG_IF(DFATAL, !has_receive_values_) << "No receive value to get for tag:" << QuicUtils::TagToString(tag_); return receive_values_; } void QuicFixedTagVector::SetReceivedValues(const QuicTagVector& values) { has_receive_values_ = true; receive_values_ = values; } void 
QuicFixedTagVector::ToHandshakeMessage(CryptoHandshakeMessage* out) const { if (has_send_values_) { out->SetVector(tag_, send_values_); } } QuicErrorCode QuicFixedTagVector::ProcessPeerHello( const CryptoHandshakeMessage& peer_hello, HelloType hello_type, string* error_details) { DCHECK(error_details != NULL); const QuicTag* received_tags; size_t received_tags_length; QuicErrorCode error = peer_hello.GetTaglist(tag_, &received_tags, &received_tags_length); switch (error) { case QUIC_CRYPTO_MESSAGE_PARAMETER_NOT_FOUND: if (presence_ == PRESENCE_OPTIONAL) { return QUIC_NO_ERROR; } *error_details = "Missing " + QuicUtils::TagToString(tag_); break; case QUIC_NO_ERROR: has_receive_values_ = true; for (size_t i = 0; i < received_tags_length; ++i) { receive_values_.push_back(received_tags[i]); } break; default: *error_details = "Bad " + QuicUtils::TagToString(tag_); break; } return error; } QuicConfig::QuicConfig() : congestion_feedback_(kCGST, PRESENCE_REQUIRED), congestion_options_(kCOPT, PRESENCE_OPTIONAL), loss_detection_(kLOSS, PRESENCE_OPTIONAL), idle_connection_state_lifetime_seconds_(kICSL, PRESENCE_REQUIRED), keepalive_timeout_seconds_(kKATO, PRESENCE_OPTIONAL), max_streams_per_connection_(kMSPC, PRESENCE_REQUIRED), max_time_before_crypto_handshake_(QuicTime::Delta::Zero()), initial_congestion_window_(kSWND, PRESENCE_OPTIONAL), initial_round_trip_time_us_(kIRTT, PRESENCE_OPTIONAL), // TODO(rjshade): Make this PRESENCE_REQUIRED when retiring // QUIC_VERSION_17. initial_flow_control_window_bytes_(kIFCW, PRESENCE_OPTIONAL), // TODO(rjshade): Make this PRESENCE_REQUIRED when retiring // QUIC_VERSION_19. initial_stream_flow_control_window_bytes_(kSFCW, PRESENCE_OPTIONAL), // TODO(rjshade): Make this PRESENCE_REQUIRED when retiring // QUIC_VERSION_19. 
initial_session_flow_control_window_bytes_(kCFCW, PRESENCE_OPTIONAL) { } QuicConfig::~QuicConfig() {} void QuicConfig::set_congestion_feedback( const QuicTagVector& congestion_feedback, QuicTag default_congestion_feedback) { congestion_feedback_.set(congestion_feedback, default_congestion_feedback); } QuicTag QuicConfig::congestion_feedback() const { return congestion_feedback_.GetTag(); } void QuicConfig::SetCongestionOptionsToSend( const QuicTagVector& congestion_options) { congestion_options_.SetSendValues(congestion_options); } bool QuicConfig::HasReceivedCongestionOptions() const { return congestion_options_.HasReceivedValues(); } QuicTagVector QuicConfig::ReceivedCongestionOptions() const { return congestion_options_.GetReceivedValues(); } void QuicConfig::SetLossDetectionToSend(QuicTag loss_detection) { loss_detection_.SetSendValue(loss_detection); } bool QuicConfig::HasReceivedLossDetection() const { return loss_detection_.HasReceivedValue(); } QuicTag QuicConfig::ReceivedLossDetection() const { return loss_detection_.GetReceivedValue(); } void QuicConfig::set_idle_connection_state_lifetime( QuicTime::Delta max_idle_connection_state_lifetime, QuicTime::Delta default_idle_conection_state_lifetime) { idle_connection_state_lifetime_seconds_.set( max_idle_connection_state_lifetime.ToSeconds(), default_idle_conection_state_lifetime.ToSeconds()); } QuicTime::Delta QuicConfig::idle_connection_state_lifetime() const { return QuicTime::Delta::FromSeconds( idle_connection_state_lifetime_seconds_.GetUint32()); } QuicTime::Delta QuicConfig::keepalive_timeout() const { return QuicTime::Delta::FromSeconds( keepalive_timeout_seconds_.GetUint32()); } void QuicConfig::set_max_streams_per_connection(size_t max_streams, size_t default_streams) { max_streams_per_connection_.set(max_streams, default_streams); } uint32 QuicConfig::max_streams_per_connection() const { return max_streams_per_connection_.GetUint32(); } void QuicConfig::set_max_time_before_crypto_handshake( 
QuicTime::Delta max_time_before_crypto_handshake) { max_time_before_crypto_handshake_ = max_time_before_crypto_handshake; } QuicTime::Delta QuicConfig::max_time_before_crypto_handshake() const { return max_time_before_crypto_handshake_; } void QuicConfig::SetInitialCongestionWindowToSend(size_t initial_window) { initial_congestion_window_.SetSendValue(initial_window); } bool QuicConfig::HasReceivedInitialCongestionWindow() const { return initial_congestion_window_.HasReceivedValue(); } uint32 QuicConfig::ReceivedInitialCongestionWindow() const { return initial_congestion_window_.GetReceivedValue(); } void QuicConfig::SetInitialRoundTripTimeUsToSend(size_t rtt) { initial_round_trip_time_us_.SetSendValue(rtt); } bool QuicConfig::HasReceivedInitialRoundTripTimeUs() const { return initial_round_trip_time_us_.HasReceivedValue(); } uint32 QuicConfig::ReceivedInitialRoundTripTimeUs() const { return initial_round_trip_time_us_.GetReceivedValue(); } void QuicConfig::SetInitialFlowControlWindowToSend(uint32 window_bytes) { if (window_bytes < kDefaultFlowControlSendWindow) { LOG(DFATAL) << "Initial flow control receive window (" << window_bytes << ") cannot be set lower than default (" << kDefaultFlowControlSendWindow << ")."; window_bytes = kDefaultFlowControlSendWindow; } initial_flow_control_window_bytes_.SetSendValue(window_bytes); } uint32 QuicConfig::GetInitialFlowControlWindowToSend() const { return initial_flow_control_window_bytes_.GetSendValue(); } bool QuicConfig::HasReceivedInitialFlowControlWindowBytes() const { return initial_flow_control_window_bytes_.HasReceivedValue(); } uint32 QuicConfig::ReceivedInitialFlowControlWindowBytes() const { return initial_flow_control_window_bytes_.GetReceivedValue(); } void QuicConfig::SetInitialStreamFlowControlWindowToSend(uint32 window_bytes) { if (window_bytes < kDefaultFlowControlSendWindow) { LOG(DFATAL) << "Initial stream flow control receive window (" << window_bytes << ") cannot be set lower than default (" << 
kDefaultFlowControlSendWindow << ")."; window_bytes = kDefaultFlowControlSendWindow; } initial_stream_flow_control_window_bytes_.SetSendValue(window_bytes); } uint32 QuicConfig::GetInitialStreamFlowControlWindowToSend() const { return initial_stream_flow_control_window_bytes_.GetSendValue(); } bool QuicConfig::HasReceivedInitialStreamFlowControlWindowBytes() const { return initial_stream_flow_control_window_bytes_.HasReceivedValue(); } uint32 QuicConfig::ReceivedInitialStreamFlowControlWindowBytes() const { return initial_stream_flow_control_window_bytes_.GetReceivedValue(); } void QuicConfig::SetInitialSessionFlowControlWindowToSend(uint32 window_bytes) { if (window_bytes < kDefaultFlowControlSendWindow) { LOG(DFATAL) << "Initial session flow control receive window (" << window_bytes << ") cannot be set lower than default (" << kDefaultFlowControlSendWindow << ")."; window_bytes = kDefaultFlowControlSendWindow; } initial_session_flow_control_window_bytes_.SetSendValue(window_bytes); } uint32 QuicConfig::GetInitialSessionFlowControlWindowToSend() const { return initial_session_flow_control_window_bytes_.GetSendValue(); } bool QuicConfig::HasReceivedInitialSessionFlowControlWindowBytes() const { return initial_session_flow_control_window_bytes_.HasReceivedValue(); } uint32 QuicConfig::ReceivedInitialSessionFlowControlWindowBytes() const { return initial_session_flow_control_window_bytes_.GetReceivedValue(); } bool QuicConfig::negotiated() { // TODO(ianswett): Add the negotiated parameters once and iterate over all // of them in negotiated, ToHandshakeMessage, ProcessClientHello, and // ProcessServerHello. 
return congestion_feedback_.negotiated() && idle_connection_state_lifetime_seconds_.negotiated() && keepalive_timeout_seconds_.negotiated() && max_streams_per_connection_.negotiated(); } void QuicConfig::SetDefaults() { QuicTagVector congestion_feedback; if (FLAGS_enable_quic_pacing) { congestion_feedback.push_back(kPACE); } congestion_feedback.push_back(kQBIC); congestion_feedback_.set(congestion_feedback, kQBIC); idle_connection_state_lifetime_seconds_.set(kDefaultTimeoutSecs, kDefaultInitialTimeoutSecs); // kKATO is optional. Return 0 if not negotiated. keepalive_timeout_seconds_.set(0, 0); max_streams_per_connection_.set(kDefaultMaxStreamsPerConnection, kDefaultMaxStreamsPerConnection); max_time_before_crypto_handshake_ = QuicTime::Delta::FromSeconds( kDefaultMaxTimeForCryptoHandshakeSecs); SetInitialFlowControlWindowToSend(kDefaultFlowControlSendWindow); SetInitialStreamFlowControlWindowToSend(kDefaultFlowControlSendWindow); SetInitialSessionFlowControlWindowToSend(kDefaultFlowControlSendWindow); } void QuicConfig::EnablePacing(bool enable_pacing) { QuicTagVector congestion_feedback; if (enable_pacing) { congestion_feedback.push_back(kPACE); } congestion_feedback.push_back(kQBIC); congestion_feedback_.set(congestion_feedback, kQBIC); } void QuicConfig::ToHandshakeMessage(CryptoHandshakeMessage* out) const { congestion_feedback_.ToHandshakeMessage(out); idle_connection_state_lifetime_seconds_.ToHandshakeMessage(out); keepalive_timeout_seconds_.ToHandshakeMessage(out); max_streams_per_connection_.ToHandshakeMessage(out); initial_congestion_window_.ToHandshakeMessage(out); initial_round_trip_time_us_.ToHandshakeMessage(out); loss_detection_.ToHandshakeMessage(out); initial_flow_control_window_bytes_.ToHandshakeMessage(out); initial_stream_flow_control_window_bytes_.ToHandshakeMessage(out); initial_session_flow_control_window_bytes_.ToHandshakeMessage(out); congestion_options_.ToHandshakeMessage(out); } QuicErrorCode QuicConfig::ProcessPeerHello( const 
CryptoHandshakeMessage& peer_hello, HelloType hello_type, string* error_details) { DCHECK(error_details != NULL); QuicErrorCode error = QUIC_NO_ERROR; if (error == QUIC_NO_ERROR) { error = congestion_feedback_.ProcessPeerHello( peer_hello, hello_type, error_details); } if (error == QUIC_NO_ERROR) { error = idle_connection_state_lifetime_seconds_.ProcessPeerHello( peer_hello, hello_type, error_details); } if (error == QUIC_NO_ERROR) { error = keepalive_timeout_seconds_.ProcessPeerHello( peer_hello, hello_type, error_details); } if (error == QUIC_NO_ERROR) { error = max_streams_per_connection_.ProcessPeerHello( peer_hello, hello_type, error_details); } if (error == QUIC_NO_ERROR) { error = initial_congestion_window_.ProcessPeerHello( peer_hello, hello_type, error_details); } if (error == QUIC_NO_ERROR) { error = initial_round_trip_time_us_.ProcessPeerHello( peer_hello, hello_type, error_details); } if (error == QUIC_NO_ERROR) { error = initial_flow_control_window_bytes_.ProcessPeerHello( peer_hello, hello_type, error_details); } if (error == QUIC_NO_ERROR) { error = initial_stream_flow_control_window_bytes_.ProcessPeerHello( peer_hello, hello_type, error_details); } if (error == QUIC_NO_ERROR) { error = initial_session_flow_control_window_bytes_.ProcessPeerHello( peer_hello, hello_type, error_details); } if (error == QUIC_NO_ERROR) { error = loss_detection_.ProcessPeerHello( peer_hello, hello_type, error_details); } if (error == QUIC_NO_ERROR) { error = congestion_options_.ProcessPeerHello( peer_hello, hello_type, error_details); } return error; } } // namespace net
bsd-3-clause
ho96/yii2-phpfreechat
assets/phpfreechat/demo/demo31_show_who_is_online-whoisonline.php
1257
<?php

require_once dirname(__FILE__)."/../src/pfcinfo.class.php";

// pfcInfo gives read-only access to a running chat, identified by the
// md5 of the chat title used in demo31_show_who_is_online-chat.php.
// Use a dedicated variable for the object: the original demo reused
// $info for both the pfcInfo instance and the message string, which
// made the code confusing to maintain.
$pfcinfo = new pfcInfo( md5("Whois online demo") );

// NULL is used to get all the connected users, but you can specify
// a channel name to get only the connected user on a specific channel
$users = $pfcinfo->getOnlineNick(NULL);

echo "<h1>A demo which explains how to get the connected users list</h1>";

echo '<div style="margin: auto; width: 70%; border: 1px solid red; background-color: #FDD; padding: 1em;">';

// Pick singular only for exactly one user; zero users is plural in
// English (the original used <= 1, printing "0 user is connected").
$nb_users = count($users);
if ($nb_users == 1)
  $msg = "<strong>%d</strong> user is connected to the server !";
else
  $msg = "<strong>%d</strong> users are connected to the server !";
echo "<p>".sprintf($msg, $nb_users)."</p>";

echo "<p>Here is the online nicknames list of <a href='./demo31_show_who_is_online-chat.php'>this chat</a>:</p>";
echo "<ul>";
foreach($users as $u)
{
  // Nicknames are user-controlled input: escape them to avoid XSS.
  echo "<li>".htmlspecialchars($u)."</li>";
}
echo "</ul>";
echo "</div>";
?>

<?php
// print the current file
echo "<h2>The source code</h2>";
$filename = __FILE__;
echo "<p><code>".$filename."</code></p>";
echo "<pre style=\"margin: 0 50px 0 50px; padding: 10px; background-color: #DDD;\">";
$content = file_get_contents($filename);
highlight_string($content);
echo "</pre>";
?>
bsd-3-clause
redmed/echarts-www
docv/dep/lodash/3.8.0/src/object/methods.js
70
define(["./functions"], function(functions) { return functions; });
bsd-3-clause
JuliBakagianni/CEF-ELRC
lib/python2.7/site-packages/pygeoip/util.py
1421
"""
Misc. utility functions. It is part of the pygeoip package.

@author: Jennifer Ennis <zaylea at gmail dot com>

@license:
Copyright(C) 2004 MaxMind LLC

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/lgpl.txt>.
"""

# NOTE: the previous implementation imported `six` solely to choose
# between int() and long() arithmetic.  Bit-shifting an int on Python 2
# auto-promotes to long (PEP 237), so a single code path covers both
# Python 2 and Python 3 and the six dependency is no longer needed here.


def ip2long(ip):
    """
    Convert a IPv4 address into a 32-bit integer.

    @param ip: quad-dotted IPv4 address
    @type ip: str
    @return: network byte order 32-bit integer
    @rtype: int
    """
    octets = ip.split('.')
    # Same behavior as the original explicit-multiplier version:
    # indexes the first four dotted components (extra components are
    # ignored, too few raise IndexError, non-numeric raise ValueError).
    return ((int(octets[0]) << 24) | (int(octets[1]) << 16) |
            (int(octets[2]) << 8) | int(octets[3]))
bsd-3-clause
adamluo159/go
src/runtime/gcinfo_test.go
6389
// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package runtime_test import ( "bytes" "runtime" "testing" ) const ( typeScalar = 0 typePointer = 1 ) // TestGCInfo tests that various objects in heap, data and bss receive correct GC pointer type info. func TestGCInfo(t *testing.T) { verifyGCInfo(t, "bss Ptr", &bssPtr, infoPtr) verifyGCInfo(t, "bss ScalarPtr", &bssScalarPtr, infoScalarPtr) verifyGCInfo(t, "bss PtrScalar", &bssPtrScalar, infoPtrScalar) verifyGCInfo(t, "bss BigStruct", &bssBigStruct, infoBigStruct()) verifyGCInfo(t, "bss string", &bssString, infoString) verifyGCInfo(t, "bss slice", &bssSlice, infoSlice) verifyGCInfo(t, "bss eface", &bssEface, infoEface) verifyGCInfo(t, "bss iface", &bssIface, infoIface) verifyGCInfo(t, "data Ptr", &dataPtr, infoPtr) verifyGCInfo(t, "data ScalarPtr", &dataScalarPtr, infoScalarPtr) verifyGCInfo(t, "data PtrScalar", &dataPtrScalar, infoPtrScalar) verifyGCInfo(t, "data BigStruct", &dataBigStruct, infoBigStruct()) verifyGCInfo(t, "data string", &dataString, infoString) verifyGCInfo(t, "data slice", &dataSlice, infoSlice) verifyGCInfo(t, "data eface", &dataEface, infoEface) verifyGCInfo(t, "data iface", &dataIface, infoIface) verifyGCInfo(t, "stack Ptr", new(Ptr), infoPtr) verifyGCInfo(t, "stack ScalarPtr", new(ScalarPtr), infoScalarPtr) verifyGCInfo(t, "stack PtrScalar", new(PtrScalar), infoPtrScalar) verifyGCInfo(t, "stack BigStruct", new(BigStruct), infoBigStruct()) verifyGCInfo(t, "stack string", new(string), infoString) verifyGCInfo(t, "stack slice", new([]string), infoSlice) verifyGCInfo(t, "stack eface", new(interface{}), infoEface) verifyGCInfo(t, "stack iface", new(Iface), infoIface) for i := 0; i < 10; i++ { verifyGCInfo(t, "heap Ptr", escape(new(Ptr)), trimDead(padDead(infoPtr))) verifyGCInfo(t, "heap PtrSlice", escape(&make([]*byte, 10)[0]), trimDead(infoPtr10)) verifyGCInfo(t, "heap ScalarPtr", 
escape(new(ScalarPtr)), trimDead(infoScalarPtr)) verifyGCInfo(t, "heap ScalarPtrSlice", escape(&make([]ScalarPtr, 4)[0]), trimDead(infoScalarPtr4)) verifyGCInfo(t, "heap PtrScalar", escape(new(PtrScalar)), trimDead(infoPtrScalar)) verifyGCInfo(t, "heap BigStruct", escape(new(BigStruct)), trimDead(infoBigStruct())) verifyGCInfo(t, "heap string", escape(new(string)), trimDead(infoString)) verifyGCInfo(t, "heap eface", escape(new(interface{})), trimDead(infoEface)) verifyGCInfo(t, "heap iface", escape(new(Iface)), trimDead(infoIface)) } } func verifyGCInfo(t *testing.T, name string, p interface{}, mask0 []byte) { mask := runtime.GCMask(p) if !bytes.Equal(mask, mask0) { t.Errorf("bad GC program for %v:\nwant %+v\ngot %+v", name, mask0, mask) return } } func padDead(mask []byte) []byte { // Because the dead bit isn't encoded in the second word, // and because on 32-bit systems a one-word allocation // uses a two-word block, the pointer info for a one-word // object needs to be expanded to include an extra scalar // on 32-bit systems to match the heap bitmap. if runtime.PtrSize == 4 && len(mask) == 1 { return []byte{mask[0], 0} } return mask } func trimDead(mask []byte) []byte { for len(mask) > 2 && mask[len(mask)-1] == typeScalar { mask = mask[:len(mask)-1] } if len(mask) == 2 && mask[0] == typeScalar && mask[1] == typeScalar { mask = mask[:0] } return mask } var gcinfoSink interface{} func escape(p interface{}) interface{} { gcinfoSink = p return p } var infoPtr = []byte{typePointer} type Ptr struct { *byte } var infoPtr10 = []byte{typePointer, typePointer, typePointer, typePointer, typePointer, typePointer, typePointer, typePointer, typePointer, typePointer} type ScalarPtr struct { q int w *int e int r *int t int y *int } var infoScalarPtr = []byte{typeScalar, typePointer, typeScalar, typePointer, typeScalar, typePointer} var infoScalarPtr4 = append(append(append(append([]byte(nil), infoScalarPtr...), infoScalarPtr...), infoScalarPtr...), infoScalarPtr...) 
type PtrScalar struct { q *int w int e *int r int t *int y int } var infoPtrScalar = []byte{typePointer, typeScalar, typePointer, typeScalar, typePointer, typeScalar} type BigStruct struct { q *int w byte e [17]byte r []byte t int y uint16 u uint64 i string } func infoBigStruct() []byte { switch runtime.GOARCH { case "386", "arm": return []byte{ typePointer, // q *int typeScalar, typeScalar, typeScalar, typeScalar, typeScalar, // w byte; e [17]byte typePointer, typeScalar, typeScalar, // r []byte typeScalar, typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64 typePointer, typeScalar, // i string } case "arm64", "amd64", "mips64", "mips64le", "ppc64", "ppc64le", "s390x": return []byte{ typePointer, // q *int typeScalar, typeScalar, typeScalar, // w byte; e [17]byte typePointer, typeScalar, typeScalar, // r []byte typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64 typePointer, typeScalar, // i string } case "amd64p32": return []byte{ typePointer, // q *int typeScalar, typeScalar, typeScalar, typeScalar, typeScalar, // w byte; e [17]byte typePointer, typeScalar, typeScalar, // r []byte typeScalar, typeScalar, typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64 typePointer, typeScalar, // i string } default: panic("unknown arch") } } type Iface interface { f() } type IfaceImpl int func (IfaceImpl) f() { } var ( // BSS bssPtr Ptr bssScalarPtr ScalarPtr bssPtrScalar PtrScalar bssBigStruct BigStruct bssString string bssSlice []string bssEface interface{} bssIface Iface // DATA dataPtr = Ptr{new(byte)} dataScalarPtr = ScalarPtr{q: 1} dataPtrScalar = PtrScalar{w: 1} dataBigStruct = BigStruct{w: 1} dataString = "foo" dataSlice = []string{"foo"} dataEface interface{} = 42 dataIface Iface = IfaceImpl(42) infoString = []byte{typePointer, typeScalar} infoSlice = []byte{typePointer, typeScalar, typeScalar} infoEface = []byte{typePointer, typePointer} infoIface = []byte{typePointer, typePointer} )
bsd-3-clause
ShreyaR13/Twilio-SMS-Project-Part-1
twilio-php-master/Twilio/Rest/Preview/Marketplace/InstalledAddOn/InstalledAddOnExtensionContext.php
3075
<?php /** * This code was generated by * \ / _ _ _| _ _ * | (_)\/(_)(_|\/| |(/_ v1.0.0 * / / */ namespace Twilio\Rest\Preview\Marketplace\InstalledAddOn; use Twilio\InstanceContext; use Twilio\Serialize; use Twilio\Values; use Twilio\Version; /** * PLEASE NOTE that this class contains preview products that are subject to change. Use them with caution. If you currently do not have developer preview access, please contact help@twilio.com. */ class InstalledAddOnExtensionContext extends InstanceContext { /** * Initialize the InstalledAddOnExtensionContext * * @param \Twilio\Version $version Version that contains the resource * @param string $installedAddOnSid The installed_add_on_sid * @param string $sid The unique Extension Sid * @return \Twilio\Rest\Preview\Marketplace\InstalledAddOn\InstalledAddOnExtensionContext */ public function __construct(Version $version, $installedAddOnSid, $sid) { parent::__construct($version); // Path Solution $this->solution = array( 'installedAddOnSid' => $installedAddOnSid, 'sid' => $sid, ); $this->uri = '/InstalledAddOns/' . rawurlencode($installedAddOnSid) . '/Extensions/' . rawurlencode($sid) . 
''; } /** * Fetch a InstalledAddOnExtensionInstance * * @return InstalledAddOnExtensionInstance Fetched * InstalledAddOnExtensionInstance */ public function fetch() { $params = Values::of(array()); $payload = $this->version->fetch( 'GET', $this->uri, $params ); return new InstalledAddOnExtensionInstance( $this->version, $payload, $this->solution['installedAddOnSid'], $this->solution['sid'] ); } /** * Update the InstalledAddOnExtensionInstance * * @param boolean $enabled A Boolean indicating if the Extension will be invoked * @return InstalledAddOnExtensionInstance Updated * InstalledAddOnExtensionInstance */ public function update($enabled) { $data = Values::of(array( 'Enabled' => Serialize::booleanToString($enabled), )); $payload = $this->version->update( 'POST', $this->uri, array(), $data ); return new InstalledAddOnExtensionInstance( $this->version, $payload, $this->solution['installedAddOnSid'], $this->solution['sid'] ); } /** * Provide a friendly representation * * @return string Machine friendly representation */ public function __toString() { $context = array(); foreach ($this->solution as $key => $value) { $context[] = "$key=$value"; } return '[Twilio.Preview.Marketplace.InstalledAddOnExtensionContext ' . implode(' ', $context) . ']'; } }
bsd-3-clause
AICP/external_chromium_org
ash/shell/app_list.cc
10960
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <string> #include "ash/session/session_state_delegate.h" #include "ash/shell.h" #include "ash/shell/example_factory.h" #include "ash/shell/toplevel_window.h" #include "ash/shell_delegate.h" #include "base/basictypes.h" #include "base/callback.h" #include "base/files/file_path.h" #include "base/i18n/case_conversion.h" #include "base/i18n/string_search.h" #include "base/strings/string_util.h" #include "base/strings/stringprintf.h" #include "base/strings/utf_string_conversions.h" #include "ui/app_list/app_list_item.h" #include "ui/app_list/app_list_item_list.h" #include "ui/app_list/app_list_model.h" #include "ui/app_list/app_list_view_delegate.h" #include "ui/app_list/search_box_model.h" #include "ui/app_list/search_result.h" #include "ui/app_list/speech_ui_model.h" #include "ui/gfx/canvas.h" #include "ui/gfx/font_list.h" #include "ui/gfx/geometry/rect.h" #include "ui/gfx/image/image_skia.h" #include "ui/views/examples/examples_window_with_content.h" namespace ash { namespace shell { namespace { // WindowTypeShelfItem is an app item of app list. It carries a window // launch type and launches corresponding example window when activated. 
class WindowTypeShelfItem : public app_list::AppListItem { public: enum Type { TOPLEVEL_WINDOW = 0, NON_RESIZABLE_WINDOW, LOCK_SCREEN, WIDGETS_WINDOW, EXAMPLES_WINDOW, LAST_TYPE, }; explicit WindowTypeShelfItem(const std::string& id, Type type) : app_list::AppListItem(id), type_(type) { std::string title(GetTitle(type)); SetIcon(GetIcon(type), false); SetName(title); } static gfx::ImageSkia GetIcon(Type type) { static const SkColor kColors[] = { SK_ColorRED, SK_ColorGREEN, SK_ColorBLUE, SK_ColorYELLOW, SK_ColorCYAN, }; const int kIconSize = 128; SkBitmap icon; icon.setConfig(SkBitmap::kARGB_8888_Config, kIconSize, kIconSize); icon.allocPixels(); icon.eraseColor(kColors[static_cast<int>(type) % arraysize(kColors)]); return gfx::ImageSkia::CreateFrom1xBitmap(icon); } // The text below is not localized as this is an example code. static std::string GetTitle(Type type) { switch (type) { case TOPLEVEL_WINDOW: return "Create Window"; case NON_RESIZABLE_WINDOW: return "Create Non-Resizable Window"; case LOCK_SCREEN: return "Lock Screen"; case WIDGETS_WINDOW: return "Show Example Widgets"; case EXAMPLES_WINDOW: return "Open Views Examples Window"; default: return "Unknown window type."; } } // The text below is not localized as this is an example code. static std::string GetDetails(Type type) { // Assigns details only to some types so that we see both one-line // and two-line results. 
switch (type) { case WIDGETS_WINDOW: return "Creates a window to show example widgets"; case EXAMPLES_WINDOW: return "Creates a window to show views example."; default: return std::string(); } } static void ActivateItem(Type type, int event_flags) { switch (type) { case TOPLEVEL_WINDOW: { ToplevelWindow::CreateParams params; params.can_resize = true; ToplevelWindow::CreateToplevelWindow(params); break; } case NON_RESIZABLE_WINDOW: { ToplevelWindow::CreateToplevelWindow(ToplevelWindow::CreateParams()); break; } case LOCK_SCREEN: { Shell::GetInstance()->session_state_delegate()->LockScreen(); break; } case WIDGETS_WINDOW: { CreateWidgetsWindow(); break; } case EXAMPLES_WINDOW: { views::examples::ShowExamplesWindowWithContent( views::examples::DO_NOTHING_ON_CLOSE, Shell::GetInstance()->delegate()->GetActiveBrowserContext(), NULL); break; } default: break; } } // AppListItem virtual void Activate(int event_flags) OVERRIDE { ActivateItem(type_, event_flags); } private: Type type_; DISALLOW_COPY_AND_ASSIGN(WindowTypeShelfItem); }; // ExampleSearchResult is an app list search result. It provides what icon to // show, what should title and details text look like. It also carries the // matching window launch type so that AppListViewDelegate knows how to open // it. class ExampleSearchResult : public app_list::SearchResult { public: ExampleSearchResult(WindowTypeShelfItem::Type type, const base::string16& query) : type_(type) { SetIcon(WindowTypeShelfItem::GetIcon(type_)); base::string16 title = base::UTF8ToUTF16(WindowTypeShelfItem::GetTitle(type_)); set_title(title); Tags title_tags; const size_t match_len = query.length(); // Highlight matching parts in title with bold. // Note the following is not a proper way to handle i18n string. 
title = base::i18n::ToLower(title); size_t match_start = title.find(query); while (match_start != base::string16::npos) { title_tags.push_back(Tag(Tag::MATCH, match_start, match_start + match_len)); match_start = title.find(query, match_start + match_len); } set_title_tags(title_tags); base::string16 details = base::UTF8ToUTF16(WindowTypeShelfItem::GetDetails(type_)); set_details(details); Tags details_tags; details_tags.push_back(Tag(Tag::DIM, 0, details.length())); set_details_tags(details_tags); } WindowTypeShelfItem::Type type() const { return type_; } private: WindowTypeShelfItem::Type type_; DISALLOW_COPY_AND_ASSIGN(ExampleSearchResult); }; class ExampleAppListViewDelegate : public app_list::AppListViewDelegate { public: ExampleAppListViewDelegate() : model_(new app_list::AppListModel), speech_ui_(app_list::SPEECH_RECOGNITION_OFF) { PopulateApps(); DecorateSearchBox(model_->search_box()); } private: void PopulateApps() { for (int i = 0; i < static_cast<int>(WindowTypeShelfItem::LAST_TYPE); ++i) { WindowTypeShelfItem::Type type = static_cast<WindowTypeShelfItem::Type>(i); std::string id = base::StringPrintf("%d", i); scoped_ptr<WindowTypeShelfItem> shelf_item( new WindowTypeShelfItem(id, type)); model_->AddItem(shelf_item.PassAs<app_list::AppListItem>()); } } gfx::ImageSkia CreateSearchBoxIcon() { const base::string16 icon_text = base::ASCIIToUTF16("ash"); const gfx::Size icon_size(32, 32); gfx::Canvas canvas(icon_size, 1.0f, false /* is_opaque */); canvas.DrawStringRectWithFlags( icon_text, gfx::FontList(), SK_ColorBLACK, gfx::Rect(icon_size), gfx::Canvas::TEXT_ALIGN_CENTER | gfx::Canvas::NO_SUBPIXEL_RENDERING); return gfx::ImageSkia(canvas.ExtractImageRep()); } void DecorateSearchBox(app_list::SearchBoxModel* search_box_model) { search_box_model->SetIcon(CreateSearchBoxIcon()); search_box_model->SetHintText(base::ASCIIToUTF16("Type to search...")); } // Overridden from app_list::AppListViewDelegate: virtual bool ForceNativeDesktop() const OVERRIDE { return 
false; } virtual void SetProfileByPath(const base::FilePath& profile_path) OVERRIDE { // Nothing needs to be done. } virtual const Users& GetUsers() const OVERRIDE { return users_; } virtual bool ShouldCenterWindow() const OVERRIDE { return false; } virtual app_list::AppListModel* GetModel() OVERRIDE { return model_.get(); } virtual app_list::SpeechUIModel* GetSpeechUI() OVERRIDE { return &speech_ui_; } virtual void GetShortcutPathForApp( const std::string& app_id, const base::Callback<void(const base::FilePath&)>& callback) OVERRIDE { callback.Run(base::FilePath()); } virtual void OpenSearchResult(app_list::SearchResult* result, bool auto_launch, int event_flags) OVERRIDE { const ExampleSearchResult* example_result = static_cast<const ExampleSearchResult*>(result); WindowTypeShelfItem::ActivateItem(example_result->type(), event_flags); } virtual void InvokeSearchResultAction(app_list::SearchResult* result, int action_index, int event_flags) OVERRIDE { NOTIMPLEMENTED(); } virtual base::TimeDelta GetAutoLaunchTimeout() OVERRIDE { return base::TimeDelta(); } virtual void AutoLaunchCanceled() OVERRIDE { } virtual void StartSearch() OVERRIDE { base::string16 query; base::TrimWhitespace(model_->search_box()->text(), base::TRIM_ALL, &query); query = base::i18n::ToLower(query); model_->results()->DeleteAll(); if (query.empty()) return; for (int i = 0; i < static_cast<int>(WindowTypeShelfItem::LAST_TYPE); ++i) { WindowTypeShelfItem::Type type = static_cast<WindowTypeShelfItem::Type>(i); base::string16 title = base::UTF8ToUTF16(WindowTypeShelfItem::GetTitle(type)); if (base::i18n::StringSearchIgnoringCaseAndAccents( query, title, NULL, NULL)) { model_->results()->Add(new ExampleSearchResult(type, query)); } } } virtual void StopSearch() OVERRIDE { // Nothing needs to be done. } virtual void ViewInitialized() OVERRIDE { // Nothing needs to be done. 
} virtual void Dismiss() OVERRIDE { DCHECK(ash::Shell::HasInstance()); if (Shell::GetInstance()->GetAppListTargetVisibility()) Shell::GetInstance()->ToggleAppList(NULL); } virtual void ViewClosing() OVERRIDE { // Nothing needs to be done. } virtual gfx::ImageSkia GetWindowIcon() OVERRIDE { return gfx::ImageSkia(); } virtual void OpenSettings() OVERRIDE { // Nothing needs to be done. } virtual void OpenHelp() OVERRIDE { // Nothing needs to be done. } virtual void OpenFeedback() OVERRIDE { // Nothing needs to be done. } virtual void ToggleSpeechRecognition() OVERRIDE { NOTIMPLEMENTED(); } virtual void ShowForProfileByPath( const base::FilePath& profile_path) OVERRIDE { // Nothing needs to be done. } virtual views::View* CreateStartPageWebView(const gfx::Size& size) OVERRIDE { return NULL; } virtual bool IsSpeechRecognitionEnabled() OVERRIDE { return false; } scoped_ptr<app_list::AppListModel> model_; app_list::SpeechUIModel speech_ui_; Users users_; DISALLOW_COPY_AND_ASSIGN(ExampleAppListViewDelegate); }; } // namespace app_list::AppListViewDelegate* CreateAppListViewDelegate() { return new ExampleAppListViewDelegate; } } // namespace shell } // namespace ash
bsd-3-clause
wf2/shifter
tests/assets/freestyle/build-rollup-expected/baz/baz.js
93
YUI.add('baz', function (Y, NAME) { Y[NAME] = 2; }, '@VERSION@', {"requires": ["bar"]});
bsd-3-clause
ropik/chromium
chrome/browser/ssl/ssl_add_cert_handler.cc
3933
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/ssl/ssl_add_cert_handler.h" #include "base/bind.h" #include "chrome/browser/tab_contents/tab_contents_ssl_helper.h" #include "chrome/browser/tab_contents/tab_util.h" #include "chrome/browser/ui/tab_contents/tab_contents_wrapper.h" #include "content/public/browser/browser_thread.h" #include "content/public/browser/resource_request_info.h" #include "content/public/browser/web_contents.h" #include "net/base/cert_database.h" #include "net/base/net_errors.h" #include "net/base/x509_certificate.h" #include "net/url_request/url_request.h" using content::BrowserThread; using content::WebContents; SSLAddCertHandler::SSLAddCertHandler(net::URLRequest* request, net::X509Certificate* cert, int render_process_host_id, int render_view_id) : cert_(cert), render_process_host_id_(render_process_host_id), render_view_id_(render_view_id) { network_request_id_ = content::ResourceRequestInfo::ForRequest(request)->GetRequestID(); // Stay alive until the process completes and Finished() is called. AddRef(); // Delay adding the certificate until the next mainloop iteration. BrowserThread::PostTask( BrowserThread::IO, FROM_HERE, base::Bind(&SSLAddCertHandler::Run, this)); } SSLAddCertHandler::~SSLAddCertHandler() {} void SSLAddCertHandler::Run() { int cert_error; { net::CertDatabase db; cert_error = db.CheckUserCert(cert_); } if (cert_error != net::OK) { LOG_IF(ERROR, cert_error == net::ERR_NO_PRIVATE_KEY_FOR_CERT) << "No corresponding private key in store for cert: " << (cert_.get() ? 
cert_->subject().GetDisplayName() : "NULL"); BrowserThread::PostTask( BrowserThread::UI, FROM_HERE, base::Bind( &SSLAddCertHandler::CallVerifyClientCertificateError, this, cert_error)); Finished(false); return; } // TODO(davidben): Move the existing certificate dialog elsewhere, make // AskToAddCert send a message to the RenderViewHostDelegate, and ask when we // cannot completely verify the certificate for whatever reason. // AskToAddCert(); Finished(true); } #if !defined(OS_MACOSX) void SSLAddCertHandler::AskToAddCert() { // TODO(snej): Someone should add Windows and GTK implementations with UI. Finished(true); } #endif void SSLAddCertHandler::Finished(bool add_cert) { int cert_error = net::OK; if (add_cert) { net::CertDatabase db; cert_error = db.AddUserCert(cert_); } BrowserThread::PostTask( BrowserThread::UI, FROM_HERE, base::Bind( &SSLAddCertHandler::CallAddClientCertificate, this, add_cert, cert_error)); Release(); } void SSLAddCertHandler::CallVerifyClientCertificateError(int cert_error) { WebContents* tab = tab_util::GetWebContentsByID( render_process_host_id_, render_view_id_); if (!tab) return; TabContentsWrapper* wrapper = TabContentsWrapper::GetCurrentWrapperForContents(tab); wrapper->ssl_helper()->OnVerifyClientCertificateError(this, cert_error); } void SSLAddCertHandler::CallAddClientCertificate(bool add_cert, int cert_error) { WebContents* tab = tab_util::GetWebContentsByID( render_process_host_id_, render_view_id_); if (!tab) return; TabContentsWrapper* wrapper = TabContentsWrapper::GetCurrentWrapperForContents(tab); if (add_cert) { if (cert_error == net::OK) { wrapper->ssl_helper()->OnAddClientCertificateSuccess(this); } else { wrapper->ssl_helper()->OnAddClientCertificateError(this, cert_error); } } wrapper->ssl_helper()->OnAddClientCertificateFinished(this); }
bsd-3-clause
isabela-angelo/scratch-tangible-blocks
node_modules/scratch-blocks/gh-pages/playgrounds/tests/jsunit/event_test.js
13845
/** * @license * Visual Blocks Editor * * Copyright 2017 Google Inc. * https://developers.google.com/blockly/ * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @fileoverview Tests for Blockly.Events * @author marisaleung@google.com (Marisa Leung) */ 'use strict'; goog.require('goog.testing'); goog.require('goog.testing.MockControl'); var mockControl_; var workspace; function eventTest_setUp() { workspace = new Blockly.Workspace(); mockControl_ = new goog.testing.MockControl(); } function eventTest_setUpWithMockBlocks() { eventTest_setUp(); Blockly.defineBlocksWithJsonArray([{ 'type': 'field_variable_test_block', 'message0': '%1', 'args0': [ { 'type': 'field_variable', 'name': 'VAR', 'variable': 'item' } ], }]); } function eventTest_tearDown() { mockControl_.$tearDown(); workspace.dispose(); } function eventTest_tearDownWithMockBlocks() { eventTest_tearDown(); delete Blockly.Blocks.field_variable_test_block; } function test_abstract_constructor_block() { eventTest_setUpWithMockBlocks(); setUpMockMethod(mockControl_, Blockly.utils, 'genUid', null, '1'); var block = new Blockly.Block(workspace, 'field_variable_test_block'); var event = new Blockly.Events.Abstract(block); assertUndefined(event.varId); checkExactEventValues(event, {'blockId': '1', 'workspaceId': workspace.id, 'group': '', 'recordUndo': true}); eventTest_tearDownWithMockBlocks(); } function test_abstract_constructor_variable() { eventTest_setUpWithMockBlocks(); setUpMockMethod(mockControl_, Blockly.utils, 
'genUid', null, '1'); var variable = workspace.createVariable('name1', 'type1', 'id1'); var event = new Blockly.Events.Abstract(variable); assertUndefined(event.blockId); checkExactEventValues(event, {'varId': 'id1', 'workspaceId': workspace.id, 'group': '', 'recordUndo': true}); eventTest_tearDownWithMockBlocks(); } function test_abstract_constructor_null() { eventTest_setUpWithMockBlocks(); var event = new Blockly.Events.Abstract(null); assertUndefined(event.blockId); assertUndefined(event.workspaceId); checkExactEventValues(event, {'group': '', 'recordUndo': true}); eventTest_tearDownWithMockBlocks(); } function checkCreateEventValues(event, block, ids, type) { var expected_xml = Blockly.Xml.domToText(Blockly.Xml.blockToDom(block)); var result_xml = Blockly.Xml.domToText(event.xml); assertEquals(expected_xml, result_xml); isEqualArrays(ids, event.ids); assertEquals(type, event.type); } function checkDeleteEventValues(event, block, ids, type) { var expected_xml = Blockly.Xml.domToText(Blockly.Xml.blockToDom(block)); var result_xml = Blockly.Xml.domToText(event.oldXml); assertEquals(expected_xml, result_xml); isEqualArrays(ids, event.ids); assertEquals(type, event.type); } function checkExactEventValues(event, values) { var keys = Object.keys(values); for (var i = 0, field; field = keys[i]; i++) { assertEquals(values[field], event[field]); } } function test_create_constructor() { eventTest_setUpWithMockBlocks(); setUpMockMethod(mockControl_, Blockly.utils, 'genUid', null, ['1']); var block = new Blockly.Block(workspace, 'field_variable_test_block'); var event = new Blockly.Events.Create(block); checkCreateEventValues(event, block, ['1'], 'create'); eventTest_tearDownWithMockBlocks(); } function test_blockCreate_constructor() { // expect that blockCreate behaves the same as create. 
eventTest_setUpWithMockBlocks(); setUpMockMethod(mockControl_, Blockly.utils, 'genUid', null, ['1']); var block = new Blockly.Block(workspace, 'field_variable_test_block'); var event = new Blockly.Events.BlockCreate(block); checkCreateEventValues(event, block, ['1'], 'create'); eventTest_tearDownWithMockBlocks(); } function test_delete_constructor() { eventTest_setUpWithMockBlocks(); setUpMockMethod(mockControl_, Blockly.utils, 'genUid', null, ['1']); var block = new Blockly.Block(workspace, 'field_variable_test_block'); var event = new Blockly.Events.Delete(block); checkDeleteEventValues(event, block, ['1'], 'delete'); eventTest_tearDownWithMockBlocks(); } function test_blockDelete_constructor() { eventTest_setUpWithMockBlocks(); setUpMockMethod(mockControl_, Blockly.utils, 'genUid', null, ['1']); var block = new Blockly.Block(workspace, 'field_variable_test_block'); var event = new Blockly.Events.BlockDelete(block); checkDeleteEventValues(event, block, ['1'], 'delete'); eventTest_tearDownWithMockBlocks(); } function test_change_constructor() { eventTest_setUpWithMockBlocks(); setUpMockMethod(mockControl_, Blockly.utils, 'genUid', null, ['1']); var block = new Blockly.Block(workspace, 'field_variable_test_block'); var event = new Blockly.Events.Change(block, 'field', 'VAR', 'item', 'item2'); checkExactEventValues(event, {'element': 'field', 'name': 'VAR', 'oldValue': 'item', 'newValue': 'item2', 'type': 'change'}); eventTest_tearDownWithMockBlocks(); } function test_blockChange_constructor() { eventTest_setUpWithMockBlocks(); setUpMockMethod(mockControl_, Blockly.utils, 'genUid', null, ['1']); var block = new Blockly.Block(workspace, 'field_variable_test_block'); var event = new Blockly.Events.BlockChange(block, 'field', 'VAR', 'item', 'item2'); checkExactEventValues(event, {'element': 'field', 'name': 'VAR', 'oldValue': 'item', 'newValue': 'item2', 'type': 'change'}); eventTest_tearDownWithMockBlocks(); } function test_move_constructorCoordinate() { // Expect the 
oldCoordinate to be set. eventTest_setUpWithMockBlocks(); setUpMockMethod(mockControl_, Blockly.utils, 'genUid', null, ['1', '2']); var block1 = new Blockly.Block(workspace, 'field_variable_test_block'); var coordinate = new goog.math.Coordinate(3,4); block1.xy_ = coordinate; var event = new Blockly.Events.Move(block1); checkExactEventValues(event, {'oldCoordinate': coordinate, 'type': 'move'}); eventTest_tearDownWithMockBlocks(); } function test_move_constructoroldParentId() { // Expect the oldParentId to be set but not the oldCoordinate to be set. eventTest_setUpWithMockBlocks(); setUpMockMethod(mockControl_, Blockly.utils, 'genUid', null, ['1', '2']); var block1 = new Blockly.Block(workspace, 'field_variable_test_block'); var block2 = new Blockly.Block(workspace, 'field_variable_test_block'); block1.parentBlock_ = block2; block1.xy_ = new goog.math.Coordinate(3,4); var event = new Blockly.Events.Move(block1); checkExactEventValues(event, {'oldCoordinate': undefined, 'oldParentId': '2', 'type': 'move'}); block1.parentBlock_ = null; eventTest_tearDownWithMockBlocks(); } function test_blockMove_constructorCoordinate() { // Expect the oldCoordinate to be set. eventTest_setUpWithMockBlocks(); setUpMockMethod(mockControl_, Blockly.utils, 'genUid', null, ['1', '2']); var block1 = new Blockly.Block(workspace, 'field_variable_test_block'); var coordinate = new goog.math.Coordinate(3,4); block1.xy_ = coordinate; var event = new Blockly.Events.BlockMove(block1); checkExactEventValues(event, {'oldCoordinate': coordinate, 'type': 'move'}); eventTest_tearDownWithMockBlocks(); } function test_blockMove_constructoroldParentId() { // Expect the oldParentId to be set but not the oldCoordinate to be set. 
eventTest_setUpWithMockBlocks(); setUpMockMethod(mockControl_, Blockly.utils, 'genUid', null, ['1', '2']); var block1 = new Blockly.Block(workspace, 'field_variable_test_block'); var block2 = new Blockly.Block(workspace, 'field_variable_test_block'); block1.parentBlock_ = block2; block1.xy_ = new goog.math.Coordinate(3,4); var event = new Blockly.Events.BlockMove(block1); checkExactEventValues(event, {'oldCoordinate': undefined, 'oldParentId': '2', 'type': 'move'}); block1.parentBlock_ = null; eventTest_tearDownWithMockBlocks(); } function test_varCreate_constructor() { eventTest_setUp(); var variable = workspace.createVariable('name1', 'type1', 'id1'); var event = new Blockly.Events.VarCreate(variable); checkExactEventValues(event, {'varName': 'name1', 'varType': 'type1', 'type': 'var_create'}); eventTest_tearDown(); } function test_varCreate_toJson() { eventTest_setUp(); var variable = workspace.createVariable('name1', 'type1', 'id1'); var event = new Blockly.Events.VarCreate(variable); var json = event.toJson(); var expectedJson = ({type: "var_create", varId: "id1", varType: "type1", varName: "name1"}); assertEquals(JSON.stringify(expectedJson), JSON.stringify(json)); eventTest_tearDown(); } function test_varCreate_fromJson() { eventTest_setUp(); var variable = workspace.createVariable('name1', 'type1', 'id1'); var event = new Blockly.Events.VarCreate(variable); var event2 = new Blockly.Events.VarCreate(null); var json = event.toJson(); event2.fromJson(json); assertEquals(JSON.stringify(json), JSON.stringify(event2.toJson())); eventTest_tearDown(); } function test_varCreate_runForward() { eventTest_setUp(); var json = {type: "var_create", varId: "id1", varType: "type1", varName: "name1"}; var event = Blockly.Events.fromJson(json, workspace); assertNull(workspace.getVariableById('id1')); event.run(true); checkVariableValues(workspace, 'name1', 'type1', 'id1'); eventTest_tearDown(); } function test_varCreate_runBackwards() { eventTest_setUp(); var variable = 
workspace.createVariable('name1', 'type1', 'id1'); var event = new Blockly.Events.VarCreate(variable); assertNotNull(workspace.getVariableById('id1')); event.run(false); assertNull(workspace.getVariableById('id1')); eventTest_tearDown(); } function test_varDelete_constructor() { eventTest_setUp(); var variable = workspace.createVariable('name1', 'type1', 'id1'); var event = new Blockly.Events.VarDelete(variable); checkExactEventValues(event, {'varName': 'name1', 'varType': 'type1', 'varId':'id1', 'type': 'var_delete'}); eventTest_tearDown(); } function test_varDelete_toJson() { eventTest_setUp(); var variable = workspace.createVariable('name1', 'type1', 'id1'); var event = new Blockly.Events.VarDelete(variable); var json = event.toJson(); var expectedJson = ({type: "var_delete", varId: "id1", varType: "type1", varName: "name1"}); assertEquals(JSON.stringify(expectedJson), JSON.stringify(json)); eventTest_tearDown(); } function test_varDelete_fromJson() { eventTest_setUp(); var variable = workspace.createVariable('name1', 'type1', 'id1'); var event = new Blockly.Events.VarDelete(variable); var event2 = new Blockly.Events.VarDelete(null); var json = event.toJson(); event2.fromJson(json); assertEquals(JSON.stringify(json), JSON.stringify(event2.toJson())); eventTest_tearDown(); } function test_varDelete_runForwards() { eventTest_setUp(); var variable = workspace.createVariable('name1', 'type1', 'id1'); var event = new Blockly.Events.VarDelete(variable); assertNotNull(workspace.getVariableById('id1')); event.run(true); assertNull(workspace.getVariableById('id1')); eventTest_tearDown(); } function test_varDelete_runBackwards() { eventTest_setUp(); var json = {type: "var_delete", varId: "id1", varType: "type1", varName: "name1"}; var event = Blockly.Events.fromJson(json, workspace); assertNull(workspace.getVariableById('id1')); event.run(false); checkVariableValues(workspace, 'name1', 'type1', 'id1'); eventTest_tearDown(); } function test_varRename_constructor() { 
eventTest_setUp(); var variable = workspace.createVariable('name1', 'type1', 'id1'); var event = new Blockly.Events.VarRename(variable, 'name2'); checkExactEventValues(event, {'varId': 'id1', 'oldName': 'name1', 'newName': 'name2', 'type': 'var_rename'}); eventTest_tearDown(); } function test_varRename_toJson() { eventTest_setUp(); var variable = workspace.createVariable('name1', 'type1', 'id1'); var event = new Blockly.Events.VarRename(variable, 'name2'); var json = event.toJson(); var expectedJson = ({type: "var_rename", varId: "id1", oldName: "name1", newName: "name2"}); assertEquals(JSON.stringify(expectedJson), JSON.stringify(json)); eventTest_tearDown(); } function test_varRename_fromJson() { eventTest_setUp(); var variable = workspace.createVariable('name1', 'type1', 'id1'); var event = new Blockly.Events.VarRename(variable, ''); var event2 = new Blockly.Events.VarRename(null); var json = event.toJson(); event2.fromJson(json); assertEquals(JSON.stringify(json), JSON.stringify(event2.toJson())); eventTest_tearDown(); } function test_varRename_runForward() { eventTest_setUp(); var variable = workspace.createVariable('name1', 'type1', 'id1'); var event = new Blockly.Events.VarRename(variable, 'name2'); event.run(true); assertNull(workspace.getVariable('name1')); checkVariableValues(workspace, 'name2', 'type1', 'id1'); eventTest_tearDown(); } function test_varBackard_runForward() { eventTest_setUp(); var variable = workspace.createVariable('name1', 'type1', 'id1'); var event = new Blockly.Events.VarRename(variable, 'name2'); event.run(false); assertNull(workspace.getVariable('name2')); checkVariableValues(workspace, 'name1', 'type1', 'id1'); eventTest_tearDown(); }
bsd-3-clause
darneta/siwapp-sf1
apps/siwapp/modules/js/actions/actions.class.php
1435
<?php /** * js actions. * * @package siwapp * @subpackage js * @author Your name here * @version SVN: $Id: actions.class.php 12479 2008-10-31 10:54:40Z fabien $ */ class jsActions extends sfActions { public function preExecute() { sfConfig::set('sf_web_debug', false); } public function executeI18n(sfWebRequest $request) { } public function executeUrl(sfWebRequest $request) { sfProjectConfiguration::getActive()->loadHelpers('Url'); $urls = array(); if ($module = $request->getParameter('key')) { $urls = $this->loadUrls($module); } $this->urls = implode(",".PHP_EOL, $urls); } private function loadUrls($module, &$included = array()) { $urls = array(); $included[] = $module; $path = sfConfig::get('sf_app_module_dir')."/$module/config/module.yml"; if (file_exists($path) && is_file($path)) { $config = sfYaml::load($path); foreach ($config['all']['urls']['variables'] as $key => $value) { $urls[] = " $key : '".url_for($value)."'"; } if (array_key_exists('include', $config['all']['urls'])) { foreach ((array) $config['all']['urls']['include'] as $module) { if (!in_array($module, $included)) { $urls = array_merge($urls, $this->loadUrls($module, $included)); } } } } return $urls; } }
mit
ycsoft/FatCat-Server
LIBS/boost_1_58_0/libs/type_traits/test/has_virtual_destructor_test.cpp
3728
// (C) Copyright John Maddock 2005. // Use, modification and distribution are subject to the // Boost Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "test.hpp" #include "check_integral_constant.hpp" #ifdef TEST_STD # include <type_traits> #else # include <boost/type_traits/has_virtual_destructor.hpp> #endif #include <iostream> #include <stdexcept> #include <new> class polymorphic_no_virtual_destructor { public: virtual void method() = 0; }; TT_TEST_BEGIN(has_virtual_destructor) BOOST_CHECK_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<int>::value, false); BOOST_CHECK_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<const int>::value, false); BOOST_CHECK_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<volatile int>::value, false); BOOST_CHECK_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<int*>::value, false); BOOST_CHECK_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<int* const>::value, false); BOOST_CHECK_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<int[2]>::value, false); BOOST_CHECK_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<int&>::value, false); BOOST_CHECK_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<mf4>::value, false); BOOST_CHECK_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<f1>::value, false); BOOST_CHECK_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<enum_UDT>::value, false); BOOST_CHECK_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<UDT>::value, false); BOOST_CHECK_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<empty_UDT>::value, false); BOOST_CHECK_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<UDT*>::value, false); BOOST_CHECK_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<UDT[2]>::value, false); BOOST_CHECK_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<UDT&>::value, false); #ifndef BOOST_NO_CXX11_RVALUE_REFERENCES BOOST_CHECK_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<UDT&&>::value, false); #endif 
BOOST_CHECK_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<void>::value, false); BOOST_CHECK_SOFT_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<VB>::value, true, false); BOOST_CHECK_SOFT_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<VD>::value, true, false); BOOST_CHECK_SOFT_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<test_abc1>::value, true, false); BOOST_CHECK_SOFT_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<test_abc2>::value, true, false); BOOST_CHECK_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<polymorphic_no_virtual_destructor>::value, false); #ifndef BOOST_NO_STD_LOCALE BOOST_CHECK_SOFT_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<std::iostream>::value, true, false); BOOST_CHECK_SOFT_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<std::basic_streambuf<char> >::value, true, false); BOOST_CHECK_SOFT_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<std::basic_ios<char> >::value, true, false); BOOST_CHECK_SOFT_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<std::basic_istream<char> >::value, true, false); BOOST_CHECK_SOFT_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<std::basic_streambuf<char> >::value, true, false); #endif BOOST_CHECK_SOFT_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<std::exception>::value, true, false); BOOST_CHECK_SOFT_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<std::bad_alloc>::value, true, false); BOOST_CHECK_SOFT_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<std::runtime_error>::value, true, false); BOOST_CHECK_SOFT_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<std::out_of_range>::value, true, false); BOOST_CHECK_SOFT_INTEGRAL_CONSTANT(::tt::has_virtual_destructor<std::range_error>::value, true, false); TT_TEST_END
mit
andri0331/cetaku
Presentation/Nop.Web/Models/Boards/TopicMoveModel.cs
465
using System.Collections.Generic;
using System.Web.Mvc;
using Nop.Web.Framework.Mvc;

namespace Nop.Web.Models.Boards
{
    /// <summary>
    /// View model for moving a forum topic to a different forum.
    /// </summary>
    public partial class TopicMoveModel : BaseNopEntityModel
    {
        public TopicMoveModel()
        {
            // Pre-initialise so views can safely enumerate the list
            // even before it has been populated by the controller.
            ForumList = new List<SelectListItem>();
        }

        /// <summary>Identifier of the forum selected as the move destination.</summary>
        public int ForumSelected { get; set; }
        /// <summary>SEO-friendly name of the topic being moved (used to build its URL).</summary>
        public string TopicSeName { get; set; }
        /// <summary>Candidate destination forums, rendered as a drop-down list.</summary>
        public IEnumerable<SelectListItem> ForumList { get; set; }
    }
}
mit
patrickkusebauch/27skauti
src/libs/Nette/PhpGenerator/ClassType.php
5545
<?php

/**
 * This file is part of the Nette Framework (http://nette.org)
 * Copyright (c) 2004 David Grudl (http://davidgrudl.com)
 */

namespace Nette\PhpGenerator;

use Nette, Nette\Utils\Strings;


/**
 * Class/Interface/Trait description.
 *
 * NOTE: the @method tags below are not mere documentation — Nette\Object's
 * magic __call resolves them into working accessors at runtime, so they must
 * be kept in sync with the private fields.
 *
 * @author David Grudl
 *
 * @method ClassType setName(string)
 * @method string getName()
 * @method ClassType setType(string)
 * @method string getType()
 * @method ClassType setFinal(bool)
 * @method bool isFinal()
 * @method ClassType setAbstract(bool)
 * @method bool isAbstract()
 * @method ClassType setExtends(string[]|string)
 * @method string[]|string getExtends()
 * @method ClassType addExtend(string)
 * @method ClassType setImplements(string[])
 * @method string[] getImplements()
 * @method ClassType addImplement(string)
 * @method ClassType setTraits(string[])
 * @method string[] getTraits()
 * @method ClassType addTrait(string)
 * @method ClassType setDocuments(string[])
 * @method string[] getDocuments()
 * @method ClassType addDocument(string)
 * @method ClassType setConsts(scalar[])
 * @method scalar[] getConsts()
 * @method ClassType setProperties(Property[])
 * @method Property[] getProperties()
 * @method ClassType setMethods(Method[])
 * @method Method[] getMethods()
 */
class ClassType extends Nette\Object
{
	/** @var string */
	private $name;

	/** @var string class|interface|trait */
	private $type = 'class';

	/** @var bool */
	private $final;

	/** @var bool */
	private $abstract;

	/** @var string[]|string */
	private $extends = array();

	/** @var string[] */
	private $implements = array();

	/** @var string[] */
	private $traits = array();

	/** @var string[] */
	private $documents = array();

	/** @var mixed[] name => value */
	private $consts = array();

	/** @var Property[] name => Property */
	private $properties = array();

	/** @var Method[] name => Method */
	private $methods = array();


	/**
	 * Builds a ClassType description from an existing class via reflection.
	 * Accepts either a \ReflectionClass or anything \ReflectionClass accepts
	 * (class name or object).
	 * @return ClassType
	 */
	public static function from($from)
	{
		$from = $from instanceof \ReflectionClass ? $from : new \ReflectionClass($from);
		$class = new static($from->getShortName());
		// Traits only exist on PHP >= 5.4; earlier versions fall back to 'class'.
		$class->type = $from->isInterface() ? 'interface' : (PHP_VERSION_ID >= 50400 && $from->isTrait() ? 'trait' : 'class');
		$class->final = $from->isFinal();
		$class->abstract = $from->isAbstract() && $class->type === 'class';
		$class->implements = $from->getInterfaceNames();
		// Strips the comment delimiters and leading " * " from each doc line.
		// NOTE(review): this stores a single string (not string[] as declared);
		// __toString() casts it with (array), so both shapes work.
		$class->documents = preg_replace('#^\s*\* ?#m', '', trim($from->getDocComment(), "/* \r\n"));
		$namespace = $from->getNamespaceName();
		if ($from->getParentClass()) {
			$class->extends = $from->getParentClass()->getName();
			if ($namespace) {
				// Shorten names inside the same namespace; fully qualify others.
				$class->extends = Strings::startsWith($class->extends, "$namespace\\") ? substr($class->extends, strlen($namespace) + 1) : '\\' . $class->extends;
			}
			// Keep only the interfaces this class adds over its parent.
			$class->implements = array_diff($class->implements, $from->getParentClass()->getInterfaceNames());
		}
		if ($namespace) {
			foreach ($class->implements as & $interface) {
				$interface = Strings::startsWith($interface, "$namespace\\") ? substr($interface, strlen($namespace) + 1) : '\\' . $interface;
			}
		}
		// Only members declared by this very class (== compares the reflection
		// objects by value on purpose — see inline notes).
		foreach ($from->getProperties() as $prop) {
			if ($prop->getDeclaringClass() == $from) { // intentionally ==
				$class->properties[$prop->getName()] = Property::from($prop);
			}
		}
		foreach ($from->getMethods() as $method) {
			if ($method->getDeclaringClass() == $from) { // intentionally ==
				$class->methods[$method->getName()] = Method::from($method);
			}
		}
		return $class;
	}


	public function __construct($name = NULL)
	{
		$this->name = $name;
	}


	/** @return ClassType fluent interface */
	public function addConst($name, $value)
	{
		$this->consts[$name] = $value;
		return $this;
	}


	/**
	 * Adds a property; returns the new Property so it can be configured further.
	 * @return Property
	 */
	public function addProperty($name, $value = NULL)
	{
		$property = new Property;
		return $this->properties[$name] = $property->setName($name)->setValue($value);
	}


	/**
	 * Adds a method; interface methods get no visibility keyword and no body.
	 * @return Method
	 */
	public function addMethod($name)
	{
		$method = new Method;
		if ($this->type === 'interface') {
			$method->setVisibility('')->setBody(FALSE);
		} else {
			$method->setVisibility('public');
		}
		return $this->methods[$name] = $method->setName($name);
	}


	/**
	 * Renders the whole class/interface/trait declaration as PHP source.
	 * @return string PHP code
	 */
	public function __toString()
	{
		// Pre-render constants...
		$consts = array();
		foreach ($this->consts as $name => $value) {
			$consts[] = "const $name = " . Helpers::dump($value) . ";\n";
		}

		// ...and properties (each optionally preceded by its doc-comment).
		$properties = array();
		foreach ($this->properties as $property) {
			$properties[] = ($property->documents ? str_replace("\n", "\n * ", "/**\n" . implode("\n", (array) $property->documents)) . "\n */\n" : '')
				. $property->visibility . ($property->static ? ' static' : '') . ' $' . $property->name
				. ($property->value === NULL ? '' : ' = ' . Helpers::dump($property->value))
				. ";\n";
		}

		// Assemble header + indented body; Methods rely on their own __toString().
		return Strings::normalize(
			($this->documents ? str_replace("\n", "\n * ", "/**\n" . implode("\n", (array) $this->documents)) . "\n */\n" : '')
			. ($this->abstract ? 'abstract ' : '')
			. ($this->final ? 'final ' : '')
			. $this->type . ' '
			. $this->name . ' '
			. ($this->extends ? 'extends ' . implode(', ', (array) $this->extends) . ' ' : '')
			. ($this->implements ? 'implements ' . implode(', ', (array) $this->implements) . ' ' : '')
			. "\n{\n\n"
			. Strings::indent(
				($this->traits ? "use " . implode(', ', (array) $this->traits) . ";\n\n" : '')
				. ($this->consts ? implode('', $consts) . "\n\n" : '')
				. ($this->properties ? implode("\n", $properties) . "\n\n" : '')
				. implode("\n\n\n", $this->methods), 1)
			. "\n\n}") . "\n";
	}

}
mit
rherlt/HWR-Berlin-OOP2-2016
src/eclipse_workspace/08_GsonHttp/HttpclientLib/httpcomponents-client-4.5.2/examples/org/apache/http/examples/entity/mime/ClientMultipartFormPost.java
3485
/* * ==================================================================== * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * ==================================================================== * * This software consists of voluntary contributions made by many * individuals on behalf of the Apache Software Foundation. For more * information on the Apache Software Foundation, please see * <http://www.apache.org/>. * */ package org.apache.http.examples.entity.mime; import java.io.File; import org.apache.http.HttpEntity; import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpPost; import org.apache.http.entity.ContentType; import org.apache.http.entity.mime.MultipartEntityBuilder; import org.apache.http.entity.mime.content.FileBody; import org.apache.http.entity.mime.content.StringBody; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClients; import org.apache.http.util.EntityUtils; /** * Example how to use multipart/form encoded POST request. 
*/ public class ClientMultipartFormPost { public static void main(String[] args) throws Exception { if (args.length != 1) { System.out.println("File path not given"); System.exit(1); } CloseableHttpClient httpclient = HttpClients.createDefault(); try { HttpPost httppost = new HttpPost("http://localhost:8080" + "/servlets-examples/servlet/RequestInfoExample"); FileBody bin = new FileBody(new File(args[0])); StringBody comment = new StringBody("A binary file of some kind", ContentType.TEXT_PLAIN); HttpEntity reqEntity = MultipartEntityBuilder.create() .addPart("bin", bin) .addPart("comment", comment) .build(); httppost.setEntity(reqEntity); System.out.println("executing request " + httppost.getRequestLine()); CloseableHttpResponse response = httpclient.execute(httppost); try { System.out.println("----------------------------------------"); System.out.println(response.getStatusLine()); HttpEntity resEntity = response.getEntity(); if (resEntity != null) { System.out.println("Response content length: " + resEntity.getContentLength()); } EntityUtils.consume(resEntity); } finally { response.close(); } } finally { httpclient.close(); } } }
mit
TukekeSoft/CocosSharp
box2d/Dynamics/b2QueryCallback.cs
497
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;

namespace Box2D.Dynamics
{
    /// <summary>
    /// Callback class for AABB queries.
    /// Subclass this and pass the instance to b2World::Query to receive
    /// every fixture whose AABB overlaps the queried region.
    /// </summary>
    public abstract class b2QueryCallback
    {
        /// <summary>
        /// Called once for each fixture found in the query AABB.
        /// </summary>
        /// <param name="fixture">A fixture overlapping the queried AABB.</param>
        /// <returns>false to terminate the query early; true to continue.</returns>
        public abstract bool ReportFixture(b2Fixture fixture);
    }
}
mit
yogeshsaroya/new-cdnjs
ajax/libs/angular.js/1.3.0-beta.19/i18n/angular-locale_en-001.js
129
version https://git-lfs.github.com/spec/v1 oid sha256:cb4896ff3f8a3439a0858180345c003d6f15c6589b6f48fbe57758a34838aa29 size 2326
mit
punker76/vscode
src/vs/editor/test/common/viewLayout/editorScrollable.test.ts
3468
/*---------------------------------------------------------------------------------------------
 *  Copyright (c) Microsoft Corporation. All rights reserved.
 *  Licensed under the MIT License. See License.txt in the project root for license information.
 *--------------------------------------------------------------------------------------------*/
'use strict';

import * as assert from 'assert';
import {EditorScrollable} from 'vs/editor/common/viewLayout/editorScrollable';

suite('Editor ViewLayout - EditorScrollable', () => {

	// Asserts the complete observable state of the scrollable in one call:
	// scroll offsets, viewport dimensions and content dimensions.
	function assertScrollState(scrollable:EditorScrollable, scrollTop:number, scrollLeft:number, width:number, height:number, scrollWidth:number, scrollHeight:number) {
		assert.equal(scrollable.getScrollTop(), scrollTop);
		assert.equal(scrollable.getScrollLeft(), scrollLeft);
		assert.equal(scrollable.getScrollWidth(), scrollWidth);
		assert.equal(scrollable.getScrollHeight(), scrollHeight);
		assert.equal(scrollable.getWidth(), width);
		assert.equal(scrollable.getHeight(), height);
	}

	test('EditorScrollable', () => {
		var scrollable = new EditorScrollable();
		scrollable.setWidth(100);
		scrollable.setHeight(100);
		assertScrollState(scrollable, 0, 0, 100, 100, 100, 100);

		// Make it vertically scrollable
		scrollable.setScrollHeight(1000);
		assertScrollState(scrollable, 0, 0, 100, 100, 100, 1000);

		// Scroll vertically: values outside [0, scrollHeight - height] clamp.
		scrollable.setScrollTop(10);
		assertScrollState(scrollable, 10, 0, 100, 100, 100, 1000);
		scrollable.setScrollTop(900);
		assertScrollState(scrollable, 900, 0, 100, 100, 100, 1000);
		scrollable.setScrollTop(-1);
		assertScrollState(scrollable, 0, 0, 100, 100, 100, 1000);
		scrollable.setScrollTop(901);
		assertScrollState(scrollable, 900, 0, 100, 100, 100, 1000);
		scrollable.setScrollTop(9001);
		assertScrollState(scrollable, 900, 0, 100, 100, 100, 1000);

		// Increase vertical size => scrollTop should readjust
		scrollable.setHeight(200);
		assertScrollState(scrollable, 800, 0, 100, 200, 100, 1000);

		// Reset height & scrollHeight
		scrollable.setScrollHeight(100);
		assertScrollState(scrollable, 0, 0, 100, 200, 100, 200);
		scrollable.setHeight(100);
		assertScrollState(scrollable, 0, 0, 100, 100, 100, 200);
		scrollable.setScrollHeight(100);
		assertScrollState(scrollable, 0, 0, 100, 100, 100, 100);

		// Make it horizontally scrollable
		scrollable.setScrollWidth(1000);
		assertScrollState(scrollable, 0, 0, 100, 100, 1000, 100);

		// Scroll horizontally: same clamping behaviour as the vertical axis.
		scrollable.setScrollLeft(10);
		assertScrollState(scrollable, 0, 10, 100, 100, 1000, 100);
		scrollable.setScrollLeft(900);
		assertScrollState(scrollable, 0, 900, 100, 100, 1000, 100);
		scrollable.setScrollLeft(-1);
		assertScrollState(scrollable, 0, 0, 100, 100, 1000, 100);
		scrollable.setScrollLeft(901);
		assertScrollState(scrollable, 0, 900, 100, 100, 1000, 100);
		scrollable.setScrollLeft(9001);
		assertScrollState(scrollable, 0, 900, 100, 100, 1000, 100);

		// Increase horizontal size => scrollLeft should readjust
		scrollable.setWidth(200);
		assertScrollState(scrollable, 0, 800, 200, 100, 1000, 100);

		// Validate width / height: negative inputs are clamped to zero.
		scrollable.setWidth(-1);
		assertScrollState(scrollable, 0, 800, 0, 100, 1000, 100);
		scrollable.setScrollWidth(-1);
		assertScrollState(scrollable, 0, 0, 0, 100, 0, 100);
		scrollable.setHeight(-1);
		assertScrollState(scrollable, 0, 0, 0, 0, 0, 100);
		scrollable.setScrollHeight(-1);
		assertScrollState(scrollable, 0, 0, 0, 0, 0, 0);
	});
});
mit
kanboard/kanboard
app/Notification/WebhookNotification.php
1480
<?php

namespace Kanboard\Notification;

use Kanboard\Core\Base;
use Kanboard\Core\Notification\NotificationInterface;

/**
 * Webhook Notification
 *
 * Forwards project events as JSON payloads to the configured webhook URL.
 *
 * @package Kanboard\Notification
 * @author  Frederic Guillot
 */
class WebhookNotification extends Base implements NotificationInterface
{
    /**
     * Send notification to a user (not supported by this channel — no-op)
     *
     * @access public
     * @param  array  $user
     * @param  string $event_name
     * @param  array  $event_data
     */
    public function notifyUser(array $user, $event_name, array $event_data)
    {
    }

    /**
     * Send notification to a project
     *
     * @access public
     * @param  array  $project
     * @param  string $event_name
     * @param  array  $event_data
     */
    public function notifyProject(array $project, $event_name, array $event_data)
    {
        $url = $this->configModel->get('webhook_url');

        // Nothing to do when no webhook endpoint has been configured.
        if (empty($url)) {
            return;
        }

        $token = $this->configModel->get('webhook_token');

        // Append the token, reusing the query string when one already exists.
        $url .= (strpos($url, '?') === false) ? '?token='.$token : '&token='.$token;

        $author = null;

        if ($this->userSession->isLogged()) {
            $author = $this->userSession->getUsername();
        }

        $this->httpClient->postJson($url, array(
            'event_name' => $event_name,
            'event_data' => $event_data,
            'event_author' => $author,
        ));
    }
}
mit