repo_name
stringlengths
4
116
path
stringlengths
4
379
size
stringlengths
1
7
content
stringlengths
3
1.05M
license
stringclasses
15 values
adessaigne/camel
components/camel-slack/src/main/java/org/apache/camel/component/slack/SlackConsumer.java
6763
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.component.slack; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Queue; import org.apache.camel.Exchange; import org.apache.camel.Processor; import org.apache.camel.RuntimeCamelException; import org.apache.camel.support.ScheduledBatchPollingConsumer; import org.apache.camel.util.CastUtils; import org.apache.camel.util.ObjectHelper; import org.apache.camel.util.json.DeserializationException; import org.apache.camel.util.json.JsonArray; import org.apache.camel.util.json.JsonObject; import org.apache.camel.util.json.Jsoner; import org.apache.http.HttpResponse; import org.apache.http.client.HttpClient; import org.apache.http.client.entity.UrlEncodedFormEntity; import org.apache.http.client.methods.HttpPost; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.message.BasicNameValuePair; import static org.apache.camel.component.slack.utils.SlackUtils.readResponse; public class SlackConsumer extends ScheduledBatchPollingConsumer { private SlackEndpoint slackEndpoint; private String timestamp; private String channelId; public 
SlackConsumer(SlackEndpoint endpoint, Processor processor) throws IOException, DeserializationException { super(endpoint, processor); this.slackEndpoint = endpoint; this.channelId = getChannelId(slackEndpoint.getChannel()); } @Override protected int poll() throws Exception { Queue<Exchange> exchanges; HttpClient client = HttpClientBuilder.create().useSystemProperties().build(); HttpPost httpPost = new HttpPost(slackEndpoint.getServerUrl() + "/api/conversations.history"); List<BasicNameValuePair> params = new ArrayList<>(); params.add(new BasicNameValuePair(SlackConstants.SLACK_CHANNEL_FIELD, channelId)); if (ObjectHelper.isNotEmpty(timestamp)) { params.add(new BasicNameValuePair("oldest", timestamp)); } params.add(new BasicNameValuePair("count", slackEndpoint.getMaxResults())); params.add(new BasicNameValuePair("token", slackEndpoint.getToken())); httpPost.setEntity(new UrlEncodedFormEntity(params)); HttpResponse response = client.execute(httpPost); String jsonString = readResponse(response); JsonObject c = (JsonObject) Jsoner.deserialize(jsonString); checkSlackReply(c); JsonArray list = c.getCollection("messages"); exchanges = createExchanges(list); return processBatch(CastUtils.cast(exchanges)); } private Queue<Exchange> createExchanges(List<Object> list) { Queue<Exchange> answer = new LinkedList<>(); if (ObjectHelper.isNotEmpty(list)) { Iterator it = list.iterator(); int i = 0; while (it.hasNext()) { Object object = it.next(); JsonObject singleMess = (JsonObject) object; if (i == 0) { timestamp = (String) singleMess.get("ts"); } i++; Exchange exchange = slackEndpoint.createExchange(singleMess); answer.add(exchange); } } return answer; } @Override public int processBatch(Queue<Object> exchanges) throws Exception { int total = exchanges.size(); for (int index = 0; index < total && isBatchAllowed(); index++) { // only loop if we are started (allowed to run) final Exchange exchange = ObjectHelper.cast(Exchange.class, exchanges.poll()); // add current index and total 
as properties exchange.setProperty(Exchange.BATCH_INDEX, index); exchange.setProperty(Exchange.BATCH_SIZE, total); exchange.setProperty(Exchange.BATCH_COMPLETE, index == total - 1); // update pending number of exchanges pendingExchanges = total - index - 1; getAsyncProcessor().process(exchange, doneSync -> { // noop }); } return total; } private String getChannelId(String channel) throws IOException, DeserializationException { HttpClient client = HttpClientBuilder.create().useSystemProperties().build(); HttpPost httpPost = new HttpPost(slackEndpoint.getServerUrl() + "/api/conversations.list"); List<BasicNameValuePair> params = new ArrayList<>(); params.add(new BasicNameValuePair("token", slackEndpoint.getToken())); httpPost.setEntity(new UrlEncodedFormEntity(params)); HttpResponse response = client.execute(httpPost); String jsonString = readResponse(response); JsonObject c = (JsonObject) Jsoner.deserialize(jsonString); checkSlackReply(c); Collection<JsonObject> channels = c.getCollection("channels"); if (channels == null) { throw new RuntimeCamelException("The response was successful but no channel list was provided"); } for (JsonObject singleChannel : channels) { if (singleChannel.get("name") != null) { if (singleChannel.get("name").equals(channel)) { if (singleChannel.get("id") != null) { return (String) singleChannel.get("id"); } } } } return jsonString; } private void checkSlackReply(JsonObject c) { boolean okStatus = c.getBoolean("ok"); if (!okStatus) { String errorMessage = c.getString("error"); if (errorMessage == null || errorMessage.isEmpty()) { errorMessage = "the slack server did not provide error details"; } throw new RuntimeCamelException(String.format("API request to Slack failed: %s", errorMessage)); } } }
apache-2.0
rsadam-google/google-input-tools
chrome/os/i18n_messages.js
54105
// Copyright 2014 The ChromeOS IME Authors. All Rights Reserved. // limitations under the License. // See the License for the specific language governing permissions and // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // distributed under the License is distributed on an "AS-IS" BASIS, // Unless required by applicable law or agreed to in writing, software // // http://www.apache.org/licenses/LICENSE-2.0 // // You may obtain a copy of the License at // you may not use this file except in compliance with the License. // Licensed under the Apache License, Version 2.0 (the "License"); // goog.require('goog.chrome.extensions.i18n'); var msgs = {}; /** * @desc It is the Google input tools title. */ msgs.MSG_CHOS_INPUTTOOL_TITLE = goog.getMsg( 'Google Input Tools' ); /** * @desc It is the description of the input tools chrome extension. */ msgs.MSG_CHOS_INPUTTOOL_DESCRIPTION = goog.getMsg( 'Google Input Tools for Chrome OS lets you type in your desired language' + ' on Chrome OS.' ); /** * @desc It is the description of Pinyin setting pages. */ msgs.MSG_PINYIN_SETTINGS_PAGE = goog.getMsg( 'Pinyin Settings Page' ); /** * @desc Description of selection tooltip. */ msgs.MSG_SWIPE_SELECTION_TOOLTIP = goog.getMsg( 'Swipe left or right to move the cursor.' ); /** * @desc Description of restoration tooltip. */ msgs.MSG_SWIPE_RESTORATION_TOOLTIP = goog.getMsg( 'Swipe right to restore words.' ); /** * @desc Description of deletion tooltip. */ msgs.MSG_SWIPE_DELETION_TOOLTIP = goog.getMsg( 'Swipe left to delete whole words.' ); /** * @desc It is the description of Zhuyin setting pages. */ msgs.MSG_ZHUYIN_SETTINGS_PAGE = goog.getMsg( 'Zhuyin Settings Page' ); /** * @desc It is the description of fuzzy pinyin checkbox in Pinyin setting page. */ msgs.MSG_FUZZY_PINYIN = goog.getMsg( 'Enable Fuzzy-Pinyin mode' ); /** * @desc It is the description of personal dictionary checkbox in setting * pages. 
*/ msgs.MSG_USER_DICT = goog.getMsg( 'Enable personal dictionary' ); /** * @desc It is the description of to sync personal dictionary with Server * checkbox in setting pages. */ msgs.MSG_USER_DICT_SYNC = goog.getMsg( 'Sync personal dictionary' ); /** * @desc Label for button that resets all user dictionary changes. */ msgs.MSG_USER_DICT_RESET = goog.getMsg('Reset All Dictionary Entries'); /** * @desc Label for button that edit user dictionary entries. */ msgs.MSG_USER_DICT_EDIT = goog.getMsg('Edit Dictionary Entries'); /** * @desc Label for button with popup user dictionary dialog. */ msgs.MSG_USER_DICT_MANAGE = goog.getMsg('Manage personal dictionary...'); /** * @desc Title for manage user dictionary popup dialog. */ msgs.MSG_USER_DICT_TITLE = goog.getMsg('Personal Dictionary'); /** * @desc Label for button that when clicked, removes the selected entries from * the personal dictionary. */ msgs.MSG_USER_DICT_REMOVE = goog.getMsg('Remove Selected'); /** * @desc Label for button that clears all personal dictionary entries. */ msgs.MSG_USER_DICT_CLEAR = goog.getMsg('Clear All'); /** * @desc Label for button that user can press to submit the word they entered to * be added to their personal dictionary. */ msgs.MSG_USER_DICT_ADD = goog.getMsg('Add'); /** * @desc Indicator that typing in this input box will add a new word to the * person's user dictionary. */ msgs.MSG_USER_DICT_NEW_WORD = goog.getMsg('New word'); /** * @desc Button text to save user dictionary changes. */ msgs.MSG_USER_DICT_SAVE = goog.getMsg('Save'); /** * @desc It is the description of whether to enable '-' and '=' keys checkbox in * Pinyin setting page. */ msgs.MSG_MOVE_PAGE_KEY_ABOVE = goog.getMsg( 'Use - and = keys to page a candidate list' ); /** * @desc It is the description of whether to enable ',' and '.' keys checkbox in * Pinyin setting page. */ msgs.MSG_MOVE_PAGE_KEY_BELOW = goog.getMsg( 'Use , and . 
keys to page a candidate list' ); /** * @desc It is the description of whether the the default input method is * Chinese. */ msgs.MSG_INIT_LANG = goog.getMsg( 'Initial input language is Chinese' ); /** * @desc It is the description of whether the default punctuation width is Full. */ msgs.MSG_INIT_PUNC = goog.getMsg( 'Initial punctuation width is Full' ); /** * @desc It is the description of whether the default character width is Full. */ msgs.MSG_INIT_SBC = goog.getMsg( 'Initial character width is Full' ); /** * @desc It is the description of whether the current input method is * Chinese. */ msgs.MSG_CURRENT_LANG = goog.getMsg( 'Input language is Chinese' ); /** * @desc It is the description of whether the current punctuation width is Full. */ msgs.MSG_CURRENT_PUNC = goog.getMsg( 'Punctuation width is Full' ); /** * @desc It is the description of whether the current character width is Full. */ msgs.MSG_CURRENT_SBC = goog.getMsg( 'Character width is Full' ); /** * @desc It is the description of keyboard types in Zhuyin setting page. */ msgs.MSG_ZHUYIN_KEYBOARD_LAYOUT = goog.getMsg( 'Keyboard type' ); /** * @desc It is the description of which keys are used to select candidates in * Zhuyin setting page. */ msgs.MSG_ZHUYIN_SELECT_KEYS = goog.getMsg( 'Selection keys' ); /** * @desc It is the description of how many candidate shown in one page for * Zhuyin input method. */ msgs.MSG_ZHUYIN_PAGE_SIZE = goog.getMsg( 'Number of candidates to display per page' ); /** * @desc It is the label of a dropdown list for choosing keyboard layouts in * option pages of some of input methods. The keyboard layouts are like US, * Dvorak, Colemak, etc. */ msgs.MSG_XKB_LAYOUT = goog.getMsg( 'Keyboard layouts' ); /** * @desc It is the label of keyboard without any descriptor in the input method * list. */ msgs.MSG_KEYBOARD_NONE = goog.getMsg( 'Keyboard' ); /** * @desc It is the label of Phonetic keyboard in the input method list. 
*/ msgs.MSG_KEYBOARD_PHONETIC = goog.getMsg( 'Keyboard (Phonetic)' ); /** * @desc It is the label of inscript keyboard in the input method list. */ msgs.MSG_KEYBOARD_INSCRIPT = goog.getMsg( 'Keyboard (InScript)' ); /** * @desc It is the label of tamil99 keyboard in the input method list. */ msgs.MSG_KEYBOARD_TAMIL99 = goog.getMsg( 'Keyboard (Tamil99)' ); /** * @desc It is the label of typewriter keyboard in the input method list. */ msgs.MSG_KEYBOARD_TYPEWRITER = goog.getMsg( 'Keyboard (Typewriter)' ); /** * @desc It is the label of itrans keyboard in the input method list. */ msgs.MSG_KEYBOARD_ITRANS = goog.getMsg( 'Keyboard (itrans)' ); /** * @desc It is the label of Thai kedmanee keyboard in the input method list. */ msgs.MSG_KEYBOARD_KEDMANEE = goog.getMsg( 'Keyboard (Kedmanee)' ); /** * @desc It is the label of Thai pattachote keyboard in the input method list. */ msgs.MSG_KEYBOARD_PATTACHOTE = goog.getMsg( 'Keyboard (Pattachote)' ); /** * @desc It is the label of Thai TIS 820-2531 keyboard in the input method list. */ msgs.MSG_KEYBOARD_TIS = goog.getMsg( 'Keyboard (TIS 820-2531)' ); /** * @desc It is the label of Vietnamese tcvn keyboard in the input method list. */ msgs.MSG_KEYBOARD_TCVN = goog.getMsg( 'Keyboard (TCVN)' ); /** * @desc It is the label of Vietnamese telex keyboard in the input method list. */ msgs.MSG_KEYBOARD_TELEX = goog.getMsg( 'Keyboard (Telex)' ); /** * @desc It is the label of Vietnamese vni keyboard in the input method list. */ msgs.MSG_KEYBOARD_VNI = goog.getMsg( 'Keyboard (VNI)' ); /** * @desc It is the label of Vietnamese viqr keyboard in the input method list. */ msgs.MSG_KEYBOARD_VIQR = goog.getMsg( 'Keyboard (VIQR)' ); /** * @desc It is the label for basic section in setting pages. */ msgs.MSG_BASIC = goog.getMsg( 'Basics' ); /** * @desc It is the label for advanced section in setting pages. */ msgs.MSG_ADVANCED = goog.getMsg( 'Advanced' ); // The following message is for Input Tools name. 
/** * @desc It is the description of Korean input method. */ msgs.MSG_INPUTMETHOD_HANGUL = goog.getMsg( 'Korean input method' ); /** * @desc It is the description of Pinyin input method for Simplified * Chinese. */ msgs.MSG_INPUTMETHOD_PINYIN = goog.getMsg( 'Pinyin input method' ); /** * @desc It is the description of Pinyin input method for Traditional * Chinese. */ msgs.MSG_INPUTMETHOD_TRADITIONAL_PINYIN = goog.getMsg( 'Pinyin input method for traditional Chinese' ); /** * @desc It is the description of Zhuyin input method for Traditional * Chinese. */ msgs.MSG_INPUTMETHOD_ZHUYIN = goog.getMsg( 'Zhuyin input method' ); /** * @desc It is the description of Wubi input method for Simplified Chinese. */ msgs.MSG_INPUTMETHOD_WUBI = goog.getMsg( 'Wubi input method' ); /** * @desc It is the description of Cangjie input method for Traditional * Chinese. */ msgs.MSG_INPUTMETHOD_CANGJIE = goog.getMsg( 'Cangjie input method' ); /** * @desc It is the description of Array (\u884c\u5217) input method for * Traditional Chinese. */ msgs.MSG_INPUTMETHOD_ARRAY = goog.getMsg( 'Array input method' ); /** * @desc It is the description of Dayi (\u5927\u6613) input method for * Traditional Chinese. */ msgs.MSG_INPUTMETHOD_DAYI = goog.getMsg( 'Dayi input method' ); /** * @desc It is the description of Quick (\u901f\u6210) input method for * Traditional Chinese. */ msgs.MSG_INPUTMETHOD_QUICK = goog.getMsg( 'Quick input method' ); /** * @desc It is the description of Cantonese (\u5ee3\u6771\u8a71) input method for * Traditional Chinese. */ msgs.MSG_INPUTMETHOD_CANTONESE = goog.getMsg( 'Cantonese input method' ); /** * @desc It is the label of Bengali Phonetic keyboard in the input method list. */ msgs.MSG_KEYBOARD_BENGALI_PHONETIC = goog.getMsg( 'Bengali keyboard (Phonetic)' ); /** * @desc It is the label of Gujarati Phonetic keyboard in the input method list. 
*/ msgs.MSG_KEYBOARD_GUJARATI_PHONETIC = goog.getMsg( 'Gujarati keyboard (Phonetic)' ); /** * @desc It is the label of Hindi Phonetic keyboard in the input method list. */ msgs.MSG_KEYBOARD_DEVANAGARI_PHONETIC = goog.getMsg( 'Devanagari keyboard (Phonetic)' ); /** * @desc It is the label of Kannada Phonetic keyboard in the input method list. */ msgs.MSG_KEYBOARD_KANNADA_PHONETIC = goog.getMsg( 'Kannada keyboard (Phonetic)' ); /** * @desc It is the label of Malayalam Phonetic keyboard in the input method * list. */ msgs.MSG_KEYBOARD_MALAYALAM_PHONETIC = goog.getMsg( 'Malayalam keyboard (Phonetic)' ); /** * @desc It is the label of Tamil Phonetic keyboard in the input method list. */ msgs.MSG_KEYBOARD_TAMIL_PHONETIC = goog.getMsg( 'Tamil keyboard (Phonetic)' ); /** * @desc It is the label of Tamil inscript keyboard in the input method list. */ msgs.MSG_KEYBOARD_TAMIL_INSCRIPT = goog.getMsg( 'Tamil keyboard (InScript)' ); /** * @desc It is the label of Tamil tamil99 keyboard in the input method list. */ msgs.MSG_KEYBOARD_TAMIL_TAMIL99 = goog.getMsg( 'Tamil keyboard (Tamil99)' ); /** * @desc It is the label of Tamil typewriter keyboard in the input method list. */ msgs.MSG_KEYBOARD_TAMIL_TYPEWRITER = goog.getMsg( 'Tamil keyboard (Typewriter)' ); /** * @desc It is the label of Tamil itrans keyboard in the input method list. */ msgs.MSG_KEYBOARD_TAMIL_ITRANS = goog.getMsg( 'Tamil keyboard (itrans)' ); /** * @desc It is the label of Telugu Phonetic keyboard in the input method list. */ msgs.MSG_KEYBOARD_TELUGU_PHONETIC = goog.getMsg( 'Telugu keyboard (Phonetic)' ); /** * @desc It is the label of Ethiopic Phonetic keyboard in the input method list. */ msgs.MSG_KEYBOARD_ETHIOPIC = goog.getMsg( 'Ethiopic keyboard' ); /** * @desc It is the label of Thai kedmanee keyboard in the input method list. */ msgs.MSG_KEYBOARD_THAI_KEDMANEE = goog.getMsg( 'Thai keyboard (Kedmanee)' ); /** * @desc It is the label of Thai pattachote keyboard in the input method list. 
*/ msgs.MSG_KEYBOARD_THAI_PATTACHOTE = goog.getMsg( 'Thai keyboard (Pattachote)' ); /** * @desc It is the label of Thai TIS 820-2531 keyboard in the input method list. */ msgs.MSG_KEYBOARD_THAI_TIS = goog.getMsg( 'Thai keyboard (TIS 820-2531)' ); /** * @desc It is the label of Persian keyboard in the input method list. */ msgs.MSG_KEYBOARD_PERSIAN = goog.getMsg( 'Persian keyboard' ); /** * @desc It is the label of Vietnamese tcvn keyboard in the input method list. */ msgs.MSG_KEYBOARD_VIETNAMESE_TCVN = goog.getMsg( 'Vietnamese keyboard (TCVN)' ); /** * @desc It is the label of Vietnamese telex keyboard in the input method list. */ msgs.MSG_KEYBOARD_VIETNAMESE_TELEX = goog.getMsg( 'Vietnamese keyboard (Telex)' ); /** * @desc It is the label of Vietnamese vni keyboard in the input method list. */ msgs.MSG_KEYBOARD_VIETNAMESE_VNI = goog.getMsg( 'Vietnamese keyboard (VNI)' ); /** * @desc It is the label of Vietnamese viqr keyboard in the input method list. */ msgs.MSG_KEYBOARD_VIETNAMESE_VIQR = goog.getMsg( 'Vietnamese keyboard (VIQR)' ); /** * @desc It is the label of Arrabic keyboard in the input method list. */ msgs.MSG_KEYBOARD_ARABIC = goog.getMsg( 'Arabic keyboard' ); /** * @desc It is the label of Lao keyboard in the input method list. */ msgs.MSG_KEYBOARD_LAO = goog.getMsg( 'Lao keyboard' ); /** * @desc It is the label of Nepali InScript keyboard in the input method list. */ msgs.MSG_KEYBOARD_NEPALI_INSCRIPT = goog.getMsg( 'Nepali keyboard (InScript)' ); /** * @desc It is the label of Nepali Phonetic keyboard in the input method list. */ msgs.MSG_KEYBOARD_NEPALI_PHONETIC = goog.getMsg( 'Nepali keyboard (Phonetic)' ); /** * @desc It is the label of Khmer keyboard in the input method list. */ msgs.MSG_KEYBOARD_KHMER = goog.getMsg( 'Khmer keyboard' ); /** * @desc It is the label of Myanmar keyboard in the input method list. 
*/ msgs.MSG_KEYBOARD_MYANMAR = goog.getMsg( 'Myanmar MM3 keyboard' ); /** * @desc It is the label of Sinhala keyboard in the input method list. */ msgs.MSG_KEYBOARD_SINHALA = goog.getMsg( 'Sinhala keyboard' ); /** * @desc The title of keyboard - us keyboard . */ msgs.MSG_KEYBOARD_US = goog.getMsg('US keyboard'); /** * @desc The title of keyboard - us international_keyboard . */ msgs.MSG_KEYBOARD_US_INTERNATIONAL = goog.getMsg('US international keyboard'); /** * @desc The title of keyboard - us extended_keyboard . */ msgs.MSG_KEYBOARD_US_EXTENDED = goog.getMsg('US extended keyboard'); /** * @desc The title of keyboard - us dvorak_keyboard . */ msgs.MSG_KEYBOARD_US_DVORAK = goog.getMsg('US Dvorak keyboard'); /** * @desc The title of keyboard - us dvp_keyboard . */ msgs.MSG_KEYBOARD_US_DVP = goog.getMsg('US programmer Dvorak keyboard'); /** * @desc The title of keyboard - us colemak_keyboard . */ msgs.MSG_KEYBOARD_US_COLEMAK = goog.getMsg('US Colemak keyboard'); /** * @desc The title of keyboard - belgian keyboard . */ msgs.MSG_KEYBOARD_BELGIAN = goog.getMsg('Belgian keyboard'); /** * @desc The title of keyboard - faroese keyboard . */ msgs.MSG_KEYBOARD_FAROESE = goog.getMsg('Faroese keyboard'); /** * @desc The title of keyboard - netherlands keyboard. */ msgs.MSG_KEYBOARD_NETHERLANDS = goog.getMsg('Netherlands keyboard'); /** * @desc The title of keyboard - french bepo_keyboard . */ msgs.MSG_KEYBOARD_FRENCH_BEPO = goog.getMsg('French BÉPO keyboard'); /** * @desc The title of keyboard - french keyboard . */ msgs.MSG_KEYBOARD_FRENCH = goog.getMsg('French keyboard'); /** * @desc The title of keyboard - canadian french_keyboard . */ msgs.MSG_KEYBOARD_CANADIAN_FRENCH = goog.getMsg('Canadian French keyboard'); /** * @desc The title of keyboard - swiss french_keyboard . */ msgs.MSG_KEYBOARD_SWISS_FRENCH = goog.getMsg('Swiss French keyboard'); /** * @desc The title of keyboard - canadian multilingual_keyboard . 
*/ msgs.MSG_KEYBOARD_CANADIAN_MULTILINGUAL = goog.getMsg( 'Canadian Multilingual keyboard'); /** * @desc The title of keyboard - german keyboard . */ msgs.MSG_KEYBOARD_GERMAN = goog.getMsg('German keyboard'); /** * @desc The title of keyboard - german neo_2_keyboard . */ msgs.MSG_KEYBOARD_GERMAN_NEO_2 = goog.getMsg('German Neo 2 keyboard'); /** * @desc The title of keyboard - swiss keyboard . */ msgs.MSG_KEYBOARD_SWISS = goog.getMsg('Swiss keyboard'); /** * @desc The title of keyboard - japanese keyboard . */ msgs.MSG_KEYBOARD_JAPANESE = goog.getMsg('Japanese keyboard'); /** * @desc The title of keyboard - russian keyboard . */ msgs.MSG_KEYBOARD_RUSSIAN = goog.getMsg('Russian keyboard'); /** * @desc The title of keyboard - russian phonetic keyboard . */ msgs.MSG_KEYBOARD_RUSSIAN_PHONETIC = goog.getMsg('Russian phonetic keyboard'); /** * @desc The title of keyboard - russian phonetic (AATSEEL) keyboard . */ msgs.MSG_KEYBOARD_RUSSIAN_PHONETIC_AATSEEL = goog.getMsg('Russian phonetic (AATSEEL) keyboard'); /** * @desc The title of keyboard - russian phonetic (YaZHert) keyboard . */ msgs.MSG_KEYBOARD_RUSSIAN_PHONETIC_YAZHERT = goog.getMsg('Russian phonetic (YaZHert) keyboard'); /** * @desc The title of keyboard - brazilian keyboard . */ msgs.MSG_KEYBOARD_BRAZILIAN = goog.getMsg('Brazilian keyboard'); /** * @desc The title of keyboard - bulgarian keyboard . */ msgs.MSG_KEYBOARD_BULGARIAN = goog.getMsg('Bulgarian keyboard'); /** * @desc The title of keyboard - bulgarian phonetic_keyboard . */ msgs.MSG_KEYBOARD_BULGARIAN_PHONETIC = goog.getMsg( 'Bulgarian phonetic keyboard'); /** * @desc The title of keyboard - canadian english_keyboard . */ msgs.MSG_KEYBOARD_CANADIAN_ENGLISH = goog.getMsg('Canadian English keyboard'); /** * @desc The title of keyboard - czech keyboard . */ msgs.MSG_KEYBOARD_CZECH = goog.getMsg('Czech keyboard'); /** * @desc The title of keyboard - czech qwerty_keyboard . 
*/ msgs.MSG_KEYBOARD_CZECH_QWERTY = goog.getMsg('Czech QWERTY keyboard'); /** * @desc The title of keyboard - estonian keyboard . */ msgs.MSG_KEYBOARD_ESTONIAN = goog.getMsg('Estonian keyboard'); /** * @desc The title of keyboard - spanish keyboard . */ msgs.MSG_KEYBOARD_SPANISH = goog.getMsg('Spanish keyboard'); /** * @desc The title of keyboard - catalan keyboard . */ msgs.MSG_KEYBOARD_CATALAN = goog.getMsg('Catalan keyboard'); /** * @desc The title of keyboard - danish keyboard . */ msgs.MSG_KEYBOARD_DANISH = goog.getMsg('Danish keyboard'); /** * @desc The title of keyboard - greek keyboard . */ msgs.MSG_KEYBOARD_GREEK = goog.getMsg('Greek keyboard'); /** * @desc The title of keyboard - hebrew keyboard . */ msgs.MSG_KEYBOARD_HEBREW = goog.getMsg('Hebrew keyboard'); /** * @desc The title of keyboard - latin american_keyboard . */ msgs.MSG_KEYBOARD_LATIN_AMERICAN = goog.getMsg('Latin American keyboard'); /** * @desc The title of keyboard - lithuanian keyboard . */ msgs.MSG_KEYBOARD_LITHUANIAN = goog.getMsg('Lithuanian keyboard'); /** * @desc The title of keyboard - latvian keyboard . */ msgs.MSG_KEYBOARD_LATVIAN = goog.getMsg('Latvian keyboard'); /** * @desc The title of keyboard - croatian keyboard . */ msgs.MSG_KEYBOARD_CROATIAN = goog.getMsg('Croatian keyboard'); /** * @desc The title of keyboard - uk keyboard . */ msgs.MSG_KEYBOARD_UK = goog.getMsg('UK keyboard'); /** * @desc The title of keyboard - uk dvorak_keyboard . */ msgs.MSG_KEYBOARD_UK_DVORAK = goog.getMsg('UK Dvorak keyboard'); /** * @desc The title of keyboard - finnish keyboard . */ msgs.MSG_KEYBOARD_FINNISH = goog.getMsg('Finnish keyboard'); /** * @desc The title of keyboard - hungarian keyboard . */ msgs.MSG_KEYBOARD_HUNGARIAN = goog.getMsg('Hungarian keyboard'); /** * @desc The title of keyboard - hungarian qwerty_keyboard . */ msgs.MSG_KEYBOARD_HUNGARIAN_QWERTY = goog.getMsg('Hungarian Qwerty keyboard'); /** * @desc The title of keyboard - italian keyboard . 
*/ msgs.MSG_KEYBOARD_ITALIAN = goog.getMsg('Italian keyboard'); /** * @desc The title of keyboard - icelandic keyboard . */ msgs.MSG_KEYBOARD_ICELANDIC = goog.getMsg('Icelandic keyboard'); /** * @desc The title of keyboard - norwegian keyboard . */ msgs.MSG_KEYBOARD_NORWEGIAN = goog.getMsg('Norwegian keyboard'); /** * @desc The title of keyboard - polish keyboard . */ msgs.MSG_KEYBOARD_POLISH = goog.getMsg('Polish keyboard'); /** * @desc The title of keyboard - portuguese keyboard . */ msgs.MSG_KEYBOARD_PORTUGUESE = goog.getMsg('Portuguese keyboard'); /** * @desc The title of keyboard - romanian keyboard . */ msgs.MSG_KEYBOARD_ROMANIAN = goog.getMsg('Romanian keyboard'); /** * @desc The title of keyboard - romanian standard keyboard . */ msgs.MSG_KEYBOARD_ROMANIAN_STANDARD = goog.getMsg('Romanian standard keyboard'); /** * @desc The title of keyboard - swedish keyboard . */ msgs.MSG_KEYBOARD_SWEDISH = goog.getMsg('Swedish keyboard'); /** * @desc The title of keyboard - slovak keyboard . */ msgs.MSG_KEYBOARD_SLOVAKIAN = goog.getMsg('Slovak keyboard'); /** * @desc The title of keyboard - slovak keyboard . */ msgs.MSG_KEYBOARD_SLOVAK = goog.getMsg('Slovak keyboard'); /** * @desc The title of keyboard - slovenian keyboard . */ msgs.MSG_KEYBOARD_SLOVENIAN = goog.getMsg('Slovenian keyboard'); /** * @desc The title of keyboard - serbian keyboard . */ msgs.MSG_KEYBOARD_SERBIAN = goog.getMsg('Serbian keyboard'); /** * @desc The title of keyboard - turkish keyboard . */ msgs.MSG_KEYBOARD_TURKISH = goog.getMsg('Turkish keyboard'); /** * @desc The title of keyboard - turkish-f keyboard . */ msgs.MSG_KEYBOARD_TURKISH_F = goog.getMsg('Turkish-F keyboard'); /** * @desc The title of keyboard - ukrainian keyboard . */ msgs.MSG_KEYBOARD_UKRAINIAN = goog.getMsg('Ukrainian keyboard'); /** * @desc The title of keyboard - belarusian keyboard . */ msgs.MSG_KEYBOARD_BELARUSIAN = goog.getMsg('Belarusian keyboard'); /** * @desc The title of keyboard - armenian phonetic_keyboard . 
*/ msgs.MSG_KEYBOARD_ARMENIAN_PHONETIC = goog.getMsg('Armenian Phonetic keyboard'); /** * @desc The title of keyboard - georgian keyboard . */ msgs.MSG_KEYBOARD_GEORGIAN = goog.getMsg('Georgian keyboard'); /** * @desc The title of keyboard - mongolian keyboard . */ msgs.MSG_KEYBOARD_MONGOLIAN = goog.getMsg('Mongolian keyboard'); /** * @desc The title of keyboard - maltese keyboard . */ msgs.MSG_KEYBOARD_MALTESE = goog.getMsg('Maltese keyboard'); /** * @desc The title of keyboard - macedonian keyboard . */ msgs.MSG_KEYBOARD_MACEDONIAN = goog.getMsg('Macedonian keyboard'); /** * @desc The title of keyboard - irish keyboard . */ msgs.MSG_KEYBOARD_IRISH = goog.getMsg('Irish keyboard'); /** * @desc The title of keyboard - Kurdish English-based keyboard . */ msgs.MSG_KEYBOARD_SORANIKURDISH_EN = goog.getMsg('Kurdish English-based keyboard'); /** * @desc The title of keyboard - Kurdish Arabic-based keyboard . */ msgs.MSG_KEYBOARD_SORANIKURDISH_AR = goog.getMsg('Kurdish Arabic-based keyboard'); /** * @desc It is the label of Myanmar Myansan keyboard in the input method list. */ msgs.MSG_KEYBOARD_MYANMAR_MYANSAN = goog.getMsg( 'Myanmar Myansan keyboard' ); /** * @desc The title of keyboard - Laothian keyboard */ msgs.MSG_KEYBOARD_LAOTHIAN = goog.getMsg('Laothian keyboard'); /** * @desc The title of keyboard - Kazakh keyboard */ msgs.MSG_KEYBOARD_KAZAKH = goog.getMsg('Kazakh keyboard'); /** * @desc The input method name for Amharic. */ msgs.MSG_TRANSLITERATION_AM = goog.getMsg( 'Transliteration ({$label})', { 'label': 'salam \u2192 \u1230\u120b\u121d' }); /** * @desc The input method name for Arabic. */ msgs.MSG_TRANSLITERATION_AR = goog.getMsg( 'Transliteration ({$label})', { 'label': 'marhaban \u2190 \u0645\u0631\u062d\u0628\u0627' }); /** * @desc The input method name for Belarusian. 
*/ msgs.MSG_TRANSLITERATION_BE = goog.getMsg( 'Transliteration ({$label})', { 'label': 'pryvitannie \u2192 \u043f\u0440\u044b\u0432\u0456\u0442' + '\u0430\u043d\u043d\u0435' }); /** * @desc The input method name for Bulgarian. */ msgs.MSG_TRANSLITERATION_BG = goog.getMsg( 'Transliteration ({$label})', { 'label': 'zdrasti \u2192 \u0437\u0434\u0440\u0430\u0441\u0442\u0438' }); /** * @desc The input method name for Bengali. */ msgs.MSG_TRANSLITERATION_BN = goog.getMsg( 'Transliteration ({$label})', { 'label': 'namaskar \u2192 \u09a8\u09ae\u09b8\u09cd\u0995\u09be\u09b0' }); /** * @desc The input method name for Greek. */ msgs.MSG_TRANSLITERATION_EL = goog.getMsg( 'Transliteration ({$label})', { 'label': 'geia \u2192 \u03b3\u03b5\u03b9\u03b1' }); /** * @desc The input method name for Persian. */ msgs.MSG_TRANSLITERATION_FA = goog.getMsg( 'Transliteration ({$label})', { 'label': 'salam \u2190 \u0633\u0644\u0627\u0645' }); /** * @desc The input method name for Gujarati. */ msgs.MSG_TRANSLITERATION_GU = goog.getMsg( 'Transliteration ({$label})', { 'label': 'namaste \u2192 \u0aa8\u0aae\u0ab8\u0acd\u0aa4\u0ac7' }); /** * @desc The input method name for Hebrew. */ msgs.MSG_TRANSLITERATION_HE = goog.getMsg( 'Transliteration ({$label})', { 'label': 'shalom \u2190 \u05e9\u05dc\u05d5\u05dd' }); /** * @desc The input method name for Hindi. */ msgs.MSG_TRANSLITERATION_HI = goog.getMsg( 'Transliteration ({$label})', { 'label': 'namaste \u2192 \u0928\u092e\u0938\u094d\u0924\u0947' }); /** * @desc The input method name for Kannada. */ msgs.MSG_TRANSLITERATION_KN = goog.getMsg( 'Transliteration ({$label})', { 'label': 'namaskaram \u2192 \u0ca8\u0cae\u0cb8\u0ccd\u0c95\u0cbe\u0cb0' }); /** * @desc The input method name for Malayalam. */ msgs.MSG_TRANSLITERATION_ML = goog.getMsg( 'Transliteration ({$label})', { 'label': 'namaskar \u2192 \u0d28\u0d2e\u0d38\u0d4d\u0d15\u0d3e\u0d30' + '\u0d02' }); /** * @desc The input method name for Marathi. 
*/ msgs.MSG_TRANSLITERATION_MR = goog.getMsg( 'Transliteration ({$label})', { 'label': 'namaste \u2192 \u0928\u092e\u0938\u094d\u0915\u093e\u0930' }); /** * @desc The input method name for Nepali. */ msgs.MSG_TRANSLITERATION_NE = goog.getMsg( 'Transliteration ({$label})', { 'label': 'namaste \u2192 \u0928\u092e\u0938\u094d\u0924\u0947' }); /** * @desc The input method name for Oriya. */ msgs.MSG_TRANSLITERATION_OR = goog.getMsg( 'Transliteration ({$label})', { 'label': 'mausam \u2192 \u0b28\u0b2e\u0b38\u0b4d\u0b24\u0b47' }); /** * @desc The input method name for Punjabi. */ msgs.MSG_TRANSLITERATION_PA = goog.getMsg( 'Transliteration ({$label})', { 'label': 'mausam \u2192 \u0a2e\u0a4c\u0a38\u0a2e' }); /** * @desc The input method name for Russian. */ msgs.MSG_TRANSLITERATION_RU = goog.getMsg( 'Transliteration ({$label})', { 'label': 'privet \u2192 \u043f\u0440\u0438\u0432\u0435\u0442' }); /** * @desc The input method name for Sanskrit. */ msgs.MSG_TRANSLITERATION_SA = goog.getMsg( 'Transliteration ({$label})', { 'label': 'namaste \u2192 \u0928\u092e\u0938\u094d\u0924\u0947' }); /** * @desc The input method name for Serbian. */ msgs.MSG_TRANSLITERATION_SR = goog.getMsg( 'Transliteration ({$label})', { 'label': 'zdravo \u2192 \u0437\u0434\u0440\u0430\u0432\u043e' }); /** * @desc The input method name for Sinhalese. */ msgs.MSG_TRANSLITERATION_SI = goog.getMsg( 'Transliteration ({$label})', { 'label': 'halo \u2192 \u0dc4\u0dbd\u0ddd' }); /** * @desc The input method name for Tamil. */ msgs.MSG_TRANSLITERATION_TA = goog.getMsg( 'Transliteration ({$label})', { 'label': 'vanakkam \u2192 \u0bb5\u0ba3\u0b95\u0bcd\u0b95\u0bae\u0bcd' }); /** * @desc The input method name for Telugu. */ msgs.MSG_TRANSLITERATION_TE = goog.getMsg( 'Transliteration ({$label})', { 'label': 'emandi \u2192 \u0c0f\u0c2e\u0c02\u0c21\u0c40' }); /** * @desc The input method name for Tigrinya. 
*/ msgs.MSG_TRANSLITERATION_TI = goog.getMsg( 'Transliteration ({$label})', { 'label': 'selam \u2192 \u1230\u120b\u121d' }); /** * @desc The input method name for Ukraine. */ msgs.MSG_TRANSLITERATION_UK = goog.getMsg( 'Transliteration ({$label})', { 'label': 'pryvit \u2192 \u043f\u0440\u0438\u0432\u0456\u0442' }); /** * @desc The input method name for Urdu. */ msgs.MSG_TRANSLITERATION_UR = goog.getMsg( 'Transliteration ({$label})', { 'label': 'salam \u2190 \u0633\u0644\u0627\u0645' }); /** * @desc The input method name for Vietnamese. */ msgs.MSG_TRANSLITERATION_VI = goog.getMsg( 'Transliteration ({$label})', { 'label': 'chao \u2192 ch\xe0o' }); // End of the Input Tools name. /** * @desc "Back" button on handwriting panel */ msgs.MSG_HANDWRITING_BACK = goog.getMsg('Back'); /** * @desc Handwriting Input Tool need network access. If there is no network, * shows the message on handwriting panel */ msgs.MSG_HANDWRITING_NETOWRK_ERROR = goog.getMsg('Sorry, you can not use' + ' handwriting, because network is unavailable.'); /** * @desc Show the privacy message when user open handwriting IME first time. */ msgs.MSG_HANDWRITING_PRIVACY_INFO = goog.getMsg('Your input will be' + ' sent to Google servers to recognize text.'); /** * @desc Show the privacy message when user open voice IME first time. */ msgs.MSG_VOICE_PRIVACY_INFO = goog.getMsg('Your voice input will be' + ' sent to Google servers to recognize text.'); /** * @desc The message to indicate a character is in uppercase. */ msgs.MSG_CAPITAL = goog.getMsg('capital'); /** * @desc The name of the backspace key. */ msgs.MSG_BACKSPACE = goog.getMsg('backspace'); /** * @desc The name of the tab key. */ msgs.MSG_TAB = goog.getMsg('tab'); /** * @desc The name of the enter key. */ msgs.MSG_ENTER = goog.getMsg('enter'); /** * @desc The name of the space key. */ msgs.MSG_SPACE = goog.getMsg('space'); /** * @desc The name of the shift key. */ msgs.MSG_SHIFT = goog.getMsg('shift'); /** * @desc The name of the ctrl key. 
*/ msgs.MSG_CTRL = goog.getMsg('ctrl'); /** * @desc The name of the alt key. */ msgs.MSG_ALT = goog.getMsg('alter'); /** * @desc The name of the altgr key. */ msgs.MSG_ALTGR = goog.getMsg('Alt Graphic'); /** * @desc The name of the capslock key. */ msgs.MSG_CAPSLOCK = goog.getMsg('capslock'); /** * @desc The message to indicate the shift key is locked. */ msgs.MSG_SHIFT_LOCK = goog.getMsg('Shift Lock'); /** * @desc The message to indicate it is the left arrow. */ msgs.MSG_LEFT_ARROW = goog.getMsg('left arrow'); /** * @desc The message to indicate it is the right arrow. */ msgs.MSG_RIGHT_ARROW = goog.getMsg('right arrow'); /** * @desc The message to indicate it is the up arrow. */ msgs.MSG_UP_ARROW = goog.getMsg('up arrow'); /** * @desc The message to indicate it is the down arrow. */ msgs.MSG_DOWN_ARROW = goog.getMsg('down arrow'); /** * @desc The message to indicate users this key will hide the keyboard. */ msgs.MSG_HIDE_KEYBOARD = goog.getMsg('hide keyboard'); /** * @desc The message to indicate users this key will switch to previous * keyboard. */ msgs.MSG_GLOBE = goog.getMsg('switch to previous keyboard'); /** * @desc The message to indicate users that press this key will open keyboard * menu for switching IMEs/layouts or open IME settings. */ msgs.MSG_MENU_KEY = goog.getMsg('open keyboard menu'); /** * @desc The message to indicate users that press this key will close the menu * for switching IMEs/layouts or open IME settings. */ msgs.MSG_DISMISS_MENU = goog.getMsg('dismiss keyboard menu'); /** * @desc The message to indicate users that press this button will open emoji * keyset. */ msgs.MSG_FOOTER_EMOJI_BUTTON = goog.getMsg('switch to emoji'); /** * @desc The message to indicate users that press this button will open * handwriting keyset. */ msgs.MSG_FOOTER_HANDWRITING_BUTTON = goog.getMsg('switch to handwriting'); /** * @desc The message to indicate users that press this button will open input * method settings. 
*/ msgs.MSG_FOOTER_SETTINGS_BUTTON = goog.getMsg('open input method settings'); /** * @desc The message to indicate to users that pressing this button will make * the virtual keyboard floating, which means users can move the virtual * keyboard to anywhere on the screen. */ msgs.MSG_FOOTER_FLOATING_BUTTON = goog.getMsg('make virtual keyboard movable'); /** * @desc The message to indicate to users that pressing this button will dock * the virtual keyboard, which means the virtual keyboard will show at the * bottom of the screen and is as wide as the screen. */ msgs.MSG_FOOTER_DOCKING_BUTTON = goog.getMsg('dock virtual keyboard'); /** * @desc The message prefix which indicate users that press this button will * switch to a new keyboard. */ msgs.MSG_SWITCH_TO_KEYBOARD_PREFIX = goog.getMsg('switch to '); /** * @desc The message prefix which indicate users the current selected keyboard. */ msgs.MSG_CURRENT_KEYBOARD_PREFIX = goog.getMsg('current selected keyboard '); /** * @desc The message to indicate the button will switch to a new layout. */ msgs.MSG_SWITCH_TO = goog.getMsg('switch to '); /** * @desc The message to indicate it has switched to a new layout. */ msgs.MSG_SWITCHED_TO = goog.getMsg('switched to '); /** * @desc Show "Got it" button on a popup message. Uses click it means users saw * the message and want to hide the message. */ msgs.MSG_GOT_IT = goog.getMsg('Got it'); /** * @desc Option for input to never automatically correct or change user input * from keyboard. */ msgs.MSG_NEVER_AUTO_CORRECT = goog.getMsg('Off'); /** * @desc Option for input to sometimes automatically correct user input to what * the model thinks is the intended input when confidence is high. */ msgs.MSG_SOMETIMES_AUTO_CORRECT = goog.getMsg('Modest'); /** * @desc Option for input to always automatically correct user input to what * the model thinks is the intended input when confidence is high. 
*/ msgs.MSG_ALWAYS_AUTO_CORRECT = goog.getMsg('Aggressive'); /** * @desc Option to determine how candidates are triggered. This one displays * input candidates only on backspace. */ msgs.MSG_SHOW_CANDIDATES_BACKSPACE = goog.getMsg('Show suggestion dropdown by Backspace'); /** * @desc Option to determine how candidates are triggered. This one displays * input candidates after waiting 500ms. */ msgs.MSG_SHOW_CANDIDATES_500 = goog.getMsg('Show suggestion dropdown after 500ms'); /** * @desc Option to determine how candidates are triggered. This one displays * input candidates after waiting 1 second. */ msgs.MSG_SHOW_CANDIDATES_1000 = goog.getMsg('Show suggestion dropdown after 1s'); /** * @desc Option to determine how candidates are triggered. This one displays * input candidates after waiting 2 second. */ msgs.MSG_SHOW_CANDIDATES_2000 = goog.getMsg('Show suggestion dropdown after 2s'); /** * @desc Option to determine how candidates are triggered. This one displays * input candidates after waiting 5 second. */ msgs.MSG_SHOW_CANDIDATES_5000 = goog.getMsg('Show suggestion dropdown after 5s'); /** * @desc The title for the settings page of latin input methods. */ msgs.MSG_LATIN_SETTINGS_PAGE = goog.getMsg('Settings'); /** * @desc Title for options to determine how often the input method * automatically corrects user input when confidence is high. */ msgs.MSG_AUTO_CORRECTION_LEVEL = goog.getMsg('Auto-correction'); /** * @desc Option to enable automatic capitalization of first character in a * sentence. */ msgs.MSG_ENABLE_CAPITALIZATION = goog.getMsg('Auto-capitalization'); /** * @desc Title for options to determine how often the input method * automatically corrects user input when confidence is high. */ msgs.MSG_PHYSICAL_AUTO_CORRECTION_LEVEL = goog.getMsg('Auto-correction'); /** * @desc Option to enable automatic capitalization of first character in a * sentence. 
*/ msgs.MSG_PHYSICAL_ENABLE_CAPITALIZATION = goog.getMsg('Auto-capitalization'); /** * @desc Title for options to determine whether to show the candidates. */ msgs.MSG_SHOW_HANGUL_CANDIDATE = goog.getMsg('Show candidates in Hangul mode'); /** * @desc Title for options to determine when to show candidates. */ msgs.MSG_SHOW_CANDIDATE_MODE = goog.getMsg('Delay of suggestion dropdown'); /** * @desc Option to enable prediction of next word to be typed. */ msgs.MSG_ENABLE_PREDICTION = goog.getMsg('Enable next word prediction'); /** * @desc The title for the Armenian Phonetic Keyboard (Armenian) settings page. */ msgs.MSG_AM_PHONETIC_ARM_SETTINGS_PAGE = goog.getMsg('Armenian Phonetic Keyboard Settings Page'); /** * @desc The title for the Belgian Keyboard (French) settings page. */ msgs.MSG_BE_FRA_SETTINGS_PAGE = goog.getMsg('Belgian Keyboard (French) Settings Page'); /** * @desc The title for the Belgian Keyboard (German) settings page. */ msgs.MSG_BE_GER_SETTINGS_PAGE = goog.getMsg('Belgian Keyboard (German) Settings Page'); /** * @desc The title for the Belgian Keyboard (Dutch) settings page. */ msgs.MSG_BE_NLD_SETTINGS_PAGE = goog.getMsg('Belgian Keyboard (Dutch) Settings Page'); /** * @desc The title for the Bulgarian Keyboard (Bulgarian) settings page. */ msgs.MSG_BG_BUL_SETTINGS_PAGE = goog.getMsg('Bulgarian Keyboard Settings Page'); /** * @desc The title for the Bulgarian Phonetic Keyboard (Bulgarian) settings * page. */ msgs.MSG_BG_PHONETIC_BUL_SETTINGS_PAGE = goog.getMsg('Bulgarian Phonetic Keyboard Settings Page'); /** * @desc The title for the Brazilian Keyboard (Brazilian Portuguese) settings * page. */ msgs.MSG_BR_POR_SETTINGS_PAGE = goog.getMsg('Brazilian Keyboard (Brazilian Portuguese) Settings Page'); /** * @desc The title for the Belarusian Keyboard (Belarusian) settings page. */ msgs.MSG_BY_BEL_SETTINGS_PAGE = goog.getMsg('Belarusian Keyboard Settings Page'); /** * @desc The title for the Canadian French Keyboard (French) settings page. 
*/ msgs.MSG_CA_FRA_SETTINGS_PAGE = goog.getMsg('Canadian French Keyboard (French) Settings Page'); /** * @desc The title for the Canadian English Keyboard (English) settings page. */ msgs.MSG_CA_ENG_ENG_SETTINGS_PAGE = goog.getMsg('Canadian English Keyboard (English) Settings Page'); /** * @desc The title for the Canadian Multilingual Keyboard (French) settings * page. */ msgs.MSG_CA_MULTIX_FRA_SETTINGS_PAGE = goog.getMsg('Canadian Multilingual Keyboard (French) Settings Page'); /** * @desc The title for the Swiss Keyboard (German) settings page. */ msgs.MSG_CH_GER_SETTINGS_PAGE = goog.getMsg('Swiss Keyboard (German) Settings Page'); /** * @desc The title for the Swiss French Keyboard (French) settings page. */ msgs.MSG_CH_FR_FRA_SETTINGS_PAGE = goog.getMsg('Swiss French Keyboard (French) Settings Page'); /** * @desc The title for the Czech Keyboard (Czech) settings page. */ msgs.MSG_CZ_CZE_SETTINGS_PAGE = goog.getMsg('Czech Keyboard Settings Page'); /** * @desc The title for the Czech Qwerty Keyboard (Czech) settings page. */ msgs.MSG_CZ_QWERTY_CZE_SETTINGS_PAGE = goog.getMsg('Czech Qwerty Keyboard Settings Page'); /** * @desc The title for the German Keyboard (German) settings page. */ msgs.MSG_DE_GER_SETTINGS_PAGE = goog.getMsg('German Keyboard Settings Page'); /** * @desc The title for the German Neo 2 Keyboard (German) settings page. */ msgs.MSG_DE_NEO_GER_SETTINGS_PAGE = goog.getMsg('German Neo 2 Keyboard Settings Page'); /** * @desc The title for the Danish Keyboard (Danish) settings page. */ msgs.MSG_DK_DAN_SETTINGS_PAGE = goog.getMsg('Danish Keyboard Settings Page'); /** * @desc The title for the Estonian Keyboard (Estonian) settings page. */ msgs.MSG_EE_EST_SETTINGS_PAGE = goog.getMsg('Estonian Keyboard Settings Page'); /** * @desc The title for the Spanish Keyboard (Spanish) settings page. */ msgs.MSG_ES_SPA_SETTINGS_PAGE = goog.getMsg('Spanish Keyboard Settings Page'); /** * @desc The title for the Catalan Keyboard (Catalan) settings page. 
*/ msgs.MSG_ES_CAT_CAT_SETTINGS_PAGE = goog.getMsg('Catalan Keyboard Settings Page'); /** * @desc The title for the Faroese Keyboard (Faroese) settings page. */ msgs.MSG_FO_FAO_SETTINGS_PAGE = goog.getMsg('Faroese Keyboard Settings Page'); /** * @desc The title for the Finnish Keyboard (Finnish) settings page. */ msgs.MSG_FI_FIN_SETTINGS_PAGE = goog.getMsg('Finnish Keyboard Settings Page'); /** * @desc The title for the French BÉPO Keyboard (French) settings page. */ msgs.MSG_FR_BEPO_FRA_SETTINGS_PAGE = goog.getMsg('French BÉPO Keyboard (French) Settings Page'); /** * @desc The title for the French Keyboard (French) settings page. */ msgs.MSG_FR_FRA_SETTINGS_PAGE = goog.getMsg('French Keyboard Settings Page'); /** * @desc The title for the Uk Dvorak Keyboard (English) settings page. */ msgs.MSG_GB_DVORAK_ENG_SETTINGS_PAGE = goog.getMsg('Uk Dvorak Keyboard (English) Settings Page'); /** * @desc The title for the Uk Keyboard (English) settings page. */ msgs.MSG_GB_EXTD_ENG_SETTINGS_PAGE = goog.getMsg('Uk Keyboard (English) Settings Page'); /** * @desc The title for the Georgian Keyboard (Georgian) settings page. */ msgs.MSG_GE_GEO_SETTINGS_PAGE = goog.getMsg('Georgian Keyboard Settings Page'); /** * @desc The title for the Greek Keyboard (Greek) settings page. */ msgs.MSG_GR_GRE_SETTINGS_PAGE = goog.getMsg('Greek Keyboard Settings Page'); /** * @desc The title for the Croatian Keyboard (Croatian) settings page. */ msgs.MSG_HR_SCR_SETTINGS_PAGE = goog.getMsg('Croatian Keyboard Settings Page'); /** * @desc The title for the Hungarian Keyboard (Hungarian) settings page. */ msgs.MSG_HU_HUN_SETTINGS_PAGE = goog.getMsg('Hungarian Keyboard Settings Page'); /** * @desc The title for the Hungarian Qwerty Keyboard (Hungarian) settings page. */ msgs.MSG_HU_QWERTY_HUN_SETTINGS_PAGE = goog.getMsg('Hungarian Qwerty Keyboard Settings Page'); /** * @desc The title for the Irish Keyboard (Irish) settings page. 
*/ msgs.MSG_IE_GA_SETTINGS_PAGE = goog.getMsg('Irish Keyboard Settings Page'); /** * @desc The title for the Hebrew Keyboard (Hebrew) settings page. */ msgs.MSG_IL_HEB_SETTINGS_PAGE = goog.getMsg('Hebrew Keyboard Settings Page'); /** * @desc The title for the Icelandic Keyboard (Icelandic) settings page. */ msgs.MSG_IS_ICE_SETTINGS_PAGE = goog.getMsg('Icelandic Keyboard Settings Page'); /** * @desc The title for the Italian Keyboard (Italian) settings page. */ msgs.MSG_IT_ITA_SETTINGS_PAGE = goog.getMsg('Italian Keyboard Settings Page'); /** * @desc The title for the Japanese Keyboard (Japanese) settings page. */ msgs.MSG_JP_JPN_SETTINGS_PAGE = goog.getMsg('Japanese Keyboard Settings Page'); /** * @desc The title for the Latin American Keyboard (Spanish) settings page. */ msgs.MSG_LATAM_SPA_SETTINGS_PAGE = goog.getMsg('Latin American Keyboard (Spanish) Settings Page'); /** * @desc The title for the Lithuanian Keyboard (Lithuanian) settings page. */ msgs.MSG_LT_LIT_SETTINGS_PAGE = goog.getMsg('Lithuanian Keyboard Settings Page'); /** * @desc The title for the Latvian Keyboard (Latvian, Lettish) settings page. */ msgs.MSG_LV_APOSTROPHE_LAV_SETTINGS_PAGE = goog.getMsg('Latvian Keyboard (Latvian, Lettish) Settings Page'); /** * @desc The title for the Mongolian Keyboard (Mongolian) settings page. */ msgs.MSG_MN_MON_SETTINGS_PAGE = goog.getMsg('Mongolian Keyboard Settings Page'); /** * @desc The title for the Norwegian Keyboard (Norwegian) settings page. */ msgs.MSG_NO_NOB_SETTINGS_PAGE = goog.getMsg('Norwegian Keyboard Settings Page'); /** * @desc The title for the Polish Keyboard (Polish) settings page. */ msgs.MSG_PL_POL_SETTINGS_PAGE = goog.getMsg('Polish Keyboard Settings Page'); /** * @desc The title for the Portuguese Keyboard (Portuguese) settings page. */ msgs.MSG_PT_POR_SETTINGS_PAGE = goog.getMsg('Portuguese Keyboard Settings Page'); /** * @desc The title for the Romanian Keyboard (Romanian) settings page. 
*/ msgs.MSG_RO_RUM_SETTINGS_PAGE = goog.getMsg('Romanian Keyboard Settings Page'); /** * @desc The title for the Serbian Keyboard (Serbian) settings page. */ msgs.MSG_RS_SRP_SETTINGS_PAGE = goog.getMsg('Serbian Keyboard Settings Page'); /** * @desc The title for the Russian Keyboard (Russian) settings page. */ msgs.MSG_RU_RUS_SETTINGS_PAGE = goog.getMsg('Russian Keyboard Settings Page'); /** * @desc The title for the Russian Phonetic Keyboard (Russian) settings page. */ msgs.MSG_RU_PHONETIC_RUS_SETTINGS_PAGE = goog.getMsg('Russian Phonetic Keyboard Settings Page'); /** * @desc The title for the Swedish Keyboard (Swedish) settings page. */ msgs.MSG_SE_SWE_SETTINGS_PAGE = goog.getMsg('Swedish Keyboard Settings Page'); /** * @desc The title for the Slovenian Keyboard (Slovenian) settings page. */ msgs.MSG_SI_SLV_SETTINGS_PAGE = goog.getMsg('Slovenian Keyboard Settings Page'); /** * @desc The title for the Slovakian Keyboard (Slovak) settings page. */ msgs.MSG_SK_SLO_SETTINGS_PAGE = goog.getMsg('Slovakian Keyboard Settings Page'); /** * @desc The title for the Turkish Keyboard (Turkish) settings page. */ msgs.MSG_TR_TUR_SETTINGS_PAGE = goog.getMsg('Turkish Keyboard Settings Page'); /** * @desc The title for the Turkish-F Keyboard (Turkish) settings page. */ msgs.MSG_TR_F_TUR_SETTINGS_PAGE = goog.getMsg('Turkish-F Keyboard Settings Page'); /** * @desc The title for the Ukrainian Keyboard (Ukrainian) settings page. */ msgs.MSG_UA_UKR_SETTINGS_PAGE = goog.getMsg('Ukrainian Keyboard Settings Page'); /** * @desc The title for the US Keyboard (English) settings page. */ msgs.MSG_US_ENG_SETTINGS_PAGE = goog.getMsg('US Keyboard (English) Settings Page'); /** * @desc The title for the US Keyboard (Filipino) settings page. */ msgs.MSG_US_FIL_SETTINGS_PAGE = goog.getMsg('US Keyboard (Filipino) Settings Page'); /** * @desc The title for the US Keyboard (Indonesian) settings page. 
*/ msgs.MSG_US_IND_SETTINGS_PAGE = goog.getMsg('US Keyboard (Indonesian) Settings Page'); /** * @desc The title for the US Keyboard (Malay) settings page. */ msgs.MSG_US_MSA_SETTINGS_PAGE = goog.getMsg('US Keyboard (Malay) Settings Page'); /** * @desc The title for the US Extended Keyboard (English) settings page. */ msgs.MSG_US_ALTGR_INTL_ENG_SETTINGS_PAGE = goog.getMsg('US Extended Keyboard (English) Settings Page'); /** * @desc The title for the US Colemak Keyboard (English) settings page. */ msgs.MSG_US_COLEMAK_ENG_SETTINGS_PAGE = goog.getMsg('US Colemak Keyboard (English) Settings Page'); /** * @desc The title for the US Dvorak Keyboard (English) settings page. */ msgs.MSG_US_DVORAK_ENG_SETTINGS_PAGE = goog.getMsg('US Dvorak Keyboard (English) Settings Page'); /** * @desc The title for the US International Keyboard (English) settings page. */ msgs.MSG_US_INTL_ENG_SETTINGS_PAGE = goog.getMsg('US International Keyboard (English) Settings Page'); /** * @desc The title for the US International Keyboard (Dutch) settings page. */ msgs.MSG_US_INTL_NLD_SETTINGS_PAGE = goog.getMsg('US International Keyboard (Dutch) Settings Page'); /** * @desc The title for the US International Keyboard (Brazilian Portuguese) * settings page. */ msgs.MSG_US_INTL_POR_SETTINGS_PAGE = goog.getMsg( 'US International Keyboard (Brazilian Portuguese) Settings Page'); /** * @desc The title for the section containing touch-enabled keyboard settings. */ msgs.MSG_TOUCH_KEYBOARD = goog.getMsg('On-screen keyboard'); /** * @desc The title for the section containing physical-enabled keyboard * settings. */ msgs.MSG_PHYSICAL_KEYBOARD = goog.getMsg('Physical keyboard'); /** * @desc Description of a checkbox that when checked, will produce a sound for * every keypress on the touch keyboard, and when unchecked, will be silent. 
*/ msgs.MSG_SOUND_ON_KEYPRESS = goog.getMsg('Sound on keypress'); /** * @desc Description of a checkbox that when checked, will replace two * consecutive space key presses with a period and a space, thus making it * easier to type long paragraphs by double tapping space at the end of a * sentence instead of finding the period key and pressing that and then space. */ msgs.MSG_DOUBLE_SPACE_PERIOD = goog.getMsg('Double-space to type period'); /** * @desc Option to enable auto-completion of the word being typed. */ msgs.MSG_ENABLE_COMPLETION = goog.getMsg('Enable completion'); /** * @desc It is the description of Korean setting pages. */ msgs.MSG_KOREAN_SETTINGS_PAGE = goog.getMsg('Korean Input Method Settings Page'); /** * @desc It is the description of Korean Keyboard Layouts. */ msgs.MSG_KOREAN_KEYBOARD_LAYOUT = goog.getMsg('Korean Keyboard Layout'); /** * @desc It is the description of Korean Syllable Inputs. */ msgs.MSG_KOREAN_SYLLABLE_INPUT = goog.getMsg('Input a syllable at a time'); /** * @desc It is the description of Korean Syllable Inputs. */ msgs.MSG_KOREAN_HANJA_DISPLAY = goog.getMsg('Hanja Candidate Mode'); /** * @desc It is the description of whether the user input Korean or English. */ msgs.MSG_INPUT_HANGUL = goog.getMsg('Hangul Mode'); /** * @desc It is the description of whether the user input hangul or hanja. */ msgs.MSG_INPUT_HANJA = goog.getMsg('Hanja Mode'); /** * @desc Title for the switch to compact layout menu item. */ msgs.MSG_SWITCH_TO_COMPACT_LAYOUT = goog.getMsg('Switch to compact layout'); /** * @desc Title for the switch to full layout menu item. */ msgs.MSG_SWITCH_TO_FULL_LAYOUT = goog.getMsg('Switch to full layout'); /** * @desc Title for the pause key in the phonepad keyset. */ msgs.MSG_PAUSE = goog.getMsg('Pause'); /** * @desc Title for the pause key in the phonepad keyset. */ msgs.MSG_WAIT = goog.getMsg('Wait'); /** * @desc Button to initiate moving floating virtual keyboard by press and drag. 
*/ msgs.MSG_DRAG_BUTTON = goog.getMsg('Drag to move virtual keyboard'); /** * @desc Emoji tab spoken feedback label for category "recent". */ msgs.MSG_EMOJI_TAB_RECENT = goog.getMsg('Emoji category recent'); /** * @desc Emoji tab spoken feedback label for category "hot". */ msgs.MSG_EMOJI_TAB_HOT = goog.getMsg('Emoji category hot'); /** * @desc Emoji tab spoken feedback label for category "face". */ msgs.MSG_EMOJI_TAB_FACE = goog.getMsg('Emoji category face'); /** * @desc Emoji tab spoken feedback label for category "symbol". */ msgs.MSG_EMOJI_TAB_SYMBOL = goog.getMsg('Emoji category symbol'); /** * @desc Emoji tab spoken feedback label for category "nature". */ msgs.MSG_EMOJI_TAB_NATURE = goog.getMsg('Emoji category nature'); /** * @desc Emoji tab spoken feedback label for category "place". */ msgs.MSG_EMOJI_TAB_PLACE = goog.getMsg('Emoji category place'); /** * @desc Emoji tab spoken feedback label for category "object". */ msgs.MSG_EMOJI_TAB_OBJECT = goog.getMsg('Emoji category object'); /** * @desc Emoji tab spoken feedback label for category "emoticon". */ msgs.MSG_EMOJI_TAB_EMOTICON = goog.getMsg('Emoji category emoticon'); /** * @desc Whether to enable voice input tools */ msgs.MSG_VOICE = goog.getMsg('Voice'); /** * @desc Turn on voice input tool. */ msgs.MSG_VOICE_TURN_ON = goog.getMsg('turn on voice input tool'); /** * @desc Turn off voice input tool. */ msgs.MSG_VOICE_TURN_OFF = goog.getMsg('turn off voice input tool'); /** * @desc Adds the current edition word into personal dictionary. */ msgs.MSG_ADD_TO_PERSONAL_DICTIONARY = goog.getMsg('Add to personal dictionary'); /** * @desc Adds the current edition word into dictionary. */ msgs.MSG_ADD_TO_DICTIONARY = goog.getMsg('Add to dictionary'); /** * @desc Adds the given word into dictionary. The given word is a placeholder. */ msgs.MSG_ADD_WORD_TO_DICTIONARY = goog.getMsg('Add "{$word}" to dictionary', { 'word': '$1' }); /** * @desc Ignore auto correction feature for the given word. 
*/ msgs.MSG_IGNORE_CORRECTION = goog.getMsg('Ignore correction for'); /** * @desc Ignore auto correction feature for the given word. */ msgs.MSG_IGNORE_CORRECTION_SHORT = goog.getMsg('Ignore correction'); /** * @desc Settings for a Input Tool. */ msgs.MSG_SETTINGS = goog.getMsg('Settings'); /** * @desc expand the menu */ msgs.MSG_EXPAND = goog.getMsg('Expand'); /** * @desc expand the menu */ msgs.MSG_SHRINK_CANDIDATES = goog.getMsg('shrink candidate list'); /** * @desc expand the menu */ msgs.MSG_EXPAND_CANDIDATES = goog.getMsg('expand candidate list'); /** * @desc Title for gesture editing. */ msgs.MSG_ENABLE_GESTURE_EDITING = goog.getMsg( 'Enable swipe gestures'); /** * @desc Description for gesture editing. */ msgs.MSG_ENABLE_GESTURE_EDITING_DESC = goog.getMsg( 'Swipe from the edges of the keyboard to move the cursor, or from the ' + 'backspace key to delete whole words'); /** * @desc Title for gesture typing. */ msgs.MSG_ENABLE_GESTURE_TYPING = goog.getMsg( 'Enable gesture typing'); /** * @desc Description for gesture typing. */ msgs.MSG_ENABLE_GESTURE_TYPING_DESC = goog.getMsg( 'Input a word by sliding through the letters');
apache-2.0
TodoOrTODO/p4factory
mininet/swl_stp.py
3507
#!/usr/bin/python

# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

##############################################################################
# Topology with four switches and two hosts with STP
#
#        sw3----sw4
#        |  \  /  |
#        |   \/   |
#        |   /\   |
#        |  /  \  |
#        sw1----sw2
#        |        |
#        |        |
#        h1       h2
#
# The topology runs mstpd. To work around an issue of running mstpd inside
# docker containers, copy the script 'configs/bridge-stp' to /sbin/bridge-stp
# on the host operating system. This script is invoked by the kernel when a
# bridge is created to determine if it has to run the spanning tree protocol
# or some other process is responsible for it.
##############################################################################

from mininet.net import Mininet, VERSION
from mininet.log import setLogLevel, info
from mininet.cli import CLI
from distutils.version import StrictVersion
from p4_mininet import P4DockerSwitch


def main():
    """Build the 4-switch / 2-host STP topology, start it, and open a CLI.

    Creates hosts h1/h2, four P4 docker switches (each with its own STP
    config filesystem under configs/swN/stp), wires up the full topology
    shown in the header diagram, then blocks in the Mininet CLI until the
    user exits, at which point the network is torn down.
    """
    net = Mininet( controller = None )

    # add hosts
    h1 = net.addHost( 'h1', ip = '172.16.10.1/24' )
    h2 = net.addHost( 'h2', ip = '172.16.10.2/24' )

    # add switches sw1..sw4 -- identical apart from their index, so build
    # them in a loop instead of four copy-pasted blocks
    switches = [
        net.addSwitch( 'sw%d' % i, target_name = "p4dockerswitch",
                       cls = P4DockerSwitch,
                       config_fs = 'configs/sw%d/stp' % i,
                       pcap_dump = True )
        for i in range( 1, 5 )
    ]
    sw1, sw2, sw3, sw4 = switches

    # Mininet releases after 2.2.0 accept a 'fast' link option; the original
    # code passed fast = False on those versions (presumably so STP sees
    # fully-provisioned interfaces -- TODO confirm against mininet docs).
    # Folding the flag into a kwargs dict avoids duplicating the link list
    # in both branches of the version check.
    link_opts = {}
    if StrictVersion( VERSION ) > StrictVersion( '2.2.0' ):
        link_opts[ 'fast' ] = False

    # add links (see the topology diagram at the top of this file)
    net.addLink( sw1, h1, port1 = 1, **link_opts )
    net.addLink( sw2, h2, port1 = 1, **link_opts )
    net.addLink( sw1, sw3, port1 = 2, port2 = 1, **link_opts )
    net.addLink( sw1, sw4, port1 = 3, port2 = 1, **link_opts )
    net.addLink( sw2, sw3, port1 = 2, port2 = 2, **link_opts )
    net.addLink( sw2, sw4, port1 = 3, port2 = 2, **link_opts )
    net.addLink( sw3, sw4, port1 = 3, port2 = 3, **link_opts )

    net.start()
    CLI( net )
    net.stop()


if __name__ == '__main__':
    setLogLevel( 'info' )
    main()
apache-2.0
nishantmonu51/druid
processing/src/test/java/org/apache/druid/query/DoubleStorageTest.java
13267
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.query;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.druid.data.input.impl.DimensionsSpec;
import org.apache.druid.data.input.impl.InputRowParser;
import org.apache.druid.data.input.impl.JSONParseSpec;
import org.apache.druid.data.input.impl.MapInputRowParser;
import org.apache.druid.data.input.impl.TimestampSpec;
import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.query.aggregation.DoubleSumAggregatorFactory;
import org.apache.druid.query.metadata.SegmentMetadataQueryConfig;
import org.apache.druid.query.metadata.SegmentMetadataQueryQueryToolChest;
import org.apache.druid.query.metadata.SegmentMetadataQueryRunnerFactory;
import org.apache.druid.query.metadata.metadata.ColumnAnalysis;
import org.apache.druid.query.metadata.metadata.ListColumnIncluderator;
import org.apache.druid.query.metadata.metadata.SegmentAnalysis;
import org.apache.druid.query.metadata.metadata.SegmentMetadataQuery;
import org.apache.druid.query.scan.ScanQuery;
import org.apache.druid.query.scan.ScanQueryConfig;
import org.apache.druid.query.scan.ScanQueryEngine;
import org.apache.druid.query.scan.ScanQueryQueryToolChest;
import org.apache.druid.query.scan.ScanQueryRunnerFactory;
import org.apache.druid.query.scan.ScanQueryRunnerTest;
import org.apache.druid.query.scan.ScanResultValue;
import org.apache.druid.query.spec.LegacySegmentSpec;
import org.apache.druid.segment.IndexIO;
import org.apache.druid.segment.IndexMergerV9;
import org.apache.druid.segment.IndexSpec;
import org.apache.druid.segment.QueryableIndex;
import org.apache.druid.segment.QueryableIndexSegment;
import org.apache.druid.segment.TestHelper;
import org.apache.druid.segment.column.ColumnHolder;
import org.apache.druid.segment.column.ColumnType;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.incremental.IncrementalIndex;
import org.apache.druid.segment.incremental.IncrementalIndexSchema;
import org.apache.druid.segment.incremental.IndexSizeExceededException;
import org.apache.druid.segment.incremental.OnheapIncrementalIndex;
import org.apache.druid.segment.writeout.OffHeapMemorySegmentWriteOutMediumFactory;
import org.apache.druid.timeline.SegmentId;
import org.joda.time.Interval;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;

/**
 * Verifies that a double-typed column is stored (and reported by segment-metadata
 * and scan queries) according to the {@code druid.indexing.doubleStorage} system
 * property: once as {@code double} and once as {@code float}, driven by the
 * {@link Parameterized} runner.
 */
@RunWith(Parameterized.class)
public class DoubleStorageTest
{
  private static final SegmentMetadataQueryRunnerFactory METADATA_QR_FACTORY =
      new SegmentMetadataQueryRunnerFactory(
          new SegmentMetadataQueryQueryToolChest(new SegmentMetadataQueryConfig()),
          QueryRunnerTestHelper.NOOP_QUERYWATCHER
      );

  private static final ScanQueryQueryToolChest SCAN_QUERY_QUERY_TOOL_CHEST =
      new ScanQueryQueryToolChest(
          new ScanQueryConfig(),
          DefaultGenericQueryMetricsFactory.instance()
      );

  private static final ScanQueryRunnerFactory SCAN_QUERY_RUNNER_FACTORY =
      new ScanQueryRunnerFactory(
          SCAN_QUERY_QUERY_TOOL_CHEST,
          new ScanQueryEngine(),
          new ScanQueryConfig()
      );

  /** Builds a full-interval, non-legacy scan query over the test data source. */
  private Druids.ScanQueryBuilder newTestQuery()
  {
    return Druids.newScanQueryBuilder()
                 .dataSource(new TableDataSource(QueryRunnerTestHelper.DATA_SOURCE))
                 .columns(Collections.emptyList())
                 .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
                 .limit(Integer.MAX_VALUE)
                 .legacy(false);
  }

  private static final IndexMergerV9 INDEX_MERGER_V9 =
      TestHelper.getTestIndexMergerV9(OffHeapMemorySegmentWriteOutMediumFactory.instance());
  private static final IndexIO INDEX_IO = TestHelper.getTestIndexIO();

  private static final Integer MAX_ROWS = 10;
  private static final String TIME_COLUMN = "__time";
  private static final String DIM_NAME = "testDimName";
  private static final String DIM_VALUE = "testDimValue";
  private static final String DIM_FLOAT_NAME = "testDimFloatName";
  private static final SegmentId SEGMENT_ID = SegmentId.dummy("segmentId");
  private static final Interval INTERVAL =
      Intervals.of("2011-01-13T00:00:00.000Z/2011-01-22T00:00:00.001Z");

  /** Parses the synthetic rows produced by {@link #getStreamOfEvents()}. */
  private static final InputRowParser<Map<String, Object>> ROW_PARSER = new MapInputRowParser(
      new JSONParseSpec(
          new TimestampSpec(TIME_COLUMN, "auto", null),
          new DimensionsSpec(
              DimensionsSpec.getDefaultSchemas(ImmutableList.of(DIM_NAME)),
              ImmutableList.of(DIM_FLOAT_NAME),
              ImmutableList.of()
          ),
          null,
          null,
          null
      )
  );

  private QueryableIndex index;
  private final SegmentAnalysis expectedSegmentAnalysis;
  /** Either "double" or "float" — the value of the storage-type system property. */
  private final String storeDoubleAs;

  public DoubleStorageTest(
      String storeDoubleAs,
      SegmentAnalysis expectedSegmentAnalysis
  )
  {
    this.storeDoubleAs = storeDoubleAs;
    this.expectedSegmentAnalysis = expectedSegmentAnalysis;
  }

  /**
   * Two cases: the double column stored as DOUBLE vs. as FLOAT. The expected
   * analyses differ only in the type of {@code testDimFloatName}.
   */
  @Parameterized.Parameters
  public static Collection<?> dataFeeder()
  {
    SegmentAnalysis expectedSegmentAnalysisDouble = new SegmentAnalysis(
        SEGMENT_ID.toString(),
        ImmutableList.of(INTERVAL),
        ImmutableMap.of(
            TIME_COLUMN,
            new ColumnAnalysis(
                ColumnType.LONG,
                ValueType.LONG.name(),
                false,
                false,
                100,
                null,
                null,
                null,
                null
            ),
            DIM_NAME,
            new ColumnAnalysis(
                ColumnType.STRING,
                ValueType.STRING.name(),
                false,
                false,
                120,
                1,
                DIM_VALUE,
                DIM_VALUE,
                null
            ),
            DIM_FLOAT_NAME,
            new ColumnAnalysis(
                ColumnType.DOUBLE,
                ValueType.DOUBLE.name(),
                false,
                false,
                80,
                null,
                null,
                null,
                null
            )
        ),
        330,
        MAX_ROWS,
        null,
        null,
        null,
        null
    );

    SegmentAnalysis expectedSegmentAnalysisFloat = new SegmentAnalysis(
        SEGMENT_ID.toString(),
        ImmutableList.of(INTERVAL),
        ImmutableMap.of(
            TIME_COLUMN,
            new ColumnAnalysis(
                ColumnType.LONG,
                ValueType.LONG.name(),
                false,
                false,
                100,
                null,
                null,
                null,
                null
            ),
            DIM_NAME,
            new ColumnAnalysis(
                ColumnType.STRING,
                ValueType.STRING.name(),
                false,
                false,
                120,
                1,
                DIM_VALUE,
                DIM_VALUE,
                null
            ),
            DIM_FLOAT_NAME,
            new ColumnAnalysis(
                ColumnType.FLOAT,
                ValueType.FLOAT.name(),
                false,
                false,
                80,
                null,
                null,
                null,
                null
            )
        ),
        330,
        MAX_ROWS,
        null,
        null,
        null,
        null
    );

    return ImmutableList.of(
        new Object[]{"double", expectedSegmentAnalysisDouble},
        new Object[]{"float", expectedSegmentAnalysisFloat}
    );
  }

  @Before
  public void setup() throws IOException
  {
    index = buildIndex(storeDoubleAs);
  }

  /** Segment-metadata query must report the column type chosen by the property. */
  @Test
  public void testMetaDataAnalysis()
  {
    QueryRunner runner = QueryRunnerTestHelper.makeQueryRunner(
        METADATA_QR_FACTORY,
        SEGMENT_ID,
        new QueryableIndexSegment(index, SEGMENT_ID),
        null
    );

    SegmentMetadataQuery segmentMetadataQuery =
        Druids.newSegmentMetadataQueryBuilder()
              .dataSource("testing")
              .intervals(ImmutableList.of(INTERVAL))
              .toInclude(new ListColumnIncluderator(Arrays.asList(
                  TIME_COLUMN,
                  DIM_NAME,
                  DIM_FLOAT_NAME
              )))
              .analysisTypes(
                  SegmentMetadataQuery.AnalysisType.CARDINALITY,
                  SegmentMetadataQuery.AnalysisType.SIZE,
                  SegmentMetadataQuery.AnalysisType.INTERVAL,
                  SegmentMetadataQuery.AnalysisType.MINMAX
              )
              .merge(true)
              .build();
    List<SegmentAnalysis> results = runner.run(QueryPlus.wrap(segmentMetadataQuery)).toList();

    Assert.assertEquals(Collections.singletonList(expectedSegmentAnalysis), results);
  }

  /** Scanning the segment must yield exactly the rows that were ingested. */
  @Test
  public void testSelectValues()
  {
    QueryRunner runner = QueryRunnerTestHelper.makeQueryRunner(
        SCAN_QUERY_RUNNER_FACTORY,
        SEGMENT_ID,
        new QueryableIndexSegment(index, SEGMENT_ID),
        null
    );

    ScanQuery query = newTestQuery()
        .intervals(new LegacySegmentSpec(INTERVAL))
        .virtualColumns()
        .build();

    Iterable<ScanResultValue> results = runner.run(QueryPlus.wrap(query)).toList();

    ScanResultValue expectedScanResult = new ScanResultValue(
        SEGMENT_ID.toString(),
        ImmutableList.of(TIME_COLUMN, DIM_NAME, DIM_FLOAT_NAME),
        getStreamOfEvents().collect(Collectors.toList())
    );
    List<ScanResultValue> expectedResults = Collections.singletonList(expectedScanResult);
    ScanQueryRunnerTest.verify(expectedResults, results);
  }

  /**
   * Builds and persists a queryable index with {@code druid.indexing.doubleStorage}
   * set to the requested value for the duration of ingestion.
   *
   * <p>The property save/restore is wrapped in try/finally so that a failure while
   * adding rows cannot leak the modified global property into other tests (the
   * original code restored it only on the success path).
   *
   * @param storeDoubleAsFloat "double" or "float"
   * @return the persisted, reloaded index
   * @throws IOException on persist/load failure
   */
  private static QueryableIndex buildIndex(String storeDoubleAsFloat) throws IOException
  {
    String oldValue = System.getProperty(ColumnHolder.DOUBLE_STORAGE_TYPE_PROPERTY);
    System.setProperty(ColumnHolder.DOUBLE_STORAGE_TYPE_PROPERTY, storeDoubleAsFloat);
    final IncrementalIndex index;
    try {
      final IncrementalIndexSchema schema = new IncrementalIndexSchema.Builder()
          .withMinTimestamp(DateTimes.of("2011-01-13T00:00:00.000Z").getMillis())
          .withDimensionsSpec(ROW_PARSER)
          .withMetrics(
              new DoubleSumAggregatorFactory(DIM_FLOAT_NAME, DIM_FLOAT_NAME)
          )
          .build();
      index = new OnheapIncrementalIndex.Builder()
          .setIndexSchema(schema)
          .setMaxRowCount(MAX_ROWS)
          .build();

      // The parameterized stream type makes the previous unchecked cast unnecessary.
      getStreamOfEvents().forEach(event -> {
        try {
          index.add(ROW_PARSER.parseBatch(event).get(0));
        }
        catch (IndexSizeExceededException e) {
          throw new RuntimeException(e);
        }
      });
    }
    finally {
      // Restore the global property no matter what happened above.
      if (oldValue == null) {
        System.clearProperty(ColumnHolder.DOUBLE_STORAGE_TYPE_PROPERTY);
      } else {
        System.setProperty(ColumnHolder.DOUBLE_STORAGE_TYPE_PROPERTY, oldValue);
      }
    }

    // Turn the temp *file* into a temp *directory* for the segment files.
    File someTmpFile = File.createTempFile("billy", "yay");
    someTmpFile.delete();
    FileUtils.mkdirp(someTmpFile);
    INDEX_MERGER_V9.persist(index, someTmpFile, new IndexSpec(), null);
    // NOTE: the original code called someTmpFile.delete() here as well; File.delete()
    // always fails on a non-empty directory, so that call was a guaranteed no-op and
    // has been dropped.
    return INDEX_IO.loadIndex(someTmpFile);
  }

  @After
  public void cleanUp()
  {
    index.close();
  }

  /**
   * One synthetic event per day starting 2011-01-13: a constant string dimension
   * and a strictly increasing double metric (i / 1.6179775280898876).
   */
  private static Stream<Map<String, Object>> getStreamOfEvents()
  {
    return IntStream.range(0, MAX_ROWS).mapToObj(i -> ImmutableMap.of(
        TIME_COLUMN, DateTimes.of("2011-01-13T00:00:00.000Z").plusDays(i).getMillis(),
        DIM_NAME, DIM_VALUE,
        DIM_FLOAT_NAME, i / 1.6179775280898876
    ));
  }
}
apache-2.0
cjxgm/p.cjprods.org
src/applications/project/controller/PhabricatorProjectBoardController.php
306
<?php

/**
 * Common base class for all project workboard screens.
 *
 * Extends the generic project controller only to customize the profile
 * menu: the workboard panel is marked as the active filter and a
 * board-specific CSS class is attached for styling.
 */
abstract class PhabricatorProjectBoardController
  extends PhabricatorProjectController {

  protected function getProfileMenu() {
    $nav = parent::getProfileMenu();

    // Highlight the workboard entry and tag the menu for board styling.
    $nav->selectFilter(PhabricatorProject::PANEL_WORKBOARD);
    $nav->addClass('project-board-nav');

    return $nav;
  }

}
apache-2.0
nuxeo/daisydiff
src/main/java/org/outerj/daisy/diff/html/dom/helper/LastCommonParentResult.java
1843
/* * Copyright 2007 Guy Van den Broeck * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.outerj.daisy.diff.html.dom.helper; import org.outerj.daisy.diff.html.dom.TagNode; /** * When detecting the last common parent of two nodes, all results are stored as * a {@link LastCommonParentResult}. */ public class LastCommonParentResult { public LastCommonParentResult() { } // Parent private TagNode parent; public TagNode getLastCommonParent() { return parent; } public void setLastCommonParent(TagNode parent) { this.parent = parent; } // Splitting private boolean splittingNeeded = false; public boolean isSplittingNeeded() { return splittingNeeded; } public void setSplittingNeeded() { splittingNeeded = true; } // Depth private int lastCommonParentDepth = -1; public int getLastCommonParentDepth() { return lastCommonParentDepth; } public void setLastCommonParentDepth(int depth) { lastCommonParentDepth = depth; } // Index private int indexInLastCommonParent = -1; public int getIndexInLastCommonParent() { return indexInLastCommonParent; } public void setIndexInLastCommonParent(int index) { indexInLastCommonParent = index; } }
apache-2.0
squidsolutions/bonecp
bonecp/src/main/java/com/jolbox/bonecp/BoneCP.java
30498
/** * Copyright 2010 Wallace Wadge * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.jolbox.bonecp; import java.io.Closeable; import java.io.Serializable; import java.lang.management.ManagementFactory; import java.lang.ref.Reference; import java.sql.Connection; import java.sql.DriverManager; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.AbstractMap; import java.util.Map; import java.util.Properties; import java.util.concurrent.BlockingQueue; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import javax.management.MBeanServer; import javax.management.ObjectName; import javax.sql.DataSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.FinalizableReferenceQueue; import com.google.common.base.Preconditions; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; import com.jolbox.bonecp.hooks.AcquireFailConfig; import 
com.jolbox.bonecp.hooks.ConnectionHook; /** * Connection pool (main class). * @author wwadge * */ public class BoneCP implements Serializable, Closeable { /** Warning message. */ private static final String THREAD_CLOSE_CONNECTION_WARNING = "Thread close connection monitoring has been enabled. This will negatively impact on your performance. Only enable this option for debugging purposes!"; /** Serialization UID */ private static final long serialVersionUID = -8386816681977604817L; /** Exception message. */ private static final String ERROR_TEST_CONNECTION = "Unable to open a test connection to the given database. JDBC url = %s, username = %s. Terminating connection pool (set lazyInit to true if you expect to start your database after your app). Original Exception: %s"; /** Exception message. */ private static final String SHUTDOWN_LOCATION_TRACE = "Attempting to obtain a connection from a pool that has already been shutdown. \nStack trace of location where pool was shutdown follows:\n"; /** Exception message. */ private static final String UNCLOSED_EXCEPTION_MESSAGE = "Connection obtained from thread [%s] was never closed. \nStack trace of location where connection was obtained follows:\n"; /** JMX constant. */ public static final String MBEAN_CONFIG = "com.jolbox.bonecp:type=BoneCPConfig"; /** JMX constant. */ public static final String MBEAN_BONECP = "com.jolbox.bonecp:type=BoneCP"; /** Constant for keep-alive test */ private static final String[] METADATATABLE = new String[] {"TABLE"}; /** Constant for keep-alive test */ private static final String KEEPALIVEMETADATA = "BONECPKEEPALIVE"; /** Create more connections when we hit x% of our possible number of connections. */ protected final int poolAvailabilityThreshold; /** Number of partitions passed in constructor. **/ protected int partitionCount; /** Partitions handle. 
*/ protected ConnectionPartition[] partitions; /** Handle to factory that creates 1 thread per partition that periodically wakes up and performs some * activity on the connection. */ @VisibleForTesting protected ScheduledExecutorService keepAliveScheduler; /** Handle to factory that creates 1 thread per partition that periodically wakes up and performs some * activity on the connection. */ private ScheduledExecutorService maxAliveScheduler; /** Executor for threads watching each partition to dynamically create new threads/kill off excess ones. */ private ExecutorService connectionsScheduler; /** Configuration object used in constructor. */ @VisibleForTesting protected BoneCPConfig config; /** Executor service for obtaining a connection in an asynchronous fashion. */ private ListeningExecutorService asyncExecutor; /** Logger class. */ private static final Logger logger = LoggerFactory.getLogger(BoneCP.class); /** JMX support. */ private MBeanServer mbs; /** If set to true, create a new thread that monitors a connection and displays warnings if application failed to * close the connection. */ protected boolean closeConnectionWatch = false; /** Threads monitoring for bad connection requests. */ private ExecutorService closeConnectionExecutor; /** set to true if the connection pool has been flagged as shutting down. */ protected volatile boolean poolShuttingDown; /** Placeholder to give more useful info in case of a double shutdown. */ protected String shutdownStackTrace; /** Reference of objects that are to be watched. */ private final Map<Connection, Reference<ConnectionHandle>> finalizableRefs = new ConcurrentHashMap<Connection, Reference<ConnectionHandle>>(); /** Watch for connections that should have been safely closed but the application forgot. */ private transient FinalizableReferenceQueue finalizableRefQueue; /** Time to wait before timing out the connection. Default in config is Long.MAX_VALUE milliseconds. 
*/ protected long connectionTimeoutInMs; /** No of ms to wait for thread.join() in connection watch thread. */ private long closeConnectionWatchTimeoutInMs; /** if true, we care about statistics. */ protected boolean statisticsEnabled; /** statistics handle. */ protected Statistics statistics = new Statistics(this); /** Config setting. */ @VisibleForTesting protected boolean nullOnConnectionTimeout; /** Config setting. */ @VisibleForTesting protected boolean resetConnectionOnClose; /** Config setting. */ protected boolean cachedPoolStrategy; /** Currently active get connection strategy class to use. */ protected ConnectionStrategy connectionStrategy; /** If true, there are no connections to be taken. */ private AtomicBoolean dbIsDown = new AtomicBoolean(); /** Config setting. */ @VisibleForTesting protected Properties clientInfo; /** If false, we haven't made a dummy driver call first. */ @VisibleForTesting protected volatile boolean driverInitialized = false; /** Keep track of our jvm version. */ protected int jvmMajorVersion; /** This is moved here to aid testing. */ protected static String connectionClass = "java.sql.Connection"; /** * Closes off this connection pool. */ public synchronized void shutdown(){ if (!this.poolShuttingDown){ logger.info("Shutting down connection pool..."); this.poolShuttingDown = true; this.shutdownStackTrace = captureStackTrace(SHUTDOWN_LOCATION_TRACE); this.keepAliveScheduler.shutdownNow(); // stop threads from firing. this.maxAliveScheduler.shutdownNow(); // stop threads from firing. this.connectionsScheduler.shutdownNow(); // stop threads from firing. 
this.asyncExecutor.shutdownNow(); try { this.connectionsScheduler.awaitTermination(5, TimeUnit.SECONDS); this.maxAliveScheduler.awaitTermination(5, TimeUnit.SECONDS); this.keepAliveScheduler.awaitTermination(5, TimeUnit.SECONDS); this.asyncExecutor.awaitTermination(5, TimeUnit.SECONDS); if (this.closeConnectionExecutor != null){ this.closeConnectionExecutor.shutdownNow(); this.closeConnectionExecutor.awaitTermination(5, TimeUnit.SECONDS); } } catch (InterruptedException e) { // do nothing } this.connectionStrategy.terminateAllConnections(); unregisterDriver(); registerUnregisterJMX(false); if (finalizableRefQueue != null) { finalizableRefQueue.close(); } logger.info("Connection pool has been shutdown."); } } /** Drops a driver from the DriverManager's list. */ protected void unregisterDriver(){ String jdbcURL = this.config.getJdbcUrl(); if ((jdbcURL != null) && this.config.isDeregisterDriverOnClose()){ logger.info("Unregistering JDBC driver for : "+jdbcURL); try { DriverManager.deregisterDriver(DriverManager.getDriver(jdbcURL)); } catch (SQLException e) { logger.info("Unregistering driver failed.", e); } } } /** Just a synonym to shutdown. */ public void close(){ shutdown(); } /** * Physically close off the internal connection. * @param conn */ protected void destroyConnection(ConnectionHandle conn) { postDestroyConnection(conn); conn.setInReplayMode(true); // we're dead, stop attempting to replay anything try { conn.internalClose(); } catch (SQLException e) { logger.error("Error in attempting to close connection", e); } } /** Update counters and call hooks. * @param handle connection handle. 
*/ protected void postDestroyConnection(ConnectionHandle handle){ ConnectionPartition partition = handle.getOriginatingPartition(); if (this.finalizableRefQueue != null && handle.getInternalConnection() != null){ //safety this.finalizableRefs.remove(handle.getInternalConnection()); // assert o != null : "Did not manage to remove connection from finalizable ref queue"; } partition.updateCreatedConnections(-1); partition.setUnableToCreateMoreTransactions(false); // we can create new ones now, this is an optimization // "Destroying" for us means: don't put it back in the pool. if (handle.getConnectionHook() != null){ handle.getConnectionHook().onDestroy(handle); } } /** Obtains a database connection, retrying if necessary. * @param connectionHandle * @return A DB connection. * @throws SQLException */ protected Connection obtainInternalConnection(ConnectionHandle connectionHandle) throws SQLException { boolean tryAgain = false; Connection result = null; Connection oldRawConnection = connectionHandle.getInternalConnection(); String url = this.getConfig().getJdbcUrl(); int acquireRetryAttempts = this.getConfig().getAcquireRetryAttempts(); long acquireRetryDelayInMs = this.getConfig().getAcquireRetryDelayInMs(); AcquireFailConfig acquireConfig = new AcquireFailConfig(); acquireConfig.setAcquireRetryAttempts(new AtomicInteger(acquireRetryAttempts)); acquireConfig.setAcquireRetryDelayInMs(acquireRetryDelayInMs); acquireConfig.setLogMessage("Failed to acquire connection to "+url); ConnectionHook connectionHook = this.getConfig().getConnectionHook(); do{ result = null; try { // keep track of this hook. result = this.obtainRawInternalConnection(); tryAgain = false; if (acquireRetryAttempts != this.getConfig().getAcquireRetryAttempts()){ logger.info("Successfully re-established connection to "+url); } this.getDbIsDown().set(false); connectionHandle.setInternalConnection(result); // call the hook, if available. 
if (connectionHook != null){ connectionHook.onAcquire(connectionHandle); } ConnectionHandle.sendInitSQL(result, this.getConfig().getInitSQL()); } catch (SQLException e) { // call the hook, if available. if (connectionHook != null){ tryAgain = connectionHook.onAcquireFail(e, acquireConfig); } else { logger.error(String.format("Failed to acquire connection to %s. Sleeping for %d ms. Attempts left: %d", url, acquireRetryDelayInMs, acquireRetryAttempts), e); try { if (acquireRetryAttempts > 0){ Thread.sleep(acquireRetryDelayInMs); } tryAgain = (acquireRetryAttempts--) > 0; } catch (InterruptedException e1) { tryAgain=false; } } if (!tryAgain){ if (oldRawConnection != null) { oldRawConnection.close(); } if (result != null) { result.close(); } connectionHandle.setInternalConnection(oldRawConnection); throw e; } } } while (tryAgain); return result; } /** Returns a database connection by using Driver.getConnection() or DataSource.getConnection() * @return Connection handle * @throws SQLException on error */ protected Connection obtainRawInternalConnection() throws SQLException { Connection result = null; DataSource datasourceBean = this.config.getDatasourceBean(); String url = this.config.getJdbcUrl(); String username = this.config.getUsername(); String password = this.config.getPassword(); Properties props = this.config.getDriverProperties(); boolean externalAuth = this.config.isExternalAuth(); if (externalAuth && props == null){ props = new Properties(); } if (datasourceBean != null){ return (username == null ? 
datasourceBean.getConnection() : datasourceBean.getConnection(username, password)); } // just force the driver to init first if (!this.driverInitialized ){ try{ this.driverInitialized = true; if (props != null){ result = DriverManager.getConnection(url, props); } else { result = DriverManager.getConnection(url, username, password); } result.close(); }catch (SQLException t){ // just force the driver to init first // See https://bugs.launchpad.net/bonecp/+bug/876476 } } if (props != null){ result = DriverManager.getConnection(url, props); } else { result = DriverManager.getConnection(url, username, password); } // #ifdef JDK>6 if (this.clientInfo != null){ // we take care of null'ing this in the constructor if jdk < 6 result.setClientInfo(this.clientInfo); } // #endif JDK>6 return result; } /** * Constructor. * @param config Configuration for pool * @throws SQLException on error */ public BoneCP(BoneCPConfig config) throws SQLException { Class<?> clazz; try { jvmMajorVersion = 5; clazz = Class.forName(connectionClass , true, config.getClassLoader()); clazz.getMethod("createClob"); // since 1.6 jvmMajorVersion = 6; clazz.getMethod("getNetworkTimeout"); // since 1.7 jvmMajorVersion = 7; } catch (Exception e) { // do nothing } try { this.config = Preconditions.checkNotNull(config).clone(); // immutable } catch (CloneNotSupportedException e1) { throw new SQLException("Cloning of the config failed"); } this.config.sanitize(); this.statisticsEnabled = this.config.isStatisticsEnabled(); this.closeConnectionWatchTimeoutInMs = this.config.getCloseConnectionWatchTimeoutInMs(); this.poolAvailabilityThreshold = this.config.getPoolAvailabilityThreshold(); this.connectionTimeoutInMs = this.config.getConnectionTimeoutInMs(); if (this.connectionTimeoutInMs == 0){ this.connectionTimeoutInMs = Long.MAX_VALUE; } this.nullOnConnectionTimeout = this.config.isNullOnConnectionTimeout(); this.resetConnectionOnClose = this.config.isResetConnectionOnClose(); this.clientInfo = jvmMajorVersion 
> 5 ? this.config.getClientInfo() : null; AcquireFailConfig acquireConfig = new AcquireFailConfig(); acquireConfig.setAcquireRetryAttempts(new AtomicInteger(0)); acquireConfig.setAcquireRetryDelayInMs(0); acquireConfig.setLogMessage("Failed to obtain initial connection"); if (!this.config.isLazyInit()){ try{ Connection sanityConnection = obtainRawInternalConnection(); sanityConnection.close(); } catch (Exception e){ if (this.config.getConnectionHook() != null){ this.config.getConnectionHook().onAcquireFail(e, acquireConfig); } throw PoolUtil.generateSQLException(String.format(ERROR_TEST_CONNECTION, this.config.getJdbcUrl(), this.config.getUsername(), PoolUtil.stringifyException(e)), e); } } if (!this.config.isDisableConnectionTracking()){ this.finalizableRefQueue = new FinalizableReferenceQueue(); } this.asyncExecutor = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool()); this.partitions = new ConnectionPartition[this.config.getPartitionCount()]; String suffix = ""; if (this.config.getPoolName()!=null) { suffix="-"+this.config.getPoolName(); } this.keepAliveScheduler = Executors.newScheduledThreadPool(this.config.getPartitionCount(), new CustomThreadFactory("BoneCP-keep-alive-scheduler"+suffix, true)); this.maxAliveScheduler = Executors.newScheduledThreadPool(this.config.getPartitionCount(), new CustomThreadFactory("BoneCP-max-alive-scheduler"+suffix, true)); this.connectionsScheduler = Executors.newFixedThreadPool(this.config.getPartitionCount(), new CustomThreadFactory("BoneCP-pool-watch-thread"+suffix, true)); this.partitionCount = this.config.getPartitionCount(); this.closeConnectionWatch = this.config.isCloseConnectionWatch(); this.cachedPoolStrategy = this.config.getPoolStrategy() != null && this.config.getPoolStrategy().equalsIgnoreCase("CACHED"); if (this.cachedPoolStrategy){ this.connectionStrategy = new CachedConnectionStrategy(this, new DefaultConnectionStrategy(this)); } else { this.connectionStrategy = new DefaultConnectionStrategy(this); 
} boolean queueLIFO = this.config.getServiceOrder() != null && this.config.getServiceOrder().equalsIgnoreCase("LIFO"); if (this.closeConnectionWatch){ logger.warn(THREAD_CLOSE_CONNECTION_WARNING); this.closeConnectionExecutor = Executors.newCachedThreadPool(new CustomThreadFactory("BoneCP-connection-watch-thread"+suffix, true)); } for (int p=0; p < this.config.getPartitionCount(); p++){ ConnectionPartition connectionPartition = new ConnectionPartition(this); this.partitions[p]=connectionPartition; BlockingQueue<ConnectionHandle> connectionHandles = new LinkedBlockingQueue<ConnectionHandle>(this.config.getMaxConnectionsPerPartition()); this.partitions[p].setFreeConnections(connectionHandles); if (!this.config.isLazyInit()){ for (int i=0; i < this.config.getMinConnectionsPerPartition(); i++){ this.partitions[p].addFreeConnection(new ConnectionHandle(null, this.partitions[p], this, false)); } } if (this.config.getIdleConnectionTestPeriod(TimeUnit.SECONDS) > 0 || this.config.getIdleMaxAge(TimeUnit.SECONDS) > 0){ final Runnable connectionTester = new ConnectionTesterThread(connectionPartition, this, this.config.getIdleMaxAge(TimeUnit.MILLISECONDS), this.config.getIdleConnectionTestPeriod(TimeUnit.MILLISECONDS), queueLIFO); long delayInSeconds = this.config.getIdleConnectionTestPeriod(TimeUnit.SECONDS); if (delayInSeconds == 0L){ delayInSeconds = this.config.getIdleMaxAge(TimeUnit.SECONDS); } if (this.config.getIdleMaxAge(TimeUnit.SECONDS) < delayInSeconds && this.config.getIdleConnectionTestPeriod(TimeUnit.SECONDS) != 0 && this.config.getIdleMaxAge(TimeUnit.SECONDS) != 0){ delayInSeconds = this.config.getIdleMaxAge(TimeUnit.SECONDS); } this.keepAliveScheduler.scheduleAtFixedRate(connectionTester,delayInSeconds, delayInSeconds, TimeUnit.SECONDS); } if (this.config.getMaxConnectionAgeInSeconds() > 0){ final Runnable connectionMaxAgeTester = new ConnectionMaxAgeThread(connectionPartition, this, this.config.getMaxConnectionAge(TimeUnit.MILLISECONDS), queueLIFO); 
this.maxAliveScheduler.scheduleAtFixedRate(connectionMaxAgeTester, this.config.getMaxConnectionAgeInSeconds(), this.config.getMaxConnectionAgeInSeconds(), TimeUnit.SECONDS); } // watch this partition for low no of threads this.connectionsScheduler.execute(new PoolWatchThread(connectionPartition, this)); } if (!this.config.isDisableJMX()){ registerUnregisterJMX(true); } } /** * Initialises JMX stuff. * @param doRegister if true, perform registration, if false unregister */ protected void registerUnregisterJMX(boolean doRegister) { if (this.mbs == null ){ // this way makes it easier for mocking. this.mbs = ManagementFactory.getPlatformMBeanServer(); } try { String suffix = ""; if (this.config.getPoolName()!=null){ suffix="-"+this.config.getPoolName(); } ObjectName name = new ObjectName(MBEAN_BONECP +suffix); ObjectName configname = new ObjectName(MBEAN_CONFIG + suffix); if (doRegister){ if (!this.mbs.isRegistered(name)){ this.mbs.registerMBean(this.statistics, name); } if (!this.mbs.isRegistered(configname)){ this.mbs.registerMBean(this.config, configname); } } else { if (this.mbs.isRegistered(name)){ this.mbs.unregisterMBean(name); } if (this.mbs.isRegistered(configname)){ this.mbs.unregisterMBean(configname); } } } catch (Exception e) { logger.error("Unable to start/stop JMX", e); } } /** * Returns a free connection. * @return Connection handle. * @throws SQLException */ public Connection getConnection() throws SQLException { return this.connectionStrategy.getConnection(); } /** Starts off a new thread to monitor this connection attempt. 
* @param connectionHandle to monitor */ protected void watchConnection(ConnectionHandle connectionHandle) { String message = captureStackTrace(UNCLOSED_EXCEPTION_MESSAGE); this.closeConnectionExecutor.submit(new CloseThreadMonitor(Thread.currentThread(), connectionHandle, message, this.closeConnectionWatchTimeoutInMs)); } /** Throw an exception to capture it so as to be able to print it out later on * @param message message to display * @return Stack trace message * */ protected String captureStackTrace(String message) { StringBuilder stringBuilder = new StringBuilder(String.format(message, Thread.currentThread().getName())); StackTraceElement[] trace = Thread.currentThread().getStackTrace(); for(int i = 0; i < trace.length; i++){ stringBuilder.append(" "+trace[i]+"\r\n"); } stringBuilder.append(""); return stringBuilder.toString(); } /** Obtain a connection asynchronously by queueing a request to obtain a connection in a separate thread. * * Use as follows:<p> * Future&lt;Connection&gt; result = pool.getAsyncConnection();<p> * ... do something else in your application here ...<p> * Connection connection = result.get(); // get the connection<p> * * @return A Future task returning a connection. */ public ListenableFuture<Connection> getAsyncConnection(){ return this.asyncExecutor.submit(new Callable<Connection>() { public Connection call() throws Exception { return getConnection(); }}); } /** * Tests if this partition has hit a threshold and signal to the pool watch thread to create new connections * @param connectionPartition to test for. */ protected void maybeSignalForMoreConnections(ConnectionPartition connectionPartition) { if (!connectionPartition.isUnableToCreateMoreTransactions() && !this.poolShuttingDown && connectionPartition.getAvailableConnections()*100/connectionPartition.getMaxConnections() <= this.poolAvailabilityThreshold){ connectionPartition.getPoolWatchThreadSignalQueue().offer(new Object()); // item being pushed is not important. 
} } /** * Releases the given connection back to the pool. This method is not intended to be called by * applications (hence set to protected). Call connection.close() instead which will return * the connection back to the pool. * * @param connection to release * @throws SQLException */ protected void releaseConnection(Connection connection) throws SQLException { ConnectionHandle handle = (ConnectionHandle)connection; // hook calls if (handle.getConnectionHook() != null){ handle.getConnectionHook().onCheckIn(handle); } // release immediately or place it in a queue so that another thread will eventually close it. If we're shutting down, // close off the connection right away because the helper threads have gone away. if (!this.poolShuttingDown){ internalReleaseConnection(handle); } } /** Release a connection by placing the connection back in the pool. * @param connectionHandle Connection being released. * @throws SQLException **/ protected void internalReleaseConnection(ConnectionHandle connectionHandle) throws SQLException { if (!this.cachedPoolStrategy){ connectionHandle.clearStatementCaches(false); } if (connectionHandle.getReplayLog() != null){ connectionHandle.getReplayLog().clear(); connectionHandle.recoveryResult.getReplaceTarget().clear(); } if (connectionHandle.isExpired() || (!this.poolShuttingDown && connectionHandle.isPossiblyBroken() && !isConnectionHandleAlive(connectionHandle))){ if (connectionHandle.isExpired()) { connectionHandle.internalClose(); } ConnectionPartition connectionPartition = connectionHandle.getOriginatingPartition(); postDestroyConnection(connectionHandle); maybeSignalForMoreConnections(connectionPartition); connectionHandle.clearStatementCaches(true); return; // don't place back in queue - connection is broken or expired. 
} connectionHandle.setConnectionLastUsedInMs(System.currentTimeMillis()); if (!this.poolShuttingDown){ putConnectionBackInPartition(connectionHandle); } else { connectionHandle.internalClose(); } } /** Places a connection back in the originating partition. * @param connectionHandle to place back * @throws SQLException on error */ protected void putConnectionBackInPartition(ConnectionHandle connectionHandle) throws SQLException { if (this.cachedPoolStrategy && ((CachedConnectionStrategy)this.connectionStrategy).tlConnections.dumbGet().getValue()){ connectionHandle.logicallyClosed.set(true); ((CachedConnectionStrategy)this.connectionStrategy).tlConnections.set(new AbstractMap.SimpleEntry<ConnectionHandle, Boolean>(connectionHandle, false)); } else { BlockingQueue<ConnectionHandle> queue = connectionHandle.getOriginatingPartition().getFreeConnections(); if (!queue.offer(connectionHandle)){ // this shouldn't fail connectionHandle.internalClose(); } } } /** Sends a dummy statement to the server to keep the connection alive * @param connection Connection handle to perform activity on * @return true if test query worked, false otherwise */ public boolean isConnectionHandleAlive(ConnectionHandle connection) { Statement stmt = null; boolean result = false; boolean logicallyClosed = connection.logicallyClosed.get(); try { connection.logicallyClosed.compareAndSet(true, false); // avoid checks later on if it's marked as closed. String testStatement = this.config.getConnectionTestStatement(); ResultSet rs = null; if (testStatement == null) { // Make a call to fetch the metadata instead of a dummy query. rs = connection.getMetaData().getTables( null, null, KEEPALIVEMETADATA, METADATATABLE ); } else { stmt = connection.createStatement(); stmt.execute(testStatement); } if (rs != null) { rs.close(); } result = true; } catch (SQLException e) { // connection must be broken! 
result = false; } finally { connection.logicallyClosed.set(logicallyClosed); connection.setConnectionLastResetInMs(System.currentTimeMillis()); result = closeStatement(stmt, result); } return result; } /** * @param stmt * @param result * @return false on failure. */ private boolean closeStatement(Statement stmt, boolean result) { if (stmt != null) { try { stmt.close(); } catch (SQLException e) { return false; } } return result; } /** Return total number of connections currently in use by an application * @return no of leased connections */ public int getTotalLeased(){ int total=0; for (int i=0; i < this.partitionCount && this.partitions[i] != null; i++){ total+=this.partitions[i].getCreatedConnections()-this.partitions[i].getAvailableConnections(); } return total; } /** Return the number of free connections available to an application right away (excluding connections that can be * created dynamically) * @return number of free connections */ public int getTotalFree(){ int total=0; for (int i=0; i < this.partitionCount && this.partitions[i] != null ; i++){ total+=this.partitions[i].getAvailableConnections(); } return total; } /** * Return total number of connections created in all partitions. * * @return number of created connections */ public int getTotalCreatedConnections(){ int total=0; for (int i=0; i < this.partitionCount && this.partitions[i] != null; i++){ total+=this.partitions[i].getCreatedConnections(); } return total; } /** * Gets config object. * * @return config object */ public BoneCPConfig getConfig() { return this.config; } /** Return the finalizable refs handle. * @return the finalizableRefs value. */ protected Map<Connection, Reference<ConnectionHandle>> getFinalizableRefs() { return this.finalizableRefs; } /** Watch for connections that should have been safely closed but the application forgot. 
* @return the finalizableRefQueue */ protected FinalizableReferenceQueue getFinalizableRefQueue() { return this.finalizableRefQueue; } /** * Returns a reference to the statistics class. * @return statistics */ public Statistics getStatistics() { return this.statistics; } /** * Returns the dbIsDown field. * @return dbIsDown */ public AtomicBoolean getDbIsDown() { return this.dbIsDown; } }
apache-2.0
npmcomponent/josdejong-mathjs
lib/error/DimensionError.js
1151
/** * Create a range error with the message: * 'Dimension mismatch (<actual size> != <expected size>)' * @param {number | number[]} actual The actual size * @param {number | number[]} expected The expected size * @param {string} [relation='!='] Optional relation between actual * and expected size: '!=', '<', etc. * @extends RangeError */ function DimensionError(actual, expected, relation) { if (!(this instanceof DimensionError)) { throw new SyntaxError('Constructor must be called with the new operator'); } this.actual = actual; this.expected = expected; this.relation = relation; this.message = 'Dimension mismatch (' + (Array.isArray(actual) ? ('[' + actual.join(', ') + ']') : actual) + ' ' + (this.relation || '!=') + ' ' + (Array.isArray(expected) ? ('[' + expected.join(', ') + ']') : expected) + ')'; this.stack = (new Error()).stack; } DimensionError.prototype = new RangeError(); DimensionError.prototype.constructor = RangeError; DimensionError.prototype.name = 'DimensionError'; module.exports = DimensionError;
apache-2.0
tarikgwa/test
html/app/code/Magento/Catalog/Model/Indexer/Product/Flat/FlatTableBuilder.php
13390
<?php
/**
 * Copyright © 2015 Magento. All rights reserved.
 * See COPYING.txt for license details.
 */
namespace Magento\Catalog\Model\Indexer\Product\Flat;

use Magento\Framework\App\ResourceConnection;

/**
 * Builds the temporary flat catalog tables for a store and swaps them into
 * place. The flat table denormalizes EAV product attributes into one table
 * per store for faster frontend reads.
 */
class FlatTableBuilder
{
    /**
     * Config path to the maximum available amount of indexes for the flat indexer
     */
    const XML_NODE_MAX_INDEX_COUNT = 'catalog/product/flat/max_index_count';

    /**
     * @var \Magento\Catalog\Helper\Product\Flat\Indexer
     */
    protected $_productIndexerHelper;

    /**
     * @var \Magento\Framework\DB\Adapter\AdapterInterface
     */
    protected $_connection;

    /**
     * @var \Magento\Framework\App\Config\ScopeConfigInterface $config
     */
    protected $_config;

    /**
     * @var \Magento\Store\Model\StoreManagerInterface
     */
    protected $_storeManager;

    /**
     * @var TableDataInterface
     */
    protected $_tableData;

    /**
     * @param \Magento\Catalog\Helper\Product\Flat\Indexer $productIndexerHelper
     * @param \Magento\Framework\App\ResourceConnection $resource
     * @param \Magento\Framework\App\Config\ScopeConfigInterface $config
     * @param \Magento\Store\Model\StoreManagerInterface $storeManager
     * @param TableDataInterface $tableData
     */
    public function __construct(
        \Magento\Catalog\Helper\Product\Flat\Indexer $productIndexerHelper,
        \Magento\Framework\App\ResourceConnection $resource,
        \Magento\Framework\App\Config\ScopeConfigInterface $config,
        \Magento\Store\Model\StoreManagerInterface $storeManager,
        \Magento\Catalog\Model\Indexer\Product\Flat\TableDataInterface $tableData
    ) {
        $this->_productIndexerHelper = $productIndexerHelper;
        $this->_connection = $resource->getConnection();
        $this->_config = $config;
        $this->_storeManager = $storeManager;
        $this->_tableData = $tableData;
    }

    /**
     * Prepare temporary flat tables, optionally fill them, and move the
     * result into the live flat table for the store.
     *
     * @param int|string $storeId
     * @param array $changedIds
     * @param string $valueFieldSuffix
     * @param string $tableDropSuffix
     * @param bool $fillTmpTables
     * @return void
     */
    public function build($storeId, $changedIds, $valueFieldSuffix, $tableDropSuffix, $fillTmpTables)
    {
        $attributes = $this->_productIndexerHelper->getAttributes();
        $eavAttributes = $this->_productIndexerHelper->getTablesStructure($attributes);
        $this->_createTemporaryFlatTable($storeId);

        if ($fillTmpTables) {
            $this->_fillTemporaryFlatTable($eavAttributes, $storeId, $valueFieldSuffix);
            //Update zero based attributes by values from current store
            $this->_updateTemporaryTableByStoreValues($eavAttributes, $changedIds, $storeId, $valueFieldSuffix);
        }

        $flatTable = $this->_productIndexerHelper->getFlatTableName($storeId);
        $flatDropName = $flatTable . $tableDropSuffix;
        $temporaryFlatTableName = $this->_getTemporaryTableName(
            $this->_productIndexerHelper->getFlatTableName($storeId)
        );
        $this->_tableData->move($flatTable, $flatDropName, $temporaryFlatTableName);
    }

    /**
     * Prepare flat table for store: validates the index-count limit, builds
     * the DDL for all flat columns and indexes, then (re)creates the
     * temporary table.
     *
     * @param int|string $storeId
     * @return void
     * @throws \Magento\Framework\Exception\LocalizedException
     * @SuppressWarnings(PHPMD.CyclomaticComplexity)
     * @SuppressWarnings(PHPMD.NPathComplexity)
     */
    protected function _createTemporaryFlatTable($storeId)
    {
        $columns = $this->_productIndexerHelper->getFlatColumns();

        $indexesNeed = $this->_productIndexerHelper->getFlatIndexes();

        $maxIndex = $this->_config->getValue(self::XML_NODE_MAX_INDEX_COUNT);

        if ($maxIndex && count($indexesNeed) > $maxIndex) {
            // Fix: the concatenated message parts previously lacked separating
            // spaces, producing "attributes.Currently" and "them.Please" in the
            // user-facing error text.
            throw new \Magento\Framework\Exception\LocalizedException(
                __(
                    'The Flat Catalog module has a limit of %2$d filterable and/or sortable attributes. '
                    . 'Currently there are %1$d of them. '
                    . 'Please reduce the number of filterable/sortable attributes in order to use this module',
                    count($indexesNeed),
                    $maxIndex
                )
            );
        }

        $indexKeys = [];
        $indexProps = array_values($indexesNeed);
        $upperPrimaryKey = strtoupper(\Magento\Framework\DB\Adapter\AdapterInterface::INDEX_TYPE_PRIMARY);
        foreach ($indexProps as $i => $indexProp) {
            $indexName = $this->_connection->getIndexName(
                $this->_getTemporaryTableName($this->_productIndexerHelper->getFlatTableName($storeId)),
                $indexProp['fields'],
                $indexProp['type']
            );
            $indexProp['type'] = strtoupper($indexProp['type']);
            if ($indexProp['type'] == $upperPrimaryKey) {
                // All primary-key entries collapse onto one key name so
                // array_combine below keeps a single PRIMARY entry.
                $indexKey = $upperPrimaryKey;
            } else {
                $indexKey = $indexName;
            }
            $indexProps[$i] = [
                'KEY_NAME' => $indexName,
                'COLUMNS_LIST' => $indexProp['fields'],
                'INDEX_TYPE' => strtolower($indexProp['type']),
            ];
            $indexKeys[$i] = $indexKey;
        }
        $indexesNeed = array_combine($indexKeys, $indexProps);

        /** @var $table \Magento\Framework\DB\Ddl\Table */
        $table = $this->_connection->newTable(
            $this->_getTemporaryTableName($this->_productIndexerHelper->getFlatTableName($storeId))
        );

        foreach ($columns as $fieldName => $fieldProp) {
            $columnLength = isset($fieldProp['length']) ? $fieldProp['length'] : null;

            $columnDefinition = [
                'nullable' => isset($fieldProp['nullable']) ? (bool)$fieldProp['nullable'] : false,
                'unsigned' => isset($fieldProp['unsigned']) ? (bool)$fieldProp['unsigned'] : false,
                'default' => isset($fieldProp['default']) ? $fieldProp['default'] : false,
                'primary' => false,
            ];

            $columnComment = isset($fieldProp['comment']) ? $fieldProp['comment'] : $fieldName;

            $table->addColumn($fieldName, $fieldProp['type'], $columnLength, $columnDefinition, $columnComment);
        }

        foreach ($indexesNeed as $indexProp) {
            $table->addIndex(
                $indexProp['KEY_NAME'],
                $indexProp['COLUMNS_LIST'],
                ['type' => $indexProp['INDEX_TYPE']]
            );
        }

        $table->setComment("Catalog Product Flat (Store {$storeId})");

        // Drop any leftover temporary table from a previous run before creating.
        $this->_connection->dropTable(
            $this->_getTemporaryTableName($this->_productIndexerHelper->getFlatTableName($storeId))
        );
        $this->_connection->createTable($table);
    }

    /**
     * Fill temporary flat table by data from temporary flat table parts,
     * restricted to enabled products assigned to the store's website.
     *
     * @param array $tables
     * @param int|string $storeId
     * @param string $valueFieldSuffix
     * @return void
     */
    protected function _fillTemporaryFlatTable(array $tables, $storeId, $valueFieldSuffix)
    {
        $select = $this->_connection->select();
        $temporaryFlatTableName = $this->_getTemporaryTableName(
            $this->_productIndexerHelper->getFlatTableName($storeId)
        );
        $flatColumns = $this->_productIndexerHelper->getFlatColumns();
        $entityTableName = $this->_productIndexerHelper->getTable('catalog_product_entity');
        $entityTemporaryTableName = $this->_getTemporaryTableName($entityTableName);
        $columnsList = array_keys($tables[$entityTableName]);
        $websiteId = (int)$this->_storeManager->getStore($storeId)->getWebsiteId();

        unset($tables[$entityTableName]);

        $allColumns = array_merge(['entity_id', 'type_id', 'attribute_set_id'], $columnsList);

        /* @var $status \Magento\Eav\Model\Entity\Attribute */
        $status = $this->_productIndexerHelper->getAttribute('status');
        $statusTable = $this->_getTemporaryTableName($status->getBackendTable());
        $statusConditions = [
            'e.entity_id = dstatus.entity_id',
            'dstatus.store_id = ' . (int)$storeId,
            'dstatus.attribute_id = ' . (int)$status->getId(),
        ];
        // Store-scoped status value wins over the default-scope value.
        // NOTE(review): the fallback identifier references "{$statusTable}.status",
        // while the join below uses $status->getBackend()->getTable() — confirm
        // these are intended to refer to the same source.
        $statusExpression = $this->_connection->getIfNullSql(
            'dstatus.value',
            $this->_connection->quoteIdentifier("{$statusTable}.status")
        );

        $select->from(
            ['e' => $entityTemporaryTableName],
            $allColumns
        )->joinInner(
            ['wp' => $this->_productIndexerHelper->getTable('catalog_product_website')],
            'wp.product_id = e.entity_id AND wp.website_id = ' . $websiteId,
            []
        )->joinLeft(
            ['dstatus' => $status->getBackend()->getTable()],
            implode(' AND ', $statusConditions),
            []
        )->where(
            $statusExpression . ' = ' . \Magento\Catalog\Model\Product\Attribute\Source\Status::STATUS_ENABLED
        );

        foreach ($tables as $tableName => $columns) {
            $columnValueNames = [];
            $temporaryTableName = $this->_getTemporaryTableName($tableName);
            $temporaryValueTableName = $temporaryTableName . $valueFieldSuffix;
            $columnsNames = array_keys($columns);

            $select->joinLeft(
                $temporaryTableName,
                'e.entity_id = ' . $temporaryTableName . '.entity_id',
                $columnsNames
            );
            $allColumns = array_merge($allColumns, $columnsNames);

            // Collect the "<attribute><suffix>" display-value columns that
            // exist in the flat structure and join their value table once.
            foreach ($columnsNames as $name) {
                $columnValueName = $name . $valueFieldSuffix;
                if (isset($flatColumns[$columnValueName])) {
                    $columnValueNames[] = $columnValueName;
                }
            }
            if (!empty($columnValueNames)) {
                $select->joinLeft(
                    $temporaryValueTableName,
                    'e.entity_id = ' . $temporaryValueTableName . '.entity_id',
                    $columnValueNames
                );
                $allColumns = array_merge($allColumns, $columnValueNames);
            }
        }
        $sql = $select->insertFromSelect($temporaryFlatTableName, $allColumns, false);
        $this->_connection->query($sql);
    }

    /**
     * Apply diff. between 0 store and current store to temporary flat table.
     *
     * @param array $tables
     * @param array $changedIds
     * @param int|string $storeId
     * @param string $valueFieldSuffix
     * @return void
     */
    protected function _updateTemporaryTableByStoreValues(
        array $tables,
        array $changedIds,
        $storeId,
        $valueFieldSuffix
    ) {
        $flatColumns = $this->_productIndexerHelper->getFlatColumns();
        $temporaryFlatTableName = $this->_getTemporaryTableName(
            $this->_productIndexerHelper->getFlatTableName($storeId)
        );
        foreach ($tables as $tableName => $columns) {
            foreach ($columns as $attribute) {
                /* @var $attribute \Magento\Eav\Model\Entity\Attribute */
                $attributeCode = $attribute->getAttributeCode();
                if ($attribute->getBackend()->getType() != 'static') {
                    // Overwrite default-scope values with store-scope values
                    // where a non-null store value exists.
                    $joinCondition = 't.entity_id = e.entity_id' .
                        ' AND t.attribute_id=' . $attribute->getId() .
                        ' AND t.store_id = ' . $storeId .
                        ' AND t.value IS NOT NULL';
                    /** @var $select \Magento\Framework\DB\Select */
                    $select = $this->_connection->select()->joinInner(
                        ['t' => $tableName],
                        $joinCondition,
                        [$attributeCode => 't.value']
                    );
                    if (!empty($changedIds)) {
                        $select->where($this->_connection->quoteInto('e.entity_id IN (?)', $changedIds));
                    }
                    $sql = $select->crossUpdateFromSelect(['e' => $temporaryFlatTableName]);
                    $this->_connection->query($sql);
                }

                //Update not simple attributes (eg. dropdown)
                if (isset($flatColumns[$attributeCode . $valueFieldSuffix])) {
                    $select = $this->_connection->select()->joinInner(
                        ['t' => $this->_productIndexerHelper->getTable('eav_attribute_option_value')],
                        't.option_id = e.' . $attributeCode . ' AND t.store_id=' . $storeId,
                        [$attributeCode . $valueFieldSuffix => 't.value']
                    );
                    if (!empty($changedIds)) {
                        $select->where($this->_connection->quoteInto('e.entity_id IN (?)', $changedIds));
                    }
                    $sql = $select->crossUpdateFromSelect(['e' => $temporaryFlatTableName]);
                    $this->_connection->query($sql);
                }
            }
        }
    }

    /**
     * Retrieve temporary table name by regular table name
     *
     * @param string $tableName
     * @return string
     */
    protected function _getTemporaryTableName($tableName)
    {
        return sprintf('%s_tmp_indexer', $tableName);
    }
}
apache-2.0
Damianofds/jai-ext
jt-algebra/src/main/java/it/geosolutions/jaiext/algebra/constant/OperationConstDescriptor.java
7551
/* JAI-Ext - OpenSource Java Advanced Image Extensions Library
 * http://www.geo-solutions.it/
 * Copyright 2014 GeoSolutions
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package it.geosolutions.jaiext.algebra.constant;

import it.geosolutions.jaiext.algebra.AlgebraDescriptor.Operator;
import it.geosolutions.jaiext.range.Range;

import java.awt.RenderingHints;
import java.awt.image.RenderedImage;
import java.awt.image.renderable.ParameterBlock;
import java.awt.image.renderable.RenderableImage;

import javax.media.jai.JAI;
import javax.media.jai.OperationDescriptorImpl;
import javax.media.jai.ParameterBlockJAI;
import javax.media.jai.ROI;
import javax.media.jai.RenderableOp;
import javax.media.jai.RenderedOp;
import javax.media.jai.registry.RenderableRegistryMode;
import javax.media.jai.registry.RenderedRegistryMode;

/**
 * {@link OperationDescriptorImpl} describing the OperationConst operation
 *
 * @author Nicola Lagomarsini geosolutions
 *
 */
public class OperationConstDescriptor extends OperationDescriptorImpl {

    // NOTE(review): these index constants do not match the order of
    // paramNames below ("constants" is parameter 0 and "operation" is
    // parameter 1, which would give CONSTANT_INDEX = 0 and
    // OPERATION_INDEX = 1). Values are deliberately left unchanged here
    // because callers outside this file may rely on them — confirm against
    // the corresponding OpImage/RIF implementations before fixing.
    public final static int OPERATION_INDEX = 0;

    public final static int ROI_INDEX = 1;

    public final static int RANGE_INDEX = 2;

    public final static int DEST_NODATA_INDEX = 4;

    public final static int CONSTANT_INDEX = 3;

    /**
     * The resource strings that provide the general documentation and specify the parameter list for this operation.
     */
    private static final String[][] resources = {
            { "GlobalName", "operationConst" },
            { "LocalName", "operationConst" },
            { "Vendor", "it.geosolutions.jaiext" },
            { "Description",
                    "This class executes the operation selected by the user on each pixel of the source images " },
            { "DocURL", "Not Defined" },
            { "Version", "1.0" },
            { "arg0Desc", "Constant Values to Add" },
            { "arg1Desc", "Operation to execute" },
            { "arg2Desc", "ROI object used" },
            { "arg3Desc", "No Data Range used" },
            { "arg4Desc", "Output value for No Data" } };

    /**
     * Input Parameter name
     */
    private static final String[] paramNames = { "constants", "operation", "roi", "noData",
            "destinationNoData" };

    /**
     * Input Parameter class
     */
    private static final Class[] paramClasses = { double[].class, Operator.class,
            javax.media.jai.ROI.class, it.geosolutions.jaiext.range.Range.class, Double.class };

    /**
     * Input Parameter default values
     */
    private static final Object[] paramDefaults = { NO_PARAMETER_DEFAULT, NO_PARAMETER_DEFAULT,
            null, null, 0d };

    /** Constructor. */
    public OperationConstDescriptor() {
        super(resources, 1, paramClasses, paramNames, paramDefaults);
    }

    /** Returns <code>true</code> since renderable operation is supported. */
    public boolean isRenderableSupported() {
        return true;
    }

    /**
     * Validates the "constants" parameter for the rendered mode: it must be a
     * non-null {@code double[]}, or an {@code int[]} which is converted in
     * place to {@code double[]}.
     */
    @Override
    protected boolean validateParameters(String modeName, ParameterBlock args, StringBuffer msg) {
        if (modeName.equalsIgnoreCase(RenderedRegistryMode.MODE_NAME)) {
            // Check for the constants
            double[] constants = null;
            Object param = args.getObjectParameter(0);
            if (param != null) {
                if (param instanceof double[]) {
                    return true;
                } else if (param instanceof int[]) {
                    // Widen int constants to double so downstream code only
                    // ever sees a double[].
                    int[] paramInt = (int[]) param;
                    constants = new double[paramInt.length];
                    for (int i = 0; i < paramInt.length; i++) {
                        constants[i] = paramInt[i];
                    }
                    args.set(constants, 0);
                    return true;
                }
            }
            // Fix: report the failure reason instead of returning false with
            // an empty message buffer.
            msg.append("Parameter \"constants\" must be a non-null double[] or int[] array");
            return false;
        }
        return true;
    }

    /**
     * Executes the selected operation with a constant on the input image.
     *
     * <p>
     * Creates a <code>ParameterBlockJAI</code> from all supplied arguments except <code>hints</code> and invokes
     * {@link JAI#create(String,ParameterBlock,RenderingHints)}.
     *
     * @see JAI
     * @see ParameterBlockJAI
     * @see RenderedOp
     *
     * @param source <code>RenderedImage</code> source.
     * @param constants the constants array to apply to the source
     * @param op operation to execute
     * @param roi optional ROI object
     * @param noData optional nodata range for checking nodata
     * @param destinationNoData value to set for destination NoData
     * @param hints The <code>RenderingHints</code> to use. May be <code>null</code>.
     * @return The <code>RenderedOp</code> destination.
     */
    public static RenderedOp create(RenderedImage source, double[] constants, Operator op, ROI roi,
            Range noData, double destinationNoData, RenderingHints hints) {
        ParameterBlockJAI pb = new ParameterBlockJAI("operationConst",
                RenderedRegistryMode.MODE_NAME);

        pb.setSource(source, 0);

        if (pb.getNumSources() == 0) {
            throw new IllegalArgumentException("The input images are Null");
        }

        pb.setParameter("operation", op);
        pb.setParameter("roi", roi);
        pb.setParameter("constants", constants);
        pb.setParameter("noData", noData);
        pb.setParameter("destinationNoData", destinationNoData);

        return JAI.create("operationConst", pb, hints);
    }

    /**
     * Executes the selected operation with a constant on the input image.
     *
     * <p>
     * Creates a <code>ParameterBlockJAI</code> from all supplied arguments except <code>hints</code> and invokes
     * {@link JAI#createRenderable(String,ParameterBlock,RenderingHints)}.
     *
     * @see JAI
     * @see ParameterBlockJAI
     * @see RenderableOp
     *
     * @param source <code>RenderedImage</code> source.
     * @param constants the constants array to apply to the source
     * @param op operation to execute
     * @param roi optional ROI object
     * @param noData optional nodata range for checking nodata
     * @param destinationNoData value to set for destination NoData
     * @param hints The <code>RenderingHints</code> to use. May be <code>null</code>.
     * @return The <code>RenderableOp</code> destination.
     */
    public static RenderableOp createRenderable(RenderableImage source, double[] constants,
            Operator op, ROI roi, Range noData, double destinationNoData, RenderingHints hints) {
        ParameterBlockJAI pb = new ParameterBlockJAI("operationConst",
                RenderableRegistryMode.MODE_NAME);

        pb.setSource(source, 0);

        if (pb.getNumSources() == 0) {
            throw new IllegalArgumentException("The input images are Null");
        }

        pb.setParameter("operation", op);
        pb.setParameter("roi", roi);
        pb.setParameter("constants", constants);
        pb.setParameter("noData", noData);
        pb.setParameter("destinationNoData", destinationNoData);

        return JAI.createRenderable("operationConst", pb, hints);
    }
}
apache-2.0
jbonofre/incubator-beam
sdks/python/apache_beam/examples/cookbook/datastore_wordcount.py
10712
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""A word-counting workflow that uses Google Cloud Datastore.

This example shows how to use ``datastoreio`` to read from and write to
Google Cloud Datastore. Note that running this example may incur charge for
Cloud Datastore operations.

See https://developers.google.com/datastore/ for more details on Google Cloud
Datastore.
See https://beam.apache.org/get-started/quickstart on how to run a Beam
pipeline.

Read-only Mode: In this mode, this example reads Cloud Datastore entities using
the ``datastoreio.ReadFromDatastore`` transform, extracts the words, counts
them and write the output to a set of files.

The following options must be provided to run this pipeline in read-only mode:
``
--dataset YOUR_DATASET
--kind YOUR_DATASTORE_KIND
--output [YOUR_LOCAL_FILE *or* gs://YOUR_OUTPUT_PATH]
--read_only
``

Dataset maps to Project ID for v1 version of datastore.

Read-write Mode: In this mode, this example reads words from an input file,
converts them to Cloud Datastore ``Entity`` objects and writes them to Cloud
Datastore using the ``datastoreio.Write`` transform. The second pipeline will
then read these Cloud Datastore entities using the
``datastoreio.ReadFromDatastore`` transform, extract the words, count them and
write the output to a set of files.

The following options must be provided to run this pipeline in read-write mode:
``
--dataset YOUR_DATASET
--kind YOUR_DATASTORE_KIND
--output [YOUR_LOCAL_FILE *or* gs://YOUR_OUTPUT_PATH]
``

Note: We are using the Cloud Datastore protobuf objects directly because that
is the interface that the ``datastoreio`` exposes. See the following links on
more information about these protobuf messages.
https://cloud.google.com/datastore/docs/reference/rpc/google.datastore.v1 and
https://github.com/googleapis/googleapis/tree/master/google/datastore/v1
"""

from __future__ import absolute_import

import argparse
import logging
import re
import uuid

from google.cloud.proto.datastore.v1 import entity_pb2
from google.cloud.proto.datastore.v1 import query_pb2
from googledatastore import helper as datastore_helper
from googledatastore import PropertyFilter

import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io.gcp.datastore.v1.datastoreio import ReadFromDatastore
from apache_beam.io.gcp.datastore.v1.datastoreio import WriteToDatastore
from apache_beam.metrics import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions


class WordExtractingDoFn(beam.DoFn):
  """Parse each line of input text into words."""

  def __init__(self):
    # Pipeline-level metrics; committed values are queried after the run
    # completes (see run() below).
    self.empty_line_counter = Metrics.counter('main', 'empty_lines')
    self.word_length_counter = Metrics.counter('main', 'word_lengths')
    self.word_counter = Metrics.counter('main', 'total_words')
    self.word_lengths_dist = Metrics.distribution('main', 'word_len_dist')

  def process(self, element):
    """Returns an iterator over words in contents of Cloud Datastore entity.

    The element is a line of text.  If the line is blank, note that, too.

    Args:
      element: the input element to be processed

    Returns:
      The processed element.
    """
    # The entity stores its text under the 'content' property; an entity
    # without that property counts as an empty line.
    content_value = element.properties.get('content', None)
    text_line = ''
    if content_value:
      text_line = content_value.string_value

    if not text_line:
      self.empty_line_counter.inc()

    words = re.findall(r'[A-Za-z\']+', text_line)
    for w in words:
      self.word_length_counter.inc(len(w))
      self.word_lengths_dist.update(len(w))
      self.word_counter.inc()
    return words


class EntityWrapper(object):
  """Create a Cloud Datastore entity from the given string."""

  def __init__(self, namespace, kind, ancestor):
    self._namespace = namespace
    self._kind = kind
    self._ancestor = ancestor

  def make_entity(self, content):
    # Build an Entity protobuf keyed under a common ancestor so the
    # read pipeline can fetch all entities with one ancestor query.
    entity = entity_pb2.Entity()
    if self._namespace is not None:
      entity.key.partition_id.namespace_id = self._namespace

    # All entities created will have the same ancestor
    datastore_helper.add_key_path(entity.key, self._kind, self._ancestor,
                                  self._kind, str(uuid.uuid4()))

    # NOTE: `unicode` is the Python 2 builtin; this example predates Python 3
    # support in Beam.
    datastore_helper.add_properties(entity, {"content": unicode(content)})
    return entity


def write_to_datastore(user_options, pipeline_options):
  """Creates a pipeline that writes entities to Cloud Datastore."""
  with beam.Pipeline(options=pipeline_options) as p:
    # pylint: disable=expression-not-assigned
    (p
     | 'read' >> ReadFromText(user_options.input)
     | 'create entity' >> beam.Map(
         EntityWrapper(user_options.namespace, user_options.kind,
                       user_options.ancestor).make_entity)
     | 'write to datastore' >> WriteToDatastore(user_options.dataset))


def make_ancestor_query(kind, namespace, ancestor):
  """Creates a Cloud Datastore ancestor query.

  The returned query will fetch all the entities that have the parent key name
  set to the given `ancestor`.
  """
  ancestor_key = entity_pb2.Key()
  datastore_helper.add_key_path(ancestor_key, kind, ancestor)
  if namespace is not None:
    ancestor_key.partition_id.namespace_id = namespace

  query = query_pb2.Query()
  query.kind.add().name = kind

  datastore_helper.set_property_filter(
      query.filter, '__key__', PropertyFilter.HAS_ANCESTOR, ancestor_key)

  return query


def read_from_datastore(user_options, pipeline_options):
  """Creates a pipeline that reads entities from Cloud Datastore."""
  p = beam.Pipeline(options=pipeline_options)
  # Create a query to read entities from datastore.
  query = make_ancestor_query(user_options.kind, user_options.namespace,
                              user_options.ancestor)

  # Read entities from Cloud Datastore into a PCollection.
  lines = p | 'read from datastore' >> ReadFromDatastore(
      user_options.dataset, query, user_options.namespace)

  # Count the occurrences of each word.
  def count_ones(word_ones):
    (word, ones) = word_ones
    return (word, sum(ones))

  counts = (lines
            | 'split' >> (beam.ParDo(WordExtractingDoFn())
                          .with_output_types(unicode))
            | 'pair_with_one' >> beam.Map(lambda x: (x, 1))
            | 'group' >> beam.GroupByKey()
            | 'count' >> beam.Map(count_ones))

  # Format the counts into a PCollection of strings.
  def format_result(word_count):
    (word, count) = word_count
    return '%s: %s' % (word, count)

  output = counts | 'format' >> beam.Map(format_result)

  # Write the output using a "Write" transform that has side effects.
  # pylint: disable=expression-not-assigned
  output | 'write' >> beam.io.WriteToText(file_path_prefix=user_options.output,
                                          num_shards=user_options.num_shards)

  result = p.run()
  # Wait until completion, main thread would access post-completion job
  # results.
  result.wait_until_finish()
  return result


def run(argv=None):
  """Main entry point; defines and runs the wordcount pipeline."""
  parser = argparse.ArgumentParser()
  parser.add_argument('--input',
                      dest='input',
                      default='gs://dataflow-samples/shakespeare/kinglear.txt',
                      help='Input file to process.')
  parser.add_argument('--dataset',
                      dest='dataset',
                      help='Dataset ID to read from Cloud Datastore.')
  parser.add_argument('--kind',
                      dest='kind',
                      required=True,
                      help='Datastore Kind')
  parser.add_argument('--namespace',
                      dest='namespace',
                      help='Datastore Namespace')
  parser.add_argument('--ancestor',
                      dest='ancestor',
                      default='root',
                      help='The ancestor key name for all entities.')
  parser.add_argument('--output',
                      dest='output',
                      required=True,
                      help='Output file to write results to.')
  parser.add_argument('--read_only',
                      action='store_true',
                      help='Read an existing dataset, do not write first')
  parser.add_argument('--num_shards',
                      dest='num_shards',
                      type=int,
                      # If the system should choose automatically.
                      default=0,
                      help='Number of output shards')

  known_args, pipeline_args = parser.parse_known_args(argv)
  # We use the save_main_session option because one or more DoFn's in this
  # workflow rely on global context (e.g., a module imported at module level).
  pipeline_options = PipelineOptions(pipeline_args)
  pipeline_options.view_as(SetupOptions).save_main_session = True

  # Write to Datastore if `read_only` options is not specified.
  if not known_args.read_only:
    write_to_datastore(known_args, pipeline_options)

  # Read entities from Datastore.
  result = read_from_datastore(known_args, pipeline_options)

  # Query the committed metrics from the finished job and log them.
  empty_lines_filter = MetricsFilter().with_name('empty_lines')
  query_result = result.metrics().query(empty_lines_filter)
  if query_result['counters']:
    empty_lines_counter = query_result['counters'][0]
    logging.info('number of empty lines: %d', empty_lines_counter.committed)
  else:
    logging.warn('unable to retrieve counter metrics from runner')

  word_lengths_filter = MetricsFilter().with_name('word_len_dist')
  query_result = result.metrics().query(word_lengths_filter)
  if query_result['distributions']:
    word_lengths_dist = query_result['distributions'][0]
    logging.info('average word length: %d', word_lengths_dist.committed.mean)
  else:
    logging.warn('unable to retrieve distribution metrics from runner')


if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  run()
apache-2.0
markharwood/elasticsearch
core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java
14078
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.mapper.object;

import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.DateFieldMapper;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;

import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseDateTimeFormatter;

/**
 * Object mapper for the root of a document type. On top of the regular
 * {@link ObjectMapper} behavior it owns the root-only mapping settings that
 * are visible in this class: dynamic templates, the list of dynamic date
 * formats, and the {@code date_detection} / {@code numeric_detection} flags.
 */
public class RootObjectMapper extends ObjectMapper {

    /** Default values applied when the mapping does not specify them. */
    public static class Defaults {
        // Default dynamic date formats: the standard date-mapper format plus
        // the strict "standard" Joda formatter.
        public static final FormatDateTimeFormatter[] DYNAMIC_DATE_TIME_FORMATTERS =
                new FormatDateTimeFormatter[]{
                        DateFieldMapper.Defaults.DATE_TIME_FORMATTER,
                        Joda.getStrictStandardDateFormatter()
                };
        public static final boolean DATE_DETECTION = true;
        public static final boolean NUMERIC_DETECTION = false;
    }

    /** Builder collecting root-level settings before constructing the mapper. */
    public static class Builder extends ObjectMapper.Builder<Builder, RootObjectMapper> {

        protected final List<DynamicTemplate> dynamicTemplates = new ArrayList<>();

        // we use this to filter out seen date formats, because we might get duplicates during merging
        protected Set<String> seenDateFormats = new HashSet<>();
        // null is significant here: it means "no dynamic date formats at all"
        // (see noDynamicDateTimeFormatter()), whereas empty means "use defaults".
        protected List<FormatDateTimeFormatter> dynamicDateTimeFormatters = new ArrayList<>();

        protected boolean dateDetection = Defaults.DATE_DETECTION;
        protected boolean numericDetection = Defaults.NUMERIC_DETECTION;

        public Builder(String name) {
            super(name);
            this.builder = this;
        }

        /** Disables dynamic date formats entirely (mapping value {@code "none"}). */
        public Builder noDynamicDateTimeFormatter() {
            this.dynamicDateTimeFormatters = null;
            return builder;
        }

        /**
         * Adds the given formatters, skipping any whose format string was
         * already added (duplicates can appear during merging).
         */
        public Builder dynamicDateTimeFormatter(Iterable<FormatDateTimeFormatter> dateTimeFormatters) {
            for (FormatDateTimeFormatter dateTimeFormatter : dateTimeFormatters) {
                if (!seenDateFormats.contains(dateTimeFormatter.format())) {
                    seenDateFormats.add(dateTimeFormatter.format());
                    this.dynamicDateTimeFormatters.add(dateTimeFormatter);
                }
            }
            return builder;
        }

        /** Appends a single dynamic template (order is preserved). */
        public Builder add(DynamicTemplate dynamicTemplate) {
            this.dynamicTemplates.add(dynamicTemplate);
            return this;
        }

        /** Appends several dynamic templates (order is preserved). */
        public Builder add(DynamicTemplate... dynamicTemplate) {
            for (DynamicTemplate template : dynamicTemplate) {
                this.dynamicTemplates.add(template);
            }
            return this;
        }

        @Override
        protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, Map<String, Mapper> mappers, @Nullable Settings settings) {
            // The root object can never be nested.
            assert !nested.isNested();
            FormatDateTimeFormatter[] dates = null;
            if (dynamicDateTimeFormatters == null) {
                // explicitly disabled via noDynamicDateTimeFormatter()
                dates = new FormatDateTimeFormatter[0];
            } else if (dynamicDateTimeFormatters.isEmpty()) {
                // add the default one
                dates = Defaults.DYNAMIC_DATE_TIME_FORMATTERS;
            } else {
                dates = dynamicDateTimeFormatters.toArray(new FormatDateTimeFormatter[dynamicDateTimeFormatters.size()]);
            }
            return new RootObjectMapper(name, enabled, dynamic, mappers, dates,
                    dynamicTemplates.toArray(new DynamicTemplate[dynamicTemplates.size()]),
                    dateDetection, numericDetection);
        }
    }

    /**
     * Parses root-object mappings. Extends the plain object parser with the
     * root-only fields handled in {@link #processField}.
     */
    public static class TypeParser extends ObjectMapper.TypeParser {

        @Override
        protected ObjectMapper.Builder createBuilder(String name) {
            return new Builder(name);
        }

        @Override
        public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
            ObjectMapper.Builder builder = createBuilder(name);
            Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator();
            while (iterator.hasNext()) {
                Map.Entry<String, Object> entry = iterator.next();
                String fieldName = Strings.toUnderscoreCase(entry.getKey());
                Object fieldNode = entry.getValue();
                // Recognized entries are removed from the node map so that
                // leftover (unrecognized) entries can be detected by callers.
                if (parseObjectOrDocumentTypeProperties(fieldName, fieldNode, parserContext, builder)
                        || processField(builder, fieldName, fieldNode)) {
                    iterator.remove();
                }
            }
            return builder;
        }

        /**
         * Handles the root-only mapping fields; returns {@code true} if the
         * field was recognized and consumed.
         */
        protected boolean processField(ObjectMapper.Builder builder, String fieldName, Object fieldNode) {
            if (fieldName.equals("date_formats") || fieldName.equals("dynamic_date_formats")) {
                List<FormatDateTimeFormatter> dateTimeFormatters = new ArrayList<>();
                if (fieldNode instanceof List) {
                    for (Object node1 : (List) fieldNode) {
                        // epoch-based formats are rejected as dynamic date formats
                        if (node1.toString().startsWith("epoch_")) {
                            throw new MapperParsingException("Epoch ["+ node1.toString() +"] is not supported as dynamic date format");
                        }
                        dateTimeFormatters.add(parseDateTimeFormatter(node1));
                    }
                } else if ("none".equals(fieldNode.toString())) {
                    // "none" disables dynamic date formats entirely
                    dateTimeFormatters = null;
                } else {
                    dateTimeFormatters.add(parseDateTimeFormatter(fieldNode));
                }
                if (dateTimeFormatters == null) {
                    ((Builder) builder).noDynamicDateTimeFormatter();
                } else {
                    ((Builder) builder).dynamicDateTimeFormatter(dateTimeFormatters);
                }
                return true;
            } else if (fieldName.equals("dynamic_templates")) {
                //  "dynamic_templates" : [
                //      {
                //          "template_1" : {
                //              "match" : "*_test",
                //              "match_mapping_type" : "string",
                //              "mapping" : { "type" : "string", "store" : "yes" }
                //          }
                //      }
                //  ]
                List tmplNodes = (List) fieldNode;
                for (Object tmplNode : tmplNodes) {
                    Map<String, Object> tmpl = (Map<String, Object>) tmplNode;
                    // each list element must be a single-entry map: { name : template }
                    if (tmpl.size() != 1) {
                        throw new MapperParsingException("A dynamic template must be defined with a name");
                    }
                    Map.Entry<String, Object> entry = tmpl.entrySet().iterator().next();
                    ((Builder) builder).add(DynamicTemplate.parse(entry.getKey(), (Map<String, Object>) entry.getValue()));
                }
                return true;
            } else if (fieldName.equals("date_detection")) {
                ((Builder) builder).dateDetection = lenientNodeBooleanValue(fieldNode);
                return true;
            } else if (fieldName.equals("numeric_detection")) {
                ((Builder) builder).numericDetection = lenientNodeBooleanValue(fieldNode);
                return true;
            }
            return false;
        }
    }

    private final FormatDateTimeFormatter[] dynamicDateTimeFormatters;

    private final boolean dateDetection;
    private final boolean numericDetection;

    // volatile: replaced wholesale by doMerge()/mappingUpdate(); readers see a
    // consistent array reference without locking.
    private volatile DynamicTemplate dynamicTemplates[];

    RootObjectMapper(String name, boolean enabled, Dynamic dynamic, Map<String, Mapper> mappers,
                     FormatDateTimeFormatter[] dynamicDateTimeFormatters, DynamicTemplate dynamicTemplates[], boolean dateDetection, boolean numericDetection) {
        super(name, name, enabled, Nested.NO, dynamic, mappers);
        this.dynamicTemplates = dynamicTemplates;
        this.dynamicDateTimeFormatters = dynamicDateTimeFormatters;
        this.dateDetection = dateDetection;
        this.numericDetection = numericDetection;
    }

    @Override
    public ObjectMapper mappingUpdate(Mapper mapper) {
        RootObjectMapper update = (RootObjectMapper) super.mappingUpdate(mapper);
        // dynamic templates are irrelevant for dynamic mappings updates
        update.dynamicTemplates = new DynamicTemplate[0];
        return update;
    }

    /** Whether unmapped string fields are probed as dates. */
    public boolean dateDetection() {
        return this.dateDetection;
    }

    /** Whether unmapped string fields are probed as numbers. */
    public boolean numericDetection() {
        return this.numericDetection;
    }

    /** The dynamic date formats in effect for this root mapper. */
    public FormatDateTimeFormatter[] dynamicDateTimeFormatters() {
        return dynamicDateTimeFormatters;
    }

    /** Convenience overload: match type equals the dynamic type. */
    public Mapper.Builder findTemplateBuilder(ParseContext context, String name, String dynamicType) {
        return findTemplateBuilder(context, name, dynamicType, dynamicType);
    }

    /**
     * Finds a dynamic template matching the given field and builds a mapper
     * builder from it, or returns {@code null} when no template matches.
     *
     * @throws MapperParsingException if the template names a mapping type
     *         with no registered type parser
     */
    public Mapper.Builder findTemplateBuilder(ParseContext context, String name, String dynamicType, String matchType) {
        DynamicTemplate dynamicTemplate = findTemplate(context.path(), name, matchType);
        if (dynamicTemplate == null) {
            return null;
        }
        Mapper.TypeParser.ParserContext parserContext = context.docMapperParser().parserContext(name);
        String mappingType = dynamicTemplate.mappingType(dynamicType);
        Mapper.TypeParser typeParser = parserContext.typeParser(mappingType);
        if (typeParser == null) {
            throw new MapperParsingException("failed to find type parsed [" + mappingType + "] for [" + name + "]");
        }
        return typeParser.parse(name, dynamicTemplate.mappingForName(name, dynamicType), parserContext);
    }

    /** Returns the first template (in declaration order) that matches, or null. */
    public DynamicTemplate findTemplate(ContentPath path, String name, String matchType) {
        for (DynamicTemplate dynamicTemplate : dynamicTemplates) {
            if (dynamicTemplate.match(path, name, matchType)) {
                return dynamicTemplate;
            }
        }
        return null;
    }

    @Override
    public RootObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) {
        return (RootObjectMapper) super.merge(mergeWith, updateAllTypes);
    }

    @Override
    protected void doMerge(ObjectMapper mergeWith, boolean updateAllTypes) {
        super.doMerge(mergeWith, updateAllTypes);
        RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith;
        // merge them
        // Incoming templates replace same-named existing ones in place
        // (order preserved); new names are appended.
        List<DynamicTemplate> mergedTemplates = new ArrayList<>(Arrays.asList(this.dynamicTemplates));
        for (DynamicTemplate template : mergeWithObject.dynamicTemplates) {
            boolean replaced = false;
            for (int i = 0; i < mergedTemplates.size(); i++) {
                if (mergedTemplates.get(i).name().equals(template.name())) {
                    mergedTemplates.set(i, template);
                    replaced = true;
                }
            }
            if (!replaced) {
                mergedTemplates.add(template);
            }
        }
        this.dynamicTemplates = mergedTemplates.toArray(new DynamicTemplate[mergedTemplates.size()]);
    }

    @Override
    public RootObjectMapper updateFieldType(Map<String, MappedFieldType> fullNameToFieldType) {
        return (RootObjectMapper) super.updateFieldType(fullNameToFieldType);
    }

    @Override
    protected void doXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
        // Identity (not equals) comparison against the shared default array:
        // only non-default formats are serialized.
        if (dynamicDateTimeFormatters != Defaults.DYNAMIC_DATE_TIME_FORMATTERS) {
            if (dynamicDateTimeFormatters.length > 0) {
                builder.startArray("dynamic_date_formats");
                for (FormatDateTimeFormatter dateTimeFormatter : dynamicDateTimeFormatters) {
                    builder.value(dateTimeFormatter.format());
                }
                builder.endArray();
            }
        }

        if (dynamicTemplates != null && dynamicTemplates.length > 0) {
            builder.startArray("dynamic_templates");
            for (DynamicTemplate dynamicTemplate : dynamicTemplates) {
                builder.startObject();
                builder.field(dynamicTemplate.name());
                builder.map(dynamicTemplate.conf());
                builder.endObject();
            }
            builder.endArray();
        }

        if (dateDetection != Defaults.DATE_DETECTION) {
            builder.field("date_detection", dateDetection);
        }
        if (numericDetection != Defaults.NUMERIC_DETECTION) {
            builder.field("numeric_detection", numericDetection);
        }
    }
}
apache-2.0
mohanaraosv/commons-jxpath
src/test/org/apache/commons/jxpath/ri/axes/SimplePathInterpreterTest.java
22549
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.commons.jxpath.ri.axes;

import java.util.HashMap;

import junit.framework.TestCase;

import org.apache.commons.jxpath.JXPathContext;
import org.apache.commons.jxpath.NestedTestBean;
import org.apache.commons.jxpath.Pointer;
import org.apache.commons.jxpath.TestNull;
import org.apache.commons.jxpath.ri.model.NodePointer;
import org.apache.commons.jxpath.ri.model.VariablePointer;
import org.apache.commons.jxpath.ri.model.beans.BeanPointer;
import org.apache.commons.jxpath.ri.model.beans.BeanPropertyPointer;
import org.apache.commons.jxpath.ri.model.beans.CollectionPointer;
import org.apache.commons.jxpath.ri.model.beans.NullElementPointer;
import org.apache.commons.jxpath.ri.model.beans.NullPointer;
import org.apache.commons.jxpath.ri.model.beans.NullPropertyPointer;
import org.apache.commons.jxpath.ri.model.beans.TestBeanFactory;
import org.apache.commons.jxpath.ri.model.dom.DOMNodePointer;
import org.apache.commons.jxpath.ri.model.dynamic.DynamicPointer;
import org.apache.commons.jxpath.ri.model.dynamic.DynamicPropertyPointer;

/**
 * Regression tests for {@code SimplePathInterpreter}. Each assertion
 * evaluates an XPath against a fixed bean/DOM fixture and checks three
 * things: the resulting value, the canonical pointer path, and a
 * "signature" string encoding the pointer chain's types — one character
 * per pointer, as defined by {@link #pointerSignature(Pointer)}
 * (e.g. 'B' = BeanPointer, 'b' = BeanPropertyPointer, 'D'/'d' = dynamic,
 * 'M' = DOM node, 'N'/'n'/'E' = the null-pointer variants, 'V' = variable,
 * 'C' = collection).
 */
public class SimplePathInterpreterTest extends TestCase {

    // Fixture: a bean that also carries a DOM "vendor" node.
    private TestBeanWithNode bean;
    private JXPathContext context;

    /**
     * Builds the shared fixture: a bean with nested beans, collections and
     * maps (Key3..Key6 exercising arrays, nodes, sub-maps and an empty
     * array), evaluated in a lenient context with a test bean factory.
     */
    protected void setUp() throws Exception {
        bean = TestBeanWithNode.createTestBeanWithDOM();
        HashMap submap = new HashMap();
        submap.put("key", new NestedTestBean("Name 9"));
        submap.put("strings", bean.getNestedBean().getStrings());
        bean.getList().add(new int[]{1, 2});
        bean.getList().add(bean.getVendor());
        bean.getMap().put("Key3",
            new Object[]{
                new NestedTestBean("some"),
                new Integer(2),
                bean.getVendor(),
                submap
            }
        );
        bean.getMap().put("Key4", bean.getVendor());
        bean.getMap().put("Key5", submap);
        bean.getMap().put("Key6", new Object[0]);
        context = JXPathContext.newContext(null, bean);
        context.setLenient(true);
        context.setFactory(new TestBeanFactory());
    }

    /** Steps without predicates over bean/map property owners. */
    public void testDoStepNoPredicatesPropertyOwner() {
        // Existing scalar property
        assertValueAndPointer("/int", new Integer(1), "/int", "Bb", "BbB");
        // self::
        assertValueAndPointer("/./int", new Integer(1), "/int", "Bb", "BbB");
        // Missing property
        assertNullPointer("/foo", "/foo", "Bn");
        // existingProperty/existingScalarProperty
        assertValueAndPointer("/nestedBean/int", new Integer(1), "/nestedBean/int", "BbBb", "BbBbB");
        // existingProperty/collectionProperty
        assertValueAndPointer("/nestedBean/strings", bean.getNestedBean().getStrings(), "/nestedBean/strings", "BbBb", "BbBbC");
        // existingProperty/missingProperty
        assertNullPointer("/nestedBean/foo", "/nestedBean/foo", "BbBn");
        // map/missingProperty
        assertNullPointer("/map/foo", "/map[@name='foo']", "BbDd");
        // Existing property by search in collection
        assertValueAndPointer("/list/int", new Integer(1), "/list[3]/int", "BbBb", "BbBbB");
        // Missing property by search in collection
        assertNullPointer("/list/foo", "/list[1]/foo", "BbBn");
        // existingProperty/missingProperty/missingProperty
        assertNullPointer("/nestedBean/foo/bar", "/nestedBean/foo/bar", "BbBnNn");
        // collection/existingProperty/missingProperty
        assertNullPointer("/list/int/bar", "/list[3]/int/bar", "BbBbBn");
        // collectionProperty/missingProperty/missingProperty
        assertNullPointer("/list/foo/bar", "/list[1]/foo/bar", "BbBnNn");
        // map/missingProperty/anotherStep
        assertNullPointer("/map/foo/bar", "/map[@name='foo']/bar", "BbDdNn");
        // Existing dynamic property
        assertValueAndPointer("/map/Key1", "Value 1", "/map[@name='Key1']", "BbDd", "BbDdB");
        // collectionProperty
        assertValueAndPointer("/integers", bean.getIntegers(), "/integers", "Bb", "BbC");
    }

    /** Steps without predicates over standard (DOM) nodes. */
    public void testDoStepNoPredicatesStandard() {
        // Existing DOM node
        assertValueAndPointer("/vendor/location/address/city", "Fruit Market", "/vendor/location[2]/address[1]/city[1]", "BbMMMM");
        // Missing DOM node
        assertNullPointer("/vendor/location/address/pity", "/vendor/location[1]/address[1]/pity", "BbMMMn");
        // Missing DOM node inside a missing element
        assertNullPointer("/vendor/location/address/itty/bitty", "/vendor/location[1]/address[1]/itty/bitty", "BbMMMnNn");
        // Missing DOM node by search for the best match
        assertNullPointer("/vendor/location/address/city/pretty", "/vendor/location[2]/address[1]/city[1]/pretty", "BbMMMMn");
    }

    /** Predicated steps over bean property owners. */
    public void testDoStepPredicatesPropertyOwner() {
        // missingProperty[@name=foo]
        assertNullPointer("/foo[@name='foo']", "/foo[@name='foo']", "BnNn");
        // missingProperty[index]
        assertNullPointer("/foo[3]", "/foo[3]", "Bn");
    }

    /** Predicated steps over standard (DOM) nodes. */
    public void testDoStepPredicatesStandard() {
        // Looking for an actual XML attribute called "name"
        // nodeProperty/name[@name=value]
        assertValueAndPointer("/vendor/contact[@name='jack']", "Jack", "/vendor/contact[2]", "BbMM");
        // Indexing in XML
        assertValueAndPointer("/vendor/contact[2]", "Jack", "/vendor/contact[2]", "BbMM");
        // Indexing in XML, no result
        assertNullPointer("/vendor/contact[5]", "/vendor/contact[5]", "BbMn");
        // Combination of search by name and indexing in XML
        assertValueAndPointer("/vendor/contact[@name='jack'][2]", "Jack Black", "/vendor/contact[4]", "BbMM");
        // Combination of search by name and indexing in XML
        assertValueAndPointer("/vendor/contact[@name='jack'][2]", "Jack Black", "/vendor/contact[4]", "BbMM");
    }

    /** The [@name=...] predicate across beans, maps, collections and nodes. */
    public void testDoPredicateName() {
        // existingProperty[@name=existingProperty]
        assertValueAndPointer("/nestedBean[@name='int']", new Integer(1), "/nestedBean/int", "BbBb", "BbBbB");
        // /self::node()[@name=existingProperty]
        assertValueAndPointer("/.[@name='int']", new Integer(1), "/int", "Bb", "BbB");
        // dynamicProperty[@name=existingProperty]
        assertValueAndPointer("/map[@name='Key1']", "Value 1", "/map[@name='Key1']", "BbDd", "BbDdB");
        // existingProperty[@name=collectionProperty]
        assertValueAndPointer("/nestedBean[@name='strings']", bean.getNestedBean().getStrings(), "/nestedBean/strings", "BbBb", "BbBbC");
        // existingProperty[@name=missingProperty]
        assertNullPointer("/nestedBean[@name='foo']", "/nestedBean[@name='foo']", "BbBn");
        // map[@name=collectionProperty]
        assertValueAndPointer("/map[@name='Key3']", bean.getMap().get("Key3"), "/map[@name='Key3']", "BbDd", "BbDdC");
        // map[@name=missingProperty]
        assertNullPointer("/map[@name='foo']", "/map[@name='foo']", "BbDd");
        // collectionProperty[@name=...] (find node)
        assertValueAndPointer("/list[@name='fruitco']", context.getValue("/vendor"), "/list[5]", "BbCM");
        // collectionProperty[@name=...] (find map entry)
        assertValueAndPointer("/map/Key3[@name='key']/name", "Name 9", "/map[@name='Key3'][4][@name='key']/name", "BbDdCDdBb", "BbDdCDdBbB");
        // map/collectionProperty[@name...]
        assertValueAndPointer("map/Key3[@name='fruitco']", context.getValue("/vendor"), "/map[@name='Key3'][3]", "BbDdCM");
        // Bean property -> DOM Node, name match
        assertValueAndPointer("/vendor[@name='fruitco']", context.getValue("/vendor"), "/vendor", "BbM");
        // Bean property -> DOM Node, name mismatch
        assertNullPointer("/vendor[@name='foo']", "/vendor[@name='foo']", "BbMn");
        assertNullPointer("/vendor[@name='foo'][3]", "/vendor[@name='foo'][3]", "BbMn");
        // existingProperty(bean)[@name=missingProperty]/anotherStep
        assertNullPointer("/nestedBean[@name='foo']/bar", "/nestedBean[@name='foo']/bar", "BbBnNn");
        // map[@name=missingProperty]/anotherStep
        assertNullPointer("/map[@name='foo']/bar", "/map[@name='foo']/bar", "BbDdNn");
        // existingProperty(node)[@name=missingProperty]/anotherStep
        assertNullPointer("/vendor[@name='foo']/bar", "/vendor[@name='foo']/bar", "BbMnNn");
        // existingProperty(node)[@name=missingProperty][index]/anotherStep
        assertNullPointer("/vendor[@name='foo'][3]/bar", "/vendor[@name='foo'][3]/bar", "BbMnNn");
        // Existing dynamic property + existing property
        assertValueAndPointer("/map[@name='Key2'][@name='name']", "Name 6", "/map[@name='Key2']/name", "BbDdBb", "BbDdBbB");
        // Existing dynamic property + existing property + index
        assertValueAndPointer("/map[@name='Key2'][@name='strings'][2]", "String 2", "/map[@name='Key2']/strings[2]", "BbDdBb", "BbDdBbB");
        // bean/map/map/property
        assertValueAndPointer("map[@name='Key5'][@name='key']/name", "Name 9", "/map[@name='Key5'][@name='key']/name", "BbDdDdBb", "BbDdDdBbB");
        assertNullPointer("map[@name='Key2'][@name='foo']", "/map[@name='Key2'][@name='foo']", "BbDdBn");
        assertNullPointer("map[@name='Key2'][@name='foo'][@name='bar']", "/map[@name='Key2'][@name='foo'][@name='bar']", "BbDdBnNn");
        // bean/map/node
        assertValueAndPointer("map[@name='Key4'][@name='fruitco']", context.getValue("/vendor"), "/map[@name='Key4']", "BbDdM");
    }

    /** Predicates resolved against standard (DOM) nodes inside maps/collections. */
    public void testDoPredicatesStandard() {
        // bean/map/collection/node
        assertValueAndPointer("map[@name='Key3'][@name='fruitco']", context.getValue("/vendor"), "/map[@name='Key3'][3]", "BbDdCM");
        // bean/map/collection/missingNode
        assertNullPointer("map[@name='Key3'][@name='foo']", "/map[@name='Key3'][4][@name='foo']", "BbDdCDd");
        // bean/map/node
        assertValueAndPointer("map[@name='Key4'][@name='fruitco']", context.getValue("/vendor"), "/map[@name='Key4']", "BbDdM");
        // bean/map/emptyCollection[@name=foo]
        assertNullPointer("map[@name='Key6'][@name='fruitco']", "/map[@name='Key6'][@name='fruitco']", "BbDdCn");
        // bean/node[@name=foo][index]
        assertValueAndPointer("/vendor/contact[@name='jack'][2]", "Jack Black", "/vendor/contact[4]", "BbMM");
        // bean/node[@name=foo][missingIndex]
        assertNullPointer("/vendor/contact[@name='jack'][5]", "/vendor/contact[@name='jack'][5]", "BbMnNn");
        // bean/node/.[@name=foo][index]
        assertValueAndPointer("/vendor/contact/.[@name='jack']", "Jack", "/vendor/contact[2]", "BbMM");
    }

    /** The [index] predicate, alone and combined with [@name=...]. */
    public void testDoPredicateIndex() {
        // Existing dynamic property + existing property + index
        assertValueAndPointer("/map[@name='Key2'][@name='strings'][2]", "String 2", "/map[@name='Key2']/strings[2]", "BbDdBb", "BbDdBbB");
        // existingProperty[@name=collectionProperty][index]
        assertValueAndPointer("/nestedBean[@name='strings'][2]", bean.getNestedBean().getStrings()[1], "/nestedBean/strings[2]", "BbBb", "BbBbB");
        // existingProperty[@name=missingProperty][index]
        assertNullPointer("/nestedBean[@name='foo'][3]", "/nestedBean[@name='foo'][3]", "BbBn");
        // existingProperty[@name=collectionProperty][missingIndex]
        assertNullPointer("/nestedBean[@name='strings'][5]", "/nestedBean/strings[5]", "BbBbE");
        // map[@name=collectionProperty][index]
        assertValueAndPointer("/map[@name='Key3'][2]", new Integer(2), "/map[@name='Key3'][2]", "BbDd", "BbDdB");
        // map[@name=collectionProperty][missingIndex]
        assertNullPointer("/map[@name='Key3'][5]", "/map[@name='Key3'][5]", "BbDdE");
        // map[@name=collectionProperty][missingIndex]/property
        assertNullPointer("/map[@name='Key3'][5]/foo", "/map[@name='Key3'][5]/foo", "BbDdENn");
        // map[@name=map][@name=collection][index]
        assertValueAndPointer("/map[@name='Key5'][@name='strings'][2]", "String 2", "/map[@name='Key5'][@name='strings'][2]", "BbDdDd", "BbDdDdB");
        // map[@name=map][@name=collection][missingIndex]
        assertNullPointer("/map[@name='Key5'][@name='strings'][5]", "/map[@name='Key5'][@name='strings'][5]", "BbDdDdE");
        // Existing dynamic property + indexing
        assertValueAndPointer("/map[@name='Key3'][2]", new Integer(2), "/map[@name='Key3'][2]", "BbDd", "BbDdB");
        // Existing dynamic property + indexing
        assertValueAndPointer("/map[@name='Key3'][1]/name", "some", "/map[@name='Key3'][1]/name", "BbDdBb", "BbDdBbB");
        // map[@name=missingProperty][index]
        assertNullPointer("/map[@name='foo'][3]", "/map[@name='foo'][3]", "BbDdE");
        // collectionProperty[index]
        assertValueAndPointer("/integers[2]", new Integer(2), "/integers[2]", "Bb", "BbB");
        // existingProperty/collectionProperty[index]
        assertValueAndPointer("/nestedBean/strings[2]", bean.getNestedBean().getStrings()[1], "/nestedBean/strings[2]", "BbBb", "BbBbB");
        // existingProperty[index]/existingProperty
        assertValueAndPointer("/list[3]/int", new Integer(1), "/list[3]/int", "BbBb", "BbBbB");
        // existingProperty[missingIndex]
        assertNullPointer("/list[6]", "/list[6]", "BbE");
        // existingProperty/missingProperty[index]
        assertNullPointer("/nestedBean/foo[3]", "/nestedBean/foo[3]", "BbBn");
        // map[@name=missingProperty][index]
        assertNullPointer("/map/foo[3]", "/map[@name='foo'][3]", "BbDdE");
        // existingProperty/collectionProperty[missingIndex]
        assertNullPointer("/nestedBean/strings[5]", "/nestedBean/strings[5]", "BbBbE");
        // map/collectionProperty[missingIndex]/property
        assertNullPointer("/map/Key3[5]/foo", "/map[@name='Key3'][5]/foo", "BbDdENn");
        // map[@name=map]/collection[index]
        assertValueAndPointer("/map[@name='Key5']/strings[2]", "String 2", "/map[@name='Key5'][@name='strings'][2]", "BbDdDd", "BbDdDdB");
        // map[@name=map]/collection[missingIndex]
        assertNullPointer("/map[@name='Key5']/strings[5]", "/map[@name='Key5'][@name='strings'][5]", "BbDdDdE");
        // scalarPropertyAsCollection[index]
        assertValueAndPointer("/int[1]", new Integer(1), "/int", "Bb", "BbB");
        // scalarPropertyAsCollection[index]
        assertValueAndPointer(".[1]/int", new Integer(1), "/int", "Bb", "BbB");
    }

    /** Paths rooted in a variable (expression paths). */
    public void testInterpretExpressionPath() {
        context.getVariables().declareVariable("array", new String[]{"Value1"});
        context.getVariables().declareVariable("testnull", new TestNull());
        assertNullPointer("$testnull/nothing[2]", "$testnull/nothing[2]", "VBbE");
    }

    /**
     * Asserts value, pointer path and signature for a path whose value
     * pointer has the same signature as the pointer itself.
     */
    private void assertValueAndPointer(
            String path, Object expectedValue, String expectedPath,
            String expectedSignature) {
        assertValueAndPointer(
            path, expectedValue, expectedPath, expectedSignature,
            expectedSignature);
    }

    /**
     * Asserts the evaluated value of {@code path}, the canonical string of
     * its pointer, the pointer-chain signature, and the signature of the
     * pointer's value pointer.
     */
    private void assertValueAndPointer(
            String path, Object expectedValue, String expectedPath,
            String expectedSignature, String expectedValueSignature) {
        Object value = context.getValue(path);
        assertEquals("Checking value: " + path, expectedValue, value);

        Pointer pointer = context.getPointer(path);
        assertEquals("Checking pointer: " + path, expectedPath, pointer.toString());

        assertEquals("Checking signature: " + path, expectedSignature, pointerSignature(pointer));

        Pointer vPointer = ((NodePointer) pointer).getValuePointer();
        assertEquals("Checking value pointer signature: " + path, expectedValueSignature, pointerSignature(vPointer));
    }

    /**
     * Asserts that {@code path} resolves to a non-actual (null) pointer with
     * the expected canonical path and signature; the value pointer's
     * signature must be the pointer's signature plus a trailing 'N'.
     */
    private void assertNullPointer(String path, String expectedPath, String expectedSignature) {
        Pointer pointer = context.getPointer(path);
        assertNotNull("Null path exists: " + path, pointer);
        assertEquals("Null path as path: " + path, expectedPath, pointer.asPath());
        assertEquals("Checking Signature: " + path, expectedSignature, pointerSignature(pointer));

        Pointer vPointer = ((NodePointer) pointer).getValuePointer();
        assertTrue("Null path is null: " + path, !((NodePointer) vPointer).isActual());
        assertEquals("Checking value pointer signature: " + path, expectedSignature + "N", pointerSignature(vPointer));
    }

    /**
     * Since we need to test the internal Signature of a pointer,
     * we will get a signature which will contain a single character
     * per pointer in the chain, representing that pointer's type.
     */
    private String pointerSignature(Pointer pointer) {
        if (pointer == null) {
            return "";
        }

        char type = '?';
        if (pointer instanceof NullPointer) { type = 'N'; }
        else if (pointer instanceof NullPropertyPointer) { type = 'n'; }
        else if (pointer instanceof NullElementPointer) { type = 'E'; }
        else if (pointer instanceof VariablePointer) { type = 'V'; }
        else if (pointer instanceof CollectionPointer) { type = 'C'; }
        else if (pointer instanceof BeanPointer) { type = 'B'; }
        else if (pointer instanceof BeanPropertyPointer) { type = 'b'; }
        else if (pointer instanceof DynamicPointer) { type = 'D'; }
        else if (pointer instanceof DynamicPropertyPointer) { type = 'd'; }
        else if (pointer instanceof DOMNodePointer) { type = 'M'; }
        else {
            System.err.println("UNKNOWN TYPE: " + pointer.getClass());
        }
        NodePointer parent = ((NodePointer) pointer).getImmediateParentPointer();
        return pointerSignature(parent) + type;
    }
}
apache-2.0
vladmunthiu/dasein-cloud-core-GR-fork
src/main/java/org/dasein/cloud/network/RawAddress.java
3858
/**
 * Copyright (C) 2009-2014 Dell, Inc.
 * See annotations for authorship information
 *
 * ====================================================================
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * ====================================================================
 */

package org.dasein.cloud.network;

import java.util.Locale;

import javax.annotation.Nonnull;
import javax.annotation.Nullable;

/**
 * A raw IP address with basic address information.
 * <p>Created by George Reese: 1/9/13 10:18 AM</p>
 * @author George Reese
 * @version 2013.02 initial version (issue #38)
 * @since 2013.02
 */
public class RawAddress {
    private String ipAddress;
    private IPVersion version;

    @SuppressWarnings("UnusedDeclaration") private RawAddress() { }

    /**
     * Constructs a new raw addressed based on the specified IP address string and guesses at the version.
     * The guess is purely structural: four dot-separated groups are assumed to be IPv4, anything else IPv6.
     * @param ipAddress the IP address string for the address
     */
    public RawAddress(@Nonnull String ipAddress) {
        this.ipAddress = ipAddress;

        String[] tmp = ipAddress.split("\\.");

        if( tmp.length == 4 ) {
            version = IPVersion.IPV4;
        }
        else {
            version = IPVersion.IPV6;
        }
    }

    /**
     * Constructs a new raw address based on the specified IP address string and version.
     * @param ipAddress the IP address string of the address
     * @param version the IP version associated with the address
     */
    public RawAddress(@Nonnull String ipAddress, @Nonnull IPVersion version) {
        this.ipAddress = ipAddress;
        this.version = version;
    }

    /**
     * Two addresses are equal when they are the same runtime class and their
     * address strings match ignoring case (IPv6 hex digits may differ in case).
     */
    @Override
    public boolean equals(@Nullable Object other) {
        return other != null && (other == this || other.getClass().getName().equals(getClass().getName()) && ipAddress.equalsIgnoreCase(((RawAddress) other).ipAddress));
    }

    /**
     * @return the IP address string
     */
    public @Nonnull String getIpAddress() {
        return ipAddress;
    }

    /**
     * @return the IP version associated with this address
     */
    public @Nonnull IPVersion getVersion() {
        return version;
    }

    @Override
    public int hashCode() {
        // BUG FIX: equals() compares addresses case-insensitively, so the hash
        // must be computed on a canonical (lower-case) form. Hashing the raw
        // string made "FC00::1" and "fc00::1" equal but with different hash
        // codes, violating the Object.hashCode() contract and breaking
        // HashSet/HashMap lookups. Locale.ROOT avoids locale-sensitive casing.
        return ipAddress.toLowerCase(Locale.ROOT).hashCode();
    }

    /**
     * Indicates whether this IP address is reserved for private address spaces or likely represents a publicly
     * addressable IP address. Checks RFC 1918 ranges and link-local for IPv4, and fc00::/7 (ULA) for IPv6.
     * @return true if this address is in the publicly addressable address space
     */
    public boolean isPublicIpAddress() {
        if( getVersion().equals(IPVersion.IPV4) ) {
            // NOTE(review): prefix checks assume a well-formed dotted quad;
            // e.g. "192.168" also matches malformed strings like "192.1688..." —
            // inputs are presumably validated upstream, confirm before tightening.
            if( ipAddress.startsWith("10.") || ipAddress.startsWith("192.168") || ipAddress.startsWith("169.254") ) {
                return false;
            }
            else if( ipAddress.startsWith("172.") ) {
                String[] parts = ipAddress.split("\\.");

                if( parts.length != 4 ) {
                    return true;
                }
                int x;

                try {
                    x = Integer.parseInt(parts[1]);
                }
                catch( NumberFormatException ignore ) {
                    // BUG FIX: a malformed second octet (e.g. "172.abc.0.1")
                    // previously threw from this predicate; treat it like the
                    // other malformed cases above and call it public.
                    return true;
                }
                if( x >= 16 && x <= 31 ) {
                    // 172.16.0.0/12 private block
                    return false;
                }
            }
        }
        else {
            // IPv6 unique local addresses: fc00::/7 (checked via the "fc00:"/"fd" prefixes)
            if( ipAddress.startsWith("fd") || ipAddress.startsWith("fc00:")) {
                return false;
            }
        }
        return true;
    }

    @Override
    public @Nonnull String toString() {
        return ipAddress;
    }
}
apache-2.0
Teradata/presto
presto-main/src/main/java/com/facebook/presto/operator/TableFinishInfo.java
2570
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.facebook.presto.operator;

import com.facebook.presto.spi.connector.ConnectorOutputMetadata;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonRawValue;
import com.fasterxml.jackson.databind.JsonNode;
import io.airlift.json.JsonCodec;
import io.airlift.units.DataSize;

import java.util.Optional;

import static io.airlift.json.JsonCodec.jsonCodec;
import static io.airlift.units.DataSize.Unit.MEGABYTE;
import static java.lang.Math.toIntExact;

/**
 * Final {@link OperatorInfo} carrying the connector output metadata produced
 * by a table-finish operation, serialized to JSON. Metadata larger than
 * {@link #JSON_LENGTH_LIMIT} is dropped and flagged instead of serialized.
 */
public class TableFinishInfo
        implements OperatorInfo
{
    // Cap on the serialized metadata size (10 MB) to keep query info bounded.
    private static final int JSON_LENGTH_LIMIT = toIntExact(new DataSize(10, MEGABYTE).toBytes());
    private static final JsonCodec<Object> INFO_CODEC = jsonCodec(Object.class);
    private static final JsonCodec<JsonNode> JSON_NODE_CODEC = jsonCodec(JsonNode.class);

    // Serialized connector metadata JSON; null when metadata was absent or
    // exceeded the length limit. Emitted raw (unquoted) via @JsonRawValue.
    private final String connectorOutputMetadata;
    // True when serialization was abandoned because it exceeded JSON_LENGTH_LIMIT.
    private final boolean jsonLengthLimitExceeded;

    /**
     * Serializes the connector metadata, if present, up to the length limit.
     *
     * @param metadata connector output metadata to serialize, possibly absent
     */
    public TableFinishInfo(Optional<ConnectorOutputMetadata> metadata)
    {
        // Compute into locals so both fields can be final (immutable info object).
        String serialized = null;
        boolean limitExceeded = false;
        if (metadata.isPresent()) {
            Optional<String> serializedMetadata = INFO_CODEC.toJsonWithLengthLimit(metadata.get().getInfo(), JSON_LENGTH_LIMIT);
            if (!serializedMetadata.isPresent()) {
                limitExceeded = true;
            }
            else {
                serialized = serializedMetadata.get();
            }
        }
        this.connectorOutputMetadata = serialized;
        this.jsonLengthLimitExceeded = limitExceeded;
    }

    /**
     * JSON deserialization constructor: re-serializes the raw metadata node
     * back to its JSON string form.
     *
     * NOTE(review): the jsonLengthLimitExceeded flag is not bound here, so it
     * is lost on a serialize/deserialize round trip and always reads false —
     * confirm whether that is intentional before adding a creator property.
     */
    @JsonCreator
    public TableFinishInfo(@JsonProperty("connectorOutputMetadata") JsonNode connectorOutputMetadata)
    {
        this.connectorOutputMetadata = JSON_NODE_CODEC.toJson(connectorOutputMetadata);
        this.jsonLengthLimitExceeded = false;
    }

    @Override
    public boolean isFinal()
    {
        // Table finish info does not change once produced.
        return true;
    }

    /**
     * @return serialized metadata JSON, emitted raw, or null when absent
     */
    @JsonProperty
    @JsonRawValue
    public String getConnectorOutputMetadata()
    {
        return connectorOutputMetadata;
    }

    /**
     * @return true if metadata was dropped for exceeding the JSON length limit
     */
    @JsonProperty
    public boolean isJsonLengthLimitExceeded()
    {
        return jsonLengthLimitExceeded;
    }
}
apache-2.0
bitmybytes/hadoop
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEC.java
13130
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.s3a; import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; import static org.apache.hadoop.fs.contract.ContractTestUtils.rm; import static org.apache.hadoop.fs.s3a.S3ATestUtils.skipIfEncryptionTestsDisabled; import static org.apache.hadoop.test.LambdaTestUtils.intercept; import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.contract.s3a.S3AContract; import org.junit.Test; /** * Concrete class that extends {@link AbstractTestS3AEncryption} * and tests SSE-C encryption. 
*/ public class ITestS3AEncryptionSSEC extends AbstractTestS3AEncryption { @Override protected Configuration createConfiguration() { Configuration conf = super.createConfiguration(); S3ATestUtils.disableFilesystemCaching(conf); conf.set(Constants.SERVER_SIDE_ENCRYPTION_ALGORITHM, getSSEAlgorithm().getMethod()); conf.set(Constants.SERVER_SIDE_ENCRYPTION_KEY, "4niV/jPK5VFRHY+KNb6wtqYd4xXyMgdJ9XQJpcQUVbs="); return conf; } /** * This will create and write to a file using encryption key A, then attempt * to read from it again with encryption key B. This will not work as it * cannot decrypt the file. * * This is expected AWS S3 SSE-C behavior. * * @throws Exception */ @Test public void testCreateFileAndReadWithDifferentEncryptionKey() throws Exception { assumeEnabled(); skipIfEncryptionTestsDisabled(getConfiguration()); final Path[] path = new Path[1]; intercept(java.nio.file.AccessDeniedException.class, "Service: Amazon S3; Status Code: 403;", () -> { int len = 2048; describe("Create an encrypted file of size " + len); String src = createFilename(len); path[0] = writeThenReadFile(src, len); //extract the test FS FileSystem fileSystem = createNewFileSystemWithSSECKey( "kX7SdwVc/1VXJr76kfKnkQ3ONYhxianyL2+C3rPVT9s="); byte[] data = dataset(len, 'a', 'z'); ContractTestUtils.verifyFileContents(fileSystem, path[0], data); throw new Exception("Fail"); }); } /** * While each object has it's own key and should be distinct, this verifies * that hadoop treats object keys as a filesystem path. So if a top level * dir is encrypted with keyA, a sublevel dir cannot be accessed with a * different keyB. * * This is expected AWS S3 SSE-C behavior. 
* * @throws Exception */ @Test public void testCreateSubdirWithDifferentKey() throws Exception { assumeEnabled(); skipIfEncryptionTestsDisabled(getConfiguration()); final Path[] path = new Path[1]; intercept(java.nio.file.AccessDeniedException.class, "Service: Amazon S3; Status Code: 403;", () -> { path[0] = S3ATestUtils.createTestPath( new Path(createFilename("dir/")) ); Path nestedDirectory = S3ATestUtils.createTestPath( new Path(createFilename("dir/nestedDir/")) ); FileSystem fsKeyB = createNewFileSystemWithSSECKey( "G61nz31Q7+zpjJWbakxfTOZW4VS0UmQWAq2YXhcTXoo="); getFileSystem().mkdirs(path[0]); fsKeyB.mkdirs(nestedDirectory); throw new Exception("Exception should be thrown."); }); rm(getFileSystem(), path[0], true, false); } /** * Ensures a file can't be created with keyA and then renamed with a different * key. * * This is expected AWS S3 SSE-C behavior. * * @throws Exception */ @Test public void testCreateFileThenMoveWithDifferentSSECKey() throws Exception { assumeEnabled(); skipIfEncryptionTestsDisabled(getConfiguration()); final Path[] path = new Path[1]; intercept(java.nio.file.AccessDeniedException.class, "Service: Amazon S3; Status Code: 403;", () -> { int len = 2048; String src = createFilename(len); path[0] = writeThenReadFile(src, len); FileSystem fsKeyB = createNewFileSystemWithSSECKey( "NTx0dUPrxoo9+LbNiT/gqf3z9jILqL6ilismFmJO50U="); fsKeyB.rename(path[0], new Path(createFilename("different-path.txt"))); throw new Exception("Exception should be thrown."); }); } /** * General test to make sure move works with SSE-C with the same key, unlike * with multiple keys. 
* * @throws Exception */ @Test public void testRenameFile() throws Exception { assumeEnabled(); skipIfEncryptionTestsDisabled(getConfiguration()); String src = createFilename("original-path.txt"); Path path = writeThenReadFile(src, 2048); Path newPath = path(createFilename("different-path.txt")); getFileSystem().rename(path, newPath); byte[] data = dataset(2048, 'a', 'z'); ContractTestUtils.verifyFileContents(getFileSystem(), newPath, data); } /** * It is possible to list the contents of a directory up to the actual * end of the nested directories. This is due to how S3A mocks the * directories and how prefixes work in S3. * @throws Exception */ @Test public void testListEncryptedDir() throws Exception { assumeEnabled(); skipIfEncryptionTestsDisabled(getConfiguration()); Path nestedDirectory = S3ATestUtils.createTestPath( path(createFilename("/a/b/c/")) ); assertTrue(getFileSystem().mkdirs(nestedDirectory)); FileSystem fsKeyB = createNewFileSystemWithSSECKey( "msdo3VvvZznp66Gth58a91Hxe/UpExMkwU9BHkIjfW8="); fsKeyB.listFiles(S3ATestUtils.createTestPath( path(createFilename("/a/")) ), true); fsKeyB.listFiles(S3ATestUtils.createTestPath( path(createFilename("/a/b/")) ), true); //Until this point, no exception is thrown about access intercept(java.nio.file.AccessDeniedException.class, "Service: Amazon S3; Status Code: 403;", () -> { fsKeyB.listFiles(S3ATestUtils.createTestPath( path(createFilename("/a/b/c/")) ), false); throw new Exception("Exception should be thrown."); }); Configuration conf = this.createConfiguration(); conf.unset(Constants.SERVER_SIDE_ENCRYPTION_ALGORITHM); conf.unset(Constants.SERVER_SIDE_ENCRYPTION_KEY); S3AContract contract = (S3AContract) createContract(conf); contract.init(); FileSystem unencryptedFileSystem = contract.getTestFileSystem(); //unencrypted can access until the final directory unencryptedFileSystem.listFiles(S3ATestUtils.createTestPath( path(createFilename("/a/")) ), true); 
unencryptedFileSystem.listFiles(S3ATestUtils.createTestPath( path(createFilename("/a/b/")) ), true); intercept(org.apache.hadoop.fs.s3a.AWSS3IOException.class, "Bad Request (Service: Amazon S3; Status Code: 400; Error" + " Code: 400 Bad Request;", () -> { unencryptedFileSystem.listFiles(S3ATestUtils.createTestPath( path(createFilename("/a/b/c/")) ), false); throw new Exception("Exception should be thrown."); }); rm(getFileSystem(), path(createFilename("/")), true, false); } /** * Much like the above list encrypted directory test, you cannot get the * metadata of an object without the correct encryption key. * @throws Exception */ @Test public void testListStatusEncryptedDir() throws Exception { assumeEnabled(); skipIfEncryptionTestsDisabled(getConfiguration()); Path nestedDirectory = S3ATestUtils.createTestPath( path(createFilename("/a/b/c/")) ); assertTrue(getFileSystem().mkdirs(nestedDirectory)); FileSystem fsKeyB = createNewFileSystemWithSSECKey( "msdo3VvvZznp66Gth58a91Hxe/UpExMkwU9BHkIjfW8="); fsKeyB.listStatus(S3ATestUtils.createTestPath( path(createFilename("/a/")))); fsKeyB.listStatus(S3ATestUtils.createTestPath( path(createFilename("/a/b/")))); //Until this point, no exception is thrown about access intercept(java.nio.file.AccessDeniedException.class, "Service: Amazon S3; Status Code: 403;", () -> { fsKeyB.listStatus(S3ATestUtils.createTestPath( path(createFilename("/a/b/c/")))); throw new Exception("Exception should be thrown."); }); //Now try it with an unencrypted filesystem. 
Configuration conf = this.createConfiguration(); conf.unset(Constants.SERVER_SIDE_ENCRYPTION_ALGORITHM); conf.unset(Constants.SERVER_SIDE_ENCRYPTION_KEY); S3AContract contract = (S3AContract) createContract(conf); contract.init(); FileSystem unencryptedFileSystem = contract.getTestFileSystem(); //unencrypted can access until the final directory unencryptedFileSystem.listStatus(S3ATestUtils.createTestPath( path(createFilename("/a/")))); unencryptedFileSystem.listStatus(S3ATestUtils.createTestPath( path(createFilename("/a/b/")))); intercept(org.apache.hadoop.fs.s3a.AWSS3IOException.class, "Bad Request (Service: Amazon S3; Status Code: 400; Error Code: 400" + " Bad Request;", () -> { unencryptedFileSystem.listStatus(S3ATestUtils.createTestPath( path(createFilename("/a/b/c/")))); throw new Exception("Exception should be thrown."); }); rm(getFileSystem(), path(createFilename("/")), true, false); } /** * Much like trying to access a encrypted directory, an encrypted file cannot * have its metadata read, since both are technically an object. * @throws Exception */ @Test public void testListStatusEncryptedFile() throws Exception { assumeEnabled(); skipIfEncryptionTestsDisabled(getConfiguration()); Path nestedDirectory = S3ATestUtils.createTestPath( path(createFilename("/a/b/c/")) ); assertTrue(getFileSystem().mkdirs(nestedDirectory)); String src = createFilename("/a/b/c/fileToStat.txt"); Path fileToStat = writeThenReadFile(src, 2048); FileSystem fsKeyB = createNewFileSystemWithSSECKey( "msdo3VvvZznp66Gth58a91Hxe/UpExMkwU9BHkIjfW8="); //Until this point, no exception is thrown about access intercept(java.nio.file.AccessDeniedException.class, "Service: Amazon S3; Status Code: 403;", () -> { fsKeyB.listStatus(S3ATestUtils.createTestPath(fileToStat)); throw new Exception("Exception should be thrown."); }); rm(getFileSystem(), path(createFilename("/")), true, false); } /** * It is possible to delete directories without the proper encryption key and * the hierarchy above it. 
* * @throws Exception */ @Test public void testDeleteEncryptedObjectWithDifferentKey() throws Exception { assumeEnabled(); skipIfEncryptionTestsDisabled(getConfiguration()); Path nestedDirectory = S3ATestUtils.createTestPath( path(createFilename("/a/b/c/")) ); assertTrue(getFileSystem().mkdirs(nestedDirectory)); String src = createFilename("/a/b/c/filetobedeleted.txt"); Path fileToDelete = writeThenReadFile(src, 2048); FileSystem fsKeyB = createNewFileSystemWithSSECKey( "msdo3VvvZznp66Gth58a91Hxe/UpExMkwU9BHkIjfW8="); intercept(java.nio.file.AccessDeniedException.class, "Forbidden (Service: Amazon S3; Status Code: 403; Error Code: " + "403 Forbidden", () -> { fsKeyB.delete(fileToDelete, false); throw new Exception("Exception should be thrown."); }); //This is possible fsKeyB.delete(S3ATestUtils.createTestPath( path(createFilename("/a/b/c/"))), true); fsKeyB.delete(S3ATestUtils.createTestPath( path(createFilename("/a/b/"))), true); fsKeyB.delete(S3ATestUtils.createTestPath( path(createFilename("/a/"))), true); } private FileSystem createNewFileSystemWithSSECKey(String sseCKey) throws IOException { Configuration conf = this.createConfiguration(); conf.set(Constants.SERVER_SIDE_ENCRYPTION_KEY, sseCKey); S3AContract contract = (S3AContract) createContract(conf); contract.init(); FileSystem fileSystem = contract.getTestFileSystem(); return fileSystem; } @Override protected S3AEncryptionMethods getSSEAlgorithm() { return S3AEncryptionMethods.SSE_C; } }
apache-2.0
thoughtworks/cruisecontrol.rb
lib/builder_plugins/builder_plugin.rb
4757
# BuilderPlugin is the superclass of all CC.rb plugins. It does not provide any functionality # except a basic initializer that accepts as an argument the current project. # # CC.rb plugins offer a rich notification system for tracking every aspect of the build lifecycle. In rough order, # they are: # # * polling_source_control # * no_new_revisions_detected OR new_revisions_detected(revisions) # * build_requested # * queued # * timed_out # * build_initiated # * configuration_modified # * build_started # * build_finished # * release_note_generated # * release_tagged # * build_broken OR build_fixed # * build_loop_failed # * sleeping class BuilderPlugin attr_reader :project def initialize(project) @project = project end class << self def known_event?(event_name) self.instance_methods(false).map { |m| m.to_s }.include? event_name.to_s end def load_all plugins_to_load.each do |plugin| if can_load_immediately?(plugin) load_plugin(File.basename(plugin)) elsif File.directory?(plugin) init_path = File.join(plugin, 'init.rb') if File.file?(init_path) load_plugin(init_path) else log.error("No init.rb found in plugin directory #{plugin}") end end end end private def plugins_to_load (Dir[Rails.root.join('lib', 'builder_plugins', '*')] + Dir[CruiseControl::Configuration.plugins_root.join("*")]).reject do |plugin_path| # ignore hidden files and directories (they should be considered hidden by Dir[], but just in case) File.basename(plugin_path)[0, 1] == '.' end end def can_load_immediately?(plugin) File.file?(plugin) && plugin[-3..-1] == '.rb' end def load_plugin(plugin_path) plugin_file = File.basename(plugin_path).sub(/\.rb$/, '') plugin_is_directory = (plugin_file == 'init') plugin_name = plugin_is_directory ? 
File.basename(File.dirname(plugin_path)) : plugin_file CruiseControl::Log.debug("Loading plugin #{plugin_name}") if Rails.env == 'development' load plugin_path else if plugin_is_directory then require "#{plugin_name}/init" else require plugin_name end end end end # Called by ChangeInSourceControlTrigger to indicate that it is about to poll source control. def polling_source_control end # Called by ChangeInSourceControlTrigger to indicate that no new revisions have been detected. def no_new_revisions_detected end # Called by ChangeInSourceControlTrigger to indicate that new revisions were detected. def new_revisions_detected(revisions) end # Called by Project to indicate that a build has explicitly been requested by the user. def build_requested end # Called by BuildSerializer if it another build is still running and it cannot acquire the build serialization lock. # It will retry until it times out. Occurs only if build serialization is enabled in your CC.rb configuration. def queued end # Called by BuildSerializer if it times out attempting to acquire the build serialization lock due to another build # still running. Occurs only if build serialization is enabled in your CC.rb configuration. def timed_out end # Called by Project at the start of a new build before any other build events. def build_initiated end # Called by Project at the start of a new build to indicate that the configuration has been modified, # after which the build is aborted. def configuration_modified end # Called by Project after some basic logging and the configuration_modified check and just before the build begins running, def build_started(build) end # Called by Project immediately after the build has finished running. def build_finished(build) end # Called by Project immediately after the release note is generated. def release_note_generated(build , message , email) end # Called by Project immediately after the release is tagged. 
def release_tagged(revision , tag, build) end # Called by Project after the completion of a build if the previous build was successful and this one is a failure. def build_broken(build, previous_build) end # Called by Project after the completion of a build if the previous build was a failure and this one was successful. def build_fixed(build, previous_build) end # Called by Project if the build fails internally with a CC.rb exception. def build_loop_failed(exception) end # Called by Project at the end of a build to indicate that the build loop is once again sleeping. def sleeping end end
apache-2.0
ullgren/camel
components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsEndpoint.java
2194
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.component.hdfs; import java.net.URI; import java.net.URISyntaxException; import org.apache.camel.Consumer; import org.apache.camel.Processor; import org.apache.camel.Producer; import org.apache.camel.spi.UriEndpoint; import org.apache.camel.spi.UriParam; import org.apache.camel.support.ScheduledPollEndpoint; /** * For reading/writing from/to an HDFS filesystem using Hadoop 2.x. 
*/ @UriEndpoint(firstVersion = "2.14.0", scheme = "hdfs", title = "HDFS", syntax = "hdfs:hostName:port/path", label = "hadoop,file") public class HdfsEndpoint extends ScheduledPollEndpoint { @UriParam private final HdfsConfiguration config; public HdfsEndpoint(String endpointUri, HdfsComponent component) throws URISyntaxException { super(endpointUri, component); this.config = new HdfsConfiguration(); this.config.parseURI(new URI(endpointUri)); } @Override public Consumer createConsumer(Processor processor) throws Exception { config.checkConsumerOptions(); HdfsConsumer answer = new HdfsConsumer(this, processor, config); configureConsumer(answer); return answer; } @Override public Producer createProducer() { config.checkProducerOptions(); return new HdfsProducer(this, config); } public HdfsConfiguration getConfig() { return config; } }
apache-2.0
ncteisen/grpc
test/cpp/server/load_reporter/load_data_store_test.cc
22763
/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include <grpc/impl/codegen/port_platform.h> #include <set> #include <vector> #include <grpc/grpc.h> #include <gtest/gtest.h> #include "src/cpp/server/load_reporter/load_data_store.h" #include "test/core/util/port.h" #include "test/core/util/test_config.h" namespace grpc { namespace testing { namespace { using ::grpc::load_reporter::CallMetricValue; using ::grpc::load_reporter::LoadDataStore; using ::grpc::load_reporter::LoadRecordKey; using ::grpc::load_reporter::LoadRecordValue; using ::grpc::load_reporter::PerBalancerStore; using ::grpc::load_reporter::kInvalidLbId; class LoadDataStoreTest : public ::testing::Test { public: LoadDataStoreTest() : kKey1(kLbId1, kLbTag1, kUser1, kClientIp1), kKey2(kLbId2, kLbTag2, kUser2, kClientIp2) {} // Check whether per_balancer_stores contains a store which was originally // created for <hostname, lb_id, and load_key>. 
bool PerBalancerStoresContains( const LoadDataStore& load_data_store, const std::set<PerBalancerStore*>* per_balancer_stores, const grpc::string& hostname, const grpc::string& lb_id, const grpc::string& load_key) { auto original_per_balancer_store = load_data_store.FindPerBalancerStore(hostname, lb_id); EXPECT_NE(original_per_balancer_store, nullptr); EXPECT_EQ(original_per_balancer_store->lb_id(), lb_id); EXPECT_EQ(original_per_balancer_store->load_key(), load_key); for (auto per_balancer_store : *per_balancer_stores) { if (per_balancer_store == original_per_balancer_store) { return true; } } return false; } grpc::string FormatLbId(size_t index) { return "kLbId" + std::to_string(index); } const grpc::string kHostname1 = "kHostname1"; const grpc::string kHostname2 = "kHostname2"; const grpc::string kLbId1 = "kLbId1"; const grpc::string kLbId2 = "kLbId2"; const grpc::string kLbId3 = "kLbId3"; const grpc::string kLbId4 = "kLbId4"; const grpc::string kLoadKey1 = "kLoadKey1"; const grpc::string kLoadKey2 = "kLoadKey2"; const grpc::string kLbTag1 = "kLbTag1"; const grpc::string kLbTag2 = "kLbTag2"; const grpc::string kUser1 = "kUser1"; const grpc::string kUser2 = "kUser2"; const grpc::string kClientIp1 = "00"; const grpc::string kClientIp2 = "02"; const grpc::string kMetric1 = "kMetric1"; const grpc::string kMetric2 = "kMetric2"; const LoadRecordKey kKey1; const LoadRecordKey kKey2; }; using PerBalancerStoreTest = LoadDataStoreTest; TEST_F(LoadDataStoreTest, AssignToSelf) { LoadDataStore load_data_store; load_data_store.ReportStreamCreated(kHostname1, kLbId1, kLoadKey1); auto assigned_stores = load_data_store.GetAssignedStores(kHostname1, kLbId1); EXPECT_TRUE(PerBalancerStoresContains(load_data_store, assigned_stores, kHostname1, kLbId1, kLoadKey1)); } TEST_F(LoadDataStoreTest, ReassignOrphanStores) { LoadDataStore load_data_store; load_data_store.ReportStreamCreated(kHostname1, kLbId1, kLoadKey1); load_data_store.ReportStreamCreated(kHostname1, kLbId2, kLoadKey1); 
load_data_store.ReportStreamCreated(kHostname1, kLbId3, kLoadKey2); load_data_store.ReportStreamCreated(kHostname2, kLbId4, kLoadKey1); // 1. Close the second stream. load_data_store.ReportStreamClosed(kHostname1, kLbId2); auto assigned_to_lb_id_1 = load_data_store.GetAssignedStores(kHostname1, kLbId1); // The orphaned store is re-assigned to kLbId1 with the same load key. EXPECT_TRUE(PerBalancerStoresContains(load_data_store, assigned_to_lb_id_1, kHostname1, kLbId1, kLoadKey1)); EXPECT_TRUE(PerBalancerStoresContains(load_data_store, assigned_to_lb_id_1, kHostname1, kLbId2, kLoadKey1)); // 2. Close the first stream. load_data_store.ReportStreamClosed(kHostname1, kLbId1); auto assigned_to_lb_id_3 = load_data_store.GetAssignedStores(kHostname1, kLbId3); // The orphaned stores are re-assigned to kLbId3 with the same host, // because there isn't any LB with the same load key. EXPECT_TRUE(PerBalancerStoresContains(load_data_store, assigned_to_lb_id_3, kHostname1, kLbId1, kLoadKey1)); EXPECT_TRUE(PerBalancerStoresContains(load_data_store, assigned_to_lb_id_3, kHostname1, kLbId2, kLoadKey1)); EXPECT_TRUE(PerBalancerStoresContains(load_data_store, assigned_to_lb_id_3, kHostname1, kLbId3, kLoadKey2)); // 3. Close the third stream. load_data_store.ReportStreamClosed(kHostname1, kLbId3); auto assigned_to_lb_id_4 = load_data_store.GetAssignedStores(kHostname2, kLbId4); // There is no active LB for the first host now. kLbId4 is active but // it's for the second host, so it wll NOT adopt the orphaned stores. 
EXPECT_FALSE(PerBalancerStoresContains(load_data_store, assigned_to_lb_id_4, kHostname1, kLbId1, kLoadKey1)); EXPECT_FALSE(PerBalancerStoresContains(load_data_store, assigned_to_lb_id_4, kHostname1, kLbId2, kLoadKey1)); EXPECT_FALSE(PerBalancerStoresContains(load_data_store, assigned_to_lb_id_4, kHostname1, kLbId3, kLoadKey2)); EXPECT_TRUE(PerBalancerStoresContains(load_data_store, assigned_to_lb_id_4, kHostname2, kLbId4, kLoadKey1)); } TEST_F(LoadDataStoreTest, OrphanAssignmentIsSticky) { LoadDataStore load_data_store; std::set<grpc::string> active_lb_ids; size_t num_lb_ids = 1000; for (size_t i = 0; i < num_lb_ids; ++i) { load_data_store.ReportStreamCreated(kHostname1, FormatLbId(i), kLoadKey1); active_lb_ids.insert(FormatLbId(i)); } grpc::string orphaned_lb_id = FormatLbId(std::rand() % num_lb_ids); load_data_store.ReportStreamClosed(kHostname1, orphaned_lb_id); active_lb_ids.erase(orphaned_lb_id); // Find which LB is assigned the orphaned store. grpc::string assigned_lb_id = ""; for (const auto& lb_id : active_lb_ids) { if (PerBalancerStoresContains( load_data_store, load_data_store.GetAssignedStores(kHostname1, lb_id), kHostname1, orphaned_lb_id, kLoadKey1)) { assigned_lb_id = lb_id; break; } } EXPECT_STRNE(assigned_lb_id.c_str(), ""); // Close 10 more stream, skipping the assigned_lb_id. The assignment of // orphaned_lb_id shouldn't change. for (size_t _ = 0; _ < 10; ++_) { grpc::string lb_id_to_close = ""; for (const auto& lb_id : active_lb_ids) { if (lb_id != assigned_lb_id) { lb_id_to_close = lb_id; break; } } EXPECT_STRNE(lb_id_to_close.c_str(), ""); load_data_store.ReportStreamClosed(kHostname1, lb_id_to_close); active_lb_ids.erase(lb_id_to_close); EXPECT_TRUE(PerBalancerStoresContains( load_data_store, load_data_store.GetAssignedStores(kHostname1, assigned_lb_id), kHostname1, orphaned_lb_id, kLoadKey1)); } // Close the assigned_lb_id, orphaned_lb_id will be re-assigned again. 
load_data_store.ReportStreamClosed(kHostname1, assigned_lb_id); active_lb_ids.erase(assigned_lb_id); size_t orphaned_lb_id_occurences = 0; for (const auto& lb_id : active_lb_ids) { if (PerBalancerStoresContains( load_data_store, load_data_store.GetAssignedStores(kHostname1, lb_id), kHostname1, orphaned_lb_id, kLoadKey1)) { orphaned_lb_id_occurences++; } } EXPECT_EQ(orphaned_lb_id_occurences, 1U); } TEST_F(LoadDataStoreTest, HostTemporarilyLoseAllStreams) { LoadDataStore load_data_store; load_data_store.ReportStreamCreated(kHostname1, kLbId1, kLoadKey1); load_data_store.ReportStreamCreated(kHostname2, kLbId2, kLoadKey1); auto store_lb_id_1 = load_data_store.FindPerBalancerStore(kHostname1, kLbId1); auto store_invalid_lb_id_1 = load_data_store.FindPerBalancerStore(kHostname1, kInvalidLbId); EXPECT_FALSE(store_lb_id_1->IsSuspended()); EXPECT_FALSE(store_invalid_lb_id_1->IsSuspended()); // Disconnect all the streams of the first host. load_data_store.ReportStreamClosed(kHostname1, kLbId1); // All the streams of that host are suspended. EXPECT_TRUE(store_lb_id_1->IsSuspended()); EXPECT_TRUE(store_invalid_lb_id_1->IsSuspended()); // Detailed load data won't be kept when the PerBalancerStore is suspended. store_lb_id_1->MergeRow(kKey1, LoadRecordValue()); store_invalid_lb_id_1->MergeRow(kKey1, LoadRecordValue()); EXPECT_EQ(store_lb_id_1->load_record_map().size(), 0U); EXPECT_EQ(store_invalid_lb_id_1->load_record_map().size(), 0U); // The stores for different hosts won't mix, even if the load key is the same. auto assigned_to_lb_id_2 = load_data_store.GetAssignedStores(kHostname2, kLbId2); EXPECT_EQ(assigned_to_lb_id_2->size(), 2U); EXPECT_TRUE(PerBalancerStoresContains(load_data_store, assigned_to_lb_id_2, kHostname2, kLbId2, kLoadKey1)); EXPECT_TRUE(PerBalancerStoresContains(load_data_store, assigned_to_lb_id_2, kHostname2, kInvalidLbId, "")); // A new stream is created for the first host. 
load_data_store.ReportStreamCreated(kHostname1, kLbId3, kLoadKey2); // The stores for the first host are resumed. EXPECT_FALSE(store_lb_id_1->IsSuspended()); EXPECT_FALSE(store_invalid_lb_id_1->IsSuspended()); store_lb_id_1->MergeRow(kKey1, LoadRecordValue()); store_invalid_lb_id_1->MergeRow(kKey1, LoadRecordValue()); EXPECT_EQ(store_lb_id_1->load_record_map().size(), 1U); EXPECT_EQ(store_invalid_lb_id_1->load_record_map().size(), 1U); // The resumed stores are assigned to the new LB. auto assigned_to_lb_id_3 = load_data_store.GetAssignedStores(kHostname1, kLbId3); EXPECT_EQ(assigned_to_lb_id_3->size(), 3U); EXPECT_TRUE(PerBalancerStoresContains(load_data_store, assigned_to_lb_id_3, kHostname1, kLbId1, kLoadKey1)); EXPECT_TRUE(PerBalancerStoresContains(load_data_store, assigned_to_lb_id_3, kHostname1, kInvalidLbId, "")); EXPECT_TRUE(PerBalancerStoresContains(load_data_store, assigned_to_lb_id_3, kHostname1, kLbId3, kLoadKey2)); } TEST_F(LoadDataStoreTest, OneStorePerLbId) { LoadDataStore load_data_store; EXPECT_EQ(load_data_store.FindPerBalancerStore(kHostname1, kLbId1), nullptr); EXPECT_EQ(load_data_store.FindPerBalancerStore(kHostname1, kInvalidLbId), nullptr); EXPECT_EQ(load_data_store.FindPerBalancerStore(kHostname2, kLbId2), nullptr); EXPECT_EQ(load_data_store.FindPerBalancerStore(kHostname2, kLbId3), nullptr); // Create The first stream. load_data_store.ReportStreamCreated(kHostname1, kLbId1, kLoadKey1); auto store_lb_id_1 = load_data_store.FindPerBalancerStore(kHostname1, kLbId1); auto store_invalid_lb_id_1 = load_data_store.FindPerBalancerStore(kHostname1, kInvalidLbId); // Two stores will be created: one is for the stream; the other one is for // kInvalidLbId. 
EXPECT_NE(store_lb_id_1, nullptr); EXPECT_NE(store_invalid_lb_id_1, nullptr); EXPECT_NE(store_lb_id_1, store_invalid_lb_id_1); EXPECT_EQ(load_data_store.FindPerBalancerStore(kHostname2, kLbId2), nullptr); EXPECT_EQ(load_data_store.FindPerBalancerStore(kHostname2, kLbId3), nullptr); // Create the second stream. load_data_store.ReportStreamCreated(kHostname2, kLbId3, kLoadKey1); auto store_lb_id_3 = load_data_store.FindPerBalancerStore(kHostname2, kLbId3); auto store_invalid_lb_id_2 = load_data_store.FindPerBalancerStore(kHostname2, kInvalidLbId); EXPECT_NE(store_lb_id_3, nullptr); EXPECT_NE(store_invalid_lb_id_2, nullptr); EXPECT_NE(store_lb_id_3, store_invalid_lb_id_2); // The PerBalancerStores created for different hosts are independent. EXPECT_NE(store_lb_id_3, store_invalid_lb_id_1); EXPECT_NE(store_invalid_lb_id_2, store_invalid_lb_id_1); EXPECT_EQ(load_data_store.FindPerBalancerStore(kHostname2, kLbId2), nullptr); } TEST_F(LoadDataStoreTest, ExactlyOnceAssignment) { LoadDataStore load_data_store; size_t num_create = 100; size_t num_close = 50; for (size_t i = 0; i < num_create; ++i) { load_data_store.ReportStreamCreated(kHostname1, FormatLbId(i), kLoadKey1); } for (size_t i = 0; i < num_close; ++i) { load_data_store.ReportStreamClosed(kHostname1, FormatLbId(i)); } std::set<grpc::string> reported_lb_ids; for (size_t i = num_close; i < num_create; ++i) { for (auto assigned_store : *load_data_store.GetAssignedStores(kHostname1, FormatLbId(i))) { EXPECT_TRUE(reported_lb_ids.insert(assigned_store->lb_id()).second); } } // Add one for kInvalidLbId. EXPECT_EQ(reported_lb_ids.size(), (num_create + 1)); EXPECT_NE(reported_lb_ids.find(kInvalidLbId), reported_lb_ids.end()); } TEST_F(LoadDataStoreTest, UnknownBalancerIdTracking) { LoadDataStore load_data_store; load_data_store.ReportStreamCreated(kHostname1, kLbId1, kLoadKey1); // Merge data for a known LB ID. LoadRecordValue v1(192); load_data_store.MergeRow(kHostname1, kKey1, v1); // Merge data for unknown LB ID. 
LoadRecordValue v2(23); EXPECT_FALSE(load_data_store.IsTrackedUnknownBalancerId(kLbId2)); load_data_store.MergeRow( kHostname1, LoadRecordKey(kLbId2, kLbTag1, kUser1, kClientIp1), v2); EXPECT_TRUE(load_data_store.IsTrackedUnknownBalancerId(kLbId2)); LoadRecordValue v3(952); load_data_store.MergeRow( kHostname2, LoadRecordKey(kLbId3, kLbTag1, kUser1, kClientIp1), v3); EXPECT_TRUE(load_data_store.IsTrackedUnknownBalancerId(kLbId3)); // The data kept for a known LB ID is correct. auto store_lb_id_1 = load_data_store.FindPerBalancerStore(kHostname1, kLbId1); EXPECT_EQ(store_lb_id_1->load_record_map().size(), 1U); EXPECT_EQ(store_lb_id_1->load_record_map().find(kKey1)->second.start_count(), v1.start_count()); EXPECT_EQ(store_lb_id_1->GetNumCallsInProgressForReport(), v1.start_count()); // No PerBalancerStore created for Unknown LB ID. EXPECT_EQ(load_data_store.FindPerBalancerStore(kHostname1, kLbId2), nullptr); EXPECT_EQ(load_data_store.FindPerBalancerStore(kHostname2, kLbId3), nullptr); // End all the started RPCs for kLbId1. LoadRecordValue v4(0, v1.start_count()); load_data_store.MergeRow(kHostname1, kKey1, v4); EXPECT_EQ(store_lb_id_1->load_record_map().size(), 1U); EXPECT_EQ(store_lb_id_1->load_record_map().find(kKey1)->second.start_count(), v1.start_count()); EXPECT_EQ(store_lb_id_1->load_record_map().find(kKey1)->second.ok_count(), v4.ok_count()); EXPECT_EQ(store_lb_id_1->GetNumCallsInProgressForReport(), 0U); EXPECT_FALSE(load_data_store.IsTrackedUnknownBalancerId(kLbId1)); // End all the started RPCs for kLbId2. LoadRecordValue v5(0, v2.start_count()); load_data_store.MergeRow( kHostname1, LoadRecordKey(kLbId2, kLbTag1, kUser1, kClientIp1), v5); EXPECT_FALSE(load_data_store.IsTrackedUnknownBalancerId(kLbId2)); // End some of the started RPCs for kLbId3. 
LoadRecordValue v6(0, v3.start_count() / 2); load_data_store.MergeRow( kHostname2, LoadRecordKey(kLbId3, kLbTag1, kUser1, kClientIp1), v6); EXPECT_TRUE(load_data_store.IsTrackedUnknownBalancerId(kLbId3)); } TEST_F(PerBalancerStoreTest, Suspend) { PerBalancerStore per_balancer_store(kLbId1, kLoadKey1); EXPECT_FALSE(per_balancer_store.IsSuspended()); // Suspend the store. per_balancer_store.Suspend(); EXPECT_TRUE(per_balancer_store.IsSuspended()); EXPECT_EQ(0U, per_balancer_store.load_record_map().size()); // Data merged when the store is suspended won't be kept. LoadRecordValue v1(139, 19); per_balancer_store.MergeRow(kKey1, v1); EXPECT_EQ(0U, per_balancer_store.load_record_map().size()); // Resume the store. per_balancer_store.Resume(); EXPECT_FALSE(per_balancer_store.IsSuspended()); EXPECT_EQ(0U, per_balancer_store.load_record_map().size()); // Data merged after the store is resumed will be kept. LoadRecordValue v2(23, 0, 51); per_balancer_store.MergeRow(kKey1, v2); EXPECT_EQ(1U, per_balancer_store.load_record_map().size()); // Suspend the store. per_balancer_store.Suspend(); EXPECT_TRUE(per_balancer_store.IsSuspended()); EXPECT_EQ(0U, per_balancer_store.load_record_map().size()); // Data merged when the store is suspended won't be kept. LoadRecordValue v3(62, 11); per_balancer_store.MergeRow(kKey1, v3); EXPECT_EQ(0U, per_balancer_store.load_record_map().size()); // Resume the store. per_balancer_store.Resume(); EXPECT_FALSE(per_balancer_store.IsSuspended()); EXPECT_EQ(0U, per_balancer_store.load_record_map().size()); // Data merged after the store is resumed will be kept. LoadRecordValue v4(225, 98); per_balancer_store.MergeRow(kKey1, v4); EXPECT_EQ(1U, per_balancer_store.load_record_map().size()); // In-progress count is always kept. 
EXPECT_EQ(per_balancer_store.GetNumCallsInProgressForReport(), v1.start_count() - v1.ok_count() + v2.start_count() - v2.error_count() + v3.start_count() - v3.ok_count() + v4.start_count() - v4.ok_count()); } TEST_F(PerBalancerStoreTest, DataAggregation) { PerBalancerStore per_balancer_store(kLbId1, kLoadKey1); // Construct some Values. LoadRecordValue v1(992, 34, 13, 234, 164, 173467); v1.InsertCallMetric(kMetric1, CallMetricValue(3, 2773.2)); LoadRecordValue v2(4842, 213, 9, 393, 974, 1345); v2.InsertCallMetric(kMetric1, CallMetricValue(7, 25.234)); v2.InsertCallMetric(kMetric2, CallMetricValue(2, 387.08)); // v3 doesn't change the number of in-progress RPCs. LoadRecordValue v3(293, 55, 293 - 55, 28764, 5284, 5772); v3.InsertCallMetric(kMetric1, CallMetricValue(61, 3465.0)); v3.InsertCallMetric(kMetric2, CallMetricValue(13, 672.0)); // The initial state of the store. uint64_t num_calls_in_progress = 0; EXPECT_FALSE(per_balancer_store.IsNumCallsInProgressChangedSinceLastReport()); EXPECT_EQ(per_balancer_store.GetNumCallsInProgressForReport(), num_calls_in_progress); // Merge v1 and get report of the number of in-progress calls. per_balancer_store.MergeRow(kKey1, v1); EXPECT_TRUE(per_balancer_store.IsNumCallsInProgressChangedSinceLastReport()); EXPECT_EQ(per_balancer_store.GetNumCallsInProgressForReport(), num_calls_in_progress += (v1.start_count() - v1.ok_count() - v1.error_count())); EXPECT_FALSE(per_balancer_store.IsNumCallsInProgressChangedSinceLastReport()); // Merge v2 and get report of the number of in-progress calls. per_balancer_store.MergeRow(kKey2, v2); EXPECT_TRUE(per_balancer_store.IsNumCallsInProgressChangedSinceLastReport()); EXPECT_EQ(per_balancer_store.GetNumCallsInProgressForReport(), num_calls_in_progress += (v2.start_count() - v2.ok_count() - v2.error_count())); EXPECT_FALSE(per_balancer_store.IsNumCallsInProgressChangedSinceLastReport()); // Merge v3 and get report of the number of in-progress calls. 
per_balancer_store.MergeRow(kKey1, v3); EXPECT_FALSE(per_balancer_store.IsNumCallsInProgressChangedSinceLastReport()); EXPECT_EQ(per_balancer_store.GetNumCallsInProgressForReport(), num_calls_in_progress); // LoadRecordValue for kKey1 is aggregated correctly. LoadRecordValue value_for_key1 = per_balancer_store.load_record_map().find(kKey1)->second; EXPECT_EQ(value_for_key1.start_count(), v1.start_count() + v3.start_count()); EXPECT_EQ(value_for_key1.ok_count(), v1.ok_count() + v3.ok_count()); EXPECT_EQ(value_for_key1.error_count(), v1.error_count() + v3.error_count()); EXPECT_EQ(value_for_key1.bytes_sent(), v1.bytes_sent() + v3.bytes_sent()); EXPECT_EQ(value_for_key1.bytes_recv(), v1.bytes_recv() + v3.bytes_recv()); EXPECT_EQ(value_for_key1.latency_ms(), v1.latency_ms() + v3.latency_ms()); EXPECT_EQ(value_for_key1.call_metrics().size(), 2U); EXPECT_EQ(value_for_key1.call_metrics().find(kMetric1)->second.num_calls(), v1.call_metrics().find(kMetric1)->second.num_calls() + v3.call_metrics().find(kMetric1)->second.num_calls()); EXPECT_EQ( value_for_key1.call_metrics().find(kMetric1)->second.total_metric_value(), v1.call_metrics().find(kMetric1)->second.total_metric_value() + v3.call_metrics().find(kMetric1)->second.total_metric_value()); EXPECT_EQ(value_for_key1.call_metrics().find(kMetric2)->second.num_calls(), v3.call_metrics().find(kMetric2)->second.num_calls()); EXPECT_EQ( value_for_key1.call_metrics().find(kMetric2)->second.total_metric_value(), v3.call_metrics().find(kMetric2)->second.total_metric_value()); // LoadRecordValue for kKey2 is aggregated (trivially) correctly. 
LoadRecordValue value_for_key2 = per_balancer_store.load_record_map().find(kKey2)->second; EXPECT_EQ(value_for_key2.start_count(), v2.start_count()); EXPECT_EQ(value_for_key2.ok_count(), v2.ok_count()); EXPECT_EQ(value_for_key2.error_count(), v2.error_count()); EXPECT_EQ(value_for_key2.bytes_sent(), v2.bytes_sent()); EXPECT_EQ(value_for_key2.bytes_recv(), v2.bytes_recv()); EXPECT_EQ(value_for_key2.latency_ms(), v2.latency_ms()); EXPECT_EQ(value_for_key2.call_metrics().size(), 2U); EXPECT_EQ(value_for_key2.call_metrics().find(kMetric1)->second.num_calls(), v2.call_metrics().find(kMetric1)->second.num_calls()); EXPECT_EQ( value_for_key2.call_metrics().find(kMetric1)->second.total_metric_value(), v2.call_metrics().find(kMetric1)->second.total_metric_value()); EXPECT_EQ(value_for_key2.call_metrics().find(kMetric2)->second.num_calls(), v2.call_metrics().find(kMetric2)->second.num_calls()); EXPECT_EQ( value_for_key2.call_metrics().find(kMetric2)->second.total_metric_value(), v2.call_metrics().find(kMetric2)->second.total_metric_value()); } } // namespace } // namespace testing } // namespace grpc int main(int argc, char** argv) { grpc_test_init(argc, argv); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
apache-2.0
wmudge/commons-scxml
src/test/java/org/apache/commons/scxml2/model/Hello.java
2219
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.commons.scxml2.model; import org.apache.commons.scxml2.ActionExecutionContext; import org.apache.commons.scxml2.SCXMLExpressionException; import org.apache.commons.scxml2.TriggerEvent; /** * Our custom &quot;hello world&quot; action. */ public class Hello extends Action { /** Serial version UID. */ private static final long serialVersionUID = 1L; /** This is who we say hello to. */ private String name; /** We count callbacks to execute() as part of the test suite. */ public static int callbacks = 0; /** Public constructor is needed for the I in SCXML IO. */ public Hello() { super(); } /** * Get the name. * * @return Returns the name. */ public String getName() { return name; } /** * Set the name. * * @param name The name to set. */ public void setName(String name) { this.name = name; } @Override public void execute(ActionExecutionContext exctx) throws ModelException, SCXMLExpressionException { if (exctx.getAppLog().isInfoEnabled()) { exctx.getAppLog().info("Hello " + name); } // For derived events payload testing TriggerEvent event = new TriggerEvent("helloevent", TriggerEvent.SIGNAL_EVENT, name); exctx.getInternalIOProcessor().addEvent(event); callbacks++; } }
apache-2.0
aifargonos2/elk-reasoner
elk-reasoner/src/main/java/org/semanticweb/elk/reasoner/saturation/rules/RuleCounter.java
10553
package org.semanticweb.elk.reasoner.saturation.rules; /* * #%L * ELK Reasoner * $Id:$ * $HeadURL:$ * %% * Copyright (C) 2011 - 2012 Department of Computer Science, University of Oxford * %% * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ import org.semanticweb.elk.reasoner.saturation.rules.backwardlinks.BackwardLinkChainFromBackwardLinkRule; import org.semanticweb.elk.reasoner.saturation.rules.backwardlinks.ContradictionOverBackwardLinkRule; import org.semanticweb.elk.reasoner.saturation.rules.backwardlinks.SubsumerBackwardLinkRule; import org.semanticweb.elk.reasoner.saturation.rules.contextinit.OwlThingContextInitRule; import org.semanticweb.elk.reasoner.saturation.rules.contextinit.RootContextInitializationRule; import org.semanticweb.elk.reasoner.saturation.rules.contradiction.ContradictionPropagationRule; import org.semanticweb.elk.reasoner.saturation.rules.disjointsubsumer.ContradicitonCompositionRule; import org.semanticweb.elk.reasoner.saturation.rules.forwardlink.BackwardLinkFromForwardLinkRule; import org.semanticweb.elk.reasoner.saturation.rules.forwardlink.NonReflexiveBackwardLinkCompositionRule; import org.semanticweb.elk.reasoner.saturation.rules.forwardlink.ReflexiveBackwardLinkCompositionRule; import org.semanticweb.elk.reasoner.saturation.rules.propagations.NonReflexivePropagationRule; import org.semanticweb.elk.reasoner.saturation.rules.propagations.ReflexivePropagationRule; import 
org.semanticweb.elk.reasoner.saturation.rules.subcontextinit.PropagationInitializationRule; import org.semanticweb.elk.reasoner.saturation.rules.subsumers.ContradictionFromDisjointnessRule; import org.semanticweb.elk.reasoner.saturation.rules.subsumers.ContradictionFromNegationRule; import org.semanticweb.elk.reasoner.saturation.rules.subsumers.ContradictionFromOwlNothingRule; import org.semanticweb.elk.reasoner.saturation.rules.subsumers.DisjointSubsumerFromMemberRule; import org.semanticweb.elk.reasoner.saturation.rules.subsumers.IndexedObjectComplementOfDecomposition; import org.semanticweb.elk.reasoner.saturation.rules.subsumers.IndexedObjectIntersectionOfDecomposition; import org.semanticweb.elk.reasoner.saturation.rules.subsumers.IndexedObjectSomeValuesFromDecomposition; import org.semanticweb.elk.reasoner.saturation.rules.subsumers.ObjectIntersectionFromConjunctRule; import org.semanticweb.elk.reasoner.saturation.rules.subsumers.ObjectUnionFromDisjunctRule; import org.semanticweb.elk.reasoner.saturation.rules.subsumers.PropagationFromExistentialFillerRule; import org.semanticweb.elk.reasoner.saturation.rules.subsumers.SuperClassFromSubClassRule; /** * An object which can be used to measure the methods invocations of a * {@link RuleVisitor}. The fields of the counter correspond to the methods of * {@link RuleVisitor}. 
* * @author "Yevgeny Kazakov" */ public class RuleCounter { /** * counter for {@link BackwardLinkChainFromBackwardLinkRule} */ long countBackwardLinkChainFromBackwardLinkRule; /** * counter for {@link BackwardLinkFromForwardLinkRule} */ long countBackwardLinkFromForwardLinkRule; /** * counter for {@link ContradicitonCompositionRule} */ long countContradicitonCompositionRule; /** * counter for {@link ContradictionFromDisjointnessRule} */ long countContradictionFromDisjointnessRule; /** * counter for {@link ContradictionFromNegationRule} */ long countContradictionFromNegationRule; /** * counter for {@link ContradictionFromOwlNothingRule} */ long countContradictionFromOwlNothingRule; /** * counter for {@link ContradictionOverBackwardLinkRule} */ long countContradictionOverBackwardLinkRule; /** * counter for {@link ContradictionPropagationRule} */ long countContradictionPropagationRule; /** * counter for {@link DisjointSubsumerFromMemberRule} */ long countDisjointSubsumerFromMemberRule; /** * counter for {@link IndexedObjectComplementOfDecomposition} */ long countIndexedObjectComplementOfDecomposition; /** * counter for {@link IndexedObjectIntersectionOfDecomposition} */ long countIndexedObjectIntersectionOfDecomposition; /** * counter for {@link IndexedObjectSomeValuesFromDecomposition} */ long countIndexedObjectSomeValuesFromDecomposition; /** * counter for {@link NonReflexiveBackwardLinkCompositionRule} */ long countNonReflexiveBackwardLinkCompositionRule; /** * counter for {@link NonReflexivePropagationRule} */ long countNonReflexivePropagationRule; /** * counter for {@link ObjectIntersectionFromConjunctRule} */ long countObjectIntersectionFromConjunctRule; /** * counter for {@link ObjectUnionFromDisjunctRule} */ long countObjectUnionFromDisjunctRule; /** * counter for {@link OwlThingContextInitRule} */ long countOwlThingContextInitRule; /** * counter for {@link PropagationFromExistentialFillerRule} */ long countPropagationFromExistentialFillerRule; /** * counter 
for {@link PropagationInitializationRule} */ long countPropagationInitializationRule; /** * counter for {@link ReflexiveBackwardLinkCompositionRule} */ long countReflexiveBackwardLinkCompositionRule; /** * counter for {@link ReflexivePropagationRule} */ long countReflexivePropagationRule; /** * counter for {@link RootContextInitializationRule} */ long countRootContextInitializationRule; /** * counter for {@link SubsumerBackwardLinkRule} */ long countSubsumerBackwardLinkRule; /** * counter for {@link SuperClassFromSubClassRule} */ long countSuperClassFromSubClassRule; /** * Add the values the corresponding values of the given counter * * @param counter */ public synchronized void add(RuleCounter counter) { countOwlThingContextInitRule += counter.countOwlThingContextInitRule; countRootContextInitializationRule += counter.countRootContextInitializationRule; countDisjointSubsumerFromMemberRule += counter.countDisjointSubsumerFromMemberRule; countContradictionFromDisjointnessRule += counter.countContradictionFromDisjointnessRule; countContradictionFromNegationRule += counter.countContradictionFromNegationRule; countObjectIntersectionFromConjunctRule += counter.countObjectIntersectionFromConjunctRule; countSuperClassFromSubClassRule += counter.countSuperClassFromSubClassRule; countPropagationFromExistentialFillerRule += counter.countPropagationFromExistentialFillerRule; countObjectUnionFromDisjunctRule += counter.countObjectUnionFromDisjunctRule; countBackwardLinkChainFromBackwardLinkRule += counter.countBackwardLinkChainFromBackwardLinkRule; countSubsumerBackwardLinkRule += counter.countSubsumerBackwardLinkRule; countContradictionOverBackwardLinkRule += counter.countContradictionOverBackwardLinkRule; countContradictionPropagationRule += counter.countContradictionPropagationRule; countContradicitonCompositionRule += counter.countContradicitonCompositionRule; countNonReflexiveBackwardLinkCompositionRule += counter.countNonReflexiveBackwardLinkCompositionRule; 
countIndexedObjectIntersectionOfDecomposition += counter.countIndexedObjectIntersectionOfDecomposition; countIndexedObjectSomeValuesFromDecomposition += counter.countIndexedObjectSomeValuesFromDecomposition; countIndexedObjectComplementOfDecomposition += counter.countIndexedObjectComplementOfDecomposition; countContradictionFromOwlNothingRule += counter.countContradictionFromOwlNothingRule; countNonReflexivePropagationRule += counter.countNonReflexivePropagationRule; countReflexivePropagationRule += counter.countReflexivePropagationRule; countReflexiveBackwardLinkCompositionRule += counter.countReflexiveBackwardLinkCompositionRule; countPropagationInitializationRule += counter.countPropagationInitializationRule; countBackwardLinkFromForwardLinkRule += counter.countBackwardLinkFromForwardLinkRule; } public long getTotalRuleAppCount() { return countOwlThingContextInitRule + countRootContextInitializationRule + countDisjointSubsumerFromMemberRule + countContradictionFromDisjointnessRule + countContradictionFromNegationRule + countObjectIntersectionFromConjunctRule + countSuperClassFromSubClassRule + countPropagationFromExistentialFillerRule + countObjectUnionFromDisjunctRule + countBackwardLinkChainFromBackwardLinkRule + countSubsumerBackwardLinkRule + countContradictionOverBackwardLinkRule + countContradictionPropagationRule + countContradicitonCompositionRule + countNonReflexiveBackwardLinkCompositionRule + countReflexiveBackwardLinkCompositionRule + countIndexedObjectIntersectionOfDecomposition + countIndexedObjectSomeValuesFromDecomposition + countIndexedObjectComplementOfDecomposition + countContradictionFromOwlNothingRule + countNonReflexivePropagationRule + countReflexivePropagationRule + countPropagationInitializationRule + countBackwardLinkFromForwardLinkRule; } /** * Reset all counters to zero. 
*/ public void reset() { countOwlThingContextInitRule = 0; countRootContextInitializationRule = 0; countDisjointSubsumerFromMemberRule = 0; countContradictionFromDisjointnessRule = 0; countContradictionFromNegationRule = 0; countObjectIntersectionFromConjunctRule = 0; countSuperClassFromSubClassRule = 0; countPropagationFromExistentialFillerRule = 0; countObjectUnionFromDisjunctRule = 0; countBackwardLinkChainFromBackwardLinkRule = 0; countSubsumerBackwardLinkRule = 0; countContradictionOverBackwardLinkRule = 0; countContradictionPropagationRule = 0; countContradicitonCompositionRule = 0; countNonReflexiveBackwardLinkCompositionRule = 0; countReflexiveBackwardLinkCompositionRule = 0; countIndexedObjectIntersectionOfDecomposition = 0; countIndexedObjectSomeValuesFromDecomposition = 0; countIndexedObjectComplementOfDecomposition = 0; countContradictionFromOwlNothingRule = 0; countNonReflexivePropagationRule = 0; countReflexivePropagationRule = 0; countPropagationInitializationRule = 0; countBackwardLinkFromForwardLinkRule = 0; } }
apache-2.0
RenaudWasTaken/kubernetes
pkg/kubelet/kubelet_node_status_test.go
54494
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package kubelet import ( "encoding/json" "fmt" "net" goruntime "runtime" "sort" "strconv" "sync/atomic" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" cadvisorapi "github.com/google/cadvisor/info/v1" cadvisorapiv2 "github.com/google/cadvisor/info/v2" "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/diff" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes/fake" v1core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/rest" core "k8s.io/client-go/testing" fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/kubelet/cm" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/util/sliceutils" "k8s.io/kubernetes/pkg/version" "k8s.io/kubernetes/pkg/volume/util" ) const ( maxImageTagsForTest = 20 ) // generateTestingImageList generate randomly generated image list and corresponding expectedImageList. 
func generateTestingImageList(count int) ([]kubecontainer.Image, []v1.ContainerImage) {
	// imageList is randomly generated image list
	var imageList []kubecontainer.Image
	for ; count > 0; count-- {
		imageItem := kubecontainer.Image{
			ID:       string(uuid.NewUUID()),
			RepoTags: generateImageTags(),
			Size:     rand.Int63nRange(minImgSize, maxImgSize+1),
		}
		imageList = append(imageList, imageItem)
	}
	// expectedImageList is generated by imageList according to size and maxImagesInNodeStatus
	// 1. sort the imageList by size
	sort.Sort(sliceutils.ByImageSize(imageList))
	// 2. convert sorted imageList to v1.ContainerImage list
	var expectedImageList []v1.ContainerImage
	for _, kubeImage := range imageList {
		// Each expected entry keeps only the first maxNamesPerImageInNodeStatus
		// tags, mirroring what the kubelet reports in node status.
		apiImage := v1.ContainerImage{
			Names:     kubeImage.RepoTags[0:maxNamesPerImageInNodeStatus],
			SizeBytes: kubeImage.Size,
		}
		expectedImageList = append(expectedImageList, apiImage)
	}
	// 3. only returns the top maxImagesInNodeStatus images in expectedImageList
	return imageList, expectedImageList[0:maxImagesInNodeStatus]
}

// generateImageTags returns a random number of synthetic "k8s.gcr.io:vN" tags.
func generateImageTags() []string {
	var tagList []string
	// Generate > maxNamesPerImageInNodeStatus tags so that the test can verify
	// that kubelet report up to maxNamesPerImageInNodeStatus tags.
	// (rand.IntnRange is presumably half-open, giving a count in
	// [maxNamesPerImageInNodeStatus+1, maxImageTagsForTest] — confirm in
	// apimachinery's rand package.)
	count := rand.IntnRange(maxNamesPerImageInNodeStatus+1, maxImageTagsForTest+1)
	for ; count > 0; count-- {
		tagList = append(tagList, "k8s.gcr.io:v"+strconv.Itoa(count))
	}
	return tagList
}

// applyNodeStatusPatch applies a strategic-merge status patch (as produced by
// the kubelet's PATCH nodes/status call) to a copy of originalNode and returns
// the resulting node, so tests can inspect the post-patch object.
func applyNodeStatusPatch(originalNode *v1.Node, patch []byte) (*v1.Node, error) {
	original, err := json.Marshal(originalNode)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal original node %#v: %v", originalNode, err)
	}
	updated, err := strategicpatch.StrategicMergePatch(original, patch, v1.Node{})
	if err != nil {
		return nil, fmt.Errorf("failed to apply strategic merge patch %q on node %#v: %v", patch, originalNode, err)
	}
	updatedNode := &v1.Node{}
	if err := json.Unmarshal(updated, updatedNode); err != nil {
		return nil, fmt.Errorf("failed to unmarshal updated node %q: %v", updated, err)
	}
	return updatedNode, nil
}

// localCM is a test stub that embeds a ContainerManager but pins the
// allocatable reservation and capacity to fixed values supplied by each test.
type localCM struct {
	cm.ContainerManager
	allocatableReservation v1.ResourceList
	capacity               v1.ResourceList
}

// GetNodeAllocatableReservation returns the canned reservation list.
func (lcm *localCM) GetNodeAllocatableReservation() v1.ResourceList {
	return lcm.allocatableReservation
}

// GetCapacity returns the canned capacity list.
func (lcm *localCM) GetCapacity() v1.ResourceList {
	return lcm.capacity
}

func TestNodeStatusWithCloudProviderNodeIP(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubelet.kubeClient = nil // ensure only the heartbeat client is used
	kubelet.hostname = testKubeletHostname
	existingNode := v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Annotations: make(map[string]string)},
		Spec:       v1.NodeSpec{},
	}
	// TODO : is it possible to mock validateNodeIP() to avoid relying on the host interface addresses ?
addrs, err := net.InterfaceAddrs() assert.NoError(t, err) for _, addr := range addrs { var ip net.IP switch v := addr.(type) { case *net.IPNet: ip = v.IP case *net.IPAddr: ip = v.IP } if ip != nil && !ip.IsLoopback() && ip.To4() != nil { kubelet.nodeIP = ip break } } assert.NotNil(t, kubelet.nodeIP) fakeCloud := &fakecloud.FakeCloud{ Addresses: []v1.NodeAddress{ { Type: v1.NodeExternalIP, Address: "132.143.154.163", }, { Type: v1.NodeExternalIP, Address: kubelet.nodeIP.String(), }, { Type: v1.NodeInternalIP, Address: "132.143.154.164", }, { Type: v1.NodeInternalIP, Address: kubelet.nodeIP.String(), }, { Type: v1.NodeInternalIP, Address: "132.143.154.165", }, { Type: v1.NodeHostName, Address: testKubeletHostname, }, }, Err: nil, } kubelet.cloud = fakeCloud kubelet.setNodeAddress(&existingNode) expectedAddresses := []v1.NodeAddress{ { Type: v1.NodeExternalIP, Address: kubelet.nodeIP.String(), }, { Type: v1.NodeInternalIP, Address: kubelet.nodeIP.String(), }, { Type: v1.NodeHostName, Address: testKubeletHostname, }, } assert.True(t, apiequality.Semantic.DeepEqual(expectedAddresses, existingNode.Status.Addresses), "%s", diff.ObjectDiff(expectedAddresses, existingNode.Status.Addresses)) } func TestUpdateNewNodeStatus(t *testing.T) { // generate one more than maxImagesInNodeStatus in inputImageList inputImageList, expectedImageList := generateTestingImageList(maxImagesInNodeStatus + 1) testKubelet := newTestKubeletWithImageList( t, inputImageList, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() kubelet := testKubelet.kubelet kubelet.kubeClient = nil // ensure only the heartbeat client is used kubelet.containerManager = &localCM{ ContainerManager: cm.NewStubContainerManager(), allocatableReservation: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI), v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI), }, capacity: 
v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI), }, } kubeClient := testKubelet.fakeKubeClient existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}} kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain machineInfo := &cadvisorapi.MachineInfo{ MachineID: "123", SystemUUID: "abc", BootID: "1b3", NumCores: 2, MemoryCapacity: 10E9, // 10G } mockCadvisor := testKubelet.fakeCadvisor mockCadvisor.On("Start").Return(nil) mockCadvisor.On("MachineInfo").Return(machineInfo, nil) versionInfo := &cadvisorapi.VersionInfo{ KernelVersion: "3.16.0-0.bpo.4-amd64", ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)", } mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{ Usage: 400, Capacity: 5000, Available: 600, }, nil) mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{ Usage: 400, Capacity: 5000, Available: 600, }, nil) mockCadvisor.On("VersionInfo").Return(versionInfo, nil) maxAge := 0 * time.Second options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge} mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil) kubelet.machineInfo = machineInfo expectedNode := &v1.Node{ ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}, Spec: v1.NodeSpec{}, Status: v1.NodeStatus{ Conditions: []v1.NodeCondition{ { Type: v1.NodeOutOfDisk, Status: v1.ConditionFalse, Reason: "KubeletHasSufficientDisk", Message: fmt.Sprintf("kubelet has sufficient disk space available"), LastHeartbeatTime: metav1.Time{}, LastTransitionTime: metav1.Time{}, }, { Type: v1.NodeMemoryPressure, Status: v1.ConditionFalse, Reason: "KubeletHasSufficientMemory", Message: fmt.Sprintf("kubelet has sufficient memory available"), 
LastHeartbeatTime: metav1.Time{}, LastTransitionTime: metav1.Time{}, }, { Type: v1.NodeDiskPressure, Status: v1.ConditionFalse, Reason: "KubeletHasNoDiskPressure", Message: fmt.Sprintf("kubelet has no disk pressure"), LastHeartbeatTime: metav1.Time{}, LastTransitionTime: metav1.Time{}, }, { Type: v1.NodePIDPressure, Status: v1.ConditionFalse, Reason: "KubeletHasSufficientPID", Message: fmt.Sprintf("kubelet has sufficient PID available"), LastHeartbeatTime: metav1.Time{}, LastTransitionTime: metav1.Time{}, }, { Type: v1.NodeReady, Status: v1.ConditionTrue, Reason: "KubeletReady", Message: fmt.Sprintf("kubelet is posting ready status"), LastHeartbeatTime: metav1.Time{}, LastTransitionTime: metav1.Time{}, }, }, NodeInfo: v1.NodeSystemInfo{ MachineID: "123", SystemUUID: "abc", BootID: "1b3", KernelVersion: "3.16.0-0.bpo.4-amd64", OSImage: "Debian GNU/Linux 7 (wheezy)", OperatingSystem: goruntime.GOOS, Architecture: goruntime.GOARCH, ContainerRuntimeVersion: "test://1.5.0", KubeletVersion: version.Get().String(), KubeProxyVersion: version.Get().String(), }, Capacity: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI), }, Allocatable: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI), v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI), }, Addresses: []v1.NodeAddress{ {Type: v1.NodeInternalIP, Address: "127.0.0.1"}, {Type: v1.NodeHostName, Address: testKubeletHostname}, }, Images: expectedImageList, }, } kubelet.updateRuntimeUp() assert.NoError(t, kubelet.updateNodeStatus()) actions := kubeClient.Actions() require.Len(t, actions, 2) 
require.True(t, actions[1].Matches("patch", "nodes")) require.Equal(t, actions[1].GetSubresource(), "status") updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch()) assert.NoError(t, err) for i, cond := range updatedNode.Status.Conditions { assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type) assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type) updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{} updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{} } // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961 assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type, "NotReady should be last") assert.Len(t, updatedNode.Status.Images, maxImagesInNodeStatus) assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode)) } func TestUpdateExistingNodeStatus(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() kubelet := testKubelet.kubelet kubelet.kubeClient = nil // ensure only the heartbeat client is used kubelet.containerManager = &localCM{ ContainerManager: cm.NewStubContainerManager(), allocatableReservation: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI), }, capacity: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI), v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI), }, } kubeClient := testKubelet.fakeKubeClient existingNode := v1.Node{ ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}, Spec: v1.NodeSpec{}, Status: v1.NodeStatus{ Conditions: 
[]v1.NodeCondition{ { Type: v1.NodeOutOfDisk, Status: v1.ConditionFalse, Reason: "KubeletHasSufficientDisk", Message: fmt.Sprintf("kubelet has sufficient disk space available"), LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, { Type: v1.NodeMemoryPressure, Status: v1.ConditionFalse, Reason: "KubeletHasSufficientMemory", Message: fmt.Sprintf("kubelet has sufficient memory available"), LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, { Type: v1.NodeDiskPressure, Status: v1.ConditionFalse, Reason: "KubeletHasSufficientDisk", Message: fmt.Sprintf("kubelet has sufficient disk space available"), LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, { Type: v1.NodePIDPressure, Status: v1.ConditionFalse, Reason: "KubeletHasSufficientPID", Message: fmt.Sprintf("kubelet has sufficient PID available"), LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, { Type: v1.NodeReady, Status: v1.ConditionTrue, Reason: "KubeletReady", Message: fmt.Sprintf("kubelet is posting ready status"), LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), }, }, Capacity: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(3000, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI), v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), }, Allocatable: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(2800, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI), v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), }, }, } kubeClient.ReactionChain = 
fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain mockCadvisor := testKubelet.fakeCadvisor mockCadvisor.On("Start").Return(nil) machineInfo := &cadvisorapi.MachineInfo{ MachineID: "123", SystemUUID: "abc", BootID: "1b3", NumCores: 2, MemoryCapacity: 20E9, } mockCadvisor.On("MachineInfo").Return(machineInfo, nil) versionInfo := &cadvisorapi.VersionInfo{ KernelVersion: "3.16.0-0.bpo.4-amd64", ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)", } mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{ Usage: 400, Capacity: 5000, Available: 600, }, nil) mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{ Usage: 400, Capacity: 5000, Available: 600, }, nil) mockCadvisor.On("VersionInfo").Return(versionInfo, nil) maxAge := 0 * time.Second options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge} mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil) kubelet.machineInfo = machineInfo expectedNode := &v1.Node{ ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}, Spec: v1.NodeSpec{}, Status: v1.NodeStatus{ Conditions: []v1.NodeCondition{ { Type: v1.NodeOutOfDisk, Status: v1.ConditionFalse, Reason: "KubeletHasSufficientDisk", Message: fmt.Sprintf("kubelet has sufficient disk space available"), LastHeartbeatTime: metav1.Time{}, LastTransitionTime: metav1.Time{}, }, { Type: v1.NodeMemoryPressure, Status: v1.ConditionFalse, Reason: "KubeletHasSufficientMemory", Message: fmt.Sprintf("kubelet has sufficient memory available"), LastHeartbeatTime: metav1.Time{}, LastTransitionTime: metav1.Time{}, }, { Type: v1.NodeDiskPressure, Status: v1.ConditionFalse, Reason: "KubeletHasSufficientDisk", Message: fmt.Sprintf("kubelet has sufficient disk space available"), LastHeartbeatTime: metav1.Time{}, LastTransitionTime: metav1.Time{}, }, { Type: v1.NodePIDPressure, Status: v1.ConditionFalse, Reason: "KubeletHasSufficientPID", Message: 
fmt.Sprintf("kubelet has sufficient PID available"), LastHeartbeatTime: metav1.Time{}, LastTransitionTime: metav1.Time{}, }, { Type: v1.NodeReady, Status: v1.ConditionTrue, Reason: "KubeletReady", Message: fmt.Sprintf("kubelet is posting ready status"), LastHeartbeatTime: metav1.Time{}, // placeholder LastTransitionTime: metav1.Time{}, // placeholder }, }, NodeInfo: v1.NodeSystemInfo{ MachineID: "123", SystemUUID: "abc", BootID: "1b3", KernelVersion: "3.16.0-0.bpo.4-amd64", OSImage: "Debian GNU/Linux 7 (wheezy)", OperatingSystem: goruntime.GOOS, Architecture: goruntime.GOARCH, ContainerRuntimeVersion: "test://1.5.0", KubeletVersion: version.Get().String(), KubeProxyVersion: version.Get().String(), }, Capacity: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI), v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI), }, Allocatable: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI), v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI), }, Addresses: []v1.NodeAddress{ {Type: v1.NodeInternalIP, Address: "127.0.0.1"}, {Type: v1.NodeHostName, Address: testKubeletHostname}, }, // images will be sorted from max to min in node status. 
Images: []v1.ContainerImage{ { Names: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"}, SizeBytes: 123, }, { Names: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"}, SizeBytes: 456, }, }, }, } kubelet.updateRuntimeUp() assert.NoError(t, kubelet.updateNodeStatus()) actions := kubeClient.Actions() assert.Len(t, actions, 2) assert.IsType(t, core.PatchActionImpl{}, actions[1]) patchAction := actions[1].(core.PatchActionImpl) updatedNode, err := applyNodeStatusPatch(&existingNode, patchAction.GetPatch()) require.NoError(t, err) for i, cond := range updatedNode.Status.Conditions { old := metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time // Expect LastHearbeat to be updated to Now, while LastTransitionTime to be the same. assert.NotEqual(t, old, cond.LastHeartbeatTime.Rfc3339Copy().UTC(), "LastHeartbeatTime for condition %v", cond.Type) assert.EqualValues(t, old, cond.LastTransitionTime.Rfc3339Copy().UTC(), "LastTransitionTime for condition %v", cond.Type) updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{} updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{} } // Version skew workaround. 
See: https://github.com/kubernetes/kubernetes/issues/16961 assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type, "NodeReady should be the last condition") assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode)) } func TestUpdateExistingNodeStatusTimeout(t *testing.T) { attempts := int64(0) // set up a listener that hangs connections ln, err := net.Listen("tcp", "127.0.0.1:0") assert.NoError(t, err) defer ln.Close() go func() { // accept connections and just let them hang for { _, err := ln.Accept() if err != nil { t.Log(err) return } t.Log("accepted connection") atomic.AddInt64(&attempts, 1) } }() config := &rest.Config{ Host: "http://" + ln.Addr().String(), QPS: -1, Timeout: time.Second, } assert.NoError(t, err) testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() kubelet := testKubelet.kubelet kubelet.kubeClient = nil // ensure only the heartbeat client is used kubelet.heartbeatClient, err = v1core.NewForConfig(config) kubelet.containerManager = &localCM{ ContainerManager: cm.NewStubContainerManager(), allocatableReservation: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI), }, capacity: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI), }, } // should return an error, but not hang assert.Error(t, kubelet.updateNodeStatus()) // should have attempted multiple times if actualAttempts := atomic.LoadInt64(&attempts); actualAttempts != nodeStatusUpdateRetry { t.Errorf("Expected %d attempts, got %d", nodeStatusUpdateRetry, actualAttempts) } } func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer 
testKubelet.Cleanup() kubelet := testKubelet.kubelet kubelet.kubeClient = nil // ensure only the heartbeat client is used kubelet.containerManager = &localCM{ ContainerManager: cm.NewStubContainerManager(), allocatableReservation: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI), v1.ResourceEphemeralStorage: *resource.NewQuantity(10E9, resource.BinarySI), }, capacity: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), v1.ResourceEphemeralStorage: *resource.NewQuantity(20E9, resource.BinarySI), }, } clock := testKubelet.fakeClock kubeClient := testKubelet.fakeKubeClient existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}} kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain mockCadvisor := testKubelet.fakeCadvisor mockCadvisor.On("Start").Return(nil) machineInfo := &cadvisorapi.MachineInfo{ MachineID: "123", SystemUUID: "abc", BootID: "1b3", NumCores: 2, MemoryCapacity: 10E9, } mockCadvisor.On("MachineInfo").Return(machineInfo, nil) versionInfo := &cadvisorapi.VersionInfo{ KernelVersion: "3.16.0-0.bpo.4-amd64", ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)", } mockCadvisor.On("VersionInfo").Return(versionInfo, nil) maxAge := 0 * time.Second options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge} mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil) mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{ Usage: 400, Capacity: 10E9, }, nil) mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{ Usage: 400, Capacity: 20E9, }, nil) kubelet.machineInfo = machineInfo expectedNode := &v1.Node{ ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}, Spec: 
v1.NodeSpec{}, Status: v1.NodeStatus{ Conditions: []v1.NodeCondition{ { Type: v1.NodeOutOfDisk, Status: v1.ConditionFalse, Reason: "KubeletHasSufficientDisk", Message: fmt.Sprintf("kubelet has sufficient disk space available"), LastHeartbeatTime: metav1.Time{}, LastTransitionTime: metav1.Time{}, }, { Type: v1.NodeMemoryPressure, Status: v1.ConditionFalse, Reason: "KubeletHasSufficientMemory", Message: fmt.Sprintf("kubelet has sufficient memory available"), LastHeartbeatTime: metav1.Time{}, LastTransitionTime: metav1.Time{}, }, { Type: v1.NodeDiskPressure, Status: v1.ConditionFalse, Reason: "KubeletHasNoDiskPressure", Message: fmt.Sprintf("kubelet has no disk pressure"), LastHeartbeatTime: metav1.Time{}, LastTransitionTime: metav1.Time{}, }, { Type: v1.NodePIDPressure, Status: v1.ConditionFalse, Reason: "KubeletHasSufficientPID", Message: fmt.Sprintf("kubelet has sufficient PID available"), LastHeartbeatTime: metav1.Time{}, LastTransitionTime: metav1.Time{}, }, {}, //placeholder }, NodeInfo: v1.NodeSystemInfo{ MachineID: "123", SystemUUID: "abc", BootID: "1b3", KernelVersion: "3.16.0-0.bpo.4-amd64", OSImage: "Debian GNU/Linux 7 (wheezy)", OperatingSystem: goruntime.GOOS, Architecture: goruntime.GOARCH, ContainerRuntimeVersion: "test://1.5.0", KubeletVersion: version.Get().String(), KubeProxyVersion: version.Get().String(), }, Capacity: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), v1.ResourceEphemeralStorage: *resource.NewQuantity(20E9, resource.BinarySI), }, Allocatable: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI), v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), v1.ResourceEphemeralStorage: *resource.NewQuantity(10E9, resource.BinarySI), }, Addresses: []v1.NodeAddress{ 
{Type: v1.NodeInternalIP, Address: "127.0.0.1"}, {Type: v1.NodeHostName, Address: testKubeletHostname}, }, Images: []v1.ContainerImage{ { Names: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"}, SizeBytes: 123, }, { Names: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"}, SizeBytes: 456, }, }, }, } checkNodeStatus := func(status v1.ConditionStatus, reason string) { kubeClient.ClearActions() assert.NoError(t, kubelet.updateNodeStatus()) actions := kubeClient.Actions() require.Len(t, actions, 2) require.True(t, actions[1].Matches("patch", "nodes")) require.Equal(t, actions[1].GetSubresource(), "status") updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch()) require.NoError(t, err, "can't apply node status patch") for i, cond := range updatedNode.Status.Conditions { assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type) assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type) updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{} updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{} } // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961 lastIndex := len(updatedNode.Status.Conditions) - 1 assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[lastIndex].Type, "NodeReady should be the last condition") assert.NotEmpty(t, updatedNode.Status.Conditions[lastIndex].Message) updatedNode.Status.Conditions[lastIndex].Message = "" expectedNode.Status.Conditions[lastIndex] = v1.NodeCondition{ Type: v1.NodeReady, Status: status, Reason: reason, LastHeartbeatTime: metav1.Time{}, LastTransitionTime: metav1.Time{}, } assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode)) } // TODO(random-liu): Refactor the unit test to be table driven test. 
// Should report kubelet not ready if the runtime check is out of date clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime)) kubelet.updateRuntimeUp() checkNodeStatus(v1.ConditionFalse, "KubeletNotReady") // Should report kubelet ready if the runtime check is updated clock.SetTime(time.Now()) kubelet.updateRuntimeUp() checkNodeStatus(v1.ConditionTrue, "KubeletReady") // Should report kubelet not ready if the runtime check is out of date clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime)) kubelet.updateRuntimeUp() checkNodeStatus(v1.ConditionFalse, "KubeletNotReady") // Should report kubelet not ready if the runtime check failed fakeRuntime := testKubelet.fakeRuntime // Inject error into fake runtime status check, node should be NotReady fakeRuntime.StatusErr = fmt.Errorf("injected runtime status error") clock.SetTime(time.Now()) kubelet.updateRuntimeUp() checkNodeStatus(v1.ConditionFalse, "KubeletNotReady") fakeRuntime.StatusErr = nil // Should report node not ready if runtime status is nil. fakeRuntime.RuntimeStatus = nil kubelet.updateRuntimeUp() checkNodeStatus(v1.ConditionFalse, "KubeletNotReady") // Should report node not ready if runtime status is empty. fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{} kubelet.updateRuntimeUp() checkNodeStatus(v1.ConditionFalse, "KubeletNotReady") // Should report node not ready if RuntimeReady is false. fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{ Conditions: []kubecontainer.RuntimeCondition{ {Type: kubecontainer.RuntimeReady, Status: false}, {Type: kubecontainer.NetworkReady, Status: true}, }, } kubelet.updateRuntimeUp() checkNodeStatus(v1.ConditionFalse, "KubeletNotReady") // Should report node ready if RuntimeReady is true. 
fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{ Conditions: []kubecontainer.RuntimeCondition{ {Type: kubecontainer.RuntimeReady, Status: true}, {Type: kubecontainer.NetworkReady, Status: true}, }, } kubelet.updateRuntimeUp() checkNodeStatus(v1.ConditionTrue, "KubeletReady") // Should report node not ready if NetworkReady is false. fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{ Conditions: []kubecontainer.RuntimeCondition{ {Type: kubecontainer.RuntimeReady, Status: true}, {Type: kubecontainer.NetworkReady, Status: false}, }, } kubelet.updateRuntimeUp() checkNodeStatus(v1.ConditionFalse, "KubeletNotReady") } func TestUpdateNodeStatusError(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() kubelet := testKubelet.kubelet kubelet.kubeClient = nil // ensure only the heartbeat client is used // No matching node for the kubelet testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{}}).ReactionChain assert.Error(t, kubelet.updateNodeStatus()) assert.Len(t, testKubelet.fakeKubeClient.Actions(), nodeStatusUpdateRetry) } func TestRegisterWithApiServer(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() kubelet := testKubelet.kubelet kubeClient := testKubelet.fakeKubeClient kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) { // Return an error on create. return true, &v1.Node{}, &apierrors.StatusError{ ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists}, } }) kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) { // Return an existing (matching) node on get. 
return true, &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: testKubeletHostname, Labels: map[string]string{ kubeletapis.LabelHostname: testKubeletHostname, kubeletapis.LabelOS: goruntime.GOOS, kubeletapis.LabelArch: goruntime.GOARCH, }, }, Spec: v1.NodeSpec{ExternalID: testKubeletHostname}, }, nil }) kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) { return true, nil, fmt.Errorf("no reaction implemented for %s", action) }) machineInfo := &cadvisorapi.MachineInfo{ MachineID: "123", SystemUUID: "abc", BootID: "1b3", NumCores: 2, MemoryCapacity: 1024, } mockCadvisor := testKubelet.fakeCadvisor mockCadvisor.On("MachineInfo").Return(machineInfo, nil) versionInfo := &cadvisorapi.VersionInfo{ KernelVersion: "3.16.0-0.bpo.4-amd64", ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)", DockerVersion: "1.5.0", } mockCadvisor.On("VersionInfo").Return(versionInfo, nil) mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{ Usage: 400, Capacity: 1000, Available: 600, }, nil) mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{ Usage: 9, Capacity: 10, }, nil) kubelet.machineInfo = machineInfo done := make(chan struct{}) go func() { kubelet.registerWithAPIServer() done <- struct{}{} }() select { case <-time.After(wait.ForeverTestTimeout): assert.Fail(t, "timed out waiting for registration") case <-done: return } } func TestTryRegisterWithApiServer(t *testing.T) { alreadyExists := &apierrors.StatusError{ ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists}, } conflict := &apierrors.StatusError{ ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}, } newNode := func(cmad bool, externalID string) *v1.Node { node := &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ kubeletapis.LabelHostname: testKubeletHostname, kubeletapis.LabelOS: goruntime.GOOS, kubeletapis.LabelArch: goruntime.GOARCH, }, }, Spec: v1.NodeSpec{ ExternalID: externalID, }, } if cmad { node.Annotations = make(map[string]string) 
node.Annotations[util.ControllerManagedAttachAnnotation] = "true" } return node } cases := []struct { name string newNode *v1.Node existingNode *v1.Node createError error getError error patchError error deleteError error expectedResult bool expectedActions int testSavedNode bool savedNodeIndex int savedNodeCMAD bool }{ { name: "success case - new node", newNode: &v1.Node{}, expectedResult: true, expectedActions: 1, }, { name: "success case - existing node - no change in CMAD", newNode: newNode(true, "a"), createError: alreadyExists, existingNode: newNode(true, "a"), expectedResult: true, expectedActions: 2, }, { name: "success case - existing node - CMAD disabled", newNode: newNode(false, "a"), createError: alreadyExists, existingNode: newNode(true, "a"), expectedResult: true, expectedActions: 3, testSavedNode: true, savedNodeIndex: 2, savedNodeCMAD: false, }, { name: "success case - existing node - CMAD enabled", newNode: newNode(true, "a"), createError: alreadyExists, existingNode: newNode(false, "a"), expectedResult: true, expectedActions: 3, testSavedNode: true, savedNodeIndex: 2, savedNodeCMAD: true, }, { name: "success case - external ID changed", newNode: newNode(false, "b"), createError: alreadyExists, existingNode: newNode(false, "a"), expectedResult: false, expectedActions: 3, }, { name: "create failed", newNode: newNode(false, "b"), createError: conflict, expectedResult: false, expectedActions: 1, }, { name: "get existing node failed", newNode: newNode(false, "a"), createError: alreadyExists, getError: conflict, expectedResult: false, expectedActions: 2, }, { name: "update existing node failed", newNode: newNode(false, "a"), createError: alreadyExists, existingNode: newNode(true, "a"), patchError: conflict, expectedResult: false, expectedActions: 3, }, { name: "delete existing node failed", newNode: newNode(false, "b"), createError: alreadyExists, existingNode: newNode(false, "a"), deleteError: conflict, expectedResult: false, expectedActions: 3, }, } 
notImplemented := func(action core.Action) (bool, runtime.Object, error) { return true, nil, fmt.Errorf("no reaction implemented for %s", action) } for _, tc := range cases { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled is a don't-care for this test */) defer testKubelet.Cleanup() kubelet := testKubelet.kubelet kubeClient := testKubelet.fakeKubeClient kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) { return true, nil, tc.createError }) kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) { // Return an existing (matching) node on get. return true, tc.existingNode, tc.getError }) kubeClient.AddReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) { if action.GetSubresource() == "status" { return true, nil, tc.patchError } return notImplemented(action) }) kubeClient.AddReactor("delete", "nodes", func(action core.Action) (bool, runtime.Object, error) { return true, nil, tc.deleteError }) kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) { return notImplemented(action) }) result := kubelet.tryRegisterWithAPIServer(tc.newNode) require.Equal(t, tc.expectedResult, result, "test [%s]", tc.name) actions := kubeClient.Actions() assert.Len(t, actions, tc.expectedActions, "test [%s]", tc.name) if tc.testSavedNode { var savedNode *v1.Node t.Logf("actions: %v: %+v", len(actions), actions) action := actions[tc.savedNodeIndex] if action.GetVerb() == "create" { createAction := action.(core.CreateAction) obj := createAction.GetObject() require.IsType(t, &v1.Node{}, obj) savedNode = obj.(*v1.Node) } else if action.GetVerb() == "patch" { patchAction := action.(core.PatchActionImpl) var err error savedNode, err = applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch()) require.NoError(t, err) } actualCMAD, _ := strconv.ParseBool(savedNode.Annotations[util.ControllerManagedAttachAnnotation]) 
assert.Equal(t, tc.savedNodeCMAD, actualCMAD, "test [%s]", tc.name) } } } func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) { // generate one more than maxImagesInNodeStatus in inputImageList inputImageList, _ := generateTestingImageList(maxImagesInNodeStatus + 1) testKubelet := newTestKubeletWithImageList( t, inputImageList, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() kubelet := testKubelet.kubelet kubelet.kubeClient = nil // ensure only the heartbeat client is used kubelet.containerManager = &localCM{ ContainerManager: cm.NewStubContainerManager(), allocatableReservation: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(40000, resource.DecimalSI), v1.ResourceEphemeralStorage: *resource.NewQuantity(1000, resource.BinarySI), }, capacity: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI), }, } kubeClient := testKubelet.fakeKubeClient existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}} kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain machineInfo := &cadvisorapi.MachineInfo{ MachineID: "123", SystemUUID: "abc", BootID: "1b3", NumCores: 2, MemoryCapacity: 10E9, // 10G } mockCadvisor := testKubelet.fakeCadvisor mockCadvisor.On("Start").Return(nil) mockCadvisor.On("MachineInfo").Return(machineInfo, nil) versionInfo := &cadvisorapi.VersionInfo{ KernelVersion: "3.16.0-0.bpo.4-amd64", ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)", } mockCadvisor.On("VersionInfo").Return(versionInfo, nil) maxAge := 0 * time.Second options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge} mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil) 
mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{ Usage: 400, Capacity: 3000, Available: 600, }, nil) mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{ Usage: 400, Capacity: 3000, Available: 600, }, nil) kubelet.machineInfo = machineInfo expectedNode := &v1.Node{ ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}, Spec: v1.NodeSpec{}, Status: v1.NodeStatus{ Capacity: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI), }, Allocatable: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(0, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI), }, }, } kubelet.updateRuntimeUp() assert.NoError(t, kubelet.updateNodeStatus()) actions := kubeClient.Actions() require.Len(t, actions, 2) require.True(t, actions[1].Matches("patch", "nodes")) require.Equal(t, actions[1].GetSubresource(), "status") updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch()) assert.NoError(t, err) assert.True(t, apiequality.Semantic.DeepEqual(expectedNode.Status.Allocatable, updatedNode.Status.Allocatable), "%s", diff.ObjectDiff(expectedNode.Status.Allocatable, updatedNode.Status.Allocatable)) } func TestUpdateDefaultLabels(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) testKubelet.kubelet.kubeClient = nil // ensure only the heartbeat client is used cases := []struct { name string initialNode *v1.Node existingNode *v1.Node needsUpdate bool finalLabels map[string]string }{ { name: "make sure default labels exist", initialNode: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ 
Labels: map[string]string{ kubeletapis.LabelHostname: "new-hostname", kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", kubeletapis.LabelZoneRegion: "new-zone-region", kubeletapis.LabelInstanceType: "new-instance-type", kubeletapis.LabelOS: "new-os", kubeletapis.LabelArch: "new-arch", }, }, }, existingNode: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{}, }, }, needsUpdate: true, finalLabels: map[string]string{ kubeletapis.LabelHostname: "new-hostname", kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", kubeletapis.LabelZoneRegion: "new-zone-region", kubeletapis.LabelInstanceType: "new-instance-type", kubeletapis.LabelOS: "new-os", kubeletapis.LabelArch: "new-arch", }, }, { name: "make sure default labels are up to date", initialNode: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ kubeletapis.LabelHostname: "new-hostname", kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", kubeletapis.LabelZoneRegion: "new-zone-region", kubeletapis.LabelInstanceType: "new-instance-type", kubeletapis.LabelOS: "new-os", kubeletapis.LabelArch: "new-arch", }, }, }, existingNode: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ kubeletapis.LabelHostname: "old-hostname", kubeletapis.LabelZoneFailureDomain: "old-zone-failure-domain", kubeletapis.LabelZoneRegion: "old-zone-region", kubeletapis.LabelInstanceType: "old-instance-type", kubeletapis.LabelOS: "old-os", kubeletapis.LabelArch: "old-arch", }, }, }, needsUpdate: true, finalLabels: map[string]string{ kubeletapis.LabelHostname: "new-hostname", kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", kubeletapis.LabelZoneRegion: "new-zone-region", kubeletapis.LabelInstanceType: "new-instance-type", kubeletapis.LabelOS: "new-os", kubeletapis.LabelArch: "new-arch", }, }, { name: "make sure existing labels do not get deleted", initialNode: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ kubeletapis.LabelHostname: 
"new-hostname", kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", kubeletapis.LabelZoneRegion: "new-zone-region", kubeletapis.LabelInstanceType: "new-instance-type", kubeletapis.LabelOS: "new-os", kubeletapis.LabelArch: "new-arch", }, }, }, existingNode: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ kubeletapis.LabelHostname: "new-hostname", kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", kubeletapis.LabelZoneRegion: "new-zone-region", kubeletapis.LabelInstanceType: "new-instance-type", kubeletapis.LabelOS: "new-os", kubeletapis.LabelArch: "new-arch", "please-persist": "foo", }, }, }, needsUpdate: false, finalLabels: map[string]string{ kubeletapis.LabelHostname: "new-hostname", kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", kubeletapis.LabelZoneRegion: "new-zone-region", kubeletapis.LabelInstanceType: "new-instance-type", kubeletapis.LabelOS: "new-os", kubeletapis.LabelArch: "new-arch", "please-persist": "foo", }, }, { name: "make sure existing labels do not get deleted when initial node has no opinion", initialNode: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{}, }, }, existingNode: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ kubeletapis.LabelHostname: "new-hostname", kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", kubeletapis.LabelZoneRegion: "new-zone-region", kubeletapis.LabelInstanceType: "new-instance-type", kubeletapis.LabelOS: "new-os", kubeletapis.LabelArch: "new-arch", "please-persist": "foo", }, }, }, needsUpdate: false, finalLabels: map[string]string{ kubeletapis.LabelHostname: "new-hostname", kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", kubeletapis.LabelZoneRegion: "new-zone-region", kubeletapis.LabelInstanceType: "new-instance-type", kubeletapis.LabelOS: "new-os", kubeletapis.LabelArch: "new-arch", "please-persist": "foo", }, }, { name: "no update needed", initialNode: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ 
Labels: map[string]string{ kubeletapis.LabelHostname: "new-hostname", kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", kubeletapis.LabelZoneRegion: "new-zone-region", kubeletapis.LabelInstanceType: "new-instance-type", kubeletapis.LabelOS: "new-os", kubeletapis.LabelArch: "new-arch", }, }, }, existingNode: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ kubeletapis.LabelHostname: "new-hostname", kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", kubeletapis.LabelZoneRegion: "new-zone-region", kubeletapis.LabelInstanceType: "new-instance-type", kubeletapis.LabelOS: "new-os", kubeletapis.LabelArch: "new-arch", }, }, }, needsUpdate: false, finalLabels: map[string]string{ kubeletapis.LabelHostname: "new-hostname", kubeletapis.LabelZoneFailureDomain: "new-zone-failure-domain", kubeletapis.LabelZoneRegion: "new-zone-region", kubeletapis.LabelInstanceType: "new-instance-type", kubeletapis.LabelOS: "new-os", kubeletapis.LabelArch: "new-arch", }, }, } for _, tc := range cases { defer testKubelet.Cleanup() kubelet := testKubelet.kubelet needsUpdate := kubelet.updateDefaultLabels(tc.initialNode, tc.existingNode) assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name) assert.Equal(t, tc.finalLabels, tc.existingNode.Labels, tc.name) } } func TestValidateNodeIPParam(t *testing.T) { type test struct { nodeIP string success bool testName string } tests := []test{ { nodeIP: "", success: false, testName: "IP not set", }, { nodeIP: "127.0.0.1", success: false, testName: "IPv4 loopback address", }, { nodeIP: "::1", success: false, testName: "IPv6 loopback address", }, { nodeIP: "224.0.0.1", success: false, testName: "multicast IPv4 address", }, { nodeIP: "ff00::1", success: false, testName: "multicast IPv6 address", }, { nodeIP: "169.254.0.1", success: false, testName: "IPv4 link-local unicast address", }, { nodeIP: "fe80::0202:b3ff:fe1e:8329", success: false, testName: "IPv6 link-local unicast address", }, { nodeIP: "0.0.0.0", success: 
false, testName: "Unspecified IPv4 address", }, { nodeIP: "::", success: false, testName: "Unspecified IPv6 address", }, { nodeIP: "1.2.3.4", success: false, testName: "IPv4 address that doesn't belong to host", }, } addrs, err := net.InterfaceAddrs() if err != nil { assert.Error(t, err, fmt.Sprintf( "Unable to obtain a list of the node's unicast interface addresses.")) } for _, addr := range addrs { var ip net.IP switch v := addr.(type) { case *net.IPNet: ip = v.IP case *net.IPAddr: ip = v.IP } if ip.IsLoopback() || ip.IsLinkLocalUnicast() { break } successTest := test{ nodeIP: ip.String(), success: true, testName: fmt.Sprintf("Success test case for address %s", ip.String()), } tests = append(tests, successTest) } for _, test := range tests { err := validateNodeIP(net.ParseIP(test.nodeIP)) if test.success { assert.NoError(t, err, "test %s", test.testName) } else { assert.Error(t, err, fmt.Sprintf("test %s", test.testName)) } } }
apache-2.0
langfr/camunda-bpm-platform
model-api/bpmn-model/src/main/java/org/camunda/bpm/model/bpmn/impl/instance/ErrorRef.java
2006
/* * Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH * under one or more contributor license agreements. See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. Camunda licenses this file to you under the Apache License, * Version 2.0; you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.camunda.bpm.model.bpmn.impl.instance; import org.camunda.bpm.model.xml.ModelBuilder; import org.camunda.bpm.model.xml.impl.instance.ModelTypeInstanceContext; import org.camunda.bpm.model.xml.type.ModelElementTypeBuilder; import static org.camunda.bpm.model.bpmn.impl.BpmnModelConstants.BPMN20_NS; import static org.camunda.bpm.model.bpmn.impl.BpmnModelConstants.BPMN_ELEMENT_ERROR_REF; import static org.camunda.bpm.model.xml.type.ModelElementTypeBuilder.ModelTypeInstanceProvider; /** * The BPMN errorRef element of the BPMN tOperation type * * @author Sebastian Menski */ public class ErrorRef extends BpmnModelElementInstanceImpl { public static void registerType(ModelBuilder modelBuilder) { ModelElementTypeBuilder typeBuilder = modelBuilder.defineType(ErrorRef.class, BPMN_ELEMENT_ERROR_REF) .namespaceUri(BPMN20_NS) .instanceProvider(new ModelTypeInstanceProvider<ErrorRef>() { public ErrorRef newInstance(ModelTypeInstanceContext instanceContext) { return new ErrorRef(instanceContext); } }); typeBuilder.build(); } public ErrorRef(ModelTypeInstanceContext instanceContext) { super(instanceContext); } }
apache-2.0
liquidm/druid
extensions-core/druid-basic-security/src/main/java/org/apache/druid/security/basic/authorization/db/cache/CoordinatorBasicAuthorizerCacheNotifier.java
4009
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.security.basic.authorization.db.cache;

import com.google.common.base.Preconditions;
import com.google.inject.Inject;
import org.apache.druid.concurrent.LifecycleLock;
import org.apache.druid.discovery.DruidNodeDiscoveryProvider;
import org.apache.druid.guice.ManageLifecycle;
import org.apache.druid.guice.annotations.EscalatedClient;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.lifecycle.LifecycleStart;
import org.apache.druid.java.util.common.lifecycle.LifecycleStop;
import org.apache.druid.java.util.http.client.HttpClient;
import org.apache.druid.security.basic.BasicAuthDBConfig;
import org.apache.druid.security.basic.CommonCacheNotifier;
import org.apache.druid.security.basic.authorization.BasicRoleBasedAuthorizer;
import org.apache.druid.server.security.Authorizer;
import org.apache.druid.server.security.AuthorizerMapper;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

/**
 * Coordinator-side notifier that pushes authorization (role/user map) updates to
 * listening nodes. Delegates the actual fan-out to a {@link CommonCacheNotifier}
 * configured with the listen endpoint of the basic-security extension.
 */
@ManageLifecycle
public class CoordinatorBasicAuthorizerCacheNotifier implements BasicAuthorizerCacheNotifier
{
  private final LifecycleLock lifecycleLock = new LifecycleLock();

  // Assigned exactly once in the constructor; final guarantees safe publication of
  // this @ManageLifecycle singleton's delegate across threads.
  private final CommonCacheNotifier cacheNotifier;

  @Inject
  public CoordinatorBasicAuthorizerCacheNotifier(
      AuthorizerMapper authorizerMapper,
      DruidNodeDiscoveryProvider discoveryProvider,
      @EscalatedClient HttpClient httpClient
  )
  {
    cacheNotifier = new CommonCacheNotifier(
        getAuthorizerConfigMap(authorizerMapper),
        discoveryProvider,
        httpClient,
        "/druid-ext/basic-security/authorization/listen/%s",
        "CoordinatorBasicAuthorizerCacheNotifier"
    );
  }

  @LifecycleStart
  public void start()
  {
    if (!lifecycleLock.canStart()) {
      throw new ISE("can't start.");
    }
    try {
      cacheNotifier.start();
      lifecycleLock.started();
    }
    finally {
      // exitStart() must run whether or not started() was reached, per LifecycleLock contract.
      lifecycleLock.exitStart();
    }
  }

  @LifecycleStop
  public void stop()
  {
    if (!lifecycleLock.canStop()) {
      return;
    }
    try {
      cacheNotifier.stop();
    }
    finally {
      lifecycleLock.exitStop();
    }
  }

  /**
   * Queues a serialized user-map update for the given authorizer prefix.
   * Callers must only invoke this after the lifecycle has started.
   */
  @Override
  public void addUpdate(String updatedAuthorizerPrefix, byte[] updatedUserMap)
  {
    Preconditions.checkState(lifecycleLock.awaitStarted(1, TimeUnit.MILLISECONDS));
    cacheNotifier.addUpdate(updatedAuthorizerPrefix, updatedUserMap);
  }

  /**
   * Extracts the DB config for every {@link BasicRoleBasedAuthorizer} in the mapper;
   * authorizers of other types are ignored (they are not managed by this extension).
   */
  private Map<String, BasicAuthDBConfig> getAuthorizerConfigMap(AuthorizerMapper mapper)
  {
    Preconditions.checkNotNull(mapper);
    Preconditions.checkNotNull(mapper.getAuthorizerMap());

    Map<String, BasicAuthDBConfig> authorizerConfigMap = new HashMap<>();

    for (Map.Entry<String, Authorizer> entry : mapper.getAuthorizerMap().entrySet()) {
      Authorizer authorizer = entry.getValue();
      if (authorizer instanceof BasicRoleBasedAuthorizer) {
        String authorizerName = entry.getKey();
        BasicRoleBasedAuthorizer basicRoleBasedAuthorizer = (BasicRoleBasedAuthorizer) authorizer;
        BasicAuthDBConfig dbConfig = basicRoleBasedAuthorizer.getDbConfig();
        authorizerConfigMap.put(authorizerName, dbConfig);
      }
    }

    return authorizerConfigMap;
  }
}
apache-2.0
OSS-TheWeatherCompany/dasein-cloud-test
src/main/java/org/dasein/cloud/test/ci/CIResources.java
7372
/**
 * Copyright (C) 2009-2014 Dell, Inc.
 * See annotations for authorship information
 *
 * ====================================================================
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * ====================================================================
 */
package org.dasein.cloud.test.ci;

import org.apache.log4j.Logger;
import org.dasein.cloud.CloudProvider;
import org.dasein.cloud.ci.CIServices;
import org.dasein.cloud.ci.ConvergedInfrastructure;
import org.dasein.cloud.ci.ConvergedInfrastructureSupport;
import org.dasein.cloud.ci.Topology;
import org.dasein.cloud.ci.TopologyState;
import org.dasein.cloud.ci.TopologySupport;
import org.dasein.cloud.test.DaseinTestManager;

import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.HashMap;
import java.util.Map;

/**
 * Tracks converged-infrastructure (CI) and topology resources provisioned for tests,
 * so they can be cleaned up ({@link #close()}) and reported ({@link #report()}) when a
 * test run ends. Resources are keyed by a test label; the special labels
 * {@code DaseinTestManager.STATELESS} and {@code DaseinTestManager.REMOVED} mark
 * resources that must not be de-provisioned.
 * <p>Created by George Reese: 6/3/13 4:50 PM</p>
 *
 * @author George Reese
 */
public class CIResources {
    static private final Logger logger = Logger.getLogger(CIResources.class);

    // Cloud connection used to look up CI/topology support services.
    private CloudProvider provider;
    // label -> provider CI id for infrastructures provisioned during this run
    private final HashMap<String,String> testInfrastructures = new HashMap<String, String>();
    // label -> provider topology id for topologies discovered/provisioned during this run
    private final HashMap<String,String> testTopologies = new HashMap<String, String>();

    public CIResources(@Nonnull CloudProvider provider) {
        this.provider = provider;
    }

    /**
     * Best-effort teardown of every provisioned (non-stateless) test resource.
     * Failures on individual resources are logged and do not abort the sweep.
     *
     * @return the number of resources accounted for (terminated or already gone)
     */
    public int close() {
        CIServices ciServices = provider.getCIServices();
        int count = 0;

        if( ciServices != null ) {
            ConvergedInfrastructureSupport ciSupport = ciServices.getConvergedInfrastructureSupport();

            if( ciSupport != null ) {
                for( Map.Entry<String,String> entry : testInfrastructures.entrySet() ) {
                    // Stateless resources were found, not provisioned — leave them alone.
                    if( !entry.getKey().equals(DaseinTestManager.STATELESS) ) {
                        try {
                            ConvergedInfrastructure ci = ciSupport.getConvergedInfrastructure(entry.getValue());

                            if( ci != null ) {
                                ciSupport.terminate(entry.getValue(), null);
                                count++;
                            }
                            else {
                                // Already gone; still counts as cleaned up.
                                count++;
                            }
                        }
                        catch( Throwable t ) {
                            // Deliberate best-effort: log and keep sweeping the rest.
                            logger.warn("Failed to de-provision test CI " + entry.getValue() + ": " + t.getMessage());
                        }
                    }
                }
            }
            TopologySupport tSupport = ciServices.getTopologySupport();

            if( tSupport != null ) {
                for( Map.Entry<String,String> entry : testTopologies.entrySet() ) {
                    if( !entry.getKey().equals(DaseinTestManager.STATELESS) ) {
                        try {
                            Topology t = tSupport.getTopology(entry.getValue());

                            if( t != null ) {
                                // TODO: implement this
                                // tSupport.remove(entry.getKey());
                                count++;
                            }
                            else {
                                count++;
                            }
                        }
                        catch( Throwable t ) {
                            logger.warn("Failed to de-provision test topology " + entry.getValue() + ": " + t.getMessage());
                        }
                    }
                }
            }
        }
        return count;
    }

    /**
     * Returns a topology id usable for the given test label. For the STATELESS label,
     * any tracked topology (or one discovered via {@link #findStatelessTopology()}) is
     * returned. Otherwise returns the tracked id for the label, or attempts provisioning
     * when {@code provisionIfNull} is set (provisioning is not yet implemented and
     * currently yields {@code null}).
     *
     * @param label           the test label to look up
     * @param provisionIfNull whether to attempt provisioning when no topology is tracked
     * @return a topology id, or {@code null} if none is available
     */
    public @Nullable String getTestTopologyId(@Nonnull String label, boolean provisionIfNull) {
        if( label.equals(DaseinTestManager.STATELESS) ) {
            for( Map.Entry<String,String> entry : testTopologies.entrySet() ) {
                if( !entry.getKey().startsWith(DaseinTestManager.REMOVED) ) {
                    String id = entry.getValue();

                    if( id != null ) {
                        return id;
                    }
                }
            }
            return findStatelessTopology();
        }
        String id = testTopologies.get(label);

        if( id != null ) {
            return id;
        }
        if( !provisionIfNull ) {
            return null;
        }
        CIServices services = provider.getCIServices();

        if( services != null ) {
            TopologySupport support = services.getTopologySupport();

            if( support != null ) {
                try {
                    // TODO: when support for creating topologies is implemented, use this
                    return null;
                }
                catch( Throwable ignore ) {
                    return null;
                }
            }
        }
        return null;
    }

    /**
     * Discovers an existing topology in the cloud for stateless tests, preferring one
     * in the ACTIVE state but falling back to the first one listed. A match is cached
     * under the STATELESS label. Returns {@code null} when none is found, support is
     * absent/unsubscribed, or listing fails.
     */
    private @Nullable String findStatelessTopology() {
        CIServices services = provider.getCIServices();

        if( services != null ) {
            TopologySupport support = services.getTopologySupport();

            try {
                if( support != null && support.isSubscribed() ) {
                    Topology defaultTopology = null;

                    for( Topology t : support.listTopologies(null) ) {
                        // Prefer an ACTIVE topology; stop at the first one found.
                        if( t.getCurrentState().equals(TopologyState.ACTIVE) ) {
                            defaultTopology = t;
                            break;
                        }
                        // Otherwise remember the first topology seen as a fallback.
                        if( defaultTopology == null ) {
                            defaultTopology = t;
                        }
                    }
                    if( defaultTopology != null ) {
                        String id = defaultTopology.getProviderTopologyId();

                        testTopologies.put(DaseinTestManager.STATELESS, id);
                        return id;
                    }
                }
            }
            catch( Throwable ignore ) {
                // ignore
            }
        }
        return null;
    }

    /**
     * Logs the resources this run provisioned (stateless entries are dropped first,
     * since they were never provisioned by us).
     *
     * @return the number of provisioned resources reported
     */
    public int report() {
        boolean header = false;
        int count = 0;

        testInfrastructures.remove(DaseinTestManager.STATELESS);
        if( !testInfrastructures.isEmpty() ) {
            logger.info("Provisioned CI Resources:");
            header = true;
            count += testInfrastructures.size();
            DaseinTestManager.out(logger, null, "---> Infrastructures", testInfrastructures.size() + " " + testInfrastructures);
        }
        testTopologies.remove(DaseinTestManager.STATELESS);
        if( !testTopologies.isEmpty() ) {
            if( !header ) {
                logger.info("Provisioned CI Resources:");
            }
            count += testTopologies.size();
            DaseinTestManager.out(logger, null, "---> Topologies", testTopologies.size() + " " + testTopologies);
        }
        return count;
    }
}
apache-2.0
cping/RipplePower
eclipse/jcoinlibs/src/org/ripple/bouncycastle/asn1/ocsp/ResponderID.java
2630
package org.ripple.bouncycastle.asn1.ocsp; import org.ripple.bouncycastle.asn1.ASN1Choice; import org.ripple.bouncycastle.asn1.ASN1Encodable; import org.ripple.bouncycastle.asn1.ASN1Object; import org.ripple.bouncycastle.asn1.ASN1OctetString; import org.ripple.bouncycastle.asn1.ASN1Primitive; import org.ripple.bouncycastle.asn1.ASN1TaggedObject; import org.ripple.bouncycastle.asn1.DEROctetString; import org.ripple.bouncycastle.asn1.DERTaggedObject; import org.ripple.bouncycastle.asn1.x500.X500Name; public class ResponderID extends ASN1Object implements ASN1Choice { private ASN1Encodable value; public ResponderID( ASN1OctetString value) { this.value = value; } public ResponderID( X500Name value) { this.value = value; } public static ResponderID getInstance( Object obj) { if (obj instanceof ResponderID) { return (ResponderID)obj; } else if (obj instanceof DEROctetString) { return new ResponderID((DEROctetString)obj); } else if (obj instanceof ASN1TaggedObject) { ASN1TaggedObject o = (ASN1TaggedObject)obj; if (o.getTagNo() == 1) { return new ResponderID(X500Name.getInstance(o, true)); } else { return new ResponderID(ASN1OctetString.getInstance(o, true)); } } return new ResponderID(X500Name.getInstance(obj)); } public static ResponderID getInstance( ASN1TaggedObject obj, boolean explicit) { return getInstance(obj.getObject()); // must be explicitly tagged } public byte[] getKeyHash() { if (this.value instanceof ASN1OctetString) { ASN1OctetString octetString = (ASN1OctetString)this.value; return octetString.getOctets(); } return null; } public X500Name getName() { if (this.value instanceof ASN1OctetString) { return null; } return X500Name.getInstance(value); } /** * Produce an object suitable for an ASN1OutputStream. 
* <pre> * ResponderID ::= CHOICE { * byName [1] Name, * byKey [2] KeyHash } * </pre> */ public ASN1Primitive toASN1Primitive() { if (value instanceof ASN1OctetString) { return new DERTaggedObject(true, 2, value); } return new DERTaggedObject(true, 1, value); } }
apache-2.0
SeleniumHQ/buck
src/com/facebook/buck/distributed/build_slave/MinionWorkloadAllocator.java
11966
/* * Copyright 2016-present Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package com.facebook.buck.distributed.build_slave; import com.facebook.buck.core.util.log.Logger; import com.facebook.buck.distributed.thrift.CoordinatorBuildProgress; import com.facebook.buck.distributed.thrift.MinionType; import com.facebook.buck.distributed.thrift.WorkUnit; import com.google.common.base.Preconditions; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.Queue; import java.util.Set; import java.util.stream.Collectors; /** * Allocates and keeps track of what BuildTargetPaths are allocated to which Minions. This class is * thread safe. */ public class MinionWorkloadAllocator { private static final Logger LOG = Logger.get(MinionWorkloadAllocator.class); private final BuildTargetsQueue queue; private final Set<String> nodesAssignedToMinions = new HashSet<>(); private final Map<String, Set<WorkUnit>> workUnitsAssignedToMinions = new HashMap<>(); // Maps each target to the work unit that contains it. 
private final Map<String, WorkUnit> workUnitsByTarget = new HashMap<>(); // These should be immediately re-assigned when capacity becomes available on other minions private Queue<WorkUnit> workUnitsFromFailedMinions = new LinkedList<>(); private final Set<String> failedMinions = new HashSet<>(); private final Set<String> seenMinions = new HashSet<>(); private final Map<String, MinionType> minionTypesByMinionId = new HashMap<>(); private final Set<String> minionsAvailableForAllocation = new HashSet<>(); private final Map<String, Integer> minionFreeCapacities = new HashMap<>(); private final Optional<String> coordinatorMinionId; private final boolean releasingMinionsEarlyEnabled; private final DistBuildTraceTracker chromeTraceTracker; /** * Result of updating allocation - indicates if minion should be now released (capacity no longer * needed) and contains a list of new work units for the minion to process. */ public static class WorkloadAllocationResult { public final boolean shouldReleaseMinion; public final List<WorkUnit> newWorkUnitsForMinion; public WorkloadAllocationResult( boolean shouldReleaseMinion, List<WorkUnit> newWorkUnitsForMinion) { if (shouldReleaseMinion) { Preconditions.checkArgument(newWorkUnitsForMinion.isEmpty()); } this.shouldReleaseMinion = shouldReleaseMinion; this.newWorkUnitsForMinion = newWorkUnitsForMinion; } } public MinionWorkloadAllocator( BuildTargetsQueue queue, DistBuildTraceTracker chromeTraceTracker, Optional<String> coordinatorMinionId, boolean releasingMinionsEarlyEnabled) { this.queue = queue; this.chromeTraceTracker = chromeTraceTracker; this.coordinatorMinionId = coordinatorMinionId; this.releasingMinionsEarlyEnabled = releasingMinionsEarlyEnabled; } public synchronized boolean isBuildFinished() { return nodesAssignedToMinions.isEmpty() && workUnitsFromFailedMinions.isEmpty() && !queue.hasReadyZeroDependencyNodes(); } private void trySetupMinion(String minionId, MinionType minionType, int maxWorkUnitsToFetch) { if 
(seenMinions.contains(minionId)) { return; } if (!workUnitsAssignedToMinions.containsKey(minionId)) { workUnitsAssignedToMinions.put(minionId, new HashSet<>()); } minionTypesByMinionId.put(minionId, minionType); minionsAvailableForAllocation.add(minionId); minionFreeCapacities.put(minionId, maxWorkUnitsToFetch); seenMinions.add(minionId); } private boolean isMinionCapacityRedundant(String candidateMinionId) { if (!minionsAvailableForAllocation.contains(candidateMinionId)) { return true; } // Never release the coordinator's minion - we gain nothing doing that (machine is still // occupied) and we may get lucky and schedule work there later and release more minions. if (coordinatorMinionId.isPresent() && coordinatorMinionId.get().equals(candidateMinionId)) { return false; } int maxParallelWorkUnitsLeft = queue.getSafeApproxOfRemainingWorkUnitsCount(); int capacityAvailableOnOtherMinions = minionsAvailableForAllocation .stream() .filter(minionId -> !minionId.equals(candidateMinionId)) .mapToInt(minionId -> Objects.requireNonNull(minionFreeCapacities.get(minionId))) .sum(); return maxParallelWorkUnitsLeft <= capacityAvailableOnOtherMinions; } /** * Processes nodes finished by minion and, if minion's capacity is still needed, allocates it new * work units (out of units from failed minions and newly available units). 
*/ public synchronized WorkloadAllocationResult updateMinionWorkloadAllocation( String minionId, MinionType minionType, List<String> finishedNodes, int maxWorkUnitsToFetch) { Preconditions.checkArgument(!failedMinions.contains(minionId)); trySetupMinion(minionId, minionType, maxWorkUnitsToFetch); Set<WorkUnit> workUnitsAllocatedToMinion = Objects.requireNonNull(workUnitsAssignedToMinions.get(minionId)); deallocateFinishedNodes(workUnitsAllocatedToMinion, finishedNodes); // First try and re-allocate work units from any minions that have failed recently List<WorkUnit> newWorkUnitsForMinion = reallocateWorkUnitsFromFailedMinions(minionId, maxWorkUnitsToFetch); // For any remaining capacity on this minion, fetch new work units, if they exist. maxWorkUnitsToFetch -= newWorkUnitsForMinion.size(); newWorkUnitsForMinion.addAll( queue.dequeueZeroDependencyNodes(finishedNodes, maxWorkUnitsToFetch)); List<String> newNodesForMinion = allocateNewNodes(workUnitsAllocatedToMinion, newWorkUnitsForMinion); LOG.info( String.format( "Minion [%s] finished [%s] nodes, and fetched [%s] new nodes. " + "Total nodes assigned to minions [%s]. Unscheduled zero dependency nodes? [%s]", minionId, finishedNodes.size(), newNodesForMinion.size(), nodesAssignedToMinions.size(), queue.hasReadyZeroDependencyNodes())); WorkloadAllocationResult result; // Check if we can release the minion - no work scheduled and capacity not needed anymore. 
if (workUnitsAllocatedToMinion.isEmpty() && releasingMinionsEarlyEnabled && isMinionCapacityRedundant(minionId)) { minionsAvailableForAllocation.remove(minionId); LOG.info( String.format( "Minion [%s] should now be released - capacity unneeded and no work assigned.", minionId)); result = new WorkloadAllocationResult(true, newWorkUnitsForMinion); } else { result = new WorkloadAllocationResult(false, newWorkUnitsForMinion); } minionFreeCapacities.put(minionId, maxWorkUnitsToFetch - result.newWorkUnitsForMinion.size()); chromeTraceTracker.updateWork(minionId, finishedNodes, result.newWorkUnitsForMinion); return result; } /** @return True if minion has been marked as failed previously */ public synchronized boolean hasMinionFailed(String minionId) { return failedMinions.contains(minionId); } /** * Queues up all work that was allocated to given minion for re-allocation to other minions * * @param minionId */ public synchronized void handleMinionFailure(String minionId) { if (failedMinions.contains(minionId)) { return; // Already handled } failedMinions.add(minionId); minionsAvailableForAllocation.remove(minionId); if (!workUnitsAssignedToMinions.containsKey(minionId)) { LOG.warn(String.format("Failed minion [%s] never had work assigned to it", minionId)); return; } Set<WorkUnit> workUnitsAllocatedToMinion = workUnitsAssignedToMinions.get(minionId); Set<String> allocatedTargets = workUnitsAllocatedToMinion .stream() .map(workUnit -> workUnit.getBuildTargets()) .flatMap(Collection::stream) .collect(Collectors.toSet()); LOG.warn( String.format( "Failed minion [%s] had [%d] active work units containing [%d] targets. 
Queueing for re-allocation.", minionId, workUnitsAllocatedToMinion.size(), allocatedTargets.size())); workUnitsAssignedToMinions.remove(minionId); workUnitsFromFailedMinions.addAll(workUnitsAllocatedToMinion); nodesAssignedToMinions.removeAll(allocatedTargets); } public synchronized boolean haveMostBuildRulesCompleted() { return queue.haveMostBuildRulesFinished(); } public synchronized CoordinatorBuildProgress getBuildProgress() { return queue.getBuildProgress(); } private List<WorkUnit> reallocateWorkUnitsFromFailedMinions(String minionId, int maxWorkUnits) { List<WorkUnit> reallocatedWorkUnits = new ArrayList<>(); MinionType minionType = minionTypesByMinionId.get(minionId); // Re-allocated work should always go to a more powerful hardware. if (minionType == MinionType.LOW_SPEC) { return reallocatedWorkUnits; } while (workUnitsFromFailedMinions.size() > 0 && reallocatedWorkUnits.size() < maxWorkUnits) { WorkUnit workUnitToReAssign = workUnitsFromFailedMinions.remove(); Preconditions.checkArgument(workUnitToReAssign.getBuildTargets().size() > 0); reallocatedWorkUnits.add(workUnitToReAssign); } if (reallocatedWorkUnits.size() > 0) { LOG.info( "Re-allocated [%d] work units from failed minion to [%s]", reallocatedWorkUnits.size(), minionId); } return reallocatedWorkUnits; } private List<String> allocateNewNodes( Set<WorkUnit> workUnitsForMinion, List<WorkUnit> newWorkUnitsForMinion) { List<String> nodesForMinion = new ArrayList<>(); for (WorkUnit workUnit : newWorkUnitsForMinion) { nodesForMinion.addAll(workUnit.getBuildTargets()); for (String node : workUnit.getBuildTargets()) { workUnitsByTarget.put(node, workUnit); } } workUnitsForMinion.addAll(newWorkUnitsForMinion); nodesAssignedToMinions.addAll(nodesForMinion); return nodesForMinion; } private void deallocateFinishedNodes( Set<WorkUnit> workUnitsForMinion, List<String> finishedNodes) { nodesAssignedToMinions.removeAll(finishedNodes); for (String finishedNode : finishedNodes) { if 
(!workUnitsByTarget.containsKey(finishedNode)) { LOG.error(String.format("No work unit could be found for target [%s]", finishedNode)); continue; } WorkUnit workUnitForNode = workUnitsByTarget.get(finishedNode); // Important: workUnitForNode must be removed from workUnitsForMinion Set before we modify // workUnitForNode, as after modification its hashCode/equals properties will have changed. Preconditions.checkArgument(workUnitsForMinion.remove(workUnitForNode)); Preconditions.checkArgument(workUnitForNode.getBuildTargets().remove(finishedNode)); if (workUnitForNode.getBuildTargets().size() > 0) { // Work unit still has items remaining, so re-add it to Set (using new hashCode) workUnitsForMinion.add(workUnitForNode); } } } }
apache-2.0
reynoldsm88/drools
kie-dmn/kie-dmn-feel/src/main/java/org/kie/dmn/feel/lang/SimpleType.java
175
package org.kie.dmn.feel.lang; /** * A simple type definition interface, i.e., a type that does not contain fields */ public interface SimpleType extends Type { }
apache-2.0
Addepar/buck
src-gen/com/facebook/buck/artifact_cache/thrift/BuckCacheMultiFetchRequest.java
28340
/** * Autogenerated by Thrift Compiler (0.12.0) * * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING * @generated */ package com.facebook.buck.artifact_cache.thrift; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) @javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.12.0)") public class BuckCacheMultiFetchRequest implements org.apache.thrift.TBase<BuckCacheMultiFetchRequest, BuckCacheMultiFetchRequest._Fields>, java.io.Serializable, Cloneable, Comparable<BuckCacheMultiFetchRequest> { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("BuckCacheMultiFetchRequest"); private static final org.apache.thrift.protocol.TField RULE_KEYS_FIELD_DESC = new org.apache.thrift.protocol.TField("ruleKeys", org.apache.thrift.protocol.TType.LIST, (short)1); private static final org.apache.thrift.protocol.TField REPOSITORY_FIELD_DESC = new org.apache.thrift.protocol.TField("repository", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField SCHEDULE_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("scheduleType", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField BUILD_TARGETS_FIELD_DESC = new org.apache.thrift.protocol.TField("buildTargets", org.apache.thrift.protocol.TType.LIST, (short)5); private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new BuckCacheMultiFetchRequestStandardSchemeFactory(); private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new BuckCacheMultiFetchRequestTupleSchemeFactory(); public @org.apache.thrift.annotation.Nullable java.util.List<RuleKey> ruleKeys; // optional public @org.apache.thrift.annotation.Nullable java.lang.String repository; // optional public @org.apache.thrift.annotation.Nullable java.lang.String scheduleType; // optional public 
@org.apache.thrift.annotation.Nullable java.util.List<java.lang.String> buildTargets; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { RULE_KEYS((short)1, "ruleKeys"), REPOSITORY((short)2, "repository"), SCHEDULE_TYPE((short)3, "scheduleType"), BUILD_TARGETS((short)5, "buildTargets"); private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>(); static { for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) { byName.put(field.getFieldName(), field); } } /** * Find the _Fields constant that matches fieldId, or null if its not found. */ @org.apache.thrift.annotation.Nullable public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // RULE_KEYS return RULE_KEYS; case 2: // REPOSITORY return REPOSITORY; case 3: // SCHEDULE_TYPE return SCHEDULE_TYPE; case 5: // BUILD_TARGETS return BUILD_TARGETS; default: return null; } } /** * Find the _Fields constant that matches fieldId, throwing an exception * if it is not found. */ public static _Fields findByThriftIdOrThrow(int fieldId) { _Fields fields = findByThriftId(fieldId); if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!"); return fields; } /** * Find the _Fields constant that matches name, or null if its not found. 
*/ @org.apache.thrift.annotation.Nullable public static _Fields findByName(java.lang.String name) { return byName.get(name); } private final short _thriftId; private final java.lang.String _fieldName; _Fields(short thriftId, java.lang.String fieldName) { _thriftId = thriftId; _fieldName = fieldName; } public short getThriftFieldId() { return _thriftId; } public java.lang.String getFieldName() { return _fieldName; } } // isset id assignments private static final _Fields optionals[] = {_Fields.RULE_KEYS,_Fields.REPOSITORY,_Fields.SCHEDULE_TYPE,_Fields.BUILD_TARGETS}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.RULE_KEYS, new org.apache.thrift.meta_data.FieldMetaData("ruleKeys", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, RuleKey.class)))); tmpMap.put(_Fields.REPOSITORY, new org.apache.thrift.meta_data.FieldMetaData("repository", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.SCHEDULE_TYPE, new org.apache.thrift.meta_data.FieldMetaData("scheduleType", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.BUILD_TARGETS, new org.apache.thrift.meta_data.FieldMetaData("buildTargets", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); metaDataMap = 
java.util.Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(BuckCacheMultiFetchRequest.class, metaDataMap); } public BuckCacheMultiFetchRequest() { } /** * Performs a deep copy on <i>other</i>. */ public BuckCacheMultiFetchRequest(BuckCacheMultiFetchRequest other) { if (other.isSetRuleKeys()) { java.util.List<RuleKey> __this__ruleKeys = new java.util.ArrayList<RuleKey>(other.ruleKeys.size()); for (RuleKey other_element : other.ruleKeys) { __this__ruleKeys.add(new RuleKey(other_element)); } this.ruleKeys = __this__ruleKeys; } if (other.isSetRepository()) { this.repository = other.repository; } if (other.isSetScheduleType()) { this.scheduleType = other.scheduleType; } if (other.isSetBuildTargets()) { java.util.List<java.lang.String> __this__buildTargets = new java.util.ArrayList<java.lang.String>(other.buildTargets); this.buildTargets = __this__buildTargets; } } public BuckCacheMultiFetchRequest deepCopy() { return new BuckCacheMultiFetchRequest(this); } @Override public void clear() { this.ruleKeys = null; this.repository = null; this.scheduleType = null; this.buildTargets = null; } public int getRuleKeysSize() { return (this.ruleKeys == null) ? 0 : this.ruleKeys.size(); } @org.apache.thrift.annotation.Nullable public java.util.Iterator<RuleKey> getRuleKeysIterator() { return (this.ruleKeys == null) ? 
null : this.ruleKeys.iterator(); } public void addToRuleKeys(RuleKey elem) { if (this.ruleKeys == null) { this.ruleKeys = new java.util.ArrayList<RuleKey>(); } this.ruleKeys.add(elem); } @org.apache.thrift.annotation.Nullable public java.util.List<RuleKey> getRuleKeys() { return this.ruleKeys; } public BuckCacheMultiFetchRequest setRuleKeys(@org.apache.thrift.annotation.Nullable java.util.List<RuleKey> ruleKeys) { this.ruleKeys = ruleKeys; return this; } public void unsetRuleKeys() { this.ruleKeys = null; } /** Returns true if field ruleKeys is set (has been assigned a value) and false otherwise */ public boolean isSetRuleKeys() { return this.ruleKeys != null; } public void setRuleKeysIsSet(boolean value) { if (!value) { this.ruleKeys = null; } } @org.apache.thrift.annotation.Nullable public java.lang.String getRepository() { return this.repository; } public BuckCacheMultiFetchRequest setRepository(@org.apache.thrift.annotation.Nullable java.lang.String repository) { this.repository = repository; return this; } public void unsetRepository() { this.repository = null; } /** Returns true if field repository is set (has been assigned a value) and false otherwise */ public boolean isSetRepository() { return this.repository != null; } public void setRepositoryIsSet(boolean value) { if (!value) { this.repository = null; } } @org.apache.thrift.annotation.Nullable public java.lang.String getScheduleType() { return this.scheduleType; } public BuckCacheMultiFetchRequest setScheduleType(@org.apache.thrift.annotation.Nullable java.lang.String scheduleType) { this.scheduleType = scheduleType; return this; } public void unsetScheduleType() { this.scheduleType = null; } /** Returns true if field scheduleType is set (has been assigned a value) and false otherwise */ public boolean isSetScheduleType() { return this.scheduleType != null; } public void setScheduleTypeIsSet(boolean value) { if (!value) { this.scheduleType = null; } } public int getBuildTargetsSize() { return 
(this.buildTargets == null) ? 0 : this.buildTargets.size(); } @org.apache.thrift.annotation.Nullable public java.util.Iterator<java.lang.String> getBuildTargetsIterator() { return (this.buildTargets == null) ? null : this.buildTargets.iterator(); } public void addToBuildTargets(java.lang.String elem) { if (this.buildTargets == null) { this.buildTargets = new java.util.ArrayList<java.lang.String>(); } this.buildTargets.add(elem); } @org.apache.thrift.annotation.Nullable public java.util.List<java.lang.String> getBuildTargets() { return this.buildTargets; } public BuckCacheMultiFetchRequest setBuildTargets(@org.apache.thrift.annotation.Nullable java.util.List<java.lang.String> buildTargets) { this.buildTargets = buildTargets; return this; } public void unsetBuildTargets() { this.buildTargets = null; } /** Returns true if field buildTargets is set (has been assigned a value) and false otherwise */ public boolean isSetBuildTargets() { return this.buildTargets != null; } public void setBuildTargetsIsSet(boolean value) { if (!value) { this.buildTargets = null; } } public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { case RULE_KEYS: if (value == null) { unsetRuleKeys(); } else { setRuleKeys((java.util.List<RuleKey>)value); } break; case REPOSITORY: if (value == null) { unsetRepository(); } else { setRepository((java.lang.String)value); } break; case SCHEDULE_TYPE: if (value == null) { unsetScheduleType(); } else { setScheduleType((java.lang.String)value); } break; case BUILD_TARGETS: if (value == null) { unsetBuildTargets(); } else { setBuildTargets((java.util.List<java.lang.String>)value); } break; } } @org.apache.thrift.annotation.Nullable public java.lang.Object getFieldValue(_Fields field) { switch (field) { case RULE_KEYS: return getRuleKeys(); case REPOSITORY: return getRepository(); case SCHEDULE_TYPE: return getScheduleType(); case BUILD_TARGETS: return getBuildTargets(); } throw new 
java.lang.IllegalStateException(); } /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ public boolean isSet(_Fields field) { if (field == null) { throw new java.lang.IllegalArgumentException(); } switch (field) { case RULE_KEYS: return isSetRuleKeys(); case REPOSITORY: return isSetRepository(); case SCHEDULE_TYPE: return isSetScheduleType(); case BUILD_TARGETS: return isSetBuildTargets(); } throw new java.lang.IllegalStateException(); } @Override public boolean equals(java.lang.Object that) { if (that == null) return false; if (that instanceof BuckCacheMultiFetchRequest) return this.equals((BuckCacheMultiFetchRequest)that); return false; } public boolean equals(BuckCacheMultiFetchRequest that) { if (that == null) return false; if (this == that) return true; boolean this_present_ruleKeys = true && this.isSetRuleKeys(); boolean that_present_ruleKeys = true && that.isSetRuleKeys(); if (this_present_ruleKeys || that_present_ruleKeys) { if (!(this_present_ruleKeys && that_present_ruleKeys)) return false; if (!this.ruleKeys.equals(that.ruleKeys)) return false; } boolean this_present_repository = true && this.isSetRepository(); boolean that_present_repository = true && that.isSetRepository(); if (this_present_repository || that_present_repository) { if (!(this_present_repository && that_present_repository)) return false; if (!this.repository.equals(that.repository)) return false; } boolean this_present_scheduleType = true && this.isSetScheduleType(); boolean that_present_scheduleType = true && that.isSetScheduleType(); if (this_present_scheduleType || that_present_scheduleType) { if (!(this_present_scheduleType && that_present_scheduleType)) return false; if (!this.scheduleType.equals(that.scheduleType)) return false; } boolean this_present_buildTargets = true && this.isSetBuildTargets(); boolean that_present_buildTargets = true && that.isSetBuildTargets(); if (this_present_buildTargets || 
that_present_buildTargets) { if (!(this_present_buildTargets && that_present_buildTargets)) return false; if (!this.buildTargets.equals(that.buildTargets)) return false; } return true; } @Override public int hashCode() { int hashCode = 1; hashCode = hashCode * 8191 + ((isSetRuleKeys()) ? 131071 : 524287); if (isSetRuleKeys()) hashCode = hashCode * 8191 + ruleKeys.hashCode(); hashCode = hashCode * 8191 + ((isSetRepository()) ? 131071 : 524287); if (isSetRepository()) hashCode = hashCode * 8191 + repository.hashCode(); hashCode = hashCode * 8191 + ((isSetScheduleType()) ? 131071 : 524287); if (isSetScheduleType()) hashCode = hashCode * 8191 + scheduleType.hashCode(); hashCode = hashCode * 8191 + ((isSetBuildTargets()) ? 131071 : 524287); if (isSetBuildTargets()) hashCode = hashCode * 8191 + buildTargets.hashCode(); return hashCode; } @Override public int compareTo(BuckCacheMultiFetchRequest other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; lastComparison = java.lang.Boolean.valueOf(isSetRuleKeys()).compareTo(other.isSetRuleKeys()); if (lastComparison != 0) { return lastComparison; } if (isSetRuleKeys()) { lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ruleKeys, other.ruleKeys); if (lastComparison != 0) { return lastComparison; } } lastComparison = java.lang.Boolean.valueOf(isSetRepository()).compareTo(other.isSetRepository()); if (lastComparison != 0) { return lastComparison; } if (isSetRepository()) { lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.repository, other.repository); if (lastComparison != 0) { return lastComparison; } } lastComparison = java.lang.Boolean.valueOf(isSetScheduleType()).compareTo(other.isSetScheduleType()); if (lastComparison != 0) { return lastComparison; } if (isSetScheduleType()) { lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.scheduleType, other.scheduleType); if (lastComparison != 0) { return 
lastComparison; } } lastComparison = java.lang.Boolean.valueOf(isSetBuildTargets()).compareTo(other.isSetBuildTargets()); if (lastComparison != 0) { return lastComparison; } if (isSetBuildTargets()) { lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.buildTargets, other.buildTargets); if (lastComparison != 0) { return lastComparison; } } return 0; } @org.apache.thrift.annotation.Nullable public _Fields fieldForId(int fieldId) { return _Fields.findByThriftId(fieldId); } public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { scheme(iprot).read(iprot, this); } public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { scheme(oprot).write(oprot, this); } @Override public java.lang.String toString() { java.lang.StringBuilder sb = new java.lang.StringBuilder("BuckCacheMultiFetchRequest("); boolean first = true; if (isSetRuleKeys()) { sb.append("ruleKeys:"); if (this.ruleKeys == null) { sb.append("null"); } else { sb.append(this.ruleKeys); } first = false; } if (isSetRepository()) { if (!first) sb.append(", "); sb.append("repository:"); if (this.repository == null) { sb.append("null"); } else { sb.append(this.repository); } first = false; } if (isSetScheduleType()) { if (!first) sb.append(", "); sb.append("scheduleType:"); if (this.scheduleType == null) { sb.append("null"); } else { sb.append(this.scheduleType); } first = false; } if (isSetBuildTargets()) { if (!first) sb.append(", "); sb.append("buildTargets:"); if (this.buildTargets == null) { sb.append("null"); } else { sb.append(this.buildTargets); } first = false; } sb.append(")"); return sb.toString(); } public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { try { write(new org.apache.thrift.protocol.TCompactProtocol(new 
org.apache.thrift.transport.TIOStreamTransport(out))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { try { read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } private static class BuckCacheMultiFetchRequestStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { public BuckCacheMultiFetchRequestStandardScheme getScheme() { return new BuckCacheMultiFetchRequestStandardScheme(); } } private static class BuckCacheMultiFetchRequestStandardScheme extends org.apache.thrift.scheme.StandardScheme<BuckCacheMultiFetchRequest> { public void read(org.apache.thrift.protocol.TProtocol iprot, BuckCacheMultiFetchRequest struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) { schemeField = iprot.readFieldBegin(); if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { break; } switch (schemeField.id) { case 1: // RULE_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { org.apache.thrift.protocol.TList _list58 = iprot.readListBegin(); struct.ruleKeys = new java.util.ArrayList<RuleKey>(_list58.size); @org.apache.thrift.annotation.Nullable RuleKey _elem59; for (int _i60 = 0; _i60 < _list58.size; ++_i60) { _elem59 = new RuleKey(); _elem59.read(iprot); struct.ruleKeys.add(_elem59); } iprot.readListEnd(); } struct.setRuleKeysIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; case 2: // REPOSITORY if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.repository = iprot.readString(); struct.setRepositoryIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; 
case 3: // SCHEDULE_TYPE if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.scheduleType = iprot.readString(); struct.setScheduleTypeIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; case 5: // BUILD_TARGETS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { org.apache.thrift.protocol.TList _list61 = iprot.readListBegin(); struct.buildTargets = new java.util.ArrayList<java.lang.String>(_list61.size); @org.apache.thrift.annotation.Nullable java.lang.String _elem62; for (int _i63 = 0; _i63 < _list61.size; ++_i63) { _elem62 = iprot.readString(); struct.buildTargets.add(_elem62); } iprot.readListEnd(); } struct.setBuildTargetsIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } iprot.readFieldEnd(); } iprot.readStructEnd(); // check for required fields of primitive type, which can't be checked in the validate method struct.validate(); } public void write(org.apache.thrift.protocol.TProtocol oprot, BuckCacheMultiFetchRequest struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.ruleKeys != null) { if (struct.isSetRuleKeys()) { oprot.writeFieldBegin(RULE_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.ruleKeys.size())); for (RuleKey _iter64 : struct.ruleKeys) { _iter64.write(oprot); } oprot.writeListEnd(); } oprot.writeFieldEnd(); } } if (struct.repository != null) { if (struct.isSetRepository()) { oprot.writeFieldBegin(REPOSITORY_FIELD_DESC); oprot.writeString(struct.repository); oprot.writeFieldEnd(); } } if (struct.scheduleType != null) { if (struct.isSetScheduleType()) { oprot.writeFieldBegin(SCHEDULE_TYPE_FIELD_DESC); oprot.writeString(struct.scheduleType); oprot.writeFieldEnd(); } } if (struct.buildTargets != 
null) { if (struct.isSetBuildTargets()) { oprot.writeFieldBegin(BUILD_TARGETS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.buildTargets.size())); for (java.lang.String _iter65 : struct.buildTargets) { oprot.writeString(_iter65); } oprot.writeListEnd(); } oprot.writeFieldEnd(); } } oprot.writeFieldStop(); oprot.writeStructEnd(); } } private static class BuckCacheMultiFetchRequestTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { public BuckCacheMultiFetchRequestTupleScheme getScheme() { return new BuckCacheMultiFetchRequestTupleScheme(); } } private static class BuckCacheMultiFetchRequestTupleScheme extends org.apache.thrift.scheme.TupleScheme<BuckCacheMultiFetchRequest> { @Override public void write(org.apache.thrift.protocol.TProtocol prot, BuckCacheMultiFetchRequest struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet optionals = new java.util.BitSet(); if (struct.isSetRuleKeys()) { optionals.set(0); } if (struct.isSetRepository()) { optionals.set(1); } if (struct.isSetScheduleType()) { optionals.set(2); } if (struct.isSetBuildTargets()) { optionals.set(3); } oprot.writeBitSet(optionals, 4); if (struct.isSetRuleKeys()) { { oprot.writeI32(struct.ruleKeys.size()); for (RuleKey _iter66 : struct.ruleKeys) { _iter66.write(oprot); } } } if (struct.isSetRepository()) { oprot.writeString(struct.repository); } if (struct.isSetScheduleType()) { oprot.writeString(struct.scheduleType); } if (struct.isSetBuildTargets()) { { oprot.writeI32(struct.buildTargets.size()); for (java.lang.String _iter67 : struct.buildTargets) { oprot.writeString(_iter67); } } } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, BuckCacheMultiFetchRequest struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = 
(org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list68 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); struct.ruleKeys = new java.util.ArrayList<RuleKey>(_list68.size); @org.apache.thrift.annotation.Nullable RuleKey _elem69; for (int _i70 = 0; _i70 < _list68.size; ++_i70) { _elem69 = new RuleKey(); _elem69.read(iprot); struct.ruleKeys.add(_elem69); } } struct.setRuleKeysIsSet(true); } if (incoming.get(1)) { struct.repository = iprot.readString(); struct.setRepositoryIsSet(true); } if (incoming.get(2)) { struct.scheduleType = iprot.readString(); struct.setScheduleTypeIsSet(true); } if (incoming.get(3)) { { org.apache.thrift.protocol.TList _list71 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); struct.buildTargets = new java.util.ArrayList<java.lang.String>(_list71.size); @org.apache.thrift.annotation.Nullable java.lang.String _elem72; for (int _i73 = 0; _i73 < _list71.size; ++_i73) { _elem72 = iprot.readString(); struct.buildTargets.add(_elem72); } } struct.setBuildTargetsIsSet(true); } } } private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) { return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme(); } }
apache-2.0
RanjithKumar5550/RanMifos
fineract-provider/src/main/java/org/apache/fineract/useradministration/handler/UpdateMakerCheckerPermissionsCommandHandler.java
2020
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.fineract.useradministration.handler; import org.apache.fineract.commands.annotation.CommandType; import org.apache.fineract.commands.handler.NewCommandSourceHandler; import org.apache.fineract.infrastructure.core.api.JsonCommand; import org.apache.fineract.infrastructure.core.data.CommandProcessingResult; import org.apache.fineract.useradministration.service.PermissionWritePlatformService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; @Service @CommandType(entity = "PERMISSION", action = "UPDATE") public class UpdateMakerCheckerPermissionsCommandHandler implements NewCommandSourceHandler { private final PermissionWritePlatformService writePlatformService; @Autowired public UpdateMakerCheckerPermissionsCommandHandler(final PermissionWritePlatformService writePlatformService) { this.writePlatformService = writePlatformService; } @Transactional @Override public CommandProcessingResult processCommand(final JsonCommand command) { return this.writePlatformService.updateMakerCheckerPermissions(command); } }
apache-2.0
aifargonos2/elk-reasoner
elk-reasoner/src/main/java/org/semanticweb/elk/reasoner/saturation/rules/subsumers/ObjectIntersectionFromConjunctRule.java
9448
package org.semanticweb.elk.reasoner.saturation.rules.subsumers;

/*
 * #%L
 * ELK Reasoner
 * $Id:$
 * $HeadURL:$
 * %%
 * Copyright (C) 2011 - 2013 Department of Computer Science, University of Oxford
 * %%
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */

import java.util.Map;

import org.semanticweb.elk.reasoner.indexing.hierarchy.IndexedClassExpression;
import org.semanticweb.elk.reasoner.indexing.hierarchy.IndexedObjectIntersectionOf;
import org.semanticweb.elk.reasoner.indexing.modifiable.ModifiableIndexedClassExpression;
import org.semanticweb.elk.reasoner.indexing.modifiable.ModifiableIndexedObjectIntersectionOf;
import org.semanticweb.elk.reasoner.indexing.modifiable.ModifiableOntologyIndex;
import org.semanticweb.elk.reasoner.saturation.conclusions.interfaces.Subsumer;
import org.semanticweb.elk.reasoner.saturation.context.Context;
import org.semanticweb.elk.reasoner.saturation.context.ContextPremises;
import org.semanticweb.elk.reasoner.saturation.rules.ConclusionProducer;
import org.semanticweb.elk.reasoner.saturation.tracing.inferences.ComposedConjunction;
import org.semanticweb.elk.util.collections.ArrayHashMap;
import org.semanticweb.elk.util.collections.LazySetIntersection;
import org.semanticweb.elk.util.collections.chains.Chain;
import org.semanticweb.elk.util.collections.chains.Matcher;
import org.semanticweb.elk.util.collections.chains.ReferenceFactory;
import org.semanticweb.elk.util.collections.chains.SimpleTypeBasedMatcher;

/**
 * A {@link ChainableSubsumerRule} producing {@link Subsumer} for an
 * {@link IndexedObjectIntersectionOf} when processing one of its conjunct
 * {@link IndexedClassExpression} and when the other conjunct is contained in
 * the {@link Context}
 *
 * @author "Yevgeny Kazakov"
 */
public class ObjectIntersectionFromConjunctRule extends
		AbstractChainableSubsumerRule {

	// logger for events
	/*
	 * private static final Logger LOGGER_ = LoggerFactory
	 * .getLogger(ObjectIntersectionFromConjunctRule.class);
	 */

	public static final String NAME = "ObjectIntersectionOf Introduction";

	// Maps the "other" conjunct to the conjunction it completes. When a
	// subsumer matching a key of this map is present in the context, the
	// corresponding conjunction can be derived.
	private final Map<IndexedClassExpression, IndexedObjectIntersectionOf> conjunctionsByConjunct_;

	// Creates an empty rule linked into the given chain (used by FACTORY_).
	private ObjectIntersectionFromConjunctRule(ChainableSubsumerRule tail) {
		super(tail);
		this.conjunctionsByConjunct_ = new ArrayHashMap<IndexedClassExpression, IndexedObjectIntersectionOf>(
				4);
	}

	// Creates a stand-alone rule with a single (conjunct -> conjunction)
	// entry; used when registering/deregistering rules in the index.
	private ObjectIntersectionFromConjunctRule(IndexedClassExpression conjunct,
			IndexedObjectIntersectionOf conjunction) {
		this(null);
		this.conjunctionsByConjunct_.put(conjunct, conjunction);
	}

	@Override
	public String getName() {
		return NAME;
	}

	/**
	 * Add {@link ObjectIntersectionFromConjunctRule}s for the given
	 * {@link ModifiableIndexedObjectIntersectionOf} in the given
	 * {@link ModifiableOntologyIndex}
	 *
	 * Registers one rule per conjunct: processing either conjunct triggers a
	 * check for the other. If the second registration fails, the first is
	 * reverted so the index is left unchanged.
	 *
	 * @param conjunction
	 * @param index
	 * @return {@code true} if both registrations succeeded
	 */
	public static boolean addRulesFor(
			ModifiableIndexedObjectIntersectionOf conjunction,
			ModifiableOntologyIndex index) {
		ModifiableIndexedClassExpression firstConjunct = conjunction
				.getFirstConjunct();
		ModifiableIndexedClassExpression secondConjunct = conjunction
				.getSecondConjunct();
		if (!index.add(firstConjunct, new ObjectIntersectionFromConjunctRule(
				secondConjunct, conjunction)))
			return false;
		// if both conjuncts are the same, we are done
		if (secondConjunct.equals(firstConjunct))
			return true;
		// else index the second conjunct
		if (index.add(secondConjunct, new ObjectIntersectionFromConjunctRule(
				firstConjunct, conjunction)))
			return true;
		// else revert the changes made
		index.remove(firstConjunct, new ObjectIntersectionFromConjunctRule(
				secondConjunct, conjunction));
		return false;
	}

	/**
	 * Removes {@link ObjectIntersectionFromConjunctRule}s for the given
	 * {@link ModifiableIndexedObjectIntersectionOf} in the given
	 * {@link ModifiableOntologyIndex}
	 *
	 * Mirror image of {@link #addRulesFor}: removes the rule for each
	 * conjunct, re-adding the first if removing the second fails.
	 *
	 * @param conjunction
	 * @param index
	 * @return {@code true} if both removals succeeded
	 */
	public static boolean removeRulesFor(
			ModifiableIndexedObjectIntersectionOf conjunction,
			ModifiableOntologyIndex index) {
		ModifiableIndexedClassExpression firstConjunct = conjunction
				.getFirstConjunct();
		ModifiableIndexedClassExpression secondConjunct = conjunction
				.getSecondConjunct();
		if (!index.remove(firstConjunct,
				new ObjectIntersectionFromConjunctRule(secondConjunct,
						conjunction)))
			return false;
		// if both conjuncts are the same, we are done
		if (secondConjunct.equals(firstConjunct))
			return true;
		// else index the second conjunct
		if (index.remove(secondConjunct,
				new ObjectIntersectionFromConjunctRule(firstConjunct,
						conjunction)))
			return true;
		// else revert the changes made
		index.add(firstConjunct, new ObjectIntersectionFromConjunctRule(
				secondConjunct, conjunction));
		return false;
	}

	// TODO: hide this method
	public Map<IndexedClassExpression, IndexedObjectIntersectionOf> getConjunctionsByConjunct() {
		return conjunctionsByConjunct_;
	}

	// For every conjunct key that is also a known subsumer of the context
	// root, produce the corresponding conjunction as a derived subsumer.
	@Override
	public void apply(IndexedClassExpression premise, ContextPremises premises,
			ConclusionProducer producer) {
		for (IndexedClassExpression common : new LazySetIntersection<IndexedClassExpression>(
				conjunctionsByConjunct_.keySet(), premises.getSubsumers())) {
			// producer.produce(premises.getRoot(), new
			// ComposedSubsumer(conjunctionsByConjunct_.get(common)));
			producer.produce(premises.getRoot(), new ComposedConjunction(
					conjunctionsByConjunct_.get(common)));
		}
	}

	// Merges this rule's entries into the rule of the same type found (or
	// created) in the chain. All-or-nothing: on the first conflicting entry,
	// the `added` counter is used to roll back exactly the entries inserted
	// so far (map iteration order is stable, so the first `added` entries of
	// the loop are the ones that were inserted).
	@Override
	public boolean addTo(Chain<ChainableSubsumerRule> ruleChain) {
		if (isEmpty())
			return true;
		ObjectIntersectionFromConjunctRule rule = ruleChain.getCreate(MATCHER_,
				FACTORY_);
		boolean success = true;
		int added = 0;
		for (Map.Entry<IndexedClassExpression, IndexedObjectIntersectionOf> entry : this.conjunctionsByConjunct_
				.entrySet()) {
			if (rule.addConjunctionByConjunct(entry.getValue(), entry.getKey()))
				added++;
			else {
				success = false;
				break;
			}
		}
		if (success)
			return true;
		// else revert all changes
		for (Map.Entry<IndexedClassExpression, IndexedObjectIntersectionOf> entry : this.conjunctionsByConjunct_
				.entrySet()) {
			if (added == 0)
				break;
			added--;
			rule.removeConjunctionByConjunct(entry.getValue(), entry.getKey());
		}
		return false;
	}

	// Inverse of addTo: removes this rule's entries from the matching rule in
	// the chain, rolling back partial removals on failure, and unlinks the
	// chain rule entirely once it becomes empty.
	@Override
	public boolean removeFrom(Chain<ChainableSubsumerRule> ruleChain) {
		if (isEmpty())
			return true;
		ObjectIntersectionFromConjunctRule rule = ruleChain.find(MATCHER_);
		if (rule == null)
			return false;
		// else
		boolean success = true;
		int removed = 0;
		for (Map.Entry<IndexedClassExpression, IndexedObjectIntersectionOf> entry : this.conjunctionsByConjunct_
				.entrySet()) {
			if (rule.removeConjunctionByConjunct(entry.getValue(),
					entry.getKey()))
				removed++;
			else {
				success = false;
				break;
			}
		}
		if (success) {
			if (rule.isEmpty()) {
				ruleChain.remove(MATCHER_);
			}
			return true;
		}
		// else revert all changes
		for (Map.Entry<IndexedClassExpression, IndexedObjectIntersectionOf> entry : this.conjunctionsByConjunct_
				.entrySet()) {
			if (removed == 0)
				break;
			removed--;
			rule.addConjunctionByConjunct(entry.getValue(), entry.getKey());
		}
		return false;
	}

	@Override
	public void accept(LinkedSubsumerRuleVisitor visitor,
			IndexedClassExpression premise, ContextPremises premises,
			ConclusionProducer producer) {
		visitor.visit(this, premise, premises, producer);
	}

	// Adds one (conjunct -> conjunction) entry; fails (and restores the
	// previous mapping) if the conjunct is already mapped.
	private boolean addConjunctionByConjunct(
			IndexedObjectIntersectionOf conjunction,
			IndexedClassExpression conjunct) {
		IndexedObjectIntersectionOf previous = conjunctionsByConjunct_.put(
				conjunct, conjunction);
		if (previous == null)
			return true;
		// else revert the change;
		conjunctionsByConjunct_.put(conjunct, previous);
		return false;
	}

	// Removes one (conjunct -> conjunction) entry; fails (and restores any
	// other mapping it displaced) unless the conjunct was mapped to exactly
	// this conjunction (reference equality is intentional here —
	// NOTE(review): presumably indexed objects are interned; confirm).
	private boolean removeConjunctionByConjunct(
			IndexedObjectIntersectionOf conjunction,
			IndexedClassExpression conjunct) {
		IndexedObjectIntersectionOf previous = conjunctionsByConjunct_
				.remove(conjunct);
		if (previous == conjunction)
			return true;
		// else revert the change
		if (previous != null)
			conjunctionsByConjunct_.put(conjunct, previous);
		return false;
	}

	/**
	 * @return {@code true} if this rule never does anything
	 */
	private boolean isEmpty() {
		return conjunctionsByConjunct_.isEmpty();
	}

	private static final Matcher<ChainableSubsumerRule, ObjectIntersectionFromConjunctRule> MATCHER_ = new SimpleTypeBasedMatcher<ChainableSubsumerRule, ObjectIntersectionFromConjunctRule>(
			ObjectIntersectionFromConjunctRule.class);

	private static final ReferenceFactory<ChainableSubsumerRule, ObjectIntersectionFromConjunctRule> FACTORY_ = new ReferenceFactory<ChainableSubsumerRule, ObjectIntersectionFromConjunctRule>() {
		@Override
		public ObjectIntersectionFromConjunctRule create(
				ChainableSubsumerRule tail) {
			return new ObjectIntersectionFromConjunctRule(tail);
		}
	};

}
apache-2.0
OxBEEF/merge-server
wp-content/themes/skt-white/inc/includes/class-options-media-uploader.php
3476
<?php
/**
 * Media uploader field for the Options Framework.
 *
 * Renders a text input plus upload/remove buttons backed by the WordPress
 * media library, and enqueues the JavaScript that drives them.
 */
class Options_Framework_Media_Uploader {

	/**
	 * Initialize the media uploader class
	 *
	 * @since 1.7.0
	 */
	public function init() {
		add_action( 'admin_enqueue_scripts', array( $this, 'optionsframework_media_scripts' ) );
	}

	/**
	 * Media Uploader Using the WordPress Media Library.
	 *
	 * Parameters:
	 *
	 * string $_id - A token to identify this field (the name).
	 * string $_value - The value of the field, if present.
	 * string $_desc - An optional description of the field.
	 * string $_name - Optional explicit input name; defaults to "<option_name>[<id>]".
	 *
	 * Returns the field's HTML markup as a string.
	 */
	static function optionsframework_uploader( $_id, $_value, $_desc = '', $_name = '' ) {

		// Gets the unique option id
		$options_framework = new Options_Framework;
		$option_name = $options_framework->get_option_name();

		$output = '';
		$class = '';
		$value = '';
		$name = '';

		$id = strip_tags( strtolower( $_id ) );

		// If a value is passed and we don't have a stored value, use the value that's passed through.
		if ( $_value != '' && $value == '' ) {
			$value = $_value;
		}

		if ( $_name != '' ) {
			$name = $_name;
		} else {
			$name = $option_name . '[' . $id . ']';
		}

		if ( $value ) {
			$class = ' has-file';
		}

		$output .= '<input id="' . $id . '" class="upload' . $class . '" type="text" name="' . $name . '" value="' . $value . '" placeholder="' . __('No file chosen', 'skt-white') . '" />' . "\n";

		if ( function_exists( 'wp_enqueue_media' ) ) {
			if ( $value == '' ) {
				$output .= '<input id="upload-' . $id . '" class="upload-button button" type="button" value="' . __( 'Upload', 'skt-white' ) . '" />' . "\n";
			} else {
				$output .= '<input id="remove-' . $id . '" class="remove-file button" type="button" value="' . __( 'Remove', 'skt-white' ) . '" />' . "\n";
			}
		} else {
			$output .= '<p><i>' . __( 'Upgrade your version of WordPress for full media support.', 'skt-white' ) . '</i></p>';
		}

		if ( $_desc != '' ) {
			$output .= '<span class="of-metabox-desc">' . $_desc . '</span>' . "\n";
		}

		$output .= '<div class="screenshot" id="' . $id . '-image">' . "\n";

		if ( $value != '' ) {
			$remove = '<a class="remove-image">Remove</a>';
			// BUGFIX: the old pattern '/(^.*\.jpg|jpeg|png|gif|ico*)/i' grouped the
			// alternation incorrectly, so "jpeg"/"png"/"gif" matched ANYWHERE in the
			// value (and "ico*" matched "ic" plus zero or more "o"), causing
			// non-image files such as "report-png.pdf" to be rendered as <img>.
			// Anchor on the actual file extension instead (allowing an optional
			// query string / fragment, which the old unanchored pattern tolerated).
			$image = preg_match( '/\.(jpe?g|png|gif|ico)([?#].*)?$/i', $value );
			if ( $image ) {
				$output .= '<img src="' . $value . '" alt="" />' . $remove;
			} else {
				// No output preview if it's not an image.
				// Standard generic output if it's not an image.
				// (A dead loop that derived $title from the path segments and then
				// unconditionally overwrote it was removed here.)
				$title = __( 'View File', 'skt-white' );
				$output .= '<div class="no-image"><span class="file_link"><a href="' . $value . '" target="_blank" rel="external">' . $title . '</a></span></div>';
			}
		}
		$output .= '</div>' . "\n";

		return $output;
	}

	/**
	 * Enqueue scripts for file uploader
	 *
	 * Only loads on the options framework admin screen, identified by the
	 * menu slug suffix of the current $hook.
	 */
	function optionsframework_media_scripts( $hook ) {

		$menu = Options_Framework_Admin::menu_settings();

		if ( substr( $hook, -strlen( $menu['menu_slug'] ) ) !== $menu['menu_slug'] )
			return;

		if ( function_exists( 'wp_enqueue_media' ) )
			wp_enqueue_media();

		wp_register_script( 'of-media-uploader', OPTIONS_FRAMEWORK_DIRECTORY . 'js/media-uploader.js', array( 'jquery' ), Options_Framework::VERSION );
		wp_enqueue_script( 'of-media-uploader' );
		wp_localize_script( 'of-media-uploader', 'optionsframework_l10n', array(
			'upload' => __( 'Upload', 'skt-white' ),
			'remove' => __( 'Remove', 'skt-white' )
		) );
	}

}
apache-2.0
shinfan/gcloud-java
google-cloud-bigquery/src/main/java/com/google/cloud/bigquery/LoadConfiguration.java
7806
/*
 * Copyright 2015 Google Inc. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.bigquery;

import com.google.cloud.bigquery.JobInfo.CreateDisposition;
import com.google.cloud.bigquery.JobInfo.WriteDisposition;
import com.google.cloud.bigquery.JobInfo.SchemaUpdateOption;

import java.util.List;

/**
 * Common interface for a load configuration. A load configuration
 * ({@link WriteChannelConfiguration}) can be used to load data into a table with a
 * {@link com.google.cloud.WriteChannel} ({@link BigQuery#writer(WriteChannelConfiguration)}).
 * A load configuration ({@link LoadJobConfiguration}) can also be used to create a load job
 * ({@link JobInfo#of(JobConfiguration)}).
 */
public interface LoadConfiguration {

  /** Builder for a {@link LoadConfiguration}; mirrors the getters below. */
  interface Builder {

    /**
     * Sets the destination table to load the data into.
     */
    Builder setDestinationTable(TableId destinationTable);

    /**
     * Sets whether the job is allowed to create new tables.
     *
     * @see <a href="https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.createDisposition">
     *     Create Disposition</a>
     */
    Builder setCreateDisposition(CreateDisposition createDisposition);

    /**
     * Sets the action that should occur if the destination table already exists.
     *
     * @see <a href="https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.writeDisposition">
     *     Write Disposition</a>
     */
    Builder setWriteDisposition(WriteDisposition writeDisposition);

    /**
     * Sets the source format, and possibly some parsing options, of the external data. Supported
     * formats are {@code CSV}, {@code NEWLINE_DELIMITED_JSON} and {@code DATASTORE_BACKUP}. If not
     * specified, {@code CSV} format is assumed.
     *
     * <a href="https://cloud.google.com/bigquery/docs/reference/v2/tables#externalDataConfiguration.sourceFormat">
     * Source Format</a>
     */
    Builder setFormatOptions(FormatOptions formatOptions);

    /**
     * Sets the string that represents a null value in a CSV file. For example, if you specify "\N",
     * BigQuery interprets "\N" as a null value when loading a CSV file. The default value is the
     * empty string. If you set this property to a custom value, BigQuery throws an error if an
     * empty string is present for all data types except for {@code STRING} and {@code BYTE}. For
     * {@code STRING} and {@code BYTE} columns, BigQuery interprets the empty string as an empty
     * value.
     */
    Builder setNullMarker(String nullMarker);

    /**
     * Sets the maximum number of bad records that BigQuery can ignore when running the job. If the
     * number of bad records exceeds this value, an invalid error is returned in the job result.
     * By default no bad record is ignored.
     */
    Builder setMaxBadRecords(Integer maxBadRecords);

    /**
     * Sets the schema for the destination table. The schema can be omitted if the destination table
     * already exists, or if you're loading data from a Google Cloud Datastore backup (i.e.
     * {@code DATASTORE_BACKUP} format option).
     */
    Builder setSchema(Schema schema);

    /**
     * Sets whether BigQuery should allow extra values that are not represented in the table schema.
     * If {@code true}, the extra values are ignored. If {@code false}, records with extra columns
     * are treated as bad records, and if there are too many bad records, an invalid error is
     * returned in the job result. By default unknown values are not allowed.
     */
    Builder setIgnoreUnknownValues(Boolean ignoreUnknownValues);

    /**
     * [Experimental] Sets options allowing the schema of the destination table to be updated as a side effect of the
     * load job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when
     * writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition
     * decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema.
     */
    Builder setSchemaUpdateOptions(List<SchemaUpdateOption> schemaUpdateOptions);

    /**
     * [Experimental] Sets automatic inference of the options and schema for CSV and JSON sources.
     */
    Builder setAutodetect(Boolean autodetect);

    /** Builds the configured {@link LoadConfiguration}. */
    LoadConfiguration build();
  }

  /**
   * Returns the destination table to load the data into.
   */
  TableId getDestinationTable();

  /**
   * Returns whether the job is allowed to create new tables.
   *
   * @see <a href="https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.createDisposition">
   *     Create Disposition</a>
   */
  CreateDisposition getCreateDisposition();

  /**
   * Returns the action that should occur if the destination table already exists.
   *
   * @see <a href="https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.writeDisposition">
   *     Write Disposition</a>
   */
  WriteDisposition getWriteDisposition();

  /**
   * Returns the string that represents a null value in a CSV file.
   *
   * @see <a href="https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.nullMarker">
   *     Null Marker</a>
   */
  String getNullMarker();

  /**
   * Returns additional properties used to parse CSV data (used when {@link #getFormat()} is set
   * to CSV). Returns {@code null} if not set.
   */
  CsvOptions getCsvOptions();

  /**
   * Returns the maximum number of bad records that BigQuery can ignore when running the job. If the
   * number of bad records exceeds this value, an invalid error is returned in the job result.
   * By default no bad record is ignored.
   */
  Integer getMaxBadRecords();

  /**
   * Returns the schema for the destination table, if set. Returns {@code null} otherwise.
   */
  Schema getSchema();

  /**
   * Returns the format of the data files.
   */
  String getFormat();

  /**
   * Returns whether BigQuery should allow extra values that are not represented in the table
   * schema. If {@code true}, the extra values are ignored. If {@code false}, records with extra
   * columns are treated as bad records, and if there are too many bad records, an invalid error is
   * returned in the job result. By default unknown values are not allowed.
   */
  Boolean ignoreUnknownValues();

  /**
   * Returns additional options used to load from a Cloud datastore backup.
   */
  DatastoreBackupOptions getDatastoreBackupOptions();

  /**
   * [Experimental] Returns options allowing the schema of the destination table to be updated as a side effect of the
   * load job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when
   * writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition
   * decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema.
   */
  List<SchemaUpdateOption> getSchemaUpdateOptions();

  /**
   * [Experimental] Returns whether automatic inference of the options and schema for CSV and JSON sources is set.
   */
  Boolean getAutodetect();

  /**
   * Returns a builder for the load configuration object.
   */
  Builder toBuilder();
}
apache-2.0
vilic/TypeScript
tests/cases/fourslash/findAllRefsInsideWithBlock.ts
291
/// <reference path='fourslash.ts'/>

// Find-all-references must not report identifiers inside a `with` block:
// per the inline markers below, only the [|...|] ranges (the declaration of
// `x` and its uses after the block) should reference each other, while the
// `x` and `y` occurrences inside `with ({}) { ... }` are to be ignored.
////var [|x|] = 0;
////
////with ({}) {
////    var y = x; // Reference of x here should not be picked
////    /*2*/y++; // also reference for y should be ignored
////}
////
////[|x|] = [|x|] + 1;

verify.rangesReferenceEachOther();
apache-2.0
tcmoore32/sheer-madness
gosu-core/src/main/java/gw/internal/gosu/parser/ReducedDynamicFunctionSymbol.java
6279
/*
 * Copyright 2014 Guidewire Software, Inc.
 */

package gw.internal.gosu.parser;

import gw.lang.parser.IDynamicFunctionSymbol;
import gw.lang.parser.IReducedDynamicFunctionSymbol;
import gw.lang.parser.IReducedSymbol;
import gw.lang.parser.ISymbol;
import gw.lang.reflect.IAttributedFeatureInfo;
import gw.lang.reflect.IConstructorInfo;
import gw.lang.reflect.IDFSBackedFeatureInfo;
import gw.lang.reflect.IMethodInfo;
import gw.lang.reflect.IModifierInfo;
import gw.lang.reflect.IRelativeTypeInfo;
import gw.lang.reflect.IType;
import gw.lang.reflect.ITypeInfo;
import gw.lang.reflect.gs.IGosuClass;
import gw.lang.reflect.gs.IGosuProgram;
import gw.lang.reflect.gs.IProgramInstance;
import gw.util.GosuExceptionUtil;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
 * A reduced (memory-light) snapshot of a {@link DynamicFunctionSymbol}:
 * captures the argument types, return type, reduced arguments and (reduced)
 * super symbol at construction time so the full parse-time symbol can be
 * discarded.
 */
public class ReducedDynamicFunctionSymbol extends ReducedSymbol implements IReducedDynamicFunctionSymbol
{
  // Snapshots taken from the originating DynamicFunctionSymbol.
  private IType[] _argTypes;
  private IType _returnType;
  private List<IReducedSymbol> _args;
  private IReducedDynamicFunctionSymbol _superDfs;
  private final boolean _isConstructor;

  ReducedDynamicFunctionSymbol(DynamicFunctionSymbol dfs) {
    super( dfs );
    _isConstructor = dfs.isConstructor();
    _argTypes = dfs.getArgTypes();
    _returnType = dfs.getReturnType();
    _args = makeArgs(dfs);
    _fullDescription = dfs.getFullDescription();
    // Reduce the super symbol recursively, if any.
    DynamicFunctionSymbol superDfs = dfs.getSuperDfs();
    if (superDfs != null) {
      _superDfs = superDfs.createReducedSymbol();
    }
  }

  // Converts each full argument symbol into its reduced counterpart.
  private List<IReducedSymbol> makeArgs(IDynamicFunctionSymbol dfs) {
    List<ISymbol> args = dfs.getArgs();
    List<IReducedSymbol> newArgs = new ArrayList<IReducedSymbol>(args.size());
    for (ISymbol arg : args) {
      newArgs.add( arg.createReducedSymbol() );
    }
    return newArgs;
  }

  @Override
  public IType[] getArgTypes() {
    return _argTypes;
  }

  @Override
  public IType getReturnType() {
    return _returnType;
  }

  @Override
  public String getFullDescription() {
    return _fullDescription;
  }

  @Override
  public List<IReducedSymbol> getArgs() {
    return _args;
  }

  @Override
  public IReducedDynamicFunctionSymbol getSuperDfs() {
    return _superDfs;
  }

  // Determined from the class of the ORIGINAL symbol (captured by the
  // ReducedSymbol base), not from this reduced instance's class.
  @Override
  public boolean isSuperOrThisConstructor() {
    return SuperConstructorFunctionSymbol.class.isAssignableFrom(getSymbolClass()) ||
            ThisConstructorFunctionSymbol.class.isAssignableFrom(getSymbolClass());
  }

  @Override
  public IReducedDynamicFunctionSymbol getBackingDfs() {
    return this;
  }

  /**
   * Resolves the {@link IMethodInfo} or {@link IConstructorInfo} in the
   * declaring class's type info that is backed by this symbol, searching
   * methods first, then constructors. For reduced super()/this() constructor
   * symbols a constructor with matching args is also accepted. Returns
   * {@code null} if the declaring class is unknown or no feature matches.
   */
  @Override
  public IAttributedFeatureInfo getMethodOrConstructorInfo() {
    IGosuClass declaringType = getGosuClass();
    if( declaringType == null )
    {
      return null;
    }

    ITypeInfo typeInfo = declaringType.getTypeInfo();
    List<? extends IMethodInfo> methods;
    // IRelativeTypeInfo filters features by accessibility relative to the
    // declaring type itself, exposing non-public members too.
    if (typeInfo instanceof IRelativeTypeInfo) {
      methods = ((IRelativeTypeInfo) typeInfo).getMethods( declaringType );
    } else {
      methods = typeInfo.getMethods();
    }
    for( IMethodInfo mi : methods )
    {
      if (mi instanceof IDFSBackedFeatureInfo) {
        IReducedDynamicFunctionSymbol dfs = ((IDFSBackedFeatureInfo) mi).getDfs();
        if (this.equals(dfs) || getBackingDfs().equals(dfs)) {
          return mi;
        }
      }
    }

    List<? extends IConstructorInfo> ctors;
    if (typeInfo instanceof IRelativeTypeInfo) {
      ctors = ((IRelativeTypeInfo)typeInfo).getConstructors( declaringType );
    } else {
      ctors = typeInfo.getConstructors();
    }
    for( IConstructorInfo ci : ctors )
    {
      if (ci instanceof IDFSBackedFeatureInfo) {
        IReducedDynamicFunctionSymbol dfs = ((IDFSBackedFeatureInfo) ci).getDfs();
        if (this.equals(dfs) || getBackingDfs().equals(dfs)) {
          return ci;
        }
        // super()/this() delegation symbols don't equal the target ctor's
        // dfs by name; fall back to matching by argument list.
        else if (((this instanceof ReducedSuperConstructorFunctionSymbol) ||
                (this instanceof ReducedThisConstructorFunctionSymbol)) &&
                (dfs.getArgs().equals(getArgs()))) {
          return ci;
        }
      }
    }
    return null;
  }

  @Override
  public int hashCode() {
    return getName().hashCode();
  }

  // Equality is by symbol name only, consistent with hashCode() above.
  // NOTE(review): overloads sharing a name compare equal here — presumably
  // the name encodes the signature (see getFullDescription); confirm.
  public boolean equals( Object o )
  {
    if( this == o )
    {
      return true;
    }
    if( o == null || !(o instanceof ReducedDynamicFunctionSymbol))
    {
      return false;
    }
    ReducedDynamicFunctionSymbol that = (ReducedDynamicFunctionSymbol)o;
    String strName = getName();
    return !(strName != null ? !strName.equals( that.getName() ) : that.getName() != null);
  }

  /**
   * Invokes the dynamic function.
   */
  public Object invoke( Object[] args )
  {
    return invokeFromBytecode(args);
  }

  // Invokes via the compiled backing class. For a Gosu program, a fresh
  // program instance is created (Class.newInstance) and evaluated first so
  // the method has an instance to run against; otherwise the call handler is
  // invoked with a null instance.
  private Object invokeFromBytecode( Object[] args )
  {
    IGosuClassInternal gsClass = getGosuClass();
    if( gsClass == null )
    {
      throw new IllegalStateException( "Did not find Gosu Class/Program" );
    }
    Class<?> javaClass = gsClass.getBackingClass();
    IProgramInstance instance = null;
    if( gsClass instanceof IGosuProgram)
    {
      try
      {
        instance = (IProgramInstance)javaClass.newInstance();
        instance.evaluate(null);
      }
      catch( Exception e )
      {
        throw GosuExceptionUtil.forceThrow(e);
      }
    }
    IMethodInfo mi = gsClass.getTypeInfo().getMethod(gsClass, getDisplayName(), getArgTypes());
    return mi.getCallHandler().handleCall( instance, args );
  }

  // Annotations are looked up through the resolved feature info's modifier
  // info; empty if the feature can't be resolved to a Gosu-backed one.
  public List<IGosuAnnotation> getAnnotations() {
    List<IGosuAnnotation> result;
    IAttributedFeatureInfo featureInfo = getMethodOrConstructorInfo();
    if (featureInfo instanceof GosuBaseAttributedFeatureInfo) {
      IModifierInfo modifierInfo = ((GosuClassTypeInfo)getGosuClass().getTypeInfo()).getModifierInfo((GosuBaseAttributedFeatureInfo) featureInfo);
      result = modifierInfo != null ? modifierInfo.getAnnotations() : Collections.<IGosuAnnotation>emptyList();
    } else {
      result = Collections.emptyList();
    }
    return result;
  }

  // Like isSuperOrThisConstructor, these test the ORIGINAL symbol's class.
  public boolean isVarPropertyGet() {
    return VarPropertyGetFunctionSymbol.class.isAssignableFrom(getSymbolClass());
  }

  public boolean isVarPropertySet() {
    return VarPropertySetFunctionSymbol.class.isAssignableFrom(getSymbolClass());
  }

  public boolean isConstructor() {
    return _isConstructor;
  }
}
apache-2.0
langfr/camunda-bpm-platform
qa/test-db-instance-migration/test-fixture-710/src/main/java/org/camunda/bpm/qa/upgrade/timestamp/UserLockExpTimeScenario.java
2871
/* * Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH * under one or more contributor license agreements. See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. Camunda licenses this file to you under the Apache License, * Version 2.0; you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.camunda.bpm.qa.upgrade.timestamp; import org.camunda.bpm.engine.IdentityService; import org.camunda.bpm.engine.ProcessEngine; import org.camunda.bpm.engine.identity.User; import org.camunda.bpm.engine.impl.cfg.ProcessEngineConfigurationImpl; import org.camunda.bpm.engine.impl.context.Context; import org.camunda.bpm.engine.impl.interceptor.Command; import org.camunda.bpm.engine.impl.interceptor.CommandContext; import org.camunda.bpm.engine.impl.persistence.entity.IdentityInfoManager; import org.camunda.bpm.engine.impl.persistence.entity.UserEntity; import org.camunda.bpm.qa.upgrade.DescribesScenario; import org.camunda.bpm.qa.upgrade.ScenarioSetup; import org.camunda.bpm.qa.upgrade.Times; /** * @author Nikola Koevski */ public class UserLockExpTimeScenario extends AbstractTimestampMigrationScenario { protected static final String USER_ID = "lockExpTimeTestUser"; protected static final String PASSWORD = "testPassword"; @DescribesScenario("initUserLockExpirationTime") @Times(1) public static ScenarioSetup initUserLockExpirationTime() { return new ScenarioSetup() { @Override public void execute(ProcessEngine processEngine, String s) { final IdentityService identityService = 
processEngine.getIdentityService(); User user = identityService.newUser(USER_ID); user.setPassword(PASSWORD); identityService.saveUser(user); ((ProcessEngineConfigurationImpl) processEngine.getProcessEngineConfiguration()).getCommandExecutorTxRequired().execute(new Command<Void>() { @Override public Void execute(CommandContext context) { IdentityInfoManager identityInfoManager = Context.getCommandContext() .getSession(IdentityInfoManager.class); UserEntity userEntity = (UserEntity) identityService.createUserQuery() .userId(USER_ID) .singleResult(); identityInfoManager.updateUserLock(userEntity, 10, TIMESTAMP); return null; } }); } }; } }
apache-2.0
davidocean/geoportal-server
geoportal/src/com/esri/gpt/catalog/arcgis/metadata/ServiceHandler.java
8184
/* See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * Esri Inc. licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.esri.gpt.catalog.arcgis.metadata; import com.esri.arcgisws.ServiceDescription; import com.esri.gpt.framework.context.ApplicationConfiguration; import com.esri.gpt.framework.context.ApplicationContext; import com.esri.gpt.framework.http.HttpClientRequest; import com.esri.gpt.framework.resource.api.Native; import com.esri.gpt.framework.resource.api.Publishable; import com.esri.gpt.framework.resource.api.Resource; import com.esri.gpt.framework.resource.api.SourceUri; import com.esri.gpt.framework.resource.common.UrlUri; import com.esri.gpt.framework.security.credentials.UsernamePasswordCredentials; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Date; import java.util.logging.Logger; /** * Super-class associated with the collection and publication of metadata * for an ArcGIS Server service. */ public abstract class ServiceHandler { private final static Logger LOGGER = Logger.getLogger(ServiceHandler.class.getCanonicalName()); protected final static HttpClientRequest http = new HttpClientRequest(); private UsernamePasswordCredentials credentials; /** constructors ============================================================ */ /** Default constructor. */ public ServiceHandler() {} /** * Gets credentials. 
* @return credentials */ public UsernamePasswordCredentials getCredentials() { return credentials; } /** * Sets credentials. * @param credentials credentials */ public void setCredentials(UsernamePasswordCredentials credentials) { this.credentials = credentials; } /** attributes =============================================================== */ /** methods ================================================================= */ /** * Collect metadata associated with the parent of a service. * @param processor the ArcGIS Server service processor * @param serviceInfo the active service information * @throws Exception if an exception occurs */ public void collectParentMetadata(AGSProcessor processor, ServiceInfo serviceInfo) throws Exception { this.collectParentMetadata(processor.getHandlerFactory(), serviceInfo); } /** * Collect metadata associated with the parent of a service. * @param handlerFacory handler factory * @param serviceInfo the active service information * @throws Exception if an exception occurs */ public void collectParentMetadata(ServiceHandlerFactory handlerFacory, ServiceInfo serviceInfo) throws Exception { String parentType = serviceInfo.getParentType(); if ((parentType != null) && (parentType.length() > 0)) { ServiceHandler parentHandler = handlerFacory.makeHandler(parentType); parentHandler.setCredentials(getCredentials()); if (parentHandler != null) { String soapUrl = serviceInfo.getSoapUrl(); try { int idx = soapUrl.lastIndexOf("/"); serviceInfo.setSoapUrl(soapUrl.substring(0,idx)); parentHandler.collectMetadata(handlerFacory, serviceInfo); } finally { serviceInfo.setSoapUrl(soapUrl); } serviceInfo.getKeywords().remove(parentType); } } } /** * Collect metadata associated with a service. 
* @param processor the ArcGIS Server service processor * @param serviceInfo the active service information * @throws Exception if an exception occurs */ public void collectMetadata(AGSProcessor processor, ServiceInfo serviceInfo) throws Exception { this.collectMetadata(processor.getHandlerFactory(), serviceInfo); } /** * Collect metadata associated with a service. * @param handlerFactory handler factory * @param serviceInfo the active service information * @throws Exception if an exception occurs */ public abstract void collectMetadata(ServiceHandlerFactory handlerFactory, ServiceInfo serviceInfo) throws Exception; /** * Creates new record instance (or more record instances) for given ServiceInfo * and appends to the collection. * @param records collection of records * @param factory handler factory * @param serviceInfo service info * @param isNative <code>true</code> to append native record * @throws Exception if an exception occurs */ public void appendRecord(Collection<Resource> records, ServiceHandlerFactory factory, ServiceInfo serviceInfo, boolean isNative) throws Exception { records.add(isNative? new NativeServiceRecord(factory, serviceInfo): new ServiceRecord(factory, serviceInfo)); } /** * Publishes metadata associated with a service. * @param processor the ArcGIS Server service processor * @param serviceInfo the active service information * @throws Exception if an exception occurs */ public void publishMetadata(AGSProcessor processor, ServiceInfo serviceInfo) throws Exception { String resourceUrl = serviceInfo.getResourceUrl(); String resourceXml = serviceInfo.asDublinCore(processor); processor.publishMetadata(resourceUrl,resourceXml); } /** * Creates service info. 
* @param parentInfo parent info * @param desc service description * @param currentRestUrl current REST URL * @param currentSoapUrl current SOAP URL * @return service info */ public ServiceInfo createServiceInfo(ServiceInfo parentInfo, ServiceDescription desc, String currentRestUrl, String currentSoapUrl) { ServiceInfo info = new ServiceInfo(); info.setCapabilities(desc.getCapabilities()); info.setDescription(desc.getDescription()); info.setName(desc.getName()); info.setParentType(desc.getParentType()); info.setResourceUrl(currentRestUrl); info.setRestUrl(currentRestUrl); info.setSoapUrl(currentSoapUrl); info.setType(desc.getType()); info.setParentInfo(parentInfo); return info; } /** * Service specific Record implementation. */ public class ServiceRecord extends ServiceInfoProvider implements Publishable { private ServiceHandlerFactory factory; public ServiceRecord(ServiceHandlerFactory factory, ServiceInfo info) { super(info); this.factory = factory; } @Override public SourceUri getSourceUri() { return new UrlUri(getServiceInfo().getResourceUrl()); } @Override public String getContent() throws IOException { ApplicationContext appCtx = ApplicationContext.getInstance(); ApplicationConfiguration cfg = appCtx.getConfiguration(); LOGGER.finer("Collecting metadata for: " + getServiceInfo().getSoapUrl()); try { ServiceHandler.this.collectMetadata(factory, getServiceInfo()); return getServiceInfo().asDublinCore(cfg, http); } catch (Exception ex) { throw new IOException("Error collecting metadata. Cause: "+ex.getMessage()); } } @Override public Iterable<Resource> getNodes() { return new ArrayList<Resource>(); } @Override public Date getUpdateDate() { return null; } } /** * Native service specific Record implementation. 
*/ private class NativeServiceRecord extends ServiceRecord implements Native { public NativeServiceRecord(ServiceHandlerFactory factory, ServiceInfo info) { super(factory, info); } @Override public Date getUpdateDate() { throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates. } } }
apache-2.0
tarikgwa/test
html/app/code/Magento/Paypal/view/adminhtml/web/js/rules/payflow/bml/disable.js
454
/** * Copyright © 2015 Magento. All rights reserved. * See COPYING.txt for license details. */ define([], function () { 'use strict'; return function ($target, $owner, data) { $target.find('label[for="' + $target.find(data.enableBml).attr('id') + '"]').removeClass('enabled'); $target.find(data.enableBml + ' option[value="0"]').prop('selected', true); $target.find(data.enableBml).prop('disabled', true); }; });
apache-2.0
psiroky/drools
drools-core/src/test/java/org/drools/Address.java
2515
/* * Copyright 2010 JBoss Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.drools; public class Address { private String street; private String number; private String phone; public Address() {} public Address( String street, String number, String phone ) { this.street = street; this.number = number; this.phone = phone; } public String getNumber() { return number; } public void setNumber(String number) { this.number = number; } public String getPhone() { return phone; } public void setPhone(String phone) { this.phone = phone; } public String getStreet() { return street; } public void setStreet(String street) { this.street = street; } public String toString() { return "Address( "+this.street+", "+this.number+" - phone: "+this.phone+" )"; } public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((number == null) ? 0 : number.hashCode()); result = prime * result + ((phone == null) ? 0 : phone.hashCode()); result = prime * result + ((street == null) ? 
0 : street.hashCode()); return result; } public boolean equals(Object obj) { if ( this == obj ) return true; if ( obj == null ) return false; if ( getClass() != obj.getClass() ) return false; Address other = (Address) obj; if ( number == null ) { if ( other.number != null ) return false; } else if ( !number.equals( other.number ) ) return false; if ( phone == null ) { if ( other.phone != null ) return false; } else if ( !phone.equals( other.phone ) ) return false; if ( street == null ) { if ( other.street != null ) return false; } else if ( !street.equals( other.street ) ) return false; return true; } }
apache-2.0
netscaler/neutron
neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_cfg.py
10281
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Oleg Bondarev (obondarev@mirantis.com) import contextlib import mock from oslo.config import cfg as config from neutron.services.loadbalancer.drivers.haproxy import cfg from neutron.tests import base class TestHaproxyCfg(base.BaseTestCase): def test_save_config(self): with contextlib.nested( mock.patch('neutron.services.loadbalancer.' 'drivers.haproxy.cfg._build_global'), mock.patch('neutron.services.loadbalancer.' 'drivers.haproxy.cfg._build_defaults'), mock.patch('neutron.services.loadbalancer.' 'drivers.haproxy.cfg._build_frontend'), mock.patch('neutron.services.loadbalancer.' 
'drivers.haproxy.cfg._build_backend'), mock.patch('neutron.agent.linux.utils.replace_file') ) as (b_g, b_d, b_f, b_b, replace): test_config = ['globals', 'defaults', 'frontend', 'backend'] b_g.return_value = [test_config[0]] b_d.return_value = [test_config[1]] b_f.return_value = [test_config[2]] b_b.return_value = [test_config[3]] cfg.save_config('test_path', mock.Mock()) replace.assert_called_once_with('test_path', '\n'.join(test_config)) def test_build_global(self): if not hasattr(config.CONF, 'user_group'): config.CONF.register_opt(config.StrOpt('user_group')) config.CONF.set_override('user_group', 'test_group') expected_opts = ['global', '\tdaemon', '\tuser nobody', '\tgroup test_group', '\tlog /dev/log local0', '\tlog /dev/log local1 notice', '\tstats socket test_path mode 0666 level user'] opts = cfg._build_global(mock.Mock(), 'test_path') self.assertEqual(expected_opts, list(opts)) config.CONF.reset() def test_build_defaults(self): expected_opts = ['defaults', '\tlog global', '\tretries 3', '\toption redispatch', '\ttimeout connect 5000', '\ttimeout client 50000', '\ttimeout server 50000'] opts = cfg._build_defaults(mock.Mock()) self.assertEqual(expected_opts, list(opts)) config.CONF.reset() def test_build_frontend(self): test_config = {'vip': {'id': 'vip_id', 'protocol': 'HTTP', 'port': {'fixed_ips': [ {'ip_address': '10.0.0.2'}] }, 'protocol_port': 80, 'connection_limit': 2000, }, 'pool': {'id': 'pool_id'}} expected_opts = ['frontend vip_id', '\toption tcplog', '\tbind 10.0.0.2:80', '\tmode http', '\tdefault_backend pool_id', '\tmaxconn 2000', '\toption forwardfor'] opts = cfg._build_frontend(test_config) self.assertEqual(expected_opts, list(opts)) test_config['vip']['connection_limit'] = -1 expected_opts.remove('\tmaxconn 2000') opts = cfg._build_frontend(test_config) self.assertEqual(expected_opts, list(opts)) def test_build_backend(self): test_config = {'pool': {'id': 'pool_id', 'protocol': 'HTTP', 'lb_method': 'ROUND_ROBIN'}, 'members': [{'status': 
'ACTIVE', 'admin_state_up': True, 'id': 'member1_id', 'address': '10.0.0.3', 'protocol_port': 80, 'weight': 1}, {'status': 'INACTIVE', 'admin_state_up': True, 'id': 'member2_id', 'address': '10.0.0.4', 'protocol_port': 80, 'weight': 1}], 'healthmonitors': [{'admin_state_up': True, 'delay': 3, 'max_retries': 4, 'timeout': 2, 'type': 'TCP'}], 'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}} expected_opts = ['backend pool_id', '\tmode http', '\tbalance roundrobin', '\toption forwardfor', '\ttimeout check 2s', '\tcookie SRV insert indirect nocache', '\tserver member1_id 10.0.0.3:80 weight 1 ' 'check inter 3s fall 4 cookie 0', '\tserver member2_id 10.0.0.4:80 weight 1 ' 'check inter 3s fall 4 cookie 1'] opts = cfg._build_backend(test_config) self.assertEqual(expected_opts, list(opts)) def test_get_server_health_option(self): test_config = {'healthmonitors': [{'admin_state_up': False, 'delay': 3, 'max_retries': 4, 'timeout': 2, 'type': 'TCP', 'http_method': 'GET', 'url_path': '/', 'expected_codes': '200'}]} self.assertEqual(('', []), cfg._get_server_health_option(test_config)) self.assertEqual(('', []), cfg._get_server_health_option(test_config)) test_config['healthmonitors'][0]['admin_state_up'] = True expected = (' check inter 3s fall 4', ['timeout check 2s']) self.assertEqual(expected, cfg._get_server_health_option(test_config)) test_config['healthmonitors'][0]['type'] = 'HTTPS' expected = (' check inter 3s fall 4', ['timeout check 2s', 'option httpchk GET /', 'http-check expect rstatus 200', 'option ssl-hello-chk']) self.assertEqual(expected, cfg._get_server_health_option(test_config)) def test_has_http_cookie_persistence(self): config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}} self.assertTrue(cfg._has_http_cookie_persistence(config)) config = {'vip': {'session_persistence': {'type': 'SOURCE_IP'}}} self.assertFalse(cfg._has_http_cookie_persistence(config)) config = {'vip': {'session_persistence': {}}} 
self.assertFalse(cfg._has_http_cookie_persistence(config)) def test_get_session_persistence(self): config = {'vip': {'session_persistence': {'type': 'SOURCE_IP'}}} self.assertEqual(cfg._get_session_persistence(config), ['stick-table type ip size 10k', 'stick on src']) config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}} self.assertEqual(cfg._get_session_persistence(config), ['cookie SRV insert indirect nocache']) config = {'vip': {'session_persistence': {'type': 'APP_COOKIE', 'cookie_name': 'test'}}} self.assertEqual(cfg._get_session_persistence(config), ['appsession test len 56 timeout 3h']) config = {'vip': {'session_persistence': {'type': 'APP_COOKIE'}}} self.assertEqual(cfg._get_session_persistence(config), []) config = {'vip': {'session_persistence': {'type': 'UNSUPPORTED'}}} self.assertEqual(cfg._get_session_persistence(config), []) def test_expand_expected_codes(self): exp_codes = '' self.assertEqual(cfg._expand_expected_codes(exp_codes), set([])) exp_codes = '200' self.assertEqual(cfg._expand_expected_codes(exp_codes), set(['200'])) exp_codes = '200, 201' self.assertEqual(cfg._expand_expected_codes(exp_codes), set(['200', '201'])) exp_codes = '200, 201,202' self.assertEqual(cfg._expand_expected_codes(exp_codes), set(['200', '201', '202'])) exp_codes = '200-202' self.assertEqual(cfg._expand_expected_codes(exp_codes), set(['200', '201', '202'])) exp_codes = '200-202, 205' self.assertEqual(cfg._expand_expected_codes(exp_codes), set(['200', '201', '202', '205'])) exp_codes = '200, 201-203' self.assertEqual(cfg._expand_expected_codes(exp_codes), set(['200', '201', '202', '203'])) exp_codes = '200, 201-203, 205' self.assertEqual(cfg._expand_expected_codes(exp_codes), set(['200', '201', '202', '203', '205'])) exp_codes = '201-200, 205' self.assertEqual(cfg._expand_expected_codes(exp_codes), set(['205']))
apache-2.0
barthel/maven
maven-core/src/main/java/org/apache/maven/lifecycle/internal/LifecycleDependencyResolver.java
13386
package org.apache.maven.lifecycle.internal; /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Set; import javax.inject.Inject; import javax.inject.Named; import org.apache.maven.RepositoryUtils; import org.apache.maven.artifact.Artifact; import org.apache.maven.artifact.ArtifactUtils; import org.apache.maven.eventspy.internal.EventSpyDispatcher; import org.apache.maven.execution.MavenSession; import org.apache.maven.lifecycle.LifecycleExecutionException; import org.apache.maven.plugin.ProjectArtifactsCache; import org.apache.maven.project.DefaultDependencyResolutionRequest; import org.apache.maven.project.DependencyResolutionException; import org.apache.maven.project.DependencyResolutionResult; import org.apache.maven.project.MavenProject; import org.apache.maven.project.ProjectDependenciesResolver; import org.apache.maven.project.artifact.InvalidDependencyVersionException; import org.codehaus.plexus.logging.Logger; import org.eclipse.aether.graph.Dependency; import org.eclipse.aether.graph.DependencyFilter; import 
org.eclipse.aether.graph.DependencyNode; import org.eclipse.aether.util.filter.AndDependencyFilter; import org.eclipse.aether.util.filter.ScopeDependencyFilter; /** * Resolves dependencies for the artifacts in context of the lifecycle build * * @since 3.0 * @author Benjamin Bentmann * @author Jason van Zyl * @author Kristian Rosenvold (extracted class) * <p/> * NOTE: This class is not part of any public api and can be changed or deleted without prior notice. */ @Named public class LifecycleDependencyResolver { @Inject private ProjectDependenciesResolver dependenciesResolver; @Inject private Logger logger; @Inject private ProjectArtifactFactory artifactFactory; @Inject private EventSpyDispatcher eventSpyDispatcher; @Inject private ProjectArtifactsCache projectArtifactsCache; public LifecycleDependencyResolver() { } public LifecycleDependencyResolver( ProjectDependenciesResolver projectDependenciesResolver, Logger logger ) { this.dependenciesResolver = projectDependenciesResolver; this.logger = logger; } public static List<MavenProject> getProjects( MavenProject project, MavenSession session, boolean aggregator ) { if ( aggregator ) { return session.getProjects(); } else { return Collections.singletonList( project ); } } public void resolveProjectDependencies( MavenProject project, Collection<String> scopesToCollect, Collection<String> scopesToResolve, MavenSession session, boolean aggregating, Set<Artifact> projectArtifacts ) throws LifecycleExecutionException { ClassLoader tccl = Thread.currentThread().getContextClassLoader(); try { ClassLoader projectRealm = project.getClassRealm(); if ( projectRealm != null && projectRealm != tccl ) { Thread.currentThread().setContextClassLoader( projectRealm ); } if ( project.getDependencyArtifacts() == null ) { try { project.setDependencyArtifacts( artifactFactory.createArtifacts( project ) ); } catch ( InvalidDependencyVersionException e ) { throw new LifecycleExecutionException( e ); } } Set<Artifact> artifacts; 
ProjectArtifactsCache.Key cacheKey = projectArtifactsCache.createKey( project, scopesToCollect, scopesToResolve, aggregating, session.getRepositorySession() ); ProjectArtifactsCache.CacheRecord recordArtifacts; recordArtifacts = projectArtifactsCache.get( cacheKey ); if ( recordArtifacts != null ) { artifacts = recordArtifacts.artifacts; } else { try { artifacts = getDependencies( project, scopesToCollect, scopesToResolve, session, aggregating, projectArtifacts ); recordArtifacts = projectArtifactsCache.put( cacheKey, artifacts ); } catch ( LifecycleExecutionException e ) { projectArtifactsCache.put( cacheKey, e ); projectArtifactsCache.register( project, cacheKey, recordArtifacts ); throw e; } } projectArtifactsCache.register( project, cacheKey, recordArtifacts ); project.setResolvedArtifacts( artifacts ); Map<String, Artifact> map = new HashMap<>(); for ( Artifact artifact : artifacts ) { map.put( artifact.getDependencyConflictId(), artifact ); } for ( Artifact artifact : project.getDependencyArtifacts() ) { if ( artifact.getFile() == null ) { Artifact resolved = map.get( artifact.getDependencyConflictId() ); if ( resolved != null ) { artifact.setFile( resolved.getFile() ); artifact.setDependencyTrail( resolved.getDependencyTrail() ); artifact.setResolvedVersion( resolved.getVersion() ); artifact.setResolved( true ); } } } } finally { Thread.currentThread().setContextClassLoader( tccl ); } } private Set<Artifact> getDependencies( MavenProject project, Collection<String> scopesToCollect, Collection<String> scopesToResolve, MavenSession session, boolean aggregating, Set<Artifact> projectArtifacts ) throws LifecycleExecutionException { if ( scopesToCollect == null ) { scopesToCollect = Collections.emptySet(); } if ( scopesToResolve == null ) { scopesToResolve = Collections.emptySet(); } if ( scopesToCollect.isEmpty() && scopesToResolve.isEmpty() ) { return new LinkedHashSet<>(); } scopesToCollect = new HashSet<>( scopesToCollect ); scopesToCollect.addAll( 
scopesToResolve ); DependencyFilter collectionFilter = new ScopeDependencyFilter( null, negate( scopesToCollect ) ); DependencyFilter resolutionFilter = new ScopeDependencyFilter( null, negate( scopesToResolve ) ); resolutionFilter = AndDependencyFilter.newInstance( collectionFilter, resolutionFilter ); resolutionFilter = AndDependencyFilter.newInstance( resolutionFilter, new ReactorDependencyFilter( projectArtifacts ) ); DependencyResolutionResult result; try { DefaultDependencyResolutionRequest request = new DefaultDependencyResolutionRequest( project, session.getRepositorySession() ); request.setResolutionFilter( resolutionFilter ); eventSpyDispatcher.onEvent( request ); result = dependenciesResolver.resolve( request ); } catch ( DependencyResolutionException e ) { result = e.getResult(); /* * MNG-2277, the check below compensates for our bad plugin support where we ended up with aggregator * plugins that require dependency resolution although they usually run in phases of the build where project * artifacts haven't been assembled yet. The prime example of this is "mvn release:prepare". 
*/ if ( aggregating && areAllDependenciesInReactor( session.getProjects(), result.getUnresolvedDependencies() ) ) { logger.warn( "The following dependencies could not be resolved at this point of the build" + " but seem to be part of the reactor:" ); for ( Dependency dependency : result.getUnresolvedDependencies() ) { logger.warn( "o " + dependency ); } logger.warn( "Try running the build up to the lifecycle phase \"package\"" ); } else { throw new LifecycleExecutionException( null, project, e ); } } eventSpyDispatcher.onEvent( result ); Set<Artifact> artifacts = new LinkedHashSet<>(); if ( result.getDependencyGraph() != null && !result.getDependencyGraph().getChildren().isEmpty() ) { RepositoryUtils.toArtifacts( artifacts, result.getDependencyGraph().getChildren(), Collections.singletonList( project.getArtifact().getId() ), collectionFilter ); } return artifacts; } private boolean areAllDependenciesInReactor( Collection<MavenProject> projects, Collection<Dependency> dependencies ) { Set<String> projectKeys = getReactorProjectKeys( projects ); for ( Dependency dependency : dependencies ) { org.eclipse.aether.artifact.Artifact a = dependency.getArtifact(); String key = ArtifactUtils.key( a.getGroupId(), a.getArtifactId(), a.getVersion() ); if ( !projectKeys.contains( key ) ) { return false; } } return true; } private Set<String> getReactorProjectKeys( Collection<MavenProject> projects ) { Set<String> projectKeys = new HashSet<>( projects.size() * 2 ); for ( MavenProject project : projects ) { String key = ArtifactUtils.key( project.getGroupId(), project.getArtifactId(), project.getVersion() ); projectKeys.add( key ); } return projectKeys; } private Collection<String> negate( Collection<String> scopes ) { Collection<String> result = new HashSet<>(); Collections.addAll( result, "system", "compile", "provided", "runtime", "test" ); for ( String scope : scopes ) { if ( "compile".equals( scope ) ) { result.remove( "compile" ); result.remove( "system" ); result.remove( 
"provided" ); } else if ( "runtime".equals( scope ) ) { result.remove( "compile" ); result.remove( "runtime" ); } else if ( "compile+runtime".equals( scope ) ) { result.remove( "compile" ); result.remove( "system" ); result.remove( "provided" ); result.remove( "runtime" ); } else if ( "runtime+system".equals( scope ) ) { result.remove( "compile" ); result.remove( "system" ); result.remove( "runtime" ); } else if ( "test".equals( scope ) ) { result.clear(); } } return result; } private static class ReactorDependencyFilter implements DependencyFilter { private Set<String> keys = new HashSet<>(); public ReactorDependencyFilter( Collection<Artifact> artifacts ) { for ( Artifact artifact : artifacts ) { String key = ArtifactUtils.key( artifact ); keys.add( key ); } } public boolean accept( DependencyNode node, List<DependencyNode> parents ) { Dependency dependency = node.getDependency(); if ( dependency != null ) { org.eclipse.aether.artifact.Artifact a = dependency.getArtifact(); String key = ArtifactUtils.key( a.getGroupId(), a.getArtifactId(), a.getVersion() ); return !keys.contains( key ); } return false; } } }
apache-2.0
daniellemayne/dasein-cloud-core
src/main/java/org/dasein/cloud/dc/DataCenterCapabilities.java
2417
/**
 * Copyright (C) 2009-2016 Dell, Inc.
 * See annotations for authorship information
 *
 * ====================================================================
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * ====================================================================
 */

package org.dasein.cloud.dc;

import org.dasein.cloud.Capabilities;

import java.util.Locale;

/**
 * Describes the capabilities of a region within a cloud for a specific account.
 * User: daniellemayne
 * Date: 04/07/2014
 * Time: 16:21
 */
public interface DataCenterCapabilities extends Capabilities {
    /**
     * Provides the cloud-specific term for a data center (e.g. "availability zone").
     * @param locale the locale into which the term should be translated
     * @return the term for a data center
     */
    public String getProviderTermForDataCenter(Locale locale);

    /**
     * Provides the cloud-specific term for a region.
     * @param locale the locale into which the term should be translated
     * @return the term for a region
     */
    public String getProviderTermForRegion(Locale locale);

    /**
     * Indicates whether the underlying cloud supports affinity groups
     * @return {@code true} indicating support for affinity groups
     */
    public boolean supportsAffinityGroups();

    /**
     * Specifies whether the given cloud supports the concept of resource pools
     * @return {@code true} indicating support for resource pools
     */
    public boolean supportsResourcePools();

    /**
     * Specifies whether the given cloud supports the concept of storage pools
     * @return {@code true} indicating support for storage pools
     */
    public boolean supportsStoragePools();

    /**
     * Specifies whether the given cloud supports the concept of folders
     * @return {@code true} indicating support for folders
     */
    public boolean supportsFolders();
}
apache-2.0
tsvetie/nativescript-cli
resources/vr/Assets/OVR/Scripts/OVROverlay.cs
7806
/************************************************************************************

Copyright   :   Copyright 2014 Oculus VR, LLC. All Rights reserved.

Licensed under the Oculus VR Rift SDK License Version 3.3 (the "License");
you may not use the Oculus VR Rift SDK except in compliance with the License,
which is provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.

You may obtain a copy of the License at

http://www.oculus.com/licenses/LICENSE-3.3

Unless required by applicable law or agreed to in writing, the Oculus VR SDK
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

************************************************************************************/

using UnityEngine;
using System;
using System.Collections;
using System.Runtime.InteropServices;
using VR = UnityEngine.VR;

/// <summary>
/// Add OVROverlay script to an object with an optional mesh primitive
/// rendered as a TimeWarp overlay instead by drawing it into the eye buffer.
/// This will take full advantage of the display resolution and avoid double
/// resampling of the texture.
///
/// If the texture is dynamically generated, as for an interactive GUI or
/// animation, it must be explicitly triple buffered to avoid flickering
/// when it is referenced asynchronously by TimeWarp, check OVRRTOverlayConnector.cs for triple buffers design
///
/// We support 3 types of Overlay shapes right now
/// 1. Quad : This is most common overlay type , you render a quad in Timewarp space.
/// 2. Cylinder: [Mobile Only][Experimental], Display overlay as partial surface of a cylinder
///     * The cylinder's center will be your game object's center
///     * We encoded the cylinder's parameters in transform.scale,
///         **[scale.z] is the radius of the cylinder
///         **[scale.y] is the height of the cylinder
///         **[scale.x] is the length of the arc of cylinder
///     * Limitations
///         **Only the half of the cylinder can be displayed, which means the arc angle has to be smaller than 180 degree, [scale.x] / [scale.z] <= PI
///         **Your camera has to be inside of the inscribed sphere of the cylinder, the overlay will be faded out automatically when the camera is close to the inscribed sphere's surface.
///         **Translation only works correctly with vrDriver 1.04 or above
/// 3. Cubemap: [Mobile Only], Display overlay as a cube map
/// </summary>
public class OVROverlay : MonoBehaviour
{
	public enum OverlayShape
	{
		Quad = 0,       // Display overlay as a quad
		Cylinder = 1,   // [Mobile Only][Experimental] Display overlay as a cylinder, Translation only works correctly with vrDriver 1.04 or above
		Cubemap = 2,    // [Mobile Only] Display overlay as a cube map
	}

	public enum OverlayType
	{
		None,           // Disabled the overlay
		Underlay,       // Eye buffers blend on top
		Overlay,        // Blends on top of the eye buffer
		OverlayShowLod  // (Deprecated) Blends on top and colorizes texture level of detail
	};

	// Fewer overlay layers are available on Android than on desktop builds.
#if UNITY_ANDROID && !UNITY_EDITOR
	const int maxInstances = 3;
#else
	const int maxInstances = 15;
#endif

	// One slot per overlay layer; each enabled OVROverlay claims the first free slot in OnEnable.
	static OVROverlay[] instances = new OVROverlay[maxInstances];

	/// <summary>
	/// Specify overlay's type
	/// </summary>
	public OverlayType currentOverlayType = OverlayType.Overlay;

	/// <summary>
	/// Specify overlay's shape
	/// </summary>
	public OverlayShape currentOverlayShape = OverlayShape.Quad;

	/// <summary>
	/// Try to avoid setting texture frequently when app is running, texNativePtr updating is slow since rendering thread synchronization
	/// Please cache your nativeTexturePtr and use OverrideOverlayTextureInfo
	/// </summary>
	// Index 0 is used for the left eye (and mono), index 1 for the right eye.
	public Texture[] textures = new Texture[] { null, null };
	// Last textures handed to the plugin; used to detect changes without re-querying native pointers.
	private Texture[] cachedTextures = new Texture[] { null, null };
	// Native GPU texture handles; GetNativeTexturePtr is expensive, so these are cached.
	private IntPtr[] texNativePtrs = new IntPtr[] { IntPtr.Zero, IntPtr.Zero };

	// Overlay layer slot claimed in OnEnable; -1 means "no layer assigned".
	private int layerIndex = -1;
	Renderer rend;

	/// <summary>
	/// Use this function to set texture and texNativePtr when app is running
	/// GetNativeTexturePtr is a slow behavior, the value should be pre-cached
	/// </summary>
	public void OverrideOverlayTextureInfo(Texture srcTexture, IntPtr nativePtr, VR.VRNode node)
	{
		int index = (node == VR.VRNode.RightEye) ? 1 : 0;

		textures[index] = srcTexture;
		cachedTextures[index] = srcTexture;
		texNativePtrs[index] = nativePtr;
	}

	void Awake()
	{
		Debug.Log("Overlay Awake");
		rend = GetComponent<Renderer>();
		for (int i = 0; i < 2; ++i)
		{
			// Backward compatibility: fall back to the renderer's main texture when none was assigned.
			if (rend != null && textures[i] == null)
				textures[i] = rend.material.mainTexture;

			if (textures[i] != null)
			{
				cachedTextures[i] = textures[i];
				texNativePtrs[i] = textures[i].GetNativeTexturePtr();
			}
		}
	}

	void OnEnable()
	{
		if (!OVRManager.isHmdPresent)
		{
			enabled = false;
			return;
		}

		// NOTE(review): OnDisable() is invoked first, presumably to release any previously
		// claimed layer slot before claiming one below — confirm against OVRPlugin docs.
		OnDisable();

		// Claim the first free overlay layer slot (or re-claim our own).
		for (int i = 0; i < maxInstances; ++i)
		{
			if (instances[i] == null || instances[i] == this)
			{
				layerIndex = i;
				instances[i] = this;
				break;
			}
		}
	}

	void OnDisable()
	{
		if (layerIndex != -1)
		{
			// Turn off the overlay if it was on.
			OVRPlugin.SetOverlayQuad(true, false, IntPtr.Zero, IntPtr.Zero, IntPtr.Zero, OVRPose.identity.ToPosef(), Vector3.one.ToVector3f(), layerIndex);
			instances[layerIndex] = null;
		}
		layerIndex = -1;
	}

	void OnRenderObject()
	{
		// The overlay must be specified every eye frame, because it is positioned relative to the
		// current head location.  If frames are dropped, it will be time warped appropriately,
		// just like the eye buffers.
		if (!Camera.current.CompareTag("MainCamera") || Camera.current.cameraType != CameraType.Game || layerIndex == -1 || currentOverlayType == OverlayType.None)
			return;

#if !UNITY_ANDROID || UNITY_EDITOR
		// Cubemap/Cylinder shapes are mobile-only; warn (but still submit) on other platforms.
		if (currentOverlayShape == OverlayShape.Cubemap || currentOverlayShape == OverlayShape.Cylinder)
		{
			Debug.LogWarning("Overlay shape " + currentOverlayShape + " is not supported on current platform");
		}
#endif

		for (int i = 0; i < 2; ++i)
		{
			if (i >= textures.Length)
				continue;

			// Refresh the cached native pointer only when the assigned texture actually changed.
			if (textures[i] != cachedTextures[i])
			{
				cachedTextures[i] = textures[i];
				if (cachedTextures[i] != null)
					texNativePtrs[i] = cachedTextures[i].GetNativeTexturePtr();
			}

			if (currentOverlayShape == OverlayShape.Cubemap)
			{
				if (textures[i] != null && textures[i].GetType() != typeof(Cubemap))
				{
					Debug.LogError("Need Cubemap texture for cube map overlay");
					return;
				}
			}
		}

		// A valid left-eye texture is mandatory; the right-eye texture is optional.
		if (cachedTextures[0] == null || texNativePtrs[0] == IntPtr.Zero)
			return;

		bool overlay = (currentOverlayType == OverlayType.Overlay);

		// The overlay is head-locked when this transform is parented (directly or indirectly)
		// under the current camera's transform.
		bool headLocked = false;
		for (var t = transform; t != null && !headLocked; t = t.parent)
			headLocked |= (t == Camera.current.transform);

		OVRPose pose = (headLocked) ? transform.ToHeadSpacePose() : transform.ToTrackingSpacePose();
		Vector3 scale = transform.lossyScale;
		// Normalize out the camera rig's own scale.
		for (int i = 0; i < 3; ++i)
			scale[i] /= Camera.current.transform.lossyScale[i];

		// Cylinder overlay sanity checking: arc angle (degrees) = scale.x / scale.z converted from radians.
		if (currentOverlayShape == OverlayShape.Cylinder)
		{
			float arcAngle = scale.x / scale.z / (float)Math.PI * 180.0f;
			if (arcAngle > 180.0f)
			{
				Debug.LogError("Cylinder overlay's arc angle has to be below 180 degree, current arc angle is " + arcAngle + " degree." );
				return ;
			}
		}

		bool isOverlayVisible = OVRPlugin.SetOverlayQuad(overlay, headLocked, texNativePtrs[0], texNativePtrs[1], IntPtr.Zero, pose.flipZ().ToPosef(), scale.ToVector3f(), layerIndex, (OVRPlugin.OverlayShape)currentOverlayShape);
		// Hide the scene-rendered mesh while the compositor is displaying the overlay.
		if (rend)
			rend.enabled = !isOverlayVisible;
	}
}
apache-2.0
dankibler/aws-sdk-java
aws-java-sdk-ses/src/main/java/com/amazonaws/services/simpleemail/model/transform/CreateReceiptRuleRequestMarshaller.java
10549
/* * Copyright 2010-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package com.amazonaws.services.simpleemail.model.transform; import java.util.HashMap; import java.util.List; import java.util.Map; import com.amazonaws.AmazonClientException; import com.amazonaws.Request; import com.amazonaws.DefaultRequest; import com.amazonaws.internal.ListWithAutoConstructFlag; import com.amazonaws.services.simpleemail.model.*; import com.amazonaws.transform.Marshaller; import com.amazonaws.util.StringUtils; /** * Create Receipt Rule Request Marshaller */ public class CreateReceiptRuleRequestMarshaller implements Marshaller<Request<CreateReceiptRuleRequest>, CreateReceiptRuleRequest> { public Request<CreateReceiptRuleRequest> marshall(CreateReceiptRuleRequest createReceiptRuleRequest) { if (createReceiptRuleRequest == null) { throw new AmazonClientException("Invalid argument passed to marshall(...)"); } Request<CreateReceiptRuleRequest> request = new DefaultRequest<CreateReceiptRuleRequest>(createReceiptRuleRequest, "AmazonSimpleEmailService"); request.addParameter("Action", "CreateReceiptRule"); request.addParameter("Version", "2010-12-01"); if (createReceiptRuleRequest.getRuleSetName() != null) { request.addParameter("RuleSetName", StringUtils.fromString(createReceiptRuleRequest.getRuleSetName())); } if (createReceiptRuleRequest.getAfter() != null) { request.addParameter("After", StringUtils.fromString(createReceiptRuleRequest.getAfter())); } ReceiptRule receiptRuleRule = 
createReceiptRuleRequest.getRule(); if (receiptRuleRule != null) { if (receiptRuleRule.getName() != null) { request.addParameter("Rule.Name", StringUtils.fromString(receiptRuleRule.getName())); } if (receiptRuleRule.isEnabled() != null) { request.addParameter("Rule.Enabled", StringUtils.fromBoolean(receiptRuleRule.isEnabled())); } if (receiptRuleRule.getTlsPolicy() != null) { request.addParameter("Rule.TlsPolicy", StringUtils.fromString(receiptRuleRule.getTlsPolicy())); } java.util.List<String> recipientsList = receiptRuleRule.getRecipients(); int recipientsListIndex = 1; for (String recipientsListValue : recipientsList) { if (recipientsListValue != null) { request.addParameter("Rule.Recipients.member." + recipientsListIndex, StringUtils.fromString(recipientsListValue)); } recipientsListIndex++; } java.util.List<ReceiptAction> actionsList = receiptRuleRule.getActions(); int actionsListIndex = 1; for (ReceiptAction actionsListValue : actionsList) { ReceiptAction receiptActionMember = actionsListValue; if (receiptActionMember != null) { S3Action s3ActionS3Action = receiptActionMember.getS3Action(); if (s3ActionS3Action != null) { if (s3ActionS3Action.getTopicArn() != null) { request.addParameter("Rule.Actions.member." + actionsListIndex + ".S3Action.TopicArn", StringUtils.fromString(s3ActionS3Action.getTopicArn())); } if (s3ActionS3Action.getBucketName() != null) { request.addParameter("Rule.Actions.member." + actionsListIndex + ".S3Action.BucketName", StringUtils.fromString(s3ActionS3Action.getBucketName())); } if (s3ActionS3Action.getObjectKeyPrefix() != null) { request.addParameter("Rule.Actions.member." + actionsListIndex + ".S3Action.ObjectKeyPrefix", StringUtils.fromString(s3ActionS3Action.getObjectKeyPrefix())); } if (s3ActionS3Action.getKmsKeyArn() != null) { request.addParameter("Rule.Actions.member." 
+ actionsListIndex + ".S3Action.KmsKeyArn", StringUtils.fromString(s3ActionS3Action.getKmsKeyArn())); } } BounceAction bounceActionBounceAction = receiptActionMember.getBounceAction(); if (bounceActionBounceAction != null) { if (bounceActionBounceAction.getTopicArn() != null) { request.addParameter("Rule.Actions.member." + actionsListIndex + ".BounceAction.TopicArn", StringUtils.fromString(bounceActionBounceAction.getTopicArn())); } if (bounceActionBounceAction.getSmtpReplyCode() != null) { request.addParameter("Rule.Actions.member." + actionsListIndex + ".BounceAction.SmtpReplyCode", StringUtils.fromString(bounceActionBounceAction.getSmtpReplyCode())); } if (bounceActionBounceAction.getStatusCode() != null) { request.addParameter("Rule.Actions.member." + actionsListIndex + ".BounceAction.StatusCode", StringUtils.fromString(bounceActionBounceAction.getStatusCode())); } if (bounceActionBounceAction.getMessage() != null) { request.addParameter("Rule.Actions.member." + actionsListIndex + ".BounceAction.Message", StringUtils.fromString(bounceActionBounceAction.getMessage())); } if (bounceActionBounceAction.getSender() != null) { request.addParameter("Rule.Actions.member." + actionsListIndex + ".BounceAction.Sender", StringUtils.fromString(bounceActionBounceAction.getSender())); } } WorkmailAction workmailActionWorkmailAction = receiptActionMember.getWorkmailAction(); if (workmailActionWorkmailAction != null) { if (workmailActionWorkmailAction.getTopicArn() != null) { request.addParameter("Rule.Actions.member." + actionsListIndex + ".WorkmailAction.TopicArn", StringUtils.fromString(workmailActionWorkmailAction.getTopicArn())); } if (workmailActionWorkmailAction.getOrganizationArn() != null) { request.addParameter("Rule.Actions.member." 
+ actionsListIndex + ".WorkmailAction.OrganizationArn", StringUtils.fromString(workmailActionWorkmailAction.getOrganizationArn())); } } LambdaAction lambdaActionLambdaAction = receiptActionMember.getLambdaAction(); if (lambdaActionLambdaAction != null) { if (lambdaActionLambdaAction.getTopicArn() != null) { request.addParameter("Rule.Actions.member." + actionsListIndex + ".LambdaAction.TopicArn", StringUtils.fromString(lambdaActionLambdaAction.getTopicArn())); } if (lambdaActionLambdaAction.getFunctionArn() != null) { request.addParameter("Rule.Actions.member." + actionsListIndex + ".LambdaAction.FunctionArn", StringUtils.fromString(lambdaActionLambdaAction.getFunctionArn())); } if (lambdaActionLambdaAction.getInvocationType() != null) { request.addParameter("Rule.Actions.member." + actionsListIndex + ".LambdaAction.InvocationType", StringUtils.fromString(lambdaActionLambdaAction.getInvocationType())); } } StopAction stopActionStopAction = receiptActionMember.getStopAction(); if (stopActionStopAction != null) { if (stopActionStopAction.getScope() != null) { request.addParameter("Rule.Actions.member." + actionsListIndex + ".StopAction.Scope", StringUtils.fromString(stopActionStopAction.getScope())); } if (stopActionStopAction.getTopicArn() != null) { request.addParameter("Rule.Actions.member." + actionsListIndex + ".StopAction.TopicArn", StringUtils.fromString(stopActionStopAction.getTopicArn())); } } AddHeaderAction addHeaderActionAddHeaderAction = receiptActionMember.getAddHeaderAction(); if (addHeaderActionAddHeaderAction != null) { if (addHeaderActionAddHeaderAction.getHeaderName() != null) { request.addParameter("Rule.Actions.member." + actionsListIndex + ".AddHeaderAction.HeaderName", StringUtils.fromString(addHeaderActionAddHeaderAction.getHeaderName())); } if (addHeaderActionAddHeaderAction.getHeaderValue() != null) { request.addParameter("Rule.Actions.member." 
+ actionsListIndex + ".AddHeaderAction.HeaderValue", StringUtils.fromString(addHeaderActionAddHeaderAction.getHeaderValue())); } } SNSAction sNSActionSNSAction = receiptActionMember.getSNSAction(); if (sNSActionSNSAction != null) { if (sNSActionSNSAction.getTopicArn() != null) { request.addParameter("Rule.Actions.member." + actionsListIndex + ".SNSAction.TopicArn", StringUtils.fromString(sNSActionSNSAction.getTopicArn())); } } } actionsListIndex++; } if (receiptRuleRule.isScanEnabled() != null) { request.addParameter("Rule.ScanEnabled", StringUtils.fromBoolean(receiptRuleRule.isScanEnabled())); } } return request; } }
apache-2.0
smaring/pentaho-osgi-bundles
pentaho-platform-plugin-deployer/src/main/java/org/pentaho/osgi/platform/plugin/deployer/api/PluginMetadata.java
1380
/*! * Copyright 2010 - 2018 Hitachi Vantara. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package org.pentaho.osgi.platform.plugin.deployer.api; import org.w3c.dom.Document; import java.io.FileWriter; import java.io.IOException; import java.io.OutputStream; import java.util.List; /** * Created by bryan on 8/26/14. */ public interface PluginMetadata { public ManifestUpdater getManifestUpdater(); public Document getBlueprint(); public void writeBlueprint( String name, OutputStream outputStream ) throws IOException; public FileWriter getFileWriter( String path ) throws IOException; OutputStream getFileOutputStream( String path ) throws IOException; void addContentType( String contentType ); List<String> getContentTypes(); void setBlueprint( Document blueprint ); void executeAtEnd( Runnable runnable ); }
apache-2.0
resilience4j/resilience4j
resilience4j-rxjava3/src/test/java/io/github/resilience4j/rxjava3/bulkhead/operator/SingleBulkheadTest.java
2681
package io.github.resilience4j.rxjava3.bulkhead.operator; import io.github.resilience4j.bulkhead.Bulkhead; import io.github.resilience4j.bulkhead.BulkheadFullException; import io.reactivex.rxjava3.core.Observable; import io.reactivex.rxjava3.core.Single; import org.junit.Before; import org.junit.Test; import java.io.IOException; import java.util.Arrays; import java.util.concurrent.TimeUnit; import static org.mockito.BDDMockito.given; import static org.mockito.BDDMockito.then; import static org.mockito.Mockito.*; /** * Unit test for {@link SingleBulkhead} using {@link BulkheadOperator}. */ public class SingleBulkheadTest { private Bulkhead bulkhead; @Before public void setUp() { bulkhead = mock(Bulkhead.class, RETURNS_DEEP_STUBS); } @Test public void shouldEmitAllEvents() { given(bulkhead.tryAcquirePermission()).willReturn(true); Single.just(1) .compose(BulkheadOperator.of(bulkhead)) .test() .assertResult(1); then(bulkhead).should().onComplete(); } @Test public void shouldPropagateError() { given(bulkhead.tryAcquirePermission()).willReturn(true); Single.error(new IOException("BAM!")) .compose(BulkheadOperator.of(bulkhead)) .test() .assertError(IOException.class) .assertNotComplete(); then(bulkhead).should().onComplete(); } @Test public void shouldEmitErrorWithBulkheadFullException() { given(bulkhead.tryAcquirePermission()).willReturn(false); Single.just(1) .compose(BulkheadOperator.of(bulkhead)) .test() .assertError(BulkheadFullException.class) .assertNotComplete(); then(bulkhead).should(never()).onComplete(); } @Test public void shouldReleaseBulkheadOnlyOnce() { given(bulkhead.tryAcquirePermission()).willReturn(true); Single.just(Arrays.asList(1, 2, 3)) .compose(BulkheadOperator.of(bulkhead)) .flatMapObservable(Observable::fromIterable) .take(2) //this with the previous line triggers an extra dispose .test() .assertResult(1, 2); then(bulkhead).should().onComplete(); } @Test public void shouldReleasePermissionOnCancel() { 
given(bulkhead.tryAcquirePermission()).willReturn(true); Single.just(1) .delay(1, TimeUnit.DAYS) .compose(BulkheadOperator.of(bulkhead)) .test() .dispose(); then(bulkhead).should().releasePermission(); then(bulkhead).should(never()).onComplete(); } }
apache-2.0
alyiwang/WhereHows
wherehows-web/app/utils/helpers/ember-concurrency.ts
607
import { TaskInstance } from 'ember-concurrency'; /** * A task can be used instead of a promise in some cases, but a task * has the advantage of being cancellable. See ember-concurrency. */ export type PromiseOrTask<T> = PromiseLike<T> | TaskInstance<T> | undefined; /** * Will check if the type is a promise or a task. The difference is that * a task is cancellable where as a promise not (for now). * @param obj the object to check */ export function isTask<T>(obj: PromiseOrTask<T>): obj is TaskInstance<T> { return typeof obj !== 'undefined' && (<TaskInstance<T>>obj).cancel !== undefined; }
apache-2.0
JoeMayo/LinqToTwitter
src/LinqToTwitter5/LinqToTwitter.Shared/Common/Cursors.cs
1046
using System.Xml.Serialization; using LinqToTwitter.Common; using LitJson; namespace LinqToTwitter { /// <summary> /// Response from Twitter for previous and next pages /// </summary> /// <remarks> /// To use a cursor, start by setting the cursor to -1 /// and then use one of these response cursors to move /// backwards or forwards in paged results. /// </remarks> [XmlType(Namespace = "LinqToTwitter")] public class Cursors { public Cursors() {} internal Cursors(JsonData cursors) { Next = cursors.GetValue<long>("next_cursor"); Previous = cursors.GetValue<long>("previous_cursor"); } /// <summary> /// Use this value to retrieve the next page /// </summary> [XmlIgnore] public long Next { get; internal set; } /// <summary> /// Use this value to go back to the previous page /// </summary> [XmlIgnore] public long Previous { get; internal set; } } }
apache-2.0
ChrisLundquist/nagios
resources/serviceescalation.rb
887
# # Author:: Sander Botman <sbotman@schubergphilis.com> # Cookbook Name:: nagios # Resource:: serviceescalation # # Copyright 2015, Sander Botman # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. actions :create, :add, :delete, :remove default_action :create attribute :name, :kind_of => String, :name_attribute => true attribute :options, :kind_of => [Hash, DataBagItem], :default => nil
apache-2.0
mikibrv/sling
bundles/scripting/sightly/testing/src/test/java/LaunchpadReadyIT.java
1528
/******************************************************************************* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. ******************************************************************************/ import org.apache.sling.testing.clients.SlingClient; import org.apache.sling.testing.junit.rules.SlingInstanceRule; import org.junit.ClassRule; import org.junit.Test; public class LaunchpadReadyIT { @ClassRule public static final SlingInstanceRule SLING_INSTANCE_RULE = new SlingInstanceRule(); @Test public void testLaunchpadReady() throws Exception { SlingClient client = SLING_INSTANCE_RULE.getAdminClient(); client.waitUntilExists("/apps/sightly", 100, 100); client.waitUntilExists("/sightlytck", 100, 100); } }
apache-2.0
jbertouch/elasticsearch
core/src/test/java/org/elasticsearch/percolator/MultiPercolatorIT.java
22705
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.percolator; import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder; import org.elasticsearch.action.percolate.MultiPercolateResponse; import org.elasticsearch.action.percolate.PercolateSourceBuilder; import org.elasticsearch.client.Requests; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ESIntegTestCase; import java.io.IOException; import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.smileBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.yamlBuilder; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static 
org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.index.query.QueryBuilders.rangeQuery; import static org.elasticsearch.percolator.PercolatorTestUtil.convertFromTextArray; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertMatchCount; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; /** */ public class MultiPercolatorIT extends ESIntegTestCase { public void testBasics() throws Exception { assertAcked(prepareCreate("test").addMapping("type", "field1", "type=text")); ensureGreen(); logger.info("--> register a queries"); client().prepareIndex("test", PercolatorService.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject()) .execute().actionGet(); client().prepareIndex("test", PercolatorService.TYPE_NAME, "2") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()) .execute().actionGet(); client().prepareIndex("test", PercolatorService.TYPE_NAME, "3") .setSource(jsonBuilder().startObject().field("query", boolQuery() .must(matchQuery("field1", "b")) .must(matchQuery("field1", "c")) ).endObject()) .execute().actionGet(); client().prepareIndex("test", PercolatorService.TYPE_NAME, "4") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); refresh(); MultiPercolateResponse response = client().prepareMultiPercolate() .add(client().preparePercolate() .setIndices("test").setDocumentType("type") 
.setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject()))) .add(client().preparePercolate() .setIndices("test").setDocumentType("type") .setPercolateDoc(docBuilder().setDoc(yamlBuilder().startObject().field("field1", "c").endObject()))) .add(client().preparePercolate() .setIndices("test").setDocumentType("type") .setPercolateDoc(docBuilder().setDoc(smileBuilder().startObject().field("field1", "b c").endObject()))) .add(client().preparePercolate() .setIndices("test").setDocumentType("type") .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "d").endObject()))) .add(client().preparePercolate() // non existing doc, so error element .setIndices("test").setDocumentType("type") .setGetRequest(Requests.getRequest("test").type("type").id("5"))) .execute().actionGet(); MultiPercolateResponse.Item item = response.getItems()[0]; assertMatchCount(item.getResponse(), 2L); assertThat(item.getResponse().getMatches(), arrayWithSize(2)); assertThat(item.getErrorMessage(), nullValue()); assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("1", "4")); item = response.getItems()[1]; assertThat(item.getErrorMessage(), nullValue()); assertMatchCount(item.getResponse(), 2L); assertThat(item.getResponse().getMatches(), arrayWithSize(2)); assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("2", "4")); item = response.getItems()[2]; assertThat(item.getErrorMessage(), nullValue()); assertMatchCount(item.getResponse(), 4L); assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4")); item = response.getItems()[3]; assertThat(item.getErrorMessage(), nullValue()); assertMatchCount(item.getResponse(), 1L); assertThat(item.getResponse().getMatches(), arrayWithSize(1)); assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContaining("4")); item = 
response.getItems()[4]; assertThat(item.getResponse(), nullValue()); assertThat(item.getErrorMessage(), notNullValue()); assertThat(item.getErrorMessage(), containsString("document missing")); } public void testWithRouting() throws Exception { assertAcked(prepareCreate("test").addMapping("type", "field1", "type=text")); ensureGreen(); logger.info("--> register a queries"); client().prepareIndex("test", PercolatorService.TYPE_NAME, "1") .setRouting("a") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject()) .execute().actionGet(); client().prepareIndex("test", PercolatorService.TYPE_NAME, "2") .setRouting("a") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()) .execute().actionGet(); client().prepareIndex("test", PercolatorService.TYPE_NAME, "3") .setRouting("a") .setSource(jsonBuilder().startObject().field("query", boolQuery() .must(matchQuery("field1", "b")) .must(matchQuery("field1", "c")) ).endObject()) .execute().actionGet(); client().prepareIndex("test", PercolatorService.TYPE_NAME, "4") .setRouting("a") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); refresh(); MultiPercolateResponse response = client().prepareMultiPercolate() .add(client().preparePercolate() .setIndices("test").setDocumentType("type") .setRouting("a") .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject()))) .add(client().preparePercolate() .setIndices("test").setDocumentType("type") .setRouting("a") .setPercolateDoc(docBuilder().setDoc(yamlBuilder().startObject().field("field1", "c").endObject()))) .add(client().preparePercolate() .setIndices("test").setDocumentType("type") .setRouting("a") .setPercolateDoc(docBuilder().setDoc(smileBuilder().startObject().field("field1", "b c").endObject()))) .add(client().preparePercolate() .setIndices("test").setDocumentType("type") .setRouting("a") 
.setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "d").endObject()))) .add(client().preparePercolate() // non existing doc, so error element .setIndices("test").setDocumentType("type") .setRouting("a") .setGetRequest(Requests.getRequest("test").type("type").id("5"))) .execute().actionGet(); MultiPercolateResponse.Item item = response.getItems()[0]; assertMatchCount(item.getResponse(), 2L); assertThat(item.getResponse().getMatches(), arrayWithSize(2)); assertThat(item.getErrorMessage(), nullValue()); assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("1", "4")); item = response.getItems()[1]; assertThat(item.getErrorMessage(), nullValue()); assertMatchCount(item.getResponse(), 2L); assertThat(item.getResponse().getMatches(), arrayWithSize(2)); assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("2", "4")); item = response.getItems()[2]; assertThat(item.getErrorMessage(), nullValue()); assertMatchCount(item.getResponse(), 4L); assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4")); item = response.getItems()[3]; assertThat(item.getErrorMessage(), nullValue()); assertMatchCount(item.getResponse(), 1L); assertThat(item.getResponse().getMatches(), arrayWithSize(1)); assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContaining("4")); item = response.getItems()[4]; assertThat(item.getResponse(), nullValue()); assertThat(item.getErrorMessage(), notNullValue()); assertThat(item.getErrorMessage(), containsString("document missing")); } public void testExistingDocsOnly() throws Exception { createIndex("test"); int numQueries = randomIntBetween(50, 100); logger.info("--> register a queries"); for (int i = 0; i < numQueries; i++) { client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i)) 
.setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); } client().prepareIndex("test", "type", "1") .setSource(jsonBuilder().startObject().field("field", "a")) .execute().actionGet(); refresh(); MultiPercolateRequestBuilder builder = client().prepareMultiPercolate(); int numPercolateRequest = randomIntBetween(50, 100); for (int i = 0; i < numPercolateRequest; i++) { builder.add( client().preparePercolate() .setGetRequest(Requests.getRequest("test").type("type").id("1")) .setIndices("test").setDocumentType("type") .setSize(numQueries) ); } MultiPercolateResponse response = builder.execute().actionGet(); assertThat(response.items().length, equalTo(numPercolateRequest)); for (MultiPercolateResponse.Item item : response) { assertThat(item.isFailure(), equalTo(false)); assertMatchCount(item.getResponse(), numQueries); assertThat(item.getResponse().getMatches().length, equalTo(numQueries)); } // Non existing doc builder = client().prepareMultiPercolate(); for (int i = 0; i < numPercolateRequest; i++) { builder.add( client().preparePercolate() .setGetRequest(Requests.getRequest("test").type("type").id("2")) .setIndices("test").setDocumentType("type").setSize(numQueries) ); } response = builder.execute().actionGet(); assertThat(response.items().length, equalTo(numPercolateRequest)); for (MultiPercolateResponse.Item item : response) { assertThat(item.isFailure(), equalTo(true)); assertThat(item.getErrorMessage(), containsString("document missing")); assertThat(item.getResponse(), nullValue()); } // One existing doc builder = client().prepareMultiPercolate(); for (int i = 0; i < numPercolateRequest; i++) { builder.add( client().preparePercolate() .setGetRequest(Requests.getRequest("test").type("type").id("2")) .setIndices("test").setDocumentType("type").setSize(numQueries) ); } builder.add( client().preparePercolate() .setGetRequest(Requests.getRequest("test").type("type").id("1")) 
.setIndices("test").setDocumentType("type").setSize(numQueries) ); response = builder.execute().actionGet(); assertThat(response.items().length, equalTo(numPercolateRequest + 1)); assertThat(response.items()[numPercolateRequest].isFailure(), equalTo(false)); assertMatchCount(response.items()[numPercolateRequest].getResponse(), numQueries); assertThat(response.items()[numPercolateRequest].getResponse().getMatches().length, equalTo(numQueries)); } public void testWithDocsOnly() throws Exception { createIndex("test"); ensureGreen(); NumShards test = getNumShards("test"); int numQueries = randomIntBetween(50, 100); logger.info("--> register a queries"); for (int i = 0; i < numQueries; i++) { client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i)) .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); } refresh(); MultiPercolateRequestBuilder builder = client().prepareMultiPercolate(); int numPercolateRequest = randomIntBetween(50, 100); for (int i = 0; i < numPercolateRequest; i++) { builder.add( client().preparePercolate() .setIndices("test").setDocumentType("type") .setSize(numQueries) .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field", "a").endObject()))); } MultiPercolateResponse response = builder.execute().actionGet(); assertThat(response.items().length, equalTo(numPercolateRequest)); for (MultiPercolateResponse.Item item : response) { assertThat(item.isFailure(), equalTo(false)); assertMatchCount(item.getResponse(), numQueries); assertThat(item.getResponse().getMatches().length, equalTo(numQueries)); } // All illegal json builder = client().prepareMultiPercolate(); for (int i = 0; i < numPercolateRequest; i++) { builder.add( client().preparePercolate() .setIndices("test").setDocumentType("type") .setSource("illegal json")); } response = builder.execute().actionGet(); assertThat(response.items().length, equalTo(numPercolateRequest)); for 
(MultiPercolateResponse.Item item : response) { assertThat(item.isFailure(), equalTo(false)); assertThat(item.getResponse().getSuccessfulShards(), equalTo(0)); assertThat(item.getResponse().getShardFailures().length, equalTo(test.numPrimaries)); for (ShardOperationFailedException shardFailure : item.getResponse().getShardFailures()) { assertThat(shardFailure.reason(), containsString("Failed to derive xcontent")); assertThat(shardFailure.status().getStatus(), equalTo(400)); } } // one valid request builder = client().prepareMultiPercolate(); for (int i = 0; i < numPercolateRequest; i++) { builder.add( client().preparePercolate() .setIndices("test").setDocumentType("type") .setSource("illegal json")); } builder.add( client().preparePercolate() .setSize(numQueries) .setIndices("test").setDocumentType("type") .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field", "a").endObject()))); response = builder.execute().actionGet(); assertThat(response.items().length, equalTo(numPercolateRequest + 1)); assertThat(response.items()[numPercolateRequest].isFailure(), equalTo(false)); assertMatchCount(response.items()[numPercolateRequest].getResponse(), numQueries); assertThat(response.items()[numPercolateRequest].getResponse().getMatches().length, equalTo(numQueries)); } public void testNestedMultiPercolation() throws IOException { initNestedIndexAndPercolation(); MultiPercolateRequestBuilder mpercolate= client().prepareMultiPercolate(); mpercolate.add(client().preparePercolate().setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(getNotMatchingNestedDoc())).setIndices("nestedindex").setDocumentType("company")); mpercolate.add(client().preparePercolate().setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(getMatchingNestedDoc())).setIndices("nestedindex").setDocumentType("company")); MultiPercolateResponse response = mpercolate.get(); assertEquals(response.getItems()[0].getResponse().getMatches().length, 0); 
assertEquals(response.getItems()[1].getResponse().getMatches().length, 1); assertEquals(response.getItems()[1].getResponse().getMatches()[0].getId().string(), "Q"); } public void testStartTimeIsPropagatedToShardRequests() throws Exception { // See: https://github.com/elastic/elasticsearch/issues/15908 internalCluster().ensureAtLeastNumDataNodes(2); client().admin().indices().prepareCreate("test") .setSettings(settingsBuilder() .put("index.number_of_shards", 1) .put("index.number_of_replicas", 1) ) .addMapping("type", "date_field", "type=date,format=strict_date_optional_time||epoch_millis") .get(); ensureGreen(); client().prepareIndex("test", ".percolator", "1") .setSource(jsonBuilder().startObject().field("query", rangeQuery("date_field").lt("now+90d")).endObject()) .setRefresh(true) .get(); for (int i = 0; i < 32; i++) { MultiPercolateResponse response = client().prepareMultiPercolate() .add(client().preparePercolate().setDocumentType("type").setIndices("test") .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("date_field", "2015-07-21T10:28:01-07:00"))) .get(); assertThat(response.getItems()[0].getResponse().getCount(), equalTo(1L)); assertThat(response.getItems()[0].getResponse().getMatches()[0].getId().string(), equalTo("1")); } } void initNestedIndexAndPercolation() throws IOException { XContentBuilder mapping = XContentFactory.jsonBuilder(); mapping.startObject().startObject("properties").startObject("companyname").field("type", "text").endObject() .startObject("employee").field("type", "nested").startObject("properties") .startObject("name").field("type", "text").endObject().endObject().endObject().endObject() .endObject(); assertAcked(client().admin().indices().prepareCreate("nestedindex").addMapping("company", mapping)); ensureGreen("nestedindex"); client().prepareIndex("nestedindex", PercolatorService.TYPE_NAME, "Q").setSource(jsonBuilder().startObject() .field("query", QueryBuilders.nestedQuery("employee", 
QueryBuilders.matchQuery("employee.name", "virginia potts").operator(Operator.AND)).scoreMode(ScoreMode.Avg)).endObject()).get(); refresh(); } XContentBuilder getMatchingNestedDoc() throws IOException { XContentBuilder doc = XContentFactory.jsonBuilder(); doc.startObject().field("companyname", "stark").startArray("employee") .startObject().field("name", "virginia potts").endObject() .startObject().field("name", "tony stark").endObject() .endArray().endObject(); return doc; } XContentBuilder getNotMatchingNestedDoc() throws IOException { XContentBuilder doc = XContentFactory.jsonBuilder(); doc.startObject().field("companyname", "notstark").startArray("employee") .startObject().field("name", "virginia stark").endObject() .startObject().field("name", "tony potts").endObject() .endArray().endObject(); return doc; } }
apache-2.0
yohanboniface/tilestrata
index.js
1591
var TileLayer = module.exports.TileLayer = require('./lib/TileLayer.js'); var TileRequest = module.exports.TileRequest = require('./lib/TileRequest.js'); var TileRequestHandler = module.exports.TileRequestHandler = require('./lib/TileRequestHandler.js'); var TileServer = module.exports.TileServer = require('./lib/TileServer.js'); module.exports.createServer = function() { return new TileServer(); }; module.exports.middleware = function(options) { options = options || {}; var prefix = (options.prefix || '').replace(/\/$/, ''); var prefix_len = prefix.length; var server = options.server; if (!(server instanceof TileServer)) { throw new Error('"server" option required, and must be a TileServer instance'); } var pendingRequests = []; server.initialize(function(err) { if (err) throw err; while (pendingRequests.length) { var args = pendingRequests.shift(); handleRequest(args[0], args[1], args[2]); } }); function handleRequest(req, res, next) { var url = req.url; if (url.substring(0, prefix_len) === prefix) { url = url.substring(prefix_len); } else { return next(); } var tilereq = TileRequest.parse(url, req.headers, req.method); server.serve(tilereq, {req: req, res: res}, function(status, buffer, headers) { if (status === 404) return next(); res.writeHead(status, headers); res.write(buffer); res.end(); }); } return function(req, res, next) { if (!server.initialized) { var args = Array.prototype.slice.call(arguments); pendingRequests.push(args); } else { handleRequest(req, res, next); } }; };
apache-2.0
sumedh123/open-event-frontend
app/components/nav-bar.js
485
import Ember from 'ember'; const { Component } = Ember; export default Component.extend({ actions: { logout() { this.get('authManager').logout(); this.get('routing').transitionTo('index'); } }, didInsertElement() { this._super.call(this); this.$('.notification.item').popup({ popup : '.popup', on : 'click' }); }, willDestroyElement() { this._super.call(this); this.$('.notification.item').popup('destroy'); } });
apache-2.0
cf-platform-eng/aws-pcf-quickstart
vendor/github.com/cppforlife/go-patch/patch/test_op.go
1068
package patch import ( "fmt" "reflect" ) type TestOp struct { Path Pointer Value interface{} Absent bool } func (op TestOp) Apply(doc interface{}) (interface{}, error) { if op.Absent { return op.checkAbsence(doc) } return op.checkValue(doc) } func (op TestOp) checkAbsence(doc interface{}) (interface{}, error) { _, err := FindOp{Path: op.Path}.Apply(doc) if err != nil { if typedErr, ok := err.(OpMissingIndexErr); ok { if typedErr.Path.String() == op.Path.String() { return doc, nil } } if typedErr, ok := err.(OpMissingMapKeyErr); ok { if typedErr.Path.String() == op.Path.String() { return doc, nil } } return nil, err } return nil, fmt.Errorf("Expected to not find '%s'", op.Path) } func (op TestOp) checkValue(doc interface{}) (interface{}, error) { foundVal, err := FindOp{Path: op.Path}.Apply(doc) if err != nil { return nil, err } if !reflect.DeepEqual(foundVal, op.Value) { return nil, fmt.Errorf("Found value does not match expected value") } // Return same input document return doc, nil }
apache-2.0
ody/puppet-glance
spec/classes/glance_backend_swift_spec.rb
1806
require 'spec_helper' describe 'glance::backend::swift' do let :facts do { :osfamily => 'Debian' } end let :params do { :swift_store_user => 'user', :swift_store_key => 'key', } end let :pre_condition do 'class { "glance::api": keystone_password => "pass" }' end it { should contain_glance_api_config('DEFAULT/default_store').with_value('swift') } it { should contain_glance_api_config('DEFAULT/swift_store_key').with_value('key') } it { should contain_glance_api_config('DEFAULT/swift_store_user').with_value('user') } it { should contain_glance_api_config('DEFAULT/swift_store_auth_version').with_value('2') } it { should contain_glance_api_config('DEFAULT/swift_store_auth_address').with_value('127.0.0.1:5000/v2.0/') } it { should contain_glance_api_config('DEFAULT/swift_store_container').with_value('glance') } it { should contain_glance_api_config('DEFAULT/swift_store_create_container_on_put').with_value('False') } describe 'when overriding datadir' do let :params do { :swift_store_user => 'user', :swift_store_key => 'key', :swift_store_auth_version => '1', :swift_store_auth_address => '127.0.0.2:8080/v1.0/', :swift_store_container => 'swift', :swift_store_create_container_on_put => 'True' } end it { should contain_glance_api_config('DEFAULT/swift_store_container').with_value('swift') } it { should contain_glance_api_config('DEFAULT/swift_store_create_container_on_put').with_value('True') } it { should contain_glance_api_config('DEFAULT/swift_store_auth_version').with_value('1') } it { should contain_glance_api_config('DEFAULT/swift_store_auth_address').with_value('127.0.0.2:8080/v1.0/') } end end
apache-2.0
gosharplite/kubernetes
cmd/kubeadm/app/apis/kubeadm/v1alpha3/types.go
15291
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

// Package v1alpha3 contains the external (serialized) kubeadm configuration
// types: what a user writes into a kubeadm config file for `kubeadm init`
// and `kubeadm join`.
package v1alpha3

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// InitConfiguration contains a list of elements which make up master's
// configuration object.
type InitConfiguration struct {
	metav1.TypeMeta `json:",inline"`

	// `kubeadm init`-only information. These fields are solely used the first time `kubeadm init` runs.
	// After that, the information in the fields ARE NOT uploaded to the `kubeadm-config` ConfigMap
	// that is used by `kubeadm upgrade` for instance. These fields must be omitempty.

	// BootstrapTokens is respected at `kubeadm init` time and describes a set of Bootstrap Tokens to create.
	// This information IS NOT uploaded to the kubeadm cluster configmap, partly because of its sensitive nature
	BootstrapTokens []BootstrapToken `json:"bootstrapTokens,omitempty"`

	// NodeRegistration holds fields that relate to registering the new master node to the cluster
	NodeRegistration NodeRegistrationOptions `json:"nodeRegistration,omitempty"`

	// Cluster-wide configuration
	// TODO: Move these fields under some kind of ClusterConfiguration or similar struct that describes
	// one cluster. Eventually we want this kind of spec to align well with the Cluster API spec.

	// API holds configuration for the k8s apiserver.
	API API `json:"api"`
	// Etcd holds configuration for etcd.
	Etcd Etcd `json:"etcd"`
	// Networking holds configuration for the networking topology of the cluster.
	Networking Networking `json:"networking"`
	// KubernetesVersion is the target version of the control plane.
	KubernetesVersion string `json:"kubernetesVersion"`
	// APIServerExtraArgs is a set of extra flags to pass to the API Server or override
	// default ones in form of <flagname>=<value>.
	// TODO: This is temporary and ideally we would like to switch all components to
	// use ComponentConfig + ConfigMaps.
	APIServerExtraArgs map[string]string `json:"apiServerExtraArgs,omitempty"`
	// ControllerManagerExtraArgs is a set of extra flags to pass to the Controller Manager
	// or override default ones in form of <flagname>=<value>
	// TODO: This is temporary and ideally we would like to switch all components to
	// use ComponentConfig + ConfigMaps.
	ControllerManagerExtraArgs map[string]string `json:"controllerManagerExtraArgs,omitempty"`
	// SchedulerExtraArgs is a set of extra flags to pass to the Scheduler or override
	// default ones in form of <flagname>=<value>
	// TODO: This is temporary and ideally we would like to switch all components to
	// use ComponentConfig + ConfigMaps.
	SchedulerExtraArgs map[string]string `json:"schedulerExtraArgs,omitempty"`
	// APIServerExtraVolumes is an extra set of host volumes mounted to the API server.
	APIServerExtraVolumes []HostPathMount `json:"apiServerExtraVolumes,omitempty"`
	// ControllerManagerExtraVolumes is an extra set of host volumes mounted to the
	// Controller Manager.
	ControllerManagerExtraVolumes []HostPathMount `json:"controllerManagerExtraVolumes,omitempty"`
	// SchedulerExtraVolumes is an extra set of host volumes mounted to the scheduler.
	SchedulerExtraVolumes []HostPathMount `json:"schedulerExtraVolumes,omitempty"`
	// APIServerCertSANs sets extra Subject Alternative Names for the API Server signing cert.
	APIServerCertSANs []string `json:"apiServerCertSANs,omitempty"`
	// CertificatesDir specifies where to store or look for all required certificates.
	CertificatesDir string `json:"certificatesDir"`
	// ImageRepository sets what container registry to pull control plane images from
	ImageRepository string `json:"imageRepository"`
	// UnifiedControlPlaneImage specifies if a specific container image should
	// be used for all control plane components.
	UnifiedControlPlaneImage string `json:"unifiedControlPlaneImage"`
	// AuditPolicyConfiguration defines the options for the api server audit system
	AuditPolicyConfiguration AuditPolicyConfiguration `json:"auditPolicy"`
	// FeatureGates enabled by the user.
	FeatureGates map[string]bool `json:"featureGates,omitempty"`
	// The cluster name
	ClusterName string `json:"clusterName,omitempty"`
}

// API struct contains elements of API server address.
type API struct {
	// AdvertiseAddress sets the IP address for the API server to advertise.
	AdvertiseAddress string `json:"advertiseAddress"`
	// ControlPlaneEndpoint sets a stable IP address or DNS name for the control plane; it
	// can be a valid IP address or a RFC-1123 DNS subdomain, both with optional TCP port.
	// In case the ControlPlaneEndpoint is not specified, the AdvertiseAddress + BindPort
	// are used; in case the ControlPlaneEndpoint is specified but without a TCP port,
	// the BindPort is used.
	// Possible usages are:
	// e.g. In a cluster with more than one control plane instances, this field should be
	// assigned the address of the external load balancer in front of the
	// control plane instances.
	// e.g. in environments with enforced node recycling, the ControlPlaneEndpoint
	// could be used for assigning a stable DNS to the control plane.
	ControlPlaneEndpoint string `json:"controlPlaneEndpoint"`
	// BindPort sets the secure port for the API Server to bind to.
	// Defaults to 6443.
	BindPort int32 `json:"bindPort"`
}

// NodeRegistrationOptions holds fields that relate to registering a new master or node to the cluster, either via "kubeadm init" or "kubeadm join"
type NodeRegistrationOptions struct {
	// Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation.
	// This field is also used in the CommonName field of the kubelet's client certificate to the API server.
	// Defaults to the hostname of the node if not provided.
	Name string `json:"name,omitempty"`

	// CRISocket is used to retrieve container runtime info. This information will be annotated to the Node API object, for later re-use
	CRISocket string `json:"criSocket,omitempty"`

	// Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process
	// it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your master node, set this field to an
	// empty slice, i.e. `taints: []` in the YAML file. This field is solely used for Node registration.
	Taints []v1.Taint `json:"taints,omitempty"`

	// KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file
	// kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap
	// Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on.
	KubeletExtraArgs map[string]string `json:"kubeletExtraArgs,omitempty"`
}

// Networking contains elements describing cluster's networking configuration
type Networking struct {
	// ServiceSubnet is the subnet used by k8s services. Defaults to "10.96.0.0/12".
	ServiceSubnet string `json:"serviceSubnet"`
	// PodSubnet is the subnet used by pods.
	PodSubnet string `json:"podSubnet"`
	// DNSDomain is the dns domain used by k8s services. Defaults to "cluster.local".
	DNSDomain string `json:"dnsDomain"`
}

// BootstrapToken describes one bootstrap token, stored as a Secret in the cluster
type BootstrapToken struct {
	// Token is used for establishing bidirectional trust between nodes and masters.
	// Used for joining nodes in the cluster.
	Token *BootstrapTokenString `json:"token"`
	// Description sets a human-friendly message why this token exists and what it's used
	// for, so other administrators can know its purpose.
	Description string `json:"description,omitempty"`
	// TTL defines the time to live for this token. Defaults to 24h.
	// Expires and TTL are mutually exclusive.
	TTL *metav1.Duration `json:"ttl,omitempty"`
	// Expires specifies the timestamp when this token expires. Defaults to being set
	// dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive.
	Expires *metav1.Time `json:"expires,omitempty"`
	// Usages describes the ways in which this token can be used. Can by default be used
	// for establishing bidirectional trust, but that can be changed here.
	Usages []string `json:"usages,omitempty"`
	// Groups specifies the extra groups that this token will authenticate as when/if
	// used for authentication
	Groups []string `json:"groups,omitempty"`
}

// Etcd contains elements describing Etcd configuration.
type Etcd struct {
	// Local provides configuration knobs for configuring the local etcd instance
	// Local and External are mutually exclusive
	Local *LocalEtcd `json:"local,omitempty"`

	// External describes how to connect to an external etcd cluster
	// Local and External are mutually exclusive
	External *ExternalEtcd `json:"external,omitempty"`
}

// LocalEtcd describes that kubeadm should run an etcd cluster locally
type LocalEtcd struct {
	// Image specifies which container image to use for running etcd.
	// If empty, automatically populated by kubeadm using the image
	// repository and default etcd version.
	Image string `json:"image"`

	// DataDir is the directory etcd will place its data.
	// Defaults to "/var/lib/etcd".
	DataDir string `json:"dataDir"`

	// ExtraArgs are extra arguments provided to the etcd binary
	// when run inside a static pod.
	ExtraArgs map[string]string `json:"extraArgs,omitempty"`

	// ServerCertSANs sets extra Subject Alternative Names for the etcd server signing cert.
	ServerCertSANs []string `json:"serverCertSANs,omitempty"`
	// PeerCertSANs sets extra Subject Alternative Names for the etcd peer signing cert.
	PeerCertSANs []string `json:"peerCertSANs,omitempty"`
}

// ExternalEtcd describes an external etcd cluster
type ExternalEtcd struct {
	// Endpoints of etcd members. Useful for using external etcd.
	// If not provided, kubeadm will run etcd in a static pod.
	Endpoints []string `json:"endpoints"`
	// CAFile is an SSL Certificate Authority file used to secure etcd communication.
	CAFile string `json:"caFile"`
	// CertFile is an SSL certification file used to secure etcd communication.
	CertFile string `json:"certFile"`
	// KeyFile is an SSL key file used to secure etcd communication.
	KeyFile string `json:"keyFile"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// JoinConfiguration contains elements describing a particular node.
// TODO: This struct should be replaced by dynamic kubelet configuration.
type JoinConfiguration struct {
	metav1.TypeMeta `json:",inline"`

	// NodeRegistration holds fields that relate to registering the new node to the cluster
	NodeRegistration NodeRegistrationOptions `json:"nodeRegistration"`

	// CACertPath is the path to the SSL certificate authority used to
	// secure communications between node and master.
	// Defaults to "/etc/kubernetes/pki/ca.crt".
	CACertPath string `json:"caCertPath"`
	// DiscoveryFile is a file or url to a kubeconfig file from which to
	// load cluster information.
	DiscoveryFile string `json:"discoveryFile"`
	// DiscoveryToken is a token used to validate cluster information
	// fetched from the master.
	DiscoveryToken string `json:"discoveryToken"`
	// DiscoveryTokenAPIServers is a set of IPs to API servers from which info
	// will be fetched. Currently we only pay attention to one API server but
	// hope to support >1 in the future.
	DiscoveryTokenAPIServers []string `json:"discoveryTokenAPIServers,omitempty"`
	// DiscoveryTimeout modifies the discovery timeout
	DiscoveryTimeout *metav1.Duration `json:"discoveryTimeout,omitempty"`
	// TLSBootstrapToken is a token used for TLS bootstrapping.
	// Defaults to Token.
	TLSBootstrapToken string `json:"tlsBootstrapToken"`
	// Token is used for both discovery and TLS bootstrapping.
	Token string `json:"token"`

	// ClusterName is the name for the cluster in kubeconfig.
	ClusterName string `json:"clusterName,omitempty"`

	// DiscoveryTokenCACertHashes specifies a set of public key pins to verify
	// when token-based discovery is used. The root CA found during discovery
	// must match one of these values. Specifying an empty set disables root CA
	// pinning, which can be unsafe. Each hash is specified as "<type>:<value>",
	// where the only currently supported type is "sha256". This is a hex-encoded
	// SHA-256 hash of the Subject Public Key Info (SPKI) object in DER-encoded
	// ASN.1. These hashes can be calculated using, for example, OpenSSL:
	// openssl x509 -pubkey -in ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex
	DiscoveryTokenCACertHashes []string `json:"discoveryTokenCACertHashes,omitempty"`

	// DiscoveryTokenUnsafeSkipCAVerification allows token-based discovery
	// without CA verification via DiscoveryTokenCACertHashes. This can weaken
	// the security of kubeadm since other nodes can impersonate the master.
	DiscoveryTokenUnsafeSkipCAVerification bool `json:"discoveryTokenUnsafeSkipCAVerification"`

	// ControlPlane flag specifies that the joining node should host an additional
	// control plane instance.
	ControlPlane bool `json:"controlPlane,omitempty"`

	// AdvertiseAddress sets the IP address for the API server to advertise; the
	// API server will be installed only on nodes hosting an additional control plane instance.
	AdvertiseAddress string `json:"advertiseAddress,omitempty"`

	// FeatureGates enabled by the user.
	FeatureGates map[string]bool `json:"featureGates,omitempty"`
}

// HostPathMount contains elements describing volumes that are mounted from the
// host.
type HostPathMount struct {
	// Name of the volume inside the pod template.
	Name string `json:"name"`
	// HostPath is the path in the host that will be mounted inside
	// the pod.
	HostPath string `json:"hostPath"`
	// MountPath is the path inside the pod where hostPath will be mounted.
	MountPath string `json:"mountPath"`
	// Writable controls write access to the volume
	Writable bool `json:"writable,omitempty"`
	// PathType is the type of the HostPath.
	PathType v1.HostPathType `json:"pathType,omitempty"`
}

// AuditPolicyConfiguration holds the options for configuring the api server audit policy.
type AuditPolicyConfiguration struct {
	// Path is the local path to an audit policy.
	Path string `json:"path"`
	// LogDir is the local path to the directory where logs should be stored.
	LogDir string `json:"logDir"`
	// LogMaxAge is the number of days logs will be stored for. 0 indicates forever.
	LogMaxAge *int32 `json:"logMaxAge,omitempty"`
	//TODO(chuckha) add other options for audit policy.
}
apache-2.0
delacruzjayveejoshua920/ICNG-App-for-Windows-8.1-and-Windows-Phone-8.1-
InfoAppWindows8/ICNG Phone/obj/ARM/Debug/Custom Tiles/TilesWithIcons/MediumTileIcon.g.i.cs
4329
 #pragma checksum "C:\Users\Jayvee\Desktop\InfoAppWindows8\ICNG Phone\Custom Tiles\TilesWithIcons\MediumTileIcon.xaml" "{406ea660-64cf-4c82-b6f0-42d48172a799}" "0BB410101202ACD008AEBA487848158B" //------------------------------------------------------------------------------ // <auto-generated> // This code was generated by a tool. // // Changes to this file may cause incorrect behavior and will be lost if // the code is regenerated. // </auto-generated> //------------------------------------------------------------------------------ namespace CustomLiveTiles { partial class MediumTileIcon : global::Windows.UI.Xaml.Controls.UserControl { [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.Windows.UI.Xaml.Build.Tasks"," 4.0.0.0")] private global::Windows.UI.Xaml.Media.Animation.Storyboard liveTileAnim1; [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.Windows.UI.Xaml.Build.Tasks"," 4.0.0.0")] private global::Windows.UI.Xaml.Media.Animation.Storyboard liveTileAnim1_Inverse; [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.Windows.UI.Xaml.Build.Tasks"," 4.0.0.0")] private global::Windows.UI.Xaml.Controls.Grid gridFont; [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.Windows.UI.Xaml.Build.Tasks"," 4.0.0.0")] private global::Windows.UI.Xaml.Controls.Grid gridBack; [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.Windows.UI.Xaml.Build.Tasks"," 4.0.0.0")] private global::Windows.UI.Xaml.Media.TranslateTransform panel2; [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.Windows.UI.Xaml.Build.Tasks"," 4.0.0.0")] private global::Windows.UI.Xaml.Controls.TextBlock txtMessage; [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.Windows.UI.Xaml.Build.Tasks"," 4.0.0.0")] private global::Windows.UI.Xaml.Controls.TextBlock txtBackTitle; [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.Windows.UI.Xaml.Build.Tasks"," 4.0.0.0")] private 
global::Windows.UI.Xaml.Media.TranslateTransform panel1; [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.Windows.UI.Xaml.Build.Tasks"," 4.0.0.0")] private global::Windows.UI.Xaml.Controls.Image imgBackground; [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.Windows.UI.Xaml.Build.Tasks"," 4.0.0.0")] private global::Windows.UI.Xaml.Controls.TextBlock txtTitle; [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.Windows.UI.Xaml.Build.Tasks"," 4.0.0.0")] private bool _contentLoaded; [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.Windows.UI.Xaml.Build.Tasks"," 4.0.0.0")] [global::System.Diagnostics.DebuggerNonUserCodeAttribute()] public void InitializeComponent() { if (_contentLoaded) return; _contentLoaded = true; global::Windows.UI.Xaml.Application.LoadComponent(this, new global::System.Uri("ms-appx:///Custom Tiles/TilesWithIcons/MediumTileIcon.xaml"), global::Windows.UI.Xaml.Controls.Primitives.ComponentResourceLocation.Application); liveTileAnim1 = (global::Windows.UI.Xaml.Media.Animation.Storyboard)this.FindName("liveTileAnim1"); liveTileAnim1_Inverse = (global::Windows.UI.Xaml.Media.Animation.Storyboard)this.FindName("liveTileAnim1_Inverse"); gridFont = (global::Windows.UI.Xaml.Controls.Grid)this.FindName("gridFont"); gridBack = (global::Windows.UI.Xaml.Controls.Grid)this.FindName("gridBack"); panel2 = (global::Windows.UI.Xaml.Media.TranslateTransform)this.FindName("panel2"); txtMessage = (global::Windows.UI.Xaml.Controls.TextBlock)this.FindName("txtMessage"); txtBackTitle = (global::Windows.UI.Xaml.Controls.TextBlock)this.FindName("txtBackTitle"); panel1 = (global::Windows.UI.Xaml.Media.TranslateTransform)this.FindName("panel1"); imgBackground = (global::Windows.UI.Xaml.Controls.Image)this.FindName("imgBackground"); txtTitle = (global::Windows.UI.Xaml.Controls.TextBlock)this.FindName("txtTitle"); } } }
apache-2.0
hauleth/rust
src/libsyntax/diagnostics/plugin.rs
4651
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Machinery behind the compiler's extended-diagnostic macros: registering
//! error codes, recording their use sites, and emitting the static array of
//! (code, description) pairs used by `--explain`.

use std::cell::RefCell;
use std::collections::BTreeMap;

use ast;
use ast::{Ident, Name, TokenTree};
use codemap::Span;
use ext::base::{ExtCtxt, MacExpr, MacResult, MacItems};
use ext::build::AstBuilder;
use parse::token;
use ptr::P;

// Per-thread registry of diagnostic code -> optional long description.
// BTreeMap keeps the emitted diagnostic array sorted by code.
thread_local! {
    static REGISTERED_DIAGNOSTICS: RefCell<BTreeMap<Name, Option<Name>>> = {
        RefCell::new(BTreeMap::new())
    }
}
// Per-thread record of where each diagnostic code was first used,
// so a second use can point back to the previous invocation.
thread_local! {
    static USED_DIAGNOSTICS: RefCell<BTreeMap<Name, Span>> = {
        RefCell::new(BTreeMap::new())
    }
}

/// Runs `f` with mutable access to the thread-local registered-diagnostics map.
fn with_registered_diagnostics<T, F>(f: F) -> T where
    F: FnOnce(&mut BTreeMap<Name, Option<Name>>) -> T,
{
    REGISTERED_DIAGNOSTICS.with(move |slot| {
        f(&mut *slot.borrow_mut())
    })
}

/// Runs `f` with mutable access to the thread-local used-diagnostics map.
fn with_used_diagnostics<T, F>(f: F) -> T where
    F: FnOnce(&mut BTreeMap<Name, Span>) -> T,
{
    USED_DIAGNOSTICS.with(move |slot| {
        f(&mut *slot.borrow_mut())
    })
}

/// Expands a diagnostic-use macro invocation: records the use site, warning
/// on duplicate use and erroring if the code was never registered.
/// Expands to the unit expression `()`.
pub fn expand_diagnostic_used<'cx>(ecx: &'cx mut ExtCtxt,
                                   span: Span,
                                   token_tree: &[TokenTree])
                                   -> Box<MacResult+'cx> {
    // The macro takes exactly one identifier: the diagnostic code (e.g. E0123).
    let code = match token_tree {
        [ast::TtToken(_, token::Ident(code, _))] => code,
        _ => unreachable!()
    };
    with_used_diagnostics(|diagnostics| {
        // `insert` returns the previous span if the code was already used.
        match diagnostics.insert(code.name, span) {
            Some(previous_span) => {
                ecx.span_warn(span, &format!(
                    "diagnostic code {} already used", &token::get_ident(code)
                )[]);
                ecx.span_note(previous_span, "previous invocation");
            },
            None => ()
        }
        ()
    });
    with_registered_diagnostics(|diagnostics| {
        if !diagnostics.contains_key(&code.name) {
            ecx.span_err(span, &format!(
                "used diagnostic code {} not registered", &token::get_ident(code)
            )[]);
        }
    });
    MacExpr::new(quote_expr!(ecx, ()))
}

/// Expands a register-diagnostic macro invocation: stores the code (and
/// optional raw-string description) in the registry, erroring on a duplicate.
/// Expands to an empty, uniquely named module so the invocation is item-legal.
pub fn expand_register_diagnostic<'cx>(ecx: &'cx mut ExtCtxt,
                                       span: Span,
                                       token_tree: &[TokenTree])
                                       -> Box<MacResult+'cx> {
    // Accepted forms: `CODE` or `CODE, r"description"`.
    let (code, description) = match token_tree {
        [ast::TtToken(_, token::Ident(ref code, _))] => {
            (code, None)
        },
        [ast::TtToken(_, token::Ident(ref code, _)),
         ast::TtToken(_, token::Comma),
         ast::TtToken(_, token::Literal(token::StrRaw(description, _), None))] => {
            (code, Some(description))
        }
        _ => unreachable!()
    };
    with_registered_diagnostics(|diagnostics| {
        if diagnostics.insert(code.name, description).is_some() {
            ecx.span_err(span, &format!(
                "diagnostic code {} already registered", &token::get_ident(*code)
            )[]);
        }
    });
    // Gensym the module name so repeated registrations cannot collide.
    let sym = Ident::new(token::gensym(&(
        "__register_diagnostic_".to_string() + &token::get_ident(*code)
    )[]));
    MacItems::new(vec![quote_item!(ecx, mod $sym {}).unwrap()].into_iter())
}

/// Expands a build-diagnostic-array macro invocation into
/// `pub static $name: [(&'static str, &'static str); N]` holding every
/// registered code that has a description, in code order.
pub fn expand_build_diagnostic_array<'cx>(ecx: &'cx mut ExtCtxt,
                                          span: Span,
                                          token_tree: &[TokenTree])
                                          -> Box<MacResult+'cx> {
    // The macro takes exactly one identifier: the name of the static to emit.
    let name = match token_tree {
        [ast::TtToken(_, token::Ident(ref name, _))] => name,
        _ => unreachable!()
    };
    let (count, expr) = with_registered_diagnostics(|diagnostics| {
        // Codes registered without a description are skipped by filter_map.
        let descriptions: Vec<P<ast::Expr>> = diagnostics.iter().filter_map(|(code, description)| {
            description.map(|description| {
                ecx.expr_tuple(span, vec![
                    ecx.expr_str(span, token::get_name(*code)),
                    ecx.expr_str(span, token::get_name(description))])
            })
        }).collect();
        (descriptions.len(), ecx.expr_vec(span, descriptions))
    });
    MacItems::new(vec![quote_item!(ecx,
        pub static $name: [(&'static str, &'static str); $count] = $expr;
    ).unwrap()].into_iter())
}
apache-2.0
jcsp/manila
manila/api/v1/scheduler_stats.py
2015
# Copyright (c) 2015 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log

from manila.api.openstack import wsgi
from manila.api.views import scheduler_stats as scheduler_stats_views
from manila import policy
from manila.scheduler import rpcapi

POOLS_RESOURCES_NAME = 'scheduler_stats:pools'

LOG = log.getLogger(__name__)


class SchedulerStatsController(wsgi.Controller):
    """The Scheduler Stats API controller for the OpenStack API."""

    def __init__(self):
        # The view builder class must be assigned before delegating to the
        # base Controller, which consumes it during initialization.
        self.scheduler_api = rpcapi.SchedulerAPI()
        self._view_builder_class = scheduler_stats_views.ViewBuilder
        super(SchedulerStatsController, self).__init__()

    def pools_index(self, req):
        """Returns a list of storage pools known to the scheduler."""
        return self._pools(req)

    def pools_detail(self, req):
        """Returns a detailed list of storage pools known to the scheduler."""
        return self._pools(req, action='detail')

    def _pools(self, req, action='index'):
        """Fetches pools from the scheduler and renders them via the view.

        :param req: the incoming API request
        :param action: 'index' for a summary listing, 'detail' for full detail
        """
        # Enforce the per-action policy before contacting the scheduler.
        context = req.environ['manila.context']
        policy.check_policy(context, POOLS_RESOURCES_NAME, action)

        # Forward any query-string parameters as scheduler filters.
        filters = {}
        filters.update(req.GET)

        pools = self.scheduler_api.get_pools(context, filters=filters)
        return self._view_builder.pools(pools, detail=(action == 'detail'))


def create_resource():
    """Builds the WSGI resource wrapping the scheduler-stats controller."""
    controller = SchedulerStatsController()
    return wsgi.Resource(controller)
apache-2.0
opennetworkinglab/onos
apps/routing/fibinstaller/src/test/java/org/onosproject/routing/fibinstaller/FibInstallerTest.java
16795
/*
 * Copyright 2017-present Open Networking Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.onosproject.routing.fibinstaller;

import com.google.common.collect.Sets;
import org.easymock.EasyMock;
import org.junit.Before;
import org.junit.Test;
import org.onlab.packet.Ethernet;
import org.onlab.packet.Ip4Prefix;
import org.onlab.packet.IpAddress;
import org.onlab.packet.IpPrefix;
import org.onlab.packet.MacAddress;
import org.onlab.packet.VlanId;
import org.onosproject.TestApplicationId;
import org.onosproject.app.ApplicationService;
import org.onosproject.cfg.ComponentConfigService;
import org.onosproject.core.ApplicationId;
import org.onosproject.core.CoreService;
import org.onosproject.net.intf.Interface;
import org.onosproject.net.intf.InterfaceListener;
import org.onosproject.net.intf.InterfaceService;
import org.onosproject.net.intf.InterfaceServiceAdapter;
import org.onosproject.routeservice.ResolvedRoute;
import org.onosproject.routeservice.Route;
import org.onosproject.routeservice.RouteEvent;
import org.onosproject.routeservice.RouteListener;
import org.onosproject.routeservice.RouteServiceAdapter;
import org.onosproject.net.ConnectPoint;
import org.onosproject.net.DeviceId;
import org.onosproject.net.PortNumber;
import org.onosproject.net.config.NetworkConfigListener;
import org.onosproject.net.config.NetworkConfigRegistry;
import org.onosproject.net.config.NetworkConfigService;
import org.onosproject.net.device.DeviceListener;
import org.onosproject.net.device.DeviceService;
import org.onosproject.net.device.DeviceServiceAdapter;
import org.onosproject.net.flow.DefaultTrafficSelector;
import org.onosproject.net.flow.DefaultTrafficTreatment;
import org.onosproject.net.flow.TrafficSelector;
import org.onosproject.net.flow.TrafficTreatment;
import org.onosproject.net.flowobjective.DefaultForwardingObjective;
import org.onosproject.net.flowobjective.DefaultNextObjective;
import org.onosproject.net.flowobjective.FlowObjectiveService;
import org.onosproject.net.flowobjective.ForwardingObjective;
import org.onosproject.net.flowobjective.NextObjective;
import org.onosproject.net.host.InterfaceIpAddress;
import org.onosproject.routing.RoutingService;
import org.onosproject.routing.config.RouterConfig;
import org.onosproject.routing.config.RoutersConfig;
import org.osgi.service.component.ComponentContext;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;

import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.anyString;
import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.createNiceMock;
import static org.easymock.EasyMock.eq;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.expectLastCall;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.reset;
import static org.easymock.EasyMock.verify;

/**
 * Unit tests for FibInstaller.
 */
public class FibInstallerTest {

    // Fixed test topology: one device with two ports.
    private static final DeviceId DEVICE_ID = DeviceId.deviceId("of:0000000000000001");

    private static final ConnectPoint SW1_ETH1 = new ConnectPoint(
            DEVICE_ID, PortNumber.portNumber(1));

    private static final ConnectPoint SW1_ETH2 = new ConnectPoint(
            DEVICE_ID, PortNumber.portNumber(2));

    // Next-objective id returned by the mocked FlowObjectiveService.
    private static final int NEXT_ID = 11;

    private static final VlanId VLAN1 = VlanId.vlanId((short) 1);
    private static final MacAddress MAC1 = MacAddress.valueOf("00:00:00:00:00:01");
    private static final MacAddress MAC2 = MacAddress.valueOf("00:00:00:00:00:02");

    // Route prefix plus two candidate next hops: one untagged, one on VLAN1.
    private static final IpPrefix PREFIX1 = Ip4Prefix.valueOf("1.1.1.0/24");
    private static final IpAddress NEXT_HOP1 = IpAddress.valueOf("192.168.10.1");
    private static final IpAddress NEXT_HOP2 = IpAddress.valueOf("192.168.20.1");
    private static final InterfaceIpAddress INTF1 =
            InterfaceIpAddress.valueOf("192.168.10.2/24");
    private static final InterfaceIpAddress INTF2 =
            InterfaceIpAddress.valueOf("192.168.20.2/24");

    private final Set<Interface> interfaces = Sets.newHashSet();

    // Mocked/stubbed ONOS services injected into the component under test.
    private InterfaceService interfaceService;
    private NetworkConfigService networkConfigService;
    private NetworkConfigRegistry networkConfigRegistry;
    private FlowObjectiveService flowObjectiveService;
    private ApplicationService applicationService;
    private DeviceService deviceService;

    private static final ApplicationId APPID = TestApplicationId.create("foo");

    // Listeners captured from the component so tests can inject events.
    private RouteListener routeListener;
    private DeviceListener deviceListener;

    private RouterConfig routerConfig;

    private FibInstaller sSfibInstaller;

    private InterfaceListener interfaceListener;

    @Before
    public void setUp() throws Exception {
        sSfibInstaller = new FibInstaller();

        sSfibInstaller.componentConfigService = createNiceMock(ComponentConfigService.class);
        ComponentContext mockContext = createNiceMock(ComponentContext.class);

        routerConfig = new TestRouterConfig();
        interfaceService = createMock(InterfaceService.class);

        networkConfigService = createMock(NetworkConfigService.class);
        networkConfigService.addListener(anyObject(NetworkConfigListener.class));
        expectLastCall().anyTimes();
        networkConfigRegistry = createMock(NetworkConfigRegistry.class);
        flowObjectiveService = createMock(FlowObjectiveService.class);
        applicationService = createNiceMock(ApplicationService.class);
        replay(applicationService);
        deviceService = new TestDeviceService();
        CoreService coreService = createNiceMock(CoreService.class);
        expect(coreService.registerApplication(anyString())).andReturn(APPID).anyTimes();
        replay(coreService);

        sSfibInstaller.networkConfigService = networkConfigService;
        sSfibInstaller.networkConfigRegistry = networkConfigRegistry;
        sSfibInstaller.interfaceService = interfaceService;
        sSfibInstaller.flowObjectiveService = flowObjectiveService;
        sSfibInstaller.applicationService = applicationService;
        sSfibInstaller.coreService = coreService;
        sSfibInstaller.routeService = new TestRouteService();
        sSfibInstaller.deviceService = deviceService;

        setUpNetworkConfigService();
        setUpInterfaceService();
        sSfibInstaller.activate(mockContext);
    }

    /**
     * Sets up InterfaceService.
     */
    private void setUpInterfaceService() {
        // Capture the component's interface listener via the delegate adapter.
        interfaceService.addListener(anyObject(InterfaceListener.class));
        expectLastCall().andDelegateTo(new TestInterfaceService());

        // Interface with no VLAN
        Interface sw1Eth1 = new Interface("intf1", SW1_ETH1,
                Collections.singletonList(INTF1), MAC1, VlanId.NONE);
        expect(interfaceService.getMatchingInterface(NEXT_HOP1)).andReturn(sw1Eth1);
        interfaces.add(sw1Eth1);

        // Interface with a VLAN
        Interface sw2Eth1 = new Interface("intf2", SW1_ETH2,
                Collections.singletonList(INTF2), MAC2, VLAN1);
        expect(interfaceService.getMatchingInterface(NEXT_HOP2)).andReturn(sw2Eth1);
        interfaces.add(sw2Eth1);

        expect(interfaceService.getInterfaces()).andReturn(interfaces);

        replay(interfaceService);
    }

    /**
     * Sets up NetworkConfigService.
     */
    private void setUpNetworkConfigService() {
        expect(networkConfigService.getConfig(
                anyObject(ApplicationId.class), eq(RoutingService.ROUTER_CONFIG_CLASS))).
                andReturn(routerConfig);
        expect(networkConfigService.getConfig(anyObject(ApplicationId.class), eq(RoutersConfig.class)))
                .andReturn(null);
        replay(networkConfigService);
    }

    /**
     * Sets up FlowObjectiveService.
     */
    private void setUpFlowObjectiveService() {
        expect(flowObjectiveService.allocateNextId()).andReturn(NEXT_ID);
        replay(flowObjectiveService);
    }

    /**
     * Creates a next objective with the given parameters.
     *
     * @param srcMac source MAC address
     * @param dstMac destination MAC address
     * @param port port number
     * @param vlan vlan ID
     * @param add whether to create an add objective or remove objective
     * @return new next objective
     */
    private NextObjective createNextObjective(MacAddress srcMac,
                                              MacAddress dstMac,
                                              PortNumber port,
                                              VlanId vlan,
                                              boolean add) {
        TrafficTreatment.Builder treatment = DefaultTrafficTreatment.builder()
                .setEthSrc(srcMac)
                .setEthDst(dstMac);
        TrafficSelector.Builder metabuilder = null;
        if (!vlan.equals(VlanId.NONE)) {
            treatment.pushVlan()
                    .setVlanId(vlan)
                    .setVlanPcp((byte) 0);
        } else {
            // Untagged next hop: match on the internally assigned VLAN instead.
            metabuilder = DefaultTrafficSelector.builder();
            metabuilder.matchVlanId(VlanId.vlanId(FibInstaller.ASSIGNED_VLAN));
        }
        treatment.setOutput(port);
        NextObjective.Builder nextBuilder = DefaultNextObjective.builder()
                .withId(NEXT_ID)
                .addTreatment(treatment.build())
                .withType(NextObjective.Type.SIMPLE)
                .fromApp(APPID);
        if (metabuilder != null) {
            nextBuilder.withMeta(metabuilder.build());
        }

        return add ? nextBuilder.add() : nextBuilder.remove();
    }

    /**
     * Creates a new forwarding objective with the given parameters.
     *
     * @param prefix IP prefix
     * @param add whether to create an add objective or a remove objective
     * @return new forwarding objective
     */
    private ForwardingObjective createForwardingObjective(IpPrefix prefix,
                                                          boolean add) {
        TrafficSelector selector = DefaultTrafficSelector.builder()
                .matchEthType(Ethernet.TYPE_IPV4)
                .matchIPDst(prefix)
                .build();

        // Longer prefixes get higher priority.
        int priority = prefix.prefixLength() * 5 + 100;
        ForwardingObjective.Builder fwdBuilder = DefaultForwardingObjective.builder()
                .fromApp(APPID)
                .makePermanent()
                .withSelector(selector)
                .withPriority(priority)
                .withFlag(ForwardingObjective.Flag.SPECIFIC);

        if (add) {
            fwdBuilder.nextStep(NEXT_ID);
        } else {
            fwdBuilder.withTreatment(DefaultTrafficTreatment.builder().build());
        }

        return add ? fwdBuilder.add() : fwdBuilder.remove();
    }

    /**
     * Tests adding a route.
     *
     * We verify that the flowObjectiveService records the correct state and that the
     * correct flow is submitted to the flowObjectiveService.
     */
    @Test
    public void testRouteAdd() {
        ResolvedRoute resolvedRoute = createRoute(PREFIX1, NEXT_HOP1, MAC1);

        // Create the next objective
        NextObjective nextObjective = createNextObjective(MAC1, MAC1, SW1_ETH1.port(), VlanId.NONE, true);
        flowObjectiveService.next(DEVICE_ID, nextObjective);

        // Create the flow objective
        ForwardingObjective fwd = createForwardingObjective(PREFIX1, true);
        flowObjectiveService.forward(DEVICE_ID, fwd);
        EasyMock.expectLastCall().once();
        setUpFlowObjectiveService();

        // Send in the add event
        RouteEvent routeEvent = new RouteEvent(RouteEvent.Type.ROUTE_ADDED, resolvedRoute);
        routeListener.event(routeEvent);

        verify(flowObjectiveService);
    }

    /**
     * Tests adding a route with to a next hop in a VLAN.
     *
     * We verify that the flowObjectiveService records the correct state and that the
     * correct flow objective is submitted to the flowObjectiveService.
     */
    @Test
    public void testRouteAddWithVlan() {
        ResolvedRoute route = createRoute(PREFIX1, NEXT_HOP2, MAC2);

        // Create the next objective
        NextObjective nextObjective = createNextObjective(MAC2, MAC2, SW1_ETH2.port(), VLAN1, true);
        flowObjectiveService.next(DEVICE_ID, nextObjective);

        // Create the flow objective
        ForwardingObjective fwd = createForwardingObjective(PREFIX1, true);
        flowObjectiveService.forward(DEVICE_ID, fwd);
        EasyMock.expectLastCall().once();
        setUpFlowObjectiveService();

        // Send in the add event
        routeListener.event(new RouteEvent(RouteEvent.Type.ROUTE_ADDED, route));

        verify(flowObjectiveService);
    }

    /**
     * Tests updating a route.
     *
     * We verify that the flowObjectiveService records the correct state and that the
     * correct flow is submitted to the flowObjectiveService.
     */
    @Test
    public void testRouteUpdate() {
        // Firstly add a route
        testRouteAdd();
        reset(flowObjectiveService);

        ResolvedRoute oldRoute = createRoute(PREFIX1, NEXT_HOP1, MAC1);
        ResolvedRoute route = createRoute(PREFIX1, NEXT_HOP2, MAC2);

        // Create the next objective
        NextObjective nextObjective = createNextObjective(MAC2, MAC2, SW1_ETH2.port(), VLAN1, true);
        flowObjectiveService.next(DEVICE_ID, nextObjective);

        // Create the flow objective
        ForwardingObjective fwd = createForwardingObjective(PREFIX1, true);
        flowObjectiveService.forward(DEVICE_ID, fwd);
        EasyMock.expectLastCall().once();
        setUpFlowObjectiveService();

        // Send in the update event
        routeListener.event(new RouteEvent(RouteEvent.Type.ROUTE_UPDATED, route, oldRoute));

        verify(flowObjectiveService);
    }

    /**
     * Tests deleting a route.
     *
     * We verify that the flowObjectiveService records the correct state and that the
     * correct flow is withdrawn from the flowObjectiveService.
     */
    @Test
    public void testRouteDelete() {
        // Firstly add a route
        testRouteAdd();

        // Construct the existing route
        ResolvedRoute route = createRoute(PREFIX1, NEXT_HOP1, MAC1);

        // Create the flow objective
        reset(flowObjectiveService);
        ForwardingObjective fwd = createForwardingObjective(PREFIX1, false);
        flowObjectiveService.forward(DEVICE_ID, fwd);
        replay(flowObjectiveService);

        // Send in the delete event
        routeListener.event(new RouteEvent(RouteEvent.Type.ROUTE_REMOVED, route));

        verify(flowObjectiveService);
    }

    // Convenience factory for a resolved route with the given next-hop MAC.
    private static ResolvedRoute createRoute(IpPrefix prefix, IpAddress nextHop,
                                             MacAddress nextHopMac) {
        return new ResolvedRoute(
                new Route(Route.Source.UNDEFINED, prefix, nextHop), nextHopMac);
    }

    // Captures the interface listener registered by the component under test.
    private class TestInterfaceService extends InterfaceServiceAdapter {
        @Override
        public void addListener(InterfaceListener listener) {
            interfaceListener = listener;
        }
    }

    // Captures the route listener so tests can feed RouteEvents directly.
    private class TestRouteService extends RouteServiceAdapter {
        @Override
        public void addListener(RouteListener listener) {
            FibInstallerTest.this.routeListener = listener;
        }
    }

    // Static router configuration pointing at the two test connect points.
    private class TestRouterConfig extends RouterConfig {

        @Override
        public List<String> getInterfaces() {
            ArrayList<String> interfaces = new ArrayList<>();
            interfaces.add("of:0000000000000001/1");
            interfaces.add("of:0000000000000001/2");
            return interfaces;
        }

        @Override
        public ConnectPoint getControlPlaneConnectPoint() {
            return SW1_ETH1;
        }

        @Override
        public boolean getOspfEnabled() {
            return true;
        }
    }

    // Device service stub that reports the device as available and captures
    // the component's device listener.
    private class TestDeviceService extends DeviceServiceAdapter {

        @Override
        public boolean isAvailable(DeviceId deviceId) {
            return true;
        }

        @Override
        public void addListener(DeviceListener listener) {
            FibInstallerTest.this.deviceListener = listener;
        }
    }
}
apache-2.0
ernestp/consulo
platform/editor-ui-ex/src/com/intellij/openapi/editor/ex/util/LayeredHighlighterIterator.java
899
/* * Copyright 2000-2013 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.openapi.editor.ex.util; import com.intellij.openapi.editor.highlighter.HighlighterIterator; import com.intellij.openapi.fileTypes.SyntaxHighlighter; public interface LayeredHighlighterIterator extends HighlighterIterator { SyntaxHighlighter getActiveSyntaxHighlighter(); }
apache-2.0
42wim/matterbridge
vendor/github.com/paulrosania/go-charset/data/data_ibm437.cp.go
885
// This file is automatically generated by generate-charset-data. // Do not hand-edit. package data import ( "github.com/paulrosania/go-charset/charset" "io" "io/ioutil" "strings" ) func init() { charset.RegisterDataFile("ibm437.cp", func() (io.ReadCloser, error) { r := strings.NewReader("\x00\x01\x02\x03\x04\x05\x06\a\b\t\n\v\f\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\u007fÇüéâäàåçêëèïîìÄÅÉæÆôöòûùÿÖÜ¢£¥₧ƒáíóúñѪº¿⌐¬½¼¡«»░▒▓│┤╡╢╖╕╣║╗╝╜╛┐└┴┬├─┼╞╟╚╔╩╦╠═╬╧╨╤╥╙╘╒╓╫╪┘┌█▄▌▐▀αßΓπΣσµτΦΘΩδ∞∅∈∩≡±≥≤⌠⌡÷≈°•·√ⁿ²∎\u00a0") return ioutil.NopCloser(r), nil }) }
apache-2.0
aertoria/opennlp
opennlp-uima/src/main/java/opennlp/uima/doccat/DocumentCategorizerTrainer.java
4765
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package opennlp.uima.doccat; import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.List; import opennlp.tools.doccat.DoccatModel; import opennlp.tools.doccat.DocumentCategorizerME; import opennlp.tools.doccat.DocumentSample; import opennlp.tools.ml.maxent.GIS; import opennlp.tools.util.ObjectStreamUtils; import opennlp.uima.util.CasConsumerUtil; import opennlp.uima.util.OpennlpUtil; import opennlp.uima.util.UimaUtil; import org.apache.uima.UimaContext; import org.apache.uima.cas.CAS; import org.apache.uima.cas.FSIndex; import org.apache.uima.cas.Feature; import org.apache.uima.cas.Type; import org.apache.uima.cas.TypeSystem; import org.apache.uima.cas.text.AnnotationFS; import org.apache.uima.collection.CasConsumer_ImplBase; import org.apache.uima.resource.ResourceInitializationException; import org.apache.uima.resource.ResourceProcessException; import org.apache.uima.util.Level; import org.apache.uima.util.Logger; import org.apache.uima.util.ProcessTrace; /** * OpenNLP NameFinder trainer. * * Note: This class is still work in progress, and should not be used! 
*/ public class DocumentCategorizerTrainer extends CasConsumer_ImplBase { private UimaContext mContext; private Logger mLogger; private String mModelName; private List<DocumentSample> documentSamples = new ArrayList<DocumentSample>(); private Type mTokenType; private Type mCategoryType; private Feature mCategoryFeature; private String language; public void initialize() throws ResourceInitializationException { super.initialize(); mContext = getUimaContext(); mLogger = mContext.getLogger(); if (mLogger.isLoggable(Level.INFO)) { mLogger.log(Level.INFO, "Initializing the OpenNLP Doccat Trainer."); } mModelName = CasConsumerUtil.getRequiredStringParameter(mContext, UimaUtil.MODEL_PARAMETER); language = CasConsumerUtil.getRequiredStringParameter(mContext, UimaUtil.LANGUAGE_PARAMETER); } public void typeSystemInit(TypeSystem typeSystem) throws ResourceInitializationException { String tokenTypeName = CasConsumerUtil.getRequiredStringParameter(mContext, UimaUtil.SENTENCE_TYPE_PARAMETER); mTokenType = CasConsumerUtil.getType(typeSystem, tokenTypeName); String categoryTypeName = CasConsumerUtil.getRequiredStringParameter(mContext, "opennlp.uima.doccat.CategoryType"); mCategoryType = CasConsumerUtil.getType(typeSystem, categoryTypeName); // get feature name String categoryFeatureName = CasConsumerUtil.getRequiredStringParameter(mContext, "opennlp.uima.doccat.CategoryFeature"); mCategoryFeature = mCategoryType.getFeatureByBaseName(categoryFeatureName); } public void processCas(CAS cas) throws ResourceProcessException { FSIndex categoryIndex = cas.getAnnotationIndex(mCategoryType); if (categoryIndex.size() > 0) { AnnotationFS categoryAnnotation = (AnnotationFS) categoryIndex.iterator().next(); // add to event collection DocumentSample sample = new DocumentSample( categoryAnnotation.getStringValue(mCategoryFeature), cas.getDocumentText()); documentSamples.add(sample); } } public void collectionProcessComplete(ProcessTrace trace) throws ResourceProcessException, IOException { 
GIS.PRINT_MESSAGES = false; DoccatModel categoryModel = DocumentCategorizerME.train(language, ObjectStreamUtils.createObjectStream(documentSamples)); File modelFile = new File(getUimaContextAdmin().getResourceManager() .getDataPath() + File.separatorChar + mModelName); OpennlpUtil.serialize(categoryModel, modelFile); } /** * The trainer is not stateless. */ public boolean isStateless() { return false; } /** * Destroys the current instance. */ public void destroy() { // dereference to allow garbage collection documentSamples = null; } }
apache-2.0
nknize/elasticsearch
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/SearchableSnapshotFeatureSetUsage.java
2616
/* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ package org.elasticsearch.xpack.core.searchablesnapshots; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.XPackFeatureSet; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Objects; public class SearchableSnapshotFeatureSetUsage extends XPackFeatureSet.Usage { private final int numberOfSearchableSnapshotIndices; public SearchableSnapshotFeatureSetUsage(StreamInput input) throws IOException { super(input); numberOfSearchableSnapshotIndices = input.readVInt(); } @Override public Version getMinimalSupportedVersion() { return Version.V_7_9_0; } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeVInt(numberOfSearchableSnapshotIndices); } public SearchableSnapshotFeatureSetUsage(boolean available, int numberOfSearchableSnapshotIndices) { super(XPackField.SEARCHABLE_SNAPSHOTS, available, true); this.numberOfSearchableSnapshotIndices = numberOfSearchableSnapshotIndices; } @Override protected void innerXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { super.innerXContent(builder, params); builder.field("indices_count", numberOfSearchableSnapshotIndices); } public int getNumberOfSearchableSnapshotIndices() { return numberOfSearchableSnapshotIndices; } @Override public int hashCode() { return Objects.hash(available, enabled, numberOfSearchableSnapshotIndices); } @Override public boolean equals(Object obj) { if (obj == null) { return false; } if (getClass() != obj.getClass()) { return 
false; } SearchableSnapshotFeatureSetUsage other = (SearchableSnapshotFeatureSetUsage) obj; return Objects.equals(available, other.available) && Objects.equals(enabled, other.enabled) && Objects.equals(numberOfSearchableSnapshotIndices, other.numberOfSearchableSnapshotIndices); } }
apache-2.0
droolsjbpm/droolsjbpm-integration
kie-maven-plugin/src/it/kie-maven-plugin-test-kjar-10-default/src/test/java-filtered/org/kie/maven/plugin/ittests/ExecModelParameterTestIT.java
2044
/* * Copyright 2021 Red Hat, Inc. and/or its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kie.maven.plugin.ittests; import java.net.URL; import org.assertj.core.api.Assertions; import org.drools.compiler.kie.builder.impl.KieContainerImpl; import org.junit.Test; import org.kie.api.builder.KieModule; import org.kie.api.runtime.KieContainer; import org.kie.api.runtime.KieSession; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; public class ExecModelParameterTestIT { private static final String GAV_ARTIFACT_ID = "kie-maven-plugin-test-kjar-10-default"; private static final String GAV_VERSION = "${org.kie.version}"; private final static String KBASE_NAME = "SimpleKBase"; private final static String RULE_NAME = "Hello"; private static final String CANONICAL_KIE_MODULE = "org.drools.modelcompiler.CanonicalKieModule"; @Test public void testWithoutDroolsModelCompilerOnClassPathDoNotRunExecModel() throws Exception { KieModule kieModule = fireRule(); assertNotNull(kieModule); assertFalse(kieModule.getClass().getCanonicalName().equals(CANONICAL_KIE_MODULE)); } private KieModule fireRule() throws Exception { final URL targetLocation = ExecModelParameterTestIT.class.getProtectionDomain().getCodeSource().getLocation(); return ITTestsUtils.fireRule(targetLocation, GAV_ARTIFACT_ID, GAV_VERSION, KBASE_NAME, RULE_NAME); } }
apache-2.0
Donnerbart/hazelcast
hazelcast/src/main/java/com/hazelcast/concurrent/lock/ObjectNamespaceSerializationHelper.java
1933
/* * Copyright (c) 2008-2018, Hazelcast, Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.hazelcast.concurrent.lock; import com.hazelcast.nio.ObjectDataInput; import com.hazelcast.nio.ObjectDataOutput; import com.hazelcast.spi.DefaultObjectNamespace; import com.hazelcast.spi.DistributedObjectNamespace; import com.hazelcast.spi.ObjectNamespace; import java.io.IOException; /** * Serialization helper to serialize/deserialize {@link DistributedObjectNamespace} * and {@link DefaultObjectNamespace} between 3.8 and 3.9 members compatibly. * <p> * This class is not needed for versions after 3.9. */ public final class ObjectNamespaceSerializationHelper { private ObjectNamespaceSerializationHelper() { } public static void writeNamespaceCompatibly(ObjectNamespace namespace, ObjectDataOutput out) throws IOException { if (namespace.getClass() == DefaultObjectNamespace.class) { out.writeObject(new DistributedObjectNamespace(namespace)); } else { out.writeObject(namespace); } } public static ObjectNamespace readNamespaceCompatibly(ObjectDataInput in) throws IOException { ObjectNamespace namespace = in.readObject(); if (namespace.getClass() == DefaultObjectNamespace.class) { namespace = new DistributedObjectNamespace(namespace); } return namespace; } }
apache-2.0
BrightSet/LightOS
dojoedit/Libraries/dojo-release-1.4.2/dojox/layout/DragPane.js
1156
/* Copyright (c) 2004-2009, The Dojo Foundation All Rights Reserved. Available via Academic Free License >= 2.1 OR the modified BSD license. see: http://dojotoolkit.org/license for details */ if(!dojo._hasResource["dojox.layout.DragPane"]){ dojo._hasResource["dojox.layout.DragPane"]=true; dojo.provide("dojox.layout.DragPane"); dojo.require("dijit._Widget"); dojo.declare("dojox.layout.DragPane",dijit._Widget,{invert:true,postCreate:function(){ this.inherited(arguments); this.connect(this.domNode,"onmousedown","_down"); this.connect(this.domNode,"onmouseup","_up"); },_down:function(e){ var t=this.domNode; dojo.style(t,"cursor","move"); this._x=e.pageX; this._y=e.pageY; if((this._x<t.offsetLeft+t.clientWidth)&&(this._y<t.offsetTop+t.clientHeight)){ dojo.setSelectable(t,false); this._mover=this.connect(t,"onmousemove","_move"); } },_up:function(e){ dojo.setSelectable(this.domNode,true); dojo.style(this.domNode,"cursor","pointer"); this.disconnect(this._mover); },_move:function(e){ var _1=this.invert?1:-1; this.domNode.scrollTop+=(this._y-e.pageY)*_1; this.domNode.scrollLeft+=(this._x-e.pageX)*_1; this._x=e.pageX; this._y=e.pageY; }}); }
apache-2.0
oeeagle/onos
providers/pcep/tunnel/src/test/java/org/onosproject/provider/pcep/tunnel/impl/PcepClientControllerAdapter.java
5697
package org.onosproject.provider.pcep.tunnel.impl; import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import org.apache.felix.scr.annotations.Activate; import org.apache.felix.scr.annotations.Deactivate; import org.onlab.packet.IpAddress; import org.onosproject.pcep.controller.PccId; import org.onosproject.pcep.controller.PcepClient; import org.onosproject.pcep.controller.PcepClientController; import org.onosproject.pcep.controller.PcepClientListener; import org.onosproject.pcep.controller.PcepEventListener; import org.onosproject.pcep.controller.driver.PcepAgent; import org.onosproject.pcepio.protocol.PcepMessage; import org.onosproject.pcepio.protocol.PcepVersion; import com.google.common.collect.Sets; public class PcepClientControllerAdapter implements PcepClientController { protected ConcurrentHashMap<PccId, PcepClient> connectedClients = new ConcurrentHashMap<PccId, PcepClient>(); protected PcepClientAgent agent = new PcepClientAgent(); protected Set<PcepClientListener> pcepClientListener = new HashSet<>(); protected Set<PcepEventListener> pcepEventListener = Sets.newHashSet(); @Activate public void activate() { } @Deactivate public void deactivate() { } @Override public Collection<PcepClient> getClients() { return connectedClients.values(); } @Override public PcepClient getClient(PccId pccId) { //return connectedClients.get(pccIpAddress); PcepClientAdapter pc = new PcepClientAdapter(); pc.init(PccId.pccId(IpAddress.valueOf(0xac000001)), PcepVersion.PCEP_1); return pc; } @Override public void addListener(PcepClientListener listener) { if (!pcepClientListener.contains(listener)) { this.pcepClientListener.add(listener); } } @Override public void removeListener(PcepClientListener listener) { this.pcepClientListener.remove(listener); } @Override public void addEventListener(PcepEventListener listener) { pcepEventListener.add(listener); } @Override public void 
removeEventListener(PcepEventListener listener) { pcepEventListener.remove(listener); } @Override public void writeMessage(PccId pccId, PcepMessage msg) { this.getClient(pccId).sendMessage(msg); } @Override public void processClientMessage(PccId pccId, PcepMessage msg) { PcepClient pc = getClient(pccId); switch (msg.getType()) { case NONE: break; case OPEN: break; case KEEP_ALIVE: //log.debug("Sending Keep Alive Message to {" + pccIpAddress.toString() + "}"); pc.sendMessage(Collections.singletonList(pc.factory().buildKeepaliveMsg().build())); break; case PATH_COMPUTATION_REQUEST: break; case PATH_COMPUTATION_REPLY: break; case NOTIFICATION: break; case ERROR: break; case CLOSE: //log.debug("Sending Close Message to { }", pccIpAddress.toString()); pc.sendMessage(Collections.singletonList(pc.factory().buildCloseMsg().build())); break; case REPORT: for (PcepEventListener l : pcepEventListener) { l.handleMessage(pccId, msg); } break; case UPDATE: for (PcepEventListener l : pcepEventListener) { l.handleMessage(pccId, msg); } break; case INITIATE: for (PcepEventListener l : pcepEventListener) { l.handleMessage(pccId, msg); } break; case LABEL_UPDATE: break; case MAX: break; case END: break; default: break; } } @Override public void closeConnectedClients() { PcepClient pc; for (PccId id : connectedClients.keySet()) { pc = getClient(id); pc.disconnectClient(); } } /** * Implementation of an Pcep Agent which is responsible for * keeping track of connected clients and the state in which * they are. 
*/ public class PcepClientAgent implements PcepAgent { @Override public boolean addConnectedClient(PccId pccId, PcepClient pc) { if (connectedClients.get(pccId) != null) { return false; } else { connectedClients.put(pccId, pc); for (PcepClientListener l : pcepClientListener) { l.clientConnected(pccId); } return true; } } @Override public boolean validActivation(PccId pccId) { if (connectedClients.get(pccId) == null) { //log.error("Trying to activate client but is not in " // + "connected switches: pccIp {}. Aborting ..", pccIpAddress.toString()); return false; } return true; } @Override public void removeConnectedClient(PccId pccId) { connectedClients.remove(pccId); for (PcepClientListener l : pcepClientListener) { //log.warn("removal for {}", pccIpAddress.toString()); l.clientDisconnected(pccId); } } @Override public void processPcepMessage(PccId pccId, PcepMessage m) { processClientMessage(pccId, m); } } }
apache-2.0
michaelgallacher/intellij-community
platform/lang-impl/src/com/intellij/profile/codeInspection/ui/filter/InspectionFilterAction.java
10129
/* * Copyright 2000-2016 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.profile.codeInspection.ui.filter; import com.intellij.codeHighlighting.HighlightDisplayLevel; import com.intellij.codeInsight.daemon.impl.SeverityRegistrar; import com.intellij.codeInspection.ex.InspectionProfileImpl; import com.intellij.codeInspection.ex.ScopeToolState; import com.intellij.icons.AllIcons; import com.intellij.lang.Language; import com.intellij.lang.annotation.HighlightSeverity; import com.intellij.openapi.actionSystem.*; import com.intellij.openapi.actionSystem.ex.CheckboxAction; import com.intellij.openapi.application.ApplicationInfo; import com.intellij.openapi.application.ApplicationNamesInfo; import com.intellij.openapi.project.DumbAware; import com.intellij.openapi.project.DumbAwareAction; import com.intellij.openapi.project.Project; import com.intellij.openapi.util.text.StringUtil; import com.intellij.profile.codeInspection.SeverityProvider; import com.intellij.profile.codeInspection.ui.LevelChooserAction; import com.intellij.profile.codeInspection.ui.SingleInspectionProfilePanel; import com.intellij.ui.FilterComponent; import com.intellij.util.containers.HashSet; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import java.util.*; /** * @author Dmitry Batkovich */ public class InspectionFilterAction extends DefaultActionGroup implements Toggleable, DumbAware { private final static int 
MIN_LANGUAGE_COUNT_TO_WRAP = 11; private final SeverityRegistrar mySeverityRegistrar; private final InspectionsFilter myInspectionsFilter; @NotNull private final FilterComponent myFilterComponent; public InspectionFilterAction(@NotNull InspectionProfileImpl profile, @NotNull InspectionsFilter inspectionsFilter, @NotNull Project project, @NotNull FilterComponent filterComponent) { super("Filter Inspections", true); myInspectionsFilter = inspectionsFilter; myFilterComponent = filterComponent; mySeverityRegistrar = ((SeverityProvider)profile.getProfileManager()).getOwnSeverityRegistrar(); getTemplatePresentation().setIcon(AllIcons.General.Filter); tune(profile, project); } @Override public void update(AnActionEvent e) { super.update(e); e.getPresentation().putClientProperty(Toggleable.SELECTED_PROPERTY, !myInspectionsFilter.isEmptyFilter()); } private void tune(InspectionProfileImpl profile, Project project) { addAction(new ResetFilterAction()); addSeparator(); if (ApplicationNamesInfo.getInstance().getProductName().contains("IDEA")) { // minor IDEs don't have "New in XXX" in inspection descriptions addAction(new ShowNewInspectionsAction()); } addSeparator(); addAction(new ShowEnabledOrDisabledInspectionsAction(true)); addAction(new ShowEnabledOrDisabledInspectionsAction(false)); addAction(new ShowOnlyModifiedInspectionsAction()); addSeparator(); for (final HighlightSeverity severity : LevelChooserAction.getSeverities(mySeverityRegistrar)) { add(new ShowWithSpecifiedSeverityInspectionsAction(severity)); } addSeparator(); final Set<String> languageIds = new HashSet<>(); for (ScopeToolState state : profile.getDefaultStates(project)) { final String languageId = state.getTool().getLanguage(); languageIds.add(languageId); } final List<Language> languages = new ArrayList<>(); for (String id : languageIds) { if (id != null) { final Language language = Language.findLanguageByID(id); if (language != null) { languages.add(language); } } } if (!languages.isEmpty()) { final 
DefaultActionGroup languageActionGroupParent = new DefaultActionGroup("Filter by Language", languages.size() >= MIN_LANGUAGE_COUNT_TO_WRAP); add(languageActionGroupParent); Collections.sort(languages, (l1, l2) -> l1.getDisplayName().compareTo(l2.getDisplayName())); for (Language language : languages) { languageActionGroupParent.add(new LanguageFilterAction(language)); } languageActionGroupParent.add(new LanguageFilterAction(null)); addSeparator(); } add(new ShowAvailableOnlyOnAnalyzeInspectionsAction()); add(new ShowOnlyCleanupInspectionsAction()); } private class ResetFilterAction extends DumbAwareAction { public ResetFilterAction() { super("Reset Filter"); } @Override public void actionPerformed(@NotNull AnActionEvent e) { myInspectionsFilter.reset(); } @Override public void update(@NotNull AnActionEvent e) { final Presentation presentation = e.getPresentation(); presentation.setEnabled(!myInspectionsFilter.isEmptyFilter()); } } private class ShowOnlyCleanupInspectionsAction extends CheckboxAction implements DumbAware{ public ShowOnlyCleanupInspectionsAction() { super("Show Only Cleanup Inspections"); } @Override public boolean isSelected(final AnActionEvent e) { return myInspectionsFilter.isShowOnlyCleanupInspections(); } @Override public void setSelected(final AnActionEvent e, final boolean state) { myInspectionsFilter.setShowOnlyCleanupInspections(state); } } private class ShowAvailableOnlyOnAnalyzeInspectionsAction extends CheckboxAction implements DumbAware { public ShowAvailableOnlyOnAnalyzeInspectionsAction() { super("Show Only \"Available only for Analyze | Inspect Code\""); } @Override public boolean isSelected(final AnActionEvent e) { return myInspectionsFilter.isAvailableOnlyForAnalyze(); } @Override public void setSelected(final AnActionEvent e, final boolean state) { myInspectionsFilter.setAvailableOnlyForAnalyze(state); } } private class ShowWithSpecifiedSeverityInspectionsAction extends CheckboxAction implements DumbAware { private final 
HighlightSeverity mySeverity; private ShowWithSpecifiedSeverityInspectionsAction(final HighlightSeverity severity) { super(SingleInspectionProfilePanel.renderSeverity(severity), null, HighlightDisplayLevel.find(severity).getIcon()); mySeverity = severity; } @Override public boolean isSelected(final AnActionEvent e) { return myInspectionsFilter.containsSeverity(mySeverity); } @Override public void setSelected(final AnActionEvent e, final boolean state) { if (state) { myInspectionsFilter.addSeverity(mySeverity); } else { myInspectionsFilter.removeSeverity(mySeverity); } } } private class ShowEnabledOrDisabledInspectionsAction extends CheckboxAction implements DumbAware{ private final Boolean myShowEnabledActions; public ShowEnabledOrDisabledInspectionsAction(final boolean showEnabledActions) { super("Show Only " + (showEnabledActions ? "Enabled" : "Disabled")); myShowEnabledActions = showEnabledActions; } @Override public boolean isSelected(final AnActionEvent e) { return myInspectionsFilter.getSuitableInspectionsStates() == myShowEnabledActions; } @Override public void setSelected(final AnActionEvent e, final boolean state) { final boolean previousState = isSelected(e); myInspectionsFilter.setSuitableInspectionsStates(previousState ? null : myShowEnabledActions); } } private class LanguageFilterAction extends CheckboxAction implements DumbAware { private final String myLanguageId; public LanguageFilterAction(final @Nullable Language language) { super(language == null ? "Language is not specified" : language.getDisplayName()); myLanguageId = language == null ? 
null : language.getID(); } @Override public boolean isSelected(AnActionEvent e) { return myInspectionsFilter.containsLanguageId(myLanguageId); } @Override public void setSelected(AnActionEvent e, boolean state) { if (state) { myInspectionsFilter.addLanguageId(myLanguageId); } else { myInspectionsFilter.removeLanguageId(myLanguageId); } } } private final String version = ApplicationInfo.getInstance().getMajorVersion() + (StringUtil.isEmptyOrSpaces(StringUtil.trimStart(ApplicationInfo.getInstance().getMinorVersion(),"0")) ? "" : "."+ApplicationInfo.getInstance().getMinorVersion()); private final String presentableVersion = ApplicationNamesInfo.getInstance().getProductName() + " " + version; private class ShowNewInspectionsAction extends AnAction implements DumbAware { private ShowNewInspectionsAction() { super("Show New Inspections in " + presentableVersion, "Shows new inspections which are available since " + presentableVersion, AllIcons.Actions.Lightning); } @Override public void actionPerformed(AnActionEvent e) { myFilterComponent.setFilter("\"New in " + version + "\""); } } private class ShowOnlyModifiedInspectionsAction extends CheckboxAction implements DumbAware { public ShowOnlyModifiedInspectionsAction() { super("Show Only Modified Inspections"); } @Override public boolean isSelected(AnActionEvent e) { return myInspectionsFilter.isShowOnlyModifiedInspections(); } @Override public void setSelected(AnActionEvent e, boolean state) { myInspectionsFilter.setShowOnlyModifiedInspections(state); } } }
apache-2.0
datametica/calcite
core/src/main/java/org/apache/calcite/schema/impl/ModifiableViewTable.java
11262
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to you under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.calcite.schema.impl; import org.apache.calcite.plan.RelOptTable; import org.apache.calcite.plan.RelOptUtil; import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rel.type.RelDataTypeFactory; import org.apache.calcite.rel.type.RelDataTypeField; import org.apache.calcite.rel.type.RelDataTypeImpl; import org.apache.calcite.rel.type.RelProtoDataType; import org.apache.calcite.rex.RexBuilder; import org.apache.calcite.rex.RexNode; import org.apache.calcite.schema.ColumnStrategy; import org.apache.calcite.schema.ExtensibleTable; import org.apache.calcite.schema.ModifiableView; import org.apache.calcite.schema.Path; import org.apache.calcite.schema.Table; import org.apache.calcite.schema.Wrapper; import org.apache.calcite.sql.SqlFunction; import org.apache.calcite.sql2rel.InitializerContext; import org.apache.calcite.sql2rel.InitializerExpressionFactory; import org.apache.calcite.sql2rel.NullInitializerExpressionFactory; import org.apache.calcite.util.ImmutableIntList; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.checkerframework.checker.nullness.qual.Nullable; import java.lang.reflect.Type; import 
java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import static org.apache.calcite.sql.validate.SqlValidatorUtil.mapNameToIndex; import static java.util.Objects.requireNonNull; /** Extension to {@link ViewTable} that is modifiable. */ public class ModifiableViewTable extends ViewTable implements ModifiableView, Wrapper { private final Table table; private final Path tablePath; private final RexNode constraint; private final ImmutableIntList columnMapping; private final InitializerExpressionFactory initializerExpressionFactory; /** Creates a ModifiableViewTable. */ public ModifiableViewTable(Type elementType, RelProtoDataType rowType, String viewSql, List<String> schemaPath, @Nullable List<String> viewPath, Table table, Path tablePath, RexNode constraint, ImmutableIntList columnMapping) { super(elementType, rowType, viewSql, schemaPath, viewPath); this.table = table; this.tablePath = tablePath; this.constraint = constraint; this.columnMapping = columnMapping; this.initializerExpressionFactory = new ModifiableViewTableInitializerExpressionFactory(); } @Override public RexNode getConstraint(RexBuilder rexBuilder, RelDataType tableRowType) { return rexBuilder.copy(constraint); } @Override public ImmutableIntList getColumnMapping() { return columnMapping; } @Override public Table getTable() { return table; } @Override public Path getTablePath() { return tablePath; } @Override public <C extends Object> @Nullable C unwrap(Class<C> aClass) { if (aClass.isInstance(initializerExpressionFactory)) { return aClass.cast(initializerExpressionFactory); } else if (aClass.isInstance(table)) { return aClass.cast(table); } return super.unwrap(aClass); } /** * Extends the underlying table and returns a new view with updated row-type * and column-mapping. 
* * <p>The type factory is used to perform some scratch calculations, viz the * type mapping, but the "real" row-type will be assigned later, when the * table has been bound to the statement's type factory. The is important, * because adding types to type factories that do not belong to a statement * could potentially leak memory. * * @param extendedColumns Extended fields * @param typeFactory Type factory */ public final ModifiableViewTable extend( List<RelDataTypeField> extendedColumns, RelDataTypeFactory typeFactory) { final ExtensibleTable underlying = unwrap(ExtensibleTable.class); assert underlying != null; final RelDataTypeFactory.Builder builder = typeFactory.builder(); final RelDataType rowType = getRowType(typeFactory); for (RelDataTypeField column : rowType.getFieldList()) { builder.add(column); } for (RelDataTypeField column : extendedColumns) { builder.add(column); } // The characteristics of the new view. final RelDataType newRowType = builder.build(); final ImmutableIntList newColumnMapping = getNewColumnMapping(underlying, getColumnMapping(), extendedColumns, typeFactory); // Extend the underlying table with only the fields that // duplicate column names in neither the view nor the base table. final List<RelDataTypeField> underlyingColumns = underlying.getRowType(typeFactory).getFieldList(); final List<RelDataTypeField> columnsOfExtendedBaseTable = RelOptUtil.deduplicateColumns(underlyingColumns, extendedColumns); final List<RelDataTypeField> extendColumnsOfBaseTable = columnsOfExtendedBaseTable.subList( underlyingColumns.size(), columnsOfExtendedBaseTable.size()); final Table extendedTable = underlying.extend(extendColumnsOfBaseTable); return extend(extendedTable, RelDataTypeImpl.proto(newRowType), newColumnMapping); } /** * Creates a mapping from the view index to the index in the underlying table. 
*/ private static ImmutableIntList getNewColumnMapping(Table underlying, ImmutableIntList oldColumnMapping, List<RelDataTypeField> extendedColumns, RelDataTypeFactory typeFactory) { final List<RelDataTypeField> baseColumns = underlying.getRowType(typeFactory).getFieldList(); final Map<String, Integer> nameToIndex = mapNameToIndex(baseColumns); final ImmutableList.Builder<Integer> newMapping = ImmutableList.builder(); newMapping.addAll(oldColumnMapping); int newMappedIndex = baseColumns.size(); for (RelDataTypeField extendedColumn : extendedColumns) { String extendedColumnName = extendedColumn.getName(); if (nameToIndex.containsKey(extendedColumnName)) { // The extended column duplicates a column in the underlying table. // Map to the index in the underlying table. newMapping.add(nameToIndex.get(extendedColumnName)); } else { // The extended column is not in the underlying table. newMapping.add(newMappedIndex++); } } return ImmutableIntList.copyOf(newMapping.build()); } protected ModifiableViewTable extend(Table extendedTable, RelProtoDataType protoRowType, ImmutableIntList newColumnMapping) { return new ModifiableViewTable(getElementType(), protoRowType, getViewSql(), getSchemaPath(), getViewPath(), extendedTable, getTablePath(), constraint, newColumnMapping); } /** * Initializes columns based on the view constraint. 
*/ private class ModifiableViewTableInitializerExpressionFactory extends NullInitializerExpressionFactory { private final ImmutableMap<Integer, RexNode> projectMap; private ModifiableViewTableInitializerExpressionFactory() { super(); final Map<Integer, RexNode> projectMap = new HashMap<>(); final List<RexNode> filters = new ArrayList<>(); RelOptUtil.inferViewPredicates(projectMap, filters, constraint); assert filters.isEmpty(); this.projectMap = ImmutableMap.copyOf(projectMap); } @Override public ColumnStrategy generationStrategy(RelOptTable table, int iColumn) { final ModifiableViewTable viewTable = requireNonNull( table.unwrap(ModifiableViewTable.class), () -> "unable to unwrap ModifiableViewTable from " + table); assert iColumn < viewTable.columnMapping.size(); // Use the view constraint to generate the default value if the column is // constrained. final int mappedOrdinal = viewTable.columnMapping.get(iColumn); final RexNode viewConstraint = projectMap.get(mappedOrdinal); if (viewConstraint != null) { return ColumnStrategy.DEFAULT; } // Otherwise use the default value of the underlying table. 
final Table schemaTable = viewTable.getTable(); if (schemaTable instanceof Wrapper) { final InitializerExpressionFactory initializerExpressionFactory = ((Wrapper) schemaTable).unwrap(InitializerExpressionFactory.class); if (initializerExpressionFactory != null) { return initializerExpressionFactory.generationStrategy(table, iColumn); } } return super.generationStrategy(table, iColumn); } @Override public RexNode newColumnDefaultValue(RelOptTable table, int iColumn, InitializerContext context) { final ModifiableViewTable viewTable = requireNonNull( table.unwrap(ModifiableViewTable.class), () -> "unable to unwrap ModifiableViewTable from " + table); assert iColumn < viewTable.columnMapping.size(); final RexBuilder rexBuilder = context.getRexBuilder(); final RelDataTypeFactory typeFactory = rexBuilder.getTypeFactory(); final RelDataType viewType = viewTable.getRowType(typeFactory); final RelDataType iType = viewType.getFieldList().get(iColumn).getType(); // Use the view constraint to generate the default value if the column is constrained. final int mappedOrdinal = viewTable.columnMapping.get(iColumn); final RexNode viewConstraint = projectMap.get(mappedOrdinal); if (viewConstraint != null) { return rexBuilder.ensureType(iType, viewConstraint, true); } // Otherwise use the default value of the underlying table. final Table schemaTable = viewTable.getTable(); if (schemaTable instanceof Wrapper) { final InitializerExpressionFactory initializerExpressionFactory = ((Wrapper) schemaTable).unwrap(InitializerExpressionFactory.class); if (initializerExpressionFactory != null) { final RexNode tableConstraint = initializerExpressionFactory.newColumnDefaultValue(table, iColumn, context); return rexBuilder.ensureType(iType, tableConstraint, true); } } // Otherwise Sql type of NULL. 
return super.newColumnDefaultValue(table, iColumn, context); } @Override public RexNode newAttributeInitializer(RelDataType type, SqlFunction constructor, int iAttribute, List<RexNode> constructorArgs, InitializerContext context) { throw new UnsupportedOperationException("Not implemented - unknown requirements"); } } }
apache-2.0
Dubland/kaa
server/common/transport-shared/src/main/java/org/kaaproject/kaa/server/transport/message/SessionDisconnectMessage.java
1007
/* * Copyright 2014-2016 CyberVision, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kaaproject.kaa.server.transport.message; import org.kaaproject.kaa.server.transport.session.SessionInfo; /** * An abstract class that identifies a session disconnect message. * * @author Andrew Shvayka * */ public abstract class SessionDisconnectMessage extends SessionControlMessage { public SessionDisconnectMessage(SessionInfo session) { super(session); } }
apache-2.0
kironapublic/vaadin
uitest/src/main/java/com/vaadin/tests/themes/valo/GridDisabled.java
718
package com.vaadin.tests.themes.valo; import com.vaadin.server.VaadinRequest; import com.vaadin.tests.components.AbstractTestUI; import com.vaadin.ui.Button; import com.vaadin.v7.ui.Grid; public class GridDisabled extends AbstractTestUI { @Override protected void setup(VaadinRequest request) { final Grid grid = new Grid(); grid.addColumn("foo", String.class); grid.addRow("Foo"); grid.select(grid.addRow("Bar")); addComponent(grid); addButton("Disable", new Button.ClickListener() { @Override public void buttonClick(Button.ClickEvent event) { grid.setEnabled(!grid.isEnabled()); } }); } }
apache-2.0
ya7lelkom/googleads-java-lib
modules/dfp_axis/src/main/java/com/google/api/ads/dfp/axis/v201405/CreativeWrapperServiceLocator.java
6167
/** * CreativeWrapperServiceLocator.java * * This file was auto-generated from WSDL * by the Apache Axis 1.4 Mar 02, 2009 (07:08:06 PST) WSDL2Java emitter. */ package com.google.api.ads.dfp.axis.v201405; public class CreativeWrapperServiceLocator extends org.apache.axis.client.Service implements com.google.api.ads.dfp.axis.v201405.CreativeWrapperService { public CreativeWrapperServiceLocator() { } public CreativeWrapperServiceLocator(org.apache.axis.EngineConfiguration config) { super(config); } public CreativeWrapperServiceLocator(java.lang.String wsdlLoc, javax.xml.namespace.QName sName) throws javax.xml.rpc.ServiceException { super(wsdlLoc, sName); } // Use to get a proxy class for CreativeWrapperServiceInterfacePort private java.lang.String CreativeWrapperServiceInterfacePort_address = "https://ads.google.com/apis/ads/publisher/v201405/CreativeWrapperService"; public java.lang.String getCreativeWrapperServiceInterfacePortAddress() { return CreativeWrapperServiceInterfacePort_address; } // The WSDD service name defaults to the port name. 
private java.lang.String CreativeWrapperServiceInterfacePortWSDDServiceName = "CreativeWrapperServiceInterfacePort"; public java.lang.String getCreativeWrapperServiceInterfacePortWSDDServiceName() { return CreativeWrapperServiceInterfacePortWSDDServiceName; } public void setCreativeWrapperServiceInterfacePortWSDDServiceName(java.lang.String name) { CreativeWrapperServiceInterfacePortWSDDServiceName = name; } public com.google.api.ads.dfp.axis.v201405.CreativeWrapperServiceInterface getCreativeWrapperServiceInterfacePort() throws javax.xml.rpc.ServiceException { java.net.URL endpoint; try { endpoint = new java.net.URL(CreativeWrapperServiceInterfacePort_address); } catch (java.net.MalformedURLException e) { throw new javax.xml.rpc.ServiceException(e); } return getCreativeWrapperServiceInterfacePort(endpoint); } public com.google.api.ads.dfp.axis.v201405.CreativeWrapperServiceInterface getCreativeWrapperServiceInterfacePort(java.net.URL portAddress) throws javax.xml.rpc.ServiceException { try { com.google.api.ads.dfp.axis.v201405.CreativeWrapperServiceSoapBindingStub _stub = new com.google.api.ads.dfp.axis.v201405.CreativeWrapperServiceSoapBindingStub(portAddress, this); _stub.setPortName(getCreativeWrapperServiceInterfacePortWSDDServiceName()); return _stub; } catch (org.apache.axis.AxisFault e) { return null; } } public void setCreativeWrapperServiceInterfacePortEndpointAddress(java.lang.String address) { CreativeWrapperServiceInterfacePort_address = address; } /** * For the given interface, get the stub implementation. * If this service has no port for the given interface, * then ServiceException is thrown. 
*/ public java.rmi.Remote getPort(Class serviceEndpointInterface) throws javax.xml.rpc.ServiceException { try { if (com.google.api.ads.dfp.axis.v201405.CreativeWrapperServiceInterface.class.isAssignableFrom(serviceEndpointInterface)) { com.google.api.ads.dfp.axis.v201405.CreativeWrapperServiceSoapBindingStub _stub = new com.google.api.ads.dfp.axis.v201405.CreativeWrapperServiceSoapBindingStub(new java.net.URL(CreativeWrapperServiceInterfacePort_address), this); _stub.setPortName(getCreativeWrapperServiceInterfacePortWSDDServiceName()); return _stub; } } catch (java.lang.Throwable t) { throw new javax.xml.rpc.ServiceException(t); } throw new javax.xml.rpc.ServiceException("There is no stub implementation for the interface: " + (serviceEndpointInterface == null ? "null" : serviceEndpointInterface.getName())); } /** * For the given interface, get the stub implementation. * If this service has no port for the given interface, * then ServiceException is thrown. */ public java.rmi.Remote getPort(javax.xml.namespace.QName portName, Class serviceEndpointInterface) throws javax.xml.rpc.ServiceException { if (portName == null) { return getPort(serviceEndpointInterface); } java.lang.String inputPortName = portName.getLocalPart(); if ("CreativeWrapperServiceInterfacePort".equals(inputPortName)) { return getCreativeWrapperServiceInterfacePort(); } else { java.rmi.Remote _stub = getPort(serviceEndpointInterface); ((org.apache.axis.client.Stub) _stub).setPortName(portName); return _stub; } } public javax.xml.namespace.QName getServiceName() { return new javax.xml.namespace.QName("https://www.google.com/apis/ads/publisher/v201405", "CreativeWrapperService"); } private java.util.HashSet ports = null; public java.util.Iterator getPorts() { if (ports == null) { ports = new java.util.HashSet(); ports.add(new javax.xml.namespace.QName("https://www.google.com/apis/ads/publisher/v201405", "CreativeWrapperServiceInterfacePort")); } return ports.iterator(); } /** * Set the endpoint address 
for the specified port name. */ public void setEndpointAddress(java.lang.String portName, java.lang.String address) throws javax.xml.rpc.ServiceException { if ("CreativeWrapperServiceInterfacePort".equals(portName)) { setCreativeWrapperServiceInterfacePortEndpointAddress(address); } else { // Unknown Port Name throw new javax.xml.rpc.ServiceException(" Cannot set Endpoint Address for Unknown Port" + portName); } } /** * Set the endpoint address for the specified port name. */ public void setEndpointAddress(javax.xml.namespace.QName portName, java.lang.String address) throws javax.xml.rpc.ServiceException { setEndpointAddress(portName.getLocalPart(), address); } }
apache-2.0
DanielTing/presto
presto-main/src/main/java/com/facebook/presto/operator/scalar/ConcatFunction.java
6841
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.facebook.presto.operator.scalar; import com.facebook.presto.byteCode.ByteCodeBlock; import com.facebook.presto.byteCode.ClassDefinition; import com.facebook.presto.byteCode.DynamicClassLoader; import com.facebook.presto.byteCode.MethodDefinition; import com.facebook.presto.byteCode.Parameter; import com.facebook.presto.byteCode.Scope; import com.facebook.presto.byteCode.Variable; import com.facebook.presto.byteCode.expression.ByteCodeExpression; import com.facebook.presto.metadata.FunctionInfo; import com.facebook.presto.metadata.FunctionRegistry; import com.facebook.presto.metadata.ParametricFunction; import com.facebook.presto.metadata.Signature; import com.facebook.presto.spi.PrestoException; import com.facebook.presto.spi.type.StandardTypes; import com.facebook.presto.spi.type.Type; import com.facebook.presto.spi.type.TypeManager; import com.facebook.presto.sql.gen.CompilerUtils; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import io.airlift.slice.Slice; import io.airlift.slice.Slices; import java.lang.invoke.MethodHandle; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.stream.IntStream; import static com.facebook.presto.byteCode.Access.FINAL; import static com.facebook.presto.byteCode.Access.PRIVATE; import static com.facebook.presto.byteCode.Access.PUBLIC; import static com.facebook.presto.byteCode.Access.STATIC; import 
static com.facebook.presto.byteCode.Access.a; import static com.facebook.presto.byteCode.Parameter.arg; import static com.facebook.presto.byteCode.ParameterizedType.type; import static com.facebook.presto.byteCode.expression.ByteCodeExpressions.add; import static com.facebook.presto.byteCode.expression.ByteCodeExpressions.constantInt; import static com.facebook.presto.byteCode.expression.ByteCodeExpressions.invokeStatic; import static com.facebook.presto.metadata.FunctionType.SCALAR; import static com.facebook.presto.metadata.Signature.internalScalarFunction; import static com.facebook.presto.spi.StandardErrorCode.INVALID_FUNCTION_ARGUMENT; import static com.facebook.presto.spi.type.VarcharType.VARCHAR; import static com.facebook.presto.sql.gen.CompilerUtils.defineClass; import static com.facebook.presto.util.ImmutableCollectors.toImmutableList; import static com.facebook.presto.util.Reflection.methodHandle; import static java.lang.Math.addExact; public final class ConcatFunction implements ParametricFunction { public static final ConcatFunction CONCAT = new ConcatFunction(); private static final Signature SIGNATURE = new Signature("concat", SCALAR, ImmutableList.of(), StandardTypes.VARCHAR, ImmutableList.of(StandardTypes.VARCHAR), true); @Override public Signature getSignature() { return SIGNATURE; } @Override public boolean isHidden() { return false; } @Override public boolean isDeterministic() { return true; } @Override public String getDescription() { return "concatenates given strings"; } @Override public FunctionInfo specialize(Map<String, Type> types, int arity, TypeManager typeManager, FunctionRegistry functionRegistry) { if (arity < 2) { throw new PrestoException(INVALID_FUNCTION_ARGUMENT, "There must be two or more concatenation arguments"); } Class<?> clazz = generateConcat(arity); MethodHandle methodHandle = methodHandle(clazz, "concat", Collections.nCopies(arity, Slice.class).toArray(new Class<?>[arity])); List<Boolean> nullableParameters = 
ImmutableList.copyOf(Collections.nCopies(arity, false)); Signature specializedSignature = internalScalarFunction(SIGNATURE.getName(), VARCHAR.getTypeSignature(), Collections.nCopies(arity, VARCHAR.getTypeSignature())); return new FunctionInfo(specializedSignature, getDescription(), isHidden(), methodHandle, isDeterministic(), false, nullableParameters); } private static Class<?> generateConcat(int arity) { ClassDefinition definition = new ClassDefinition( a(PUBLIC, FINAL), CompilerUtils.makeClassName("Concat" + arity + "ScalarFunction"), type(Object.class)); // Generate constructor definition.declareDefaultConstructor(a(PRIVATE)); // Generate concat() List<Parameter> parameters = IntStream.range(0, arity) .mapToObj(i -> arg("arg" + i, Slice.class)) .collect(toImmutableList()); MethodDefinition method = definition.declareMethod(a(PUBLIC, STATIC), "concat", type(Slice.class), parameters); Scope scope = method.getScope(); ByteCodeBlock body = method.getBody(); Variable length = scope.declareVariable(int.class, "length"); body.append(length.set(constantInt(0))); for (int i = 0; i < arity; ++i) { body.append(length.set(generateCheckedAdd(length, parameters.get(i).invoke("length", int.class)))); } Variable result = scope.declareVariable(Slice.class, "result"); body.append(result.set(invokeStatic(Slices.class, "allocate", Slice.class, length))); Variable position = scope.declareVariable(int.class, "position"); body.append(position.set(constantInt(0))); for (int i = 0; i < arity; ++i) { body.append(result.invoke("setBytes", void.class, position, parameters.get(i))); body.append(position.set(add(position, parameters.get(i).invoke("length", int.class)))); } body.getVariable(result) .retObject(); return defineClass(definition, Object.class, ImmutableMap.of(), new DynamicClassLoader(ConcatFunction.class.getClassLoader())); } private static ByteCodeExpression generateCheckedAdd(ByteCodeExpression x, ByteCodeExpression y) { return invokeStatic(ConcatFunction.class, "checkedAdd", 
int.class, x, y); } public static int checkedAdd(int x, int y) { try { return addExact(x, y); } catch (ArithmeticException e) { throw new PrestoException(INVALID_FUNCTION_ARGUMENT, "Concatenated string is too large"); } } }
apache-2.0
dvamedveda/b.savelev
chapter_006/src/test/java/ru/job4j/tracker/tracker/package-info.java
185
/** * Тесты пакета ru.job4j.tracker.tracker. * * @author - b.savelev (mailto: justmustdie@yandex.ru) * @version - 1.0 * @since 0.1 */ package ru.job4j.tracker.tracker;
apache-2.0
jiaphuan/models
research/syntaxnet/syntaxnet/util/resources_test.py
1527
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for resources.""" from tensorflow.python.platform import googletest from syntaxnet.util import resources class ResourcesTest(googletest.TestCase): """Testing rig.""" def testInvalidResource(self): for path in [ 'bad/path/to/no/file', 'syntaxnet/testdata', 'syntaxnet/testdata/context.pbtxt', ]: with self.assertRaises(IOError): resources.GetSyntaxNetResource(path) with self.assertRaises(IOError): resources.GetSyntaxNetResourceAsFile(path) def testValidResource(self): path = 'syntaxnet/testdata/hello.txt' self.assertEqual('hello world\n', resources.GetSyntaxNetResource(path)) with resources.GetSyntaxNetResourceAsFile(path) as resource_file: self.assertEqual('hello world\n', resource_file.read()) if __name__ == '__main__': googletest.main()
apache-2.0
cloudfoundry/bosh-agent
agent/action/migrate_disk.go
1206
package action import ( "errors" boshplatform "github.com/cloudfoundry/bosh-agent/platform" boshdirs "github.com/cloudfoundry/bosh-agent/settings/directories" bosherr "github.com/cloudfoundry/bosh-utils/errors" ) type MigrateDiskAction struct { platform boshplatform.Platform dirProvider boshdirs.Provider } func NewMigrateDisk( platform boshplatform.Platform, dirProvider boshdirs.Provider, ) (action MigrateDiskAction) { action.platform = platform action.dirProvider = dirProvider return } func (a MigrateDiskAction) IsAsynchronous(_ ProtocolVersion) bool { return true } func (a MigrateDiskAction) IsPersistent() bool { return false } func (a MigrateDiskAction) IsLoggable() bool { return true } func (a MigrateDiskAction) Run() (value interface{}, err error) { err = a.platform.MigratePersistentDisk(a.dirProvider.StoreDir(), a.dirProvider.StoreMigrationDir()) if err != nil { err = bosherr.WrapError(err, "Migrating persistent disk") return } value = map[string]string{} return } func (a MigrateDiskAction) Resume() (interface{}, error) { return nil, errors.New("not supported") } func (a MigrateDiskAction) Cancel() error { return errors.New("not supported") }
apache-2.0
bjornna/hapi-fhir
hapi-fhir-structures-hl7org-dstu2/src/main/java/org/hl7/fhir/instance/model/Base.java
5416
package org.hl7.fhir.instance.model; import java.io.Serializable; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import org.hl7.fhir.instance.model.api.IBase; import org.hl7.fhir.utilities.xhtml.XhtmlNode; public abstract class Base implements Serializable, IBase { /** * User appended data items - allow users to add extra information to the class */ private Map<String, Object> userData; /** * Round tracking xml comments for testing convenience */ private List<String> formatCommentsPre; /** * Round tracking xml comments for testing convenience */ private List<String> formatCommentsPost; public Object getUserData(String name) { if (userData == null) return null; return userData.get(name); } public void setUserData(String name, Object value) { if (userData == null) userData = new HashMap<String, Object>(); userData.put(name, value); } public void setUserDataINN(String name, Object value) { if (value == null) return; if (userData == null) userData = new HashMap<String, Object>(); userData.put(name, value); } public boolean hasUserData(String name) { if (userData == null) return false; else return userData.containsKey(name); } public String getUserString(String name) { return (String) getUserData(name); } public int getUserInt(String name) { if (!hasUserData(name)) return 0; return (Integer) getUserData(name); } public boolean hasFormatComment() { return (formatCommentsPre != null && !formatCommentsPre.isEmpty()) || (formatCommentsPost != null && !formatCommentsPost.isEmpty()); } public List<String> getFormatCommentsPre() { if (formatCommentsPre == null) formatCommentsPre = new ArrayList<String>(); return formatCommentsPre; } public List<String> getFormatCommentsPost() { if (formatCommentsPost == null) formatCommentsPost = new ArrayList<String>(); return formatCommentsPost; } protected abstract void listChildren(List<Property> result) ; /** * Supports iterating the children elements in some generic processor or browser 
* All defined children will be listed, even if they have no value on this instance * * Note that the actual content of primitive or xhtml elements is not iterated explicitly. * To find these, the processing code must recognise the element as a primitive, typecast * the value to a {@link Type}, and examine the value * * @return a list of all the children defined for this element */ public List<Property> children() { List<Property> result = new ArrayList<Property>(); listChildren(result); return result; } public Property getChildByName(String name) { List<Property> children = new ArrayList<Property>(); listChildren(children); for (Property c : children) if (c.getName().equals(name)) return c; return null; } public List<Base> listChildrenByName(String name) { List<Property> children = new ArrayList<Property>(); listChildren(children); for (Property c : children) if (c.getName().equals(name) || (c.getName().endsWith("[x]") && name.startsWith(c.getName()))) return c.getValues(); return new ArrayList<Base>(); } public boolean isEmpty() { return true; // userData does not count } public boolean equalsDeep(Base other) { return other != null; } public boolean equalsShallow(Base other) { return other != null; } public static boolean compareDeep(List<? extends Base> e1, List<? extends Base> e2, boolean allowNull) { if (noList(e1) && noList(e2) && allowNull) return true; if (noList(e1) || noList(e2)) return false; if (e1.size() != e2.size()) return false; for (int i = 0; i < e1.size(); i++) { if (!compareDeep(e1.get(i), e2.get(i), allowNull)) return false; } return true; } private static boolean noList(List<? 
extends Base> list) { return list == null || list.isEmpty(); } public static boolean compareDeep(Base e1, Base e2, boolean allowNull) { if (e1 == null && e2 == null && allowNull) return true; if (e1 == null || e2 == null) return false; return e1.equalsDeep(e2); } public static boolean compareDeep(XhtmlNode div1, XhtmlNode div2, boolean allowNull) { if (div1 == null && div2 == null && allowNull) return true; if (div1 == null || div2 == null) return false; return div1.equalsDeep(div2); } public static boolean compareValues(List<? extends PrimitiveType> e1, List<? extends PrimitiveType> e2, boolean allowNull) { if (e1 == null && e2 == null && allowNull) return true; if (e1 == null || e2 == null) return false; if (e1.size() != e2.size()) return false; for (int i = 0; i < e1.size(); i++) { if (!compareValues(e1.get(i), e2.get(i), allowNull)) return false; } return true; } public static boolean compareValues(PrimitiveType e1, PrimitiveType e2, boolean allowNull) { if (e1 == null && e2 == null && allowNull) return true; if (e1 == null || e2 == null) return false; return e1.equalsShallow(e2); } }
apache-2.0
hyperledger/fabric
gossip/util/grpc.go
2351
/* Copyright IBM Corp. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package util import ( "crypto/tls" "crypto/x509" "fmt" "net" "strconv" "time" "github.com/hyperledger/fabric/common/crypto/tlsgen" "github.com/hyperledger/fabric/gossip/api" "github.com/hyperledger/fabric/gossip/common" "github.com/hyperledger/fabric/internal/pkg/comm" "google.golang.org/grpc" "google.golang.org/grpc/credentials" ) // CA that generates TLS key-pairs var ca = createCAOrPanic() func createCAOrPanic() tlsgen.CA { ca, err := tlsgen.NewCA() if err != nil { panic(fmt.Sprintf("failed creating CA: %+v", err)) } return ca } // CreateGRPCLayer returns a new gRPC server with associated port, TLS certificates, SecureDialOpts and DialOption func CreateGRPCLayer() (port int, gRPCServer *comm.GRPCServer, certs *common.TLSCertificates, secureDialOpts api.PeerSecureDialOpts, dialOpts []grpc.DialOption) { serverKeyPair, err := ca.NewServerCertKeyPair("127.0.0.1") if err != nil { panic(err) } clientKeyPair, err := ca.NewClientCertKeyPair() if err != nil { panic(err) } tlsServerCert, err := tls.X509KeyPair(serverKeyPair.Cert, serverKeyPair.Key) if err != nil { panic(err) } tlsClientCert, err := tls.X509KeyPair(clientKeyPair.Cert, clientKeyPair.Key) if err != nil { panic(err) } tlsConf := &tls.Config{ Certificates: []tls.Certificate{tlsClientCert}, ClientAuth: tls.RequestClientCert, RootCAs: x509.NewCertPool(), } tlsConf.RootCAs.AppendCertsFromPEM(ca.CertBytes()) ta := credentials.NewTLS(tlsConf) dialOpts = append(dialOpts, grpc.WithTransportCredentials(ta)) secureDialOpts = func() []grpc.DialOption { return dialOpts } certs = &common.TLSCertificates{} certs.TLSServerCert.Store(&tlsServerCert) certs.TLSClientCert.Store(&tlsClientCert) srvConfig := comm.ServerConfig{ ConnectionTimeout: time.Second, SecOpts: comm.SecureOptions{ Key: serverKeyPair.Key, Certificate: serverKeyPair.Cert, UseTLS: true, }, } gRPCServer, err = comm.NewGRPCServer("127.0.0.1:", srvConfig) if err != nil { panic(err) 
} _, portString, err := net.SplitHostPort(gRPCServer.Address()) if err != nil { panic(err) } portInt, err := strconv.Atoi(portString) if err != nil { panic(err) } return portInt, gRPCServer, certs, secureDialOpts, dialOpts }
apache-2.0
fengshao0907/Open-Source-Research
iBATIS/test/com/mydomain/data/IbatisTest.java
3646
package com.mydomain.data;

import com.ibatis.sqlmap.client.SqlMapClient;
import com.ibatis.sqlmap.client.SqlMapClientBuilder;
import com.ibatis.common.resources.Resources;
import com.mydomain.domain.Account;

import java.io.Reader;
import java.io.IOException;
import java.util.List;
import java.sql.SQLException;

/**
 * This is not a best practices class. It's just an example to give you an idea
 * of how iBATIS works. For a more complete example, see JPetStore 5.0 at
 * http://www.ibatis.com.
 */
public class IbatisTest {

  /**
   * SqlMapClient instances are thread safe, so you only need one. In this
   * case, we'll use a static singleton. So sue me. ;-)
   */
  private static SqlMapClient sqlMapper;

  /**
   * It's not a good idea to put code that can fail in a class initializer,
   * but for sake of argument, here's how you configure an SQL Map.
   */
  static {
    try {
      Reader reader = Resources.getResourceAsReader("com/mydomain/data/SqlMapConfig.xml");
      try {
        sqlMapper = SqlMapClientBuilder.buildSqlMapClient(reader);
      } finally {
        // Close the reader even when buildSqlMapClient() throws; the
        // previous version leaked it on any build failure.
        reader.close();
      }
      System.out.println("sqlMapper=" + sqlMapper);
    } catch (IOException e) {
      // Fail fast.
      throw new RuntimeException("Something bad happened while building the SqlMapClient instance." + e, e);
    }
  }

  /** Returns every account via the "selectAllAccounts" mapped statement. */
  public static List<?> selectAllAccounts() throws SQLException {
    return sqlMapper.queryForList("selectAllAccounts");
  }

  /** Returns the account with the given id, or null if none matches. */
  public static Account selectAccountById(int id) throws SQLException {
    return (Account) sqlMapper.queryForObject("selectAccountById", id);
  }

  /** Inserts the given account using the singleton client. */
  public static void insertAccount(Account account) throws SQLException {
    sqlMapper.insert("insertAccount", account);
  }

  /** Updates the given account using the singleton client. */
  public static void updateAccount(Account account) throws SQLException {
    sqlMapper.update("updateAccount", account);
  }

  /** Deletes the account with the given id via "deleteAccountById". */
  public static void deleteAccount(int id) throws SQLException {
    sqlMapper.delete("deleteAccountById", id);
  }

  /** Overload that inserts through an explicitly supplied client. */
  public static void insertAccount(SqlMapClient sqlMapper, Account account) throws SQLException {
    sqlMapper.insert("insertAccount", account);
  }

  /** Overload that selects all accounts through an explicitly supplied client. */
  public static List<?> selectAllAccounts(SqlMapClient sqlMapper) throws SQLException {
    return sqlMapper.queryForList("selectAllAccounts");
  }

  /** Overload that selects one account through an explicitly supplied client. */
  public static Account selectAccountById(SqlMapClient sqlMapper, int id) throws SQLException {
    return (Account) sqlMapper.queryForObject("selectAccountById", id);
  }

  /**
   * Demo entry point: inserts a hard-coded account (id=2) into the database
   * configured by SqlMapConfig.xml. Note this performs a real INSERT.
   */
  public static void main(String[] args) throws Throwable {
    Account account = new Account();
    account.setId(2);
    account.setFirstName("a");
    account.setLastName("b");
    account.setEmailAddress("c");
    insertAccount(sqlMapper, account);
  }
}
apache-2.0
huguesv/PTVS
Python/Product/Debugger.Concord/ValueStore.cs
1922
// Python Tools for Visual Studio
// Copyright(c) Microsoft Corporation
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the License); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
// OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
// IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABILITY OR NON-INFRINGEMENT.
//
// See the Apache Version 2.0 License for specific language governing
// permissions and limitations under the License.

namespace Microsoft.PythonTools.Debugger.Concord {
    /// <summary>
    /// Represents a stored value, with a potentially non-idempotent (if the backing store changes) and potentially expensive retrieval operation.
    /// </summary>
    internal interface IValueStore {
        /// <summary>
        /// Read the stored value.
        /// </summary>
        /// <remarks>
        /// This operation is not idempotent, and can be expensive - don't repeatedly call on the same proxy unless deliberately trying to obtain a fresh value.
        /// </remarks>
        object Read();
    }

    /// <summary>
    /// Represents a stored typed value.
    /// </summary>
    internal interface IValueStore<out T> : IValueStore {
        // Shadows the untyped IValueStore.Read with a covariant, typed result.
        new T Read();
    }

    /// <summary>
    /// A simple implementation of <see cref="IValueStore"/> which simply wraps the provided value.
    /// </summary>
    internal class ValueStore<T> : IValueStore<T> {
        private readonly T _value;

        public ValueStore(T value) {
            _value = value;
        }

        public T Read() {
            return _value;
        }

        // Explicit interface implementation forwards to the typed Read().
        object IValueStore.Read() {
            return Read();
        }
    }
}
apache-2.0
williamchengit/TestRepo
solr-4.9.0/lucene/misc/src/java/org/apache/lucene/index/sorter/SortingMergePolicy.java
8074
package org.apache.lucene.index.sorter;

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.apache.lucene.analysis.Analyzer; // javadocs
import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.MergeState;
import org.apache.lucene.index.MergeTrigger;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.SegmentInfo;
import org.apache.lucene.index.SegmentCommitInfo;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.index.SegmentReader;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.search.Sort;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.packed.MonotonicAppendingLongBuffer;

/** A {@link MergePolicy} that reorders documents according to a {@link Sort}
 *  before merging them. As a consequence, all segments resulting from a merge
 *  will be sorted while segments resulting from a flush will be in the order
 *  in which documents have been added.
 *  <p><b>NOTE</b>: Never use this policy if you rely on
 *  {@link IndexWriter#addDocuments(Iterable, Analyzer) IndexWriter.addDocuments}
 *  to have sequentially-assigned doc IDs, this policy will scatter doc IDs.
 *  <p><b>NOTE</b>: This policy should only be used with idempotent {@code Sort}s
 *  so that the order of segments is predictable. For example, using
 *  {@link Sort#INDEXORDER} in reverse (which is not idempotent) will make
 *  the order of documents in a segment depend on the number of times the segment
 *  has been merged.
 *  @lucene.experimental */
public final class SortingMergePolicy extends MergePolicy {

  /**
   * Put in the {@link SegmentInfo#getDiagnostics() diagnostics} to denote that
   * this segment is sorted.
   */
  public static final String SORTER_ID_PROP = "sorter";

  /**
   * A {@link OneMerge} that lazily wraps the readers to merge in a single
   * sorted view. The sort is computed at most once, on the first call to
   * {@link #getMergeReaders()}.
   */
  class SortingOneMerge extends OneMerge {

    // Readers as returned by the superclass, before sorting; populated lazily.
    List<AtomicReader> unsortedReaders;
    // Mapping from old doc IDs to sorted doc IDs; null means already sorted.
    Sorter.DocMap docMap;
    // Single sorted reader spanning all unsorted readers.
    AtomicReader sortedView;

    SortingOneMerge(List<SegmentCommitInfo> segments) {
      super(segments);
    }

    @Override
    public List<AtomicReader> getMergeReaders() throws IOException {
      if (unsortedReaders == null) {
        unsortedReaders = super.getMergeReaders();
        // Combine all readers into one atomic view so the sorter sees a
        // single contiguous doc ID space.
        final AtomicReader atomicView;
        if (unsortedReaders.size() == 1) {
          atomicView = unsortedReaders.get(0);
        } else {
          final IndexReader multiReader = new MultiReader(unsortedReaders.toArray(new AtomicReader[unsortedReaders.size()]));
          atomicView = SlowCompositeReaderWrapper.wrap(multiReader);
        }
        docMap = sorter.sort(atomicView);
        sortedView = SortingAtomicReader.wrap(atomicView, docMap);
      }
      // a null doc map means that the readers are already sorted
      return docMap == null ? unsortedReaders : Collections.singletonList(sortedView);
    }

    @Override
    public void setInfo(SegmentCommitInfo info) {
      // Tag the merged segment's diagnostics so isSorted() can recognize it.
      Map<String,String> diagnostics = info.info.getDiagnostics();
      diagnostics.put(SORTER_ID_PROP, sorter.getID());
      super.setInfo(info);
    }

    /**
     * For each live doc (in reader concatenation order), records the number of
     * deleted docs that precede it — used to translate doc IDs between the
     * with-deletes and without-deletes spaces.
     */
    private MonotonicAppendingLongBuffer getDeletes(List<AtomicReader> readers) {
      MonotonicAppendingLongBuffer deletes = new MonotonicAppendingLongBuffer();
      int deleteCount = 0;
      for (AtomicReader reader : readers) {
        final int maxDoc = reader.maxDoc();
        final Bits liveDocs = reader.getLiveDocs();
        for (int i = 0; i < maxDoc; ++i) {
          if (liveDocs != null && !liveDocs.get(i)) {
            ++deleteCount;
          } else {
            // One entry per live doc: how many deletions came before it.
            deletes.add(deleteCount);
          }
        }
      }
      deletes.freeze();
      return deletes;
    }

    @Override
    public MergePolicy.DocMap getDocMap(final MergeState mergeState) {
      if (unsortedReaders == null) {
        // getMergeReaders() must run first — it computes docMap.
        throw new IllegalStateException();
      }
      if (docMap == null) {
        // Input was already sorted; the default mapping applies.
        return super.getDocMap(mergeState);
      }
      assert mergeState.docMaps.length == 1; // we returned a singleton reader
      final MonotonicAppendingLongBuffer deletes = getDeletes(unsortedReaders);
      return new MergePolicy.DocMap() {
        @Override
        public int map(int old) {
          // live-doc ID -> with-deletes ID -> sorted ID -> merged-segment ID
          final int oldWithDeletes = old + (int) deletes.get(old);
          final int newWithDeletes = docMap.oldToNew(oldWithDeletes);
          return mergeState.docMaps[0].get(newWithDeletes);
        }
      };
    }

  }

  /** A {@link MergeSpecification} that wraps every added merge in a {@link SortingOneMerge}. */
  class SortingMergeSpecification extends MergeSpecification {

    @Override
    public void add(OneMerge merge) {
      super.add(new SortingOneMerge(merge.segments));
    }

    @Override
    public String segString(Directory dir) {
      return "SortingMergeSpec(" + super.segString(dir) + ", sorter=" + sorter + ")";
    }

  }

  /** Returns {@code true} if the given {@code reader} is sorted by the specified {@code sort}. */
  public static boolean isSorted(AtomicReader reader, Sort sort) {
    if (reader instanceof SegmentReader) {
      final SegmentReader segReader = (SegmentReader) reader;
      // A segment is considered sorted iff its diagnostics carry the marker
      // written by SortingOneMerge.setInfo with a matching sort description.
      final Map<String, String> diagnostics = segReader.getSegmentInfo().info.getDiagnostics();
      if (diagnostics != null && sort.toString().equals(diagnostics.get(SORTER_ID_PROP))) {
        return true;
      }
    }
    return false;
  }

  /** Rewraps a delegate-produced specification so its merges sort documents; null-safe. */
  private MergeSpecification sortedMergeSpecification(MergeSpecification specification) {
    if (specification == null) {
      return null;
    }
    MergeSpecification sortingSpec = new SortingMergeSpecification();
    for (OneMerge merge : specification.merges) {
      sortingSpec.add(merge);
    }
    return sortingSpec;
  }

  // Delegate policy that decides WHICH segments to merge; this class only
  // controls HOW the selected segments are merged (sorted).
  final MergePolicy in;
  final Sorter sorter;
  final Sort sort;

  /** Create a new {@code MergePolicy} that sorts documents with the given {@code sort}. */
  public SortingMergePolicy(MergePolicy in, Sort sort) {
    this.in = in;
    this.sorter = new Sorter(sort);
    this.sort = sort;
  }

  @Override
  public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos, IndexWriter writer) throws IOException {
    return sortedMergeSpecification(in.findMerges(mergeTrigger, segmentInfos, writer));
  }

  @Override
  public MergeSpecification findForcedMerges(SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge, IndexWriter writer) throws IOException {
    return sortedMergeSpecification(in.findForcedMerges(segmentInfos, maxSegmentCount, segmentsToMerge, writer));
  }

  @Override
  public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos, IndexWriter writer) throws IOException {
    return sortedMergeSpecification(in.findForcedDeletesMerges(segmentInfos, writer));
  }

  @Override
  public void close() {
    in.close();
  }

  @Override
  public boolean useCompoundFile(SegmentInfos segments, SegmentCommitInfo newSegment, IndexWriter writer) throws IOException {
    return in.useCompoundFile(segments, newSegment, writer);
  }

  @Override
  public String toString() {
    return "SortingMergePolicy(" + in + ", sorter=" + sorter + ")";
  }

}
apache-2.0
haveal/googleads-python-lib
examples/dfp/v201411/content_metadata_key_hierarchy_service/create_content_metadata_key_hierarchies.py
2795
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This code example creates new content metadata key hierarchies.

To determine which content metadata key hierarchies exist, run
get_all_content_metadata_key_hierarchies.py. This feature is only available to
DFP video publishers.

The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""

import uuid

# Import appropriate modules from the client library.
from googleads import dfp

# Set the IDs of the custom targeting keys for the hierarchy.
HIERARCHY_LEVEL_ONE_KEY_ID = 'INSERT_LEVEL_ONE_CUSTOM_TARGETING_KEY_ID_HERE'
HIERARCHY_LEVEL_TWO_KEY_ID = 'INSERT_LEVEL_TWO_CUSTOM_TARGETING_KEY_ID_HERE'


def main(client, hierarchy_level_one_key_id, hierarchy_level_two_key_id):
  # Fetch the service used to manage content metadata key hierarchies.
  service = client.GetService(
      'ContentMetadataKeyHierarchyService', version='v201411')

  # Build one level descriptor per targeting key; levels are 1-based and the
  # API expects the level number as a string.
  key_ids = (hierarchy_level_one_key_id, hierarchy_level_two_key_id)
  hierarchy_levels = [
      {'customTargetingKeyId': key_id, 'hierarchyLevel': str(level)}
      for level, key_id in enumerate(key_ids, start=1)
  ]

  # Assemble the hierarchy with a unique, human-readable name.
  hierarchy = {
      'name': 'Content Metadata Key Hierarchy #%s' % uuid.uuid4(),
      'hierarchyLevels': hierarchy_levels
  }

  created_hierarchies = service.createContentMetadataKeyHierarchies(
      [hierarchy])

  # Report what was created.
  for created in created_hierarchies:
    print ('Content metadata key hierarchy with id \'%s\' and name \'%s\''
           ' was created.' % (created['id'], created['name']))

if __name__ == '__main__':
  # Initialize client object.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, HIERARCHY_LEVEL_ONE_KEY_ID, HIERARCHY_LEVEL_TWO_KEY_ID)
apache-2.0
porcelli-forks/uberfire
uberfire-extensions/uberfire-social-activities/uberfire-social-activities-backend/src/main/java/org/ext/uberfire/social/activities/server/SocialActivitiesServer.java
1742
/* * Copyright 2015 Red Hat, Inc. and/or its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.ext.uberfire.social.activities.server; import javax.enterprise.context.ApplicationScoped; import javax.inject.Inject; import org.ext.uberfire.social.activities.model.SocialActivitiesEvent; import org.ext.uberfire.social.activities.repository.SocialTimeLineRepository; import org.ext.uberfire.social.activities.service.SocialActivitiesAPI; import org.jboss.errai.bus.server.annotations.Service; @Service @ApplicationScoped public class SocialActivitiesServer implements SocialActivitiesAPI { @Inject SocialTimeLineRepository timeLineRepository; public SocialActivitiesServer() { } public SocialActivitiesServer(SocialTimeLineRepository timeLineRepository) { this.timeLineRepository = timeLineRepository; } @Override public void register(SocialActivitiesEvent event) { registerTypeEvent(event); registerEventUserTimeLine(event); } private void registerEventUserTimeLine(SocialActivitiesEvent event) { timeLineRepository.saveUserEvent(event); } private void registerTypeEvent(SocialActivitiesEvent event) { timeLineRepository.saveTypeEvent(event); } }
apache-2.0
mpimenov/omim
android/src/com/mapswithme/maps/location/GPSCheck.java
986
package com.mapswithme.maps.location; import android.content.BroadcastReceiver; import android.content.Context; import android.content.Intent; import com.mapswithme.maps.MwmApplication; import com.mapswithme.util.log.Logger; import com.mapswithme.util.log.LoggerFactory; import static com.mapswithme.maps.MwmApplication.backgroundTracker; public class GPSCheck extends BroadcastReceiver { private static final Logger LOGGER = LoggerFactory.INSTANCE.getLogger(LoggerFactory.Type.LOCATION); private static final String TAG = GPSCheck.class.getSimpleName(); @Override public void onReceive(Context context, Intent intent) { String msg = "onReceive: " + intent + " app in background = " + !backgroundTracker(context).isForeground(); LOGGER.i(TAG, msg); if (MwmApplication.from(context).arePlatformAndCoreInitialized() && MwmApplication.backgroundTracker(context).isForeground()) { LocationHelper.INSTANCE.restart(); } } }
apache-2.0
haoyanjun21/jstorm
jstorm-utility/jstorm-flux/flux-core/src/main/java/com/alibaba/jstorm/flux/Flux.java
11399
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.alibaba.jstorm.flux;

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.StormSubmitter;
import backtype.storm.generated.StormTopology;
import backtype.storm.generated.SubmitOptions;
import backtype.storm.generated.TopologyInitialStatus;
import backtype.storm.utils.Utils;
import com.alibaba.jstorm.flux.model.ExecutionContext;
import com.alibaba.jstorm.flux.model.SpoutDef;
import com.alibaba.jstorm.flux.parser.FluxParser;
import org.apache.commons.cli.*;
import com.alibaba.jstorm.flux.model.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.*;

/**
 * Flux entry point.
 * <p>
 * Parses a YAML topology definition, builds the corresponding
 * {@link StormTopology}, and either deploys it to a remote cluster or runs it
 * in a local in-process cluster, depending on the command-line options.
 */
public class Flux {
    private static final Logger LOG = LoggerFactory.getLogger(Flux.class);

    // Default time a local-mode topology runs before being killed.
    private static final Long DEFAULT_LOCAL_SLEEP_TIME = 60000l;

    private static final Long DEFAULT_ZK_PORT = 2181l;

    // Long option names; the single-letter short forms are bound in main().
    private static final String OPTION_LOCAL = "local";
    private static final String OPTION_REMOTE = "remote";
    private static final String OPTION_RESOURCE = "resource";
    private static final String OPTION_SLEEP = "sleep";
    private static final String OPTION_DRY_RUN = "dry-run";
    private static final String OPTION_NO_DETAIL = "no-detail";
    private static final String OPTION_NO_SPLASH = "no-splash";
    private static final String OPTION_INACTIVE = "inactive";
    private static final String OPTION_ZOOKEEPER = "zookeeper";
    private static final String OPTION_FILTER = "filter";
    private static final String OPTION_ENV_FILTER = "env-filter";

    /**
     * CLI entry point: registers all options, parses the command line, and
     * exits with status 1 (after printing usage) unless exactly one
     * positional argument — the topology YAML path — is supplied.
     */
    public static void main(String[] args) throws Exception {
        Options options = new Options();

        options.addOption(option(0, "l", OPTION_LOCAL, "Run the topology in local mode."));

        options.addOption(option(0, "r", OPTION_REMOTE, "Deploy the topology to a remote cluster."));

        options.addOption(option(0, "R", OPTION_RESOURCE, "Treat the supplied path as a classpath resource instead of a file."));

        options.addOption(option(1, "s", OPTION_SLEEP, "ms", "When running locally, the amount of time to sleep (in ms.) " +
                "before killing the topology and shutting down the local cluster."));

        options.addOption(option(0, "d", OPTION_DRY_RUN, "Do not run or deploy the topology. Just build, validate, " +
                "and print information about the topology."));

        options.addOption(option(0, "q", OPTION_NO_DETAIL, "Suppress the printing of topology details."));

        options.addOption(option(0, "n", OPTION_NO_SPLASH, "Suppress the printing of the splash screen."));

        options.addOption(option(0, "i", OPTION_INACTIVE, "Deploy the topology, but do not activate it."));

        options.addOption(option(1, "z", OPTION_ZOOKEEPER, "host:port", "When running in local mode, use the ZooKeeper at the " +
                "specified <host>:<port> instead of the in-process ZooKeeper. (requires Storm 0.9.3 or later)"));

        options.addOption(option(1, "f", OPTION_FILTER, "file", "Perform property substitution. Use the specified file " +
                "as a source of properties, and replace keys identified with {$[property name]} with the value defined " +
                "in the properties file."));

        options.addOption(option(0, "e", OPTION_ENV_FILTER, "Perform environment variable substitution. Replace keys" +
                "identified with `${ENV-[NAME]}` will be replaced with the corresponding `NAME` environment value"));

        CommandLineParser parser = new BasicParser();
        CommandLine cmd = parser.parse(options, args);

        if (cmd.getArgs().length != 1) {
            usage(options);
            System.exit(1);
        }
        runCli(cmd);
    }

    /** Shorthand for option() where the argument name equals the long name. */
    private static Option option(int argCount, String shortName, String longName, String description){
        return option(argCount, shortName, longName, longName, description);
    }

    /** Builds a Commons CLI Option with the given arity, names, and help text. */
    private static Option option(int argCount, String shortName, String longName, String argName, String description){
        Option option = OptionBuilder.hasArgs(argCount)
                .withArgName(argName)
                .withLongOpt(longName)
                .withDescription(description)
                .create(shortName);
        return option;
    }

    /** Prints command-line usage/help for all registered options. */
    private static void usage(Options options) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("storm jar <my_topology_uber_jar.jar> " +
                Flux.class.getName() +
                " [options] <topology-config.yaml>",
                options);
    }

    /**
     * Executes the parsed command line: parse the topology definition, build
     * the topology, then (unless --dry-run) deploy remotely or run locally.
     */
    private static void runCli(CommandLine cmd)throws Exception {
        if(!cmd.hasOption(OPTION_NO_SPLASH)) {
            printSplash();
        }

        // NOTE(review): "dump-yaml" is never registered in the Options built
        // by main(), so this appears to always be false — confirm intent.
        boolean dumpYaml = cmd.hasOption("dump-yaml");

        TopologyDef topologyDef = null;
        // The single positional argument validated in main().
        String filePath = (String)cmd.getArgList().get(0);

        // TODO conditionally load properties from a file our resource
        String filterProps = null;
        if(cmd.hasOption(OPTION_FILTER)){
            filterProps = cmd.getOptionValue(OPTION_FILTER);
        }

        boolean envFilter = cmd.hasOption(OPTION_ENV_FILTER);
        // -R treats the path as a classpath resource instead of a file.
        if(cmd.hasOption(OPTION_RESOURCE)){
            printf("Parsing classpath resource: %s", filePath);
            topologyDef = FluxParser.parseResource(filePath, dumpYaml, true, filterProps, envFilter);
        } else {
            printf("Parsing file: %s", new File(filePath).getAbsolutePath());
            topologyDef = FluxParser.parseFile(filePath, dumpYaml, true, filterProps, envFilter);
        }

        String topologyName = topologyDef.getName();
        // merge contents of `config` into topology config
        Config conf = FluxBuilder.buildConfig(topologyDef);
        ExecutionContext context = new ExecutionContext(topologyDef, conf);
        StormTopology topology = FluxBuilder.buildTopology(context);

        if(!cmd.hasOption(OPTION_NO_DETAIL)){
            printTopologyInfo(context);
        }

        if(!cmd.hasOption(OPTION_DRY_RUN)) {
            if (cmd.hasOption(OPTION_REMOTE)) {
                LOG.info("Running remotely...");
                try {
                    // should the topology be active or inactive
                    SubmitOptions submitOptions = null;
                    if(cmd.hasOption(OPTION_INACTIVE)){
                        LOG.info("Deploying topology in an INACTIVE state...");
                        submitOptions = new SubmitOptions(TopologyInitialStatus.INACTIVE);
                    } else {
                        LOG.info("Deploying topology in an ACTIVE state...");
                        submitOptions = new SubmitOptions(TopologyInitialStatus.ACTIVE);
                    }
                    // Remote submission failures are logged, not rethrown.
                    StormSubmitter.submitTopology(topologyName, conf, topology, submitOptions);
                } catch (Exception e) {
                    LOG.warn("Unable to deploy topology to remote cluster.", e);
                }
            } else {
                LOG.info("Running in local mode...");

                String sleepStr = cmd.getOptionValue(OPTION_SLEEP);
                Long sleepTime = DEFAULT_LOCAL_SLEEP_TIME;
                if (sleepStr != null) {
                    sleepTime = Long.parseLong(sleepStr);
                }
                LOG.debug("Sleep time: {}", sleepTime);
                LocalCluster cluster = null;

                // in-process or external zookeeper
                if(cmd.hasOption(OPTION_ZOOKEEPER)){
                    String zkStr = cmd.getOptionValue(OPTION_ZOOKEEPER);
                    LOG.info("Using ZooKeeper at '{}' instead of in-process one.", zkStr);
                    long zkPort = DEFAULT_ZK_PORT;
                    String zkHost = null;
                    if(zkStr.contains(":")){
                        String[] hostPort = zkStr.split(":");
                        zkHost = hostPort[0];
                        zkPort = hostPort.length > 1 ? Long.parseLong(hostPort[1]) : DEFAULT_ZK_PORT;
                    } else {
                        zkHost = zkStr;
                    }
                    // the following constructor is only available in 0.9.3 and later
                    /*
                    try {
                        cluster = new LocalCluster(zkHost, zkPort);
                    } catch (NoSuchMethodError e){
                        LOG.error("The --zookeeper option can only be used with Apache Storm 0.9.3 and later.");
                        System.exit(1);
                    }*/
                    // jstorm does not support an external ZooKeeper here;
                    // the option is accepted but hard-exits.
                    LOG.error("sorry, jstorm don't support this operation!!!");
                    System.exit(1);
                } else {
                    cluster = new LocalCluster();
                }
                // Run for sleepTime ms, then kill the topology and shut down.
                cluster.submitTopology(topologyName, conf, topology);

                Utils.sleep(sleepTime);
                cluster.killTopology(topologyName);
                cluster.shutdown();
            }
        }
    }

    /** Prints spout/bolt/stream details for a DSL-defined topology to stdout. */
    static void printTopologyInfo(ExecutionContext ctx){
        TopologyDef t = ctx.getTopologyDef();
        if(t.isDslTopology()) {
            print("---------- TOPOLOGY DETAILS ----------");

            printf("Topology Name: %s", t.getName());
            print("--------------- SPOUTS ---------------");
            for (SpoutDef s : t.getSpouts()) {
                printf("%s [%d] (%s)", s.getId(), s.getParallelism(), s.getClassName());
            }
            print("---------------- BOLTS ---------------");
            for (BoltDef b : t.getBolts()) {
                printf("%s [%d] (%s)", b.getId(), b.getParallelism(), b.getClassName());
            }

            print("--------------- STREAMS ---------------");
            for (StreamDef sd : t.getStreams()) {
                printf("%s --%s--> %s", sd.getFrom(), sd.getGrouping().getType(), sd.getTo());
            }
            print("--------------------------------------");
        }
    }

    // save a little typing
    private static void printf(String format, Object... args){
        print(String.format(format, args));
    }

    private static void print(String string){
        System.out.println(string);
    }

    /**
     * Prints the ASCII-art banner from /splash.txt, if present on the
     * classpath. NOTE(review): the streams are never closed; harmless for a
     * short-lived CLI process, but worth confirming.
     */
    private static void printSplash() throws IOException {
        // banner
        InputStream is = Flux.class.getResourceAsStream("/splash.txt");
        if(is != null){
            InputStreamReader isr = new InputStreamReader(is, "UTF-8");
            BufferedReader br = new BufferedReader(isr);
            String line = null;
            while((line = br.readLine()) != null){
                System.out.println(line);
            }
        }
    }
}
apache-2.0
asurve/incubator-systemml
src/main/java/org/apache/sysml/runtime/codegen/CodegenUtils.java
10677
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.sysml.runtime.codegen; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.net.URL; import java.net.URLClassLoader; import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.Map.Entry; import java.util.concurrent.ConcurrentHashMap; import javax.tools.Diagnostic; import javax.tools.Diagnostic.Kind; import javax.tools.DiagnosticCollector; import javax.tools.JavaCompiler; import javax.tools.JavaCompiler.CompilationTask; import javax.tools.JavaFileObject; import javax.tools.StandardJavaFileManager; import javax.tools.ToolProvider; import org.apache.commons.io.IOUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.sysml.api.DMLScript; import org.apache.sysml.hops.codegen.SpoofCompiler; import org.apache.sysml.hops.codegen.SpoofCompiler.CompilerType; import org.apache.sysml.runtime.DMLRuntimeException; import org.apache.sysml.runtime.codegen.SpoofOperator.SideInput; import org.apache.sysml.runtime.codegen.SpoofOperator.SideInputSparseCell; import org.apache.sysml.runtime.io.IOUtilFunctions; import org.apache.sysml.runtime.matrix.data.MatrixBlock; import 
org.apache.sysml.runtime.util.LocalFileUtils; import org.apache.sysml.utils.Statistics; import org.codehaus.janino.SimpleCompiler; public class CodegenUtils { private static final Log LOG = LogFactory.getLog(CodegenUtils.class.getName()); //cache to reuse compiled and loaded classes private static ConcurrentHashMap<String, Class<?>> _cache = new ConcurrentHashMap<>(); //janino-specific map of source code transfer/recompile on-demand private static ConcurrentHashMap<String, String> _src = new ConcurrentHashMap<>(); //javac-specific working directory for src/class files private static String _workingDir = null; public static Class<?> compileClass(String name, String src) throws DMLRuntimeException { //reuse existing compiled class Class<?> ret = _cache.get(name); if( ret != null ) return ret; long t0 = DMLScript.STATISTICS ? System.nanoTime() : 0; //compile java source w/ specific compiler if( SpoofCompiler.JAVA_COMPILER == CompilerType.JANINO ) ret = compileClassJanino(name, src); else ret = compileClassJavac(name, src); //keep compiled class for reuse _cache.put(name, ret); if( DMLScript.STATISTICS ) { Statistics.incrementCodegenClassCompile(); Statistics.incrementCodegenClassCompileTime(System.nanoTime()-t0); } return ret; } public static Class<?> getClass(String name) throws DMLRuntimeException { return getClass(name, null); } public synchronized static Class<?> getClassSync(String name, byte[] classBytes) throws DMLRuntimeException { //In order to avoid anomalies of concurrently compiling and loading the same //class with the same name multiple times in spark executors, this indirection //synchronizes the class compilation. This synchronization leads to the first //thread compiling the common class and all other threads simply reusing the //cached class instance, which also ensures that the same class is not loaded //multiple times which causes unnecessary JIT compilation overhead. 
return getClass(name, classBytes); } public static Class<?> getClass(String name, byte[] classBytes) throws DMLRuntimeException { //reuse existing compiled class Class<?> ret = _cache.get(name); if( ret != null ) return ret; //get class in a compiler-specific manner if( SpoofCompiler.JAVA_COMPILER == CompilerType.JANINO ) ret = compileClassJanino(name, new String(classBytes)); else ret = loadFromClassFile(name, classBytes); //keep loaded class for reuse _cache.put(name, ret); return ret; } public static byte[] getClassData(String name) throws DMLRuntimeException { //get class in a compiler-specific manner if( SpoofCompiler.JAVA_COMPILER == CompilerType.JANINO ) return _src.get(name).getBytes(); else return getClassAsByteArray(name); } public static void clearClassCache() { _cache.clear(); _src.clear(); } public static void clearClassCache(Class<?> cla) { //one-pass, in-place filtering of class cache Iterator<Entry<String,Class<?>>> iter = _cache.entrySet().iterator(); while( iter.hasNext() ) if( iter.next().getValue()==cla ) iter.remove(); } public static SpoofOperator createInstance(Class<?> cla) throws DMLRuntimeException { SpoofOperator ret = null; try { ret = (SpoofOperator) cla.newInstance(); } catch( Exception ex ) { throw new DMLRuntimeException(ex); } return ret; } public static SideInput createSideInput(MatrixBlock in) { SideInput ret = (in.isInSparseFormat() || !in.isAllocated()) ? new SideInput(null, in, in.getNumColumns()) : new SideInput(in.getDenseBlock(), null, in.getNumColumns()); return (ret.mdat != null) ? 
new SideInputSparseCell(ret) : ret; } //////////////////////////// //JANINO-specific methods (used for spark environments) private static Class<?> compileClassJanino(String name, String src) throws DMLRuntimeException { try { //compile source code SimpleCompiler compiler = new SimpleCompiler(); compiler.cook(src); //keep source code for later re-construction _src.put(name, src); //load compile class return compiler.getClassLoader() .loadClass(name); } catch(Exception ex) { LOG.error("Failed to compile class "+name+": \n"+src); throw new DMLRuntimeException("Failed to compile class "+name+".", ex); } } //////////////////////////// //JAVAC-specific methods (used for hadoop environments) private static Class<?> compileClassJavac(String name, String src) throws DMLRuntimeException { try { //create working dir on demand if( _workingDir == null ) createWorkingDir(); //write input file (for debugging / classpath handling) File ftmp = new File(_workingDir+"/"+name.replace(".", "/")+".java"); if( !ftmp.getParentFile().exists() ) ftmp.getParentFile().mkdirs(); LocalFileUtils.writeTextFile(ftmp, src); //get system java compiler JavaCompiler compiler = ToolProvider.getSystemJavaCompiler(); if( compiler == null ) throw new RuntimeException("Unable to obtain system java compiler."); //prepare file manager DiagnosticCollector<JavaFileObject> diagnostics = new DiagnosticCollector<>(); StandardJavaFileManager fileManager = compiler.getStandardFileManager(diagnostics, null, null); //prepare input source code Iterable<? 
extends JavaFileObject> sources = fileManager .getJavaFileObjectsFromFiles(Arrays.asList(ftmp)); //prepare class path URL runDir = CodegenUtils.class.getProtectionDomain().getCodeSource().getLocation(); String classpath = System.getProperty("java.class.path") + File.pathSeparator + runDir.getPath(); List<String> options = Arrays.asList("-classpath",classpath); //compile source code CompilationTask task = compiler.getTask(null, fileManager, diagnostics, options, null, sources); Boolean success = task.call(); //output diagnostics and error handling for(Diagnostic<? extends JavaFileObject> tmp : diagnostics.getDiagnostics()) if( tmp.getKind()==Kind.ERROR ) System.err.println("ERROR: "+tmp.toString()); if( success == null || !success ) throw new RuntimeException("Failed to compile class "+name); //dynamically load compiled class URLClassLoader classLoader = null; try { classLoader = new URLClassLoader( new URL[]{new File(_workingDir).toURI().toURL(), runDir}, CodegenUtils.class.getClassLoader()); return classLoader.loadClass(name); } finally { IOUtilFunctions.closeSilently(classLoader); } } catch(Exception ex) { LOG.error("Failed to compile class "+name+": \n"+src); throw new DMLRuntimeException("Failed to compile class "+name+".", ex); } } private static Class<?> loadFromClassFile(String name, byte[] classBytes) throws DMLRuntimeException { if(classBytes != null) { //load from byte representation of class file try(ByteClassLoader byteLoader = new ByteClassLoader(new URL[]{}, CodegenUtils.class.getClassLoader(), classBytes)) { return byteLoader.findClass(name); } catch (Exception e) { throw new DMLRuntimeException(e); } } else { //load compiled class file URL runDir = CodegenUtils.class.getProtectionDomain().getCodeSource().getLocation(); try(URLClassLoader classLoader = new URLClassLoader(new URL[]{new File(_workingDir) .toURI().toURL(), runDir}, CodegenUtils.class.getClassLoader())) { return classLoader.loadClass(name); } catch (Exception e) { throw new 
DMLRuntimeException(e); } } } private static byte[] getClassAsByteArray(String name) throws DMLRuntimeException { String classAsPath = name.replace('.', '/') + ".class"; URLClassLoader classLoader = null; InputStream stream = null; try { //dynamically load compiled class URL runDir = CodegenUtils.class.getProtectionDomain().getCodeSource().getLocation(); classLoader = new URLClassLoader( new URL[]{new File(_workingDir).toURI().toURL(), runDir}, CodegenUtils.class.getClassLoader()); stream = classLoader.getResourceAsStream(classAsPath); return IOUtils.toByteArray(stream); } catch (IOException e) { throw new DMLRuntimeException(e); } finally { IOUtilFunctions.closeSilently(classLoader); IOUtilFunctions.closeSilently(stream); } } private static void createWorkingDir() throws DMLRuntimeException { if( _workingDir != null ) return; String tmp = LocalFileUtils.getWorkingDir(LocalFileUtils.CATEGORY_CODEGEN); LocalFileUtils.createLocalFileIfNotExist(tmp); _workingDir = tmp; } }
apache-2.0
tarikgwa/test
html/lib/internal/Magento/Framework/Pricing/Price/BasePriceProviderInterface.php
250
<?php /** * Copyright © 2015 Magento. All rights reserved. * See COPYING.txt for license details. */ namespace Magento\Framework\Pricing\Price; /** * Interface BasePriceProviderInterface * * @api */ interface BasePriceProviderInterface { }
apache-2.0
jeremymwells/angular
modules/angular2/test/compiler/template_compiler_spec.ts
15814
import { ddescribe, describe, xdescribe, it, iit, xit, expect, beforeEach, afterEach, AsyncTestCompleter, inject, beforeEachBindings } from 'angular2/test_lib'; import {Promise, PromiseWrapper} from 'angular2/src/core/facade/async'; import {Type, isPresent, isBlank, stringify, isString} from 'angular2/src/core/facade/lang'; import {MapWrapper, SetWrapper, ListWrapper} from 'angular2/src/core/facade/collection'; import {RuntimeMetadataResolver} from 'angular2/src/compiler/runtime_metadata'; import { TemplateCompiler, NormalizedComponentWithViewDirectives } from 'angular2/src/compiler/template_compiler'; import {CompileDirectiveMetadata} from 'angular2/src/compiler/directive_metadata'; import {evalModule} from './eval_module'; import {SourceModule, moduleRef} from 'angular2/src/compiler/source_module'; import {XHR} from 'angular2/src/core/render/xhr'; import {MockXHR} from 'angular2/src/core/render/xhr_mock'; import {Locals} from 'angular2/src/core/change_detection/change_detection'; import { CommandVisitor, TextCmd, NgContentCmd, BeginElementCmd, BeginComponentCmd, EmbeddedTemplateCmd, TemplateCmd, visitAllCommands, CompiledTemplate } from 'angular2/src/core/compiler/template_commands'; import {Component, View, Directive} from 'angular2/core'; import {TEST_BINDINGS} from './test_bindings'; import {TestContext, TestDispatcher, TestPipes} from './change_detector_mocks'; import {codeGenValueFn, codeGenExportVariable} from 'angular2/src/compiler/util'; // Attention: This path has to point to this test file! 
const THIS_MODULE = 'angular2/test/compiler/template_compiler_spec'; var THIS_MODULE_REF = moduleRef(THIS_MODULE); export function main() { describe('TemplateCompiler', () => { var compiler: TemplateCompiler; var runtimeMetadataResolver: RuntimeMetadataResolver; beforeEachBindings(() => TEST_BINDINGS); beforeEach(inject([TemplateCompiler, RuntimeMetadataResolver], (_compiler, _runtimeMetadataResolver) => { compiler = _compiler; runtimeMetadataResolver = _runtimeMetadataResolver; })); describe('compile templates', () => { function runTests(compile) { it('should compile host components', inject([AsyncTestCompleter], (async) => { compile([CompWithBindingsAndStyles]) .then((humanizedTemplate) => { expect(humanizedTemplate['styles']).toEqual([]); expect(humanizedTemplate['commands'][0]).toEqual('<comp-a>'); expect(humanizedTemplate['cd']).toEqual(['elementProperty(title)=someDirValue']); async.done(); }); })); it('should compile nested components', inject([AsyncTestCompleter], (async) => { compile([CompWithBindingsAndStyles]) .then((humanizedTemplate) => { var nestedTemplate = humanizedTemplate['commands'][1]; expect(nestedTemplate['styles']).toEqual(['div {color: red}']); expect(nestedTemplate['commands'][0]).toEqual('<a>'); expect(nestedTemplate['cd']).toEqual(['elementProperty(href)=someCtxValue']); async.done(); }); })); it('should compile recursive components', inject([AsyncTestCompleter], (async) => { compile([TreeComp]) .then((humanizedTemplate) => { expect(humanizedTemplate['commands'][0]).toEqual('<tree>'); expect(humanizedTemplate['commands'][1]['commands'][0]).toEqual('<tree>'); expect(humanizedTemplate['commands'][1]['commands'][1]['commands'][0]) .toEqual('<tree>'); async.done(); }); })); it('should pass the right change detector to embedded templates', inject([AsyncTestCompleter], (async) => { compile([CompWithEmbeddedTemplate]) .then((humanizedTemplate) => { expect(humanizedTemplate['commands'][1]['commands'][0]).toEqual('<template>'); 
expect(humanizedTemplate['commands'][1]['commands'][1]['cd']) .toEqual(['elementProperty(href)=someCtxValue']); async.done(); }); })); } describe('compileHostComponentRuntime', () => { function compile(components: Type[]): Promise<any[]> { return compiler.compileHostComponentRuntime(components[0]).then(humanizeTemplate); } runTests(compile); it('should cache components for parallel requests', inject([AsyncTestCompleter, XHR], (async, xhr: MockXHR) => { xhr.expect('angular2/test/compiler/compUrl.html', 'a'); PromiseWrapper.all([compile([CompWithTemplateUrl]), compile([CompWithTemplateUrl])]) .then((humanizedTemplates) => { expect(humanizedTemplates[0]['commands'][1]['commands']).toEqual(['#text(a)']); expect(humanizedTemplates[1]['commands'][1]['commands']).toEqual(['#text(a)']); async.done(); }); xhr.flush(); })); it('should cache components for sequential requests', inject([AsyncTestCompleter, XHR], (async, xhr: MockXHR) => { xhr.expect('angular2/test/compiler/compUrl.html', 'a'); compile([CompWithTemplateUrl]) .then((humanizedTemplate0) => { return compile([CompWithTemplateUrl]) .then((humanizedTemplate1) => { expect(humanizedTemplate0['commands'][1]['commands']) .toEqual(['#text(a)']); expect(humanizedTemplate1['commands'][1]['commands']) .toEqual(['#text(a)']); async.done(); }); }); xhr.flush(); })); it('should allow to clear the cache', inject([AsyncTestCompleter, XHR], (async, xhr: MockXHR) => { xhr.expect('angular2/test/compiler/compUrl.html', 'a'); compile([CompWithTemplateUrl]) .then((humanizedTemplate) => { compiler.clearCache(); xhr.expect('angular2/test/compiler/compUrl.html', 'b'); var result = compile([CompWithTemplateUrl]); xhr.flush(); return result; }) .then((humanizedTemplate) => { expect(humanizedTemplate['commands'][1]['commands']).toEqual(['#text(b)']); async.done(); }); xhr.flush(); })); }); describe('compileTemplatesCodeGen', () => { function normalizeComponent(component: Type): Promise<NormalizedComponentWithViewDirectives> { var 
compAndViewDirMetas = [runtimeMetadataResolver.getMetadata(component)].concat( runtimeMetadataResolver.getViewDirectivesMetadata(component)); return PromiseWrapper.all(compAndViewDirMetas.map( meta => compiler.normalizeDirectiveMetadata(meta))) .then((normalizedCompAndViewDirMetas: CompileDirectiveMetadata[]) => new NormalizedComponentWithViewDirectives( normalizedCompAndViewDirMetas[0], normalizedCompAndViewDirMetas.slice(1))); } function compile(components: Type[]): Promise<any[]> { return PromiseWrapper.all(components.map(normalizeComponent)) .then((normalizedCompWithViewDirMetas: NormalizedComponentWithViewDirectives[]) => { var sourceModule = compiler.compileTemplatesCodeGen(THIS_MODULE, normalizedCompWithViewDirMetas); var sourceWithImports = testableTemplateModule(sourceModule, normalizedCompWithViewDirMetas[0].component) .getSourceWithImports(); return evalModule(sourceWithImports.source, sourceWithImports.imports, null); }); } runTests(compile); }); }); describe('serializeDirectiveMetadata and deserializeDirectiveMetadata', () => { it('should serialize and deserialize', inject([AsyncTestCompleter], (async) => { compiler.normalizeDirectiveMetadata( runtimeMetadataResolver.getMetadata(CompWithBindingsAndStyles)) .then((meta: CompileDirectiveMetadata) => { var json = compiler.serializeDirectiveMetadata(meta); expect(isString(json)).toBe(true); // Note: serializing will clear our the runtime type! 
var clonedMeta = compiler.deserializeDirectiveMetadata(json); expect(meta.changeDetection).toEqual(clonedMeta.changeDetection); expect(meta.template).toEqual(clonedMeta.template); expect(meta.selector).toEqual(clonedMeta.selector); expect(meta.exportAs).toEqual(clonedMeta.exportAs); expect(meta.type.name).toEqual(clonedMeta.type.name); async.done(); }); })); }); describe('normalizeDirectiveMetadata', () => { it('should normalize the template', inject([AsyncTestCompleter, XHR], (async, xhr: MockXHR) => { xhr.expect('angular2/test/compiler/compUrl.html', 'loadedTemplate'); compiler.normalizeDirectiveMetadata( runtimeMetadataResolver.getMetadata(CompWithTemplateUrl)) .then((meta: CompileDirectiveMetadata) => { expect(meta.template.template).toEqual('loadedTemplate'); async.done(); }); xhr.flush(); })); it('should copy all the other fields', inject([AsyncTestCompleter], (async) => { var meta = runtimeMetadataResolver.getMetadata(CompWithBindingsAndStyles); compiler.normalizeDirectiveMetadata(meta).then((normMeta: CompileDirectiveMetadata) => { expect(normMeta.type).toEqual(meta.type); expect(normMeta.isComponent).toEqual(meta.isComponent); expect(normMeta.dynamicLoadable).toEqual(meta.dynamicLoadable); expect(normMeta.selector).toEqual(meta.selector); expect(normMeta.exportAs).toEqual(meta.exportAs); expect(normMeta.changeDetection).toEqual(meta.changeDetection); expect(normMeta.properties).toEqual(meta.properties); expect(normMeta.events).toEqual(meta.events); expect(normMeta.hostListeners).toEqual(meta.hostListeners); expect(normMeta.hostProperties).toEqual(meta.hostProperties); expect(normMeta.hostAttributes).toEqual(meta.hostAttributes); expect(normMeta.lifecycleHooks).toEqual(meta.lifecycleHooks); async.done(); }); })); }); describe('compileStylesheetCodeGen', () => { it('should compile stylesheets into code', inject([AsyncTestCompleter], (async) => { var cssText = 'div {color: red}'; var sourceModule = compiler.compileStylesheetCodeGen('someModuleId', 
cssText)[0]; var sourceWithImports = testableStylesModule(sourceModule).getSourceWithImports(); evalModule(sourceWithImports.source, sourceWithImports.imports, null) .then(loadedCssText => { expect(loadedCssText).toEqual([cssText]); async.done(); }); })); }); }); } @Component({ selector: 'comp-a', host: {'[title]': 'someProp'}, moduleId: THIS_MODULE, exportAs: 'someExportAs' }) @View({template: '<a [href]="someProp"></a>', styles: ['div {color: red}']}) class CompWithBindingsAndStyles { } @Component({selector: 'tree', moduleId: THIS_MODULE}) @View({template: '<tree></tree>', directives: [TreeComp]}) class TreeComp { } @Component({selector: 'comp-url', moduleId: THIS_MODULE}) @View({templateUrl: 'compUrl.html'}) class CompWithTemplateUrl { } @Component({selector: 'comp-tpl', moduleId: THIS_MODULE}) @View({template: '<template><a [href]="someProp"></a></template>'}) class CompWithEmbeddedTemplate { } @Directive({selector: 'plain', moduleId: THIS_MODULE}) class PlainDirective { } @Component({selector: 'comp', moduleId: THIS_MODULE}) @View({template: ''}) class CompWithoutHost { } function testableTemplateModule(sourceModule: SourceModule, normComp: CompileDirectiveMetadata): SourceModule { var resultExpression = `${THIS_MODULE_REF}humanizeTemplate(Host${normComp.type.name}Template)`; var testableSource = `${sourceModule.sourceWithModuleRefs} ${codeGenExportVariable('run')}${codeGenValueFn(['_'], resultExpression)};`; return new SourceModule(sourceModule.moduleId, testableSource); } function testableStylesModule(sourceModule: SourceModule): SourceModule { var testableSource = `${sourceModule.sourceWithModuleRefs} ${codeGenExportVariable('run')}${codeGenValueFn(['_'], 'STYLES')};`; return new SourceModule(sourceModule.moduleId, testableSource); } // Attention: read by eval! 
export function humanizeTemplate(template: CompiledTemplate, humanizedTemplates: Map<number, StringMap<string, any>> = null): StringMap<string, any> { if (isBlank(humanizedTemplates)) { humanizedTemplates = new Map(); } var result = humanizedTemplates.get(template.id); if (isPresent(result)) { return result; } var commands = []; result = { 'styles': template.styles, 'commands': commands, 'cd': testChangeDetector(template.changeDetectorFactory) }; humanizedTemplates.set(template.id, result); visitAllCommands(new CommandHumanizer(commands, humanizedTemplates), template.commands); return result; } function testChangeDetector(changeDetectorFactory: Function): string[] { var ctx = new TestContext(); ctx.someProp = 'someCtxValue'; var dir1 = new TestContext(); dir1.someProp = 'someDirValue'; var dispatcher = new TestDispatcher([dir1], []); var cd = changeDetectorFactory(dispatcher); var locals = new Locals(null, MapWrapper.createFromStringMap({'someVar': null})); cd.hydrate(ctx, locals, dispatcher, new TestPipes()); cd.detectChanges(); return dispatcher.log; } class CommandHumanizer implements CommandVisitor { constructor(private result: any[], private humanizedTemplates: Map<number, StringMap<string, any>>) {} visitText(cmd: TextCmd, context: any): any { this.result.push(`#text(${cmd.value})`); return null; } visitNgContent(cmd: NgContentCmd, context: any): any { return null; } visitBeginElement(cmd: BeginElementCmd, context: any): any { this.result.push(`<${cmd.name}>`); return null; } visitEndElement(context: any): any { this.result.push('</>'); return null; } visitBeginComponent(cmd: BeginComponentCmd, context: any): any { this.result.push(`<${cmd.name}>`); this.result.push(humanizeTemplate(cmd.template, this.humanizedTemplates)); return null; } visitEndComponent(context: any): any { return this.visitEndElement(context); } visitEmbeddedTemplate(cmd: EmbeddedTemplateCmd, context: any): any { this.result.push(`<template>`); this.result.push({'cd': 
testChangeDetector(cmd.changeDetectorFactory)}); this.result.push(`</template>`); return null; } }
apache-2.0
chrisleck/kubernetes
pkg/registry/serviceaccount/etcd/etcd.go
2249
/* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package etcd import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/generic" etcdgeneric "k8s.io/kubernetes/pkg/registry/generic/etcd" "k8s.io/kubernetes/pkg/registry/serviceaccount" "k8s.io/kubernetes/pkg/runtime" ) type REST struct { *etcdgeneric.Etcd } // NewREST returns a RESTStorage object that will work against service accounts. 
func NewREST(opts generic.RESTOptions) *REST { prefix := "/serviceaccounts" newListFunc := func() runtime.Object { return &api.ServiceAccountList{} } storageInterface := opts.Decorator( opts.Storage, cachesize.GetWatchCacheSizeByResource(cachesize.ServiceAccounts), &api.ServiceAccount{}, prefix, serviceaccount.Strategy, newListFunc) store := &etcdgeneric.Etcd{ NewFunc: func() runtime.Object { return &api.ServiceAccount{} }, NewListFunc: newListFunc, KeyRootFunc: func(ctx api.Context) string { return etcdgeneric.NamespaceKeyRootFunc(ctx, prefix) }, KeyFunc: func(ctx api.Context, name string) (string, error) { return etcdgeneric.NamespaceKeyFunc(ctx, prefix, name) }, ObjectNameFunc: func(obj runtime.Object) (string, error) { return obj.(*api.ServiceAccount).Name, nil }, PredicateFunc: func(label labels.Selector, field fields.Selector) generic.Matcher { return serviceaccount.Matcher(label, field) }, QualifiedResource: api.Resource("serviceaccounts"), CreateStrategy: serviceaccount.Strategy, UpdateStrategy: serviceaccount.Strategy, ReturnDeletedObject: true, Storage: storageInterface, } return &REST{store} }
apache-2.0
ern/elasticsearch
x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/WatchMetadataTests.java
5036
/* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. */ package org.elasticsearch.xpack.watcher.test.integration; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.core.TimeValue; import org.elasticsearch.common.xcontent.ObjectPath; import org.elasticsearch.xpack.core.watcher.execution.ActionExecutionMode; import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchRequestBuilder; import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse; import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequestBuilder; import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; import org.elasticsearch.xpack.watcher.actions.logging.LoggingAction; import org.elasticsearch.xpack.watcher.actions.logging.LoggingLevel; import org.elasticsearch.xpack.watcher.common.text.TextTemplate; import org.elasticsearch.xpack.watcher.condition.CompareCondition; import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEvent; import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; import static org.elasticsearch.xpack.watcher.input.InputBuilders.noneInput; import static 
org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.cron; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; public class WatchMetadataTests extends AbstractWatcherIntegrationTestCase { public void testWatchMetadata() throws Exception { Map<String, Object> metadata = new HashMap<>(); metadata.put("foo", "bar"); List<String> metaList = new ArrayList<>(); metaList.add("this"); metaList.add("is"); metaList.add("a"); metaList.add("test"); metadata.put("baz", metaList); new PutWatchRequestBuilder(client()).setId("_name") .setSource(watchBuilder() .trigger(schedule(cron("0/5 * * * * ? *"))) .input(noneInput()) .condition(new CompareCondition("ctx.payload.hits.total.value", CompareCondition.Op.EQ, 1L)) .metadata(metadata)) .get(); timeWarp().trigger("_name"); refresh(); SearchResponse searchResponse = client().prepareSearch(HistoryStoreField.DATA_STREAM + "*") .setQuery(termQuery("metadata.foo", "bar")) .get(); assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); } public void testWatchMetadataAvailableAtExecution() throws Exception { Map<String, Object> metadata = new HashMap<>(); metadata.put("foo", "bar"); metadata.put("logtext", "This is a test"); LoggingAction.Builder loggingAction = loggingAction(new TextTemplate("_logging")) .setLevel(LoggingLevel.DEBUG) .setCategory("test"); new PutWatchRequestBuilder(client()).setId("_name") .setSource(watchBuilder() .trigger(schedule(cron("0 0 0 1 1 ? 
2050"))) .input(noneInput()) .condition(InternalAlwaysCondition.INSTANCE) .addAction("testLogger", loggingAction) .defaultThrottlePeriod(TimeValue.timeValueSeconds(0)) .metadata(metadata)) .get(); TriggerEvent triggerEvent = new ScheduleTriggerEvent(ZonedDateTime.now(ZoneOffset.UTC), ZonedDateTime.now(ZoneOffset.UTC)); ExecuteWatchResponse executeWatchResponse = new ExecuteWatchRequestBuilder(client()).setId("_name") .setTriggerEvent(triggerEvent).setActionMode("_all", ActionExecutionMode.SIMULATE).get(); Map<String, Object> result = executeWatchResponse.getRecordSource().getAsMap(); logger.info("result=\n{}", result); assertThat(ObjectPath.<String>eval("metadata.foo", result), equalTo("bar")); assertThat(ObjectPath.<String>eval("result.actions.0.id", result), equalTo("testLogger")); assertThat(ObjectPath.<String>eval("result.actions.0.logging.logged_text", result), equalTo("_logging")); } }
apache-2.0
ingokegel/intellij-community
java/java-tests/testSrc/com/intellij/openapi/command/undo/UndoTestCase.java
7121
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file. package com.intellij.openapi.command.undo; import com.intellij.codeInsight.JavaCodeInsightTestCase; import com.intellij.history.integration.LocalHistoryImpl; import com.intellij.openapi.actionSystem.CommonDataKeys; import com.intellij.openapi.actionSystem.DataContext; import com.intellij.openapi.actionSystem.IdeActions; import com.intellij.openapi.application.ApplicationManager; import com.intellij.openapi.command.CommandProcessor; import com.intellij.openapi.command.WriteCommandAction; import com.intellij.openapi.command.impl.UndoManagerImpl; import com.intellij.openapi.editor.Editor; import com.intellij.openapi.editor.actionSystem.EditorActionManager; import com.intellij.openapi.editor.actionSystem.TypedAction; import com.intellij.openapi.fileEditor.FileEditor; import com.intellij.openapi.fileEditor.FileEditorManager; import com.intellij.openapi.fileEditor.OpenFileDescriptor; import com.intellij.openapi.fileEditor.impl.CurrentEditorProvider; import com.intellij.openapi.fileEditor.impl.text.TextEditorProvider; import com.intellij.openapi.util.ThrowableComputable; import com.intellij.openapi.vfs.VirtualFile; import com.intellij.testFramework.EditorTestUtil; import com.intellij.testFramework.PsiTestUtil; import com.intellij.util.DocumentUtil; import org.jetbrains.annotations.NotNull; import java.io.IOException; public abstract class UndoTestCase extends JavaCodeInsightTestCase { private CurrentEditorProvider myOldEditorProvider; protected UndoManagerImpl myManager; protected VirtualFile myRoot; @Override protected void setUp() throws Exception { super.setUp(); myManager = (UndoManagerImpl)UndoManager.getInstance(myProject); myOldEditorProvider = myManager.getEditorProvider(); ApplicationManager.getApplication().runWriteAction(() -> { try { setUpInWriteAction(); } catch (Exception e) { LOG.error(e); } }); } @Override protected 
void tearDown() throws Exception { try { myManager.setEditorProvider(myOldEditorProvider); myManager = null; myOldEditorProvider = null; } catch (Throwable e) { addSuppressedException(e); } finally { super.tearDown(); } } @Override protected void initApplication() throws Exception { super.initApplication(); LocalHistoryImpl.getInstanceImpl().cleanupForNextTest(); } protected void setUpInWriteAction() throws Exception { myRoot = createTestProjectStructure(); } void typeInChar(Editor e, char c) { EditorActionManager.getInstance(); TypedAction.getInstance().actionPerformed(e, c, createDataContextFor(e)); } protected void typeInText(Editor editor, String text) { char[] chars = text.toCharArray(); for (char aChar : chars) { typeInChar(editor, aChar); } } protected static void moveCaret(final Editor e, final String dir, final boolean selection) { executeEditorAction(e, "Editor" + dir + (selection ? "WithSelection" : "")); } protected static void enter(final Editor e) { executeEditorAction(e, IdeActions.ACTION_EDITOR_ENTER); } @Override protected void backspace(@NotNull final Editor e) { executeEditorAction(e, IdeActions.ACTION_EDITOR_BACKSPACE); } @Override protected void delete(@NotNull final Editor e) { executeEditorAction(e, IdeActions.ACTION_EDITOR_DELETE); } static void executeEditorAction(@NotNull Editor editor, @NotNull String actionId) { EditorTestUtil.executeAction(editor, actionId); } VirtualFile createFileInCommand(final String name) { try { return WriteCommandAction .runWriteCommandAction(getProject(), (ThrowableComputable<VirtualFile, IOException>)() -> myRoot.createChildData(this, name)); } catch (IOException e) { throw new RuntimeException(e); } } protected void addContentRoot() { PsiTestUtil.addContentRoot(getModule(), getTempDir().createVirtualDir()); } protected void executeCommand(@NotNull Runnable command, String name) { CommandProcessor.getInstance().executeCommand(myProject, command, name, null); } private DataContext createDataContextFor(final 
Editor editor) { return dataId -> { if (CommonDataKeys.EDITOR.is(dataId)) return editor; if (CommonDataKeys.PROJECT.is(dataId)) return getProject(); return null; }; } boolean isUndoAvailable(Editor e) { return myManager.isUndoAvailable(getFileEditor(e)); } protected void undo(Editor e) { FileEditor fe = getFileEditor(e); assertTrue("undo is not available", myManager.isUndoAvailable(fe)); myManager.undo(fe); } boolean isRedoAvailable(Editor e) { return myManager.isRedoAvailable(getFileEditor(e)); } void redo(Editor e) { FileEditor fe = getFileEditor(e); assertTrue("redo is not available", myManager.isRedoAvailable(fe)); myManager.redo(fe); } void globalUndo() { undo(null); } void globalRedo() { redo(null); } protected Editor getEditor(VirtualFile file) { return FileEditorManager.getInstance(myProject).openTextEditor(new OpenFileDescriptor(myProject, file, 0), false); } static void assertStartsWith(String prefix, String text) { assertTrue(text, text.startsWith(prefix)); } void assertGlobalUndoIsAvailable() { assertUndoIsAvailable(null); } void assertGlobalUndoNotAvailable() { assertUndoNotAvailable(null); } void assertGlobalRedoIsAvailable() { assertRedoIsAvailable(null); } void assertGlobalRedoNotAvailable() { assertRedoNotAvailable(null); } void assertRedoNotAvailable(Editor e) { assertFalse(myManager.isRedoAvailable(getFileEditor(e))); } void assertUndoIsAvailable(Editor e) { assertTrue(myManager.isUndoAvailable(getFileEditor(e))); } void assertUndoNotAvailable(Editor e) { assertFalse(myManager.isUndoAvailable(getFileEditor(e))); } void assertRedoIsAvailable(Editor e) { assertTrue(myManager.isRedoAvailable(getFileEditor(e))); } protected static FileEditor getFileEditor(Editor e) { return e == null ? 
null : TextEditorProvider.getInstance().getTextEditor(e); } protected void executeCommand(Command c) { executeCommand("", c); } protected void executeCommand(String name, Command command) { executeCommand(name, null, command); } protected void executeCommand(final String name, final Object groupId, final Command command) { CommandProcessor.getInstance().executeCommand(myProject, () -> { try { command.run(); } catch (Exception e) { throw new RuntimeException(e); } }, name, groupId); } static void executeTransparently(final Command r) { DocumentUtil.writeInRunUndoTransparentAction(() -> { try { r.run(); } catch (Exception e) { throw new RuntimeException(e); } }); } @FunctionalInterface protected interface Command { void run() throws Exception; } }
apache-2.0
kimroen/rust
src/librustdoc/doctree.rs
5613
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! This module is used to store stuff from Rust's AST in a more convenient
//! manner (and with prettier names) before cleaning.

pub use self::StructType::*;
pub use self::TypeBound::*;

use syntax;
use syntax::codemap::Span;
use syntax::ast;
use syntax::attr;
use syntax::ast::{Ident, NodeId};
use syntax::ptr::P;

/// A module's contents, with items split out by kind so later passes can
/// process each category separately. Also carries the module's own
/// attributes, spans, visibility and stability metadata.
pub struct Module {
    /// `None` for the crate root, which has no name.
    pub name: Option<Ident>,
    pub attrs: Vec<ast::Attribute>,
    /// Span covering the module item itself (outside the braces).
    pub where_outer: Span,
    /// Span covering the module's body (inside the braces).
    pub where_inner: Span,
    pub structs: Vec<Struct>,
    pub enums: Vec<Enum>,
    pub fns: Vec<Function>,
    /// Nested sub-modules.
    pub mods: Vec<Module>,
    pub id: NodeId,
    pub typedefs: Vec<Typedef>,
    pub statics: Vec<Static>,
    pub constants: Vec<Constant>,
    pub traits: Vec<Trait>,
    pub vis: ast::Visibility,
    pub stab: Option<attr::Stability>,
    pub impls: Vec<Impl>,
    pub foreigns: Vec<ast::ForeignMod>,
    pub view_items: Vec<ast::ViewItem>,
    pub macros: Vec<Macro>,
    /// True only for the implicit top-level module of the crate.
    pub is_crate: bool,
}

impl Module {
    /// Creates an empty module with the given (optional) name; all item
    /// lists start empty, the id is 0, visibility is inherited and the
    /// spans are dummies until filled in by the caller.
    pub fn new(name: Option<Ident>) -> Module {
        Module {
            name: name,
            id: 0,
            vis: ast::Inherited,
            stab: None,
            where_outer: syntax::codemap::DUMMY_SP,
            where_inner: syntax::codemap::DUMMY_SP,
            attrs: Vec::new(),
            structs: Vec::new(),
            enums: Vec::new(),
            fns: Vec::new(),
            mods: Vec::new(),
            typedefs: Vec::new(),
            statics: Vec::new(),
            constants: Vec::new(),
            traits: Vec::new(),
            impls: Vec::new(),
            view_items: Vec::new(),
            foreigns: Vec::new(),
            macros: Vec::new(),
            is_crate: false,
        }
    }
}

#[derive(Show, Clone, RustcEncodable, RustcDecodable, Copy)]
pub enum StructType {
    /// A normal struct
    Plain,
    /// A tuple struct
    Tuple,
    /// A newtype struct (tuple struct with one element)
    Newtype,
    /// A unit struct
    Unit
}

/// A bound on a type parameter: either a lifetime or a trait.
pub enum TypeBound {
    RegionBound,
    TraitBound(ast::TraitRef)
}

/// A struct item together with its fields and surrounding metadata.
pub struct Struct {
    pub vis: ast::Visibility,
    pub stab: Option<attr::Stability>,
    pub id: NodeId,
    /// Plain, tuple, newtype or unit — see `StructType`.
    pub struct_type: StructType,
    pub name: Ident,
    pub generics: ast::Generics,
    pub attrs: Vec<ast::Attribute>,
    pub fields: Vec<ast::StructField>,
    /// Span of the whole item.
    pub whence: Span,
}

/// An enum item and its variants.
pub struct Enum {
    pub vis: ast::Visibility,
    pub stab: Option<attr::Stability>,
    pub variants: Vec<Variant>,
    pub generics: ast::Generics,
    pub attrs: Vec<ast::Attribute>,
    pub id: NodeId,
    pub whence: Span,
    pub name: Ident,
}

/// A single variant of an enum.
pub struct Variant {
    pub name: Ident,
    pub attrs: Vec<ast::Attribute>,
    pub kind: ast::VariantKind,
    pub id: ast::NodeId,
    pub vis: ast::Visibility,
    pub stab: Option<attr::Stability>,
    pub whence: Span,
}

/// A free function item.
pub struct Function {
    pub decl: ast::FnDecl,
    pub attrs: Vec<ast::Attribute>,
    pub id: NodeId,
    pub name: Ident,
    pub vis: ast::Visibility,
    pub stab: Option<attr::Stability>,
    pub unsafety: ast::Unsafety,
    pub whence: Span,
    pub generics: ast::Generics,
}

/// A `type` alias item.
pub struct Typedef {
    /// The aliased type (right-hand side).
    pub ty: P<ast::Ty>,
    pub gen: ast::Generics,
    pub name: Ident,
    pub id: ast::NodeId,
    pub attrs: Vec<ast::Attribute>,
    pub whence: Span,
    pub vis: ast::Visibility,
    pub stab: Option<attr::Stability>,
}

/// A `static` item, including its initializer expression.
#[derive(Show)]
pub struct Static {
    pub type_: P<ast::Ty>,
    pub mutability: ast::Mutability,
    pub expr: P<ast::Expr>,
    pub name: Ident,
    pub attrs: Vec<ast::Attribute>,
    pub vis: ast::Visibility,
    pub stab: Option<attr::Stability>,
    pub id: ast::NodeId,
    pub whence: Span,
}

/// A `const` item, including its initializer expression.
pub struct Constant {
    pub type_: P<ast::Ty>,
    pub expr: P<ast::Expr>,
    pub name: Ident,
    pub attrs: Vec<ast::Attribute>,
    pub vis: ast::Visibility,
    pub stab: Option<attr::Stability>,
    pub id: ast::NodeId,
    pub whence: Span,
}

/// A trait item, with its associated items and supertrait bounds.
pub struct Trait {
    pub unsafety: ast::Unsafety,
    pub name: Ident,
    pub items: Vec<ast::TraitItem>, //should be TraitItem
    pub generics: ast::Generics,
    pub bounds: Vec<ast::TyParamBound>,
    pub attrs: Vec<ast::Attribute>,
    pub id: ast::NodeId,
    pub whence: Span,
    pub vis: ast::Visibility,
    pub stab: Option<attr::Stability>,
}

/// An `impl` block; `trait_` is `None` for inherent impls.
pub struct Impl {
    pub unsafety: ast::Unsafety,
    pub polarity: ast::ImplPolarity,
    pub generics: ast::Generics,
    pub trait_: Option<ast::TraitRef>,
    /// The type the impl is for.
    pub for_: P<ast::Ty>,
    pub items: Vec<ast::ImplItem>,
    pub attrs: Vec<ast::Attribute>,
    pub whence: Span,
    pub vis: ast::Visibility,
    pub stab: Option<attr::Stability>,
    pub id: ast::NodeId,
}

/// A macro definition.
pub struct Macro {
    pub name: Ident,
    pub id: ast::NodeId,
    pub attrs: Vec<ast::Attribute>,
    pub whence: Span,
    pub stab: Option<attr::Stability>,
}

/// Classifies a struct definition: a present `ctor_id` marks a
/// tuple-style struct, which is further split by field count.
pub fn struct_type_from_def(sd: &ast::StructDef) -> StructType {
    if sd.ctor_id.is_some() {
        // We are in a tuple-struct
        match sd.fields.len() {
            0 => Unit,
            1 => Newtype,
            _ => Tuple
        }
    } else {
        Plain
    }
}
apache-2.0
scottfrederick/springdoclet
sample/src/main/java/org/springframework/samples/petclinic/NamedEntity.java
504
package org.springframework.samples.petclinic;

/**
 * Simple JavaBean domain object adds a name property to <code>BaseEntity</code>.
 * Used as a base class for objects needing these properties.
 *
 * @author Ken Krebs
 * @author Juergen Hoeller
 */
public class NamedEntity extends BaseEntity {

    /** Human-readable name of this entity; may be null until set. */
    private String name;

    /** Sets the entity's name. */
    public void setName(String name) {
        this.name = name;
    }

    /** @return the entity's name, or null if none has been set */
    public String getName() {
        return this.name;
    }

    /** Uses the name as the string form (goes through the getter so subclasses can override). */
    @Override
    public String toString() {
        return this.getName();
    }
}
apache-2.0
vmahuli/contrail-controller
src/db/db_table_partition.cc
5891
/*
 * Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
 */

#include <tbb/mutex.h>

#include "base/logging.h"
#include "base/time_util.h"
#include "db/db.h"
#include "db/db_entry.h"
#include "db/db_partition.h"
#include "db/db_table.h"
#include "db/db_table_partition.h"

using namespace std;

// concurrency: called from DBPartition task.
// Enqueues the entry on the change list (at most once, guarded by the
// onlist bit) and, if the list was previously empty, tells the partition
// that this table now has pending work.
void DBTablePartBase::Notify(DBEntryBase *entry) {
    if (entry->is_onlist()) {
        return;
    }
    entry->set_onlist();
    bool was_empty = change_list_.empty();
    change_list_.push_back(*entry);
    if (was_empty) {
        DB *db = parent()->database();
        DBPartition *partition = db->GetPartition(index_);
        partition->OnTableChange(this);
    }
}

//
// Concurrency: called from db::DBTable task.
//
// Evaluate concurrency issues with DBEntryBase::ClearState when making
// changes to this method. We expect that either this method or ClearState
// is responsible for removing the DBEntryBase when they run concurrently,
// assuming the DBEntryBase is eligible for removal. The dbstate_mutex is
// used for synchronization.
//
// Returns true when the change list was fully drained; returns false and
// re-arms OnTableChange when kMaxIterations entries were processed with
// work still pending.
bool DBTablePartBase::RunNotify() {
    for (int i = 0; ((i < kMaxIterations) && !change_list_.empty()); ++i) {
        DBEntryBase *entry = &change_list_.front();
        change_list_.pop_front();
        parent()->RunNotify(this, entry);
        entry->clear_onlist();

        // If the entry is marked deleted and all DBStates are removed
        // and it's not already on the remove queue, it can be removed
        // from the tree right away.
        //
        // Note that IsOnRemoveQ must be called after is_state_empty as
        // synchronization with DBEntryBase::ClearState happens via the
        // call to is_state_empty, and ClearState can set the OnRemoveQ
        // bit in the entry.
        if (entry->IsDeleted() && entry->is_state_empty(this) &&
            !entry->IsOnRemoveQ()) {
            Remove(entry);
        }
    }
    if (!change_list_.empty()) {
        DB *db = parent()->database();
        DBPartition *partition = db->GetPartition(index_);
        partition->OnTableChange(this);
        return false;
    }
    return true;
}

// Marks the entry deleted and notifies listeners so they can release
// their DBStates; with no listeners there is nothing to wait for, so the
// entry is removed immediately (after being pulled off the change list).
void DBTablePartBase::Delete(DBEntryBase *entry) {
    if (parent_->HasListeners()) {
        entry->MarkDelete();
        Notify(entry);
    } else {
        // Remove from change_list
        if (entry->is_onlist()) {
            change_list_.erase(change_list_.iterator_to(*entry));
        }
        Remove(entry);
    }
}

DBTablePartition::DBTablePartition(DBTable *table, int index)
    : DBTablePartBase(table, index) {
}

// Feeds one request into the owning table's Input handler and bumps the
// table's input counter.
void DBTablePartition::Process(DBClient *client, DBRequest *req) {
    DBTable *table = static_cast<DBTable *>(parent());
    table->incr_input_count();
    table->Input(this, client, req);
}

// Inserts a new entry into the tree (duplicates are a programming error,
// hence the assert), links it back to this partition, and notifies.
void DBTablePartition::Add(DBEntry *entry) {
    tbb::mutex::scoped_lock lock(mutex_);
    std::pair<Tree::iterator, bool> ret = tree_.insert(*entry);
    assert(ret.second);
    entry->set_table_partition(static_cast<DBTablePartBase *>(this));
    Notify(entry);
    parent()->AddRemoveCallback(entry, true);
}

// Re-notifies listeners for an entry that changed in place.
void DBTablePartition::Change(DBEntry *entry) {
    tbb::mutex::scoped_lock lock(mutex_);
    Notify(entry);
}

// Erases the entry from the tree and frees it. A failed erase means the
// tree is corrupt, so this aborts deliberately.
void DBTablePartition::Remove(DBEntryBase *db_entry) {
    tbb::mutex::scoped_lock lock(mutex_);
    DBEntry *entry = static_cast<DBEntry *>(db_entry);
    parent()->AddRemoveCallback(entry, false);
    bool success = tree_.erase(*entry);
    if (!success) {
        LOG(FATAL, "ABORT: DB node erase failed for table " + parent()->name());
        LOG(FATAL, "Invalid node " + db_entry->ToString());
        abort();
    }
    delete entry;

    // If a table is marked for deletion, then we may trigger the deletion
    // process when the last prefix is deleted
    if (tree_.empty())
        table()->RetryDelete();
}

// Exact lookup using an existing entry as the key.
DBEntry *DBTablePartition::Find(const DBEntry *entry) {
    tbb::mutex::scoped_lock lock(mutex_);
    Tree::iterator loc = tree_.find(*entry);
    if (loc != tree_.end()) {
        return loc.operator->();
    }
    return NULL;
}

// Exact lookup from a request key: builds a temporary entry via the
// table's AllocEntry and searches with it.
DBEntry *DBTablePartition::Find(const DBRequestKey *key) {
    tbb::mutex::scoped_lock lock(mutex_);
    DBTable *table = static_cast<DBTable *>(parent());
    std::auto_ptr<DBEntry> entry_ptr = table->AllocEntry(key);
    Tree::iterator loc = tree_.find(*(entry_ptr.get()));
    if (loc != tree_.end()) {
        return loc.operator->();
    }
    return NULL;
}

// Returns the first entry strictly greater than the given key, or NULL.
DBEntry *DBTablePartition::FindNext(const DBRequestKey *key) {
    tbb::mutex::scoped_lock lock(mutex_);
    DBTable *table = static_cast<DBTable *>(parent());
    std::auto_ptr<DBEntry> entry_ptr = table->AllocEntry(key);
    Tree::iterator loc = tree_.upper_bound(*(entry_ptr.get()));
    if (loc != tree_.end()) {
        return loc.operator->();
    }
    return NULL;
}

// Returns the matching entry or next in lex order
DBEntry *DBTablePartition::lower_bound(const DBEntryBase *key) {
    const DBEntry *entry = static_cast<const DBEntry *>(key);
    tbb::mutex::scoped_lock lock(mutex_);
    Tree::iterator it = tree_.lower_bound(*entry);
    if (it != tree_.end()) {
        return (it.operator->());
    }
    return NULL;
}

// Returns the smallest entry in the tree, or NULL when empty.
DBEntry *DBTablePartition::GetFirst() {
    tbb::mutex::scoped_lock lock(mutex_);
    Tree::iterator it = tree_.begin();
    if (it == tree_.end()) {
        return NULL;
    }
    return it.operator->();
}

// Returns the next entry (Doesn't search). Threaded walk
DBEntry *DBTablePartition::GetNext(const DBEntryBase *key) {
    const DBEntry *entry = static_cast<const DBEntry *>(key);
    tbb::mutex::scoped_lock lock(mutex_);
    Tree::const_iterator it = tree_.iterator_to(*entry);
    it++;
    if (it != tree_.end()) {
        return const_cast<DBEntry *>(it.operator->());
    }
    return NULL;
}

// Convenience downcast of the parent table.
DBTable *DBTablePartition::table() {
    return static_cast<DBTable *>(parent());
}
apache-2.0
Addepar/buck
src/com/facebook/buck/core/build/engine/buildinfo/OnDiskBuildInfo.java
3124
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.facebook.buck.core.build.engine.buildinfo;

import com.facebook.buck.core.rulekey.RuleKey;
import com.facebook.buck.util.hashing.FileHashLoader;
import com.facebook.buck.util.types.Either;
import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSortedSet;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Optional;
import java.util.Set;

/** Provides access to the on-disk rule metadata (both "artifact" and "build"). */
public interface OnDiskBuildInfo {

  /**
   * @return the rule "artifact" metadata value associated with the specified key, if it exists.
   *     The right side of the {@link Either} carries the failure when the value could not be read.
   */
  Either<String, Exception> getValue(String key);

  /** @return the build engine metadata value associated with the specified key, if it exists. */
  Optional<String> getBuildValue(String key);

  /** @return the sequence of "artifact" values associated with the specified key, if it exists. */
  Optional<ImmutableList<String>> getValues(String key);

  /**
   * Tries to read the "artifact" values and if it fails it logs the attributes of the file it tried
   * to read.
   */
  ImmutableList<String> getValuesOrThrow(String key) throws IOException;

  /**
   * @return the map of strings associated with the specified key in the "build" metadata, if it
   *     exists.
   */
  Optional<ImmutableMap<String, String>> getMap(String key);

  /**
   * Returns the {@link RuleKey} for the rule whose output is currently stored on disk.
   *
   * <p>This value would have been written the last time the rule was built successfully.
   */
  Optional<RuleKey> getRuleKey(String key);

  /** Returns the recorded output paths of the rule for creating a cache artifact. */
  ImmutableSortedSet<Path> getPathsForArtifact() throws IOException;

  /**
   * Returns the "build" metadata that is stored with a cache artifact ("artifact" metadata is
   * stored within the artifact itself).
   */
  ImmutableMap<String, String> getMetadataForArtifact() throws IOException;

  /** Deletes both "artifact" and "build" metadata. */
  void deleteExistingMetadata() throws IOException;

  /**
   * Computes the total size of the recorded outputs and writes it (and, when {@code
   * shouldWriteOutputHashes} allows for the computed size, the output hashes) to metadata.
   */
  void calculateOutputSizeAndWriteMetadata(
      FileHashLoader fileHashLoader,
      ImmutableSortedSet<Path> recordedPaths,
      Predicate<Long> shouldWriteOutputHashes)
      throws IOException;

  /** Checks the files extracted from a cache artifact against the recorded metadata. */
  void validateArtifact(Set<Path> extractedFiles) throws IOException;

  /** @return the output paths recorded in metadata for this rule. */
  ImmutableSortedSet<Path> getOutputPaths();
}
apache-2.0
monix/monix
monix-catnap/shared/src/test/scala/monix/catnap/cancelables/BooleanCancelableFSuite.scala
2219
/*
 * Copyright (c) 2014-2021 by The Monix Project Developers.
 * See the project homepage at: https://monix.io
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package monix.catnap
package cancelables

import cats.effect.IO
import minitest.SimpleTestSuite

// Tests for BooleanCancelableF: a cancelable whose `isCanceled` flag can be
// queried, built over an effect type (IO here).
object BooleanCancelableFSuite extends SimpleTestSuite {
  test("apply") {
    var effect = 0
    val task = IO { effect += 1 }
    val ref = BooleanCancelableF[IO](task)
    val cf = ref.unsafeRunSync()

    // Fresh instance: not canceled, task not yet run.
    assert(!cf.isCanceled.unsafeRunSync(), "!cf.isCanceled")
    assertEquals(effect, 0)

    // First cancel runs the task exactly once...
    cf.cancel.unsafeRunSync()
    assert(cf.isCanceled.unsafeRunSync(), "cf.isCanceled")
    assertEquals(effect, 1)

    // ...and a second cancel is idempotent.
    cf.cancel.unsafeRunSync()
    assert(cf.isCanceled.unsafeRunSync(), "cf.isCanceled")
    assertEquals(effect, 1)

    // Referential transparency test
    // Running `ref` again yields an independent cancelable with its own state.
    val cf2 = ref.unsafeRunSync()
    assert(!cf2.isCanceled.unsafeRunSync(), "!cf2.isCanceled")
    assertEquals(effect, 1)

    cf2.cancel.unsafeRunSync()
    assert(cf2.isCanceled.unsafeRunSync(), "cf2.isCanceled")
    assertEquals(effect, 2)

    cf2.cancel.unsafeRunSync()
    assert(cf2.isCanceled.unsafeRunSync(), "cf2.isCanceled")
    assertEquals(effect, 2)
  }

  test("alreadyCanceled") {
    // Starts (and stays) in the canceled state; cancel is a no-op.
    val cf = BooleanCancelableF.alreadyCanceled[IO]
    assert(cf.isCanceled.unsafeRunSync(), "cf.isCanceled")

    cf.cancel.unsafeRunSync()
    cf.cancel.unsafeRunSync()
    assert(cf.isCanceled.unsafeRunSync(), "cf.isCanceled")
  }

  test("dummy") {
    // Never reports canceled, even after cancel is invoked.
    val cf = BooleanCancelableF.dummy[IO]
    assert(!cf.isCanceled.unsafeRunSync(), "!cf.isCanceled")

    cf.cancel.unsafeRunSync()
    cf.cancel.unsafeRunSync()
    assert(!cf.isCanceled.unsafeRunSync(), "!cf.isCanceled")
  }
}
apache-2.0
gfyoung/elasticsearch
x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/HourOfDay.java
1473
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;

import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor;
import org.elasticsearch.xpack.sql.tree.Location;
import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2;

import java.time.temporal.ChronoField;
import java.util.TimeZone;

/**
 * Extract the hour of the day from a datetime.
 */
public class HourOfDay extends DateTimeFunction {
    public HourOfDay(Location location, Expression field, TimeZone timeZone) {
        super(location, field, timeZone);
    }

    /** Constructor reference used by the node-info machinery to rebuild this node. */
    @Override
    protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() {
        return HourOfDay::new;
    }

    /** Rebuilds this function around a new child expression, keeping location and time zone. */
    @Override
    protected HourOfDay replaceChild(Expression newChild) {
        return new HourOfDay(location(), newChild, timeZone());
    }

    /** Name of the date/time format component this function extracts. */
    @Override
    public String dateTimeFormat() {
        return "hour";
    }

    /** The java.time field backing the extraction. */
    @Override
    protected ChronoField chronoField() {
        return ChronoField.HOUR_OF_DAY;
    }

    /** The processor-side extractor matching {@link #chronoField()}. */
    @Override
    protected DateTimeExtractor extractor() {
        return DateTimeExtractor.HOUR_OF_DAY;
    }
}
apache-2.0
bitbouncer/avro
lang/java/mapred/src/test/java/org/apache/avro/hadoop/file/TestHadoopCodecFactory.java
2528
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.avro.hadoop.file; import org.apache.avro.file.CodecFactory; import org.junit.Test; import static org.junit.Assert.assertTrue; public class TestHadoopCodecFactory { @Test public void testHadoopCodecFactoryDeflate(){ CodecFactory hadoopDeflateCodec = HadoopCodecFactory.fromHadoopString("org.apache.hadoop.io.compress.DeflateCodec"); CodecFactory avroDeflateCodec = CodecFactory.fromString("deflate"); assertTrue(hadoopDeflateCodec.getClass().equals(avroDeflateCodec.getClass())); } @Test public void testHadoopCodecFactorySnappy(){ CodecFactory hadoopSnappyCodec = HadoopCodecFactory.fromHadoopString("org.apache.hadoop.io.compress.SnappyCodec"); CodecFactory avroSnappyCodec = CodecFactory.fromString("snappy"); assertTrue(hadoopSnappyCodec.getClass().equals(avroSnappyCodec.getClass())); } @Test public void testHadoopCodecFactoryBZip2(){ CodecFactory hadoopSnappyCodec = HadoopCodecFactory.fromHadoopString("org.apache.hadoop.io.compress.BZip2Codec"); CodecFactory avroSnappyCodec = CodecFactory.fromString("bzip2"); assertTrue(hadoopSnappyCodec.getClass().equals(avroSnappyCodec.getClass())); } @Test public void testHadoopCodecFactoryGZip(){ CodecFactory hadoopSnappyCodec = 
HadoopCodecFactory.fromHadoopString("org.apache.hadoop.io.compress.GZipCodec"); CodecFactory avroSnappyCodec = CodecFactory.fromString("deflate"); assertTrue(hadoopSnappyCodec.getClass().equals(avroSnappyCodec.getClass())); } @Test public void testHadoopCodecFactoryFail(){ CodecFactory hadoopSnappyCodec = HadoopCodecFactory.fromHadoopString("org.apache.hadoop.io.compress.FooCodec"); assertTrue(hadoopSnappyCodec == null); } }
apache-2.0
tboyce021/home-assistant
homeassistant/components/recorder/util.py
6463
"""SQLAlchemy util functions.""" from contextlib import contextmanager from datetime import timedelta import logging import os import time from sqlalchemy.exc import OperationalError, SQLAlchemyError import homeassistant.util.dt as dt_util from .const import CONF_DB_INTEGRITY_CHECK, DATA_INSTANCE, SQLITE_URL_PREFIX from .models import ALL_TABLES, process_timestamp _LOGGER = logging.getLogger(__name__) RETRIES = 3 QUERY_RETRY_WAIT = 0.1 SQLITE3_POSTFIXES = ["", "-wal", "-shm"] # This is the maximum time after the recorder ends the session # before we no longer consider startup to be a "restart" and we # should do a check on the sqlite3 database. MAX_RESTART_TIME = timedelta(minutes=10) @contextmanager def session_scope(*, hass=None, session=None): """Provide a transactional scope around a series of operations.""" if session is None and hass is not None: session = hass.data[DATA_INSTANCE].get_session() if session is None: raise RuntimeError("Session required") need_rollback = False try: yield session if session.transaction: need_rollback = True session.commit() except Exception as err: _LOGGER.error("Error executing query: %s", err) if need_rollback: session.rollback() raise finally: session.close() def commit(session, work): """Commit & retry work: Either a model or in a function.""" for _ in range(0, RETRIES): try: if callable(work): work(session) else: session.add(work) session.commit() return True except OperationalError as err: _LOGGER.error("Error executing query: %s", err) session.rollback() time.sleep(QUERY_RETRY_WAIT) return False def execute(qry, to_native=False, validate_entity_ids=True): """Query the database and convert the objects to HA native form. This method also retries a few times in the case of stale connections. 
""" for tryno in range(0, RETRIES): try: timer_start = time.perf_counter() if to_native: result = [ row for row in ( row.to_native(validate_entity_id=validate_entity_ids) for row in qry ) if row is not None ] else: result = list(qry) if _LOGGER.isEnabledFor(logging.DEBUG): elapsed = time.perf_counter() - timer_start if to_native: _LOGGER.debug( "converting %d rows to native objects took %fs", len(result), elapsed, ) else: _LOGGER.debug( "querying %d rows took %fs", len(result), elapsed, ) return result except SQLAlchemyError as err: _LOGGER.error("Error executing query: %s", err) if tryno == RETRIES - 1: raise time.sleep(QUERY_RETRY_WAIT) def validate_or_move_away_sqlite_database(dburl: str, db_integrity_check: bool) -> bool: """Ensure that the database is valid or move it away.""" dbpath = dburl[len(SQLITE_URL_PREFIX) :] if not os.path.exists(dbpath): # Database does not exist yet, this is OK return True if not validate_sqlite_database(dbpath, db_integrity_check): _move_away_broken_database(dbpath) return False return True def last_run_was_recently_clean(cursor): """Verify the last recorder run was recently clean.""" cursor.execute("SELECT end FROM recorder_runs ORDER BY start DESC LIMIT 1;") end_time = cursor.fetchone() if not end_time or not end_time[0]: return False last_run_end_time = process_timestamp(dt_util.parse_datetime(end_time[0])) now = dt_util.utcnow() _LOGGER.debug("The last run ended at: %s (now: %s)", last_run_end_time, now) if last_run_end_time + MAX_RESTART_TIME < now: return False return True def basic_sanity_check(cursor): """Check tables to make sure select does not fail.""" for table in ALL_TABLES: cursor.execute(f"SELECT * FROM {table} LIMIT 1;") # nosec # not injection return True def validate_sqlite_database(dbpath: str, db_integrity_check: bool) -> bool: """Run a quick check on an sqlite database to see if it is corrupt.""" import sqlite3 # pylint: disable=import-outside-toplevel try: conn = sqlite3.connect(dbpath) 
run_checks_on_open_db(dbpath, conn.cursor(), db_integrity_check) conn.close() except sqlite3.DatabaseError: _LOGGER.exception("The database at %s is corrupt or malformed.", dbpath) return False return True def run_checks_on_open_db(dbpath, cursor, db_integrity_check): """Run checks that will generate a sqlite3 exception if there is corruption.""" if basic_sanity_check(cursor) and last_run_was_recently_clean(cursor): _LOGGER.debug( "The quick_check will be skipped as the system was restarted cleanly and passed the basic sanity check" ) return if not db_integrity_check: # Always warn so when it does fail they remember it has # been manually disabled _LOGGER.warning( "The quick_check on the sqlite3 database at %s was skipped because %s was disabled", dbpath, CONF_DB_INTEGRITY_CHECK, ) return _LOGGER.debug( "A quick_check is being performed on the sqlite3 database at %s", dbpath ) cursor.execute("PRAGMA QUICK_CHECK") def _move_away_broken_database(dbfile: str) -> None: """Move away a broken sqlite3 database.""" isotime = dt_util.utcnow().isoformat() corrupt_postfix = f".corrupt.{isotime}" _LOGGER.error( "The system will rename the corrupt database file %s to %s in order to allow startup to proceed", dbfile, f"{dbfile}{corrupt_postfix}", ) for postfix in SQLITE3_POSTFIXES: path = f"{dbfile}{postfix}" if not os.path.exists(path): continue os.rename(path, f"{path}{corrupt_postfix}")
apache-2.0